desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def seek(self, offset, whence=0):
    """Set the input stream's current position.

    Resets the codec buffers used for keeping state.
    """
    self.stream.seek(offset, whence)
    self.reset()
def next(self):
    """Return the next decoded line from the input stream."""
    line = self.readline()
    if not line:
        # Empty read means EOF: terminate iteration.
        raise StopIteration
    return line
'Inherit all other methods from the underlying stream.'
def __getattr__(self, name, getattr=getattr):
return getattr(self.stream, name)
'Creates a StreamReaderWriter instance. stream must be a Stream-like object. Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. Error handling is done in the same way as defined for the StreamWriter/Readers.'
def __init__(self, stream, Reader, Writer, errors='strict'):
self.stream = stream self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors
def next(self):
    """Return the next decoded line from the input stream."""
    # Reading is delegated wholesale to the wrapped reader.
    return self.reader.next()
'Inherit all other methods from the underlying stream.'
def __getattr__(self, name, getattr=getattr):
return getattr(self.stream, name)
'Creates a StreamRecoder instance which implements a two-way conversion: encode and decode work on the frontend (the input to .read() and output of .write()) while Reader and Writer work on the backend (reading and writing to the stream). You can use these objects to do transparent direct recodings from e.g. latin-1 to utf-8 and back. stream must be a file-like object. encode, decode must adhere to the Codec interface, Reader, Writer must be factory functions or classes providing the StreamReader, StreamWriter interface resp. encode and decode are needed for the frontend translation, Reader and Writer for the backend translation. Unicode is used as intermediate encoding. Error handling is done in the same way as defined for the StreamWriter/Readers.'
def __init__(self, stream, encode, decode, Reader, Writer, errors='strict'):
self.stream = stream self.encode = encode self.decode = decode self.reader = Reader(stream, errors) self.writer = Writer(stream, errors) self.errors = errors
def next(self):
    """Return the next decoded line from the input stream."""
    raw = self.reader.next()
    # Re-encode to the frontend encoding; the byte count is discarded.
    encoded, bytesencoded = self.encode(raw, self.errors)
    return encoded
'Inherit all other methods from the underlying stream.'
def __getattr__(self, name, getattr=getattr):
return getattr(self.stream, name)
'Initialize an ordered dictionary. The signature is the same as regular dictionaries, but keyword arguments are not recommended because their insertion order is arbitrary.'
def __init__(*args, **kwds):
if (not args): raise TypeError("descriptor '__init__' of 'OrderedDict' object needs an argument") self = args[0] args = args[1:] if (len(args) > 1): raise TypeError(('expected at most 1 arguments, got %d' % len(args))) try: self.__root except AttributeError: self.__root = root = [] root[:] = [root, root, None] self.__map = {} self.__update(*args, **kwds)
'od.__setitem__(i, y) <==> od[i]=y'
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
if (key not in self): root = self.__root last = root[0] last[1] = root[0] = self.__map[key] = [last, root, key] return dict_setitem(self, key, value)
'od.__delitem__(y) <==> del od[y]'
def __delitem__(self, key, dict_delitem=dict.__delitem__):
dict_delitem(self, key) (link_prev, link_next, _) = self.__map.pop(key) link_prev[1] = link_next link_next[0] = link_prev
'od.__iter__() <==> iter(od)'
def __iter__(self):
root = self.__root curr = root[1] while (curr is not root): (yield curr[2]) curr = curr[1]
'od.__reversed__() <==> reversed(od)'
def __reversed__(self):
root = self.__root curr = root[0] while (curr is not root): (yield curr[2]) curr = curr[0]
def clear(self):
    'od.clear() -> None.  Remove all items from od.'
    # Reset the sentinel to point at itself, then drop the key map and
    # the underlying dict contents.
    root = self.__root
    root[:] = [root, root, None]
    self.__map.clear()
    dict.clear(self)
def keys(self):
    'od.keys() -> list of keys in od'
    return [key for key in self]
def values(self):
    'od.values() -> list of values in od'
    result = []
    for key in self:
        result.append(self[key])
    return result
def items(self):
    'od.items() -> list of (key, value) pairs in od'
    result = []
    for key in self:
        result.append((key, self[key]))
    return result
def iterkeys(self):
    'od.iterkeys() -> an iterator over the keys in od'
    # Iterating the mapping itself already yields keys in order.
    return iter(self)
def itervalues(self):
    'od.itervalues -> an iterator over the values in od'
    for key in self:
        yield self[key]
def iteritems(self):
    'od.iteritems -> an iterator over the (key, value) pairs in od'
    for key in self:
        yield (key, self[key])
def pop(self, key, default=__marker):
    '''od.pop(k[,d]) -> v, remove specified key and return the corresponding
    value.  If key is not found, d is returned if given, otherwise KeyError
    is raised.
    '''
    # __marker is a class-level sentinel distinguishing "no default given"
    # from an explicit default of None.
    if key in self:
        value = self[key]
        del self[key]
        return value
    if default is self.__marker:
        raise KeyError(key)
    return default
def setdefault(self, key, default=None):
    'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
    if key not in self:
        self[key] = default
        return default
    return self[key]
def popitem(self, last=True):
    '''od.popitem() -> (k, v), return and remove a (key, value) pair.
    Pairs are returned in LIFO order if last is true or FIFO order if false.
    '''
    if not self:
        raise KeyError('dictionary is empty')
    # Pick the key at the chosen end, then remove it via pop().
    key = next(reversed(self)) if last else next(iter(self))
    value = self.pop(key)
    return key, value
def __repr__(self, _repr_running={}):
    'od.__repr__() <==> repr(od)'
    # _repr_running is a deliberately shared mutable default: it records
    # (object id, thread id) pairs currently being repr'd so that
    # self-referential dictionaries print '...' instead of recursing forever.
    call_key = (id(self), _get_ident())
    if call_key in _repr_running:
        return '...'
    _repr_running[call_key] = 1
    try:
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, self.items())
    finally:
        del _repr_running[call_key]
def __reduce__(self):
    'Return state information for pickling'
    items = [[k, self[k]] for k in self]
    # Strip the bookkeeping attributes an empty OrderedDict would also
    # have, so only user-added instance state is pickled.
    inst_dict = vars(self).copy()
    for k in vars(OrderedDict()):
        inst_dict.pop(k, None)
    if inst_dict:
        return (self.__class__, (items,), inst_dict)
    return (self.__class__, (items,))
def copy(self):
    'od.copy() -> a shallow copy of od'
    # Rebuild through the constructor so ordering is preserved.
    return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
    '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S.
    If not specified, the value defaults to None.
    '''
    d = cls()
    for key in iterable:
        d[key] = value
    return d
def __eq__(self, other):
    '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
    while comparison to a regular mapping is order-insensitive.
    '''
    if isinstance(other, OrderedDict):
        # Equal contents AND equal key order (pairwise key comparison).
        return dict.__eq__(self, other) and all(_imap(_eq, self, other))
    return dict.__eq__(self, other)
'od.__ne__(y) <==> od!=y'
def __ne__(self, other):
return (not (self == other))
def viewkeys(self):
    "od.viewkeys() -> a set-like object providing a view on od's keys"
    return KeysView(self)
def viewvalues(self):
    "od.viewvalues() -> an object providing a view on od's values"
    return ValuesView(self)
def viewitems(self):
    "od.viewitems() -> a set-like object providing a view on od's items"
    return ItemsView(self)
def __init__(*args, **kwds):
    '''Create a new, empty Counter object.  And if given, count elements
    from an input iterable.  Or, initialize the count from another mapping
    of elements to their counts.

    >>> c = Counter()                 # a new, empty counter
    >>> c = Counter('gallahad')       # a new counter from an iterable
    >>> c = Counter({'a': 4, 'b': 2}) # a new counter from a mapping
    >>> c = Counter(a=4, b=2)         # a new counter from keyword args
    '''
    # Signature is *args so an explicit 'self' keyword argument still works.
    if not args:
        raise TypeError("descriptor '__init__' of 'Counter' object needs an argument")
    self, args = args[0], args[1:]
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    super(Counter, self).__init__()
    self.update(*args, **kwds)
'The count of elements not in the Counter is zero.'
def __missing__(self, key):
return 0
def most_common(self, n=None):
    '''List the n most common elements and their counts from the most
    common to the least.  If n is None, then list all element counts.

    >>> Counter('abcdeabcdabcaba').most_common(3)
    [('a', 5), ('b', 4), ('c', 3)]
    '''
    # Sorting everything is cheaper when the full list is wanted; a heap
    # is cheaper when only the top n are needed.
    if n is None:
        return sorted(self.iteritems(), key=_itemgetter(1), reverse=True)
    return _heapq.nlargest(n, self.iteritems(), key=_itemgetter(1))
def elements(self):
    '''Iterator over elements repeating each as many times as its count.

    >>> c = Counter('ABCABC')
    >>> sorted(c.elements())
    ['A', 'A', 'B', 'B', 'C', 'C']

    Note, if an element's count has been set to zero or is a negative
    number, elements() will ignore it.
    '''
    # repeat(elem, count) emits nothing for count <= 0, which is what
    # silently skips zero/negative counts.
    return _chain.from_iterable(_starmap(_repeat, self.iteritems()))
def update(*args, **kwds):
    '''Like dict.update() but add counts instead of replacing them.

    Source can be an iterable, a dictionary, or another Counter instance.

    >>> c = Counter('which')
    >>> c.update('witch')           # add elements from another iterable
    >>> d = Counter('watch')
    >>> c.update(d)                 # add elements from another counter
    >>> c['h']                      # four 'h' in which, witch, and watch
    4
    '''
    # Signature is *args so an explicit 'self' keyword argument still works.
    if not args:
        raise TypeError("descriptor 'update' of 'Counter' object needs an argument")
    self, args = args[0], args[1:]
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    iterable = args[0] if args else None
    if iterable is not None:
        if isinstance(iterable, Mapping):
            if self:
                get_count = self.get
                for elem, count in iterable.iteritems():
                    self[elem] = get_count(elem, 0) + count
            else:
                # Fast path: copying into an empty counter is a plain
                # dict update.
                super(Counter, self).update(iterable)
        else:
            get_count = self.get
            for elem in iterable:
                self[elem] = get_count(elem, 0) + 1
    if kwds:
        self.update(kwds)
def subtract(*args, **kwds):
    '''Like dict.update() but subtracts counts instead of replacing them.
    Counts can be reduced below zero.  Both the inputs and outputs are
    allowed to contain zero and negative counts.

    Source can be an iterable, a dictionary, or another Counter instance.

    >>> c = Counter('which')
    >>> c.subtract('witch')             # subtract elements from another iterable
    >>> c.subtract(Counter('watch'))    # subtract elements from another counter
    >>> c['h']                          # 2 in which, minus 1 in witch, minus 1 in watch
    0
    '''
    # Signature is *args so an explicit 'self' keyword argument still works.
    if not args:
        raise TypeError("descriptor 'subtract' of 'Counter' object needs an argument")
    self, args = args[0], args[1:]
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    iterable = args[0] if args else None
    if iterable is not None:
        get_count = self.get
        if isinstance(iterable, Mapping):
            for elem, count in iterable.items():
                self[elem] = get_count(elem, 0) - count
        else:
            for elem in iterable:
                self[elem] = get_count(elem, 0) - 1
    if kwds:
        self.subtract(kwds)
def copy(self):
    'Return a shallow copy.'
    return self.__class__(self)
def __delitem__(self, elem):
    'Like dict.__delitem__() but does not raise KeyError for missing values.'
    # Deleting an absent element is a silent no-op, matching the
    # "missing count is zero" semantics of Counter.
    if elem in self:
        super(Counter, self).__delitem__(elem)
def __add__(self, other):
    '''Add counts from two counters.

    >>> Counter('abbb') + Counter('bcc')
    Counter({'b': 4, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    # Only strictly positive sums appear in the result.
    result = Counter()
    for elem, count in self.items():
        total = count + other[elem]
        if total > 0:
            result[elem] = total
    for elem, count in other.items():
        if elem not in self and count > 0:
            result[elem] = count
    return result
def __sub__(self, other):
    '''Subtract count, but keep only results with positive counts.

    >>> Counter('abbbc') - Counter('bccd')
    Counter({'b': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    result = Counter()
    for elem, count in self.items():
        diff = count - other[elem]
        if diff > 0:
            result[elem] = diff
    # Elements only in `other` contribute when their negated count is
    # positive (i.e. other's count is negative).
    for elem, count in other.items():
        if elem not in self and count < 0:
            result[elem] = 0 - count
    return result
def __or__(self, other):
    '''Union is the maximum of value in either of the input counters.

    >>> Counter('abbb') | Counter('bcc')
    Counter({'b': 3, 'c': 2, 'a': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    result = Counter()
    for elem, count in self.items():
        other_count = other[elem]
        biggest = other_count if count < other_count else count
        if biggest > 0:
            result[elem] = biggest
    for elem, count in other.items():
        if elem not in self and count > 0:
            result[elem] = count
    return result
def __and__(self, other):
    '''Intersection is the minimum of corresponding counts.

    >>> Counter('abbb') & Counter('bcc')
    Counter({'b': 1})
    '''
    if not isinstance(other, Counter):
        return NotImplemented
    # Only keys present in self can appear in the minimum, so one pass
    # over self suffices.
    result = Counter()
    for elem, count in self.items():
        other_count = other[elem]
        smallest = count if count < other_count else other_count
        if smallest > 0:
            result[elem] = smallest
    return result
def FileHeader(self, zip64=None):
    '''Return the per-file header as a string.

    If zip64 is None, the need for a ZIP64 extra record is inferred from
    the file sizes; passing True/False forces it on/off.  Raises
    LargeZipFile if the sizes need ZIP64 but zip64 was forced off.
    '''
    dt = self.date_time
    # Pack date/time into MS-DOS format (2-second time resolution).
    dosdate = ((dt[0] - 1980) << 9) | (dt[1] << 5) | dt[2]
    dostime = (dt[3] << 11) | (dt[4] << 5) | (dt[5] // 2)
    if self.flag_bits & 8:
        # Sizes and CRC are written after the file data (data descriptor);
        # set them to zero here.
        CRC = compress_size = file_size = 0
    else:
        CRC = self.CRC
        compress_size = self.compress_size
        file_size = self.file_size
    extra = self.extra
    if zip64 is None:
        zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
    if zip64:
        fmt = '<HHQQ'
        extra = extra + struct.pack(fmt, 1, struct.calcsize(fmt) - 4,
                                    file_size, compress_size)
    if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
        if not zip64:
            raise LargeZipFile('Filesize would require ZIP64 extensions')
        # Real sizes live in the ZIP64 extra record; the classic fields
        # hold the 0xffffffff sentinel.
        file_size = 4294967295
        compress_size = 4294967295
        self.extract_version = max(45, self.extract_version)
        # BUGFIX: was max(45, self.extract_version) — clamped the wrong
        # field; must clamp create_version (as fixed in Python 3 zipfile).
        self.create_version = max(45, self.create_version)
    filename, flag_bits = self._encodeFilenameFlags()
    header = struct.pack(structFileHeader, stringFileHeader,
                         self.extract_version, self.reserved, flag_bits,
                         self.compress_type, dostime, dosdate, CRC,
                         compress_size, file_size,
                         len(filename), len(extra))
    return header + filename + extra
'Generate a CRC-32 table. ZIP encryption uses the CRC32 one-byte primitive for scrambling some internal keys. We noticed that a direct implementation is faster than relying on binascii.crc32().'
def _GenerateCRCTable():
poly = 3988292384 table = ([0] * 256) for i in range(256): crc = i for j in range(8): if (crc & 1): crc = (((crc >> 1) & 2147483647) ^ poly) else: crc = ((crc >> 1) & 2147483647) table[i] = crc return table
'Compute the CRC32 primitive on one byte.'
def _crc32(self, ch, crc):
return (((crc >> 8) & 16777215) ^ self.crctable[((crc ^ ord(ch)) & 255)])
'Decrypt a single character.'
def __call__(self, c):
c = ord(c) k = (self.key2 | 2) c = (c ^ (((k * (k ^ 1)) >> 8) & 255)) c = chr(c) self._UpdateKeys(c) return c
'Read and return a line from the stream. If limit is specified, at most limit bytes will be read.'
def readline(self, limit=(-1)):
if ((not self._universal) and (limit < 0)): i = (self._readbuffer.find('\n', self._offset) + 1) if (i > 0): line = self._readbuffer[self._offset:i] self._offset = i return line if (not self._universal): return io.BufferedIOBase.readline(self, limit) line = '' while ((limit < 0) or (len(line) < limit)): readahead = self.peek(2) if (readahead == ''): return line match = self.PATTERN.search(readahead) newline = match.group('newline') if (newline is not None): if (self.newlines is None): self.newlines = [] if (newline not in self.newlines): self.newlines.append(newline) self._offset += len(newline) return (line + '\n') chunk = match.group('chunk') if (limit >= 0): chunk = chunk[:(limit - len(line))] self._offset += len(chunk) line += chunk return line
def peek(self, n=1):
    'Returns buffered bytes without advancing the position.'
    buffered = len(self._readbuffer) - self._offset
    if n > buffered:
        # Pull more data, then push it back onto the front of the buffer
        # so the stream position is unchanged for the caller.
        chunk = self.read(n)
        if len(chunk) > self._offset:
            self._readbuffer = chunk + self._readbuffer[self._offset:]
            self._offset = 0
        else:
            self._offset -= len(chunk)
    # Return up to 512 bytes to reduce allocation overhead for enormous buffers.
    return self._readbuffer[self._offset:self._offset + 512]
def read(self, n=-1):
    '''Read and return up to n bytes.

    If the argument is omitted, None, or negative, data is read and
    returned until EOF is reached.
    '''
    buf = ''
    if n is None:
        n = -1
    while True:
        if n < 0:
            data = self.read1(n)
        elif n > len(buf):
            data = self.read1(n - len(buf))
        else:
            return buf
        if len(data) == 0:
            # EOF reached before satisfying the request.
            return buf
        buf += data
def read1(self, n):
    'Read up to n bytes with at most one read() system call.'
    # Simplify the loop-exit logic: a negative/None n means "as much as
    # the implementation cap allows".
    if n < 0 or n is None:
        n = self.MAX_N
    len_readbuffer = len(self._readbuffer) - self._offset
    if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
        nbytes = n - len_readbuffer - len(self._unconsumed)
        nbytes = max(nbytes, self.MIN_READ_SIZE)
        nbytes = min(nbytes, self._compress_left)
        data = self._fileobj.read(nbytes)
        self._compress_left -= len(data)
        if data and self._decrypter is not None:
            data = ''.join(map(self._decrypter, data))
        if self._compress_type == ZIP_STORED:
            # Stored data is ready to serve directly from the buffer.
            self._update_crc(data, eof=(self._compress_left == 0))
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0
        else:
            # Deflated data is staged until the decompressor consumes it.
            self._unconsumed += data
    if (len(self._unconsumed) > 0 and n > len_readbuffer
            and self._compress_type == ZIP_DEFLATED):
        data = self._decompressor.decompress(
            self._unconsumed, max(n - len_readbuffer, self.MIN_READ_SIZE))
        self._unconsumed = self._decompressor.unconsumed_tail
        eof = len(self._unconsumed) == 0 and self._compress_left == 0
        if eof:
            data += self._decompressor.flush()
        self._update_crc(data, eof=eof)
        self._readbuffer = self._readbuffer[self._offset:] + data
        self._offset = 0
    data = self._readbuffer[self._offset:self._offset + n]
    self._offset += len(data)
    return data
def __init__(self, file, mode='r', compression=ZIP_STORED, allowZip64=False):
    '''Open the ZIP file with mode read "r", write "w" or append "a".

    file may be a path or a file-like object.  Raises RuntimeError for a
    bad mode or unavailable compression, IOError if a path cannot be
    opened.  Py2-only `raise X, msg` forms replaced with the
    parenthesized call form (identical behavior, valid in py2 and py3).
    '''
    if mode not in ('r', 'w', 'a'):
        raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
    if compression == ZIP_STORED:
        pass
    elif compression == ZIP_DEFLATED:
        if not zlib:
            raise RuntimeError('Compression requires the (missing) zlib module')
    else:
        raise RuntimeError('That compression method is not supported')
    self._allowZip64 = allowZip64
    self._didModify = False
    self.debug = 0  # Level of printing: 0 through 3
    self.NameToInfo = {}  # Find file info given name
    self.filelist = []  # List of ZipInfo instances for archive
    self.compression = compression  # Method of compression
    self.mode = key = mode.replace('b', '')[0]
    self.pwd = None
    self._comment = ''
    # Check if we were passed a file-like object
    if isinstance(file, basestring):
        self._filePassed = 0
        self.filename = file
        modeDict = {'r': 'rb', 'w': 'wb', 'a': 'r+b'}
        try:
            self.fp = open(file, modeDict[mode])
        except IOError:
            if mode == 'a':
                # Append to a nonexistent file: fall back to write mode.
                mode = key = 'w'
                self.fp = open(file, modeDict[mode])
            else:
                raise
    else:
        self._filePassed = 1
        self.fp = file
        self.filename = getattr(file, 'name', None)
    try:
        if key == 'r':
            self._RealGetContents()
        elif key == 'w':
            # set the modified flag so central directory gets written
            # even if no files are added to the archive
            self._didModify = True
        elif key == 'a':
            try:
                # See if file is a zip file
                self._RealGetContents()
                # seek to start of directory and overwrite
                self.fp.seek(self.start_dir, 0)
            except BadZipfile:
                # file is not a zip file, just append
                self.fp.seek(0, 2)
                self._didModify = True
        else:
            raise RuntimeError('Mode must be "r", "w" or "a"')
    except:
        # Never leak the file handle we opened ourselves.
        fp = self.fp
        self.fp = None
        if not self._filePassed:
            fp.close()
        raise
'Read in the table of contents for the ZIP file.'
def _RealGetContents(self):
fp = self.fp try: endrec = _EndRecData(fp) except IOError: raise BadZipfile('File is not a zip file') if (not endrec): raise BadZipfile, 'File is not a zip file' if (self.debug > 1): print endrec size_cd = endrec[_ECD_SIZE] offset_cd = endrec[_ECD_OFFSET] self._comment = endrec[_ECD_COMMENT] concat = ((endrec[_ECD_LOCATION] - size_cd) - offset_cd) if (endrec[_ECD_SIGNATURE] == stringEndArchive64): concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) if (self.debug > 2): inferred = (concat + offset_cd) print 'given, inferred, offset', offset_cd, inferred, concat self.start_dir = (offset_cd + concat) fp.seek(self.start_dir, 0) data = fp.read(size_cd) fp = cStringIO.StringIO(data) total = 0 while (total < size_cd): centdir = fp.read(sizeCentralDir) if (len(centdir) != sizeCentralDir): raise BadZipfile('Truncated central directory') centdir = struct.unpack(structCentralDir, centdir) if (centdir[_CD_SIGNATURE] != stringCentralDir): raise BadZipfile('Bad magic number for central directory') if (self.debug > 2): print centdir filename = fp.read(centdir[_CD_FILENAME_LENGTH]) x = ZipInfo(filename) x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] (x.create_version, x.create_system, x.extract_version, x.reserved, x.flag_bits, x.compress_type, t, d, x.CRC, x.compress_size, x.file_size) = centdir[1:12] (x.volume, x.internal_attr, x.external_attr) = centdir[15:18] x._raw_time = t x.date_time = (((d >> 9) + 1980), ((d >> 5) & 15), (d & 31), (t >> 11), ((t >> 5) & 63), ((t & 31) * 2)) x._decodeExtra() x.header_offset = (x.header_offset + concat) x.filename = x._decodeFilename() self.filelist.append(x) self.NameToInfo[x.filename] = x total = ((((total + sizeCentralDir) + centdir[_CD_FILENAME_LENGTH]) + centdir[_CD_EXTRA_FIELD_LENGTH]) + centdir[_CD_COMMENT_LENGTH]) if (self.debug > 2): print 'total', total
def namelist(self):
    'Return a list of file names in the archive.'
    return [entry.filename for entry in self.filelist]
def infolist(self):
    'Return a list of class ZipInfo instances for files in the archive.'
    # The internal list is returned directly (not a copy).
    return self.filelist
def printdir(self):
    'Print a table of contents for the zip file.'
    print ('%-46s %19s %12s' % ('File Name', 'Modified    ', 'Size'))
    for zinfo in self.filelist:
        date = '%d-%02d-%02d %02d:%02d:%02d' % zinfo.date_time[:6]
        print ('%-46s %s %12d' % (zinfo.filename, date, zinfo.file_size))
'Read all the files and check the CRC.'
def testzip(self):
chunk_size = (2 ** 20) for zinfo in self.filelist: try: with self.open(zinfo.filename, 'r') as f: while f.read(chunk_size): pass except BadZipfile: return zinfo.filename
def getinfo(self, name):
    "Return the instance of ZipInfo given 'name'."
    info = self.NameToInfo.get(name)
    if info is None:
        raise KeyError('There is no item named %r in the archive' % name)
    return info
def setpassword(self, pwd):
    'Set default password for encrypted files.'
    self.pwd = pwd
@property
def comment(self):
    'The comment text associated with the ZIP file.'
    return self._comment
def read(self, name, pwd=None):
    'Return file bytes (as a string) for name.'
    member = self.open(name, 'r', pwd)
    return member.read()
def open(self, name, mode='r', pwd=None):
    '''Return file-like object for 'name'.

    name may be a member name or a ZipInfo.  pwd overrides the archive
    default password for encrypted members.  Py2-only `raise X, msg`
    forms replaced with the parenthesized call form (identical behavior);
    the method already used the parenthesized form elsewhere.
    '''
    if mode not in ('r', 'U', 'rU'):
        raise RuntimeError('open() requires mode "r", "U", or "rU"')
    if not self.fp:
        raise RuntimeError('Attempt to read ZIP archive that was already closed')
    # Only open a new handle when we own the file; a caller-supplied
    # file object is shared and must not be closed here.
    if self._filePassed:
        zef_file = self.fp
        should_close = False
    else:
        zef_file = open(self.filename, 'rb')
        should_close = True
    try:
        # Make sure we have an info object
        if isinstance(name, ZipInfo):
            zinfo = name
        else:
            zinfo = self.getinfo(name)
        zef_file.seek(zinfo.header_offset, 0)
        # Skip the file header:
        fheader = zef_file.read(sizeFileHeader)
        if len(fheader) != sizeFileHeader:
            raise BadZipfile('Truncated file header')
        fheader = struct.unpack(structFileHeader, fheader)
        if fheader[_FH_SIGNATURE] != stringFileHeader:
            raise BadZipfile('Bad magic number for file header')
        fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
        if fheader[_FH_EXTRA_FIELD_LENGTH]:
            zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
        if fname != zinfo.orig_filename:
            raise BadZipfile('File name in directory "%s" and header "%s" differ.' % (zinfo.orig_filename, fname))
        # check for encrypted flag & handle password
        is_encrypted = zinfo.flag_bits & 0x1
        zd = None
        if is_encrypted:
            if not pwd:
                pwd = self.pwd
            if not pwd:
                raise RuntimeError('File %s is encrypted, password required for extraction' % name)
            zd = _ZipDecrypter(pwd)
            # The first 12 bytes in the cypher stream is an encryption header
            # used to check password correctness against a single check byte.
            bytes = zef_file.read(12)
            h = map(zd, bytes[0:12])
            if zinfo.flag_bits & 0x8:
                # compare against the high byte of the MSDOS time field
                check_byte = (zinfo._raw_time >> 8) & 0xff
            else:
                # compare against the high byte of the CRC
                check_byte = (zinfo.CRC >> 24) & 0xff
            if ord(h[11]) != check_byte:
                raise RuntimeError('Bad password for file', name)
        return ZipExtFile(zef_file, mode, zinfo, zd, close_fileobj=should_close)
    except:
        if should_close:
            zef_file.close()
        raise
def extract(self, member, path=None, pwd=None):
    """Extract a member from the archive to the current working directory,
    using its full name.  Its file information is extracted as accurately
    as possible.  `member' may be a filename or a ZipInfo object.  You can
    specify a different directory using `path'.
    """
    if not isinstance(member, ZipInfo):
        member = self.getinfo(member)
    if path is None:
        path = os.getcwd()
    return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
    """Extract all members from the archive to the current working
    directory.  `path' specifies a different directory to extract to.
    `members' is optional and must be a subset of the list returned
    by namelist().
    """
    if members is None:
        members = self.namelist()
    for zipinfo in members:
        self.extract(zipinfo, path, pwd)
def _extract_member(self, member, targetpath, pwd):
    """Extract the ZipInfo object 'member' to a physical file on the path
    targetpath.  Returns the path actually written.

    Fix: the py2-only `file(targetpath, 'wb')` builtin is replaced with
    the canonical `open()` (identical behavior in py2; `file` was removed
    in py3).
    """
    # build the destination pathname, replacing forward slashes to
    # platform specific separators.
    arcname = member.filename.replace('/', os.path.sep)
    if os.path.altsep:
        arcname = arcname.replace(os.path.altsep, os.path.sep)
    # interpret absolute pathname as relative, remove drive letter or
    # UNC path, and drop "" / "." / ".." components (path traversal guard).
    arcname = os.path.splitdrive(arcname)[1]
    arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                               if x not in ('', os.path.curdir, os.path.pardir))
    if os.path.sep == '\\':
        # filter illegal characters on Windows
        illegal = ':<>|"?*'
        if isinstance(arcname, unicode):
            table = {ord(c): ord('_') for c in illegal}
        else:
            table = string.maketrans(illegal, '_' * len(illegal))
        arcname = arcname.translate(table)
        # remove trailing dots
        arcname = (x.rstrip('.') for x in arcname.split(os.path.sep))
        arcname = os.path.sep.join(x for x in arcname if x)
    targetpath = os.path.join(targetpath, arcname)
    targetpath = os.path.normpath(targetpath)
    # Create all upper directories if necessary.
    upperdirs = os.path.dirname(targetpath)
    if upperdirs and not os.path.exists(upperdirs):
        os.makedirs(upperdirs)
    if member.filename[-1] == '/':
        # Directory entry: just ensure the directory exists.
        if not os.path.isdir(targetpath):
            os.mkdir(targetpath)
        return targetpath
    with self.open(member, pwd=pwd) as source:
        with open(targetpath, 'wb') as target:
            shutil.copyfileobj(source, target)
    return targetpath
def _writecheck(self, zinfo):
    '''Check for errors before writing a file to the archive.

    Raises RuntimeError for mode/compression problems and LargeZipFile
    when a limit needs ZIP64 but it is disabled.  Py2-only `raise X, msg`
    forms replaced with the parenthesized call form (identical behavior).
    '''
    if zinfo.filename in self.NameToInfo:
        import warnings
        warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3)
    if self.mode not in ('w', 'a'):
        raise RuntimeError('write() requires mode "w" or "a"')
    if not self.fp:
        raise RuntimeError('Attempt to write ZIP archive that was already closed')
    if zinfo.compress_type == ZIP_DEFLATED and not zlib:
        raise RuntimeError('Compression requires the (missing) zlib module')
    if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
        raise RuntimeError('That compression method is not supported')
    if not self._allowZip64:
        # Identify which limit (if any) forces ZIP64 so the error names it.
        requires_zip64 = None
        if len(self.filelist) >= ZIP_FILECOUNT_LIMIT:
            requires_zip64 = 'Files count'
        elif zinfo.file_size > ZIP64_LIMIT:
            requires_zip64 = 'Filesize'
        elif zinfo.header_offset > ZIP64_LIMIT:
            requires_zip64 = 'Zipfile size'
        if requires_zip64:
            raise LargeZipFile(requires_zip64 + ' would require ZIP64 extensions')
def write(self, filename, arcname=None, compress_type=None):
    '''Put the bytes from filename into the archive under the name arcname.

    Directories get a trailing "/" entry with no data.  For regular files
    the local header is written with provisional CRC/sizes and rewritten
    in place once the real values are known.  Fix: dropped the py2-only
    `16L` long-literal suffix (`<< 16` is value-identical in py2).
    '''
    if not self.fp:
        raise RuntimeError('Attempt to write to ZIP archive that was already closed')
    st = os.stat(filename)
    isdir = stat.S_ISDIR(st.st_mode)
    mtime = time.localtime(st.st_mtime)
    date_time = mtime[0:6]
    # Create ZipInfo instance to store file information
    if arcname is None:
        arcname = filename
    arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
    while arcname[0] in (os.sep, os.altsep):
        arcname = arcname[1:]
    if isdir:
        arcname += '/'
    zinfo = ZipInfo(arcname, date_time)
    zinfo.external_attr = (st[0] & 65535) << 16  # Unix attributes
    if isdir:
        zinfo.compress_type = ZIP_STORED
    elif compress_type is None:
        zinfo.compress_type = self.compression
    else:
        zinfo.compress_type = compress_type
    zinfo.file_size = st.st_size
    zinfo.flag_bits = 0
    zinfo.header_offset = self.fp.tell()  # Start of header bytes
    self._writecheck(zinfo)
    self._didModify = True
    if isdir:
        zinfo.file_size = 0
        zinfo.compress_size = 0
        zinfo.CRC = 0
        zinfo.external_attr |= 16  # MS-DOS directory flag
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
        self.fp.write(zinfo.FileHeader(False))
        return
    with open(filename, 'rb') as fp:
        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        # Compressed size can be larger than uncompressed size; leave
        # 5% headroom when deciding on ZIP64 up front.
        zip64 = self._allowZip64 and zinfo.file_size * 1.05 > ZIP64_LIMIT
        self.fp.write(zinfo.FileHeader(zip64))
        if zinfo.compress_type == ZIP_DEFLATED:
            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
        else:
            cmpr = None
        file_size = 0
        while 1:
            buf = fp.read(1024 * 8)
            if not buf:
                break
            file_size = file_size + len(buf)
            CRC = crc32(buf, CRC) & 4294967295
            if cmpr:
                buf = cmpr.compress(buf)
                compress_size = compress_size + len(buf)
            self.fp.write(buf)
    if cmpr:
        buf = cmpr.flush()
        compress_size = compress_size + len(buf)
        self.fp.write(buf)
        zinfo.compress_size = compress_size
    else:
        zinfo.compress_size = file_size
    zinfo.CRC = CRC
    zinfo.file_size = file_size
    if not zip64 and self._allowZip64:
        if file_size > ZIP64_LIMIT:
            raise RuntimeError('File size has increased during compressing')
        if compress_size > ZIP64_LIMIT:
            raise RuntimeError('Compressed size larger than uncompressed size')
    # Seek backwards and write file header (which will now include
    # correct CRC and file sizes)
    position = self.fp.tell()  # Preserve current position in file
    self.fp.seek(zinfo.header_offset, 0)
    self.fp.write(zinfo.FileHeader(zip64))
    self.fp.seek(position, 0)
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
    '''Write a file into the archive.  The contents is the string 'bytes'.
    'zinfo_or_arcname' is either a ZipInfo instance or the name of the
    file in the archive.
    '''
    if not isinstance(zinfo_or_arcname, ZipInfo):
        # Build a ZipInfo from the name, stamped with the current time.
        zinfo = ZipInfo(filename=zinfo_or_arcname,
                        date_time=time.localtime(time.time())[:6])
        zinfo.compress_type = self.compression
        if zinfo.filename[-1] == '/':
            zinfo.external_attr = 16893 << 16   # drwxrwxr-x
            zinfo.external_attr |= 16           # MS-DOS directory flag
        else:
            zinfo.external_attr = 384 << 16     # ?rw-------
    else:
        zinfo = zinfo_or_arcname
    if not self.fp:
        raise RuntimeError('Attempt to write to ZIP archive that was already closed')
    if compress_type is not None:
        zinfo.compress_type = compress_type
    zinfo.file_size = len(bytes)
    zinfo.header_offset = self.fp.tell()  # Start of header bytes
    self._writecheck(zinfo)
    self._didModify = True
    zinfo.CRC = crc32(bytes) & 4294967295
    if zinfo.compress_type == ZIP_DEFLATED:
        co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
        bytes = co.compress(bytes) + co.flush()
        zinfo.compress_size = len(bytes)
    else:
        zinfo.compress_size = zinfo.file_size
    zip64 = (zinfo.file_size > ZIP64_LIMIT or
             zinfo.compress_size > ZIP64_LIMIT)
    if zip64 and not self._allowZip64:
        raise LargeZipFile('Filesize would require ZIP64 extensions')
    self.fp.write(zinfo.FileHeader(zip64))
    self.fp.write(bytes)
    if zinfo.flag_bits & 8:
        # Write CRC and file sizes after the file data
        fmt = '<LQQ' if zip64 else '<LLL'
        self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
                                  zinfo.file_size))
    self.fp.flush()
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
'Call the "close()" method in case the user forgot.'
def __del__(self):
self.close()
def close(self):
    """Close the file, and for mode "w" and "a" write the ending records."""
    if (self.fp is None):
        # Already closed: idempotent.
        return
    try:
        if ((self.mode in ('w', 'a')) and self._didModify):
            # Write the central directory: one record per archive member.
            pos1 = self.fp.tell()
            for zinfo in self.filelist:
                dt = zinfo.date_time
                # Pack the timestamp into MS-DOS date/time fields
                # (seconds have 2-second resolution).
                dosdate = ((((dt[0] - 1980) << 9) | (dt[1] << 5)) | dt[2])
                dostime = (((dt[3] << 11) | (dt[4] << 5)) | (dt[5] // 2))
                extra = []
                if ((zinfo.file_size > ZIP64_LIMIT) or (zinfo.compress_size > ZIP64_LIMIT)):
                    # Sizes overflow 32 bits: real values go into the
                    # ZIP64 extra record, sentinel 0xFFFFFFFF in the header.
                    extra.append(zinfo.file_size)
                    extra.append(zinfo.compress_size)
                    file_size = 4294967295
                    compress_size = 4294967295
                else:
                    file_size = zinfo.file_size
                    compress_size = zinfo.compress_size
                if (zinfo.header_offset > ZIP64_LIMIT):
                    extra.append(zinfo.header_offset)
                    header_offset = 4294967295L
                else:
                    header_offset = zinfo.header_offset
                extra_data = zinfo.extra
                if extra:
                    # Prepend the ZIP64 extra field (tag 1) and bump the
                    # required format versions to 4.5.
                    extra_data = (struct.pack(('<HH' + ('Q' * len(extra))), 1, (8 * len(extra)), *extra) + extra_data)
                    extract_version = max(45, zinfo.extract_version)
                    create_version = max(45, zinfo.create_version)
                else:
                    extract_version = zinfo.extract_version
                    create_version = zinfo.create_version
                try:
                    (filename, flag_bits) = zinfo._encodeFilenameFlags()
                    centdir = struct.pack(structCentralDir, stringCentralDir, create_version, zinfo.create_system, extract_version, zinfo.reserved, flag_bits, zinfo.compress_type, dostime, dosdate, zinfo.CRC, compress_size, file_size, len(filename), len(extra_data), len(zinfo.comment), 0, zinfo.internal_attr, zinfo.external_attr, header_offset)
                except DeprecationWarning:
                    # struct complains about out-of-range values: dump the
                    # offending record for debugging, then re-raise.
                    print >>sys.stderr, (structCentralDir, stringCentralDir, create_version, zinfo.create_system, extract_version, zinfo.reserved, zinfo.flag_bits, zinfo.compress_type, dostime, dosdate, zinfo.CRC, compress_size, file_size, len(zinfo.filename), len(extra_data), len(zinfo.comment), 0, zinfo.internal_attr, zinfo.external_attr, header_offset)
                    raise
                self.fp.write(centdir)
                self.fp.write(filename)
                self.fp.write(extra_data)
                self.fp.write(zinfo.comment)
            pos2 = self.fp.tell()
            centDirCount = len(self.filelist)
            centDirSize = (pos2 - pos1)
            centDirOffset = pos1
            # Decide whether the end-of-archive records need ZIP64 form.
            requires_zip64 = None
            if (centDirCount > ZIP_FILECOUNT_LIMIT):
                requires_zip64 = 'Files count'
            elif (centDirOffset > ZIP64_LIMIT):
                requires_zip64 = 'Central directory offset'
            elif (centDirSize > ZIP64_LIMIT):
                requires_zip64 = 'Central directory size'
            if requires_zip64:
                if (not self._allowZip64):
                    raise LargeZipFile((requires_zip64 + ' would require ZIP64 extensions'))
                # Write ZIP64 end-of-central-directory record and locator.
                zip64endrec = struct.pack(structEndArchive64, stringEndArchive64, 44, 45, 45, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset)
                self.fp.write(zip64endrec)
                zip64locrec = struct.pack(structEndArchive64Locator, stringEndArchive64Locator, 0, pos2, 1)
                self.fp.write(zip64locrec)
                # Clamp the classic record's fields to their sentinels.
                centDirCount = min(centDirCount, 65535)
                centDirSize = min(centDirSize, 4294967295)
                centDirOffset = min(centDirOffset, 4294967295)
            endrec = struct.pack(structEndArchive, stringEndArchive, 0, 0, centDirCount, centDirCount, centDirSize, centDirOffset, len(self._comment))
            self.fp.write(endrec)
            self.fp.write(self._comment)
            self.fp.flush()
    finally:
        # Always drop our reference; only close the stream if we opened it.
        fp = self.fp
        self.fp = None
        if (not self._filePassed):
            fp.close()
def writepy(self, pathname, basename=''):
    """Add all files from "pathname" to the ZIP archive.

    If pathname is a package directory, search the directory and all
    package subdirectories recursively for all *.py and enter the
    modules into the archive.  If pathname is a plain directory, listdir
    *.py and enter all modules.  Else, pathname must be a Python *.py
    file and the module will be put into the archive.  Added modules are
    always module.pyo or module.pyc.  This method will compile the
    module.py into module.pyc if necessary.
    """
    (dir, name) = os.path.split(pathname)
    if os.path.isdir(pathname):
        initname = os.path.join(pathname, '__init__.py')
        if os.path.isfile(initname):
            # A package: add __init__ first, then walk its contents.
            if basename:
                basename = ('%s/%s' % (basename, name))
            else:
                basename = name
            if self.debug:
                print 'Adding package in', pathname, 'as', basename
            (fname, arcname) = self._get_codename(initname[0:(-3)], basename)
            if self.debug:
                print 'Adding', arcname
            self.write(fname, arcname)
            dirlist = os.listdir(pathname)
            dirlist.remove('__init__.py')
            # Add all *.py files and package subdirectories.
            for filename in dirlist:
                path = os.path.join(pathname, filename)
                (root, ext) = os.path.splitext(filename)
                if os.path.isdir(path):
                    # Recurse only into sub-packages (must have __init__.py).
                    if os.path.isfile(os.path.join(path, '__init__.py')):
                        self.writepy(path, basename)
                elif (ext == '.py'):
                    (fname, arcname) = self._get_codename(path[0:(-3)], basename)
                    if self.debug:
                        print 'Adding', arcname
                    self.write(fname, arcname)
        else:
            # A plain directory: add its *.py files, no recursion.
            if self.debug:
                print 'Adding files from directory', pathname
            for filename in os.listdir(pathname):
                path = os.path.join(pathname, filename)
                (root, ext) = os.path.splitext(filename)
                if (ext == '.py'):
                    (fname, arcname) = self._get_codename(path[0:(-3)], basename)
                    if self.debug:
                        print 'Adding', arcname
                    self.write(fname, arcname)
    else:
        # A single module file: must be a .py source file.
        if (pathname[(-3):] != '.py'):
            raise RuntimeError, 'Files added with writepy() must end with ".py"'
        (fname, arcname) = self._get_codename(pathname[0:(-3)], basename)
        if self.debug:
            print 'Adding file', arcname
        self.write(fname, arcname)
def _get_codename(self, pathname, basename):
    """Return (filename, archivename) for the path.

    Given a module name path, return the correct file path and archive
    name, compiling if necessary.  For example, given /python/lib/string,
    return (/python/lib/string.pyc, string).
    """
    file_py = (pathname + '.py')
    file_pyc = (pathname + '.pyc')
    file_pyo = (pathname + '.pyo')
    if (os.path.isfile(file_pyo) and (os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime)):
        # An up-to-date optimized .pyo wins; no compile needed.
        fname = file_pyo
    elif ((not os.path.isfile(file_pyc)) or (os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime)):
        # No .pyc, or it is older than the source: (re)compile.
        import py_compile
        if self.debug:
            print 'Compiling', file_py
        try:
            py_compile.compile(file_py, file_pyc, None, True)
        except py_compile.PyCompileError as err:
            # Report the error but fall through: the (possibly stale)
            # .pyc path is still what gets returned.
            print err.msg
        fname = file_pyc
    else:
        fname = file_pyc
    archivename = os.path.split(fname)[1]
    if basename:
        archivename = ('%s/%s' % (basename, archivename))
    return (fname, archivename)
def __init__(self, name, value):
    """Record the field's name and value."""
    self.name, self.value = name, value
def __repr__(self):
    """Printable representation of this simple field."""
    return 'MiniFieldStorage(%r, %r)' % (self.name, self.value)
def __init__(self, fp=None, headers=None, outerboundary='', environ=os.environ, keep_blank_values=0, strict_parsing=0):
    """Constructor.  Read multipart/* until last part.

    Arguments, all optional:

    fp               : file pointer; default: sys.stdin
                       (not used when the request method is GET)
    headers          : header dictionary-like object; default:
                       taken from environ as per CGI spec
    outerboundary    : terminating multipart boundary
                       (for internal use only)
    environ          : environment dictionary; default: os.environ
    keep_blank_values: flag indicating whether blank values in
                       percent-encoded forms should be treated as blank
                       strings.  A true value indicates that blanks
                       should be retained as blank strings.  The default
                       false value indicates that blank values are to be
                       ignored and treated as if they were not included.
    strict_parsing   : flag indicating what to do with parsing errors.
                       If false (the default), errors are silently
                       ignored.  If true, errors raise a ValueError.
    """
    method = 'GET'
    self.keep_blank_values = keep_blank_values
    self.strict_parsing = strict_parsing
    if ('REQUEST_METHOD' in environ):
        method = environ['REQUEST_METHOD'].upper()
    self.qs_on_post = None
    if ((method == 'GET') or (method == 'HEAD')):
        # GET/HEAD: the "body" is the query string, never read from fp.
        if ('QUERY_STRING' in environ):
            qs = environ['QUERY_STRING']
        elif sys.argv[1:]:
            # Command-line testing convenience.
            qs = sys.argv[1]
        else:
            qs = ''
        fp = StringIO(qs)
        if (headers is None):
            headers = {'content-type': 'application/x-www-form-urlencoded'}
    if (headers is None):
        # POST (or other method): synthesize headers from the environment.
        headers = {}
        if (method == 'POST'):
            # Default for POST; may be overridden by CONTENT_TYPE below.
            headers['content-type'] = 'application/x-www-form-urlencoded'
        if ('CONTENT_TYPE' in environ):
            headers['content-type'] = environ['CONTENT_TYPE']
        if ('QUERY_STRING' in environ):
            # Query-string fields supplied alongside a POST body are
            # merged in later by the body parsers.
            self.qs_on_post = environ['QUERY_STRING']
        if ('CONTENT_LENGTH' in environ):
            headers['content-length'] = environ['CONTENT_LENGTH']
    self.fp = (fp or sys.stdin)
    self.headers = headers
    self.outerboundary = outerboundary
    # Process content-disposition header (gives field name / filename).
    (cdisp, pdict) = ('', {})
    if ('content-disposition' in self.headers):
        (cdisp, pdict) = parse_header(self.headers['content-disposition'])
    self.disposition = cdisp
    self.disposition_options = pdict
    self.name = None
    if ('name' in pdict):
        self.name = pdict['name']
    self.filename = None
    if ('filename' in pdict):
        self.filename = pdict['filename']
    # Process content-type header: it selects the body parser below.
    if ('content-type' in self.headers):
        (ctype, pdict) = parse_header(self.headers['content-type'])
    elif (self.outerboundary or (method != 'POST')):
        # Inner parts of a multipart body default to text/plain.
        (ctype, pdict) = ('text/plain', {})
    else:
        (ctype, pdict) = ('application/x-www-form-urlencoded', {})
    self.type = ctype
    self.type_options = pdict
    self.innerboundary = ''
    if ('boundary' in pdict):
        self.innerboundary = pdict['boundary']
    clen = (-1)
    if ('content-length' in self.headers):
        try:
            clen = int(self.headers['content-length'])
        except ValueError:
            pass
        if (maxlen and (clen > maxlen)):
            raise ValueError, 'Maximum content length exceeded'
    self.length = clen
    self.list = self.file = None
    self.done = 0
    # Dispatch on content type.
    if (ctype == 'application/x-www-form-urlencoded'):
        self.read_urlencoded()
    elif (ctype[:10] == 'multipart/'):
        self.read_multi(environ, keep_blank_values, strict_parsing)
    else:
        self.read_single()
def __repr__(self):
    """Printable representation (note: accessing .value may read the body)."""
    return 'FieldStorage(%r, %r, %r)' % (self.name, self.filename, self.value)
'Dictionary style indexing.'
def __getitem__(self, key):
if (self.list is None): raise TypeError, 'not indexable' found = [] for item in self.list: if (item.name == key): found.append(item) if (not found): raise KeyError, key if (len(found) == 1): return found[0] else: return found
def getvalue(self, key, default=None):
    """Dictionary-style get(), unwrapping the stored value(s).

    Returns the raw value for a single field, a list of values for a
    repeated field, or 'default' when the key is absent.
    """
    if key not in self:
        return default
    value = self[key]
    if type(value) is type([]):
        return map(attrgetter('value'), value)
    return value.value
def getfirst(self, key, default=None):
    """Return the first value received for 'key', or 'default' if absent."""
    if key not in self:
        return default
    value = self[key]
    if type(value) is type([]):
        return value[0].value
    return value.value
def getlist(self, key):
    """Return the list of values received for 'key' ([] if absent)."""
    if key not in self:
        return []
    value = self[key]
    if type(value) is type([]):
        return map(attrgetter('value'), value)
    return [value.value]
'Dictionary style keys() method.'
def keys(self):
if (self.list is None): raise TypeError, 'not indexable' return list(set((item.name for item in self.list)))
def has_key(self, key):
    """Dictionary-style has_key() method.

    Deprecated-style alias for ``key in fs``; delegates to __contains__
    so the two membership checks cannot drift apart.  Like the operator
    form, raises TypeError when no body has been parsed yet.
    """
    return key in self
'Dictionary style __contains__ method.'
def __contains__(self, key):
if (self.list is None): raise TypeError, 'not indexable' return any(((item.name == key) for item in self.list))
def __len__(self):
    """Dictionary-style len(): the number of distinct field names."""
    return len(self.keys())
def read_urlencoded(self):
    """Internal: parse an application/x-www-form-urlencoded body."""
    qs = self.fp.read(self.length)
    if self.qs_on_post:
        # Fold in query-string fields that accompanied the POST.
        qs += '&' + self.qs_on_post
    self.list = []
    for key, value in urlparse.parse_qsl(qs, self.keep_blank_values, self.strict_parsing):
        self.list.append(MiniFieldStorage(key, value))
    self.skip_lines()
def read_multi(self, environ, keep_blank_values, strict_parsing):
    """Internal: read a part that is itself multipart."""
    ib = self.innerboundary
    if (not valid_boundary(ib)):
        raise ValueError, ('Invalid boundary in multipart form: %r' % (ib,))
    self.list = []
    if self.qs_on_post:
        # Merge query-string fields that accompanied the POST body.
        for (key, value) in urlparse.parse_qsl(self.qs_on_post, self.keep_blank_values, self.strict_parsing):
            self.list.append(MiniFieldStorage(key, value))
    FieldStorageClass = None
    # Parts are built with the subclass's factory if one was set.
    klass = (self.FieldStorageClass or self.__class__)
    # First part only consumes the preamble up to the first boundary;
    # it is discarded (not appended below).
    part = klass(self.fp, {}, ib, environ, keep_blank_values, strict_parsing)
    while (not part.done):
        # Each iteration reads one part's headers, then its body.
        headers = rfc822.Message(self.fp)
        part = klass(self.fp, headers, ib, environ, keep_blank_values, strict_parsing)
        self.list.append(part)
    self.skip_lines()
def read_single(self):
    """Internal: read a non-multipart body into self.file and rewind it."""
    if self.length < 0:
        # Unknown length: accumulate line by line until EOF/boundary.
        self.read_lines()
    else:
        self.read_binary()
        self.skip_lines()
    self.file.seek(0)
def read_binary(self):
    """Internal: copy exactly self.length bytes from fp into a new file.

    Sets self.done = -1 if the stream runs dry before 'length' bytes
    arrive.
    """
    self.file = self.make_file('b')
    remaining = self.length
    while remaining > 0:
        chunk = self.fp.read(min(remaining, self.bufsize))
        if not chunk:
            # Premature EOF: flag and stop.
            self.done = -1
            break
        self.file.write(chunk)
        remaining -= len(chunk)
def read_lines(self):
    """Internal: read lines into an in-memory buffer until EOF/boundary."""
    self.__file = StringIO()
    self.file = self.__file
    if self.outerboundary:
        self.read_lines_to_outerboundary()
    else:
        self.read_lines_to_eof()
def read_lines_to_eof(self):
    """Internal: copy lines from fp until EOF, marking self.done = -1."""
    while True:
        line = self.fp.readline(1 << 16)  # cap each read at 64 KiB
        if not line:
            self.done = -1
            break
        self.__write(line)
def read_lines_to_outerboundary(self):
    """Internal: read lines until outerboundary.

    The line ending that precedes the boundary belongs to the boundary,
    not to the data, so each line's terminator is held back in 'delim'
    and only flushed once the next line proves it was ordinary data.
    """
    next = ('--' + self.outerboundary)
    last = (next + '--')  # closing boundary of the whole multipart body
    delim = ''
    last_line_lfend = True
    while 1:
        line = self.fp.readline((1 << 16))  # cap each read at 64 KiB
        if (not line):
            self.done = (-1)  # premature EOF before any boundary
            break
        if (delim == '\r'):
            # Previous chunk ended in a bare CR (possibly a split CRLF):
            # re-attach it, since a boundary needs a full line ending.
            line = (delim + line)
            delim = ''
        # A boundary only counts at the start of a true line.
        if ((line[:2] == '--') and last_line_lfend):
            strippedline = line.strip()
            if (strippedline == next):
                break
            if (strippedline == last):
                self.done = 1  # final boundary reached
                break
        odelim = delim  # terminator withheld from the previous line
        if (line[(-2):] == '\r\n'):
            delim = '\r\n'
            line = line[:(-2)]
            last_line_lfend = True
        elif (line[(-1)] == '\n'):
            delim = '\n'
            line = line[:(-1)]
            last_line_lfend = True
        elif (line[(-1)] == '\r'):
            # readline() hit its size cap between CR and LF; hold the CR.
            delim = '\r'
            line = line[:(-1)]
            last_line_lfend = False
        else:
            delim = ''
            last_line_lfend = False
        self.__write((odelim + line))
def skip_lines(self):
    """Internal: skip lines until outer boundary if defined."""
    if ((not self.outerboundary) or self.done):
        # No boundary to find, or the body was already fully consumed.
        return
    next = ('--' + self.outerboundary)
    last = (next + '--')  # closing boundary of the whole multipart body
    last_line_lfend = True
    while 1:
        line = self.fp.readline((1 << 16))  # cap each read at 64 KiB
        if (not line):
            self.done = (-1)  # EOF before the boundary
            break
        # A boundary only counts at the start of a true line.
        if ((line[:2] == '--') and last_line_lfend):
            strippedline = line.strip()
            if (strippedline == next):
                break
            if (strippedline == last):
                self.done = 1  # final boundary: whole body consumed
                break
        last_line_lfend = line.endswith('\n')
def make_file(self, binary=None):
    """Overridable: return a readable & writable file.

    The file is used as: data written in, seek(0), data read back.  The
    'binary' argument is unused -- the file is always opened in binary
    mode.  The anonymous temporary file is unlinked immediately (the
    Unix trick), so it cannot be opened by other processes and vanishes
    automatically on close or process exit; subclasses may override this
    for a more permanent or visible file.
    """
    from tempfile import TemporaryFile
    return TemporaryFile('w+b')
def __init__(self):
    """Set all attributes.

    Order of the calc methods matters for dependency reasons.  The
    locale is captured first and re-checked at the end so the attributes
    cannot mix data from two locales (e.g. another thread calling
    setlocale mid-initialization); ditto for the timezone, which is
    stale if the program changed it without calling time.tzset().
    """
    self.lang = _getlang()
    self.__calc_weekday()
    self.__calc_month()
    self.__calc_am_pm()
    self.__calc_timezone()
    self.__calc_date_time()
    # Re-check: fail loudly if locale/timezone changed underneath us.
    if (_getlang() != self.lang):
        raise ValueError('locale changed during initialization')
    if ((time.tzname != self.tzname) or (time.daylight != self.daylight)):
        raise ValueError('timezone changed during initialization')
def __init__(self, locale_time=None):
    """Create keys/values.

    Order of execution is important for dependency reasons: 'W', 'c',
    'x' and 'X' are derived from entries installed by the base dict
    first.
    """
    # Fall back to the current locale's data when none is supplied.
    if locale_time:
        self.locale_time = locale_time
    else:
        self.locale_time = LocaleTime()
    base = super(TimeRE, self)
    # Map each strptime directive to a named-group regex fragment.
    base.__init__({
        'd': '(?P<d>3[0-1]|[1-2]\\d|0[1-9]|[1-9]| [1-9])',
        'f': '(?P<f>[0-9]{1,6})',
        'H': '(?P<H>2[0-3]|[0-1]\\d|\\d)',
        'I': '(?P<I>1[0-2]|0[1-9]|[1-9])',
        'j': '(?P<j>36[0-6]|3[0-5]\\d|[1-2]\\d\\d|0[1-9]\\d|00[1-9]|[1-9]\\d|0[1-9]|[1-9])',
        'm': '(?P<m>1[0-2]|0[1-9]|[1-9])',
        'M': '(?P<M>[0-5]\\d|\\d)',
        'S': '(?P<S>6[0-1]|[0-5]\\d|\\d)',
        'U': '(?P<U>5[0-3]|[0-4]\\d|\\d)',
        'w': '(?P<w>[0-6])',
        'y': '(?P<y>\\d\\d)',
        'Y': '(?P<Y>\\d\\d\\d\\d)',
        # Locale-dependent alternations, longest match first.
        'A': self.__seqToRE(self.locale_time.f_weekday, 'A'),
        'a': self.__seqToRE(self.locale_time.a_weekday, 'a'),
        'B': self.__seqToRE(self.locale_time.f_month[1:], 'B'),
        'b': self.__seqToRE(self.locale_time.a_month[1:], 'b'),
        'p': self.__seqToRE(self.locale_time.am_pm, 'p'),
        'Z': self.__seqToRE((tz for tz_names in self.locale_time.timezone for tz in tz_names), 'Z'),
        '%': '%'})
    # 'W' (Monday-based week) is 'U' with a renamed group.
    base.__setitem__('W', base.__getitem__('U').replace('U', 'W'))
    # Composite locale formats expand to patterns using the entries above.
    base.__setitem__('c', self.pattern(self.locale_time.LC_date_time))
    base.__setitem__('x', self.pattern(self.locale_time.LC_date))
    base.__setitem__('X', self.pattern(self.locale_time.LC_time))
def __seqToRE(self, to_convert, directive):
    """Convert a sequence of strings into a named alternation regex.

    Values are ordered longest-first so a short value can never shadow
    a longer one it prefixes (e.g. 'abc' matching where 'abcdef'
    should).  Returns '' when every value is empty.
    """
    ordered = sorted(to_convert, key=len, reverse=True)
    if not any(value != '' for value in ordered):
        return ''
    alternation = '|'.join(re_escape(item) for item in ordered)
    return '(?P<%s>%s)' % (directive, alternation)
'Return regex pattern for the format string. Need to make sure that any characters that might be interpreted as regex syntax are escaped.'
def pattern(self, format):
processed_format = '' regex_chars = re_compile('([\\\\.^$*+?\\(\\){}\\[\\]|])') format = regex_chars.sub('\\\\\\1', format) whitespace_replacement = re_compile('\\s+') format = whitespace_replacement.sub('\\s+', format) while ('%' in format): directive_index = (format.index('%') + 1) processed_format = ('%s%s%s' % (processed_format, format[:(directive_index - 1)], self[format[directive_index]])) format = format[(directive_index + 1):] return ('%s%s' % (processed_format, format))
def compile(self, format):
    """Return a compiled, case-insensitive re object for the format string."""
    pat = self.pattern(format)
    return re_compile(pat, IGNORECASE)