'Encode fields and files for posting to an HTTP server.
:param fields: The fields to send as a list of (fieldname, value)
tuples.
:param files: The files to send as a list of (fieldname, filename,
file_bytes) tuples.'
| def encode_request(self, fields, files):
| parts = []
boundary = self.boundary
for k, values in fields:
    if not isinstance(values, (list, tuple)):
        values = [values]
    for v in values:
        parts.extend((b'--' + boundary,
                      ('Content-Disposition: form-data; name="%s"' % k).encode('utf-8'),
                      b'',
                      v.encode('utf-8')))
for key, filename, value in files:
    parts.extend((b'--' + boundary,
                  ('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename)).encode('utf-8'),
                  b'',
                  value))
parts.extend((b'--' + boundary + b'--', b''))
body = b'\r\n'.join(parts)
ct = b'multipart/form-data; boundary=' + boundary
headers = {'Content-type': ct, 'Content-length': str(len(body))}
return Request(self.url, body, headers)
|
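# A minimal sketch (not part of the dump) of the multipart/form-data framing
# encode_request produces, assuming a hex uuid boundary (the real class sets
# self.boundary elsewhere): each field or file becomes a --boundary /
# Content-Disposition / blank line / value quartet, and the body ends with
# --boundary--.
import uuid

boundary = uuid.uuid4().hex.encode('ascii')
parts = [
    b'--' + boundary,
    b'Content-Disposition: form-data; name="name"',
    b'',
    b'demo',
    b'--' + boundary,
    b'Content-Disposition: form-data; name="content"; filename="demo.txt"',
    b'',
    b'hello',
    b'--' + boundary + b'--',
    b'',
]
body = b'\r\n'.join(parts)
headers = {'Content-type': 'multipart/form-data; boundary=' + boundary.decode('ascii'),
           'Content-length': str(len(body))}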
'Construct a _Stream object.'
| def __init__(self, name, mode, comptype, fileobj, bufsize):
| self._extfileobj = True
if (fileobj is None):
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if (comptype == '*'):
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = (name or '')
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = b''
self.pos = 0
self.closed = False
try:
if (comptype == 'gz'):
try:
import zlib
except ImportError:
raise CompressionError('zlib module is not available')
self.zlib = zlib
self.crc = zlib.crc32(b'')
if (mode == 'r'):
self._init_read_gz()
else:
self._init_write_gz()
if (comptype == 'bz2'):
try:
import bz2
except ImportError:
raise CompressionError('bz2 module is not available')
if (mode == 'r'):
self.dbuf = b''
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if (not self._extfileobj):
self.fileobj.close()
self.closed = True
raise
|
'Initialize for writing with gzip compression.'
| def _init_write_gz(self):
| self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, -self.zlib.MAX_WBITS, self.zlib.DEF_MEM_LEVEL, 0)
timestamp = struct.pack('<L', int(time.time()))
self.__write(b'\x1f\x8b\x08\x08' + timestamp + b'\x02\xff')
if self.name.endswith('.gz'):
    self.name = self.name[:-3]
self.__write(self.name.encode('iso-8859-1', 'replace') + NUL)
|
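# A sketch decoding the 10-byte gzip member header _init_write_gz emits
# (per RFC 1952): magic 1f 8b, CM=8 (deflate), FLG=0x08 (FNAME follows),
# 4-byte little-endian mtime, XFL=2 ("best compression"), OS=0xff (unknown).
import struct
import time

header = b'\x1f\x8b\x08\x08' + struct.pack('<L', int(time.time())) + b'\x02\xff'
assert header[:2] == b'\x1f\x8b' and header[2] == 8 and header[3] & 0x08
mtime, = struct.unpack('<L', header[4:8])   # timestamp written above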
'Write string s to the stream.'
| def write(self, s):
| if (self.comptype == 'gz'):
self.crc = self.zlib.crc32(s, self.crc)
self.pos += len(s)
if (self.comptype != 'tar'):
s = self.cmp.compress(s)
self.__write(s)
|
'Write string s to the stream if a whole new block
is ready to be written.'
| def __write(self, s):
| self.buf += s
while (len(self.buf) > self.bufsize):
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
|
'Close the _Stream object. No operation should be
done on it afterwards.'
| def close(self):
| if self.closed:
return
if ((self.mode == 'w') and (self.comptype != 'tar')):
self.buf += self.cmp.flush()
if ((self.mode == 'w') and self.buf):
self.fileobj.write(self.buf)
self.buf = b''
if (self.comptype == 'gz'):
self.fileobj.write(struct.pack('<L', self.crc & 0xffffffff))
self.fileobj.write(struct.pack('<L', self.pos & 0xffffffff))
if (not self._extfileobj):
self.fileobj.close()
self.closed = True
|
'Initialize for reading a gzip compressed fileobj.'
| def _init_read_gz(self):
| self.cmp = self.zlib.decompressobj(-self.zlib.MAX_WBITS)
self.dbuf = b''
if self.__read(2) != b'\x1f\x8b':
    raise ReadError('not a gzip file')
if self.__read(1) != b'\x08':
    raise CompressionError('unsupported compression method')
flag = ord(self.__read(1))
self.__read(6)
if (flag & 4):
xlen = (ord(self.__read(1)) + (256 * ord(self.__read(1))))
self.read(xlen)
if (flag & 8):
while True:
s = self.__read(1)
if ((not s) or (s == NUL)):
break
if (flag & 16):
while True:
s = self.__read(1)
if ((not s) or (s == NUL)):
break
if (flag & 2):
self.__read(2)
|
'Return the stream\'s file pointer position.'
| def tell(self):
| return self.pos
|
'Set the stream\'s file pointer to pos. Negative seeking
is forbidden.'
| def seek(self, pos=0):
| if ((pos - self.pos) >= 0):
(blocks, remainder) = divmod((pos - self.pos), self.bufsize)
for i in range(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError('seeking backwards is not allowed')
return self.pos
|
'Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.'
| def read(self, size=None):
| if (size is None):
t = []
while True:
buf = self._read(self.bufsize)
if (not buf):
break
t.append(buf)
buf = b''.join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
|
'Return size bytes from the stream.'
| def _read(self, size):
| if (self.comptype == 'tar'):
return self.__read(size)
c = len(self.dbuf)
while (c < size):
buf = self.__read(self.bufsize)
if (not buf):
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError('invalid compressed data')
self.dbuf += buf
c += len(buf)
buf = self.dbuf[:size]
self.dbuf = self.dbuf[size:]
return buf
|
'Return size bytes from stream. If internal buffer is empty,
read another block from the stream.'
| def __read(self, size):
| c = len(self.buf)
while (c < size):
buf = self.fileobj.read(self.bufsize)
if (not buf):
break
self.buf += buf
c += len(buf)
buf = self.buf[:size]
self.buf = self.buf[size:]
return buf
|
'Return the current file position.'
| def tell(self):
| return self.position
|
'Seek to a position in the file.'
| def seek(self, position):
| self.position = position
|
'Read data from the file.'
| def read(self, size=None):
| if (size is None):
size = (self.size - self.position)
else:
size = min(size, (self.size - self.position))
buf = b''
while (size > 0):
while True:
(data, start, stop, offset) = self.map[self.map_index]
if (start <= self.position < stop):
break
else:
self.map_index += 1
if (self.map_index == len(self.map)):
self.map_index = 0
length = min(size, (stop - self.position))
if data:
self.fileobj.seek((offset + (self.position - start)))
buf += self.fileobj.read(length)
else:
buf += (NUL * length)
size -= length
self.position += length
return buf
|
'Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.'
| def read(self, size=None):
| if self.closed:
raise ValueError('I/O operation on closed file')
| buf = b''
if self.buffer:
    if size is None:
        buf = self.buffer
        self.buffer = b''
    else:
        buf = self.buffer[:size]
        self.buffer = self.buffer[size:]
if (size is None):
buf += self.fileobj.read()
else:
buf += self.fileobj.read((size - len(buf)))
self.position += len(buf)
return buf
|
'Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.'
| def readline(self, size=(-1)):
| if self.closed:
raise ValueError('I/O operation on closed file')
pos = self.buffer.find(b'\n') + 1
if pos == 0:
    while True:
        buf = self.fileobj.read(self.blocksize)
        self.buffer += buf
        if not buf or b'\n' in buf:
            pos = self.buffer.find(b'\n') + 1
            if pos == 0:
                pos = len(self.buffer)
            break
if (size != (-1)):
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
|
'Return a list with all remaining lines.'
| def readlines(self):
| result = []
while True:
line = self.readline()
if (not line):
break
result.append(line)
return result
|
'Return the current file position.'
| def tell(self):
| if self.closed:
raise ValueError('I/O operation on closed file')
return self.position
|
'Seek to a position in the file.'
| def seek(self, pos, whence=os.SEEK_SET):
| if self.closed:
raise ValueError('I/O operation on closed file')
if (whence == os.SEEK_SET):
self.position = min(max(pos, 0), self.size)
elif (whence == os.SEEK_CUR):
if (pos < 0):
self.position = max((self.position + pos), 0)
else:
self.position = min((self.position + pos), self.size)
elif (whence == os.SEEK_END):
self.position = max(min((self.size + pos), self.size), 0)
else:
raise ValueError('Invalid argument')
self.buffer = b''
self.fileobj.seek(self.position)
|
'Close the file object.'
| def close(self):
| self.closed = True
|
'Get an iterator over the file\'s lines.'
| def __iter__(self):
| while True:
line = self.readline()
if (not line):
break
(yield line)
|
'Construct a TarInfo object. name is the optional name
of the member.'
| def __init__(self, name=''):
| self.name = name
self.mode = 0o644
self.uid = 0
self.gid = 0
self.size = 0
self.mtime = 0
self.chksum = 0
self.type = REGTYPE
self.linkname = ''
self.uname = ''
self.gname = ''
self.devmajor = 0
self.devminor = 0
self.offset = 0
self.offset_data = 0
self.sparse = None
self.pax_headers = {}
|
'Return the TarInfo\'s attributes as a dictionary.'
| def get_info(self):
| info = {'name': self.name,
         'mode': self.mode & 0o7777,
         'uid': self.uid,
         'gid': self.gid,
         'size': self.size,
         'mtime': self.mtime,
         'chksum': self.chksum,
         'type': self.type,
         'linkname': self.linkname,
         'uname': self.uname,
         'gname': self.gname,
         'devmajor': self.devmajor,
         'devminor': self.devminor}
if ((info['type'] == DIRTYPE) and (not info['name'].endswith('/'))):
info['name'] += '/'
return info
|
'Return a tar header as a string of 512 byte blocks.'
| def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors='surrogateescape'):
| info = self.get_info()
if (format == USTAR_FORMAT):
return self.create_ustar_header(info, encoding, errors)
elif (format == GNU_FORMAT):
return self.create_gnu_header(info, encoding, errors)
elif (format == PAX_FORMAT):
return self.create_pax_header(info, encoding)
else:
raise ValueError('invalid format')
|
'Return the object as a ustar header block.'
| def create_ustar_header(self, info, encoding, errors):
| info['magic'] = POSIX_MAGIC
if (len(info['linkname']) > LENGTH_LINK):
raise ValueError('linkname is too long')
if (len(info['name']) > LENGTH_NAME):
(info['prefix'], info['name']) = self._posix_split_name(info['name'])
return self._create_header(info, USTAR_FORMAT, encoding, errors)
|
'Return the object as a GNU header block sequence.'
| def create_gnu_header(self, info, encoding, errors):
| info['magic'] = GNU_MAGIC
buf = b''
if (len(info['linkname']) > LENGTH_LINK):
buf += self._create_gnu_long_header(info['linkname'], GNUTYPE_LONGLINK, encoding, errors)
if (len(info['name']) > LENGTH_NAME):
buf += self._create_gnu_long_header(info['name'], GNUTYPE_LONGNAME, encoding, errors)
return (buf + self._create_header(info, GNU_FORMAT, encoding, errors))
|
'Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.'
| def create_pax_header(self, info, encoding):
| info['magic'] = POSIX_MAGIC
pax_headers = self.pax_headers.copy()
for (name, hname, length) in (('name', 'path', LENGTH_NAME), ('linkname', 'linkpath', LENGTH_LINK), ('uname', 'uname', 32), ('gname', 'gname', 32)):
if (hname in pax_headers):
continue
try:
info[name].encode('ascii', 'strict')
except UnicodeEncodeError:
pax_headers[hname] = info[name]
continue
if (len(info[name]) > length):
pax_headers[hname] = info[name]
for (name, digits) in (('uid', 8), ('gid', 8), ('size', 12), ('mtime', 12)):
if (name in pax_headers):
info[name] = 0
continue
val = info[name]
if ((not (0 <= val < (8 ** (digits - 1)))) or isinstance(val, float)):
pax_headers[name] = str(val)
info[name] = 0
if pax_headers:
buf = self._create_pax_generic_header(pax_headers, XHDTYPE, encoding)
else:
buf = b''
return (buf + self._create_header(info, USTAR_FORMAT, 'ascii', 'replace'))
|
'Return the object as a pax global header block sequence.'
| @classmethod
def create_pax_global_header(cls, pax_headers):
| return cls._create_pax_generic_header(pax_headers, XGLTYPE, 'utf8')
|
'Split a name longer than 100 chars into a prefix
and a name part.'
| def _posix_split_name(self, name):
| prefix = name[:(LENGTH_PREFIX + 1)]
while (prefix and (prefix[(-1)] != '/')):
prefix = prefix[:(-1)]
name = name[len(prefix):]
prefix = prefix[:(-1)]
if ((not prefix) or (len(name) > LENGTH_NAME)):
raise ValueError('name is too long')
return (prefix, name)
|
'Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.'
| @staticmethod
def _create_header(info, format, encoding, errors):
| parts = [
    stn(info.get('name', ''), 100, encoding, errors),
    itn(info.get('mode', 0) & 0o7777, 8, format),
    itn(info.get('uid', 0), 8, format),
    itn(info.get('gid', 0), 8, format),
    itn(info.get('size', 0), 12, format),
    itn(info.get('mtime', 0), 12, format),
    b'        ',            # checksum field, filled in below
    info.get('type', REGTYPE),
    stn(info.get('linkname', ''), 100, encoding, errors),
    info.get('magic', POSIX_MAGIC),
    stn(info.get('uname', ''), 32, encoding, errors),
    stn(info.get('gname', ''), 32, encoding, errors),
    itn(info.get('devmajor', 0), 8, format),
    itn(info.get('devminor', 0), 8, format),
    stn(info.get('prefix', ''), 155, encoding, errors),
]
buf = struct.pack('%ds' % BLOCKSIZE, b''.join(parts))
chksum = calc_chksums(buf[-BLOCKSIZE:])[0]
buf = buf[:-364] + ('%06o\x00' % chksum).encode('ascii') + buf[-357:]
return buf
|
'Return the string payload filled with zero bytes
up to the next 512 byte border.'
| @staticmethod
def _create_payload(payload):
| (blocks, remainder) = divmod(len(payload), BLOCKSIZE)
if (remainder > 0):
payload += ((BLOCKSIZE - remainder) * NUL)
return payload
|
'Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.'
| @classmethod
def _create_gnu_long_header(cls, name, type, encoding, errors):
| name = (name.encode(encoding, errors) + NUL)
info = {}
info['name'] = '././@LongLink'
info['type'] = type
info['size'] = len(name)
info['magic'] = GNU_MAGIC
return (cls._create_header(info, USTAR_FORMAT, encoding, errors) + cls._create_payload(name))
|
'Return a POSIX.1-2008 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be strings.'
| @classmethod
def _create_pax_generic_header(cls, pax_headers, type, encoding):
| binary = False
for (keyword, value) in pax_headers.items():
try:
value.encode('utf8', 'strict')
except UnicodeEncodeError:
binary = True
break
records = b''
if binary:
    records += b'21 hdrcharset=BINARY\n'
for keyword, value in pax_headers.items():
    keyword = keyword.encode('utf8')
    if binary:
        value = value.encode(encoding, 'surrogateescape')
    else:
        value = value.encode('utf8')
    l = len(keyword) + len(value) + 3   # space, '=' and trailing newline
    n = p = 0
    while True:
        n = l + len(str(p))
        if n == p:
            break
        p = n
    records += bytes(str(p), 'ascii') + b' ' + keyword + b'=' + value + b'\n'
info = {}
info['name'] = '././@PaxHeader'
info['type'] = type
info['size'] = len(records)
info['magic'] = POSIX_MAGIC
return (cls._create_header(info, USTAR_FORMAT, 'ascii', 'replace') + cls._create_payload(records))
|
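# Worked example of the self-referential length computed in the loop above:
# a pax record is b"<len> <keyword>=<value>\n" where <len> counts the whole
# record, including its own decimal digits, so the loop searches for the
# fixed point n == p.
keyword, value = b'path', b'some/long/name'
l = len(keyword) + len(value) + 3          # space, '=' and trailing newline
n = p = 0
while True:
    n = l + len(str(p))                    # record length if the digits were len(str(p)) wide
    if n == p:
        break
    p = n
record = str(p).encode('ascii') + b' ' + keyword + b'=' + value + b'\n'
assert record == b'23 path=some/long/name\n' and len(record) == p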
'Construct a TarInfo object from a 512 byte bytes object.'
| @classmethod
def frombuf(cls, buf, encoding, errors):
| if (len(buf) == 0):
raise EmptyHeaderError('empty header')
if (len(buf) != BLOCKSIZE):
raise TruncatedHeaderError('truncated header')
if (buf.count(NUL) == BLOCKSIZE):
raise EOFHeaderError('end of file header')
chksum = nti(buf[148:156])
if (chksum not in calc_chksums(buf)):
raise InvalidHeaderError('bad checksum')
obj = cls()
obj.name = nts(buf[0:100], encoding, errors)
obj.mode = nti(buf[100:108])
obj.uid = nti(buf[108:116])
obj.gid = nti(buf[116:124])
obj.size = nti(buf[124:136])
obj.mtime = nti(buf[136:148])
obj.chksum = chksum
obj.type = buf[156:157]
obj.linkname = nts(buf[157:257], encoding, errors)
obj.uname = nts(buf[265:297], encoding, errors)
obj.gname = nts(buf[297:329], encoding, errors)
obj.devmajor = nti(buf[329:337])
obj.devminor = nti(buf[337:345])
prefix = nts(buf[345:500], encoding, errors)
if ((obj.type == AREGTYPE) and obj.name.endswith('/')):
obj.type = DIRTYPE
if (obj.type == GNUTYPE_SPARSE):
pos = 386
structs = []
for i in range(4):
try:
offset = nti(buf[pos:(pos + 12)])
numbytes = nti(buf[(pos + 12):(pos + 24)])
except ValueError:
break
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[482])
origsize = nti(buf[483:495])
obj._sparse_structs = (structs, isextended, origsize)
if obj.isdir():
obj.name = obj.name.rstrip('/')
if (prefix and (obj.type not in GNU_TYPES)):
obj.name = ((prefix + '/') + obj.name)
return obj
|
'Return the next TarInfo object from TarFile object
tarfile.'
| @classmethod
def fromtarfile(cls, tarfile):
| buf = tarfile.fileobj.read(BLOCKSIZE)
obj = cls.frombuf(buf, tarfile.encoding, tarfile.errors)
obj.offset = (tarfile.fileobj.tell() - BLOCKSIZE)
return obj._proc_member(tarfile)
|
'Choose the right processing method depending on
the type and call it.'
| def _proc_member(self, tarfile):
| if (self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK)):
return self._proc_gnulong(tarfile)
elif (self.type == GNUTYPE_SPARSE):
return self._proc_sparse(tarfile)
elif (self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE)):
return self._proc_pax(tarfile)
else:
return self._proc_builtin(tarfile)
|
'Process a builtin type or an unknown type which
will be treated as a regular file.'
| def _proc_builtin(self, tarfile):
| self.offset_data = tarfile.fileobj.tell()
offset = self.offset_data
if (self.isreg() or (self.type not in SUPPORTED_TYPES)):
offset += self._block(self.size)
tarfile.offset = offset
self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
return self
|
'Process the blocks that hold a GNU longname
or longlink member.'
| def _proc_gnulong(self, tarfile):
| buf = tarfile.fileobj.read(self._block(self.size))
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError('missing or bad subsequent header')
next.offset = self.offset
if (self.type == GNUTYPE_LONGNAME):
next.name = nts(buf, tarfile.encoding, tarfile.errors)
elif (self.type == GNUTYPE_LONGLINK):
next.linkname = nts(buf, tarfile.encoding, tarfile.errors)
return next
|
'Process a GNU sparse header plus extra headers.'
| def _proc_sparse(self, tarfile):
| (structs, isextended, origsize) = self._sparse_structs
del self._sparse_structs
while isextended:
buf = tarfile.fileobj.read(BLOCKSIZE)
pos = 0
for i in range(21):
try:
offset = nti(buf[pos:(pos + 12)])
numbytes = nti(buf[(pos + 12):(pos + 24)])
except ValueError:
break
if (offset and numbytes):
structs.append((offset, numbytes))
pos += 24
isextended = bool(buf[504])
self.sparse = structs
self.offset_data = tarfile.fileobj.tell()
tarfile.offset = (self.offset_data + self._block(self.size))
self.size = origsize
return self
|
'Process an extended or global header as described in
POSIX.1-2008.'
| def _proc_pax(self, tarfile):
| buf = tarfile.fileobj.read(self._block(self.size))
if (self.type == XGLTYPE):
pax_headers = tarfile.pax_headers
else:
pax_headers = tarfile.pax_headers.copy()
match = re.search(br'\d+ hdrcharset=([^\n]+)\n', buf)
if match is not None:
    pax_headers['hdrcharset'] = match.group(1).decode('utf8')
hdrcharset = pax_headers.get('hdrcharset')
if (hdrcharset == 'BINARY'):
encoding = tarfile.encoding
else:
encoding = 'utf8'
regex = re.compile(br'(\d+) ([^=]+)=')
pos = 0
while True:
match = regex.match(buf, pos)
if (not match):
break
(length, keyword) = match.groups()
length = int(length)
value = buf[(match.end(2) + 1):((match.start(1) + length) - 1)]
keyword = self._decode_pax_field(keyword, 'utf8', 'utf8', tarfile.errors)
if (keyword in PAX_NAME_FIELDS):
value = self._decode_pax_field(value, encoding, tarfile.encoding, tarfile.errors)
else:
value = self._decode_pax_field(value, 'utf8', 'utf8', tarfile.errors)
pax_headers[keyword] = value
pos += length
try:
next = self.fromtarfile(tarfile)
except HeaderError:
raise SubsequentHeaderError('missing or bad subsequent header')
if ('GNU.sparse.map' in pax_headers):
self._proc_gnusparse_01(next, pax_headers)
elif ('GNU.sparse.size' in pax_headers):
self._proc_gnusparse_00(next, pax_headers, buf)
elif ((pax_headers.get('GNU.sparse.major') == '1') and (pax_headers.get('GNU.sparse.minor') == '0')):
self._proc_gnusparse_10(next, pax_headers, tarfile)
if (self.type in (XHDTYPE, SOLARIS_XHDTYPE)):
next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
next.offset = self.offset
if ('size' in pax_headers):
offset = next.offset_data
if (next.isreg() or (next.type not in SUPPORTED_TYPES)):
offset += next._block(next.size)
tarfile.offset = offset
return next
|
'Process a GNU tar extended sparse header, version 0.0.'
| def _proc_gnusparse_00(self, next, pax_headers, buf):
| offsets = []
for match in re.finditer(br'\d+ GNU.sparse.offset=(\d+)\n', buf):
    offsets.append(int(match.group(1)))
numbytes = []
for match in re.finditer(br'\d+ GNU.sparse.numbytes=(\d+)\n', buf):
    numbytes.append(int(match.group(1)))
next.sparse = list(zip(offsets, numbytes))
|
'Process a GNU tar extended sparse header, version 0.1.'
| def _proc_gnusparse_01(self, next, pax_headers):
| sparse = [int(x) for x in pax_headers['GNU.sparse.map'].split(',')]
next.sparse = list(zip(sparse[::2], sparse[1::2]))
|
'Process a GNU tar extended sparse header, version 1.0.'
| def _proc_gnusparse_10(self, next, pax_headers, tarfile):
| fields = None
sparse = []
buf = tarfile.fileobj.read(BLOCKSIZE)
fields, buf = buf.split(b'\n', 1)
fields = int(fields)
while len(sparse) < fields * 2:
    if b'\n' not in buf:
        buf += tarfile.fileobj.read(BLOCKSIZE)
    number, buf = buf.split(b'\n', 1)
sparse.append(int(number))
next.offset_data = tarfile.fileobj.tell()
next.sparse = list(zip(sparse[::2], sparse[1::2]))
|
'Replace fields with supplemental information from a previous
pax extended or global header.'
| def _apply_pax_info(self, pax_headers, encoding, errors):
| for (keyword, value) in pax_headers.items():
if (keyword == 'GNU.sparse.name'):
setattr(self, 'path', value)
elif (keyword == 'GNU.sparse.size'):
setattr(self, 'size', int(value))
elif (keyword == 'GNU.sparse.realsize'):
setattr(self, 'size', int(value))
elif (keyword in PAX_FIELDS):
if (keyword in PAX_NUMBER_FIELDS):
try:
value = PAX_NUMBER_FIELDS[keyword](value)
except ValueError:
value = 0
if (keyword == 'path'):
value = value.rstrip('/')
setattr(self, keyword, value)
self.pax_headers = pax_headers.copy()
|
'Decode a single field from a pax record.'
| def _decode_pax_field(self, value, encoding, fallback_encoding, fallback_errors):
| try:
return value.decode(encoding, 'strict')
except UnicodeDecodeError:
return value.decode(fallback_encoding, fallback_errors)
|
'Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.'
| def _block(self, count):
| (blocks, remainder) = divmod(count, BLOCKSIZE)
if remainder:
blocks += 1
return (blocks * BLOCKSIZE)
|
'Open an (uncompressed) tar archive `name\'. `mode\' is either \'r\' to
read from an existing archive, \'a\' to append data to an existing
file or \'w\' to create a new file overwriting an existing one. `mode\'
defaults to \'r\'.
If `fileobj\' is given, it is used for reading or writing data. If it
can be determined, `mode\' is overridden by `fileobj\'s mode.
`fileobj\' is not closed when TarFile is closed.'
| def __init__(self, name=None, mode='r', fileobj=None, format=None, tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, errors='surrogateescape', pax_headers=None, debug=None, errorlevel=None):
| if ((len(mode) > 1) or (mode not in 'raw')):
raise ValueError("mode must be 'r', 'a' or 'w'")
self.mode = mode
self._mode = {'r': 'rb', 'a': 'r+b', 'w': 'wb'}[mode]
if (not fileobj):
if ((self.mode == 'a') and (not os.path.exists(name))):
self.mode = 'w'
self._mode = 'wb'
fileobj = bltn_open(name, self._mode)
self._extfileobj = False
else:
if ((name is None) and hasattr(fileobj, 'name')):
name = fileobj.name
if hasattr(fileobj, 'mode'):
self._mode = fileobj.mode
self._extfileobj = True
self.name = (os.path.abspath(name) if name else None)
self.fileobj = fileobj
if (format is not None):
self.format = format
if (tarinfo is not None):
self.tarinfo = tarinfo
if (dereference is not None):
self.dereference = dereference
if (ignore_zeros is not None):
self.ignore_zeros = ignore_zeros
if (encoding is not None):
self.encoding = encoding
self.errors = errors
if ((pax_headers is not None) and (self.format == PAX_FORMAT)):
self.pax_headers = pax_headers
else:
self.pax_headers = {}
if (debug is not None):
self.debug = debug
if (errorlevel is not None):
self.errorlevel = errorlevel
self.closed = False
self.members = []
self._loaded = False
self.offset = self.fileobj.tell()
self.inodes = {}
try:
if (self.mode == 'r'):
self.firstmember = None
self.firstmember = self.next()
if (self.mode == 'a'):
while True:
self.fileobj.seek(self.offset)
try:
tarinfo = self.tarinfo.fromtarfile(self)
self.members.append(tarinfo)
except EOFHeaderError:
self.fileobj.seek(self.offset)
break
except HeaderError as e:
raise ReadError(str(e))
if (self.mode in 'aw'):
self._loaded = True
if self.pax_headers:
buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
self.fileobj.write(buf)
self.offset += len(buf)
except:
if (not self._extfileobj):
self.fileobj.close()
self.closed = True
raise
|
'Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
\'r\' or \'r:*\' open for reading with transparent compression
\'r:\' open for reading exclusively uncompressed
\'r:gz\' open for reading with gzip compression
\'r:bz2\' open for reading with bzip2 compression
\'a\' or \'a:\' open for appending, creating the file if necessary
\'w\' or \'w:\' open for writing without compression
\'w:gz\' open for writing with gzip compression
\'w:bz2\' open for writing with bzip2 compression
\'r|*\' open a stream of tar blocks with transparent compression
\'r|\' open an uncompressed stream of tar blocks for reading
\'r|gz\' open a gzip compressed stream of tar blocks
\'r|bz2\' open a bzip2 compressed stream of tar blocks
\'w|\' open an uncompressed stream for writing
\'w|gz\' open a gzip compressed stream for writing
\'w|bz2\' open a bzip2 compressed stream for writing'
| @classmethod
def open(cls, name=None, mode='r', fileobj=None, bufsize=RECORDSIZE, **kwargs):
| if ((not name) and (not fileobj)):
raise ValueError('nothing to open')
if (mode in ('r', 'r:*')):
for comptype in cls.OPEN_METH:
func = getattr(cls, cls.OPEN_METH[comptype])
if (fileobj is not None):
saved_pos = fileobj.tell()
try:
return func(name, 'r', fileobj, **kwargs)
except (ReadError, CompressionError) as e:
if (fileobj is not None):
fileobj.seek(saved_pos)
continue
raise ReadError('file could not be opened successfully')
elif (':' in mode):
(filemode, comptype) = mode.split(':', 1)
filemode = (filemode or 'r')
comptype = (comptype or 'tar')
if (comptype in cls.OPEN_METH):
func = getattr(cls, cls.OPEN_METH[comptype])
else:
raise CompressionError(('unknown compression type %r' % comptype))
return func(name, filemode, fileobj, **kwargs)
elif ('|' in mode):
(filemode, comptype) = mode.split('|', 1)
filemode = (filemode or 'r')
comptype = (comptype or 'tar')
if (filemode not in 'rw'):
raise ValueError("mode must be 'r' or 'w'")
stream = _Stream(name, filemode, comptype, fileobj, bufsize)
try:
t = cls(name, filemode, stream, **kwargs)
except:
stream.close()
raise
t._extfileobj = False
return t
elif (mode in 'aw'):
return cls.taropen(name, mode, fileobj, **kwargs)
raise ValueError('undiscernible mode')
|
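# A usage sketch of the mode grammar documented above, shown with the stdlib
# tarfile module, which exposes the same open() classmethod (file names are
# assumptions).
import tarfile

with tarfile.open('archive.tar.gz', 'w:gz') as tf:    # seekable, gzip-compressed
    tf.add('somefile.txt')
with tarfile.open('archive.tar.gz', 'r:*') as tf:     # probes compressors, as the loop above does
    print(tf.getnames())
# 'r|gz' would instead read a non-seekable stream of tar blocks via _Stream.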
'Open uncompressed tar archive name for reading or writing.'
| @classmethod
def taropen(cls, name, mode='r', fileobj=None, **kwargs):
| if ((len(mode) > 1) or (mode not in 'raw')):
raise ValueError("mode must be 'r', 'a' or 'w'")
return cls(name, mode, fileobj, **kwargs)
|
'Open gzip compressed tar archive name for reading or writing.
Appending is not allowed.'
| @classmethod
def gzopen(cls, name, mode='r', fileobj=None, compresslevel=9, **kwargs):
| if ((len(mode) > 1) or (mode not in 'rw')):
raise ValueError("mode must be 'r' or 'w'")
try:
import gzip
gzip.GzipFile
except (ImportError, AttributeError):
raise CompressionError('gzip module is not available')
extfileobj = (fileobj is not None)
try:
fileobj = gzip.GzipFile(name, (mode + 'b'), compresslevel, fileobj)
t = cls.taropen(name, mode, fileobj, **kwargs)
except IOError:
if ((not extfileobj) and (fileobj is not None)):
fileobj.close()
if (fileobj is None):
raise
raise ReadError('not a gzip file')
except:
if ((not extfileobj) and (fileobj is not None)):
fileobj.close()
raise
t._extfileobj = extfileobj
return t
|
'Open bzip2 compressed tar archive name for reading or writing.
Appending is not allowed.'
| @classmethod
def bz2open(cls, name, mode='r', fileobj=None, compresslevel=9, **kwargs):
| if ((len(mode) > 1) or (mode not in 'rw')):
raise ValueError("mode must be 'r' or 'w'.")
try:
import bz2
except ImportError:
raise CompressionError('bz2 module is not available')
if (fileobj is not None):
fileobj = _BZ2Proxy(fileobj, mode)
else:
fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
try:
t = cls.taropen(name, mode, fileobj, **kwargs)
except (IOError, EOFError):
fileobj.close()
raise ReadError('not a bzip2 file')
t._extfileobj = False
return t
|
'Close the TarFile. In write-mode, two finishing zero blocks are
appended to the archive.'
| def close(self):
| if self.closed:
return
if (self.mode in 'aw'):
self.fileobj.write((NUL * (BLOCKSIZE * 2)))
self.offset += (BLOCKSIZE * 2)
(blocks, remainder) = divmod(self.offset, RECORDSIZE)
if (remainder > 0):
self.fileobj.write((NUL * (RECORDSIZE - remainder)))
if (not self._extfileobj):
self.fileobj.close()
self.closed = True
|
'Return a TarInfo object for member `name\'. If `name\' can not be
found in the archive, KeyError is raised. If a member occurs more
than once in the archive, its last occurrence is assumed to be the
most up-to-date version.'
| def getmember(self, name):
| tarinfo = self._getmember(name)
if (tarinfo is None):
raise KeyError(('filename %r not found' % name))
return tarinfo
|
'Return the members of the archive as a list of TarInfo objects. The
list has the same order as the members in the archive.'
| def getmembers(self):
| self._check()
if (not self._loaded):
self._load()
return self.members
|
'Return the members of the archive as a list of their names. It has
the same order as the list returned by getmembers().'
| def getnames(self):
| return [tarinfo.name for tarinfo in self.getmembers()]
|
'Create a TarInfo object for either the file `name\' or the file
object `fileobj\' (using os.fstat on its file descriptor). You can
modify some of the TarInfo\'s attributes before you add it using
addfile(). If given, `arcname\' specifies an alternative name for the
file in the archive.'
| def gettarinfo(self, name=None, arcname=None, fileobj=None):
| self._check('aw')
if (fileobj is not None):
name = fileobj.name
if (arcname is None):
arcname = name
(drv, arcname) = os.path.splitdrive(arcname)
arcname = arcname.replace(os.sep, '/')
arcname = arcname.lstrip('/')
tarinfo = self.tarinfo()
tarinfo.tarfile = self
if (fileobj is None):
if (hasattr(os, 'lstat') and (not self.dereference)):
statres = os.lstat(name)
else:
statres = os.stat(name)
else:
statres = os.fstat(fileobj.fileno())
linkname = ''
stmd = statres.st_mode
if stat.S_ISREG(stmd):
inode = (statres.st_ino, statres.st_dev)
if ((not self.dereference) and (statres.st_nlink > 1) and (inode in self.inodes) and (arcname != self.inodes[inode])):
type = LNKTYPE
linkname = self.inodes[inode]
else:
type = REGTYPE
if inode[0]:
self.inodes[inode] = arcname
elif stat.S_ISDIR(stmd):
type = DIRTYPE
elif stat.S_ISFIFO(stmd):
type = FIFOTYPE
elif stat.S_ISLNK(stmd):
type = SYMTYPE
linkname = os.readlink(name)
elif stat.S_ISCHR(stmd):
type = CHRTYPE
elif stat.S_ISBLK(stmd):
type = BLKTYPE
else:
return None
tarinfo.name = arcname
tarinfo.mode = stmd
tarinfo.uid = statres.st_uid
tarinfo.gid = statres.st_gid
if (type == REGTYPE):
tarinfo.size = statres.st_size
else:
tarinfo.size = 0
tarinfo.mtime = statres.st_mtime
tarinfo.type = type
tarinfo.linkname = linkname
if pwd:
try:
tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
except KeyError:
pass
if grp:
try:
tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
except KeyError:
pass
if (type in (CHRTYPE, BLKTYPE)):
if (hasattr(os, 'major') and hasattr(os, 'minor')):
tarinfo.devmajor = os.major(statres.st_rdev)
tarinfo.devminor = os.minor(statres.st_rdev)
return tarinfo
|
'Print a table of contents to sys.stdout. If `verbose\' is False, only
the names of the members are printed. If it is True, an `ls -l\'-like
output is produced.'
| def list(self, verbose=True):
| self._check()
for tarinfo in self:
if verbose:
print(filemode(tarinfo.mode), end=' ')
print(('%s/%s' % ((tarinfo.uname or tarinfo.uid), (tarinfo.gname or tarinfo.gid))), end=' ')
if (tarinfo.ischr() or tarinfo.isblk()):
print(('%10s' % ('%d,%d' % (tarinfo.devmajor, tarinfo.devminor))), end=' ')
else:
print(('%10d' % tarinfo.size), end=' ')
print(('%d-%02d-%02d %02d:%02d:%02d' % time.localtime(tarinfo.mtime)[:6]), end=' ')
print((tarinfo.name + ('/' if tarinfo.isdir() else '')), end=' ')
if verbose:
if tarinfo.issym():
print('->', tarinfo.linkname, end=' ')
if tarinfo.islnk():
print('link to', tarinfo.linkname, end=' ')
print()
|
'Add the file `name\' to the archive. `name\' may be any type of file
(directory, fifo, symbolic link, etc.). If given, `arcname\'
specifies an alternative name for the file in the archive.
Directories are added recursively by default. This can be avoided by
setting `recursive\' to False. `exclude\' is a function that should
return True for each filename to be excluded. `filter\' is a function
that expects a TarInfo object argument and returns the changed
TarInfo object, if it returns None the TarInfo object will be
excluded from the archive.'
| def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
| self._check('aw')
if (arcname is None):
arcname = name
if (exclude is not None):
import warnings
warnings.warn('use the filter argument instead', DeprecationWarning, 2)
if exclude(name):
self._dbg(2, ('tarfile: Excluded %r' % name))
return
if ((self.name is not None) and (os.path.abspath(name) == self.name)):
self._dbg(2, ('tarfile: Skipped %r' % name))
return
self._dbg(1, name)
tarinfo = self.gettarinfo(name, arcname)
if (tarinfo is None):
self._dbg(1, ('tarfile: Unsupported type %r' % name))
return
if (filter is not None):
tarinfo = filter(tarinfo)
if (tarinfo is None):
self._dbg(2, ('tarfile: Excluded %r' % name))
return
if tarinfo.isreg():
f = bltn_open(name, 'rb')
self.addfile(tarinfo, f)
f.close()
elif tarinfo.isdir():
self.addfile(tarinfo)
if recursive:
for f in os.listdir(name):
self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter=filter)
else:
self.addfile(tarinfo)
|
'Add the TarInfo object `tarinfo\' to the archive. If `fileobj\' is
given, tarinfo.size bytes are read from it and added to the archive.
You can create TarInfo objects using gettarinfo().
On Windows platforms, `fileobj\' should always be opened with mode
\'rb\' to avoid irritation about the file size.'
| def addfile(self, tarinfo, fileobj=None):
| self._check('aw')
tarinfo = copy.copy(tarinfo)
buf = tarinfo.tobuf(self.format, self.encoding, self.errors)
self.fileobj.write(buf)
self.offset += len(buf)
if (fileobj is not None):
copyfileobj(fileobj, self.fileobj, tarinfo.size)
(blocks, remainder) = divmod(tarinfo.size, BLOCKSIZE)
if (remainder > 0):
self.fileobj.write((NUL * (BLOCKSIZE - remainder)))
blocks += 1
self.offset += (blocks * BLOCKSIZE)
self.members.append(tarinfo)
|
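# Sketch of the addfile() contract above: exactly tarinfo.size bytes are read
# from fileobj, so in-memory payloads work with a hand-built TarInfo (stdlib
# tarfile shown; names are assumptions).
import io
import tarfile
import time

data = b'hello world\n'
info = tarfile.TarInfo(name='greeting.txt')
info.size = len(data)                  # addfile reads exactly this many bytes
info.mtime = int(time.time())
with tarfile.open('out.tar', 'w') as tf:
    tf.addfile(info, io.BytesIO(data))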
'Extract all members from the archive to the current working
directory and set owner, modification time and permissions on
directories afterwards. `path\' specifies a different directory
to extract to. `members\' is optional and must be a subset of the
list returned by getmembers().'
| def extractall(self, path='.', members=None):
| directories = []
if (members is None):
members = self
for tarinfo in members:
if tarinfo.isdir():
directories.append(tarinfo)
tarinfo = copy.copy(tarinfo)
tarinfo.mode = 0o700
self.extract(tarinfo, path, set_attrs=(not tarinfo.isdir()))
directories.sort(key=(lambda a: a.name))
directories.reverse()
for tarinfo in directories:
dirpath = os.path.join(path, tarinfo.name)
try:
self.chown(tarinfo, dirpath)
self.utime(tarinfo, dirpath)
self.chmod(tarinfo, dirpath)
except ExtractError as e:
if (self.errorlevel > 1):
raise
else:
self._dbg(1, ('tarfile: %s' % e))
|
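# Sketch of the `members` parameter above: pass a subset of getmembers() to
# extract selectively (stdlib tarfile shown; paths are assumptions).
import tarfile

with tarfile.open('out.tar') as tf:
    wanted = [m for m in tf.getmembers() if m.name.endswith('.txt')]
    tf.extractall(path='unpacked', members=wanted)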
'Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member\' may be a filename or a TarInfo object. You can
specify a different directory using `path\'. File attributes (owner,
mtime, mode) are set unless `set_attrs\' is False.'
| def extract(self, member, path='', set_attrs=True):
| self._check('r')
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.islnk():
tarinfo._link_target = os.path.join(path, tarinfo.linkname)
try:
self._extract_member(tarinfo, os.path.join(path, tarinfo.name), set_attrs=set_attrs)
except EnvironmentError as e:
if (self.errorlevel > 0):
raise
elif (e.filename is None):
self._dbg(1, ('tarfile: %s' % e.strerror))
else:
self._dbg(1, ('tarfile: %s %r' % (e.strerror, e.filename)))
except ExtractError as e:
if (self.errorlevel > 1):
raise
else:
self._dbg(1, ('tarfile: %s' % e))
|
'Extract a member from the archive as a file object. `member\' may be
a filename or a TarInfo object. If `member\' is a regular file, a
file-like object is returned. If `member\' is a link, a file-like
object is constructed from the link\'s target. If `member\' is none of
the above, None is returned.
The file-like object is read-only and provides the following
methods: read(), readline(), readlines(), seek() and tell()'
| def extractfile(self, member):
| self._check('r')
if isinstance(member, str):
tarinfo = self.getmember(member)
else:
tarinfo = member
if tarinfo.isreg():
return self.fileobject(self, tarinfo)
elif (tarinfo.type not in SUPPORTED_TYPES):
return self.fileobject(self, tarinfo)
elif (tarinfo.islnk() or tarinfo.issym()):
if isinstance(self.fileobj, _Stream):
raise StreamError('cannot extract (sym)link as file object')
else:
return self.extractfile(self._find_link_target(tarinfo))
else:
return None
|
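# Sketch of the extractfile() contract above: a read-only file-like object
# for regular members, None otherwise (stdlib tarfile shown; names assumed).
import tarfile

with tarfile.open('out.tar') as tf:
    f = tf.extractfile('greeting.txt')   # None for non-file members
    if f is not None:
        print(f.readline())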
'Extract the TarInfo object tarinfo to a physical
file called targetpath.'
| def _extract_member(self, tarinfo, targetpath, set_attrs=True):
| targetpath = targetpath.rstrip('/')
targetpath = targetpath.replace('/', os.sep)
upperdirs = os.path.dirname(targetpath)
if (upperdirs and (not os.path.exists(upperdirs))):
os.makedirs(upperdirs)
if (tarinfo.islnk() or tarinfo.issym()):
self._dbg(1, ('%s -> %s' % (tarinfo.name, tarinfo.linkname)))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif (tarinfo.ischr() or tarinfo.isblk()):
self.makedev(tarinfo, targetpath)
elif (tarinfo.islnk() or tarinfo.issym()):
self.makelink(tarinfo, targetpath)
elif (tarinfo.type not in SUPPORTED_TYPES):
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
if set_attrs:
self.chown(tarinfo, targetpath)
if (not tarinfo.issym()):
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
|
'Make a directory called targetpath.'
| def makedir(self, tarinfo, targetpath):
| try:
os.mkdir(targetpath, 0o700)
except EnvironmentError as e:
if (e.errno != errno.EEXIST):
raise
|
'Make a file called targetpath.'
| def makefile(self, tarinfo, targetpath):
| source = self.fileobj
source.seek(tarinfo.offset_data)
target = bltn_open(targetpath, 'wb')
if (tarinfo.sparse is not None):
for (offset, size) in tarinfo.sparse:
target.seek(offset)
copyfileobj(source, target, size)
else:
copyfileobj(source, target, tarinfo.size)
target.seek(tarinfo.size)
target.truncate()
target.close()
|
'Make a file from a TarInfo object with an unknown type
at targetpath.'
| def makeunknown(self, tarinfo, targetpath):
| self.makefile(tarinfo, targetpath)
self._dbg(1, ('tarfile: Unknown file type %r, extracted as regular file.' % tarinfo.type))
|
'Make a fifo called targetpath.'
| def makefifo(self, tarinfo, targetpath):
| if hasattr(os, 'mkfifo'):
os.mkfifo(targetpath)
else:
raise ExtractError('fifo not supported by system')
|
'Make a character or block device called targetpath.'
| def makedev(self, tarinfo, targetpath):
| if ((not hasattr(os, 'mknod')) or (not hasattr(os, 'makedev'))):
raise ExtractError('special devices not supported by system')
mode = tarinfo.mode
if tarinfo.isblk():
mode |= stat.S_IFBLK
else:
mode |= stat.S_IFCHR
os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor))
|
'Make a (symbolic) link called targetpath. If it cannot be created
(platform limitation), we try to make a copy of the referenced file
instead of a link.'
| def makelink(self, tarinfo, targetpath):
| try:
    if tarinfo.issym():
        os.symlink(tarinfo.linkname, targetpath)
    elif os.path.exists(tarinfo._link_target):
        os.link(tarinfo._link_target, targetpath)
    else:
        self._extract_member(self._find_link_target(tarinfo), targetpath)
except symlink_exception:
    # Platform cannot create the link: fall back to extracting a copy of
    # the referenced file, as the docstring promises.
    try:
        self._extract_member(self._find_link_target(tarinfo), targetpath)
    except KeyError:
        raise ExtractError('unable to resolve link inside archive')
|
'Set owner of targetpath according to tarinfo.'
| def chown(self, tarinfo, targetpath):
| if (pwd and hasattr(os, 'geteuid') and (os.geteuid() == 0)):
try:
g = grp.getgrnam(tarinfo.gname)[2]
except KeyError:
g = tarinfo.gid
try:
u = pwd.getpwnam(tarinfo.uname)[2]
except KeyError:
u = tarinfo.uid
try:
if (tarinfo.issym() and hasattr(os, 'lchown')):
os.lchown(targetpath, u, g)
elif (sys.platform != 'os2emx'):
os.chown(targetpath, u, g)
except EnvironmentError as e:
raise ExtractError('could not change owner')
|
'Set file permissions of targetpath according to tarinfo.'
| def chmod(self, tarinfo, targetpath):
| if hasattr(os, 'chmod'):
try:
os.chmod(targetpath, tarinfo.mode)
except EnvironmentError as e:
raise ExtractError('could not change mode')
|
'Set modification time of targetpath according to tarinfo.'
| def utime(self, tarinfo, targetpath):
| if (not hasattr(os, 'utime')):
return
try:
os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
except EnvironmentError as e:
raise ExtractError('could not change modification time')
|
'Return the next member of the archive as a TarInfo object, when
TarFile is opened for reading. Return None if there are no more
available.'
| def next(self):
| self._check('ra')
if (self.firstmember is not None):
m = self.firstmember
self.firstmember = None
return m
self.fileobj.seek(self.offset)
tarinfo = None
while True:
try:
tarinfo = self.tarinfo.fromtarfile(self)
except EOFHeaderError as e:
if self.ignore_zeros:
self._dbg(2, ('0x%X: %s' % (self.offset, e)))
self.offset += BLOCKSIZE
continue
except InvalidHeaderError as e:
if self.ignore_zeros:
self._dbg(2, ('0x%X: %s' % (self.offset, e)))
self.offset += BLOCKSIZE
continue
elif (self.offset == 0):
raise ReadError(str(e))
except EmptyHeaderError:
if (self.offset == 0):
raise ReadError('empty file')
except TruncatedHeaderError as e:
if (self.offset == 0):
raise ReadError(str(e))
except SubsequentHeaderError as e:
raise ReadError(str(e))
break
if (tarinfo is not None):
self.members.append(tarinfo)
else:
self._loaded = True
return tarinfo
|
'Find an archive member by name from bottom to top.
If tarinfo is given, it is used as the starting point.'
| def _getmember(self, name, tarinfo=None, normalize=False):
| members = self.getmembers()
if (tarinfo is not None):
members = members[:members.index(tarinfo)]
if normalize:
name = os.path.normpath(name)
for member in reversed(members):
if normalize:
member_name = os.path.normpath(member.name)
else:
member_name = member.name
if (name == member_name):
return member
|
'Read through the entire archive file and look for readable
members.'
| def _load(self):
| while True:
tarinfo = self.next()
if (tarinfo is None):
break
self._loaded = True
|
'Check if TarFile is still open, and if the operation\'s mode
corresponds to TarFile\'s mode.'
| def _check(self, mode=None):
| if self.closed:
raise IOError(('%s is closed' % self.__class__.__name__))
if ((mode is not None) and (self.mode not in mode)):
raise IOError(('bad operation for mode %r' % self.mode))
|
'Find the target member of a symlink or hardlink member in the
archive.'
| def _find_link_target(self, tarinfo):
| if tarinfo.issym():
linkname = ((os.path.dirname(tarinfo.name) + '/') + tarinfo.linkname)
limit = None
else:
linkname = tarinfo.linkname
limit = tarinfo
member = self._getmember(linkname, tarinfo=limit, normalize=True)
if (member is None):
raise KeyError(('linkname %r not found' % linkname))
return member
|
'Provide an iterator object.'
| def __iter__(self):
| if self._loaded:
return iter(self.members)
else:
return TarIter(self)
|
'Write debugging output to sys.stderr.'
| def _dbg(self, level, msg):
| if (level <= self.debug):
print(msg, file=sys.stderr)
|
'Construct a TarIter object.'
| def __init__(self, tarfile):
| self.tarfile = tarfile
self.index = 0
|
'Return iterator object.'
| def __iter__(self):
| return self
|
'Return the next item using TarFile\'s next() method.
When all members have been read, set TarFile as _loaded.'
| def __next__(self):
| if (not self.tarfile._loaded):
tarinfo = self.tarfile.next()
if (not tarinfo):
self.tarfile._loaded = True
raise StopIteration
else:
try:
tarinfo = self.tarfile.members[self.index]
except IndexError:
raise StopIteration
self.index += 1
return tarinfo
|
'Tell if the source is newer than the target.
Returns true if \'source\' exists and is more recently modified than
\'target\', or if \'source\' exists and \'target\' doesn\'t.
Returns false if both exist and \'target\' is the same age or younger
than \'source\'. Raise DistlibException if \'source\' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".'
| def newer(self, source, target):
| if (not os.path.exists(source)):
raise DistlibException(("file '%r' does not exist" % os.path.abspath(source)))
if (not os.path.exists(target)):
return True
return (os.stat(source).st_mtime > os.stat(target).st_mtime)
|
'Copy a file respecting dry-run and force flags.'
| def copy_file(self, infile, outfile, check=True):
| self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if (not self.dry_run):
msg = None
if check:
if os.path.islink(outfile):
msg = ('%s is a symlink' % outfile)
elif (os.path.exists(outfile) and (not os.path.isfile(outfile))):
msg = ('%s is a non-regular file' % outfile)
if msg:
raise ValueError((msg + ' which would be overwritten'))
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
|
'Commit recorded changes, turn off recording, return
changes.'
| def commit(self):
| assert self.record
result = (self.files_written, self.dirs_created)
self._init_record()
return result
|
'Initialise an instance.
:param base: The base directory where the cache should be located.'
| def __init__(self, base):
| if (not os.path.isdir(base)):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning("Directory '%s' is not private", base)
self.base = os.path.abspath(os.path.normpath(base))
|
'Converts a resource prefix to a directory name in the cache.'
| def prefix_to_dir(self, prefix):
| return path_to_cache_dir(prefix)
|
'Clear the cache.'
| def clear(self):
| not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if (os.path.islink(fn) or os.path.isfile(fn)):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
|
'Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.'
| def add(self, event, subscriber, append=True):
| subs = self._subscribers
if (event not in subs):
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
|
'Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.'
| def remove(self, event, subscriber):
| subs = self._subscribers
if (event not in subs):
raise ValueError(('No subscribers: %r' % event))
subs[event].remove(subscriber)
|
'Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.'
| def get_subscribers(self, event):
| return iter(self._subscribers.get(event, ()))
|
'Publish an event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event\'s
subscribers.
:param kwargs: The keyword arguments to pass to the event\'s
subscribers.'
| def publish(self, event, *args, **kwargs):
| result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s', event, args, kwargs, result)
return result
|
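# Sketch of the add()/publish() flow above; `Publisher` is a hypothetical
# class mixing in this event API (distlib exposes it as
# distlib.util.EventMixin). A subscriber that raises contributes None to the
# result list rather than aborting publication.
from distlib.util import EventMixin

class Publisher(EventMixin):
    pass

def on_change(event, *args, **kwargs):
    return (event, args, kwargs)

p = Publisher()
p.add('change', on_change)             # append=True by default
results = p.publish('change', 1, flag=True)
# results == [('change', (1,), {'flag': True})]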
'This is called to create a connection instance. Normally you\'d
pass a connection class to do_open, but it doesn\'t actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we\'ll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.'
| def _conn_maker(self, *args, **kwargs):
| result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
|
'Default converter for the inc:// protocol.'
| def inc_convert(self, value):
| if (not os.path.isabs(value)):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
|
'Read lines from a subprocess\' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.'
| def reader(self, stream, context):
| progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if (not s):
break
if (progress is not None):
progress(s, context)
else:
if (not verbose):
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
|
'Initialise an instance. There is normally one for each DistributionPath.'
| def __init__(self):
| self.name = {}
self.path = {}
self.generated = False
|
'Clear the cache, setting it to its initial state.'
| def clear(self):
| self.name.clear()
self.path.clear()
self.generated = False
|
'Add a distribution to the cache.
:param dist: The distribution to add.'
| def add(self, dist):
| if (dist.path not in self.path):
self.path[dist.path] = dist
self.name.setdefault(dist.key, []).append(dist)
|
'Create an instance from a path, optionally including legacy (distutils/
setuptools/distribute) distributions.
:param path: The path to use, as a list of directories. If not specified,
sys.path is used.
:param include_egg: If True, this instance will look for and return legacy
distributions as well as those based on PEP 376.'
| def __init__(self, path=None, include_egg=False):
| if (path is None):
path = sys.path
self.path = path
self._include_dist = True
self._include_egg = include_egg
self._cache = _Cache()
self._cache_egg = _Cache()
self._cache_enabled = True
self._scheme = get_scheme(u'default')
|
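# Sketch of querying this class: distlib exposes it as
# distlib.database.DistributionPath (the distribution name is an assumption).
from distlib.database import DistributionPath

dp = DistributionPath(include_egg=True)    # also report legacy egg metadata
dist = dp.get_distribution('pip')          # None if not installed
if dist is not None:
    print(dist.name, dist.version)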