desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
'Run the given test case or test suite.'
def run(self, test, skipped):
    """Run the given test case or test suite.

    Prints a summary line, the list of unavailable resources (if any)
    and a FAILED/OK verdict to self.stream, then returns the result.
    """
    result = self._makeResult()
    # Wall-clock the whole run.
    startTime = time.time()
    test(result)
    stopTime = time.time()
    timeTaken = (stopTime - startTime)
    result.printErrors()
    self.stream.writeln(result.separator2)
    run = result.testsRun
    # _unavail appears to be a module-level mapping of resources that
    # were reported unavailable -- TODO confirm against module globals.
    if _unavail:
        requested = _unavail.keys()
        requested.sort()
        self.stream.writeln(('Ran %d test%s in %.3fs (%s module%s skipped)' % (run, (((run != 1) and 's') or ''), timeTaken, len(skipped), (((len(skipped) != 1) and 's') or ''))))
        self.stream.writeln(('Unavailable resources: %s' % ', '.join(requested)))
    else:
        self.stream.writeln(('Ran %d test%s in %.3fs' % (run, (((run != 1) and 's') or ''), timeTaken)))
    self.stream.writeln()
    if (not result.wasSuccessful()):
        self.stream.write('FAILED (')
        (failed, errored) = map(len, (result.failures, result.errors))
        if failed:
            self.stream.write(('failures=%d' % failed))
        if errored:
            if failed:
                self.stream.write(', ')
            self.stream.write(('errors=%d' % errored))
        self.stream.writeln(')')
    else:
        self.stream.writeln('OK')
    return result
|
'Test that a character pointer-to-pointer is correctly passed'
def test_charpp(self):
    """Test that a character pointer-to-pointer is correctly passed."""
    dll = CDLL(_ctypes_test.__file__)
    func = dll._testfunc_c_p_p
    func.restype = c_char_p
    # Build an argv-style array of two C strings.
    vector = (c_char_p * 2)()
    vector[0] = 'hello'
    vector[1] = 'world'
    count = c_int(2)
    got = func(byref(count), vector)
    # The C helper is expected to hand back the last entry.
    assert (got == 'world'), got
|
'Constructor. May be extended, do not override.'
def __init__(self, server_address, RequestHandlerClass):
    """Constructor.  May be extended, do not override."""
    self.RequestHandlerClass = RequestHandlerClass
    self.server_address = server_address
    # Flag polled by serve_forever(); set by shutdown().
    self.__shutdown_request = False
    # Event signalled when serve_forever() has actually exited.
    self.__is_shut_down = threading.Event()
|
'Called by constructor to activate the server.
May be overridden.'
def server_activate(self):
    """Called by constructor to activate the server.

    May be overridden.
    """
    # Base implementation is a no-op; socket-based servers override it.
|
'Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.'
def serve_forever(self, poll_interval=0.5):
    """Handle one request at a time until shutdown.

    Polls for shutdown every poll_interval seconds. Ignores
    self.timeout. If you need to do periodic tasks, do them in
    another thread.
    """
    self.__is_shut_down.clear()
    try:
        # Poll with select() so a shutdown() call from another thread
        # is noticed within poll_interval seconds.
        while (not self.__shutdown_request):
            (r, w, e) = select.select([self], [], [], poll_interval)
            if (self in r):
                self._handle_request_noblock()
    finally:
        self.__shutdown_request = False
        # Wake up shutdown(), which blocks on this event.
        self.__is_shut_down.set()
|
'Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.'
def shutdown(self):
    """Stops the serve_forever loop.

    Blocks until the loop has finished. This must be called while
    serve_forever() is running in another thread, or it will
    deadlock.
    """
    self.__shutdown_request = True
    # Block until serve_forever() notices the flag and sets the event.
    self.__is_shut_down.wait()
|
'Handle one request, possibly blocking.
Respects self.timeout.'
def handle_request(self):
    """Handle one request, possibly blocking.

    Respects self.timeout.
    """
    # Effective timeout is the smaller of the socket timeout and
    # self.timeout, ignoring whichever is None.
    timeout = self.socket.gettimeout()
    if timeout is None:
        timeout = self.timeout
    elif self.timeout is not None:
        timeout = min(timeout, self.timeout)
    ready = select.select([self], [], [], timeout)
    if not ready[0]:
        self.handle_timeout()
    else:
        self._handle_request_noblock()
|
'Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().'
def _handle_request_noblock(self):
    """Handle one request, without blocking.

    Assumes select.select has already reported the socket readable,
    so get_request() should not block.
    """
    try:
        request, client_address = self.get_request()
    except socket.error:
        # Connection vanished between select() and accept(); drop it.
        return
    if not self.verify_request(request, client_address):
        return
    try:
        self.process_request(request, client_address)
    except:
        # process_request shuts the request down on success; on failure
        # report the error and clean up here.
        self.handle_error(request, client_address)
        self.shutdown_request(request)
|
'Called if no new request arrives within self.timeout.
Overridden by ForkingMixIn.'
def handle_timeout(self):
    """Called if no new request arrives within self.timeout.

    Overridden by ForkingMixIn.
    """
    # Nothing to do in the base class.
|
'Verify the request. May be overridden.
Return True if we should proceed with this request.'
def verify_request(self, request, client_address):
    """Verify the request.  May be overridden.

    Return True if we should proceed with this request.
    """
    # Accept everything by default; subclasses may filter clients.
    return True
|
'Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.'
def process_request(self, request, client_address):
    """Call finish_request.

    Overridden by ForkingMixIn and ThreadingMixIn.
    """
    # Handle synchronously, then release the request's resources.
    self.finish_request(request, client_address)
    self.shutdown_request(request)
|
'Called to clean-up the server.
May be overridden.'
def server_close(self):
    """Called to clean-up the server.

    May be overridden.
    """
    # No resources to release in the base class.
|
'Finish one request by instantiating RequestHandlerClass.'
def finish_request(self, request, client_address):
    """Finish one request by instantiating RequestHandlerClass."""
    # The handler class does all the work in its constructor.
    self.RequestHandlerClass(request, client_address, self)
|
'Called to shutdown and close an individual request.'
def shutdown_request(self, request):
    """Called to shutdown and close an individual request."""
    # Base class has no transport-level shutdown; just close.
    self.close_request(request)
|
'Called to clean up an individual request.'
def close_request(self, request):
    """Called to clean up an individual request."""
    # No per-request resources in the base class.
|
'Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.'
def handle_error(self, request, client_address):
    """Handle an error gracefully.  May be overridden.

    The default is to print a traceback and continue.
    """
    print ('-' * 40)
    # Trailing comma keeps the address on the same line (Py2 print).
    print 'Exception happened during processing of request from',
    print client_address
    import traceback
    traceback.print_exc()
    print ('-' * 40)
|
'Constructor. May be extended, do not override.'
def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
    """Constructor.  May be extended, do not override."""
    BaseServer.__init__(self, server_address, RequestHandlerClass)
    self.socket = socket.socket(self.address_family, self.socket_type)
    if not bind_and_activate:
        return
    self.server_bind()
    self.server_activate()
|
'Called by constructor to bind the socket.
May be overridden.'
def server_bind(self):
    """Called by constructor to bind the socket.

    May be overridden.
    """
    if self.allow_reuse_address:
        # Allow quick restarts while old connections sit in TIME_WAIT.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.socket.bind(self.server_address)
    # Record the address actually bound (e.g. port 0 picks a free port).
    self.server_address = self.socket.getsockname()
|
'Called by constructor to activate the server.
May be overridden.'
def server_activate(self):
    """Called by constructor to activate the server.

    May be overridden.
    """
    # Start listening; the backlog comes from the class attribute.
    self.socket.listen(self.request_queue_size)
|
'Called to clean-up the server.
May be overridden.'
def server_close(self):
    """Called to clean-up the server.

    May be overridden.
    """
    # Release the listening socket.
    self.socket.close()
|
'Return socket file number.
Interface required by select().'
def fileno(self):
    """Return socket file number.

    Interface required by select().
    """
    return self.socket.fileno()
|
'Get the request and client address from the socket.
May be overridden.'
def get_request(self):
    """Get the request and client address from the socket.

    May be overridden.
    """
    # accept() returns (connection, client_address).
    return self.socket.accept()
|
'Called to shutdown and close an individual request.'
def shutdown_request(self, request):
    """Called to shutdown and close an individual request."""
    try:
        # Half-close the write side so the client sees EOF on reads.
        request.shutdown(socket.SHUT_WR)
    except socket.error:
        pass  # e.g. the peer already disconnected
    self.close_request(request)
|
'Called to clean up an individual request.'
def close_request(self, request):
    """Called to clean up an individual request."""
    # The request is the connected socket; close it.
    request.close()
|
'Internal routine to wait for children that have exited.'
def collect_children(self):
    """Internal routine to wait for children that have exited.

    Blocks while len(active_children) >= max_children, then reaps any
    remaining exited children without blocking.
    """
    if (self.active_children is None):
        return
    # While at capacity, block until at least one child exits.
    while (len(self.active_children) >= self.max_children):
        try:
            (pid, status) = os.waitpid(0, 0)
        except os.error:
            # FIX: the original set pid = None and continued, which spins
            # forever once waitpid keeps failing (e.g. ECHILD while
            # active_children is stale).  Stop blocking instead.
            break
        if (pid in self.active_children):
            self.active_children.remove(pid)
    # Reap the remaining children without blocking.
    # FIX: iterate over a copy -- the original iterated the very list it
    # removed from, which skips the element after each removal.
    for child in self.active_children[:]:
        try:
            (pid, status) = os.waitpid(child, os.WNOHANG)
        except os.error:
            pid = None
        if (not pid):
            continue
        try:
            self.active_children.remove(pid)
        except ValueError as e:
            raise ValueError(('%s. x=%d and list=%r' % (e.message, pid, self.active_children)))
|
'Wait for zombies after self.timeout seconds of inactivity.
May be extended, do not override.'
def handle_timeout(self):
    """Wait for zombies after self.timeout seconds of inactivity.

    May be extended, do not override.
    """
    # Idle time is a good moment to reap exited child processes.
    self.collect_children()
|
'Fork a new subprocess to process the request.'
def process_request(self, request, client_address):
    """Fork a new subprocess to process the request."""
    self.collect_children()
    pid = os.fork()
    if pid:
        # Parent: remember the child and drop our copy of the request
        # socket -- the child owns it now.
        if (self.active_children is None):
            self.active_children = []
        self.active_children.append(pid)
        self.close_request(request)
        return
    else:
        # Child: service the request, then _exit() so we never return
        # into the parent's stack or run its cleanup handlers.
        try:
            self.finish_request(request, client_address)
            self.shutdown_request(request)
            os._exit(0)
        except:
            try:
                self.handle_error(request, client_address)
                self.shutdown_request(request)
            finally:
                # Exit with status 1 even if error handling itself fails.
                os._exit(1)
|
'Same as in BaseServer but as a thread.
In addition, exception handling is done here.'
def process_request_thread(self, request, client_address):
    """Same as in BaseServer but as a thread.

    In addition, exception handling is done here.
    """
    try:
        self.finish_request(request, client_address)
        self.shutdown_request(request)
    except:
        # Threads must not propagate exceptions; report and clean up.
        self.handle_error(request, client_address)
        self.shutdown_request(request)
|
'Start a new thread to process the request.'
def process_request(self, request, client_address):
    """Start a new thread to process the request."""
    worker = threading.Thread(target=self.process_request_thread, args=(request, client_address))
    if self.daemon_threads:
        # Daemon threads do not keep the interpreter alive on exit.
        worker.setDaemon(1)
    worker.start()
|
'Create new Popen instance.'
def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0):
    """Create new Popen instance."""
    # Reap any forgotten child processes first (module-level cleanup).
    _cleanup()
    self._child_created = False
    if (not isinstance(bufsize, (int, long))):
        raise TypeError('bufsize must be an integer')
    # Reject arguments that only make sense on the other platform.
    if mswindows:
        if (preexec_fn is not None):
            raise ValueError('preexec_fn is not supported on Windows platforms')
        if (close_fds and ((stdin is not None) or (stdout is not None) or (stderr is not None))):
            raise ValueError('close_fds is not supported on Windows platforms if you redirect stdin/stdout/stderr')
    else:
        if (startupinfo is not None):
            raise ValueError('startupinfo is only supported on Windows platforms')
        if (creationflags != 0):
            raise ValueError('creationflags is only supported on Windows platforms')
    self.stdin = None
    self.stdout = None
    self.stderr = None
    self.pid = None
    self.returncode = None
    self.universal_newlines = universal_newlines
    # Translate stdin/stdout/stderr into three (read, write) pipe-end
    # pairs: p2c = parent-to-child, c2p = child-to-parent.
    (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) = self._get_handles(stdin, stdout, stderr)
    self._execute_child(args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
    if mswindows:
        # Convert Windows HANDLEs into C-runtime file descriptors.
        if (p2cwrite is not None):
            p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
        if (c2pread is not None):
            c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
        if (errread is not None):
            errread = msvcrt.open_osfhandle(errread.Detach(), 0)
    # Wrap the parent's pipe ends in file objects; 'U' enables
    # universal-newline translation on the child's output streams.
    if (p2cwrite is not None):
        self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
    if (c2pread is not None):
        if universal_newlines:
            self.stdout = os.fdopen(c2pread, 'rU', bufsize)
        else:
            self.stdout = os.fdopen(c2pread, 'rb', bufsize)
    if (errread is not None):
        if universal_newlines:
            self.stderr = os.fdopen(errread, 'rU', bufsize)
        else:
            self.stderr = os.fdopen(errread, 'rb', bufsize)
|
'Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr).'
def communicate(self, input=None):
    """Interact with process: Send data to stdin. Read data from
    stdout and stderr, until end-of-file is reached. Wait for
    process to terminate. The optional input argument should be a
    string to be sent to the child process, or None, if no data
    should be sent to the child.

    communicate() returns a tuple (stdout, stderr).
    """
    # With two or more streams unredirected there is at most one pipe,
    # so no deadlock is possible and we can handle it inline.
    if [self.stdin, self.stdout, self.stderr].count(None) < 2:
        return self._communicate(input)
    stdout = None
    stderr = None
    if self.stdin:
        if input:
            try:
                self.stdin.write(input)
            except IOError as exc:
                # A child that exited early gives EPIPE/EINVAL; ignore.
                if ((exc.errno != errno.EPIPE) and (exc.errno != errno.EINVAL)):
                    raise
        self.stdin.close()
    elif self.stdout:
        stdout = self.stdout.read()
        self.stdout.close()
    elif self.stderr:
        stderr = self.stderr.read()
        self.stderr.close()
    self.wait()
    return (stdout, stderr)
|
'A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__'
def __call__(self, attr):
    """A workaround to get special attributes on the ServerProxy
    without interfering with the magic __getattr__
    """
    if attr == 'close':
        return self.__close
    if attr == 'transport':
        return self.__transport
    raise AttributeError('Attribute %r not found' % (attr,))
|
'add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)'
def add_argument(self, *args, **kwargs):
    """add_argument(dest, ..., name=value, ...)
    add_argument(option_string, option_string, ..., name=value, ...)
    """
    chars = self.prefix_chars
    # No args, or a single name that does not start with a prefix
    # character (usually '-'): treat it as a positional argument.
    if ((not args) or ((len(args) == 1) and (args[0][0] not in chars))):
        if (args and ('dest' in kwargs)):
            raise ValueError('dest supplied twice for positional argument')
        kwargs = self._get_positional_kwargs(*args, **kwargs)
    else:
        kwargs = self._get_optional_kwargs(*args, **kwargs)
    # Fill in the default, preferring parser-level defaults.
    if ('default' not in kwargs):
        dest = kwargs['dest']
        if (dest in self._defaults):
            kwargs['default'] = self._defaults[dest]
        elif (self.argument_default is not None):
            kwargs['default'] = self.argument_default
    # Resolve the action class (registry name or callable) and build it.
    action_class = self._pop_action_class(kwargs)
    if (not _callable(action_class)):
        raise ValueError(('unknown action "%s"' % (action_class,)))
    action = action_class(**kwargs)
    # The type converter must be callable as well.
    type_func = self._registry_get('type', action.type, action.type)
    if (not _callable(type_func)):
        raise ValueError(('%r is not callable' % (type_func,)))
    # Probe the formatter early so a bad metavar tuple fails here
    # rather than later when help is rendered.
    if hasattr(self, '_get_formatter'):
        try:
            self._get_formatter()._format_args(action, None)
        except TypeError:
            raise ValueError('length of metavar tuple does not match nargs')
    return self._add_action(action)
|
'error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.'
def error(self, message):
    """error(message: string)

    Prints a usage message incorporating the message to stderr and
    exits.

    If you override this in a subclass, it should not return -- it
    should either exit or raise an exception.
    """
    self.print_usage(_sys.stderr)
    # Exit status 2: command-line usage error; message is localized.
    self.exit(2, (_('%s: error: %s\n') % (self.prog, message)))
|
'Construct a _Stream object.'
def __init__(self, name, mode, comptype, fileobj, bufsize):
    """Construct a _Stream object."""
    # _extfileobj: True when the caller owns fileobj (we must not close it).
    self._extfileobj = True
    if (fileobj is None):
        fileobj = _LowLevelFile(name, mode)
        self._extfileobj = False
    if (comptype == '*'):
        # Auto-detect the compression type from the stream's magic bytes.
        fileobj = _StreamProxy(fileobj)
        comptype = fileobj.getcomptype()
    self.name = (name or '')
    self.mode = mode
    self.comptype = comptype
    self.fileobj = fileobj
    self.bufsize = bufsize
    self.buf = ''       # raw (compressed) byte buffer
    self.pos = 0L       # logical (uncompressed) stream position
    self.closed = False
    if (comptype == 'gz'):
        try:
            import zlib
        except ImportError:
            raise CompressionError('zlib module is not available')
        self.zlib = zlib
        # 4294967295 == 0xffffffff: keep the CRC within 32 bits.
        self.crc = (zlib.crc32('') & 4294967295L)
        if (mode == 'r'):
            self._init_read_gz()
        else:
            self._init_write_gz()
    if (comptype == 'bz2'):
        try:
            import bz2
        except ImportError:
            raise CompressionError('bz2 module is not available')
        if (mode == 'r'):
            # dbuf buffers already-decompressed data.
            self.dbuf = ''
            self.cmp = bz2.BZ2Decompressor()
        else:
            self.cmp = bz2.BZ2Compressor()
|
'Initialize for writing with gzip compression.'
def _init_write_gz(self):
    """Initialize for writing with gzip compression."""
    # Raw deflate stream (negative wbits): we emit the gzip wrapper
    # (header and trailer) ourselves.
    self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, (- self.zlib.MAX_WBITS), self.zlib.DEF_MEM_LEVEL, 0)
    timestamp = struct.pack('<L', long(time.time()))
    # gzip header: magic 1f 8b, method 08 (deflate), flags 08 (FNAME),
    # 4-byte mtime, XFL 02, OS ff (unknown).
    self.__write(('\x1f\x8b\x08\x08%s\x02\xff' % timestamp))
    if self.name.endswith('.gz'):
        self.name = self.name[:(-3)]
    # FNAME field: original file name, NUL-terminated.
    self.__write((self.name + NUL))
|
'Write string s to the stream.'
def write(self, s):
    """Write string s to the stream."""
    if (self.comptype == 'gz'):
        # Running CRC-32 of the uncompressed data, masked to 32 bits.
        self.crc = (self.zlib.crc32(s, self.crc) & 4294967295L)
    # pos tracks the uncompressed stream position.
    self.pos += len(s)
    if (self.comptype != 'tar'):
        s = self.cmp.compress(s)
    self.__write(s)
|
'Write string s to the stream if a whole new block
is ready to be written.'
def __write(self, s):
    """Write string s to the stream if a whole new block
    is ready to be written.
    """
    self.buf += s
    # Flush only full bufsize-sized chunks; keep the remainder buffered.
    while len(self.buf) > self.bufsize:
        self.fileobj.write(self.buf[:self.bufsize])
        self.buf = self.buf[self.bufsize:]
|
'Close the _Stream object. No operation should be
done on it afterwards.'
def close(self):
    """Close the _Stream object. No operation should be
    done on it afterwards.
    """
    if self.closed:
        return
    if ((self.mode == 'w') and (self.comptype != 'tar')):
        # Flush whatever the compressor still holds back.
        self.buf += self.cmp.flush()
    if ((self.mode == 'w') and self.buf):
        self.fileobj.write(self.buf)
        self.buf = ''
        if (self.comptype == 'gz'):
            # gzip trailer: CRC-32 and uncompressed size, little-endian,
            # both masked to 32 bits.
            self.fileobj.write(struct.pack('<L', (self.crc & 4294967295L)))
            self.fileobj.write(struct.pack('<L', (self.pos & 4294967295L)))
    if (not self._extfileobj):
        self.fileobj.close()
    self.closed = True
|
'Initialize for reading a gzip compressed fileobj.'
def _init_read_gz(self):
    """Initialize for reading a gzip compressed fileobj."""
    # Raw inflate; the gzip header is parsed manually below.
    self.cmp = self.zlib.decompressobj((- self.zlib.MAX_WBITS))
    self.dbuf = ''
    # Validate magic bytes and compression method.
    if (self.__read(2) != '\x1f\x8b'):
        raise ReadError('not a gzip file')
    if (self.__read(1) != '\x08'):
        raise CompressionError('unsupported compression method')
    flag = ord(self.__read(1))
    # Skip mtime (4 bytes), XFL (1) and OS (1).
    self.__read(6)
    if (flag & 4):
        # FEXTRA: 2-byte little-endian length, then that many bytes.
        xlen = (ord(self.__read(1)) + (256 * ord(self.__read(1))))
        self.read(xlen)
    if (flag & 8):
        # FNAME: NUL-terminated original file name.
        while True:
            s = self.__read(1)
            if ((not s) or (s == NUL)):
                break
    if (flag & 16):
        # FCOMMENT: NUL-terminated comment.
        while True:
            s = self.__read(1)
            if ((not s) or (s == NUL)):
                break
    if (flag & 2):
        # FHCRC: 2-byte header CRC.
        self.__read(2)
|
'Return the stream\'s file pointer position.'
def tell(self):
    """Return the stream's file pointer position."""
    # pos tracks the logical (uncompressed) position.
    return self.pos
|
'Set the stream\'s file pointer to pos. Negative seeking
is forbidden.'
def seek(self, pos=0):
    """Set the stream's file pointer to pos. Negative seeking
    is forbidden.
    """
    if ((pos - self.pos) >= 0):
        # Streams cannot really seek; emulate forward seeks by reading
        # and discarding whole bufsize chunks plus a remainder.
        (blocks, remainder) = divmod((pos - self.pos), self.bufsize)
        for i in xrange(blocks):
            self.read(self.bufsize)
        self.read(remainder)
    else:
        raise StreamError('seeking backwards is not allowed')
    return self.pos
|
'Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.'
def read(self, size=None):
    """Return the next size number of bytes from the stream.

    If size is not defined, return all bytes of the stream
    up to EOF.
    """
    if size is None:
        # Drain the stream chunk by chunk until EOF.
        chunks = []
        while True:
            chunk = self._read(self.bufsize)
            if not chunk:
                break
            chunks.append(chunk)
        buf = ''.join(chunks)
    else:
        buf = self._read(size)
    self.pos += len(buf)
    return buf
|
'Return size bytes from the stream.'
def _read(self, size):
    """Return size bytes from the stream."""
    if self.comptype == 'tar':
        # Uncompressed: read straight from the raw-block layer.
        return self.__read(size)
    # Decompress raw chunks until enough data is buffered.
    have = len(self.dbuf)
    pieces = [self.dbuf]
    while have < size:
        raw = self.__read(self.bufsize)
        if not raw:
            break
        try:
            raw = self.cmp.decompress(raw)
        except IOError:
            raise ReadError('invalid compressed data')
        pieces.append(raw)
        have += len(raw)
    data = ''.join(pieces)
    # Keep the surplus for the next call.
    self.dbuf = data[size:]
    return data[:size]
|
'Return size bytes from stream. If internal buffer is empty,
read another block from the stream.'
def __read(self, size):
    """Return size bytes from stream. If internal buffer is empty,
    read another block from the stream.
    """
    have = len(self.buf)
    pieces = [self.buf]
    # Pull bufsize-sized chunks until we can satisfy the request.
    while have < size:
        chunk = self.fileobj.read(self.bufsize)
        if not chunk:
            break
        pieces.append(chunk)
        have += len(chunk)
    data = ''.join(pieces)
    # Keep the surplus for the next call.
    self.buf = data[size:]
    return data[:size]
|
'Return the current file position.'
def tell(self):
    """Return the current file position."""
    return self.position
|
'Seek to a position in the file.'
def seek(self, position):
    """Seek to a position in the file."""
    # Position is purely logical; the real seek happens on read.
    self.position = position
|
'Read data from the file.'
def read(self, size=None):
    """Read data from the file."""
    # Clamp the request to what is left in the member.
    remaining = self.size - self.position
    if size is None:
        size = remaining
    else:
        size = min(size, remaining)
    if self.sparse is None:
        return self.readnormal(size)
    return self.readsparse(size)
|
'Read operation for regular files.'
def readnormal(self, size):
    """Read operation for regular files."""
    # The member's data starts at self.offset inside the archive file.
    self.fileobj.seek(self.offset + self.position)
    self.position += size
    return self.fileobj.read(size)
|
'Read operation for sparse files.'
def readsparse(self, size):
    """Read operation for sparse files."""
    pieces = []
    # Gather section by section until the request is satisfied or EOF.
    while size > 0:
        piece = self.readsparsesection(size)
        if not piece:
            break
        size -= len(piece)
        pieces.append(piece)
    return ''.join(pieces)
|
'Read a single section of a sparse file.'
def readsparsesection(self, size):
    """Read a single section of a sparse file."""
    section = self.sparse.find(self.position)
    if section is None:
        return ''
    # Never read past the end of the current section.
    size = min(size, section.offset + section.size - self.position)
    if not isinstance(section, _data):
        # Hole: synthesize zero bytes.
        self.position += size
        return NUL * size
    # Data section: map the logical position to the real archive offset.
    realpos = section.realpos + self.position - section.offset
    self.fileobj.seek(self.offset + realpos)
    self.position += size
    return self.fileobj.read(size)
|
'Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.'
def read(self, size=None):
    """Read at most size bytes from the file. If size is not
    present or None, read all data until EOF is reached.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    buf = ''
    if self.buffer:
        # Serve (part of) the request from the pushback buffer first.
        if size is None:
            buf, self.buffer = self.buffer, ''
        else:
            buf, self.buffer = self.buffer[:size], self.buffer[size:]
    if size is None:
        buf += self.fileobj.read()
    else:
        buf += self.fileobj.read(size - len(buf))
    self.position += len(buf)
    return buf
|
'Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.'
def readline(self, size=(-1)):
    """Read one entire line from the file. If size is present
    and non-negative, return a string with at most that
    size, which may be an incomplete line.
    """
    if self.closed:
        raise ValueError('I/O operation on closed file')
    if '\n' in self.buffer:
        pos = self.buffer.find('\n') + 1
    else:
        # Pull blocks until a newline shows up or EOF is hit.
        parts = [self.buffer]
        while True:
            chunk = self.fileobj.read(self.blocksize)
            parts.append(chunk)
            if not chunk or '\n' in chunk:
                self.buffer = ''.join(parts)
                pos = self.buffer.find('\n') + 1
                if pos == 0:
                    # No newline at all: return everything buffered.
                    pos = len(self.buffer)
                break
    if size != (-1):
        pos = min(size, pos)
    line, self.buffer = self.buffer[:pos], self.buffer[pos:]
    self.position += len(line)
    return line
|
'Return a list with all remaining lines.'
def readlines(self):
    """Return a list with all remaining lines."""
    lines = []
    # Keep pulling lines until readline() signals EOF with ''.
    line = self.readline()
    while line:
        lines.append(line)
        line = self.readline()
    return lines
|
'Return the current file position.'
def tell(self):
    """Return the current file position."""
    if self.closed:
        raise ValueError('I/O operation on closed file')
    return self.position
|
'Seek to a position in the file.'
def seek(self, pos, whence=os.SEEK_SET):
    """Seek to a position in the file."""
    if self.closed:
        raise ValueError('I/O operation on closed file')
    # Clamp every target into [0, self.size].
    if whence == os.SEEK_SET:
        target = min(max(pos, 0), self.size)
    elif whence == os.SEEK_CUR:
        if pos < 0:
            target = max(self.position + pos, 0)
        else:
            target = min(self.position + pos, self.size)
    elif whence == os.SEEK_END:
        target = max(min(self.size + pos, self.size), 0)
    else:
        raise ValueError('Invalid argument')
    self.position = target
    # Buffered read-ahead is stale after an explicit seek.
    self.buffer = ''
    self.fileobj.seek(self.position)
|
'Close the file object.'
def close(self):
    """Close the file object."""
    # Only mark closed; the archive's underlying fileobj stays open.
    self.closed = True
|
'Get an iterator over the file\'s lines.'
def __iter__(self):
    """Get an iterator over the file's lines."""
    while True:
        line = self.readline()
        if not line:
            return
        yield line
|
'Construct a TarInfo object. name is the optional name
of the member.'
def __init__(self, name=''):
    """Construct a TarInfo object. name is the optional name
    of the member.
    """
    self.name = name
    self.mode = 420          # decimal 420 == octal 0644
    self.uid = 0
    self.gid = 0
    self.size = 0
    self.mtime = 0
    self.chksum = 0
    self.type = REGTYPE
    self.linkname = ''
    self.uname = ''
    self.gname = ''
    self.devmajor = 0
    self.devminor = 0
    # Bookkeeping used while reading an archive.
    self.offset = 0
    self.offset_data = 0
    # Extra pax key/value pairs attached to this member.
    self.pax_headers = {}
|
'Return the TarInfo\'s attributes as a dictionary.'
def get_info(self, encoding, errors):
    """Return the TarInfo's attributes as a dictionary."""
    # 4095 == 0o7777: keep only the permission bits of the mode.
    info = {'name': self.name, 'mode': (self.mode & 4095), 'uid': self.uid, 'gid': self.gid, 'size': self.size, 'mtime': self.mtime, 'chksum': self.chksum, 'type': self.type, 'linkname': self.linkname, 'uname': self.uname, 'gname': self.gname, 'devmajor': self.devmajor, 'devminor': self.devminor}
    # Directories conventionally carry a trailing slash in the archive.
    if ((info['type'] == DIRTYPE) and (not info['name'].endswith('/'))):
        info['name'] += '/'
    # Encode any unicode text fields to the archive's byte encoding.
    for key in ('name', 'linkname', 'uname', 'gname'):
        if (type(info[key]) is unicode):
            info[key] = info[key].encode(encoding, errors)
    return info
|
'Return a tar header as a string of 512 byte blocks.'
def tobuf(self, format=DEFAULT_FORMAT, encoding=ENCODING, errors='strict'):
    """Return a tar header as a string of 512 byte blocks."""
    info = self.get_info(encoding, errors)
    # Dispatch on the requested archive format.
    if format == USTAR_FORMAT:
        return self.create_ustar_header(info)
    if format == GNU_FORMAT:
        return self.create_gnu_header(info)
    if format == PAX_FORMAT:
        return self.create_pax_header(info, encoding, errors)
    raise ValueError('invalid format')
|
'Return the object as a ustar header block.'
def create_ustar_header(self, info):
    """Return the object as a ustar header block."""
    info['magic'] = POSIX_MAGIC
    if len(info['linkname']) > LENGTH_LINK:
        # ustar has no extension mechanism for overlong link names.
        raise ValueError('linkname is too long')
    if len(info['name']) > LENGTH_NAME:
        # Long names may be split across the prefix and name fields.
        info['prefix'], info['name'] = self._posix_split_name(info['name'])
    return self._create_header(info, USTAR_FORMAT)
|
'Return the object as a GNU header block sequence.'
def create_gnu_header(self, info):
    """Return the object as a GNU header block sequence."""
    info['magic'] = GNU_MAGIC
    parts = []
    # Overlong names travel in dedicated long-name/long-link members
    # that precede the real header.
    if len(info['linkname']) > LENGTH_LINK:
        parts.append(self._create_gnu_long_header(info['linkname'], GNUTYPE_LONGLINK))
    if len(info['name']) > LENGTH_NAME:
        parts.append(self._create_gnu_long_header(info['name'], GNUTYPE_LONGNAME))
    parts.append(self._create_header(info, GNU_FORMAT))
    return ''.join(parts)
|
'Return the object as a ustar header block. If it cannot be
represented this way, prepend a pax extended header sequence
with supplement information.'
def create_pax_header(self, info, encoding, errors):
    """Return the object as a ustar header block. If it cannot be
    represented this way, prepend a pax extended header sequence
    with supplement information.
    """
    info['magic'] = POSIX_MAGIC
    pax_headers = self.pax_headers.copy()
    # Text fields: move a value into the pax header if it is either
    # too long for the ustar field or not pure ASCII.
    for (name, hname, length) in (('name', 'path', LENGTH_NAME), ('linkname', 'linkpath', LENGTH_LINK), ('uname', 'uname', 32), ('gname', 'gname', 32)):
        if (hname in pax_headers):
            # The caller supplied this field explicitly; leave it alone.
            continue
        val = info[name].decode(encoding, errors)
        try:
            val.encode('ascii')
        except UnicodeEncodeError:
            pax_headers[hname] = val
            continue
        if (len(info[name]) > length):
            pax_headers[hname] = val
    # Numeric fields: move values that do not fit the octal field
    # (or are floats) into the pax header and zero the ustar field.
    for (name, digits) in (('uid', 8), ('gid', 8), ('size', 12), ('mtime', 12)):
        if (name in pax_headers):
            info[name] = 0
            continue
        val = info[name]
        if ((not (0 <= val < (8 ** (digits - 1)))) or isinstance(val, float)):
            pax_headers[name] = unicode(val)
            info[name] = 0
    # Emit the extended-header pseudo-member first, if needed.
    if pax_headers:
        buf = self._create_pax_generic_header(pax_headers)
    else:
        buf = ''
    return (buf + self._create_header(info, USTAR_FORMAT))
|
'Return the object as a pax global header block sequence.'
@classmethod
def create_pax_global_header(cls, pax_headers):
    """Return the object as a pax global header block sequence."""
    # XGLTYPE marks the header as global (applies to all members).
    return cls._create_pax_generic_header(pax_headers, type=XGLTYPE)
|
'Split a name longer than 100 chars into a prefix
and a name part.'
def _posix_split_name(self, name):
    """Split a name longer than 100 chars into a prefix
    and a name part.
    """
    # Cut the prefix back to the last '/' within the first
    # LENGTH_PREFIX + 1 characters.
    prefix = name[:LENGTH_PREFIX + 1]
    while prefix and prefix[-1] != '/':
        prefix = prefix[:-1]
    name = name[len(prefix):]
    prefix = prefix[:-1]  # drop the trailing '/'
    if not prefix or len(name) > LENGTH_NAME:
        raise ValueError('name is too long')
    return (prefix, name)
|
'Return a header block. info is a dictionary with file
information, format must be one of the *_FORMAT constants.'
@staticmethod
def _create_header(info, format):
    """Return a header block. info is a dictionary with file
    information, format must be one of the *_FORMAT constants.
    """
    # Field layout of a 512-byte tar header; the space entry below is
    # the placeholder for the checksum field, filled in after packing.
    parts = [stn(info.get('name', ''), 100), itn((info.get('mode', 0) & 4095), 8, format), itn(info.get('uid', 0), 8, format), itn(info.get('gid', 0), 8, format), itn(info.get('size', 0), 12, format), itn(info.get('mtime', 0), 12, format), ' ', info.get('type', REGTYPE), stn(info.get('linkname', ''), 100), stn(info.get('magic', POSIX_MAGIC), 8), stn(info.get('uname', ''), 32), stn(info.get('gname', ''), 32), itn(info.get('devmajor', 0), 8, format), itn(info.get('devminor', 0), 8, format), stn(info.get('prefix', ''), 155)]
    buf = struct.pack(('%ds' % BLOCKSIZE), ''.join(parts))
    # Checksum over the whole block, then spliced back in at the
    # checksum field (364 bytes from the end of the block).
    chksum = calc_chksums(buf[(- BLOCKSIZE):])[0]
    buf = ((buf[:(-364)] + ('%06o\x00' % chksum)) + buf[(-357):])
    return buf
|
'Return the string payload filled with zero bytes
up to the next 512 byte border.'
@staticmethod
def _create_payload(payload):
    """Return the string payload filled with zero bytes
    up to the next 512 byte border.
    """
    remainder = len(payload) % BLOCKSIZE
    if remainder > 0:
        # Pad to the next block boundary with NULs.
        payload += (BLOCKSIZE - remainder) * NUL
    return payload
|
'Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
for name.'
@classmethod
def _create_gnu_long_header(cls, name, type):
    """Return a GNUTYPE_LONGNAME or GNUTYPE_LONGLINK sequence
    for name.
    """
    name += NUL
    # The overlong string travels as the payload of a pseudo-member.
    info = {'name': '././@LongLink',
            'type': type,
            'size': len(name),
            'magic': GNU_MAGIC}
    return (cls._create_header(info, USTAR_FORMAT) + cls._create_payload(name))
|
'Return a POSIX.1-2001 extended or global header sequence
that contains a list of keyword, value pairs. The values
must be unicode objects.'
@classmethod
def _create_pax_generic_header(cls, pax_headers, type=XHDTYPE):
    """Return a POSIX.1-2001 extended or global header sequence
    that contains a list of keyword, value pairs. The values
    must be unicode objects.
    """
    records = []
    for (keyword, value) in pax_headers.iteritems():
        keyword = keyword.encode('utf8')
        value = value.encode('utf8')
        # Each record is "<length> <keyword>=<value>\n" where <length>
        # counts the whole record *including* its own digits; the +3
        # accounts for the space, '=' and '\n'.  Iterate to fixpoint.
        l = ((len(keyword) + len(value)) + 3)
        n = p = 0
        while True:
            n = (l + len(str(p)))
            if (n == p):
                break
            p = n
        records.append(('%d %s=%s\n' % (p, keyword, value)))
    records = ''.join(records)
    # The records become the payload of a pax pseudo-member.
    info = {}
    info['name'] = '././@PaxHeader'
    info['type'] = type
    info['size'] = len(records)
    info['magic'] = POSIX_MAGIC
    return (cls._create_header(info, USTAR_FORMAT) + cls._create_payload(records))
|
'Construct a TarInfo object from a 512 byte string buffer.'
@classmethod
def frombuf(cls, buf):
    """Construct a TarInfo object from a 512 byte string buffer."""
    if (len(buf) == 0):
        raise EmptyHeaderError('empty header')
    if (len(buf) != BLOCKSIZE):
        raise TruncatedHeaderError('truncated header')
    if (buf.count(NUL) == BLOCKSIZE):
        # An all-NUL block marks the end of the archive.
        raise EOFHeaderError('end of file header')
    chksum = nti(buf[148:156])
    if (chksum not in calc_chksums(buf)):
        raise InvalidHeaderError('bad checksum')
    obj = cls()
    obj.buf = buf
    # Fixed ustar field offsets within the 512-byte block.
    obj.name = nts(buf[0:100])
    obj.mode = nti(buf[100:108])
    obj.uid = nti(buf[108:116])
    obj.gid = nti(buf[116:124])
    obj.size = nti(buf[124:136])
    obj.mtime = nti(buf[136:148])
    obj.chksum = chksum
    obj.type = buf[156:157]
    obj.linkname = nts(buf[157:257])
    obj.uname = nts(buf[265:297])
    obj.gname = nts(buf[297:329])
    obj.devmajor = nti(buf[329:337])
    obj.devminor = nti(buf[337:345])
    prefix = nts(buf[345:500])
    # Old V7 archives mark directories only with a trailing slash.
    if ((obj.type == AREGTYPE) and obj.name.endswith('/')):
        obj.type = DIRTYPE
    if obj.isdir():
        obj.name = obj.name.rstrip('/')
    # Rejoin the ustar prefix field with the name (GNU types reuse
    # that area for other purposes).
    if (prefix and (obj.type not in GNU_TYPES)):
        obj.name = ((prefix + '/') + obj.name)
    return obj
|
'Return the next TarInfo object from TarFile object
tarfile.'
@classmethod
def fromtarfile(cls, tarfile):
    """Return the next TarInfo object from TarFile object
    tarfile.
    """
    buf = tarfile.fileobj.read(BLOCKSIZE)
    obj = cls.frombuf(buf)
    # The header starts one block before the current read position.
    obj.offset = tarfile.fileobj.tell() - BLOCKSIZE
    return obj._proc_member(tarfile)
|
'Choose the right processing method depending on
the type and call it.'
def _proc_member(self, tarfile):
    """Choose the right processing method depending on
    the type and call it.
    """
    if self.type in (GNUTYPE_LONGNAME, GNUTYPE_LONGLINK):
        return self._proc_gnulong(tarfile)
    if self.type == GNUTYPE_SPARSE:
        return self._proc_sparse(tarfile)
    if self.type in (XHDTYPE, XGLTYPE, SOLARIS_XHDTYPE):
        return self._proc_pax(tarfile)
    # Anything else is handled as a builtin / regular member.
    return self._proc_builtin(tarfile)
|
'Process a builtin type or an unknown type which
will be treated as a regular file.'
def _proc_builtin(self, tarfile):
    """Process a builtin type or an unknown type which
    will be treated as a regular file.
    """
    self.offset_data = tarfile.fileobj.tell()
    offset = self.offset_data
    if self.isreg() or self.type not in SUPPORTED_TYPES:
        # Skip the member's data blocks to reach the next header.
        offset += self._block(self.size)
    tarfile.offset = offset
    # Patch in attributes stored in earlier pax headers.
    self._apply_pax_info(tarfile.pax_headers, tarfile.encoding, tarfile.errors)
    return self
|
'Process the blocks that hold a GNU longname
or longlink member.'
def _proc_gnulong(self, tarfile):
    """Process the blocks that hold a GNU longname
    or longlink member.
    """
    buf = tarfile.fileobj.read(self._block(self.size))
    # The real member follows immediately after the long-name blocks.
    try:
        next = self.fromtarfile(tarfile)
    except HeaderError:
        raise SubsequentHeaderError('missing or bad subsequent header')
    # The longname/longlink pseudo-member is transparent to the user.
    next.offset = self.offset
    if self.type == GNUTYPE_LONGNAME:
        next.name = nts(buf)
    elif self.type == GNUTYPE_LONGLINK:
        next.linkname = nts(buf)
    return next
|
'Process a GNU sparse header plus extra headers.'
def _proc_sparse(self, tarfile):
    """Process a GNU sparse header plus extra headers."""
    buf = self.buf
    sp = _ringbuffer()
    # The header holds up to 4 (offset, numbytes) sparse entries of
    # 12 octal digits each, starting at byte 386.
    pos = 386
    lastpos = 0L
    realpos = 0L
    for i in xrange(4):
        try:
            offset = nti(buf[pos:(pos + 12)])
            numbytes = nti(buf[(pos + 12):(pos + 24)])
        except ValueError:
            break
        if (offset > lastpos):
            # Gap between data sections: record it as a hole.
            sp.append(_hole(lastpos, (offset - lastpos)))
        sp.append(_data(offset, numbytes, realpos))
        realpos += numbytes
        lastpos = (offset + numbytes)
        pos += 24
    # Byte 482 flags whether extension blocks follow; bytes 483-495
    # hold the real (un-sparsified) file size.
    isextended = ord(buf[482])
    origsize = nti(buf[483:495])
    while (isextended == 1):
        # Each extension block carries up to 21 more entries.
        buf = tarfile.fileobj.read(BLOCKSIZE)
        pos = 0
        for i in xrange(21):
            try:
                offset = nti(buf[pos:(pos + 12)])
                numbytes = nti(buf[(pos + 12):(pos + 24)])
            except ValueError:
                break
            if (offset > lastpos):
                sp.append(_hole(lastpos, (offset - lastpos)))
            sp.append(_data(offset, numbytes, realpos))
            realpos += numbytes
            lastpos = (offset + numbytes)
            pos += 24
        isextended = ord(buf[504])
    if (lastpos < origsize):
        # Trailing hole up to the real file size.
        sp.append(_hole(lastpos, (origsize - lastpos)))
    self.sparse = sp
    self.offset_data = tarfile.fileobj.tell()
    tarfile.offset = (self.offset_data + self._block(self.size))
    # Present the member with its real size.
    self.size = origsize
    return self
|
'Process an extended or global header as described in
POSIX.1-2001.'
def _proc_pax(self, tarfile):
    """Process an extended or global header as described in
    POSIX.1-2001.
    """
    buf = tarfile.fileobj.read(self._block(self.size))
    # Global headers affect all following members, so update the
    # archive-wide dict in place; extended headers only affect the
    # next member, so work on a copy.
    if (self.type == XGLTYPE):
        pax_headers = tarfile.pax_headers
    else:
        pax_headers = tarfile.pax_headers.copy()
    # Records look like "<length> <keyword>=<value>\n" where <length>
    # counts the whole record including its own digits.
    regex = re.compile('(\\d+) ([^=]+)=', re.U)
    pos = 0
    while True:
        match = regex.match(buf, pos)
        if (not match):
            break
        (length, keyword) = match.groups()
        length = int(length)
        # Value runs from just after '=' to just before the final '\n'.
        value = buf[(match.end(2) + 1):((match.start(1) + length) - 1)]
        keyword = keyword.decode('utf8')
        value = value.decode('utf8')
        pax_headers[keyword] = value
        pos += length
    # The pax header is followed by the member it describes.
    try:
        next = self.fromtarfile(tarfile)
    except HeaderError:
        raise SubsequentHeaderError('missing or bad subsequent header')
    if (self.type in (XHDTYPE, SOLARIS_XHDTYPE)):
        next._apply_pax_info(pax_headers, tarfile.encoding, tarfile.errors)
    # The pax pseudo-member is transparent to the user.
    next.offset = self.offset
    if ('size' in pax_headers):
        # A pax size overrides the ustar size field, so recompute
        # where the following header starts.
        offset = next.offset_data
        if (next.isreg() or (next.type not in SUPPORTED_TYPES)):
            offset += next._block(next.size)
        tarfile.offset = offset
    return next
|
'Replace fields with supplemental information from a previous
pax extended or global header.'
def _apply_pax_info(self, pax_headers, encoding, errors):
    """Replace fields with supplemental information from a previous
    pax extended or global header.
    """
    for (keyword, value) in pax_headers.iteritems():
        # Only keywords that map onto TarInfo attributes are applied.
        if (keyword not in PAX_FIELDS):
            continue
        if (keyword == 'path'):
            value = value.rstrip('/')
        if (keyword in PAX_NUMBER_FIELDS):
            # Numeric fields get converted; fall back to 0 on bad input.
            try:
                value = PAX_NUMBER_FIELDS[keyword](value)
            except ValueError:
                value = 0
        else:
            value = uts(value, encoding, errors)
        setattr(self, keyword, value)
    # Remember the headers that were applied to this member.
    self.pax_headers = pax_headers.copy()
|
'Round up a byte count by BLOCKSIZE and return it,
e.g. _block(834) => 1024.'
def _block(self, count):
    """Round up a byte count by BLOCKSIZE and return it,
    e.g. _block(834) => 1024.
    """
    # Round count up to the next multiple of BLOCKSIZE.
    return ((count + BLOCKSIZE - 1) // BLOCKSIZE) * BLOCKSIZE
|
'Open an (uncompressed) tar archive `name\'. `mode\' is either \'r\' to
read from an existing archive, \'a\' to append data to an existing
file or \'w\' to create a new file overwriting an existing one. `mode\'
defaults to \'r\'.
If `fileobj\' is given, it is used for reading or writing data. If it
can be determined, `mode\' is overridden by `fileobj\'s mode.
`fileobj\' is not closed, when TarFile is closed.'
def __init__(self, name=None, mode='r', fileobj=None, format=None, tarinfo=None, dereference=None, ignore_zeros=None, encoding=None, errors=None, pax_headers=None, debug=None, errorlevel=None):
    """Open an (uncompressed) tar archive `name'. `mode' is either 'r' to
    read from an existing archive, 'a' to append data to an existing
    file or 'w' to create a new file overwriting an existing one. `mode'
    defaults to 'r'.
    If `fileobj' is given, it is used for reading or writing data. If it
    can be determined, `mode' is overridden by `fileobj's mode.
    `fileobj' is not closed, when TarFile is closed.
    """
    if ((len(mode) > 1) or (mode not in 'raw')):
        raise ValueError("mode must be 'r', 'a' or 'w'")
    self.mode = mode
    # Map the logical mode to the mode for the underlying file object.
    self._mode = {'r': 'rb', 'a': 'r+b', 'w': 'wb'}[mode]
    if (not fileobj):
        # Appending to a non-existent file silently degrades to 'w'.
        if ((self.mode == 'a') and (not os.path.exists(name))):
            self.mode = 'w'
            self._mode = 'wb'
        fileobj = bltn_open(name, self._mode)
        self._extfileobj = False
    else:
        if ((name is None) and hasattr(fileobj, 'name')):
            name = fileobj.name
        if hasattr(fileobj, 'mode'):
            self._mode = fileobj.mode
        # The caller owns fileobj; close() must leave it open.
        self._extfileobj = True
    self.name = (os.path.abspath(name) if name else None)
    self.fileobj = fileobj
    # Only override class-level defaults when explicitly given.
    if (format is not None):
        self.format = format
    if (tarinfo is not None):
        self.tarinfo = tarinfo
    if (dereference is not None):
        self.dereference = dereference
    if (ignore_zeros is not None):
        self.ignore_zeros = ignore_zeros
    if (encoding is not None):
        self.encoding = encoding
    if (errors is not None):
        self.errors = errors
    elif (mode == 'r'):
        self.errors = 'utf-8'
    else:
        self.errors = 'strict'
    if ((pax_headers is not None) and (self.format == PAX_FORMAT)):
        self.pax_headers = pax_headers
    else:
        self.pax_headers = {}
    if (debug is not None):
        self.debug = debug
    if (errorlevel is not None):
        self.errorlevel = errorlevel
    # Per-archive state.
    self.closed = False
    self.members = []
    self._loaded = False
    self.offset = self.fileobj.tell()
    self.inodes = {}
    try:
        if (self.mode == 'r'):
            self.firstmember = None
            self.firstmember = self.next()
        if (self.mode == 'a'):
            # Scan the existing archive to find where new members must
            # be appended (just before the end-of-archive blocks).
            while True:
                self.fileobj.seek(self.offset)
                try:
                    tarinfo = self.tarinfo.fromtarfile(self)
                    self.members.append(tarinfo)
                except EOFHeaderError:
                    self.fileobj.seek(self.offset)
                    break
                except HeaderError as e:
                    raise ReadError(str(e))
        if (self.mode in 'aw'):
            self._loaded = True
            if self.pax_headers:
                # Emit a pax global header before any members.
                buf = self.tarinfo.create_pax_global_header(self.pax_headers.copy())
                self.fileobj.write(buf)
                self.offset += len(buf)
    except:
        # Clean up on any failure, then re-raise.
        if (not self._extfileobj):
            self.fileobj.close()
        self.closed = True
        raise
|
'Open a tar archive for reading, writing or appending. Return
an appropriate TarFile class.
mode:
\'r\' or \'r:*\' open for reading with transparent compression
\'r:\' open for reading exclusively uncompressed
\'r:gz\' open for reading with gzip compression
\'r:bz2\' open for reading with bzip2 compression
\'a\' or \'a:\' open for appending, creating the file if necessary
\'w\' or \'w:\' open for writing without compression
\'w:gz\' open for writing with gzip compression
\'w:bz2\' open for writing with bzip2 compression
\'r|*\' open a stream of tar blocks with transparent compression
\'r|\' open an uncompressed stream of tar blocks for reading
\'r|gz\' open a gzip compressed stream of tar blocks
\'r|bz2\' open a bzip2 compressed stream of tar blocks
\'w|\' open an uncompressed stream for writing
\'w|gz\' open a gzip compressed stream for writing
\'w|bz2\' open a bzip2 compressed stream for writing'
@classmethod
def open(cls, name=None, mode='r', fileobj=None, bufsize=RECORDSIZE, **kwargs):
    """Open a tar archive for reading, writing or appending. Return
    an appropriate TarFile class.

    mode:
    'r' or 'r:*' open for reading with transparent compression
    'r:'         open for reading exclusively uncompressed
    'r:gz'       open for reading with gzip compression
    'r:bz2'      open for reading with bzip2 compression
    'a' or 'a:'  open for appending, creating the file if necessary
    'w' or 'w:'  open for writing without compression
    'w:gz'       open for writing with gzip compression
    'w:bz2'      open for writing with bzip2 compression

    'r|*'        open a stream of tar blocks with transparent compression
    'r|'         open an uncompressed stream of tar blocks for reading
    'r|gz'       open a gzip compressed stream of tar blocks
    'r|bz2'      open a bzip2 compressed stream of tar blocks
    'w|'         open an uncompressed stream for writing
    'w|gz'       open a gzip compressed stream for writing
    'w|bz2'      open a bzip2 compressed stream for writing
    """
    if ((not name) and (not fileobj)):
        raise ValueError('nothing to open')
    if (mode in ('r', 'r:*')):
        # Transparent compression: try each registered opener in turn,
        # rewinding the file object between failed attempts.
        for comptype in cls.OPEN_METH:
            func = getattr(cls, cls.OPEN_METH[comptype])
            if (fileobj is not None):
                saved_pos = fileobj.tell()
            try:
                return func(name, 'r', fileobj, **kwargs)
            except (ReadError, CompressionError) as e:
                if (fileobj is not None):
                    fileobj.seek(saved_pos)
                continue
        raise ReadError('file could not be opened successfully')
    elif (':' in mode):
        # "filemode:comptype" -- random-access (compressed) archive.
        (filemode, comptype) = mode.split(':', 1)
        filemode = (filemode or 'r')
        comptype = (comptype or 'tar')
        if (comptype in cls.OPEN_METH):
            func = getattr(cls, cls.OPEN_METH[comptype])
        else:
            raise CompressionError(('unknown compression type %r' % comptype))
        return func(name, filemode, fileobj, **kwargs)
    elif ('|' in mode):
        # "filemode|comptype" -- non-seekable stream of tar blocks.
        (filemode, comptype) = mode.split('|', 1)
        filemode = (filemode or 'r')
        comptype = (comptype or 'tar')
        if (filemode not in 'rw'):
            raise ValueError("mode must be 'r' or 'w'")
        t = cls(name, filemode, _Stream(name, filemode, comptype, fileobj, bufsize), **kwargs)
        # The TarFile owns the _Stream wrapper and must close it.
        t._extfileobj = False
        return t
    elif (mode in 'aw'):
        return cls.taropen(name, mode, fileobj, **kwargs)
    raise ValueError('undiscernible mode')
|
@classmethod
def taropen(cls, name, mode='r', fileobj=None, **kwargs):
    """Open uncompressed tar archive name for reading or writing.

    mode must be exactly 'r', 'a' or 'w'; anything else raises
    ValueError.
    """
    # The previous check (len(mode) > 1 or mode not in 'raw') used a
    # substring test, which silently accepted mode == ''.  Compare
    # against the explicit set of valid modes instead.
    if mode not in ('r', 'a', 'w'):
        raise ValueError("mode must be 'r', 'a' or 'w'")
    return cls(name, mode, fileobj, **kwargs)
|
@classmethod
def gzopen(cls, name, mode='r', fileobj=None, compresslevel=9, **kwargs):
    """Open gzip compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if ((len(mode) > 1) or (mode not in 'rw')):
        raise ValueError("mode must be 'r' or 'w'")
    try:
        import gzip
        # gzip may be importable yet crippled (e.g. built without zlib),
        # in which case GzipFile is missing.
        gzip.GzipFile
    except (ImportError, AttributeError):
        raise CompressionError('gzip module is not available')
    if (fileobj is None):
        fileobj = bltn_open(name, (mode + 'b'))
    try:
        t = cls.taropen(name, mode, gzip.GzipFile(name, mode, compresslevel, fileobj), **kwargs)
    except IOError:
        raise ReadError('not a gzip file')
    # NOTE(review): _extfileobj is forced to False even when the caller
    # supplied fileobj, so close() will close the GzipFile wrapper --
    # confirm this ownership model is intended for external file objects.
    t._extfileobj = False
    return t
|
@classmethod
def bz2open(cls, name, mode='r', fileobj=None, compresslevel=9, **kwargs):
    """Open bzip2 compressed tar archive name for reading or writing.
    Appending is not allowed.
    """
    if ((len(mode) > 1) or (mode not in 'rw')):
        raise ValueError("mode must be 'r' or 'w'.")
    try:
        import bz2
    except ImportError:
        raise CompressionError('bz2 module is not available')
    if (fileobj is not None):
        # BZ2File cannot operate on an arbitrary file object directly;
        # _BZ2Proxy adapts the given object.
        fileobj = _BZ2Proxy(fileobj, mode)
    else:
        fileobj = bz2.BZ2File(name, mode, compresslevel=compresslevel)
    try:
        t = cls.taropen(name, mode, fileobj, **kwargs)
    except (IOError, EOFError):
        # EOFError: a truncated or empty input also means "not bzip2".
        raise ReadError('not a bzip2 file')
    # The bz2 wrapper is owned by the TarFile and closed with it.
    t._extfileobj = False
    return t
|
def close(self):
    """Close the TarFile.

    In write mode ('a' or 'w') the archive is finalized first: two
    zero-filled end-of-archive blocks are written and the output is
    padded with NULs up to a full record boundary.
    """
    if self.closed:
        return

    if self.mode in 'aw':
        # End-of-archive marker: two consecutive zero blocks.
        self.fileobj.write(NUL * (BLOCKSIZE * 2))
        self.offset += BLOCKSIZE * 2
        # Pad the archive up to a multiple of RECORDSIZE.
        _, partial = divmod(self.offset, RECORDSIZE)
        if partial:
            self.fileobj.write(NUL * (RECORDSIZE - partial))

    # Only close the underlying file if we opened it ourselves.
    if not self._extfileobj:
        self.fileobj.close()
    self.closed = True
|
def getmember(self, name):
    """Return the TarInfo object for the member named *name*.

    Raises KeyError if no such member exists.  When a name occurs
    more than once in the archive, its last occurrence is assumed to
    be the most up-to-date version.
    """
    info = self._getmember(name)
    if info is None:
        raise KeyError('filename %r not found' % name)
    return info
|
def getmembers(self):
    """Return all members of the archive as a list of TarInfo objects,
    in the same order as they appear in the archive.
    """
    self._check()
    if not self._loaded:
        # First full request: scan the rest of the archive once,
        # then serve subsequent calls from the cached list.
        self._load()
    return self.members
|
def getnames(self):
    """Return the names of all archive members, in the same order as
    the list returned by getmembers().
    """
    return [member.name for member in self.getmembers()]
|
def gettarinfo(self, name=None, arcname=None, fileobj=None):
    """Create a TarInfo object for either the file `name' or the file
    object `fileobj' (using os.fstat on its file descriptor). You can
    modify some of the TarInfo's attributes before you add it using
    addfile(). If given, `arcname' specifies an alternative name for
    the file in the archive.
    """
    self._check('aw')
    # When a file object is given, take the name from it.
    if (fileobj is not None):
        name = fileobj.name
    if (arcname is None):
        arcname = name
    # Archive member names are drive-less, '/'-separated and relative.
    (drv, arcname) = os.path.splitdrive(arcname)
    arcname = arcname.replace(os.sep, '/')
    arcname = arcname.lstrip('/')
    tarinfo = self.tarinfo()
    tarinfo.tarfile = self
    # Stat the file; lstat keeps symlinks as symlinks unless
    # dereference is requested (or lstat is unavailable).
    if (fileobj is None):
        if (hasattr(os, 'lstat') and (not self.dereference)):
            statres = os.lstat(name)
        else:
            statres = os.stat(name)
    else:
        statres = os.fstat(fileobj.fileno())
    linkname = ''
    stmd = statres.st_mode
    if stat.S_ISREG(stmd):
        # Hard-link detection: a regular file whose inode was already
        # archived (st_nlink > 1) is stored as a link to that member.
        inode = (statres.st_ino, statres.st_dev)
        if ((not self.dereference) and (statres.st_nlink > 1) and (inode in self.inodes) and (arcname != self.inodes[inode])):
            type = LNKTYPE
            linkname = self.inodes[inode]
        else:
            type = REGTYPE
            if inode[0]:
                # Remember the inode only if it is valid (non-zero).
                self.inodes[inode] = arcname
    elif stat.S_ISDIR(stmd):
        type = DIRTYPE
    elif stat.S_ISFIFO(stmd):
        type = FIFOTYPE
    elif stat.S_ISLNK(stmd):
        type = SYMTYPE
        linkname = os.readlink(name)
    elif stat.S_ISCHR(stmd):
        type = CHRTYPE
    elif stat.S_ISBLK(stmd):
        type = BLKTYPE
    else:
        # Sockets and other exotic file types cannot be archived.
        return None
    # Fill the TarInfo object with the collected information.
    tarinfo.name = arcname
    tarinfo.mode = stmd
    tarinfo.uid = statres.st_uid
    tarinfo.gid = statres.st_gid
    if (type == REGTYPE):
        tarinfo.size = statres.st_size
    else:
        # Only regular files carry data.
        tarinfo.size = 0L
    tarinfo.mtime = statres.st_mtime
    tarinfo.type = type
    tarinfo.linkname = linkname
    # Translate numeric ids to names where the platform supports it;
    # unknown ids are simply left unnamed.
    if pwd:
        try:
            tarinfo.uname = pwd.getpwuid(tarinfo.uid)[0]
        except KeyError:
            pass
    if grp:
        try:
            tarinfo.gname = grp.getgrgid(tarinfo.gid)[0]
        except KeyError:
            pass
    if (type in (CHRTYPE, BLKTYPE)):
        # Device nodes additionally need major/minor numbers.
        if (hasattr(os, 'major') and hasattr(os, 'minor')):
            tarinfo.devmajor = os.major(statres.st_rdev)
            tarinfo.devminor = os.minor(statres.st_rdev)
    return tarinfo
|
def list(self, verbose=True):
    """Print a table of contents to sys.stdout. If `verbose' is False,
    only the names of the members are printed. If it is True, an
    `ls -l'-like output is produced.
    """
    self._check()
    for tarinfo in self:
        if verbose:
            # Permission bits, "user/group" and size; device nodes show
            # "major,minor" instead of a size.
            print filemode(tarinfo.mode),
            print ('%s/%s' % ((tarinfo.uname or tarinfo.uid), (tarinfo.gname or tarinfo.gid))),
            if (tarinfo.ischr() or tarinfo.isblk()):
                print ('%10s' % ('%d,%d' % (tarinfo.devmajor, tarinfo.devminor))),
            else:
                print ('%10d' % tarinfo.size),
            print ('%d-%02d-%02d %02d:%02d:%02d' % time.localtime(tarinfo.mtime)[:6]),
        # The member name is always printed; directories get a
        # trailing slash.
        print (tarinfo.name + ('/' if tarinfo.isdir() else '')),
        if verbose:
            if tarinfo.issym():
                print '->', tarinfo.linkname,
            if tarinfo.islnk():
                print 'link to', tarinfo.linkname,
        print
|
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
    """Add the file `name' to the archive. `name' may be any type of
    file (directory, fifo, symbolic link, etc.). If given, `arcname'
    specifies an alternative name for the file in the archive.
    Directories are added recursively by default. This can be avoided
    by setting `recursive' to False. `exclude' is a function that
    should return True for each filename to be excluded. `filter' is
    a function that expects a TarInfo object argument and returns the
    changed TarInfo object, if it returns None the TarInfo object will
    be excluded from the archive.
    """
    self._check('aw')
    if (arcname is None):
        arcname = name
    # `exclude' is deprecated in favour of the `filter' argument.
    if (exclude is not None):
        import warnings
        warnings.warn('use the filter argument instead', DeprecationWarning, 2)
        if exclude(name):
            self._dbg(2, ('tarfile: Excluded %r' % name))
            return
    # Never add the archive to itself.
    if ((self.name is not None) and (os.path.abspath(name) == self.name)):
        self._dbg(2, ('tarfile: Skipped %r' % name))
        return
    self._dbg(1, name)
    tarinfo = self.gettarinfo(name, arcname)
    if (tarinfo is None):
        self._dbg(1, ('tarfile: Unsupported type %r' % name))
        return
    # Let the caller veto or rewrite the member before it is written.
    if (filter is not None):
        tarinfo = filter(tarinfo)
        if (tarinfo is None):
            self._dbg(2, ('tarfile: Excluded %r' % name))
            return
    if tarinfo.isreg():
        f = bltn_open(name, 'rb')
        self.addfile(tarinfo, f)
        f.close()
    elif tarinfo.isdir():
        self.addfile(tarinfo)
        if recursive:
            # Recurse into the directory, preserving the name->arcname
            # mapping and the exclude/filter callbacks.
            for f in os.listdir(name):
                self.add(os.path.join(name, f), os.path.join(arcname, f), recursive, exclude, filter)
    else:
        # Symlinks, fifos, device nodes: header only, no data payload.
        self.addfile(tarinfo)
|
def addfile(self, tarinfo, fileobj=None):
    """Write the TarInfo object *tarinfo* to the archive.

    If *fileobj* is given, tarinfo.size bytes are read from it and
    added to the archive. TarInfo objects can be created with
    gettarinfo(). On Windows, *fileobj* should always be opened in
    mode 'rb' to avoid irritation about the file size.
    """
    self._check('aw')

    # Work on a copy so the caller's TarInfo object stays untouched.
    tarinfo = copy.copy(tarinfo)

    header = tarinfo.tobuf(self.format, self.encoding, self.errors)
    self.fileobj.write(header)
    self.offset += len(header)

    if fileobj is not None:
        copyfileobj(fileobj, self.fileobj, tarinfo.size)
        full_blocks, tail = divmod(tarinfo.size, BLOCKSIZE)
        if tail:
            # Pad the final partial block with NULs.
            self.fileobj.write(NUL * (BLOCKSIZE - tail))
            full_blocks += 1
        self.offset += full_blocks * BLOCKSIZE

    self.members.append(tarinfo)
|
def extractall(self, path='.', members=None):
    """Extract all members from the archive to the current working
    directory and set owner, modification time and permissions on
    directories afterwards. `path' specifies a different directory
    to extract to. `members' is optional and must be a subset of the
    list returned by getmembers().
    """
    if members is None:
        members = self

    # Extract directories with a safe interim mode (0700) and remember
    # them, so their real owner/mtime/mode can be applied afterwards --
    # otherwise extracting their contents would clobber the metadata.
    dirs = []
    for tarinfo in members:
        if tarinfo.isdir():
            dirs.append(tarinfo)
            tarinfo = copy.copy(tarinfo)
            tarinfo.mode = 448
        self.extract(tarinfo, path)

    # Fix directory metadata depth-first: children before parents.
    dirs.sort(key=operator.attrgetter('name'))
    dirs.reverse()
    for tarinfo in dirs:
        dirpath = os.path.join(path, tarinfo.name)
        try:
            self.chown(tarinfo, dirpath)
            self.utime(tarinfo, dirpath)
            self.chmod(tarinfo, dirpath)
        except ExtractError as e:
            if self.errorlevel > 1:
                raise
            else:
                self._dbg(1, ('tarfile: %s' % e))
|
def extract(self, member, path=''):
    """Extract a member from the archive to the current working
    directory, using its full name. Its file information is extracted
    as accurately as possible. `member' may be a filename or a TarInfo
    object. You can specify a different directory using `path'.
    """
    self._check('r')
    if isinstance(member, basestring):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member
    if tarinfo.islnk():
        # Remember where the hard link must point, relative to `path'.
        tarinfo._link_target = os.path.join(path, tarinfo.linkname)
    try:
        self._extract_member(tarinfo, os.path.join(path, tarinfo.name))
    except EnvironmentError as e:
        # errorlevel 0: OS errors are only logged, never raised.
        if (self.errorlevel > 0):
            raise
        elif (e.filename is None):
            self._dbg(1, ('tarfile: %s' % e.strerror))
        else:
            self._dbg(1, ('tarfile: %s %r' % (e.strerror, e.filename)))
    except ExtractError as e:
        # errorlevel <= 1: non-fatal extraction problems are only logged.
        if (self.errorlevel > 1):
            raise
        else:
            self._dbg(1, ('tarfile: %s' % e))
|
def extractfile(self, member):
    """Extract a member from the archive as a file object. `member' may
    be a filename or a TarInfo object. If `member' is a regular file, a
    file-like object is returned. If `member' is a link, a file-like
    object is constructed from the link's target. If `member' is none
    of the above, None is returned.

    The file-like object is read-only and provides the following
    methods: read(), readline(), readlines(), seek() and tell()
    """
    self._check('r')
    if isinstance(member, basestring):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member
    if tarinfo.isreg():
        return self.fileobject(self, tarinfo)
    elif (tarinfo.type not in SUPPORTED_TYPES):
        # Members of unknown type were extracted as regular files;
        # read them back the same way.
        return self.fileobject(self, tarinfo)
    elif (tarinfo.islnk() or tarinfo.issym()):
        if isinstance(self.fileobj, _Stream):
            # A non-seekable stream cannot jump back to the link target.
            raise StreamError('cannot extract (sym)link as file object')
        else:
            # Follow the link and return a file object for its target.
            return self.extractfile(self._find_link_target(tarinfo))
    else:
        # Directories, fifos and device nodes carry no file data.
        return None
|
'Extract the TarInfo object tarinfo to a physical
file called targetpath.'
| def _extract_member(self, tarinfo, targetpath):
| targetpath = targetpath.rstrip('/')
targetpath = targetpath.replace('/', os.sep)
upperdirs = os.path.dirname(targetpath)
if (upperdirs and (not os.path.exists(upperdirs))):
os.makedirs(upperdirs)
if (tarinfo.islnk() or tarinfo.issym()):
self._dbg(1, ('%s -> %s' % (tarinfo.name, tarinfo.linkname)))
else:
self._dbg(1, tarinfo.name)
if tarinfo.isreg():
self.makefile(tarinfo, targetpath)
elif tarinfo.isdir():
self.makedir(tarinfo, targetpath)
elif tarinfo.isfifo():
self.makefifo(tarinfo, targetpath)
elif (tarinfo.ischr() or tarinfo.isblk()):
self.makedev(tarinfo, targetpath)
elif (tarinfo.islnk() or tarinfo.issym()):
self.makelink(tarinfo, targetpath)
elif (tarinfo.type not in SUPPORTED_TYPES):
self.makeunknown(tarinfo, targetpath)
else:
self.makefile(tarinfo, targetpath)
self.chown(tarinfo, targetpath)
if (not tarinfo.issym()):
self.chmod(tarinfo, targetpath)
self.utime(tarinfo, targetpath)
|
'Make a directory called targetpath.'
| def makedir(self, tarinfo, targetpath):
| try:
os.mkdir(targetpath, 448)
except EnvironmentError as e:
if (e.errno != errno.EEXIST):
raise
|
def makefile(self, tarinfo, targetpath):
    """Extract the contents of *tarinfo* to the regular file
    *targetpath*.
    """
    # Use try/finally so neither file object leaks if copyfileobj
    # raises (the original closed them only on the success path).
    source = self.extractfile(tarinfo)
    try:
        target = bltn_open(targetpath, 'wb')
        try:
            copyfileobj(source, target)
        finally:
            target.close()
    finally:
        source.close()
|
def makeunknown(self, tarinfo, targetpath):
    """Fallback for members of unknown type: extract them as regular
    files and emit a debug warning.
    """
    self.makefile(tarinfo, targetpath)
    warning = 'tarfile: Unknown file type %r, extracted as regular file.' % tarinfo.type
    self._dbg(1, warning)
|
'Make a fifo called targetpath.'
| def makefifo(self, tarinfo, targetpath):
| if hasattr(os, 'mkfifo'):
os.mkfifo(targetpath)
else:
raise ExtractError('fifo not supported by system')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.