Columns: desc (string, lengths 3 to 26.7k), decl (string, lengths 11 to 7.89k), bodies (string, lengths 8 to 553k)
'Make a character or block device called targetpath.'
def makedev(self, tarinfo, targetpath):
    if not hasattr(os, 'mknod') or not hasattr(os, 'makedev'):
        raise ExtractError('special devices not supported by system')
    mode = tarinfo.mode
    if tarinfo.isblk():
        mode |= stat.S_IFBLK
    else:
        mode |= stat.S_IFCHR
    os.mknod(targetpath, mode, os.makedev(tarinfo.devmajor, tarinfo.devminor))
'Make a (symbolic) link called targetpath. If it cannot be created (platform limitation), we try to make a copy of the referenced file instead of a link.'
def makelink(self, tarinfo, targetpath):
    if hasattr(os, 'symlink') and hasattr(os, 'link'):
        if tarinfo.issym():
            if os.path.lexists(targetpath):
                os.unlink(targetpath)
            os.symlink(tarinfo.linkname, targetpath)
        elif os.path.exists(tarinfo._link_target):
            if os.path.lexists(targetpath):
                os.unlink(targetpath)
            os.link(tarinfo._link_target, targetpath)
        else:
            self._extract_member(self._find_link_target(tarinfo), targetpath)
    else:
        try:
            self._extract_member(self._find_link_target(tarinfo), targetpath)
        except KeyError:
            raise ExtractError('unable to resolve link inside archive')
'Set owner of targetpath according to tarinfo.'
def chown(self, tarinfo, targetpath):
    if pwd and hasattr(os, 'geteuid') and os.geteuid() == 0:
        try:
            g = grp.getgrnam(tarinfo.gname)[2]
        except KeyError:
            try:
                g = grp.getgrgid(tarinfo.gid)[2]
            except KeyError:
                g = os.getgid()
        try:
            u = pwd.getpwnam(tarinfo.uname)[2]
        except KeyError:
            try:
                u = pwd.getpwuid(tarinfo.uid)[2]
            except KeyError:
                u = os.getuid()
        try:
            if tarinfo.issym() and hasattr(os, 'lchown'):
                os.lchown(targetpath, u, g)
            elif sys.platform != 'os2emx':
                os.chown(targetpath, u, g)
        except EnvironmentError as e:
            raise ExtractError('could not change owner')
'Set file permissions of targetpath according to tarinfo.'
def chmod(self, tarinfo, targetpath):
    if hasattr(os, 'chmod'):
        try:
            os.chmod(targetpath, tarinfo.mode)
        except EnvironmentError as e:
            raise ExtractError('could not change mode')
'Set modification time of targetpath according to tarinfo.'
def utime(self, tarinfo, targetpath):
    if not hasattr(os, 'utime'):
        return
    try:
        os.utime(targetpath, (tarinfo.mtime, tarinfo.mtime))
    except EnvironmentError as e:
        raise ExtractError('could not change modification time')
'Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available.'
def next(self):
    self._check('ra')
    if self.firstmember is not None:
        m = self.firstmember
        self.firstmember = None
        return m
    self.fileobj.seek(self.offset)
    tarinfo = None
    while True:
        try:
            tarinfo = self.tarinfo.fromtarfile(self)
        except EOFHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, '0x%X: %s' % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
        except InvalidHeaderError as e:
            if self.ignore_zeros:
                self._dbg(2, '0x%X: %s' % (self.offset, e))
                self.offset += BLOCKSIZE
                continue
            elif self.offset == 0:
                raise ReadError(str(e))
        except EmptyHeaderError:
            if self.offset == 0:
                raise ReadError('empty file')
        except TruncatedHeaderError as e:
            if self.offset == 0:
                raise ReadError(str(e))
        except SubsequentHeaderError as e:
            raise ReadError(str(e))
        break
    if tarinfo is not None:
        self.members.append(tarinfo)
    else:
        self._loaded = True
    return tarinfo
'Find an archive member by name from bottom to top. If tarinfo is given, it is used as the starting point.'
def _getmember(self, name, tarinfo=None, normalize=False):
    members = self.getmembers()
    if tarinfo is not None:
        members = members[:members.index(tarinfo)]
    if normalize:
        name = os.path.normpath(name)
    for member in reversed(members):
        if normalize:
            member_name = os.path.normpath(member.name)
        else:
            member_name = member.name
        if name == member_name:
            return member
'Read through the entire archive file and look for readable members.'
def _load(self):
    while True:
        tarinfo = self.next()
        if tarinfo is None:
            break
    self._loaded = True
'Check if TarFile is still open, and if the operation\'s mode corresponds to TarFile\'s mode.'
def _check(self, mode=None):
    if self.closed:
        raise IOError('%s is closed' % self.__class__.__name__)
    if mode is not None and self.mode not in mode:
        raise IOError('bad operation for mode %r' % self.mode)
'Find the target member of a symlink or hardlink member in the archive.'
def _find_link_target(self, tarinfo):
    if tarinfo.issym():
        linkname = os.path.dirname(tarinfo.name) + '/' + tarinfo.linkname
        limit = None
    else:
        linkname = tarinfo.linkname
        limit = tarinfo
    member = self._getmember(linkname, tarinfo=limit, normalize=True)
    if member is None:
        raise KeyError('linkname %r not found' % linkname)
    return member
'Provide an iterator object.'
def __iter__(self):
    if self._loaded:
        return iter(self.members)
    else:
        return TarIter(self)
'Write debugging output to sys.stderr.'
def _dbg(self, level, msg):
    if level <= self.debug:
        print >> sys.stderr, msg
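A minimal usage sketch (not part of the extracted source) showing how the next()/__iter__() machinery above is normally driven; the archive name 'sample.tar.gz' is a hypothetical placeholder.

import tarfile

# Iterating a TarFile goes through __iter__(), which calls next()
# until the member list has been fully loaded.
with tarfile.open('sample.tar.gz', 'r:gz') as tar:
    for member in tar:
        print member.name, member.size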
'Construct a TarIter object.'
def __init__(self, tarfile):
    self.tarfile = tarfile
    self.index = 0
'Return iterator object.'
def __iter__(self):
return self
'Return the next item using TarFile\'s next() method. When all members have been read, set TarFile as _loaded.'
def next(self):
    if not self.tarfile._loaded:
        tarinfo = self.tarfile.next()
        if not tarinfo:
            self.tarfile._loaded = True
            raise StopIteration
    else:
        try:
            tarinfo = self.tarfile.members[self.index]
        except IndexError:
            raise StopIteration
    self.index += 1
    return tarinfo
'Indicate that a formerly enqueued task is complete. Used by Queue consumer threads. For each get() used to fetch a task, a subsequent call to task_done() tells the queue that the processing on the task is complete. If a join() is currently blocking, it will resume when all items have been processed (meaning that a task_done() call was received for every item that had been put() into the queue). Raises a ValueError if called more times than there were items placed in the queue.'
def task_done(self):
    self.all_tasks_done.acquire()
    try:
        unfinished = self.unfinished_tasks - 1
        if unfinished <= 0:
            if unfinished < 0:
                raise ValueError('task_done() called too many times')
            self.all_tasks_done.notify_all()
        self.unfinished_tasks = unfinished
    finally:
        self.all_tasks_done.release()
'Blocks until all items in the Queue have been gotten and processed. The count of unfinished tasks goes up whenever an item is added to the queue. The count goes down whenever a consumer thread calls task_done() to indicate the item was retrieved and all work on it is complete. When the count of unfinished tasks drops to zero, join() unblocks.'
def join(self):
    self.all_tasks_done.acquire()
    try:
        while self.unfinished_tasks:
            self.all_tasks_done.wait()
    finally:
        self.all_tasks_done.release()
'Return the approximate size of the queue (not reliable!).'
def qsize(self):
    self.mutex.acquire()
    n = self._qsize()
    self.mutex.release()
    return n
'Return True if the queue is empty, False otherwise (not reliable!).'
def empty(self):
    self.mutex.acquire()
    n = not self._qsize()
    self.mutex.release()
    return n
'Return True if the queue is full, False otherwise (not reliable!).'
def full(self):
    self.mutex.acquire()
    n = 0 < self.maxsize == self._qsize()
    self.mutex.release()
    return n
'Put an item into the queue. If optional args \'block\' is true and \'timeout\' is None (the default), block if necessary until a free slot is available. If \'timeout\' is a positive number, it blocks at most \'timeout\' seconds and raises the Full exception if no free slot was available within that time. Otherwise (\'block\' is false), put an item on the queue if a free slot is immediately available, else raise the Full exception (\'timeout\' is ignored in that case).'
def put(self, item, block=True, timeout=None):
    self.not_full.acquire()
    try:
        if self.maxsize > 0:
            if not block:
                if self._qsize() == self.maxsize:
                    raise Full
            elif timeout is None:
                while self._qsize() == self.maxsize:
                    self.not_full.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a positive number")
            else:
                endtime = _time() + timeout
                while self._qsize() == self.maxsize:
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Full
                    self.not_full.wait(remaining)
        self._put(item)
        self.unfinished_tasks += 1
        self.not_empty.notify()
    finally:
        self.not_full.release()
'Put an item into the queue without blocking. Only enqueue the item if a free slot is immediately available. Otherwise raise the Full exception.'
def put_nowait(self, item):
return self.put(item, False)
'Remove and return an item from the queue. If optional args \'block\' is true and \'timeout\' is None (the default), block if necessary until an item is available. If \'timeout\' is a positive number, it blocks at most \'timeout\' seconds and raises the Empty exception if no item was available within that time. Otherwise (\'block\' is false), return an item if one is immediately available, else raise the Empty exception (\'timeout\' is ignored in that case).'
def get(self, block=True, timeout=None):
    self.not_empty.acquire()
    try:
        if not block:
            if not self._qsize():
                raise Empty
        elif timeout is None:
            while not self._qsize():
                self.not_empty.wait()
        elif timeout < 0:
            raise ValueError("'timeout' must be a positive number")
        else:
            endtime = _time() + timeout
            while not self._qsize():
                remaining = endtime - _time()
                if remaining <= 0.0:
                    raise Empty
                self.not_empty.wait(remaining)
        item = self._get()
        self.not_full.notify()
        return item
    finally:
        self.not_empty.release()
'Remove and return an item from the queue without blocking. Only get an item if one is immediately available. Otherwise raise the Empty exception.'
def get_nowait(self):
return self.get(False)
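For context, a small producer/consumer sketch (not from the extracted source) that exercises put(), get(), task_done() and join() together:

import threading
from Queue import Queue

q = Queue(maxsize=4)

def worker():
    while True:
        item = q.get()        # blocks until an item is available
        # ... process item ...
        q.task_done()         # exactly one task_done() per get()

t = threading.Thread(target=worker)
t.daemon = True
t.start()

for i in range(10):
    q.put(i)                  # blocks while the queue is full

q.join()                      # returns once every item has been task_done()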
'Merge in the data from another CoverageResults'
def update(self, other):
    counts = self.counts
    calledfuncs = self.calledfuncs
    callers = self.callers
    other_counts = other.counts
    other_calledfuncs = other.calledfuncs
    other_callers = other.callers
    for key in other_counts.keys():
        counts[key] = counts.get(key, 0) + other_counts[key]
    for key in other_calledfuncs.keys():
        calledfuncs[key] = 1
    for key in other_callers.keys():
        callers[key] = 1
'@param coverdir'
def write_results(self, show_missing=True, summary=False, coverdir=None):
    if self.calledfuncs:
        print
        print 'functions called:'
        calls = self.calledfuncs.keys()
        calls.sort()
        for (filename, modulename, funcname) in calls:
            print ('filename: %s, modulename: %s, funcname: %s' % (filename, modulename, funcname))
    if self.callers:
        print
        print 'calling relationships:'
        calls = self.callers.keys()
        calls.sort()
        lastfile = lastcfile = ''
        for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
            if pfile != lastfile:
                print
                print '***', pfile, '***'
                lastfile = pfile
                lastcfile = ''
            if cfile != pfile and lastcfile != cfile:
                print ' -->', cfile
                lastcfile = cfile
            print (' %s.%s -> %s.%s' % (pmod, pfunc, cmod, cfunc))
    per_file = {}
    for (filename, lineno) in self.counts.keys():
        lines_hit = per_file[filename] = per_file.get(filename, {})
        lines_hit[lineno] = self.counts[(filename, lineno)]
    sums = {}
    for (filename, count) in per_file.iteritems():
        if filename == '<string>':
            continue
        if filename.startswith('<doctest '):
            continue
        if filename.endswith(('.pyc', '.pyo')):
            filename = filename[:-1]
        if coverdir is None:
            dir = os.path.dirname(os.path.abspath(filename))
            modulename = modname(filename)
        else:
            dir = coverdir
            if not os.path.exists(dir):
                os.makedirs(dir)
            modulename = fullmodname(filename)
        if show_missing:
            lnotab = find_executable_linenos(filename)
        else:
            lnotab = {}
        source = linecache.getlines(filename)
        coverpath = os.path.join(dir, modulename + '.cover')
        n_hits, n_lines = self.write_results_file(coverpath, source, lnotab, count)
        if summary and n_lines:
            percent = 100 * n_hits // n_lines
            sums[modulename] = (n_lines, percent, modulename, filename)
    if summary and sums:
        mods = sums.keys()
        mods.sort()
        print 'lines cov% module (path)'
        for m in mods:
            n_lines, percent, modulename, filename = sums[m]
            print ('%5d %3d%% %s (%s)' % sums[m])
    if self.outfile:
        try:
            pickle.dump((self.counts, self.calledfuncs, self.callers), open(self.outfile, 'wb'), 1)
        except IOError as err:
            print >> sys.stderr, ("Can't save counts files because %s" % err)
'Return a coverage results file in path.'
def write_results_file(self, path, lines, lnotab, lines_hit):
    try:
        outfile = open(path, 'w')
    except IOError as err:
        print >> sys.stderr, ('trace: Could not open %r for writing: %s- skipping' % (path, err))
        return (0, 0)
    n_lines = 0
    n_hits = 0
    for (i, line) in enumerate(lines):
        lineno = i + 1
        if lineno in lines_hit:
            outfile.write('%5d: ' % lines_hit[lineno])
            n_hits += 1
            n_lines += 1
        elif rx_blank.match(line):
            outfile.write('       ')
        elif (lineno in lnotab) and not (PRAGMA_NOCOVER in lines[i]):
            outfile.write('>>>>>> ')
            n_lines += 1
        else:
            outfile.write('       ')
        outfile.write(lines[i].expandtabs(8))
    outfile.close()
    return (n_hits, n_lines)
'@param count true iff it should count number of times each line is executed
 @param trace true iff it should print out each line that is being counted
 @param countfuncs true iff it should just output a list of (filename, modulename, funcname,) for functions that were called at least once; This overrides `count' and `trace'
 @param ignoremods a list of the names of modules to ignore
 @param ignoredirs a list of the names of directories to ignore all of the (recursive) contents of
 @param infile file from which to read stored counts to be added into the results
 @param outfile file in which to write the results
 @param timing true iff timing information be displayed'
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0, ignoremods=(), ignoredirs=(), infile=None, outfile=None, timing=False):
    self.infile = infile
    self.outfile = outfile
    self.ignore = Ignore(ignoremods, ignoredirs)
    self.counts = {}
    self.blabbed = {}
    self.pathtobasename = {}
    self.donothing = 0
    self.trace = trace
    self._calledfuncs = {}
    self._callers = {}
    self._caller_cache = {}
    self.start_time = None
    if timing:
        self.start_time = time.time()
    if countcallers:
        self.globaltrace = self.globaltrace_trackcallers
    elif countfuncs:
        self.globaltrace = self.globaltrace_countfuncs
    elif trace and count:
        self.globaltrace = self.globaltrace_lt
        self.localtrace = self.localtrace_trace_and_count
    elif trace:
        self.globaltrace = self.globaltrace_lt
        self.localtrace = self.localtrace_trace
    elif count:
        self.globaltrace = self.globaltrace_lt
        self.localtrace = self.localtrace_count
    else:
        self.donothing = 1
'Handler for call events. Adds information about who called who to the self._callers dict.'
def globaltrace_trackcallers(self, frame, why, arg):
    if why == 'call':
        this_func = self.file_module_function_of(frame)
        parent_func = self.file_module_function_of(frame.f_back)
        self._callers[(parent_func, this_func)] = 1
'Handler for call events. Adds (filename, modulename, funcname) to the self._calledfuncs dict.'
def globaltrace_countfuncs(self, frame, why, arg):
    if why == 'call':
        this_func = self.file_module_function_of(frame)
        self._calledfuncs[this_func] = 1
'Handler for call events. If the code block being entered is to be ignored, returns `None\', else returns self.localtrace.'
def globaltrace_lt(self, frame, why, arg):
    if why == 'call':
        code = frame.f_code
        filename = frame.f_globals.get('__file__', None)
        if filename:
            modulename = modname(filename)
            if modulename is not None:
                ignore_it = self.ignore.names(filename, modulename)
                if not ignore_it:
                    if self.trace:
                        print (' --- modulename: %s, funcname: %s' % (modulename, code.co_name))
                    return self.localtrace
        else:
            return None
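A brief, hedged example of how the Trace object above is usually wired up (the traced function and the coverdir path are hypothetical stand-ins):

import trace

def demo():
    return sum(range(10))

tracer = trace.Trace(count=1, trace=0)   # count line hits, don't echo each line
tracer.runfunc(demo)                     # run the callable under the tracer
results = tracer.results()               # a CoverageResults instance
results.write_results(show_missing=True, summary=True, coverdir='/tmp/cover')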
'The parameter \'cmd\' is the shell command to execute in a sub-process. On UNIX, \'cmd\' may be a sequence, in which case arguments will be passed directly to the program without shell intervention (as with os.spawnv()). If \'cmd\' is a string it will be passed to the shell (as with os.system()). The \'capturestderr\' flag, if true, specifies that the object should capture standard error output of the child process. The default is false. If the \'bufsize\' parameter is specified, it specifies the size of the I/O buffers to/from the child process.'
def __init__(self, cmd, capturestderr=False, bufsize=(-1)):
    _cleanup()
    self.cmd = cmd
    p2cread, p2cwrite = os.pipe()
    c2pread, c2pwrite = os.pipe()
    if capturestderr:
        errout, errin = os.pipe()
    self.pid = os.fork()
    if self.pid == 0:
        os.dup2(p2cread, 0)
        os.dup2(c2pwrite, 1)
        if capturestderr:
            os.dup2(errin, 2)
        self._run_child(cmd)
    os.close(p2cread)
    self.tochild = os.fdopen(p2cwrite, 'w', bufsize)
    os.close(c2pwrite)
    self.fromchild = os.fdopen(c2pread, 'r', bufsize)
    if capturestderr:
        os.close(errin)
        self.childerr = os.fdopen(errout, 'r', bufsize)
    else:
        self.childerr = None
'Return the exit status of the child process if it has finished, or -1 if it hasn\'t finished yet.'
def poll(self, _deadstate=None):
    if self.sts < 0:
        try:
            pid, sts = os.waitpid(self.pid, os.WNOHANG)
            if pid == self.pid:
                self.sts = sts
        except os.error:
            if _deadstate is not None:
                self.sts = _deadstate
    return self.sts
'Wait for and return the exit status of the child process.'
def wait(self):
    if self.sts < 0:
        pid, sts = os.waitpid(self.pid, 0)
        assert pid == self.pid
        self.sts = sts
    return self.sts
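A short usage sketch for the Popen3 plumbing above (popen2 is Python 2 only and long deprecated in favour of subprocess); the command line is arbitrary:

import popen2

proc = popen2.Popen3('echo hello', capturestderr=True)
output = proc.fromchild.read()   # read the child's stdout
errors = proc.childerr.read()    # only available because capturestderr=True
status = proc.wait()             # raw waitpid() status word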
'Returns an instance of the RExec class. The hooks parameter is an instance of the RHooks class or a subclass of it. If it is omitted or None, the default RHooks class is instantiated. Whenever the RExec module searches for a module (even a built-in one) or reads a module\'s code, it doesn\'t actually go out to the file system itself. Rather, it calls methods of an RHooks instance that was passed to or created by its constructor. (Actually, the RExec object doesn\'t make these calls --- they are made by a module loader object that\'s part of the RExec object. This allows another level of flexibility, which can be useful when changing the mechanics of import within the restricted environment.) By providing an alternate RHooks object, we can control the file system accesses made to import a module, without changing the actual algorithm that controls the order in which those accesses are made. For instance, we could substitute an RHooks object that passes all filesystem requests to a file server elsewhere, via some RPC mechanism such as ILU. Grail\'s applet loader uses this to support importing applets from a URL for a directory. If the verbose parameter is true, additional debugging output may be sent to standard output.'
def __init__(self, hooks=None, verbose=0):
    raise RuntimeError, 'This code is not secure in Python 2.2 and later'
    ihooks._Verbose.__init__(self, verbose)
    self.hooks = hooks or RHooks(verbose)
    self.hooks.set_rexec(self)
    self.modules = {}
    self.ok_dynamic_modules = self.ok_builtin_modules
    list = []
    for mname in self.ok_builtin_modules:
        if mname in sys.builtin_module_names:
            list.append(mname)
    self.ok_builtin_modules = tuple(list)
    self.set_trusted_path()
    self.make_builtin()
    self.make_initial_modules()
    self.make_sys()
    self.loader = RModuleLoader(self.hooks, verbose)
    self.importer = RModuleImporter(self.loader, verbose)
'Execute code within a restricted environment. The code parameter must either be a string containing one or more lines of Python code, or a compiled code object, which will be executed in the restricted environment\'s __main__ module.'
def r_exec(self, code):
    m = self.add_module('__main__')
    exec code in m.__dict__
'Evaluate code within a restricted environment. The code parameter must either be a string containing a Python expression, or a compiled code object, which will be evaluated in the restricted environment\'s __main__ module. The value of the expression or code object will be returned.'
def r_eval(self, code):
    m = self.add_module('__main__')
    return eval(code, m.__dict__)
'Execute the Python code in the file in the restricted environment\'s __main__ module.'
def r_execfile(self, file):
    m = self.add_module('__main__')
    execfile(file, m.__dict__)
'Import a module, raising an ImportError exception if the module is considered unsafe. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment.'
def r_import(self, mname, globals={}, locals={}, fromlist=[]):
return self.importer.import_module(mname, globals, locals, fromlist)
'Reload the module object, re-parsing and re-initializing it. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment.'
def r_reload(self, m):
return self.importer.reload(m)
'Unload the module. Removes it from the restricted environment\'s sys.modules dictionary. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment.'
def r_unload(self, m):
return self.importer.unload(m)
'Execute code within a restricted environment. Similar to the r_exec() method, but the code will be granted access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout. The code parameter must either be a string containing one or more lines of Python code, or a compiled code object, which will be executed in the restricted environment\'s __main__ module.'
def s_exec(self, *args):
return self.s_apply(self.r_exec, args)
'Evaluate code within a restricted environment. Similar to the r_eval() method, but the code will be granted access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout. The code parameter must either be a string containing a Python expression, or a compiled code object, which will be evaluated in the restricted environment\'s __main__ module. The value of the expression or code object will be returned.'
def s_eval(self, *args):
return self.s_apply(self.r_eval, args)
'Execute the Python code in the file in the restricted environment\'s __main__ module. Similar to the r_execfile() method, but the code will be granted access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.'
def s_execfile(self, *args):
return self.s_apply(self.r_execfile, args)
'Import a module, raising an ImportError exception if the module is considered unsafe. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_import() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.'
def s_import(self, *args):
return self.s_apply(self.r_import, args)
'Reload the module object, re-parsing and re-initializing it. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_reload() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.'
def s_reload(self, *args):
return self.s_apply(self.r_reload, args)
'Unload the module. Removes it from the restricted environment\'s sys.modules dictionary. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment. Similar to the r_unload() method, but has access to restricted versions of the standard I/O streams sys.stdin, sys.stderr, and sys.stdout.'
def s_unload(self, *args):
return self.s_apply(self.r_unload, args)
'Method called when open() is called in the restricted environment. The arguments are identical to those of the open() function, and a file object (or a class instance compatible with file objects) should be returned. RExec\'s default behaviour is allow opening any file for reading, but forbidding any attempt to write a file. This method is implicitly called by code executing in the restricted environment. Overriding this method in a subclass is used to change the policies enforced by a restricted environment.'
def r_open(self, file, mode='r', buf=(-1)):
    mode = str(mode)
    if mode not in ('r', 'rb'):
        raise IOError, "can't open files for writing in restricted mode"
    return open(file, mode, buf)
'Connect to host. Arguments are: - host: hostname to connect to (string, default previous host) - port: port to connect to (integer, default previous port)'
def connect(self, host='', port=0, timeout=(-999)):
    if host != '':
        self.host = host
    if port > 0:
        self.port = port
    if timeout != -999:
        self.timeout = timeout
    self.sock = socket.create_connection((self.host, self.port), self.timeout)
    self.af = self.sock.family
    self.file = self.sock.makefile('rb')
    self.welcome = self.getresp()
    return self.welcome
'Get the welcome message from the server. (this is read and squirreled away by connect())'
def getwelcome(self):
    if self.debugging:
        print '*welcome*', self.sanitize(self.welcome)
    return self.welcome
'Set the debugging level. The required argument level means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 2: also print raw lines read and sent before stripping CR/LF'
def set_debuglevel(self, level):
self.debugging = level
'Use passive or active mode for data transfers. With a false argument, use the normal PORT mode. With a true argument, use the PASV command.'
def set_pasv(self, val):
self.passiveserver = val
'Expect a response beginning with \'2\'.'
def voidresp(self):
    resp = self.getresp()
    if resp[:1] != '2':
        raise error_reply, resp
    return resp
'Abort a file transfer. Uses out-of-band data. This does not follow the procedure from the RFC to send Telnet IP and Synch; that doesn\'t seem to work with the servers I\'ve tried. Instead, just send the ABOR command as OOB data.'
def abort(self):
    line = 'ABOR' + CRLF
    if self.debugging > 1:
        print '*put urgent*', self.sanitize(line)
    self.sock.sendall(line, MSG_OOB)
    resp = self.getmultiline()
    if resp[:3] not in ('426', '225', '226'):
        raise error_proto, resp
'Send a command and return the response.'
def sendcmd(self, cmd):
    self.putcmd(cmd)
    return self.getresp()
'Send a command and expect a response beginning with \'2\'.'
def voidcmd(self, cmd):
    self.putcmd(cmd)
    return self.voidresp()
'Send a PORT command with the current host and the given port number.'
def sendport(self, host, port):
    hbytes = host.split('.')
    pbytes = [repr(port // 256), repr(port % 256)]
    bytes = hbytes + pbytes
    cmd = 'PORT ' + ','.join(bytes)
    return self.voidcmd(cmd)
'Send a EPRT command with the current host and the given port number.'
def sendeprt(self, host, port):
    af = 0
    if self.af == socket.AF_INET:
        af = 1
    if self.af == socket.AF_INET6:
        af = 2
    if af == 0:
        raise error_proto, 'unsupported address family'
    fields = ['', repr(af), host, repr(port), '']
    cmd = 'EPRT ' + '|'.join(fields)
    return self.voidcmd(cmd)
'Create a new socket and send a PORT command for it.'
def makeport(self):
    msg = 'getaddrinfo returns an empty list'
    sock = None
    for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE):
        af, socktype, proto, canonname, sa = res
        try:
            sock = socket.socket(af, socktype, proto)
            sock.bind(sa)
        except socket.error as msg:
            if sock:
                sock.close()
            sock = None
            continue
        break
    if not sock:
        raise socket.error, msg
    sock.listen(1)
    port = sock.getsockname()[1]
    host = self.sock.getsockname()[0]
    if self.af == socket.AF_INET:
        resp = self.sendport(host, port)
    else:
        resp = self.sendeprt(host, port)
    if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
        sock.settimeout(self.timeout)
    return sock
'Initiate a transfer over the data connection. If the transfer is active, send a port command and the transfer command, and accept the connection. If the server is passive, send a pasv command, connect to it, and start the transfer command. Either way, return the socket for the connection and the expected size of the transfer. The expected size may be None if it could not be determined. Optional `rest\' argument can be a string that is sent as the argument to a REST command. This is essentially a server marker used to tell the server to skip over any data up to the given marker.'
def ntransfercmd(self, cmd, rest=None):
    size = None
    if self.passiveserver:
        host, port = self.makepasv()
        conn = socket.create_connection((host, port), self.timeout)
        if rest is not None:
            self.sendcmd('REST %s' % rest)
        resp = self.sendcmd(cmd)
        if resp[0] == '2':
            resp = self.getresp()
        if resp[0] != '1':
            raise error_reply, resp
    else:
        sock = self.makeport()
        if rest is not None:
            self.sendcmd('REST %s' % rest)
        resp = self.sendcmd(cmd)
        if resp[0] == '2':
            resp = self.getresp()
        if resp[0] != '1':
            raise error_reply, resp
        conn, sockaddr = sock.accept()
        if self.timeout is not _GLOBAL_DEFAULT_TIMEOUT:
            conn.settimeout(self.timeout)
    if resp[:3] == '150':
        size = parse150(resp)
    return (conn, size)
'Like ntransfercmd() but returns only the socket.'
def transfercmd(self, cmd, rest=None):
return self.ntransfercmd(cmd, rest)[0]
'Login, default anonymous.'
def login(self, user='', passwd='', acct=''):
    if not user:
        user = 'anonymous'
    if not passwd:
        passwd = ''
    if not acct:
        acct = ''
    if user == 'anonymous' and passwd in ('', '-'):
        passwd = passwd + 'anonymous@'
    resp = self.sendcmd('USER ' + user)
    if resp[0] == '3':
        resp = self.sendcmd('PASS ' + passwd)
    if resp[0] == '3':
        resp = self.sendcmd('ACCT ' + acct)
    if resp[0] != '2':
        raise error_reply, resp
    return resp
'Retrieve data in binary mode. A new port is created for you. Args: cmd: A RETR command. callback: A single parameter callable to be called on each block of data read. blocksize: The maximum number of bytes to read from the socket at one time. [default: 8192] rest: Passed to transfercmd(). [default: None] Returns: The response code.'
def retrbinary(self, cmd, callback, blocksize=8192, rest=None):
    self.voidcmd('TYPE I')
    conn = self.transfercmd(cmd, rest)
    while 1:
        data = conn.recv(blocksize)
        if not data:
            break
        callback(data)
    conn.close()
    return self.voidresp()
'Retrieve data in line mode. A new port is created for you. Args: cmd: A RETR, LIST, NLST, or MLSD command. callback: An optional single parameter callable that is called for each line with the trailing CRLF stripped. [default: print_line()] Returns: The response code.'
def retrlines(self, cmd, callback=None):
    if callback is None:
        callback = print_line
    resp = self.sendcmd('TYPE A')
    conn = self.transfercmd(cmd)
    fp = conn.makefile('rb')
    while 1:
        line = fp.readline()
        if self.debugging > 2:
            print '*retr*', repr(line)
        if not line:
            break
        if line[-2:] == CRLF:
            line = line[:-2]
        elif line[-1:] == '\n':
            line = line[:-1]
        callback(line)
    fp.close()
    conn.close()
    return self.voidresp()
'Store a file in binary mode. A new port is created for you. Args: cmd: A STOR command. fp: A file-like object with a read(num_bytes) method. blocksize: The maximum data size to read from fp and send over the connection at once. [default: 8192] callback: An optional single parameter callable that is called on each block of data after it is sent. [default: None] rest: Passed to transfercmd(). [default: None] Returns: The response code.'
def storbinary(self, cmd, fp, blocksize=8192, callback=None, rest=None):
    self.voidcmd('TYPE I')
    conn = self.transfercmd(cmd, rest)
    while 1:
        buf = fp.read(blocksize)
        if not buf:
            break
        conn.sendall(buf)
        if callback:
            callback(buf)
    conn.close()
    return self.voidresp()
'Store a file in line mode. A new port is created for you. Args: cmd: A STOR command. fp: A file-like object with a readline() method. callback: An optional single parameter callable that is called on each line after it is sent. [default: None] Returns: The response code.'
def storlines(self, cmd, fp, callback=None):
    self.voidcmd('TYPE A')
    conn = self.transfercmd(cmd)
    while 1:
        buf = fp.readline()
        if not buf:
            break
        if buf[-2:] != CRLF:
            if buf[-1] in CRLF:
                buf = buf[:-1]
            buf = buf + CRLF
        conn.sendall(buf)
        if callback:
            callback(buf)
    conn.close()
    return self.voidresp()
'Send new account name.'
def acct(self, password):
    cmd = 'ACCT ' + password
    return self.voidcmd(cmd)
'Return a list of files in a given directory (default the current).'
def nlst(self, *args):
    cmd = 'NLST'
    for arg in args:
        cmd = cmd + (' ' + arg)
    files = []
    self.retrlines(cmd, files.append)
    return files
'List a directory in long form. By default list current directory to stdout. Optional last argument is callback function; all non-empty arguments before it are concatenated to the LIST command. (This *should* only be used for a pathname.)'
def dir(self, *args):
    cmd = 'LIST'
    func = None
    if args[-1:] and type(args[-1]) != type(''):
        args, func = args[:-1], args[-1]
    for arg in args:
        if arg:
            cmd = cmd + (' ' + arg)
    self.retrlines(cmd, func)
'Rename a file.'
def rename(self, fromname, toname):
    resp = self.sendcmd('RNFR ' + fromname)
    if resp[0] != '3':
        raise error_reply, resp
    return self.voidcmd('RNTO ' + toname)
'Delete a file.'
def delete(self, filename):
    resp = self.sendcmd('DELE ' + filename)
    if resp[:3] in ('250', '200'):
        return resp
    else:
        raise error_reply, resp
'Change to a directory.'
def cwd(self, dirname):
    if dirname == '..':
        try:
            return self.voidcmd('CDUP')
        except error_perm as msg:
            if msg.args[0][:3] != '500':
                raise
    elif dirname == '':
        dirname = '.'
    cmd = 'CWD ' + dirname
    return self.voidcmd(cmd)
'Retrieve the size of a file.'
def size(self, filename):
    resp = self.sendcmd('SIZE ' + filename)
    if resp[:3] == '213':
        s = resp[3:].strip()
        try:
            return int(s)
        except (OverflowError, ValueError):
            return long(s)
'Make a directory, return its full pathname.'
def mkd(self, dirname):
    resp = self.sendcmd('MKD ' + dirname)
    return parse257(resp)
'Remove a directory.'
def rmd(self, dirname):
return self.voidcmd(('RMD ' + dirname))
'Return current working directory.'
def pwd(self):
    resp = self.sendcmd('PWD')
    return parse257(resp)
'Quit, and close the connection.'
def quit(self):
    resp = self.voidcmd('QUIT')
    self.close()
    return resp
'Close the connection without assuming anything about it.'
def close(self):
    if self.file:
        self.file.close()
        self.sock.close()
        self.file = self.sock = None
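Putting the FTP methods above together, a hedged end-to-end sketch (the host and file names are placeholders, not from the source):

from ftplib import FTP

ftp = FTP()
ftp.connect('ftp.example.com', 21, timeout=30)
ftp.login()                                   # anonymous login
ftp.cwd('/pub')
ftp.retrlines('LIST')                         # print a directory listing
with open('file.bin', 'wb') as f:
    ftp.retrbinary('RETR file.bin', f.write)  # binary download via callback
ftp.quit()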
'Return a list of hosts mentioned in the .netrc file.'
def get_hosts(self):
return self.__hosts.keys()
'Returns login information for the named host. The return value is a triple containing userid, password, and the accounting field.'
def get_account(self, host):
    host = host.lower()
    user = passwd = acct = None
    if host in self.__hosts:
        user, passwd, acct = self.__hosts[host]
    user = user or self.__defuser
    passwd = passwd or self.__defpasswd
    acct = acct or self.__defacct
    return (user, passwd, acct)
'Return a list of all defined macro names.'
def get_macros(self):
return self.__macros.keys()
'Return a sequence of lines which define a named macro.'
def get_macro(self, macro):
return self.__macros[macro]
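The Netrc helper above is ftplib's own (deprecated) parser; a minimal query sketch assuming a readable ~/.netrc exists (the host name is a placeholder):

from ftplib import Netrc

rc = Netrc()                                   # parses ~/.netrc by default
user, passwd, acct = rc.get_account('ftp.example.com')
print rc.get_hosts(), rc.get_macros()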
'Add a header line to the MIME message. The key is the name of the header, where the value obviously provides the value of the header. The optional argument prefix determines where the header is inserted; 0 means append at the end, 1 means insert at the start. The default is to append.'
def addheader(self, key, value, prefix=0):
    lines = value.split('\n')
    while lines and not lines[-1]:
        del lines[-1]
    while lines and not lines[0]:
        del lines[0]
    for i in range(1, len(lines)):
        lines[i] = ' ' + lines[i].strip()
    value = '\n'.join(lines) + '\n'
    line = key + ': ' + value
    if prefix:
        self._headers.insert(0, line)
    else:
        self._headers.append(line)
'Writes out and forgets all headers accumulated so far. This is useful if you don\'t need a body part at all; for example, for a subpart of type message/rfc822 that\'s (mis)used to store some header-like information.'
def flushheaders(self):
    self._fp.writelines(self._headers)
    self._headers = []
'Returns a file-like object for writing the body of the message. The content-type is set to the provided ctype, and the optional parameter, plist, provides additional parameters for the content-type declaration. The optional argument prefix determines where the header is inserted; 0 means append at the end, 1 means insert at the start. The default is to insert at the start.'
def startbody(self, ctype, plist=[], prefix=1):
    for (name, value) in plist:
        ctype = ctype + ';\n %s="%s"' % (name, value)
    self.addheader('Content-Type', ctype, prefix=prefix)
    self.flushheaders()
    self._fp.write('\n')
    return self._fp
'Returns a file-like object for writing the body of the message. Additionally, this method initializes the multi-part code, where the subtype parameter provides the multipart subtype, the boundary parameter may provide a user-defined boundary specification, and the plist parameter provides optional parameters for the subtype. The optional argument, prefix, determines where the header is inserted; 0 means append at the end, 1 means insert at the start. The default is to insert at the start. Subparts should be created using the nextpart() method.'
def startmultipartbody(self, subtype, boundary=None, plist=[], prefix=1):
    self._boundary = boundary or mimetools.choose_boundary()
    return self.startbody('multipart/' + subtype,
                          [('boundary', self._boundary)] + plist,
                          prefix=prefix)
'Returns a new instance of MimeWriter which represents an individual part in a multipart message. This may be used to write the part as well as used for creating recursively complex multipart messages. The message must first be initialized with the startmultipartbody() method before using the nextpart() method.'
def nextpart(self):
    self._fp.write('\n--' + self._boundary + '\n')
    return self.__class__(self._fp)
'This is used to designate the last part of a multipart message. It should always be used when writing multipart messages.'
def lastpart(self):
self._fp.write((('\n--' + self._boundary) + '--\n'))
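A small sketch tying the MimeWriter methods together (MimeWriter is the legacy pre-email-package API; the payload text is arbitrary):

import sys
from MimeWriter import MimeWriter

w = MimeWriter(sys.stdout)
w.addheader('Subject', 'demo')
w.startmultipartbody('mixed')        # choose a boundary and write the headers
part = w.nextpart()                  # first subpart
body = part.startbody('text/plain')
body.write('hello from part one\n')
w.lastpart()                         # closing boundary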
'Constructs a Fraction. Takes a string like \'3/2\' or \'1.5\', another Rational instance, a numerator/denominator pair, or a float. Examples >>> Fraction(10, -8) Fraction(-5, 4) >>> Fraction(Fraction(1, 7), 5) Fraction(1, 35) >>> Fraction(Fraction(1, 7), Fraction(2, 3)) Fraction(3, 14) >>> Fraction(\'314\') Fraction(314, 1) >>> Fraction(\'-35/4\') Fraction(-35, 4) >>> Fraction(\'3.1415\') # conversion from numeric string Fraction(6283, 2000) >>> Fraction(\'-47e-2\') # string may include a decimal exponent Fraction(-47, 100) >>> Fraction(1.47) # direct construction from float (exact conversion) Fraction(6620291452234629, 4503599627370496) >>> Fraction(2.25) Fraction(9, 4) >>> Fraction(Decimal(\'1.47\')) Fraction(147, 100)'
def __new__(cls, numerator=0, denominator=None):
    self = super(Fraction, cls).__new__(cls)
    if denominator is None:
        if isinstance(numerator, Rational):
            self._numerator = numerator.numerator
            self._denominator = numerator.denominator
            return self
        elif isinstance(numerator, float):
            value = Fraction.from_float(numerator)
            self._numerator = value._numerator
            self._denominator = value._denominator
            return self
        elif isinstance(numerator, Decimal):
            value = Fraction.from_decimal(numerator)
            self._numerator = value._numerator
            self._denominator = value._denominator
            return self
        elif isinstance(numerator, basestring):
            m = _RATIONAL_FORMAT.match(numerator)
            if m is None:
                raise ValueError('Invalid literal for Fraction: %r' % numerator)
            numerator = int(m.group('num') or '0')
            denom = m.group('denom')
            if denom:
                denominator = int(denom)
            else:
                denominator = 1
                decimal = m.group('decimal')
                if decimal:
                    scale = 10 ** len(decimal)
                    numerator = numerator * scale + int(decimal)
                    denominator *= scale
                exp = m.group('exp')
                if exp:
                    exp = int(exp)
                    if exp >= 0:
                        numerator *= 10 ** exp
                    else:
                        denominator *= 10 ** -exp
            if m.group('sign') == '-':
                numerator = -numerator
        else:
            raise TypeError('argument should be a string or a Rational instance')
    elif isinstance(numerator, Rational) and isinstance(denominator, Rational):
        numerator, denominator = (numerator.numerator * denominator.denominator,
                                  denominator.numerator * numerator.denominator)
    else:
        raise TypeError('both arguments should be Rational instances')
    if denominator == 0:
        raise ZeroDivisionError('Fraction(%s, 0)' % numerator)
    g = gcd(numerator, denominator)
    self._numerator = numerator // g
    self._denominator = denominator // g
    return self
'Converts a finite float to a rational number, exactly. Beware that Fraction.from_float(0.3) != Fraction(3, 10).'
@classmethod
def from_float(cls, f):
    if isinstance(f, numbers.Integral):
        return cls(f)
    elif not isinstance(f, float):
        raise TypeError('%s.from_float() only takes floats, not %r (%s)' %
                        (cls.__name__, f, type(f).__name__))
    if math.isnan(f) or math.isinf(f):
        raise TypeError('Cannot convert %r to %s.' % (f, cls.__name__))
    return cls(*f.as_integer_ratio())
'Converts a finite Decimal instance to a rational number, exactly.'
@classmethod
def from_decimal(cls, dec):
    from decimal import Decimal
    if isinstance(dec, numbers.Integral):
        dec = Decimal(int(dec))
    elif not isinstance(dec, Decimal):
        raise TypeError('%s.from_decimal() only takes Decimals, not %r (%s)' %
                        (cls.__name__, dec, type(dec).__name__))
    if not dec.is_finite():
        raise TypeError('Cannot convert %s to %s.' % (dec, cls.__name__))
    sign, digits, exp = dec.as_tuple()
    digits = int(''.join(map(str, digits)))
    if sign:
        digits = -digits
    if exp >= 0:
        return cls(digits * 10 ** exp)
    else:
        return cls(digits, 10 ** -exp)
'Closest Fraction to self with denominator at most max_denominator. >>> Fraction(\'3.141592653589793\').limit_denominator(10) Fraction(22, 7) >>> Fraction(\'3.141592653589793\').limit_denominator(100) Fraction(311, 99) >>> Fraction(4321, 8765).limit_denominator(10000) Fraction(4321, 8765)'
def limit_denominator(self, max_denominator=1000000):
    if max_denominator < 1:
        raise ValueError('max_denominator should be at least 1')
    if self._denominator <= max_denominator:
        return Fraction(self)
    p0, q0, p1, q1 = 0, 1, 1, 0
    n, d = self._numerator, self._denominator
    while True:
        a = n // d
        q2 = q0 + a * q1
        if q2 > max_denominator:
            break
        p0, q0, p1, q1 = p1, q1, p0 + a * p1, q2
        n, d = d, n - a * d
    k = (max_denominator - q0) // q1
    bound1 = Fraction(p0 + k * p1, q0 + k * q1)
    bound2 = Fraction(p1, q1)
    if abs(bound2 - self) <= abs(bound1 - self):
        return bound2
    else:
        return bound1
'repr(self)'
def __repr__(self):
return ('Fraction(%s, %s)' % (self._numerator, self._denominator))
'str(self)'
def __str__(self):
    if self._denominator == 1:
        return str(self._numerator)
    else:
        return '%s/%s' % (self._numerator, self._denominator)
'Generates forward and reverse operators given a purely-rational operator and a function from the operator module. Use this like: __op__, __rop__ = _operator_fallbacks(just_rational_op, operator.op) In general, we want to implement the arithmetic operations so that mixed-mode operations either call an implementation whose author knew about the types of both arguments, or convert both to the nearest built in type and do the operation there. In Fraction, that means that we define __add__ and __radd__ as: def __add__(self, other): # Both types have numerators/denominator attributes, # so do the operation directly if isinstance(other, (int, long, Fraction)): return Fraction(self.numerator * other.denominator + other.numerator * self.denominator, self.denominator * other.denominator) # float and complex don\'t have those operations, but we # know about those types, so special case them. elif isinstance(other, float): return float(self) + other elif isinstance(other, complex): return complex(self) + other # Let the other type take over. return NotImplemented def __radd__(self, other): # radd handles more types than add because there\'s # nothing left to fall back to. if isinstance(other, Rational): return Fraction(self.numerator * other.denominator + other.numerator * self.denominator, self.denominator * other.denominator) elif isinstance(other, Real): return float(other) + float(self) elif isinstance(other, Complex): return complex(other) + complex(self) return NotImplemented There are 5 different cases for a mixed-type addition on Fraction. I\'ll refer to all of the above code that doesn\'t refer to Fraction, float, or complex as "boilerplate". \'r\' will be an instance of Fraction, which is a subtype of Rational (r : Fraction <: Rational), and b : B <: Complex. The first three involve \'r + b\': 1. If B <: Fraction, int, float, or complex, we handle that specially, and all is well. 2. If Fraction falls back to the boilerplate code, and it were to return a value from __add__, we\'d miss the possibility that B defines a more intelligent __radd__, so the boilerplate should return NotImplemented from __add__. In particular, we don\'t handle Rational here, even though we could get an exact answer, in case the other type wants to do something special. 3. If B <: Fraction, Python tries B.__radd__ before Fraction.__add__. This is ok, because it was implemented with knowledge of Fraction, so it can handle those instances before delegating to Real or Complex. The next two situations describe \'b + r\'. We assume that b didn\'t know about Fraction in its implementation, and that it uses similar boilerplate code: 4. If B <: Rational, then __radd_ converts both to the builtin rational type (hey look, that\'s us) and proceeds. 5. Otherwise, __radd__ tries to find the nearest common base ABC, and fall back to its builtin type. Since this class doesn\'t subclass a concrete type, there\'s no implementation to fall back to, so we need to try as hard as possible to return an actual value, or the user will get a TypeError.'
def _operator_fallbacks(monomorphic_operator, fallback_operator):
    def forward(a, b):
        if isinstance(b, (int, long, Fraction)):
            return monomorphic_operator(a, b)
        elif isinstance(b, float):
            return fallback_operator(float(a), b)
        elif isinstance(b, complex):
            return fallback_operator(complex(a), b)
        else:
            return NotImplemented
    forward.__name__ = '__' + fallback_operator.__name__ + '__'
    forward.__doc__ = monomorphic_operator.__doc__

    def reverse(b, a):
        if isinstance(a, Rational):
            return monomorphic_operator(a, b)
        elif isinstance(a, numbers.Real):
            return fallback_operator(float(a), float(b))
        elif isinstance(a, numbers.Complex):
            return fallback_operator(complex(a), complex(b))
        else:
            return NotImplemented
    reverse.__name__ = '__r' + fallback_operator.__name__ + '__'
    reverse.__doc__ = monomorphic_operator.__doc__

    return (forward, reverse)
'a + b'
def _add(a, b):
return Fraction(((a.numerator * b.denominator) + (b.numerator * a.denominator)), (a.denominator * b.denominator))
'a - b'
def _sub(a, b):
return Fraction(((a.numerator * b.denominator) - (b.numerator * a.denominator)), (a.denominator * b.denominator))
'a * b'
def _mul(a, b):
return Fraction((a.numerator * b.numerator), (a.denominator * b.denominator))
'a / b'
def _div(a, b):
return Fraction((a.numerator * b.denominator), (a.denominator * b.numerator))
'a // b'
def __floordiv__(a, b):
    div = a / b
    if isinstance(div, Rational):
        return div.numerator // div.denominator
    else:
        return math.floor(div)
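Finally, a brief interactive sketch (not from the extracted source) of the mixed-mode behaviour that _operator_fallbacks and the _add/_div helpers above implement:

from fractions import Fraction

print Fraction(1, 3) + Fraction(1, 6)   # Fraction(1, 2) -- exact rational path
print Fraction(1, 3) + 1                # Fraction(4, 3) -- int handled directly
print Fraction(1, 3) + 0.5              # 0.8333... -- falls back to float
print Fraction(7, 3) // Fraction(1, 2)  # 4 -- __floordiv__ on two Rationals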