desc (string, lengths 3–26.7k) | decl (string, lengths 11–7.89k) | bodies (string, lengths 8–553k)
---|---|---
'Initialize with the specified logger being a child of this placeholder.'
| def __init__(self, alogger):
| self.loggerMap = {alogger: None}
|
'Add the specified logger as a child of this placeholder.'
| def append(self, alogger):
| if (alogger not in self.loggerMap):
self.loggerMap[alogger] = None
|
'Initialize the manager with the root node of the logger hierarchy.'
| def __init__(self, rootnode):
| self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
self.loggerClass = None
|
'Get a logger with the specified name (channel name), creating it
if it doesn\'t yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn\'t exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.'
| def getLogger(self, name):
| rv = None
if (not isinstance(name, basestring)):
raise TypeError('A logger name must be string or Unicode')
if isinstance(name, unicode):
name = name.encode('utf-8')
_acquireLock()
try:
if (name in self.loggerDict):
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
|
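A minimal sketch of the placeholder behaviour described above, assuming the names "a", "a.b" and "a.b.c" have not yet been created in the process (the sketch is illustrative and not part of the original dataset row):

```python
import logging

# "a.b.c" is requested first: "a" and "a.b" only get PlaceHolder entries in
# Manager.loggerDict, so the new logger's parent is the root logger for now.
child = logging.getLogger("a.b.c")
assert child.parent is logging.getLogger()

# Creating "a" replaces its placeholder; _fixupChildren() re-points "a.b.c"
# at the real "a" logger ("a.b" remains just a placeholder).
parent = logging.getLogger("a")
assert child.parent is parent
```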
'Set the class to be used when instantiating a logger with this Manager.'
| def setLoggerClass(self, klass):
| if (klass != Logger):
if (not issubclass(klass, Logger)):
raise TypeError(('logger not derived from logging.Logger: ' + klass.__name__))
self.loggerClass = klass
|
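A hedged sketch of how setLoggerClass() is typically used; TraceLogger and its TRACE level are hypothetical names for illustration:

```python
import logging

class TraceLogger(logging.Logger):
    TRACE = 5  # hypothetical custom level below DEBUG

    def trace(self, msg, *args, **kwargs):
        if self.isEnabledFor(self.TRACE):
            self._log(self.TRACE, msg, args, **kwargs)

# Register the subclass on the manager; getLogger() then instantiates it for
# names that do not exist yet (the class must derive from logging.Logger).
logging.Logger.manager.setLoggerClass(TraceLogger)
tracer = logging.getLogger("app.tracing")
assert isinstance(tracer, TraceLogger)
```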
'Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.'
| def _fixupParents(self, alogger):
| name = alogger.name
i = name.rfind('.')
rv = None
while ((i > 0) and (not rv)):
substr = name[:i]
if (substr not in self.loggerDict):
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind('.', 0, (i - 1))
if (not rv):
rv = self.root
alogger.parent = rv
|
'Ensure that children of the placeholder ph are connected to the
specified logger.'
| def _fixupChildren(self, ph, alogger):
| name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
if (c.parent.name[:namelen] != name):
alogger.parent = c.parent
c.parent = alogger
|
'Initialize the logger with a name and an optional level.'
| def __init__(self, name, level=NOTSET):
| Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
|
'Set the logging level of this logger.'
| def setLevel(self, level):
| self.level = _checkLevel(level)
|
'Log \'msg % args\' with severity \'DEBUG\'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)'
| def debug(self, msg, *args, **kwargs):
| if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
|
'Log \'msg % args\' with severity \'INFO\'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)'
| def info(self, msg, *args, **kwargs):
| if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
|
'Log \'msg % args\' with severity \'WARNING\'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)'
| def warning(self, msg, *args, **kwargs):
| if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
|
'Log \'msg % args\' with severity \'ERROR\'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)'
| def error(self, msg, *args, **kwargs):
| if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
|
'Convenience method for logging an ERROR with exception information.'
| def exception(self, msg, *args, **kwargs):
| kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
|
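A short usage sketch for exception(), which is simply error() with exc_info forced to a true value:

```python
import logging

logging.basicConfig()
log = logging.getLogger("app")

try:
    1 / 0
except ZeroDivisionError:
    # The traceback of the exception currently being handled is appended
    # to the logged message.
    log.exception("division failed")
```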
'Log \'msg % args\' with severity \'CRITICAL\'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)'
| def critical(self, msg, *args, **kwargs):
| if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
|
'Log \'msg % args\' with the integer severity \'level\'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)'
| def log(self, level, msg, *args, **kwargs):
| if (not isinstance(level, int)):
if raiseExceptions:
raise TypeError('level must be an integer')
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
|
'Find the stack frame of the caller so that we can note the source
file name, line number and function name.'
| def findCaller(self):
| f = currentframe()
if (f is not None):
f = f.f_back
rv = ('(unknown file)', 0, '(unknown function)')
while hasattr(f, 'f_code'):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if (filename == _srcfile):
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv
|
'A factory method which can be overridden in subclasses to create
specialized LogRecords.'
| def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
| rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if (extra is not None):
for key in extra:
if ((key in ['message', 'asctime']) or (key in rv.__dict__)):
raise KeyError(('Attempt to overwrite %r in LogRecord' % key))
rv.__dict__[key] = extra[key]
return rv
|
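A small sketch of the `extra` mechanism enforced by makeRecord(); the "client_ip" field name is made up for illustration:

```python
import logging

logging.basicConfig(format="%(asctime)s %(client_ip)s %(message)s")
log = logging.getLogger("audit")

# Each key in `extra` becomes an attribute on the LogRecord; reusing a
# reserved name such as "message" or "asctime" raises KeyError in makeRecord.
log.warning("login failed", extra={"client_ip": "203.0.113.7"})
```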
'Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.'
| def _log(self, level, msg, args, exc_info=None, extra=None):
| if _srcfile:
try:
(fn, lno, func) = self.findCaller()
except ValueError:
(fn, lno, func) = ('(unknown file)', 0, '(unknown function)')
else:
(fn, lno, func) = ('(unknown file)', 0, '(unknown function)')
if exc_info:
if (not isinstance(exc_info, tuple)):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
self.handle(record)
|
'Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.'
| def handle(self, record):
| if ((not self.disabled) and self.filter(record)):
self.callHandlers(record)
|
'Add the specified handler to this logger.'
| def addHandler(self, hdlr):
| _acquireLock()
try:
if (not (hdlr in self.handlers)):
self.handlers.append(hdlr)
finally:
_releaseLock()
|
'Remove the specified handler from this logger.'
| def removeHandler(self, hdlr):
| _acquireLock()
try:
if (hdlr in self.handlers):
self.handlers.remove(hdlr)
finally:
_releaseLock()
|
'Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.'
| def callHandlers(self, record):
| c = self
found = 0
while c:
for hdlr in c.handlers:
found = (found + 1)
if (record.levelno >= hdlr.level):
hdlr.handle(record)
if (not c.propagate):
c = None
else:
c = c.parent
if ((found == 0) and raiseExceptions and (not self.manager.emittedNoHandlerWarning)):
sys.stderr.write(('No handlers could be found for logger "%s"\n' % self.name))
self.manager.emittedNoHandlerWarning = 1
|
'Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.'
| def getEffectiveLevel(self):
| logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
|
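A sketch of how getEffectiveLevel(), isEnabledFor() and propagate interact, assuming the "svc" names are unused:

```python
import logging

parent = logging.getLogger("svc")
child = logging.getLogger("svc.db")
parent.setLevel(logging.ERROR)

# The child is NOTSET, so getEffectiveLevel() walks up the hierarchy and
# finds ERROR on "svc".
assert child.getEffectiveLevel() == logging.ERROR
assert not child.isEnabledFor(logging.WARNING)

# With propagate set to 0, callHandlers() stops climbing at the child, so
# only handlers attached to "svc.db" itself would see its records.
child.propagate = 0
```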
'Is this logger enabled for level \'level\'?'
| def isEnabledFor(self, level):
| if (self.manager.disable >= level):
return 0
return (level >= self.getEffectiveLevel())
|
'Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger(\'abc\').getChild(\'def.ghi\')
is the same as
logging.getLogger(\'abc.def.ghi\')
It\'s useful, for example, when the parent logger is named using
__name__ rather than a literal string.'
| def getChild(self, suffix):
| if (self.root is not self):
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
|
'Initialize the logger with the name "root".'
| def __init__(self, level):
| Logger.__init__(self, 'root', level)
|
'Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))'
| def __init__(self, logger, extra):
| self.logger = logger
self.extra = extra
|
'Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you\'ll only need to override this one method in a
LoggerAdapter subclass for your specific needs.'
| def process(self, msg, kwargs):
| kwargs['extra'] = self.extra
return (msg, kwargs)
|
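A hedged sketch of overriding process() in a LoggerAdapter subclass; RequestAdapter and the request_id key are illustrative names:

```python
import logging

class RequestAdapter(logging.LoggerAdapter):
    # Prepend the request id carried in self.extra to every message.
    def process(self, msg, kwargs):
        kwargs["extra"] = self.extra
        return "[req %s] %s" % (self.extra["request_id"], msg), kwargs

logging.basicConfig(level=logging.INFO)
adapter = RequestAdapter(logging.getLogger("web"), {"request_id": "42"})
adapter.info("handling %s", "/index")  # message becomes "[req 42] handling /index"
```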
'Delegate a debug call to the underlying logger, after adding
contextual information from this adapter instance.'
| def debug(self, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
|
'Delegate an info call to the underlying logger, after adding
contextual information from this adapter instance.'
| def info(self, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
self.logger.info(msg, *args, **kwargs)
|
'Delegate a warning call to the underlying logger, after adding
contextual information from this adapter instance.'
| def warning(self, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
self.logger.warning(msg, *args, **kwargs)
|
'Delegate an error call to the underlying logger, after adding
contextual information from this adapter instance.'
| def error(self, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
self.logger.error(msg, *args, **kwargs)
|
'Delegate an exception call to the underlying logger, after adding
contextual information from this adapter instance.'
| def exception(self, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
kwargs['exc_info'] = 1
self.logger.error(msg, *args, **kwargs)
|
'Delegate a critical call to the underlying logger, after adding
contextual information from this adapter instance.'
| def critical(self, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
self.logger.critical(msg, *args, **kwargs)
|
'Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.'
| def log(self, level, msg, *args, **kwargs):
| (msg, kwargs) = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
|
'See if the underlying logger is enabled for the specified level.'
| def isEnabledFor(self, level):
| return self.logger.isEnabledFor(level)
|
'This method is called when there is the remote possibility
that we ever need to stop in this function.'
| def user_call(self, frame, argument_list):
| pass
|
'This method is called when we stop or break at this line.'
| def user_line(self, frame):
| pass
|
'This method is called when a return trap is set here.'
| def user_return(self, frame, return_value):
| pass
|
'Stop when a line with a line number greater than the current one is
reached in the current frame, or when returning from the current frame.'
| def set_until(self, frame):
| self._set_stopinfo(frame, frame, (frame.f_lineno + 1))
|
'Stop after one line of code.'
| def set_step(self):
| if self.frame_returning:
caller_frame = self.frame_returning.f_back
if (caller_frame and (not caller_frame.f_trace)):
caller_frame.f_trace = self.trace_dispatch
self._set_stopinfo(None, None)
|
'Stop on the next line in or below the given frame.'
| def set_next(self, frame):
| self._set_stopinfo(frame, None)
|
'Stop when returning from the given frame.'
| def set_return(self, frame):
| self._set_stopinfo(frame.f_back, frame)
|
'Start debugging from `frame`.
If frame is not specified, debugging starts from caller\'s frame.'
| def set_trace(self, frame=None):
| if (frame is None):
frame = sys._getframe().f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
|
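A minimal sketch of the user_* hooks in a bdb.Bdb subclass, assuming it is run as a script so that "demo()" resolves in __main__:

```python
import bdb

class LineTracer(bdb.Bdb):
    # user_line() is the hook Bdb invokes each time it stops on a line.
    def user_line(self, frame):
        print("%s:%d in %s" % (frame.f_code.co_filename,
                               frame.f_lineno, frame.f_code.co_name))

def demo():
    x = 1
    return x + 1

# run() resets the debugger, installs trace_dispatch via sys.settrace() and
# executes the statement, stopping on every line of the traced code.
LineTracer().run("demo()")
```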
'Run the given test case or test suite.'
| def run(self, test, skipped):
| result = self._makeResult()
startTime = time.time()
test(result)
stopTime = time.time()
timeTaken = (stopTime - startTime)
result.printErrors()
self.stream.writeln(result.separator2)
run = result.testsRun
if _unavail:
requested = _unavail.keys()
requested.sort()
self.stream.writeln(('Ran %d test%s in %.3fs (%s module%s skipped)' % (run, (((run != 1) and 's') or ''), timeTaken, len(skipped), (((len(skipped) != 1) and 's') or ''))))
self.stream.writeln(('Unavailable resources: %s' % ', '.join(requested)))
else:
self.stream.writeln(('Ran %d test%s in %.3fs' % (run, (((run != 1) and 's') or ''), timeTaken)))
self.stream.writeln()
if (not result.wasSuccessful()):
self.stream.write('FAILED (')
(failed, errored) = map(len, (result.failures, result.errors))
if failed:
self.stream.write(('failures=%d' % failed))
if errored:
if failed:
self.stream.write(', ')
self.stream.write(('errors=%d' % errored))
self.stream.writeln(')')
else:
self.stream.writeln('OK')
return result
|
'Test that a character pointer-to-pointer is correctly passed'
| def test_charpp(self):
| dll = CDLL(_ctypes_test.__file__)
func = dll._testfunc_c_p_p
func.restype = c_char_p
argv = (c_char_p * 2)()
argc = c_int(2)
argv[0] = 'hello'
argv[1] = 'world'
result = func(byref(argc), argv)
assert (result == 'world'), result
|
'Constructor. May be extended, do not override.'
| def __init__(self, server_address, RequestHandlerClass):
| self.server_address = server_address
self.RequestHandlerClass = RequestHandlerClass
self.__is_shut_down = threading.Event()
self.__shutdown_request = False
|
'Called by constructor to activate the server.
May be overridden.'
| def server_activate(self):
| pass
|
'Handle one request at a time until shutdown.
Polls for shutdown every poll_interval seconds. Ignores
self.timeout. If you need to do periodic tasks, do them in
another thread.'
| def serve_forever(self, poll_interval=0.5):
| self.__is_shut_down.clear()
try:
while (not self.__shutdown_request):
(r, w, e) = _eintr_retry(select.select, [self], [], [], poll_interval)
if (self in r):
self._handle_request_noblock()
finally:
self.__shutdown_request = False
self.__is_shut_down.set()
|
'Stops the serve_forever loop.
Blocks until the loop has finished. This must be called while
serve_forever() is running in another thread, or it will
deadlock.'
| def shutdown(self):
| self.__shutdown_request = True
self.__is_shut_down.wait()
|
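A sketch of the serve_forever()/shutdown() pairing, using the Python 2 SocketServer module to match the code above; the echo handler is illustrative:

```python
import threading
import SocketServer  # Python 2 module name; spelled "socketserver" in Python 3

class EchoHandler(SocketServer.BaseRequestHandler):
    def handle(self):
        self.request.sendall(self.request.recv(1024))

server = SocketServer.TCPServer(("127.0.0.1", 0), EchoHandler)
t = threading.Thread(target=server.serve_forever)
t.start()

# ... clients connect here ...

# shutdown() must be called from a different thread than serve_forever(),
# otherwise it blocks forever waiting on __is_shut_down.
server.shutdown()
t.join()
server.server_close()
```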
'Handle one request, possibly blocking.
Respects self.timeout.'
| def handle_request(self):
| timeout = self.socket.gettimeout()
if (timeout is None):
timeout = self.timeout
elif (self.timeout is not None):
timeout = min(timeout, self.timeout)
fd_sets = _eintr_retry(select.select, [self], [], [], timeout)
if (not fd_sets[0]):
self.handle_timeout()
return
self._handle_request_noblock()
|
'Handle one request, without blocking.
I assume that select.select has returned that the socket is
readable before this function was called, so there should be
no risk of blocking in get_request().'
| def _handle_request_noblock(self):
| try:
(request, client_address) = self.get_request()
except socket.error:
return
if self.verify_request(request, client_address):
try:
self.process_request(request, client_address)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
else:
self.shutdown_request(request)
|
'Called if no new request arrives within self.timeout.
Overridden by ForkingMixIn.'
| def handle_timeout(self):
| pass
|
'Verify the request. May be overridden.
Return True if we should proceed with this request.'
| def verify_request(self, request, client_address):
| return True
|
'Call finish_request.
Overridden by ForkingMixIn and ThreadingMixIn.'
| def process_request(self, request, client_address):
| self.finish_request(request, client_address)
self.shutdown_request(request)
|
'Called to clean-up the server.
May be overridden.'
| def server_close(self):
| pass
|
'Finish one request by instantiating RequestHandlerClass.'
| def finish_request(self, request, client_address):
| self.RequestHandlerClass(request, client_address, self)
|
'Called to shutdown and close an individual request.'
| def shutdown_request(self, request):
| self.close_request(request)
|
'Called to clean up an individual request.'
| def close_request(self, request):
| pass
|
'Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.'
| def handle_error(self, request, client_address):
| print ('-' * 40)
print 'Exception happened during processing of request from',
print client_address
import traceback
traceback.print_exc()
print ('-' * 40)
|
'Constructor. May be extended, do not override.'
| def __init__(self, server_address, RequestHandlerClass, bind_and_activate=True):
| BaseServer.__init__(self, server_address, RequestHandlerClass)
self.socket = socket.socket(self.address_family, self.socket_type)
if bind_and_activate:
try:
self.server_bind()
self.server_activate()
except:
self.server_close()
raise
|
'Called by constructor to bind the socket.
May be overridden.'
| def server_bind(self):
| if self.allow_reuse_address:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
self.server_address = self.socket.getsockname()
|
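A small sketch of the allow_reuse_address flag consulted by server_bind(); ReusableTCPServer is an illustrative name:

```python
import SocketServer  # Python 2 module name

class ReusableTCPServer(SocketServer.TCPServer):
    # Checked by server_bind() above: SO_REUSEADDR lets a restarted server
    # rebind its port immediately instead of waiting out TIME_WAIT.
    allow_reuse_address = True
```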
'Called by constructor to activate the server.
May be overridden.'
| def server_activate(self):
| self.socket.listen(self.request_queue_size)
|
'Called to clean-up the server.
May be overridden.'
| def server_close(self):
| self.socket.close()
|
'Return socket file number.
Interface required by select().'
| def fileno(self):
| return self.socket.fileno()
|
'Get the request and client address from the socket.
May be overridden.'
| def get_request(self):
| return self.socket.accept()
|
'Called to shutdown and close an individual request.'
| def shutdown_request(self, request):
| try:
request.shutdown(socket.SHUT_WR)
except socket.error:
pass
self.close_request(request)
|
'Called to clean up an individual request.'
| def close_request(self, request):
| request.close()
|
'Internal routine to wait for children that have exited.'
| def collect_children(self):
| if (self.active_children is None):
return
while (len(self.active_children) >= self.max_children):
try:
(pid, _) = os.waitpid((-1), 0)
self.active_children.discard(pid)
except OSError as e:
if (e.errno == errno.ECHILD):
self.active_children.clear()
elif (e.errno != errno.EINTR):
break
for pid in self.active_children.copy():
try:
(pid, _) = os.waitpid(pid, os.WNOHANG)
self.active_children.discard(pid)
except OSError as e:
if (e.errno == errno.ECHILD):
self.active_children.discard(pid)
|
'Wait for zombies after self.timeout seconds of inactivity.
May be extended, do not override.'
| def handle_timeout(self):
| self.collect_children()
|
'Fork a new subprocess to process the request.'
| def process_request(self, request, client_address):
| self.collect_children()
pid = os.fork()
if pid:
if (self.active_children is None):
self.active_children = set()
self.active_children.add(pid)
self.close_request(request)
return
else:
try:
self.finish_request(request, client_address)
self.shutdown_request(request)
os._exit(0)
except:
try:
self.handle_error(request, client_address)
self.shutdown_request(request)
finally:
os._exit(1)
|
'Same as in BaseServer but as a thread.
In addition, exception handling is done here.'
| def process_request_thread(self, request, client_address):
| try:
self.finish_request(request, client_address)
self.shutdown_request(request)
except:
self.handle_error(request, client_address)
self.shutdown_request(request)
|
'Start a new thread to process the request.'
| def process_request(self, request, client_address):
| t = threading.Thread(target=self.process_request_thread, args=(request, client_address))
t.daemon = self.daemon_threads
t.start()
|
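A sketch of composing ThreadingMixIn with a server class, as the standard ThreadingTCPServer does; ThreadedEchoServer is an illustrative name:

```python
import SocketServer  # Python 2 module name

class ThreadedEchoServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # Each request is handed to process_request_thread() in its own thread;
    # daemon threads will not keep the process alive at interpreter exit.
    daemon_threads = True
```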
'Create new Popen instance.'
| def __init__(self, args, bufsize=0, executable=None, stdin=None, stdout=None, stderr=None, preexec_fn=None, close_fds=False, shell=False, cwd=None, env=None, universal_newlines=False, startupinfo=None, creationflags=0):
| _cleanup()
if (not isinstance(bufsize, (int, long))):
raise TypeError('bufsize must be an integer')
if mswindows:
if (preexec_fn is not None):
raise ValueError('preexec_fn is not supported on Windows platforms')
if (close_fds and ((stdin is not None) or (stdout is not None) or (stderr is not None))):
raise ValueError('close_fds is not supported on Windows platforms if you redirect stdin/stdout/stderr')
elif mono:
if preexec_fn:
raise ValueError('preexec_fn is not supported on .NET platforms')
if close_fds:
raise ValueError('close_fds is not supported on .NET platforms')
if universal_newlines:
raise ValueError('universal_newlines is not supported on .NET platforms')
if startupinfo:
raise ValueError('startupinfo is not supported on .NET platforms')
if creationflags:
raise ValueError('creationflags is not supported on .NET platforms')
if ((stdin is not None) and (stdin != PIPE)):
raise NotImplementedError('Cannot redirect stdin yet.')
if (stderr == STDOUT):
raise NotImplementedError('Cannot redirect stderr to stdout yet.')
else:
if (startupinfo is not None):
raise ValueError('startupinfo is only supported on Windows platforms')
if (creationflags != 0):
raise ValueError('creationflags is only supported on Windows platforms')
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
((p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite), to_close) = self._get_handles(stdin, stdout, stderr)
try:
self._execute_child(args, executable, preexec_fn, close_fds, cwd, env, universal_newlines, startupinfo, creationflags, shell, to_close, p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
except Exception:
(exc_type, exc_value, exc_trace) = sys.exc_info()
for fd in to_close:
try:
if mswindows:
fd.Close()
else:
os.close(fd)
except EnvironmentError:
pass
raise exc_type, exc_value, exc_trace
if mswindows:
if (p2cwrite is not None):
p2cwrite = msvcrt.open_osfhandle(p2cwrite.Detach(), 0)
if (c2pread is not None):
c2pread = msvcrt.open_osfhandle(c2pread.Detach(), 0)
if (errread is not None):
errread = msvcrt.open_osfhandle(errread.Detach(), 0)
if (p2cwrite is not None):
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if (c2pread is not None):
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if (errread is not None):
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
|
'Interact with process: Send data to stdin. Read data from
stdout and stderr, until end-of-file is reached. Wait for
process to terminate. The optional input argument should be a
string to be sent to the child process, or None, if no data
should be sent to the child.
communicate() returns a tuple (stdout, stderr).'
| def communicate(self, input=None):
| if ([self.stdin, self.stdout, self.stderr].count(None) >= 2):
stdout = None
stderr = None
if self.stdin:
if input:
try:
self.stdin.write(input)
except IOError as e:
if ((e.errno != errno.EPIPE) and (e.errno != errno.EINVAL)):
raise
self.stdin.close()
elif self.stdout:
stdout = _eintr_retry_call(self.stdout.read)
self.stdout.close()
elif self.stderr:
stderr = _eintr_retry_call(self.stderr.read)
self.stderr.close()
self.wait()
return (stdout, stderr)
return self._communicate(input)
|
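A usage sketch for communicate(), assuming Python 2 string I/O (to match the code above) and a POSIX `tr` on the PATH:

```python
import subprocess

# Feed data to the child's stdin and collect its stdout; communicate()
# closes the pipes and waits for the process, avoiding the deadlock of
# reading one full pipe while the child blocks writing to another.
p = subprocess.Popen(["tr", "a-z", "A-Z"],
                     stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out, err = p.communicate("hello\n")
print(out)            # "HELLO\n" (err is None: stderr was not captured)
print(p.returncode)   # 0
```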
'A workaround to get special attributes on the ServerProxy
without interfering with the magic __getattr__'
| def __call__(self, attr):
| if (attr == 'close'):
return self.__close
elif (attr == 'transport'):
return self.__transport
raise AttributeError(('Attribute %r not found' % (attr,)))
|
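A sketch of the __call__ workaround on the Python 2 xmlrpclib ServerProxy; the URL is a placeholder and no server needs to be running for these calls:

```python
import xmlrpclib  # Python 2 module name; "xmlrpc.client" in Python 3

proxy = xmlrpclib.ServerProxy("http://localhost:8000/")
# Ordinary attribute access is reserved for remote method names, so the two
# special attributes are fetched by calling the proxy itself:
transport = proxy("transport")  # the Transport instance in use
proxy("close")()                # close the underlying connection
```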
'add_argument(dest, ..., name=value, ...)
add_argument(option_string, option_string, ..., name=value, ...)'
| def add_argument(self, *args, **kwargs):
| chars = self.prefix_chars
if ((not args) or ((len(args) == 1) and (args[0][0] not in chars))):
if (args and ('dest' in kwargs)):
raise ValueError('dest supplied twice for positional argument')
kwargs = self._get_positional_kwargs(*args, **kwargs)
else:
kwargs = self._get_optional_kwargs(*args, **kwargs)
if ('default' not in kwargs):
dest = kwargs['dest']
if (dest in self._defaults):
kwargs['default'] = self._defaults[dest]
elif (self.argument_default is not None):
kwargs['default'] = self.argument_default
action_class = self._pop_action_class(kwargs)
if (not _callable(action_class)):
raise ValueError(('unknown action "%s"' % (action_class,)))
action = action_class(**kwargs)
type_func = self._registry_get('type', action.type, action.type)
if (not _callable(type_func)):
raise ValueError(('%r is not callable' % (type_func,)))
if hasattr(self, '_get_formatter'):
try:
self._get_formatter()._format_args(action, None)
except TypeError:
raise ValueError('length of metavar tuple does not match nargs')
return self._add_action(action)
|
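A sketch of the two add_argument() call forms distinguished above (positional name versus option strings):

```python
import argparse

parser = argparse.ArgumentParser(prog="demo")
# Positional: a single name with no prefix character; dest is taken from it.
parser.add_argument("path")
# Optional: one or more option strings; dest defaults to the long option name.
parser.add_argument("-v", "--verbose", action="store_true", default=False)

args = parser.parse_args(["data.txt", "--verbose"])
print("%s %s" % (args.path, args.verbose))  # data.txt True
```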
'error(message: string)
Prints a usage message incorporating the message to stderr and
exits.
If you override this in a subclass, it should not return -- it
should either exit or raise an exception.'
| def error(self, message):
| self.print_usage(_sys.stderr)
self.exit(2, (_('%s: error: %s\n') % (self.prog, message)))
|
'Construct a _Stream object.'
| def __init__(self, name, mode, comptype, fileobj, bufsize):
| self._extfileobj = True
if (fileobj is None):
fileobj = _LowLevelFile(name, mode)
self._extfileobj = False
if (comptype == '*'):
fileobj = _StreamProxy(fileobj)
comptype = fileobj.getcomptype()
self.name = (name or '')
self.mode = mode
self.comptype = comptype
self.fileobj = fileobj
self.bufsize = bufsize
self.buf = ''
self.pos = 0L
self.closed = False
try:
if (comptype == 'gz'):
try:
import zlib
except ImportError:
raise CompressionError('zlib module is not available')
self.zlib = zlib
self.crc = (zlib.crc32('') & 4294967295L)
if (mode == 'r'):
self._init_read_gz()
else:
self._init_write_gz()
elif (comptype == 'bz2'):
try:
import bz2
except ImportError:
raise CompressionError('bz2 module is not available')
if (mode == 'r'):
self.dbuf = ''
self.cmp = bz2.BZ2Decompressor()
else:
self.cmp = bz2.BZ2Compressor()
except:
if (not self._extfileobj):
self.fileobj.close()
self.closed = True
raise
|
'Initialize for writing with gzip compression.'
| def _init_write_gz(self):
| self.cmp = self.zlib.compressobj(9, self.zlib.DEFLATED, (- self.zlib.MAX_WBITS), self.zlib.DEF_MEM_LEVEL, 0)
timestamp = struct.pack('<L', long(time.time()))
self.__write(('\x1f\x8b\x08\x08%s\x02\xff' % timestamp))
if (type(self.name) is unicode):
self.name = self.name.encode('iso-8859-1', 'replace')
if self.name.endswith('.gz'):
self.name = self.name[:(-3)]
self.__write((self.name + NUL))
|
'Write string s to the stream.'
| def write(self, s):
| if (self.comptype == 'gz'):
self.crc = (self.zlib.crc32(s, self.crc) & 4294967295L)
self.pos += len(s)
if (self.comptype != 'tar'):
s = self.cmp.compress(s)
self.__write(s)
|
'Write string s to the stream if a whole new block
is ready to be written.'
| def __write(self, s):
| self.buf += s
while (len(self.buf) > self.bufsize):
self.fileobj.write(self.buf[:self.bufsize])
self.buf = self.buf[self.bufsize:]
|
'Close the _Stream object. No operation should be
done on it afterwards.'
| def close(self):
| if self.closed:
return
self.closed = True
try:
if ((self.mode == 'w') and (self.comptype != 'tar')):
self.buf += self.cmp.flush()
if ((self.mode == 'w') and self.buf):
self.fileobj.write(self.buf)
self.buf = ''
if (self.comptype == 'gz'):
self.fileobj.write(struct.pack('<L', (self.crc & 4294967295L)))
self.fileobj.write(struct.pack('<L', (self.pos & 4294967295L)))
finally:
if (not self._extfileobj):
self.fileobj.close()
|
'Initialize for reading a gzip compressed fileobj.'
| def _init_read_gz(self):
| self.cmp = self.zlib.decompressobj((- self.zlib.MAX_WBITS))
self.dbuf = ''
if (self.__read(2) != '\x1f\x8b'):
raise ReadError('not a gzip file')
if (self.__read(1) != '\x08'):
raise CompressionError('unsupported compression method')
flag = ord(self.__read(1))
self.__read(6)
if (flag & 4):
xlen = (ord(self.__read(1)) + (256 * ord(self.__read(1))))
self.read(xlen)
if (flag & 8):
while True:
s = self.__read(1)
if ((not s) or (s == NUL)):
break
if (flag & 16):
while True:
s = self.__read(1)
if ((not s) or (s == NUL)):
break
if (flag & 2):
self.__read(2)
|
'Return the stream\'s file pointer position.'
| def tell(self):
| return self.pos
|
'Set the stream\'s file pointer to pos. Negative seeking
is forbidden.'
| def seek(self, pos=0):
| if ((pos - self.pos) >= 0):
(blocks, remainder) = divmod((pos - self.pos), self.bufsize)
for i in xrange(blocks):
self.read(self.bufsize)
self.read(remainder)
else:
raise StreamError('seeking backwards is not allowed')
return self.pos
|
'Return the next size number of bytes from the stream.
If size is not defined, return all bytes of the stream
up to EOF.'
| def read(self, size=None):
| if (size is None):
t = []
while True:
buf = self._read(self.bufsize)
if (not buf):
break
t.append(buf)
buf = ''.join(t)
else:
buf = self._read(size)
self.pos += len(buf)
return buf
|
'Return size bytes from the stream.'
| def _read(self, size):
| if (self.comptype == 'tar'):
return self.__read(size)
c = len(self.dbuf)
t = [self.dbuf]
while (c < size):
buf = self.__read(self.bufsize)
if (not buf):
break
try:
buf = self.cmp.decompress(buf)
except IOError:
raise ReadError('invalid compressed data')
t.append(buf)
c += len(buf)
t = ''.join(t)
self.dbuf = t[size:]
return t[:size]
|
'Return size bytes from stream. If internal buffer is empty,
read another block from the stream.'
| def __read(self, size):
| c = len(self.buf)
t = [self.buf]
while (c < size):
buf = self.fileobj.read(self.bufsize)
if (not buf):
break
t.append(buf)
c += len(buf)
t = ''.join(t)
self.buf = t[size:]
return t[:size]
|
'Return the current file position.'
| def tell(self):
| return self.position
|
'Seek to a position in the file.'
| def seek(self, position):
| self.position = position
|
'Read data from the file.'
| def read(self, size=None):
| if (size is None):
size = (self.size - self.position)
else:
size = min(size, (self.size - self.position))
if (self.sparse is None):
return self.readnormal(size)
else:
return self.readsparse(size)
|
'Read operation for regular files.'
| def readnormal(self, size):
| self.fileobj.seek((self.offset + self.position))
self.position += size
return self.__read(size)
|
'Read operation for sparse files.'
| def readsparse(self, size):
| data = []
while (size > 0):
buf = self.readsparsesection(size)
if (not buf):
break
size -= len(buf)
data.append(buf)
return ''.join(data)
|
'Read a single section of a sparse file.'
| def readsparsesection(self, size):
| section = self.sparse.find(self.position)
if (section is None):
return ''
size = min(size, ((section.offset + section.size) - self.position))
if isinstance(section, _data):
realpos = ((section.realpos + self.position) - section.offset)
self.fileobj.seek((self.offset + realpos))
self.position += size
return self.__read(size)
else:
self.position += size
return (NUL * size)
|
'Read at most size bytes from the file. If size is not
present or None, read all data until EOF is reached.'
| def read(self, size=None):
| if self.closed:
raise ValueError('I/O operation on closed file')
buf = ''
if self.buffer:
if (size is None):
buf = self.buffer
self.buffer = ''
else:
buf = self.buffer[:size]
self.buffer = self.buffer[size:]
if (size is None):
buf += self.fileobj.read()
else:
buf += self.fileobj.read((size - len(buf)))
self.position += len(buf)
return buf
|
'Read one entire line from the file. If size is present
and non-negative, return a string with at most that
size, which may be an incomplete line.'
| def readline(self, size=(-1)):
| if self.closed:
raise ValueError('I/O operation on closed file')
if ('\n' in self.buffer):
pos = (self.buffer.find('\n') + 1)
else:
buffers = [self.buffer]
while True:
buf = self.fileobj.read(self.blocksize)
buffers.append(buf)
if ((not buf) or ('\n' in buf)):
self.buffer = ''.join(buffers)
pos = (self.buffer.find('\n') + 1)
if (pos == 0):
pos = len(self.buffer)
break
if (size != (-1)):
pos = min(size, pos)
buf = self.buffer[:pos]
self.buffer = self.buffer[pos:]
self.position += len(buf)
return buf
|
'Return a list with all remaining lines.'
| def readlines(self):
| result = []
while True:
line = self.readline()
if (not line):
break
result.append(line)
return result
|
'Return the current file position.'
| def tell(self):
| if self.closed:
raise ValueError('I/O operation on closed file')
return self.position
|
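A sketch of reaching these file-object methods through extractfile(), assuming the backup.tar.gz archive written in the previous sketch:

```python
import tarfile

# extractfile() returns a file-like object whose read()/readline()/
# readlines()/tell() methods are shown above; it reads the member's data
# straight out of the (possibly compressed) archive.
tar = tarfile.open("backup.tar.gz", "r:gz")
member = tar.getmembers()[0]
f = tar.extractfile(member)
first_line = f.readline()
rest = f.read()
f.close()
tar.close()
```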