desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def getfile(self):
    """Provide a getfile, since the superclass' does not use this concept."""
    stored = self.file
    return stored
def getreply(self, buffering=False):
    """Compat definition since superclass does not define it.

    Returns a tuple consisting of:
    - server status code (e.g. '200' if all goes well)
    - server "reason" corresponding to status code
    - any RFC822 headers in the response from the server
    """
    try:
        if (not buffering):
            response = self._conn.getresponse()
        else:
            response = self._conn.getresponse(buffering)
    except BadStatusLine as e:
        # Keep the raw socket data available before tearing down the
        # connection, then report failure with a -1 pseudo-status.
        self.file = self._conn.sock.makefile('rb', 0)
        self.close()
        self.headers = None
        return ((-1), e.line, None)
    self.headers = response.msg
    self.file = response.fp
    return (response.status, response.reason, response.msg)
def resolve(self, s):
    """Resolve strings to objects using standard import and attribute syntax.

    ``s`` is a dotted path such as ``package.module.attr``; returns the
    object it names. Raises ValueError (with the original ImportError
    attached) if resolution fails.
    """
    name = s.split('.')
    used = name.pop(0)
    try:
        found = self.importer(used)
        for frag in name:
            used += ('.' + frag)
            try:
                found = getattr(found, frag)
            except AttributeError:
                # Not an attribute of what we have so far: try importing
                # the longer dotted name as a module, then retry.
                self.importer(used)
                found = getattr(found, frag)
        return found
    except ImportError:
        (e, tb) = sys.exc_info()[1:]
        v = ValueError(('Cannot resolve %r: %s' % (s, e)))
        # Emulate PEP 3134-style exception chaining on Python 2.
        (v.__cause__, v.__traceback__) = (e, tb)
        raise v
def ext_convert(self, value):
    """Default converter for the ext:// protocol."""
    resolver = self.resolve
    return resolver(value)
def cfg_convert(self, value):
    """Default converter for the cfg:// protocol.

    Walks ``value`` (e.g. ``handlers.console[0].level``) through the
    configuration dict using the class's WORD/DOT/INDEX patterns.
    """
    rest = value
    m = self.WORD_PATTERN.match(rest)
    if (m is None):
        raise ValueError(('Unable to convert %r' % value))
    else:
        rest = rest[m.end():]
        d = self.config[m.groups()[0]]
        while rest:
            m = self.DOT_PATTERN.match(rest)
            if m:
                # Attribute-style access: .name
                d = d[m.groups()[0]]
            else:
                m = self.INDEX_PATTERN.match(rest)
                if m:
                    # Index-style access: [idx]
                    idx = m.groups()[0]
                    if (not self.DIGIT_PATTERN.match(idx)):
                        d = d[idx]
                    else:
                        try:
                            # Try an int first - most container indices
                            # are numeric; fall back to the string key.
                            n = int(idx)
                            d = d[n]
                        except TypeError:
                            d = d[idx]
            if m:
                rest = rest[m.end():]
            else:
                raise ValueError(('Unable to convert %r at %r' % (value, rest)))
    return d
def convert(self, value):
    """Convert values to an appropriate type.

    dicts, lists and tuples are replaced by their converting
    alternatives. Strings are checked to see if they have a conversion
    format and are converted if they do.
    """
    if ((not isinstance(value, ConvertingDict)) and isinstance(value, dict)):
        value = ConvertingDict(value)
        value.configurator = self
    elif ((not isinstance(value, ConvertingList)) and isinstance(value, list)):
        value = ConvertingList(value)
        value.configurator = self
    elif ((not isinstance(value, ConvertingTuple)) and isinstance(value, tuple)):
        value = ConvertingTuple(value)
        value.configurator = self
    elif isinstance(value, basestring):
        # Strings may carry a "prefix://suffix" conversion marker.
        m = self.CONVERT_PATTERN.match(value)
        if m:
            d = m.groupdict()
            prefix = d['prefix']
            converter = self.value_converters.get(prefix, None)
            if converter:
                suffix = d['suffix']
                converter = getattr(self, converter)
                value = converter(suffix)
    return value
def configure_custom(self, config):
    """Configure an object with a user-supplied factory.

    The factory is given by the special '()' key; '.' holds attributes
    to set on the result; remaining valid identifiers become kwargs.
    """
    c = config.pop('()')
    if ((not hasattr(c, '__call__')) and hasattr(types, 'ClassType') and (type(c) != types.ClassType)):
        # Not callable (and not an old-style class): treat it as a
        # dotted path and resolve it to the actual factory.
        c = self.resolve(c)
    props = config.pop('.', None)
    # Only pass keys that are valid Python identifiers as kwargs.
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    result = c(**kwargs)
    if props:
        for (name, value) in props.items():
            setattr(result, name, value)
    return result
def as_tuple(self, value):
    """Utility function which converts lists to tuples."""
    if isinstance(value, list):
        return tuple(value)
    return value
def configure(self):
    """Do the configuration.

    Validates the schema version, then either applies an incremental
    update (levels of existing handlers/loggers only) or performs a full
    reconfiguration: formatters, filters, handlers (with deferral for
    memory-handler targets), loggers and finally the root logger.
    """
    config = self.config
    if ('version' not in config):
        raise ValueError("dictionary doesn't specify a version")
    if (config['version'] != 1):
        raise ValueError(('Unsupported version: %s' % config['version']))
    incremental = config.pop('incremental', False)
    EMPTY_DICT = {}
    # Hold the module lock for the whole reconfiguration.
    logging._acquireLock()
    try:
        if incremental:
            handlers = config.get('handlers', EMPTY_DICT)
            for name in handlers:
                if (name not in logging._handlers):
                    raise ValueError(('No handler found with name %r' % name))
                else:
                    try:
                        handler = logging._handlers[name]
                        handler_config = handlers[name]
                        level = handler_config.get('level', None)
                        if level:
                            handler.setLevel(logging._checkLevel(level))
                    except StandardError as e:
                        raise ValueError(('Unable to configure handler %r: %s' % (name, e)))
            loggers = config.get('loggers', EMPTY_DICT)
            for name in loggers:
                try:
                    self.configure_logger(name, loggers[name], True)
                except StandardError as e:
                    raise ValueError(('Unable to configure logger %r: %s' % (name, e)))
            root = config.get('root', None)
            if root:
                try:
                    self.configure_root(root, True)
                except StandardError as e:
                    raise ValueError(('Unable to configure root logger: %s' % e))
        else:
            disable_existing = config.pop('disable_existing_loggers', True)
            # Full reconfiguration: drop all previously registered handlers.
            logging._handlers.clear()
            del logging._handlerList[:]
            formatters = config.get('formatters', EMPTY_DICT)
            for name in formatters:
                try:
                    formatters[name] = self.configure_formatter(formatters[name])
                except StandardError as e:
                    raise ValueError(('Unable to configure formatter %r: %s' % (name, e)))
            filters = config.get('filters', EMPTY_DICT)
            for name in filters:
                try:
                    filters[name] = self.configure_filter(filters[name])
                except StandardError as e:
                    raise ValueError(('Unable to configure filter %r: %s' % (name, e)))
            # Configure handlers in sorted order so deferral of
            # memory-handler targets is deterministic.
            handlers = config.get('handlers', EMPTY_DICT)
            deferred = []
            for name in sorted(handlers):
                try:
                    handler = self.configure_handler(handlers[name])
                    handler.name = name
                    handlers[name] = handler
                except StandardError as e:
                    if ('target not configured yet' in str(e)):
                        # Target handler not built yet; retry below.
                        deferred.append(name)
                    else:
                        raise ValueError(('Unable to configure handler %r: %s' % (name, e)))
            for name in deferred:
                try:
                    handler = self.configure_handler(handlers[name])
                    handler.name = name
                    handlers[name] = handler
                except StandardError as e:
                    raise ValueError(('Unable to configure handler %r: %s' % (name, e)))
            root = logging.root
            # Track existing loggers so we can disable (or reset) those
            # not named in the new configuration.
            existing = root.manager.loggerDict.keys()
            existing.sort()
            child_loggers = []
            loggers = config.get('loggers', EMPTY_DICT)
            for name in loggers:
                name = _encoded(name)
                if (name in existing):
                    i = existing.index(name)
                    # Collect children of this logger: they follow it in
                    # the sorted list, sharing the "name." prefix.
                    prefixed = (name + '.')
                    pflen = len(prefixed)
                    num_existing = len(existing)
                    i = (i + 1)
                    while ((i < num_existing) and (existing[i][:pflen] == prefixed)):
                        child_loggers.append(existing[i])
                        i = (i + 1)
                    existing.remove(name)
                try:
                    self.configure_logger(name, loggers[name])
                except StandardError as e:
                    raise ValueError(('Unable to configure logger %r: %s' % (name, e)))
            for log in existing:
                logger = root.manager.loggerDict[log]
                if (log in child_loggers):
                    # Children of configured loggers are reset, not disabled.
                    logger.level = logging.NOTSET
                    logger.handlers = []
                    logger.propagate = True
                elif disable_existing:
                    logger.disabled = True
            root = config.get('root', None)
            if root:
                try:
                    self.configure_root(root)
                except StandardError as e:
                    raise ValueError(('Unable to configure root logger: %s' % e))
    finally:
        logging._releaseLock()
def configure_formatter(self, config):
    """Configure a formatter from a dictionary."""
    if ('()' in config):
        factory = config['()']
        try:
            result = self.configure_custom(config)
        except TypeError as te:
            if ("'format'" not in str(te)):
                raise
            # logging.Formatter takes 'fmt', not 'format': translate the
            # keyword, restore the factory (popped by the failed call)
            # and retry.
            config['fmt'] = config.pop('format')
            config['()'] = factory
            result = self.configure_custom(config)
    else:
        fmt = config.get('format', None)
        dfmt = config.get('datefmt', None)
        result = logging.Formatter(fmt, dfmt)
    return result
def configure_filter(self, config):
    """Configure a filter from a dictionary."""
    if '()' in config:
        return self.configure_custom(config)
    return logging.Filter(config.get('name', ''))
def add_filters(self, filterer, filters):
    """Add filters to a filterer from a list of names."""
    for filter_name in filters:
        try:
            filterer.addFilter(self.config['filters'][filter_name])
        except StandardError as e:
            raise ValueError(('Unable to add filter %r: %s' % (filter_name, e)))
def configure_handler(self, config):
    """Configure a handler from a dictionary.

    Raises StandardError('target not configured yet') via ValueError
    wrapping when a MemoryHandler's target has not been built, so the
    caller (configure) can defer this handler.
    """
    formatter = config.pop('formatter', None)
    if formatter:
        try:
            # Replace the formatter name with the configured instance.
            formatter = self.config['formatters'][formatter]
        except StandardError as e:
            raise ValueError(('Unable to set formatter %r: %s' % (formatter, e)))
    level = config.pop('level', None)
    filters = config.pop('filters', None)
    if ('()' in config):
        c = config.pop('()')
        if ((not hasattr(c, '__call__')) and hasattr(types, 'ClassType') and (type(c) != types.ClassType)):
            c = self.resolve(c)
        factory = c
    else:
        cname = config.pop('class')
        klass = self.resolve(cname)
        if (issubclass(klass, logging.handlers.MemoryHandler) and ('target' in config)):
            try:
                th = self.config['handlers'][config['target']]
                if (not isinstance(th, logging.Handler)):
                    # Target handler not configured yet: restore the
                    # popped 'class' key and signal deferral.
                    config['class'] = cname
                    raise StandardError('target not configured yet')
                config['target'] = th
            except StandardError as e:
                raise ValueError(('Unable to set target handler %r: %s' % (config['target'], e)))
        elif (issubclass(klass, logging.handlers.SMTPHandler) and ('mailhost' in config)):
            # SMTPHandler accepts a (host, port) tuple.
            config['mailhost'] = self.as_tuple(config['mailhost'])
        elif (issubclass(klass, logging.handlers.SysLogHandler) and ('address' in config)):
            config['address'] = self.as_tuple(config['address'])
        factory = klass
    kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
    try:
        result = factory(**kwargs)
    except TypeError as te:
        if ("'stream'" not in str(te)):
            raise
        # Older handler signatures use 'strm' instead of 'stream'.
        kwargs['strm'] = kwargs.pop('stream')
        result = factory(**kwargs)
    if formatter:
        result.setFormatter(formatter)
    if (level is not None):
        result.setLevel(logging._checkLevel(level))
    if filters:
        self.add_filters(result, filters)
    return result
def add_handlers(self, logger, handlers):
    """Add handlers to a logger from a list of names."""
    for handler_name in handlers:
        try:
            logger.addHandler(self.config['handlers'][handler_name])
        except StandardError as e:
            raise ValueError(('Unable to add handler %r: %s' % (handler_name, e)))
def common_logger_config(self, logger, config, incremental=False):
    """Perform configuration which is common to root and non-root loggers.

    Sets the level; in non-incremental mode also replaces the logger's
    handlers and filters with those named in ``config``.
    """
    level = config.get('level', None)
    if (level is not None):
        logger.setLevel(logging._checkLevel(level))
    if (not incremental):
        # Remove any existing handlers (iterate over a copy).
        for h in logger.handlers[:]:
            logger.removeHandler(h)
        handlers = config.get('handlers', None)
        if handlers:
            self.add_handlers(logger, handlers)
        filters = config.get('filters', None)
        if filters:
            self.add_filters(logger, filters)
def configure_logger(self, name, config, incremental=False):
    """Configure a non-root logger from a dictionary."""
    log = logging.getLogger(name)
    self.common_logger_config(log, config, incremental)
    prop = config.get('propagate', None)
    if prop is not None:
        log.propagate = prop
def configure_root(self, config, incremental=False):
    """Configure a root logger from a dictionary."""
    self.common_logger_config(logging.getLogger(), config, incremental)
def __init__(self, filename, mode, encoding=None, delay=0):
    """Use the specified filename for streamed logging."""
    # Without the codecs module, encoded streams can't be opened.
    if (codecs is None):
        encoding = None
    logging.FileHandler.__init__(self, filename, mode, encoding, delay)
    # Remember mode/encoding so rollover can reopen the file the same way.
    self.mode = mode
    self.encoding = encoding
def emit(self, record):
    """Emit a record.

    Output the record to the file, catering for rollover as described
    in doRollover().
    """
    try:
        if self.shouldRollover(record):
            self.doRollover()
        logging.FileHandler.emit(self, record)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        # Never let a logging failure propagate into the application.
        self.handleError(record)
def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
    """Open the specified file and use it as the stream for logging.

    By default, the file grows indefinitely. Specify maxBytes and
    backupCount to allow rollover at a predetermined size: when the file
    is nearly maxBytes long it is closed and renamed to "<name>.1", with
    existing "<name>.1", "<name>.2", ... shifted up, keeping at most
    backupCount backups. If maxBytes is zero, rollover never occurs.
    """
    # Rollover only makes sense in append mode, so force it.
    if (maxBytes > 0):
        mode = 'a'
    BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
    self.maxBytes = maxBytes
    self.backupCount = backupCount
def doRollover(self):
    """Do a rollover, as described in __init__()."""
    if self.stream:
        self.stream.close()
        self.stream = None
    if (self.backupCount > 0):
        # Shift name.1 -> name.2, ..., dropping the oldest backup.
        for i in range((self.backupCount - 1), 0, (-1)):
            sfn = ('%s.%d' % (self.baseFilename, i))
            dfn = ('%s.%d' % (self.baseFilename, (i + 1)))
            if os.path.exists(sfn):
                if os.path.exists(dfn):
                    os.remove(dfn)
                os.rename(sfn, dfn)
        # The current log becomes name.1.
        dfn = (self.baseFilename + '.1')
        if os.path.exists(dfn):
            os.remove(dfn)
        if os.path.exists(self.baseFilename):
            os.rename(self.baseFilename, dfn)
    if (not self.delay):
        self.stream = self._open()
def shouldRollover(self, record):
    """Determine if rollover should occur.

    Basically, see if the supplied record would cause the file to exceed
    the size limit we have.
    """
    if (self.stream is None):
        # Delayed open: the stream may not exist yet.
        self.stream = self._open()
    if (self.maxBytes > 0):
        msg = ('%s\n' % self.format(record))
        # Seek to end to get the current size before comparing.
        self.stream.seek(0, 2)
        if ((self.stream.tell() + len(msg)) >= self.maxBytes):
            return 1
    return 0
def computeRollover(self, currentTime):
    """Work out the rollover time based on the specified time."""
    result = (currentTime + self.interval)
    # MIDNIGHT and W* rollovers happen at midnight, not interval-aligned.
    if ((self.when == 'MIDNIGHT') or self.when.startswith('W')):
        if self.utc:
            t = time.gmtime(currentTime)
        else:
            t = time.localtime(currentTime)
        currentHour = t[3]
        currentMinute = t[4]
        currentSecond = t[5]
        # Seconds remaining until the next midnight.
        r = (_MIDNIGHT - ((((currentHour * 60) + currentMinute) * 60) + currentSecond))
        result = (currentTime + r)
        if self.when.startswith('W'):
            day = t[6]
            if (day != self.dayOfWeek):
                # Days until the configured weekday comes around again.
                if (day < self.dayOfWeek):
                    daysToWait = (self.dayOfWeek - day)
                else:
                    daysToWait = (((6 - day) + self.dayOfWeek) + 1)
                newRolloverAt = (result + (daysToWait * ((60 * 60) * 24)))
                if (not self.utc):
                    # Compensate if DST changes between now and rollover.
                    dstNow = t[(-1)]
                    dstAtRollover = time.localtime(newRolloverAt)[(-1)]
                    if (dstNow != dstAtRollover):
                        if (not dstNow):
                            addend = (-3600)
                        else:
                            addend = 3600
                        newRolloverAt += addend
                result = newRolloverAt
    return result
def shouldRollover(self, record):
    """Determine if rollover should occur.

    record is not used, as we are just comparing times, but it is needed
    so the method signatures are the same.
    """
    now = int(time.time())
    return 1 if now >= self.rolloverAt else 0
def getFilesToDelete(self):
    """Determine the files to delete when rolling over.

    More specific than the earlier method, which just used glob.glob().
    """
    dir_name, base_name = os.path.split(self.baseFilename)
    prefix = base_name + '.'
    plen = len(prefix)
    candidates = []
    for entry in os.listdir(dir_name):
        if entry[:plen] == prefix:
            suffix = entry[plen:]
            if self.extMatch.match(suffix):
                candidates.append(os.path.join(dir_name, entry))
    candidates.sort()
    if len(candidates) < self.backupCount:
        return []
    return candidates[:len(candidates) - self.backupCount]
def doRollover(self):
    """Do a rollover; in this case, a date/time stamp is appended to the
    filename when the rollover happens.

    The file is named for the start of the interval, not the current
    time. If there is a backup count, get a list of matching filenames,
    sort them and remove the ones with the oldest suffixes.
    """
    if self.stream:
        self.stream.close()
        self.stream = None
    currentTime = int(time.time())
    dstNow = time.localtime(currentTime)[(-1)]
    # Name the archive for the start of the interval just ended.
    t = (self.rolloverAt - self.interval)
    if self.utc:
        timeTuple = time.gmtime(t)
    else:
        timeTuple = time.localtime(t)
        dstThen = timeTuple[(-1)]
        if (dstNow != dstThen):
            # DST changed during the interval; shift the timestamp so
            # the suffix reflects local wall-clock time consistently.
            if dstNow:
                addend = 3600
            else:
                addend = (-3600)
            timeTuple = time.localtime((t + addend))
    dfn = ((self.baseFilename + '.') + time.strftime(self.suffix, timeTuple))
    if os.path.exists(dfn):
        os.remove(dfn)
    if os.path.exists(self.baseFilename):
        os.rename(self.baseFilename, dfn)
    if (self.backupCount > 0):
        for s in self.getFilesToDelete():
            os.remove(s)
    if (not self.delay):
        self.stream = self._open()
    newRolloverAt = self.computeRollover(currentTime)
    # Skip any rollover times already in the past.
    while (newRolloverAt <= currentTime):
        newRolloverAt = (newRolloverAt + self.interval)
    if (((self.when == 'MIDNIGHT') or self.when.startswith('W')) and (not self.utc)):
        # Adjust the next rollover if DST changes before it.
        dstAtRollover = time.localtime(newRolloverAt)[(-1)]
        if (dstNow != dstAtRollover):
            if (not dstNow):
                addend = (-3600)
            else:
                addend = 3600
            newRolloverAt += addend
    self.rolloverAt = newRolloverAt
def emit(self, record):
    """Emit a record.

    First check if the underlying file has changed, and if it has, close
    the old stream and reopen the file to get the current stream.
    """
    try:
        sres = os.stat(self.baseFilename)
    except OSError as err:
        if (err.errno == errno.ENOENT):
            # File was removed; treat as changed.
            sres = None
        else:
            raise
    # Compare device/inode to detect the file being moved or recreated
    # (e.g. by an external log-rotation tool).
    if ((not sres) or (sres[ST_DEV] != self.dev) or (sres[ST_INO] != self.ino)):
        if (self.stream is not None):
            self.stream.flush()
            self.stream.close()
            self.stream = None
            self.stream = self._open()
            self._statstream()
    logging.FileHandler.emit(self, record)
def __init__(self, host, port):
    """Initializes the handler with a specific host address and port.

    The attribute 'closeOnError' is set to 1 - which means that if a
    socket error occurs, the socket is silently closed and then reopened
    on the next logging call.
    """
    logging.Handler.__init__(self)
    self.host = host
    self.port = port
    self.sock = None
    self.closeOnError = 0
    # Exponential-backoff state for reconnection attempts.
    self.retryTime = None
    self.retryStart = 1.0
    self.retryMax = 30.0
    self.retryFactor = 2.0
def makeSocket(self, timeout=1):
    """A factory method which allows subclasses to define the precise
    type of socket they want.
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    if hasattr(sock, 'settimeout'):
        sock.settimeout(timeout)
    sock.connect((self.host, self.port))
    return sock
def createSocket(self):
    """Try to create a socket, using an exponential backoff with a max
    retry time.

    Thanks to Robert Olson for the original patch (SF #815911) which has
    been slightly refactored.
    """
    now = time.time()
    if (self.retryTime is None):
        # First failure (or last attempt succeeded): try immediately.
        attempt = 1
    else:
        attempt = (now >= self.retryTime)
    if attempt:
        try:
            self.sock = self.makeSocket()
            self.retryTime = None
        except socket.error:
            # Connection failed: grow the retry period geometrically,
            # capped at retryMax.
            if (self.retryTime is None):
                self.retryPeriod = self.retryStart
            else:
                self.retryPeriod = (self.retryPeriod * self.retryFactor)
                if (self.retryPeriod > self.retryMax):
                    self.retryPeriod = self.retryMax
            self.retryTime = (now + self.retryPeriod)
def send(self, s):
    """Send a pickled string to the socket.

    This function allows for partial sends which can happen when the
    network is busy.
    """
    if (self.sock is None):
        self.createSocket()
    # createSocket may fail silently (backoff); only send if connected.
    if self.sock:
        try:
            if hasattr(self.sock, 'sendall'):
                self.sock.sendall(s)
            else:
                # Manual partial-send loop for sockets without sendall.
                sentsofar = 0
                left = len(s)
                while (left > 0):
                    sent = self.sock.send(s[sentsofar:])
                    sentsofar = (sentsofar + sent)
                    left = (left - sent)
        except socket.error:
            # Drop the connection; it will be re-created next time.
            self.sock.close()
            self.sock = None
def makePickle(self, record):
    """Pickles the record in binary format with a length prefix, and
    returns it ready for transmission across the socket.
    """
    ei = record.exc_info
    if ei:
        # Format once to cache exc_text, then strip the unpicklable
        # traceback object before pickling.
        dummy = self.format(record)
        record.exc_info = None
    d = dict(record.__dict__)
    # Send the merged message; args may not be picklable.
    d['msg'] = record.getMessage()
    d['args'] = None
    s = cPickle.dumps(d, 1)
    if ei:
        # Restore exc_info for any other handlers.
        record.exc_info = ei
    # 4-byte big-endian length prefix.
    slen = struct.pack('>L', len(s))
    return (slen + s)
def handleError(self, record):
    """Handle an error during logging.

    Most likely cause - connection lost. Close the socket so that we can
    retry on the next event.
    """
    if self.closeOnError and self.sock:
        self.sock.close()
        self.sock = None
    else:
        logging.Handler.handleError(self, record)
def emit(self, record):
    """Emit a record.

    Pickles the record and writes it to the socket in binary format. If
    there is an error with the socket, silently drop the packet. If
    there was a problem with the socket, re-establishes the socket.
    """
    try:
        s = self.makePickle(record)
        self.send(s)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
def close(self):
    """Closes the socket."""
    self.acquire()
    try:
        current = self.sock
        if current:
            self.sock = None
            current.close()
    finally:
        self.release()
    logging.Handler.close(self)
def __init__(self, host, port):
    """Initializes the handler with a specific host address and port."""
    SocketHandler.__init__(self, host, port)
    self.closeOnError = 0
def makeSocket(self):
    """The factory method of SocketHandler is here overridden to create
    a UDP socket (SOCK_DGRAM).
    """
    return socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def send(self, s):
    """Send a pickled string to a socket.

    This function no longer allows for partial sends which can happen
    when the network is busy - UDP does not guarantee delivery and can
    deliver packets out of sequence.
    """
    if self.sock is None:
        self.createSocket()
    self.sock.sendto(s, (self.host, self.port))
def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER, socktype=None):
    """Initialize a handler.

    If address is specified as a string, a UNIX socket is used. To log
    to a local syslogd, SysLogHandler(address="/dev/log") can be used.
    If facility is not specified, LOG_USER is used. If socktype is
    specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
    socket type will be used. For Unix sockets, you can also specify a
    socktype of None, in which case socket.SOCK_DGRAM will be used,
    falling back to socket.SOCK_STREAM.
    """
    logging.Handler.__init__(self)
    self.address = address
    self.facility = facility
    self.socktype = socktype
    if isinstance(address, basestring):
        # String address means a Unix domain socket path.
        self.unixsocket = 1
        self._connect_unixsocket(address)
    else:
        self.unixsocket = 0
        if (socktype is None):
            socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_INET, socktype)
        if (socktype == socket.SOCK_STREAM):
            self.socket.connect(address)
        self.socktype = socktype
    self.formatter = None
def encodePriority(self, facility, priority):
    """Encode the facility and priority.

    You can pass in strings or integers - if strings are passed, the
    facility_names and priority_names mapping dictionaries are used to
    convert them to integers.
    """
    if isinstance(facility, basestring):
        facility = self.facility_names[facility]
    if isinstance(priority, basestring):
        priority = self.priority_names[priority]
    # Syslog PRI value: facility in the high bits, severity in the low 3.
    return ((facility << 3) | priority)
def close(self):
    """Closes the socket."""
    self.acquire()
    try:
        # Only Unix-domain sockets are owned exclusively by this handler.
        if self.unixsocket:
            self.socket.close()
    finally:
        self.release()
    logging.Handler.close(self)
def mapPriority(self, levelName):
    """Map a logging level name to a key in the priority_names map.

    This is useful when custom levels are being used, and when a
    straightforward lowercase mapping fails because of locale-specific
    issues (see SF #1524081).
    """
    mapping = self.priority_map
    return mapping.get(levelName, 'warning')
def emit(self, record):
    """Emit a record.

    The record is formatted, and then sent to the syslog server. If
    exception information is present, it is NOT sent to the server.
    """
    try:
        msg = (self.format(record) + '\x00')
        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        prio = ('<%d>' % self.encodePriority(self.facility, self.mapPriority(record.levelname)))
        if (type(msg) is unicode):
            msg = msg.encode('utf-8')
        msg = (prio + msg)
        if self.unixsocket:
            try:
                self.socket.send(msg)
            except socket.error:
                # The local syslog daemon may have restarted; reconnect
                # and retry once.
                self.socket.close()
                self._connect_unixsocket(self.address)
                self.socket.send(msg)
        elif (self.socktype == socket.SOCK_DGRAM):
            self.socket.sendto(msg, self.address)
        else:
            self.socket.sendall(msg)
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None, secure=None):
    """Initialize the handler.

    Initialize the instance with the from and to addresses and subject
    line of the email. To specify a non-standard SMTP port, use the
    (host, port) tuple format for the mailhost argument. To specify
    authentication credentials, supply a (username, password) tuple for
    the credentials argument. To specify the use of a secure protocol
    (TLS), pass in a tuple for the secure argument (only used when
    credentials are supplied): either empty, (keyfile,), or
    (keyfile, certfile) - passed to the `starttls` method.
    """
    logging.Handler.__init__(self)
    if isinstance(mailhost, (list, tuple)):
        (self.mailhost, self.mailport) = mailhost
    else:
        (self.mailhost, self.mailport) = (mailhost, None)
    if isinstance(credentials, (list, tuple)):
        (self.username, self.password) = credentials
    else:
        self.username = None
    self.fromaddr = fromaddr
    # Normalize a single recipient string into a list.
    if isinstance(toaddrs, basestring):
        toaddrs = [toaddrs]
    self.toaddrs = toaddrs
    self.subject = subject
    self.secure = secure
    self._timeout = 5.0
def getSubject(self, record):
    """Determine the subject for the email.

    If you want to specify a subject line which is record-dependent,
    override this method.
    """
    subject = self.subject
    return subject
def emit(self, record):
    """Emit a record.

    Format the record and send it to the specified addressees.
    """
    try:
        import smtplib
        from email.utils import formatdate
        port = self.mailport
        if (not port):
            port = smtplib.SMTP_PORT
        smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
        msg = self.format(record)
        # Build a minimal RFC 2822 message by hand.
        msg = ('From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s' % (self.fromaddr, ','.join(self.toaddrs), self.getSubject(record), formatdate(), msg))
        if self.username:
            if (self.secure is not None):
                # Upgrade to TLS before authenticating.
                smtp.ehlo()
                smtp.starttls(*self.secure)
                smtp.ehlo()
            smtp.login(self.username, self.password)
        smtp.sendmail(self.fromaddr, self.toaddrs, msg)
        smtp.quit()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
def getMessageID(self, record):
    """Return the message ID for the event record.

    If you are using your own messages, you could do this by having the
    msg passed to the logger being an ID rather than a formatting
    string. Then, in here, you could use a dictionary lookup to get the
    message ID. This version returns 1, which is the base message ID in
    win32service.pyd.
    """
    return 1
def getEventCategory(self, record):
    """Return the event category for the record.

    Override this if you want to specify your own categories. This
    version returns 0.
    """
    return 0
def getEventType(self, record):
    """Return the event type for the record.

    This version does a mapping using the handler's typemap attribute,
    set up in __init__() with entries for DEBUG, INFO, WARNING, ERROR
    and CRITICAL. For custom levels, override this method or place a
    suitable dictionary in the handler's typemap attribute.
    """
    mapping = self.typemap
    return mapping.get(record.levelno, self.deftype)
def emit(self, record):
    """Emit a record.

    Determine the message ID, event category and event type, then log
    the message in the NT event log.
    """
    if self._welu:
        try:
            # Locals renamed: the originals shadowed the builtins
            # ``id`` and ``type``.
            msg_id = self.getMessageID(record)
            category = self.getEventCategory(record)
            event_type = self.getEventType(record)
            msg = self.format(record)
            self._welu.ReportEvent(self.appname, msg_id, category, event_type, [msg])
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)
def close(self):
    """Clean up this handler.

    You can remove the application name from the registry as a source of
    event log entries. However, if you do this, you will not be able to
    see the events as you intended in the Event Log Viewer - it needs to
    be able to access the registry to get the DLL name.
    """
    logging.Handler.close(self)
def __init__(self, host, url, method='GET'):
    """Initialize the instance with the host, the request URL, and the
    method ("GET" or "POST").
    """
    logging.Handler.__init__(self)
    method = method.upper()
    if (method not in ['GET', 'POST']):
        raise ValueError('method must be GET or POST')
    self.host = host
    self.url = url
    self.method = method
def mapLogRecord(self, record):
    """Default implementation of mapping the log record into a dict that
    is sent as the CGI data. Overwrite in your class.

    Contributed by Franz Glasner.
    """
    attrs = record.__dict__
    return attrs
def emit(self, record):
    """Emit a record.

    Send the record to the Web server as a percent-encoded dictionary.
    """
    try:
        import httplib, urllib
        host = self.host
        h = httplib.HTTP(host)
        url = self.url
        data = urllib.urlencode(self.mapLogRecord(record))
        if (self.method == 'GET'):
            # Append the data as a query string.
            if (url.find('?') >= 0):
                sep = '&'
            else:
                sep = '?'
            url = (url + ('%c%s' % (sep, data)))
        h.putrequest(self.method, url)
        # Strip any :port before sending the Host header.
        i = host.find(':')
        if (i >= 0):
            host = host[:i]
        h.putheader('Host', host)
        if (self.method == 'POST'):
            h.putheader('Content-type', 'application/x-www-form-urlencoded')
            h.putheader('Content-length', str(len(data)))
        h.endheaders((data if (self.method == 'POST') else None))
        h.getreply()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
def __init__(self, capacity):
    """Initialize the handler with the buffer size."""
    logging.Handler.__init__(self)
    # Flush when the buffer reaches this many records.
    self.capacity = capacity
    self.buffer = []
def shouldFlush(self, record):
    """Should the handler flush its buffer?

    Returns true if the buffer is up to capacity. This method can be
    overridden to implement custom flushing strategies.
    """
    return len(self.buffer) >= self.capacity
def emit(self, record):
    """Emit a record.

    Append the record. If shouldFlush() tells us to, call flush() to
    process the buffer.
    """
    self.buffer.append(record)
    if not self.shouldFlush(record):
        return
    self.flush()
def flush(self):
    """Override to implement custom flushing behaviour.

    This version just zaps the buffer to empty.
    """
    self.acquire()
    try:
        # Rebind rather than mutate, matching the original contract.
        self.buffer = []
    finally:
        self.release()
def close(self):
    """Close the handler.

    This version just flushes and chains to the parent class' close().
    """
    try:
        self.flush()
    finally:
        logging.Handler.close(self)
def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
    """Initialize the handler with the buffer size, the level at which
    flushing should occur and an optional target.

    Note that without a target being set either here or via setTarget(),
    a MemoryHandler is no use to anyone!
    """
    BufferingHandler.__init__(self, capacity)
    self.flushLevel = flushLevel
    self.target = target
def shouldFlush(self, record):
    """Check for buffer full or a record at the flushLevel or higher."""
    if len(self.buffer) >= self.capacity:
        return True
    return record.levelno >= self.flushLevel
def setTarget(self, target):
    """Set the target handler for this handler."""
    self.target = target
def flush(self):
    """For a MemoryHandler, flushing means just sending the buffered
    records to the target, if there is one.

    Override if you want different behaviour.
    """
    self.acquire()
    try:
        target = self.target
        if target:
            for buffered in self.buffer:
                target.handle(buffered)
            # Only clear once the records have been handed over.
            self.buffer = []
    finally:
        self.release()
def close(self):
    """Flush, set the target to None and lose the buffer."""
    try:
        self.flush()
    finally:
        self.acquire()
        try:
            self.target = None
            BufferingHandler.close(self)
        finally:
            self.release()
def __init__(self, name, level, pathname, lineno, msg, args, exc_info, func=None):
    """Initialize a logging record with interesting information."""
    ct = time.time()
    self.name = name
    self.msg = msg
    # A single non-empty mapping argument is used directly so that
    # "%(key)s"-style formatting works; an empty dict is left as-is.
    if (args and (len(args) == 1) and isinstance(args[0], collections.Mapping) and args[0]):
        args = args[0]
    self.args = args
    self.levelname = getLevelName(level)
    self.levelno = level
    self.pathname = pathname
    try:
        self.filename = os.path.basename(pathname)
        self.module = os.path.splitext(self.filename)[0]
    except (TypeError, ValueError, AttributeError):
        # pathname isn't a usable path string; keep it verbatim.
        self.filename = pathname
        self.module = 'Unknown module'
    self.exc_info = exc_info
    self.exc_text = None
    self.lineno = lineno
    self.funcName = func
    self.created = ct
    self.msecs = ((ct - long(ct)) * 1000)
    self.relativeCreated = ((self.created - _startTime) * 1000)
    if (logThreads and thread):
        self.thread = thread.get_ident()
        self.threadName = threading.current_thread().name
    else:
        self.thread = None
        self.threadName = None
    if (not logMultiprocessing):
        self.processName = None
    else:
        self.processName = 'MainProcess'
        # Don't import multiprocessing here; only use it if the
        # application has already imported it.
        mp = sys.modules.get('multiprocessing')
        if (mp is not None):
            try:
                self.processName = mp.current_process().name
            except StandardError:
                pass
    if (logProcesses and hasattr(os, 'getpid')):
        self.process = os.getpid()
    else:
        self.process = None
def getMessage(self):
    """Return the message for this LogRecord.

    Return the message for this LogRecord after merging any
    user-supplied arguments with the message.
    """
    if (not _unicode):
        # No unicode support in this build: coerce unconditionally.
        msg = str(self.msg)
    else:
        msg = self.msg
        if (not isinstance(msg, basestring)):
            try:
                msg = str(self.msg)
            except UnicodeError:
                # Defer conversion to when it's actually needed.
                msg = self.msg
    if self.args:
        msg = (msg % self.args)
    return msg
def __init__(self, fmt=None, datefmt=None):
    """Store the format string (defaulting to '%(message)s') and optional datefmt.

    If datefmt is omitted, formatTime() falls back to an ISO8601-like layout.
    """
    self._fmt = fmt or '%(message)s'
    self.datefmt = datefmt
def formatTime(self, record, datefmt=None):
    """Return the record's creation time as formatted text.

    Uses time.strftime with *datefmt* when given; otherwise produces an
    ISO8601-like 'YYYY-MM-DD HH:MM:SS,mmm' string. The creation timestamp
    is converted to a time tuple via the instance's 'converter' attribute
    (time.localtime by default; set to time.gmtime for UTC output).
    """
    timetuple = self.converter(record.created)
    if datefmt:
        return time.strftime(datefmt, timetuple)
    base = time.strftime('%Y-%m-%d %H:%M:%S', timetuple)
    return '%s,%03d' % (base, record.msecs)
def formatException(self, ei):
    """Render an exc_info triple to a string via traceback.print_exception.

    Any single trailing newline produced by the traceback machinery is
    stripped.
    """
    buf = cStringIO.StringIO()
    traceback.print_exception(ei[0], ei[1], ei[2], None, buf)
    text = buf.getvalue()
    buf.close()
    if text.endswith('\n'):
        text = text[:-1]
    return text
def usesTime(self):
    """Return True if the format string references %(asctime)."""
    return '%(asctime)' in self._fmt
def format(self, record):
    """Format the specified record as text.

    The record's __dict__ is used as the operand of a %-formatting
    operation on self._fmt. Beforehand, record.message is computed via
    getMessage(), and record.asctime via formatTime() when the format
    string uses it. Exception info, if present, is formatted once, cached
    on the record as exc_text, and appended on its own line(s).
    """
    record.message = record.getMessage()
    if self.usesTime():
        record.asctime = self.formatTime(record, self.datefmt)
    try:
        s = (self._fmt % record.__dict__)
    except UnicodeDecodeError as e:
        # Py2: a byte-string logger name with non-ASCII bytes can break
        # %-formatting; retry once with the name decoded as UTF-8.
        try:
            record.name = record.name.decode('utf-8')
            s = (self._fmt % record.__dict__)
        except UnicodeDecodeError:
            raise e  # re-raise the original error, not the retry's
    if record.exc_info:
        # Cache the formatted traceback so multiple handlers don't redo it.
        if (not record.exc_text):
            record.exc_text = self.formatException(record.exc_info)
    if record.exc_text:
        if (s[(-1):] != '\n'):
            s = (s + '\n')
        try:
            s = (s + record.exc_text)
        except UnicodeError:
            # exc_text is a byte string that won't coerce; decode it using
            # the filesystem encoding, replacing undecodable bytes.
            s = (s + record.exc_text.decode(sys.getfilesystemencoding(), 'replace'))
    return s
def __init__(self, linefmt=None):
    """Remember the per-record formatter, falling back to the module default."""
    self.linefmt = linefmt or _defaultFormatter
def formatHeader(self, records):
    """Hook for a batch header; the base implementation contributes nothing."""
    return ''
def formatFooter(self, records):
    """Hook for a batch footer; the base implementation contributes nothing."""
    return ''
def format(self, records):
    """Format a batch of records as header + one entry per record + footer.

    An empty batch yields the empty string with no header or footer.
    """
    if not records:
        return ''
    parts = [self.formatHeader(records)]
    for entry in records:
        parts.append(self.linefmt.format(entry))
    parts.append(self.formatFooter(records))
    return ''.join(parts)
def __init__(self, name=''):
    """Record the logger-name prefix this filter passes.

    Events from that logger and its children are allowed through; an empty
    name allows every event.
    """
    self.name = name
    self.nlen = len(name)
def filter(self, record):
    """Return a true value if the record should be logged, false otherwise.

    A record passes when this filter has no name, when the names match
    exactly, or when the record's logger is a dotted child of this name.
    """
    if self.nlen == 0:
        return 1  # unnamed filter: everything passes
    if self.name == record.name:
        return 1
    if record.name.find(self.name, 0, self.nlen) != 0:
        return 0  # record name does not start with our prefix
    # Prefix matches; require a '.' boundary so 'a.b' does not pass 'a.bc'.
    return (record.name[self.nlen] == '.')
def __init__(self):
    """Start with no filters installed."""
    self.filters = []
def addFilter(self, filter):
    """Install *filter* unless it is already present."""
    if filter not in self.filters:
        self.filters.append(filter)
def removeFilter(self, filter):
    """Uninstall *filter* if present; silently ignore unknown filters."""
    if filter in self.filters:
        self.filters.remove(filter)
def filter(self, record):
    """Consult all installed filters; any one of them may veto the record.

    Returns zero if the record should be dropped, non-zero otherwise.
    """
    for f in self.filters:
        if not f.filter(record):
            return 0
    return 1
def __init__(self, level=NOTSET):
    """Initialize the handler: empty filter list, given level, no formatter."""
    Filterer.__init__(self)
    self._name = None
    self.level = _checkLevel(level)
    self.formatter = None
    # Register a weak reference so module shutdown can flush/close us.
    _addHandlerRef(self)
    self.createLock()
def createLock(self):
    """Create a reentrant lock for serializing I/O, or None without thread support."""
    self.lock = threading.RLock() if thread else None
def acquire(self):
    """Take the I/O lock when one exists; no-op otherwise."""
    lock = self.lock
    if lock:
        lock.acquire()
def release(self):
    """Release the I/O lock when one exists; no-op otherwise."""
    lock = self.lock
    if lock:
        lock.release()
def setLevel(self, level):
    """Set this handler's threshold, normalised via _checkLevel."""
    self.level = _checkLevel(level)
def format(self, record):
    """Format *record* with this handler's formatter, or the module default if unset."""
    formatter = self.formatter or _defaultFormatter
    return formatter.format(record)
def emit(self, record):
    """Subclass responsibility: actually output the record.

    The base class cannot know where records go, so it always raises.
    """
    raise NotImplementedError('emit must be implemented by Handler subclasses')
def handle(self, record):
    """Emit *record* if the filters pass it, holding the I/O lock while emitting.

    Returns the filter verdict so callers can tell whether emission happened.
    """
    passed = self.filter(record)
    if passed:
        self.acquire()
        try:
            self.emit(record)
        finally:
            self.release()
    return passed
def setFormatter(self, fmt):
    """Install *fmt* as this handler's formatter."""
    self.formatter = fmt
def flush(self):
    """Base implementation: nothing is buffered, so there is nothing to flush.

    Subclasses that buffer output override this.
    """
    pass
def close(self):
    """Remove this handler from the module-level name->handler map.

    Subclasses should make sure their overridden close() calls this.
    """
    _acquireLock()
    try:
        name = self._name
        if name and name in _handlers:
            del _handlers[name]
    finally:
        _releaseLock()
def handleError(self, record):
    """Report an exception raised during an emit() call.

    When raiseExceptions is false (or stderr is unavailable) the error is
    silently ignored — most applications care about their own errors, not
    the logging system's. *record* is the record being processed when the
    exception occurred.
    """
    if raiseExceptions and sys.stderr:
        ei = sys.exc_info()
        try:
            traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
            sys.stderr.write('Logged from file %s, line %s\n' % (record.filename, record.lineno))
        except IOError:
            pass  # best effort: stderr itself may be broken
        finally:
            del ei  # break the frame/traceback reference cycle
def __init__(self, stream=None):
    """Wrap *stream* (defaulting to sys.stderr) as this handler's destination."""
    Handler.__init__(self)
    self.stream = stream if stream is not None else sys.stderr
def flush(self):
    """Flush the underlying stream, if one exists and supports flushing."""
    self.acquire()
    try:
        stream = self.stream
        if stream and hasattr(stream, 'flush'):
            stream.flush()
    finally:
        self.release()
def emit(self, record):
    """Emit a record to the stream, with a trailing newline.

    The record is formatted via self.format(); exception text, if any, is
    already folded in by the formatter. On Py2, unicode messages are
    written as unicode when the stream advertises an encoding, with
    byte-encoding fallbacks for streams that reject unicode. All errors
    other than KeyboardInterrupt/SystemExit are routed to handleError().
    """
    try:
        msg = self.format(record)
        stream = self.stream
        fs = '%s\n'
        if (not _unicode):
            # Interpreter built without unicode support: plain byte write.
            stream.write((fs % msg))
        else:
            try:
                if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)):
                    ufs = u'%s\n'
                    try:
                        stream.write((ufs % msg))
                    except UnicodeEncodeError:
                        # Stream claims an encoding but can't take unicode
                        # directly (seen with some stream wrappers): encode
                        # explicitly and write bytes.
                        stream.write((ufs % msg).encode(stream.encoding))
                else:
                    stream.write((fs % msg))
            except UnicodeError:
                # Last resort: force UTF-8 bytes.
                stream.write((fs % msg.encode('UTF-8')))
        self.flush()
    except (KeyboardInterrupt, SystemExit):
        raise
    except:
        self.handleError(record)
def __init__(self, filename, mode='a', encoding=None, delay=0):
    """Remember path/mode/encoding and open the file (now, or lazily if *delay*)."""
    if codecs is None:
        encoding = None  # cannot honour an encoding without the codecs module
    self.baseFilename = os.path.abspath(filename)
    self.mode = mode
    self.encoding = encoding
    self.delay = delay
    if delay:
        # Defer opening until the first emit: initialise as a bare Handler
        # with no stream yet.
        Handler.__init__(self)
        self.stream = None
    else:
        StreamHandler.__init__(self, self._open())
def close(self):
    """Close the stream (flushing first) and run StreamHandler.close.

    The nested try/finally blocks guarantee, in order: the stream is
    flushed, the stream attribute is cleared and the file closed even if
    flushing fails, StreamHandler.close() runs even if closing fails, and
    the I/O lock is always released.
    """
    self.acquire()
    try:
        try:
            if self.stream:
                try:
                    self.flush()
                finally:
                    # Detach before closing so concurrent emits see None
                    # rather than a closed file.
                    stream = self.stream
                    self.stream = None
                    if hasattr(stream, 'close'):
                        stream.close()
        finally:
            StreamHandler.close(self)
    finally:
        self.release()
def _open(self):
    """Open baseFilename with the stored mode; use codecs.open when an encoding is set."""
    if self.encoding is None:
        return open(self.baseFilename, self.mode)
    return codecs.open(self.baseFilename, self.mode, self.encoding)
def emit(self, record):
    """Emit via StreamHandler, first opening the file if 'delay' deferred it."""
    if self.stream is None:
        self.stream = self._open()
    StreamHandler.emit(self, record)