desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def do_handshake(self, block=False):
    """Perform a TLS/SSL handshake.

    If the socket is non-blocking (timeout 0.0) and `block` is true,
    temporarily switch to blocking mode for the handshake, restoring the
    original timeout afterwards.  When the context requests hostname
    checking, verify the peer certificate against server_hostname.
    """
    self._check_connected()
    timeout = self.gettimeout()
    try:
        if timeout == 0.0 and block:
            # Caller asked for a blocking handshake on a non-blocking socket.
            self.settimeout(None)
        self._sslobj.do_handshake()
    finally:
        self.settimeout(timeout)
    if self.context.check_hostname:
        if not self.server_hostname:
            raise ValueError('check_hostname needs server_hostname argument')
        match_hostname(self.getpeercert(), self.server_hostname)
def connect(self, addr):
    """Connect to remote ADDR, then wrap the connection in an SSL channel."""
    self._real_connect(addr, False)
def connect_ex(self, addr):
    """Connect to remote ADDR and wrap in SSL, returning an error indicator
    instead of raising (connect_ex-style)."""
    return self._real_connect(addr, True)
def accept(self):
    """Accept a new connection from a remote client.

    Returns (wrapped_socket, address) where the new connection is wrapped
    in a server-side SSL channel using this socket's context.
    """
    newsock, addr = socket.accept(self)
    wrapped = self.context.wrap_socket(
        newsock,
        do_handshake_on_connect=self.do_handshake_on_connect,
        suppress_ragged_eofs=self.suppress_ragged_eofs,
        server_side=True,
    )
    return wrapped, addr
def makefile(self, mode='r', bufsize=-1):
    """Return a file-like object wrapping the SSL connection.

    Uses the socket module's _fileobject machinery; a reference count is
    kept so the connection stays open until all file objects are closed.
    """
    self._makefile_refs += 1
    return _fileobject(self, mode, bufsize, close=True)
def get_channel_binding(self, cb_type='tls-unique'):
    """Get channel binding data for the current connection.

    Raise ValueError if `cb_type` is not supported.  Return the bytes of
    the data, or None if not yet available (e.g. before the handshake).
    """
    if cb_type not in CHANNEL_BINDING_TYPES:
        raise ValueError('Unsupported channel binding type')
    if cb_type != 'tls-unique':
        # Only tls-unique is actually implemented here.
        raise NotImplementedError(
            '{0} channel binding type not implemented'.format(cb_type))
    if self._sslobj is None:
        return None
    return self._sslobj.tls_unique_cb()
def version(self):
    """Return the protocol version string of the established SSL channel,
    or None when no channel has been established."""
    sslobj = self._sslobj
    return None if sslobj is None else sslobj.version()
'Print a report to stdout, listing the found modules with their paths, as well as modules that are missing, or seem to be missing.'
def report(self):
print print (' %-25s %s' % ('Name', 'File')) print (' %-25s %s' % ('----', '----')) keys = self.modules.keys() keys.sort() for key in keys: m = self.modules[key] if m.__path__: print 'P', else: print 'm', print ('%-25s' % key), (m.__file__ or '') (missing, maybe) = self.any_missing_maybe() if missing: print print 'Missing modules:' for name in missing: mods = self.badmodules[name].keys() mods.sort() print '?', name, 'imported from', ', '.join(mods) if maybe: print print 'Submodules that appear to be missing, but could also be', print 'global names in the parent package:' for name in maybe: mods = self.badmodules[name].keys() mods.sort() print '?', name, 'imported from', ', '.join(mods)
def any_missing(self):
    """Return a list of modules that appear to be missing.

    Use any_missing_maybe() to distinguish modules that are certain to be
    missing from those that *may* be missing.
    """
    missing, maybe = self.any_missing_maybe()
    return missing + maybe
def any_missing_maybe(self):
    """Return two sorted lists: (certainly_missing, possibly_missing).

    A dotted name may be either a missing submodule or just a global name
    in its parent package; when the package does "from module import *" on
    an extension module that cannot be decided without importing it.
    """
    missing = []
    maybe = []
    for name in self.badmodules:
        if name in self.excludes:
            continue
        dot = name.rfind('.')
        if dot < 0:
            # Top-level module that was simply not found.
            missing.append(name)
            continue
        subname = name[dot + 1:]
        pkgname = name[:dot]
        pkg = self.modules.get(pkgname)
        if pkg is None:
            # The parent package itself was never found.
            missing.append(name)
        elif pkgname in self.badmodules[name]:
            # The package tried to import the submodule and failed.
            missing.append(name)
        elif subname in pkg.globalnames:
            # It is a global name in the package, not a submodule.
            pass
        elif pkg.starimports:
            # Could have come in via a star-import; cannot tell.
            maybe.append(name)
        else:
            missing.append(name)
    missing.sort()
    maybe.sort()
    return (missing, maybe)
def __nonzero__(self):
    """True if self != 0.  Called for bool(self) (Python 2 protocol)."""
    return self != 0
@abstractproperty
def real(self):
    """Retrieve the real component of this number (should be a Real)."""
    raise NotImplementedError
@abstractproperty
def imag(self):
    """Retrieve the imaginary component of this number (should be a Real)."""
    raise NotImplementedError
@abstractmethod
def __add__(self, other):
    """self + other"""
    raise NotImplementedError
@abstractmethod
def __radd__(self, other):
    """other + self"""
    raise NotImplementedError
@abstractmethod
def __neg__(self):
    """-self"""
    raise NotImplementedError
@abstractmethod
def __pos__(self):
    """+self"""
    raise NotImplementedError
def __sub__(self, other):
    """self - other, derived from __add__ and __neg__."""
    return self + -other
def __rsub__(self, other):
    """other - self, derived from __neg__ and __add__."""
    return -self + other
@abstractmethod
def __mul__(self, other):
    """self * other"""
    raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
    """other * self"""
    raise NotImplementedError
@abstractmethod
def __div__(self, other):
    """self / other without __future__ division (may promote to float)."""
    raise NotImplementedError
@abstractmethod
def __rdiv__(self, other):
    """other / self without __future__ division."""
    raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
    """self / other with __future__ division (promote to float as needed)."""
    raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
    """other / self with __future__ division."""
    raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
    """self ** exponent; should promote to float or complex when necessary."""
    raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
    """base ** self"""
    raise NotImplementedError
@abstractmethod
def __abs__(self):
    """Return the Real distance from 0.  Called for abs(self)."""
    raise NotImplementedError
@abstractmethod
def conjugate(self):
    """(x+y*i).conjugate() returns (x-y*i)."""
    raise NotImplementedError
@abstractmethod
def __eq__(self, other):
    """self == other"""
    raise NotImplementedError
def __ne__(self, other):
    """self != other, defined as the negation of __eq__."""
    return not (self == other)
@abstractmethod
def __float__(self):
    """Convert to a native float.  Called for float(self)."""
    raise NotImplementedError
@abstractmethod
def __trunc__(self):
    """trunc(self): truncate towards 0, returning an Integral i with
    i>0 iff self>0 and abs(i) maximal subject to abs(i) <= abs(self)."""
    raise NotImplementedError
def __divmod__(self, other):
    """divmod(self, other): the pair (self // other, self % other).

    Sometimes computable faster than the two separate operations.
    """
    return (self // other, self % other)
def __rdivmod__(self, other):
    """divmod(other, self): the pair (other // self, other % self)."""
    return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
    """self // other: the floor() of self / other."""
    raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
    """other // self: the floor() of other / self."""
    raise NotImplementedError
@abstractmethod
def __mod__(self, other):
    """self % other"""
    raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
    """other % self"""
    raise NotImplementedError
@abstractmethod
def __lt__(self, other):
    """self < other; < on Reals defines a total ordering, except perhaps
    for NaN."""
    raise NotImplementedError
@abstractmethod
def __le__(self, other):
    """self <= other"""
    raise NotImplementedError
def __complex__(self):
    """complex(self) == complex(float(self), 0)"""
    return complex(float(self))
@property
def real(self):
    """Real numbers are their own real component."""
    return +self
@property
def imag(self):
    """Real numbers have no imaginary component."""
    return 0
def conjugate(self):
    """Conjugate is a no-op for Reals."""
    return +self
def __float__(self):
    """float(self) = self.numerator / self.denominator.

    Uses the integers' true division (rather than casting one side to
    float first) so ratios of huge integers convert without overflowing.
    """
    return self.numerator / self.denominator
@abstractmethod
def __long__(self):
    """long(self)"""
    raise NotImplementedError
def __index__(self):
    """Called whenever an index is needed, such as in slicing.

    NOTE: uses the Python 2 long() builtin, kept to preserve behavior.
    """
    return long(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
    """self ** exponent % modulus, but maybe faster.

    Accept the modulus argument to support 3-argument pow().  Raise
    TypeError if exponent < 0 or any argument isn't Integral; otherwise
    behave as the 2-argument version described in Complex.
    """
    raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
    """self << other"""
    raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
    """other << self"""
    raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
    """self >> other"""
    raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
    """other >> self"""
    raise NotImplementedError
@abstractmethod
def __and__(self, other):
    """self & other"""
    raise NotImplementedError
@abstractmethod
def __rand__(self, other):
    """other & self"""
    raise NotImplementedError
@abstractmethod
def __xor__(self, other):
    """self ^ other"""
    raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
    """other ^ self"""
    raise NotImplementedError
@abstractmethod
def __or__(self, other):
    """self | other"""
    raise NotImplementedError
@abstractmethod
def __ror__(self, other):
    """other | self"""
    raise NotImplementedError
@abstractmethod
def __invert__(self):
    """~self"""
    raise NotImplementedError
def __float__(self):
    """float(self) == float(long(self))

    NOTE: uses the Python 2 long() builtin, kept to preserve behavior.
    """
    return float(long(self))
@property
def numerator(self):
    """Integers are their own numerators."""
    return +self
@property
def denominator(self):
    """Integers have a denominator of 1."""
    return 1
def __init__(self, skipkeys=False, ensure_ascii=True, check_circular=True,
             allow_nan=True, sort_keys=False, indent=None, separators=None,
             encoding='utf-8', default=None):
    """Constructor for JSONEncoder, with sensible defaults.

    skipkeys: skip (instead of raising TypeError on) dict keys that are
        not str, int, long, float or None.
    ensure_ascii: escape non-ASCII characters as \\uXXXX so the result is
        ASCII-only; otherwise the result may be a unicode instance.
    check_circular: detect circular references in containers to prevent
        infinite recursion (OverflowError).
    allow_nan: permit NaN/Infinity/-Infinity (not strict JSON, but common).
    sort_keys: emit object members sorted by key (useful for regression
        tests / day-to-day comparisons).
    indent: non-negative pretty-print indent level; None is most compact.
    separators: optional (item_separator, key_separator) tuple; default
        (', ', ': ').  Use (',', ':') for the most compact output.
    encoding: encoding used to interpret input str objects (default UTF-8).
    default: function called for otherwise unserializable objects; should
        return a serializable version or raise TypeError.
    """
    self.skipkeys = skipkeys
    self.ensure_ascii = ensure_ascii
    self.check_circular = check_circular
    self.allow_nan = allow_nan
    self.sort_keys = sort_keys
    self.indent = indent
    if separators is not None:
        # Only override the class-level separator defaults when given.
        self.item_separator, self.key_separator = separators
    if default is not None:
        self.default = default
    self.encoding = encoding
def default(self, o):
    """Return a serializable object for ``o``, or raise TypeError.

    Subclasses override this to support extra types (e.g. arbitrary
    iterators); the base implementation always raises.
    """
    raise TypeError(repr(o) + ' is not JSON serializable')
def encode(self, o):
    """Return a JSON string representation of a Python data structure.

    NOTE: Python 2 code (basestring / str.decode), kept to preserve
    behavior.
    """
    # Fast path: encode a bare string directly, without the iterator
    # machinery.
    if isinstance(o, basestring):
        if isinstance(o, str):
            _encoding = self.encoding
            if (_encoding is not None) and not (_encoding == 'utf-8'):
                o = o.decode(_encoding)
        if self.ensure_ascii:
            return encode_basestring_ascii(o)
        else:
            return encode_basestring(o)
    chunks = self.iterencode(o, _one_shot=True)
    if not isinstance(chunks, (list, tuple)):
        chunks = list(chunks)
    return ''.join(chunks)
def iterencode(self, o, _one_shot=False):
    """Encode ``o`` and yield each string chunk as it becomes available.

    Picks the C-accelerated one-shot encoder when possible, otherwise the
    pure-Python incremental encoder.
    """
    markers = {} if self.check_circular else None
    if self.ensure_ascii:
        _encoder = encode_basestring_ascii
    else:
        _encoder = encode_basestring
    if self.encoding != 'utf-8':
        # Wrap the encoder so byte strings get decoded first.
        def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
            if isinstance(o, str):
                o = o.decode(_encoding)
            return _orig_encoder(o)

    def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR,
                 _inf=INFINITY, _neginf=-INFINITY):
        # NaN is the only value that compares unequal to itself.
        if o != o:
            text = 'NaN'
        elif o == _inf:
            text = 'Infinity'
        elif o == _neginf:
            text = '-Infinity'
        else:
            return _repr(o)
        if not allow_nan:
            raise ValueError(
                'Out of range float values are not JSON compliant: ' + repr(o))
        return text

    if (_one_shot and (c_make_encoder is not None)
            and (self.indent is None) and not self.sort_keys):
        _iterencode = c_make_encoder(
            markers, self.default, _encoder, self.indent,
            self.key_separator, self.item_separator, self.sort_keys,
            self.skipkeys, self.allow_nan)
    else:
        _iterencode = _make_iterencode(
            markers, self.default, _encoder, self.indent, floatstr,
            self.key_separator, self.item_separator, self.sort_keys,
            self.skipkeys, _one_shot)
    return _iterencode(o, 0)
def __init__(self, encoding=None, object_hook=None, parse_float=None,
             parse_int=None, parse_constant=None, strict=True,
             object_pairs_hook=None):
    """JSON decoder constructor.

    encoding: encoding used to interpret decoded str objects (UTF-8 by
        default); only ASCII supersets work, others must come in as
        unicode.
    object_hook: called with each decoded JSON object (dict); its return
        value replaces the dict (e.g. for JSON-RPC class hinting).
    object_pairs_hook: called with an ordered list of (key, value) pairs;
        takes priority over object_hook (e.g. collections.OrderedDict).
    parse_float / parse_int: called with the string of each JSON
        float/int; default to float / int.
    parse_constant: called with '-Infinity', 'Infinity' or 'NaN'.
    strict: when false, control characters (codes 0-31) are allowed
        inside strings.
    """
    self.encoding = encoding
    self.object_hook = object_hook
    self.object_pairs_hook = object_pairs_hook
    self.parse_float = parse_float or float
    self.parse_int = parse_int or int
    self.parse_constant = parse_constant or _CONSTANTS.__getitem__
    self.strict = strict
    self.parse_object = JSONObject
    self.parse_array = JSONArray
    self.parse_string = scanstring
    # Build the scanner last: it reads the parse_* attributes above.
    self.scan_once = scanner.make_scanner(self)
def decode(self, s, _w=WHITESPACE.match):
    """Return the Python representation of the JSON document ``s``.

    Raises ValueError if non-whitespace data follows the document.
    """
    obj, end = self.raw_decode(s, idx=_w(s, 0).end())
    end = _w(s, end).end()
    if end != len(s):
        raise ValueError(errmsg('Extra data', s, end, len(s)))
    return obj
def raw_decode(self, s, idx=0):
    """Decode a JSON document from ``s`` starting at index ``idx``.

    Return a 2-tuple of (Python representation, end index).  Useful when
    the string may contain extraneous data after the document.
    """
    try:
        obj, end = self.scan_once(s, idx)
    except StopIteration:
        raise ValueError('No JSON object could be decoded')
    return obj, end
def __init__(self, db, cab, basedir, physical, _logical, default,
             componentflags=None):
    """Create a new directory in the Directory table.

    basedir may be None; physical is the on-disk directory path and
    _logical the logical directory name (made unique against the global
    _directories set).  default fills the DefaultDir slot in the
    Directory table; componentflags is the default flag set that new
    components of this directory receive.
    """
    index = 1
    _logical = make_id(_logical)
    logical = _logical
    # Uniquify the logical name by appending an increasing counter.
    while logical in _directories:
        logical = '%s%d' % (_logical, index)
        index += 1
    _directories.add(logical)
    self.db = db
    self.cab = cab
    self.basedir = basedir
    self.physical = physical
    self.logical = logical
    self.component = None
    self.short_names = set()
    self.ids = set()
    self.keyfiles = {}
    self.componentflags = componentflags
    if basedir:
        self.absolute = os.path.join(basedir.absolute, physical)
        blogical = basedir.logical
    else:
        self.absolute = physical
        blogical = None
    add_data(db, 'Directory', [(logical, blogical, default)])
def start_component(self, component=None, feature=None, flags=None,
                    keyfile=None, uuid=None):
    """Add a Component-table entry and make it this directory's current one.

    Defaults: component name = the directory's logical name; feature =
    current_feature; flags = the directory's componentflags; keyfile =
    none (KeyPath left null).
    """
    if flags is None:
        flags = self.componentflags
    if uuid is None:
        uuid = gen_uuid()
    else:
        uuid = uuid.upper()
    if component is None:
        component = self.logical
    self.component = component
    if Win64:
        # Extra attribute bit set for 64-bit builds — presumably the MSI
        # 64-bit component attribute; confirm against the MSI schema.
        flags |= 256
    if keyfile:
        keyid = self.cab.gen_id(self.absolute, keyfile)
        self.keyfiles[keyfile] = keyid
    else:
        keyid = None
    add_data(self.db, 'Component',
             [(component, uuid, self.logical, flags, None, keyid)])
    if feature is None:
        feature = current_feature
    add_data(self.db, 'FeatureComponents', [(feature.id, component)])
def add_file(self, file, src=None, version=None, language=None):
    """Add a file to the directory's current component, starting a new
    component if there is none.

    src, if given, is interpreted relative to the current directory;
    by default the source and File-table names are identical.  version
    and language fill the optional File-table columns.  Returns the
    logical file id.
    """
    if not self.component:
        self.start_component(self.logical, current_feature, 0)
    if not src:
        src = file
    file = os.path.basename(file)
    absolute = os.path.join(self.absolute, src)
    # Reject characters that are illegal in MSI file names.
    assert not re.search('[\\?|><:/*]"', file)
    if file in self.keyfiles:
        logical = self.keyfiles[file]
    else:
        logical = None
    sequence, logical = self.cab.append(absolute, file, logical)
    assert logical not in self.ids
    self.ids.add(logical)
    short = self.make_short(file)
    full = '%s|%s' % (short, file)
    filesize = os.stat(absolute).st_size
    attributes = 512  # fixed attribute value used for every file entry
    add_data(self.db, 'File',
             [(logical, self.component, full, filesize,
               version, language, attributes, sequence)])
    if file.endswith('.py'):
        # Make sure compiled .pyc/.pyo siblings are removed on uninstall.
        add_data(self.db, 'RemoveFile',
                 [(logical + 'c', self.component,
                   '%sC|%sc' % (short, file), self.logical, 2),
                  (logical + 'o', self.component,
                   '%sO|%so' % (short, file), self.logical, 2)])
    return logical
def glob(self, pattern, exclude=None):
    """Add every file matching the glob pattern to the current component.

    Individual files can be skipped via the exclude list.  Returns the
    list of matched names.
    """
    files = glob.glob1(self.absolute, pattern)
    for f in files:
        if exclude and f in exclude:
            continue
        self.add_file(f)
    return files
def remove_pyc(self):
    """Register RemoveFile entries so .pyc/.pyo files are deleted on uninstall."""
    add_data(self.db, 'RemoveFile',
             [(self.component + 'c', self.component, '*.pyc', self.logical, 2),
              (self.component + 'o', self.component, '*.pyo', self.logical, 2)])
'Getter for \'message\'; needed only to override deprecation in BaseException.'
def _get_message(self):
return self.__message
'Setter for \'message\'; needed only to override deprecation in BaseException.'
def _set_message(self, value):
self.__message = value
def sections(self):
    """Return the section names, excluding [DEFAULT]."""
    return self._sections.keys()
def add_section(self, section):
    """Create a new section in the configuration.

    Raise DuplicateSectionError if the section already exists, and
    ValueError for any case-insensitive variant of 'DEFAULT'.
    """
    if section.lower() == 'default':
        # Same behavior as the Python 2 `raise ValueError, msg` form.
        raise ValueError('Invalid section name: %s' % section)
    if section in self._sections:
        raise DuplicateSectionError(section)
    self._sections[section] = self._dict()
def has_section(self, section):
    """Return whether the named section exists (DEFAULT is never reported)."""
    return section in self._sections
def options(self, section):
    """Return the option names defined in `section`, including defaults."""
    try:
        opts = self._sections[section].copy()
    except KeyError:
        raise NoSectionError(section)
    opts.update(self._defaults)
    if '__name__' in opts:
        # Internal bookkeeping key; never exposed to callers.
        del opts['__name__']
    return opts.keys()
def read(self, filenames):
    """Read and parse a filename or a list of filenames.

    Files that cannot be opened are silently ignored, so a list of
    potential configuration locations can be given; every existing file
    is read.  Returns the list of successfully read files.

    NOTE: Python 2 basestring check, kept to preserve behavior.
    """
    if isinstance(filenames, basestring):
        filenames = [filenames]
    read_ok = []
    for filename in filenames:
        try:
            fp = open(filename)
        except IOError:
            # Missing/unreadable candidates are skipped by design.
            continue
        self._read(fp, filename)
        fp.close()
        read_ok.append(filename)
    return read_ok
def readfp(self, fp, filename=None):
    """Like read(), but for an open file-like object with a readline method.

    If filename is not given it is taken from fp.name, falling back to
    '<???>' when fp has no name attribute.
    """
    if filename is None:
        try:
            filename = fp.name
        except AttributeError:
            filename = '<???>'
    self._read(fp, filename)
def has_option(self, section, option):
    """Check whether `option` exists in `section` (or in the defaults)."""
    if not section or section == DEFAULTSECT:
        option = self.optionxform(option)
        return option in self._defaults
    elif section not in self._sections:
        return False
    else:
        option = self.optionxform(option)
        return (option in self._sections[section]
                or option in self._defaults)
def set(self, section, option, value=None):
    """Set an option (in the defaults when section is empty or DEFAULT)."""
    if not section or section == DEFAULTSECT:
        target = self._defaults
    else:
        try:
            target = self._sections[section]
        except KeyError:
            raise NoSectionError(section)
    target[self.optionxform(option)] = value
def write(self, fp):
    """Write an .ini-format representation of the configuration to `fp`."""
    if self._defaults:
        fp.write('[%s]\n' % DEFAULTSECT)
        for key, value in self._defaults.items():
            fp.write('%s = %s\n'
                     % (key, str(value).replace('\n', '\n DCTB ')))
        fp.write('\n')
    for section in self._sections:
        fp.write('[%s]\n' % section)
        for key, value in self._sections[section].items():
            if key == '__name__':
                # Internal bookkeeping key; never written out.
                continue
            if value is not None or self._optcre == self.OPTCRE:
                key = ' = '.join(
                    (key, str(value).replace('\n', '\n DCTB ')))
            fp.write('%s\n' % key)
        fp.write('\n')
def remove_option(self, section, option):
    """Remove `option` from `section`; return whether it existed."""
    if not section or section == DEFAULTSECT:
        sectdict = self._defaults
    else:
        try:
            sectdict = self._sections[section]
        except KeyError:
            raise NoSectionError(section)
    option = self.optionxform(option)
    existed = option in sectdict
    if existed:
        del sectdict[option]
    return existed
def remove_section(self, section):
    """Remove a file section; return whether it existed."""
    existed = section in self._sections
    if existed:
        del self._sections[section]
    return existed
def _read(self, fp, fpname):
    """Parse a sectioned setup file from the file-like object `fp`.

    Sections are introduced by a `[name]` header line; options follow as
    `name: value` or `name = value` lines.  A line starting with
    whitespace continues the previous option's value.  Blank lines,
    `#`/`;` comment lines and 'rem' lines are ignored.  All malformed
    lines are collected into a single ParsingError raised at the end.
    """
    cursect = None   # dictionary of the current section
    optname = None   # most recent option name, for continuation lines
    lineno = 0
    e = None         # accumulated ParsingError, if any
    while True:
        line = fp.readline()
        if not line:
            break
        lineno = lineno + 1
        # Blank lines and comment lines are skipped outright.
        if line.strip() == '' or line[0] in '#;':
            continue
        if line.split(None, 1)[0].lower() == 'rem' and line[0] in 'rR':
            # Windows-.ini-style 'rem' comment.
            continue
        if line[0].isspace() and cursect is not None and optname:
            # Continuation of the previous option's value.
            value = line.strip()
            if value:
                cursect[optname].append(value)
        else:
            mo = self.SECTCRE.match(line)
            if mo:
                # Section header: reuse or create the section dict.
                sectname = mo.group('header')
                if sectname in self._sections:
                    cursect = self._sections[sectname]
                elif sectname == DEFAULTSECT:
                    cursect = self._defaults
                else:
                    cursect = self._dict()
                    cursect['__name__'] = sectname
                    self._sections[sectname] = cursect
                # A new section resets continuation tracking.
                optname = None
            elif cursect is None:
                raise MissingSectionHeaderError(fpname, lineno, line)
            else:
                mo = self._optcre.match(line)
                if mo:
                    optname, vi, optval = mo.group('option', 'vi', 'value')
                    optname = self.optionxform(optname.rstrip())
                    if optval is not None:
                        if vi in ('=', ':') and ';' in optval:
                            # ';' preceded by whitespace starts an
                            # inline comment.
                            pos = optval.find(';')
                            if pos != -1 and optval[pos - 1].isspace():
                                optval = optval[:pos]
                        optval = optval.strip()
                        if optval == '""':
                            optval = ''
                        # Stored as a list so continuations can append.
                        cursect[optname] = [optval]
                    else:
                        cursect[optname] = optval
                else:
                    # Unparseable line: collect into one ParsingError.
                    if not e:
                        e = ParsingError(fpname)
                    e.append(lineno, repr(line))
    if e:
        raise e
    # Join multi-line values that were gathered as lists.
    all_sections = [self._defaults]
    all_sections.extend(self._sections.values())
    for options in all_sections:
        for name, val in options.items():
            if isinstance(val, list):
                options[name] = '\n'.join(val)
def get(self, section, option, raw=False, vars=None):
    """Return the value of `option` in `section`.

    Lookup order: `vars` (if given), the section, then the defaults.
    Unless `raw` is true, %-interpolations are expanded in the result,
    with interpolation keys looked up the same way.  The DEFAULT section
    is special.
    """
    sectiondict = {}
    try:
        sectiondict = self._sections[section]
    except KeyError:
        if section != DEFAULTSECT:
            raise NoSectionError(section)
    vardict = {}
    if vars:
        # Normalize caller-supplied keys the same way as stored options.
        for key, value in vars.items():
            vardict[self.optionxform(key)] = value
    d = _Chainmap(vardict, sectiondict, self._defaults)
    option = self.optionxform(option)
    try:
        value = d[option]
    except KeyError:
        raise NoOptionError(option, section)
    if raw or value is None:
        return value
    return self._interpolate(section, option, value, d)
def items(self, section, raw=False, vars=None):
    """Return (name, value) pairs for each option in `section`.

    Values are layered: defaults, then the section, then `vars`, in
    increasing priority.  Unless `raw`, %-interpolations are expanded.
    The DEFAULT section is special.
    """
    d = self._defaults.copy()
    try:
        d.update(self._sections[section])
    except KeyError:
        if section != DEFAULTSECT:
            raise NoSectionError(section)
    if vars:
        for key, value in vars.items():
            d[self.optionxform(key)] = value
    options = d.keys()
    if '__name__' in options:
        # Internal bookkeeping key; never exposed to callers.
        options.remove('__name__')
    if raw:
        return [(option, d[option]) for option in options]
    return [(option, self._interpolate(section, option, d[option], d))
            for option in options]
def set(self, section, option, value=None):
    """Set an option, extending ConfigParser.set with string validation.

    Also rejects values containing stray '%' characters that are neither
    escaped ('%%') nor part of a recognized interpolation.

    NOTE: Python 2 basestring check, kept to preserve behavior.
    """
    if (self._optcre is self.OPTCRE) or value:
        if not isinstance(value, basestring):
            raise TypeError('option values must be strings')
    if value is not None:
        # Strip escaped percents and recognized interpolations; any '%'
        # that survives is a syntax error.
        tmp_value = value.replace('%%', '')
        tmp_value = self._interpvar_re.sub('', tmp_value)
        if '%' in tmp_value:
            raise ValueError('invalid interpolation syntax in %r at position %d'
                             % (value, tmp_value.find('%')))
    ConfigParser.set(self, section, option, value)
def process_message(self, peer, mailfrom, rcpttos, data):
    """Override in a subclass to handle a message from the client.

    peer: the client's (ipaddr, port) tuple.
    mailfrom: the raw envelope sender address claimed by the client.
    rcpttos: list of raw recipient addresses.
    data: the full message text, headers and all, already
        de-transparencied per RFC 821 section 4.5.2 (leading dots
        removed).

    Return None for a normal '250 Ok' response, or the desired response
    string in RFC 821 format.
    """
    raise NotImplementedError
def _unsupported(self, name):
    """Internal: raise UnsupportedOperation for the named operation."""
    raise UnsupportedOperation(u'%s.%s() not supported'
                               % (self.__class__.__name__, name))
def seek(self, pos, whence=0):
    """Change the stream position to byte offset `pos`, relative to `whence`:
    0 = start (pos >= 0), 1 = current position, 2 = end (pos usually
    negative).  Unsupported in this base class.
    """
    self._unsupported(u'seek')
def tell(self):
    """Return the current stream position (a relative seek of zero)."""
    return self.seek(0, 1)
def truncate(self, pos=None):
    """Truncate the file to `pos` bytes (default: the current position).
    Unsupported in this base class."""
    self._unsupported(u'truncate')
def flush(self):
    """Flush write buffers, if applicable.

    This base implementation only performs the closed-state check; there
    is no buffering at this level.
    """
    self._checkClosed()
def close(self):
    """Flush and close the IO object.  No effect if already closed."""
    if not self.__closed:
        try:
            self.flush()
        finally:
            # Mark closed even if flush() raised.
            self.__closed = True
def __del__(self):
    """Destructor: best-effort close()."""
    try:
        self.close()
    except:
        # The interpreter may be shutting down; swallow every error.
        pass