desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def postloop(self):
    """Hook method executed once when the cmdloop() method is about to
    return.

    The base implementation does nothing; override for cleanup.
    """
    pass
|
def parseline(self, line):
    """Parse the line into a command name and a string containing
    the arguments.

    Returns a tuple (command, args, line).  'command' and 'args' may be
    None if the line couldn't be parsed.  A leading '?' becomes 'help';
    a leading '!' becomes 'shell' when a do_shell method exists.
    """
    line = line.strip()
    if not line:
        return None, None, line
    if line[0] == '?':
        line = 'help ' + line[1:]
    elif line[0] == '!':
        if not hasattr(self, 'do_shell'):
            return None, None, line
        line = 'shell ' + line[1:]
    # The command name is the longest prefix of identifier characters.
    end, length = 0, len(line)
    while end < length and line[end] in self.identchars:
        end += 1
    return line[:end], line[end:].strip(), line
|
def onecmd(self, line):
    """Interpret the argument as though it had been typed in response
    to the prompt.

    This may be overridden, but should not normally need to be; see the
    precmd() and postcmd() methods for useful execution hooks.  The
    return value is a flag indicating whether interpretation of
    commands by the interpreter should stop.
    """
    cmd, arg, line = self.parseline(line)
    if not line:
        return self.emptyline()
    if cmd is None:
        return self.default(line)
    self.lastcmd = line
    if cmd == '':
        return self.default(line)
    try:
        handler = getattr(self, 'do_' + cmd)
    except AttributeError:
        return self.default(line)
    return handler(arg)
|
def emptyline(self):
    """Called when an empty line is entered in response to the prompt.

    If this method is not overridden, it repeats the last nonempty
    command entered.
    """
    if not self.lastcmd:
        return None
    return self.onecmd(self.lastcmd)
|
def default(self, line):
    """Called on an input line when the command prefix is not recognized.

    If this method is not overridden, it prints an error message to
    self.stdout and returns.
    """
    message = '*** Unknown syntax: %s\n' % line
    self.stdout.write(message)
|
def completedefault(self, *ignored):
    """Method called to complete an input line when no command-specific
    complete_*() method is available.

    By default, it returns an empty list.
    """
    return []
|
def complete(self, text, state):
    """Return the next possible completion for 'text'.

    If a command has not been entered, then complete against the command
    list.  Otherwise try to call complete_<command> to get a list of
    completions.  Called repeatedly with state 0, 1, 2, ... until it
    returns None.
    """
    if state == 0:
        import readline
        origline = readline.get_line_buffer()
        line = origline.lstrip()
        offset = len(origline) - len(line)
        begidx = readline.get_begidx() - offset
        endidx = readline.get_endidx() - offset
        if begidx > 0:
            cmd, args, _ = self.parseline(line)
            if cmd == '':
                compfunc = self.completedefault
            else:
                # Missing complete_<cmd> falls back to completedefault,
                # matching the original try/except AttributeError.
                compfunc = getattr(self, 'complete_' + cmd,
                                   self.completedefault)
        else:
            compfunc = self.completenames
        self.completion_matches = compfunc(text, line, begidx, endidx)
    try:
        return self.completion_matches[state]
    except IndexError:
        return None
|
def columnize(self, list, displaywidth=80):
    """Display a list of strings as a compact set of columns.

    Each column is only as wide as necessary.  Columns are separated
    by two spaces (one was not legible enough).

    Raises TypeError if any item is not a string.
    """
    if not list:
        self.stdout.write('<empty>\n')
        return
    nonstrings = [i for i in range(len(list))
                  if not isinstance(list[i], str)]
    if nonstrings:
        # Call form of raise is portable; the old `raise E, msg` comma
        # form is Python-2-only syntax.
        raise TypeError('list[i] not a string for i in %s'
                        % ', '.join(map(str, nonstrings)))
    size = len(list)
    if size == 1:
        self.stdout.write('%s\n' % str(list[0]))
        return
    # Try progressively more rows until the layout fits displaywidth.
    for nrows in range(1, len(list)):
        ncols = ((size + nrows) - 1) // nrows
        colwidths = []
        totwidth = -2
        for col in range(ncols):
            colwidth = 0
            for row in range(nrows):
                i = row + nrows * col
                if i >= size:
                    break
                colwidth = max(colwidth, len(list[i]))
            colwidths.append(colwidth)
            totwidth += colwidth + 2
            if totwidth > displaywidth:
                break
        if totwidth <= displaywidth:
            break
    else:
        # Nothing fits: fall back to one item per line.
        nrows = len(list)
        ncols = 1
        colwidths = [0]
    for row in range(nrows):
        texts = []
        for col in range(ncols):
            i = row + nrows * col
            texts.append('' if i >= size else list[i])
        while texts and not texts[-1]:
            del texts[-1]
        for col in range(len(texts)):
            texts[col] = texts[col].ljust(colwidths[col])
        # Two-space separator, as documented (the single space in the
        # dump appears to be an extraction artifact).
        self.stdout.write('%s\n' % str('  '.join(texts)))
|
def fromFile(cls, pathOrFile):
    """Deprecated.  Use the readPlist() function instead."""
    rootObject = readPlist(pathOrFile)
    result = cls()
    result.update(rootObject)
    return result
|
def write(self, pathOrFile):
    """Deprecated.  Use the writePlist() function instead."""
    writePlist(self, pathOrFile)
|
def user(self, user):
    """Send user name, return response
    (should indicate password required).
    """
    command = 'USER %s' % user
    return self._shortcmd(command)
|
def pass_(self, pswd):
    """Send password, return response
    (response includes message count, mailbox size).

    NB: mailbox is locked by server from here to 'quit()'.
    """
    command = 'PASS %s' % pswd
    return self._shortcmd(command)
|
def stat(self):
    """Get mailbox status.

    Result is a tuple of 2 ints (message count, mailbox size).
    """
    retval = self._shortcmd('STAT')
    rets = retval.split()
    if self._debugging:
        # Single-argument call form of print works on both Python 2
        # and 3; the old `print a, b` statement is Python-2-only.
        print('*stat* ' + repr(rets))
    numMessages = int(rets[1])
    sizeMessages = int(rets[2])
    return (numMessages, sizeMessages)
|
def list(self, which=None):
    """Request listing, return result.

    Result without a message number argument is in form
    ['response', ['mesg_num octets', ...], octets].  Result when a
    message number argument is given is a single response: the
    "scan listing" for that message.
    """
    if which is None:
        return self._longcmd('LIST')
    return self._shortcmd('LIST %s' % which)
|
def retr(self, which):
    """Retrieve whole message number 'which'.

    Result is in form ['response', ['line', ...], octets].
    """
    command = 'RETR %s' % which
    return self._longcmd(command)
|
def dele(self, which):
    """Delete message number 'which'.

    Result is 'response'.
    """
    command = 'DELE %s' % which
    return self._shortcmd(command)
|
def noop(self):
    """Does nothing.

    One supposes the response indicates the server is alive.
    """
    return self._shortcmd('NOOP')
|
def rset(self):
    """Unmark all messages marked for deletion."""
    return self._shortcmd('RSET')
|
def quit(self):
    """Signoff: commit changes on server, unlock mailbox, close
    connection.

    Returns the server response, or the error_proto exception value
    if QUIT itself failed.
    """
    try:
        resp = self._shortcmd('QUIT')
    except error_proto as val:
        resp = val
    # Tear down the connection regardless of the QUIT outcome.
    for conn in (self.file, self.sock):
        conn.close()
    del self.file, self.sock
    return resp
|
def rpop(self, user):
    """Send an RPOP command (legacy remote-user authentication)."""
    command = 'RPOP %s' % user
    return self._shortcmd(command)
|
def apop(self, user, secret):
    """Authorisation -- only possible if server has supplied a
    timestamp in initial greeting.

    Args:
        user   - mailbox user;
        secret - secret shared between client and server.

    NB: mailbox is locked by server from here to 'quit()'.
    Raises error_proto if the greeting carried no timestamp.
    """
    m = self.timestamp.match(self.welcome)
    if not m:
        raise error_proto('-ERR APOP not supported by server')
    import hashlib
    challenge = m.group(1) + secret
    if not isinstance(challenge, bytes):
        # Python 3 strings must be encoded before hashing; RFC 1939
        # timestamps are ASCII.  On Python 2 str is already bytes.
        challenge = challenge.encode('ascii')
    # hexdigest() is identical to the old manual '%02x'-per-byte join.
    digest = hashlib.md5(challenge).hexdigest()
    return self._shortcmd('APOP %s %s' % (user, digest))
|
def top(self, which, howmuch):
    """Retrieve message header of message number 'which'
    and first 'howmuch' lines of message body.

    Result is in form ['response', ['line', ...], octets].
    """
    command = 'TOP %s %s' % (which, howmuch)
    return self._longcmd(command)
|
def uidl(self, which=None):
    """Return message digest (unique id) list.

    If 'which' is given, result contains the unique id for that message
    in the form 'response mesgnum uid'; otherwise result is the list
    ['response', ['mesgnum uid', ...], octets].
    """
    if which is None:
        return self._longcmd('UIDL')
    return self._shortcmd('UIDL %s' % which)
|
def set_terminator(self, term):
    """Set the input delimiter.

    Can be a fixed string of any length, an integer, or None.
    """
    self.terminator = term
|
def readable(self):
    """Predicate for inclusion in the readable set for select().

    Always true (returns 1) for this channel.
    """
    return 1
|
def writable(self):
    """Predicate for inclusion in the writable set for select().

    Writable while there is queued output, or while disconnected.
    """
    if self.producer_fifo:
        return self.producer_fifo
    return not self.connected
|
def close_when_done(self):
    """Automatically close this channel once the outgoing queue is empty.

    Appends a None sentinel to the producer FIFO; presumably the send
    loop elsewhere interprets None as "close" -- not shown here.
    """
    self.producer_fifo.append(None)
|
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None):
    """Create a UUID from a 32-hex-digit string ('hex'), a 16-byte
    string ('bytes'), 16 bytes in little-endian order ('bytes_le'),
    a 6-tuple of integer fields ('fields'), or a single 128-bit
    integer ('int').

    When a string of hex digits is given, curly braces, hyphens, and a
    URN prefix are all optional.  Exactly one of the five value
    arguments must be given.  The 'version' argument is optional; if
    given, the resulting UUID has its variant and version bits set
    according to RFC 4122, overriding the supplied value.

    NOTE: Python 2 code ('long', 'L' integer-literal suffixes).
    """
    # Exactly one of the five value arguments may be non-None.
    if ([hex, bytes, bytes_le, fields, int].count(None) != 4):
        raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
    if (hex is not None):
        # Strip optional URN prefix, braces and hyphens first.
        hex = hex.replace('urn:', '').replace('uuid:', '')
        hex = hex.strip('{}').replace('-', '')
        if (len(hex) != 32):
            raise ValueError('badly formed hexadecimal UUID string')
        int = long(hex, 16)
    if (bytes_le is not None):
        if (len(bytes_le) != 16):
            raise ValueError('bytes_le is not a 16-char string')
        # Byte-swap the three little-endian leading fields (4-2-2) into
        # big-endian order, then fall through to the 'bytes' branch.
        bytes = ((((((((bytes_le[3] + bytes_le[2]) + bytes_le[1]) + bytes_le[0]) + bytes_le[5]) + bytes_le[4]) + bytes_le[7]) + bytes_le[6]) + bytes_le[8:])
    if (bytes is not None):
        if (len(bytes) != 16):
            raise ValueError('bytes is not a 16-char string')
        # Hex-encode each byte and parse the whole thing as one integer.
        int = long((('%02x' * 16) % tuple(map(ord, bytes))), 16)
    if (fields is not None):
        if (len(fields) != 6):
            raise ValueError('fields is not a 6-tuple')
        (time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node) = fields
        # Range-check every field against its RFC 4122 bit width.
        if (not (0 <= time_low < (1 << 32L))):
            raise ValueError('field 1 out of range (need a 32-bit value)')
        if (not (0 <= time_mid < (1 << 16L))):
            raise ValueError('field 2 out of range (need a 16-bit value)')
        if (not (0 <= time_hi_version < (1 << 16L))):
            raise ValueError('field 3 out of range (need a 16-bit value)')
        if (not (0 <= clock_seq_hi_variant < (1 << 8L))):
            raise ValueError('field 4 out of range (need an 8-bit value)')
        if (not (0 <= clock_seq_low < (1 << 8L))):
            raise ValueError('field 5 out of range (need an 8-bit value)')
        if (not (0 <= node < (1 << 48L))):
            raise ValueError('field 6 out of range (need a 48-bit value)')
        clock_seq = ((clock_seq_hi_variant << 8L) | clock_seq_low)
        # Pack the six fields into the canonical 128-bit layout.
        int = (((((time_low << 96L) | (time_mid << 80L)) | (time_hi_version << 64L)) | (clock_seq << 48L)) | node)
    if (int is not None):
        if (not (0 <= int < (1 << 128L))):
            raise ValueError('int is out of range (need a 128-bit value)')
    if (version is not None):
        if (not (1 <= version <= 5)):
            raise ValueError('illegal version number')
        # Set the variant to RFC 4122: top two bits of clock_seq_hi = 10.
        int &= (~ (49152 << 48L))
        int |= (32768 << 48L)
        # Clear then set the four version bits of time_hi_version.
        int &= (~ (61440 << 64L))
        int |= (version << 76L)
    # Assign via __dict__ -- presumably __setattr__ is blocked elsewhere
    # in the class to make instances immutable (not shown here).
    self.__dict__['int'] = int
|
def __init__(self, namespace=None):
    """Create a new completer for the command line.

    Completer([namespace]) -> completer instance.

    If unspecified, the default namespace where completions are
    performed is __main__ (technically, __main__.__dict__).
    Namespaces should be given as dictionaries.

    Completer instances should be used as the completion mechanism of
    readline via the set_completer() call:

        readline.set_completer(Completer(my_namespace).complete)
    """
    if namespace and not isinstance(namespace, dict):
        # Call form of raise is portable; `raise E, msg` is
        # Python-2-only syntax.
        raise TypeError('namespace must be a dictionary')
    # Don't bind to namespace quite yet when it is None, but defer to
    # __main__ at completion time so late-defined names are seen.
    if namespace is None:
        self.use_main_ns = 1
    else:
        self.use_main_ns = 0
        self.namespace = namespace
|
def complete(self, text, state):
    """Return the next possible completion for 'text'.

    This is called successively with state == 0, 1, 2, ... until it
    returns None.  The completion should begin with 'text'.
    """
    if self.use_main_ns:
        self.namespace = __main__.__dict__
    if state == 0:
        finder = self.attr_matches if '.' in text else self.global_matches
        self.matches = finder(text)
    try:
        return self.matches[state]
    except IndexError:
        return None
|
def global_matches(self, text):
    """Compute matches when text is a simple name.

    Return a list of all keywords, built-in functions and names
    currently defined in self.namespace that match.
    """
    import keyword
    n = len(text)
    # Keywords first, then builtins and the working namespace.
    matches = [word for word in keyword.kwlist if word[:n] == text]
    for nspace in [__builtin__.__dict__, self.namespace]:
        for word, val in nspace.items():
            if word[:n] == text and word != '__builtins__':
                matches.append(self._callable_postfix(val, word))
    return matches
|
def attr_matches(self, text):
    """Compute matches when text contains a dot.

    Assuming the text is of the form NAME.NAME....[NAME], and is
    evaluatable in self.namespace, it will be evaluated and its
    attributes (as revealed by dir()) are used as possible completions.
    (For class instances, class members are also considered.)

    WARNING: this can still invoke arbitrary C code, if an object
    with a __getattr__ hook is evaluated.
    """
    import re
    m = re.match(r'(\w+(\.\w+)*)\.(\w*)', text)
    if not m:
        return []
    expr, attr = m.group(1, 3)
    try:
        thisobject = eval(expr, self.namespace)
    except Exception:
        return []
    words = dir(thisobject)
    if '__builtins__' in words:
        words.remove('__builtins__')
    if hasattr(thisobject, '__class__'):
        words.append('__class__')
        words.extend(get_class_members(thisobject.__class__))
    n = len(attr)
    matches = []
    for word in words:
        if word[:n] != attr or not hasattr(thisobject, word):
            continue
        val = getattr(thisobject, word)
        matches.append(self._callable_postfix(val, '%s.%s' % (expr, word)))
    return matches
|
def __init__(self, fp, seekable=1):
    """Initialize the class instance and read the headers."""
    if seekable == 1:
        # Verify the stream really is seekable before trusting it.
        try:
            fp.tell()
        except (AttributeError, IOError):
            seekable = 0
    self.fp = fp
    self.seekable = seekable
    self.startofheaders = None
    self.startofbody = None
    if self.seekable:
        try:
            self.startofheaders = self.fp.tell()
        except IOError:
            self.seekable = 0
    self.readheaders()
    if self.seekable:
        try:
            self.startofbody = self.fp.tell()
        except IOError:
            self.seekable = 0
|
def rewindbody(self):
    """Rewind the file to the start of the body (if seekable).

    Raises IOError when the underlying file is not seekable.
    """
    if not self.seekable:
        # Call form of raise is portable; `raise E, msg` is
        # Python-2-only syntax.
        raise IOError('unseekable file')
    self.fp.seek(self.startofbody)
|
def readheaders(self):
    """Read header lines.

    Read header lines up to the entirely blank line that terminates
    them.  The (normally blank) line that ends the headers is skipped,
    but not included in the returned list.  If a non-header line ends
    the headers, (which is an error), an attempt is made to backspace
    over it; it is never included in the returned list.

    The variable self.status is set to the empty string if all went
    well, otherwise it is an error message.  The variable self.headers
    is a completely uninterpreted list of lines contained in the header
    (so printing them will reproduce the header exactly as it appears
    in the file).
    """
    self.dict = {}
    self.unixfrom = ''
    self.headers = lst = []
    self.status = ''
    headerseen = ''
    firstline = 1
    startofline = unread = tell = None
    # Prefer an unread() hook; fall back to tell()/seek() when seekable.
    if hasattr(self.fp, 'unread'):
        unread = self.fp.unread
    elif self.seekable:
        tell = self.fp.tell
    while 1:
        if tell:
            try:
                startofline = tell()
            except IOError:
                # Stream stopped cooperating; stop trying to seek.
                startofline = tell = None
                self.seekable = 0
        line = self.fp.readline()
        if (not line):
            self.status = 'EOF in headers'
            break
        # A Unix 'From ' envelope line is only honored on the first line.
        if (firstline and line.startswith('From ')):
            self.unixfrom = (self.unixfrom + line)
            continue
        firstline = 0
        # NOTE(review): ' DCTB ' looks like a garbled ' \t' (space+tab
        # linear-whitespace set) from the original source -- confirm.
        if (headerseen and (line[0] in ' DCTB ')):
            # Continuation line: fold into the previous header's value.
            lst.append(line)
            x = ((self.dict[headerseen] + '\n ') + line.strip())
            self.dict[headerseen] = x.strip()
            continue
        elif self.iscomment(line):
            continue
        elif self.islast(line):
            break
        headerseen = self.isheader(line)
        if headerseen:
            lst.append(line)
            self.dict[headerseen] = line[(len(headerseen) + 1):].strip()
            continue
        else:
            # Not a header and not a terminator: record the anomaly and
            # try to push the line back for the body reader.
            if (not self.dict):
                self.status = 'No headers'
            else:
                self.status = 'Non-header line where header expected'
            if unread:
                unread(line)
            elif tell:
                self.fp.seek(startofline)
            else:
                self.status = (self.status + '; bad seek')
            break
|
def isheader(self, line):
    """Determine whether a given line is a legal header.

    This method should return the header name, suitably canonicalized
    (lowercased), or None when the line is not a header.  You may
    override this method to parse RFC 2822-like formats with special
    header conventions.
    """
    colon = line.find(':')
    if colon > 0:
        return line[:colon].lower()
    return None
|
def islast(self, line):
    """Determine whether a line is a legal end of RFC 2822 headers.

    You may override this method if your application wants to bend the
    rules, e.g. to strip trailing whitespace, or to recognize MH
    template separators ('--------').
    """
    # _blanklines is a module-level collection of line endings
    # considered blank (presumably '\r\n' and '\n' -- not shown here).
    return (line in _blanklines)
|
def iscomment(self, line):
    """Determine whether a line should be skipped entirely.

    You may override this method in order to use Message parsing on
    tagged data in RFC 2822-like formats that support embedded comments
    or free-text data.  The base implementation skips nothing.
    """
    return False
|
def getallmatchingheaders(self, name):
    """Find all header lines matching a given header name.

    Look through the list of headers and find all lines matching a
    given header name (and their continuation lines).  A list of the
    lines is returned, without interpretation.  If the header does not
    occur, an empty list is returned.  Case is not important in the
    header name.
    """
    prefix = name.lower() + ':'
    plen = len(prefix)
    matched = []
    inside = 0
    for line in self.headers:
        if line[:plen].lower() == prefix:
            inside = 1
        elif not line[:1].isspace():
            # A fresh, non-continuation line ends any current match.
            inside = 0
        if inside:
            matched.append(line)
    return matched
|
def getfirstmatchingheader(self, name):
    """Get the first header line matching name.

    This is similar to getallmatchingheaders, but it returns only the
    first matching header (and its continuation lines).
    """
    prefix = name.lower() + ':'
    plen = len(prefix)
    found = []
    hit = 0
    for line in self.headers:
        if hit:
            # Stop at the first non-continuation line after the match.
            if not line[:1].isspace():
                break
        elif line[:plen].lower() == prefix:
            hit = 1
        if hit:
            found.append(line)
    return found
|
def getrawheader(self, name):
    """A higher-level interface to getfirstmatchingheader().

    Return a string containing the literal text of the header but with
    the keyword stripped.  All leading, trailing and embedded
    whitespace is kept in the string, however.  Return None if the
    header does not occur.
    """
    lines = self.getfirstmatchingheader(name)
    if not lines:
        return None
    # Drop "name:" from the first line only; the rest are continuations.
    lines[0] = lines[0][len(name) + 1:]
    return ''.join(lines)
|
def getheader(self, name, default=None):
    """Get the header value for a name.

    This is the normal interface: it returns a stripped version of the
    header value for a given header name, or the default if it doesn't
    exist.  This uses the dictionary version which finds the *last*
    such header.
    """
    key = name.lower()
    return self.dict.get(key, default)
|
def getheaders(self, name):
    """Get all values for a header.

    This returns a list of values for headers given more than once;
    each value in the result list is stripped in the same way as the
    result of getheader().  If the header is not given, return an
    empty list.
    """
    values = []
    current = ''
    seen_one = 0
    for raw in self.getallmatchingheaders(name):
        if raw[0].isspace():
            # Continuation line: fold it into the value being built.
            stripped = raw.strip()
            current = '%s\n %s' % (current, stripped) if current else stripped
        else:
            if seen_one:
                values.append(current)
            current = raw[raw.find(':') + 1:].strip()
            seen_one = 1
    if seen_one:
        values.append(current)
    return values
|
def getaddr(self, name):
    """Get a single address from a header, as a tuple.

    An example return value:
    ('Guido van Rossum', 'guido@cwi.nl')
    """
    addrs = self.getaddrlist(name)
    return addrs[0] if addrs else (None, None)
|
def getaddrlist(self, name):
    """Get a list of addresses from a header.

    Retrieves a list of addresses from a header, where each address is
    a tuple as returned by getaddr().  Scans all named headers, so it
    works properly with multiple To: or Cc: headers for example.
    """
    raw = []
    for h in self.getallmatchingheaders(name):
        # ' \t' restores the linear-whitespace test that appears
        # garbled (' DCTB ') in this dump -- see CPython's rfc822.
        if h[0] in ' \t':
            raw.append(h)
        else:
            if raw:
                raw.append(', ')
            i = h.find(':')
            if i > 0:
                raw.append(h[i + 1:])
    alladdrs = ''.join(raw)
    a = AddressList(alladdrs)
    return a.addresslist
|
def getdate(self, name):
    """Retrieve a date field from a header.

    Retrieves a date field from the named header, returning a tuple
    compatible with time.mktime(), or None when the header is absent.
    """
    try:
        value = self[name]
    except KeyError:
        return None
    return parsedate(value)
|
def getdate_tz(self, name):
    """Retrieve a date field from a header as a 10-tuple.

    The first 9 elements make up a tuple compatible with time.mktime(),
    and the 10th is the offset of the poster's time zone from GMT/UTC.
    Returns None when the header is absent.
    """
    try:
        value = self[name]
    except KeyError:
        return None
    return parsedate_tz(value)
|
def __len__(self):
    """Get the number of headers in a message."""
    return len(self.dict)
|
def __getitem__(self, name):
    """Get a specific header, as from a dictionary.

    Lookup is case-insensitive; raises KeyError when absent.
    """
    return self.dict[name.lower()]
|
def __setitem__(self, name, value):
    """Set the value of a header.

    Note: This is not a perfect inversion of __getitem__, because any
    changed headers get stuck at the end of the raw-headers list rather
    than where the altered header was.
    """
    # Remove any existing occurrences first.
    del self[name]
    self.dict[name.lower()] = value
    text = name + ': ' + value
    for piece in text.split('\n'):
        self.headers.append(piece + '\n')
|
def __delitem__(self, name):
    """Delete all occurrences of a specific header, if it is present."""
    name = name.lower()
    if name not in self.dict:
        return
    del self.dict[name]
    name = name + ':'
    n = len(name)
    doomed = []
    hit = 0
    # Collect indexes of matching headers plus their continuation lines.
    for i, line in enumerate(self.headers):
        if line[:n].lower() == name:
            hit = 1
        elif not line[:1].isspace():
            hit = 0
        if hit:
            doomed.append(i)
    # Delete from the end so earlier indexes stay valid.
    for i in reversed(doomed):
        del self.headers[i]
|
def has_key(self, name):
    """Determine whether a message contains the named header."""
    key = name.lower()
    return key in self.dict
|
def __contains__(self, name):
    """Determine whether a message contains the named header.

    Case-insensitive; enables `name in message`.
    """
    return (name.lower() in self.dict)
|
def keys(self):
    """Get all of a message's header field names (lowercased)."""
    return self.dict.keys()
|
def values(self):
    """Get all of a message's header field values."""
    return self.dict.values()
|
def items(self):
    """Get all of a message's headers.

    Returns a list of name, value tuples.
    """
    return self.dict.items()
|
def __init__(self, field):
    """Initialize a new instance.

    `field' is an unparsed address header field, containing one or
    more addresses.
    """
    self.specials = '()<>@,:;."[]'
    self.pos = 0
    # ' \t' restores the linear-whitespace set that appears garbled
    # (' DCTB ') in this dump -- see CPython's rfc822 source.
    self.LWS = ' \t'
    self.CR = '\r\n'
    self.atomends = self.specials + self.LWS + self.CR
    # '.' is a legal character inside phrases, so it is not a
    # phrase terminator even though it ends atoms.
    self.phraseends = self.atomends.replace('.', '')
    self.field = field
    self.commentlist = []
|
def gotonext(self):
    """Parse up to the start of the next address.

    Skips whitespace and collects any parenthesized comments.
    """
    skippable = self.LWS + '\n\r'
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in skippable:
            self.pos += 1
        elif ch == '(':
            self.commentlist.append(self.getcomment())
        else:
            break
|
def getaddrlist(self):
    """Parse all addresses.

    Returns a list containing all of the addresses.
    """
    result = []
    addr = self.getaddress()
    while addr:
        result.extend(addr)
        addr = self.getaddress()
    return result
|
def getaddress(self):
    """Parse the next address.

    Returns a list of (display-name, addr-spec) tuples -- usually 0 or
    1 entries, but possibly more for RFC 2822 group syntax.
    """
    self.commentlist = []
    self.gotonext()
    # Remember where we are so we can back up if the leading phrase
    # turns out to be the local part of a bare addr-spec.
    oldpos = self.pos
    oldcl = self.commentlist
    plist = self.getphraselist()
    self.gotonext()
    returnlist = []
    if (self.pos >= len(self.field)):
        # End of input: the phrase itself is the address.
        if plist:
            returnlist = [(' '.join(self.commentlist), plist[0])]
    elif (self.field[self.pos] in '.@'):
        # Bare addr-spec: rewind and re-parse from the saved position.
        self.pos = oldpos
        self.commentlist = oldcl
        addrspec = self.getaddrspec()
        returnlist = [(' '.join(self.commentlist), addrspec)]
    elif (self.field[self.pos] == ':'):
        # Group syntax: collect member addresses until ';'.
        returnlist = []
        fieldlen = len(self.field)
        self.pos += 1
        while (self.pos < len(self.field)):
            self.gotonext()
            if ((self.pos < fieldlen) and (self.field[self.pos] == ';')):
                self.pos += 1
                break
            returnlist = (returnlist + self.getaddress())
    elif (self.field[self.pos] == '<'):
        # phrase <route-addr>: the phrase is the display name.
        routeaddr = self.getrouteaddr()
        if self.commentlist:
            returnlist = [((((' '.join(plist) + ' (') + ' '.join(self.commentlist)) + ')'), routeaddr)]
        else:
            returnlist = [(' '.join(plist), routeaddr)]
    elif plist:
        # Phrase with no address part.
        returnlist = [(' '.join(self.commentlist), plist[0])]
    elif (self.field[self.pos] in self.specials):
        # Stray special character: skip it and move on.
        self.pos += 1
    self.gotonext()
    # Swallow a trailing comma separating this address from the next.
    if ((self.pos < len(self.field)) and (self.field[self.pos] == ',')):
        self.pos += 1
    return returnlist
|
def getrouteaddr(self):
    """Parse a route address (Return-path value).

    This method just skips all the route stuff and returns the
    addrspec.
    """
    if (self.field[self.pos] != '<'):
        return
    expectroute = 0
    self.pos += 1
    self.gotonext()
    adlist = ''
    while (self.pos < len(self.field)):
        if expectroute:
            # After '@' comes a route domain, which is discarded.
            self.getdomain()
            expectroute = 0
        elif (self.field[self.pos] == '>'):
            self.pos += 1
            break
        elif (self.field[self.pos] == '@'):
            self.pos += 1
            expectroute = 1
        elif (self.field[self.pos] == ':'):
            # ':' ends the route portion; the addr-spec follows.
            self.pos += 1
        else:
            adlist = self.getaddrspec()
            self.pos += 1
            break
    self.gotonext()
    return adlist
|
def getaddrspec(self):
    """Parse an RFC 2822 addr-spec.

    Returns the local-part (possibly with quoted pieces), plus
    '@domain' when a domain follows.
    """
    aslist = []
    self.gotonext()
    # Accumulate the local part: dots, quoted strings, and atoms.
    while (self.pos < len(self.field)):
        if (self.field[self.pos] == '.'):
            aslist.append('.')
            self.pos += 1
        elif (self.field[self.pos] == '"'):
            aslist.append(('"%s"' % self.getquote()))
        elif (self.field[self.pos] in self.atomends):
            break
        else:
            aslist.append(self.getatom())
    self.gotonext()
    # Without an '@', the local part alone is the addr-spec.
    if ((self.pos >= len(self.field)) or (self.field[self.pos] != '@')):
        return ''.join(aslist)
    aslist.append('@')
    self.pos += 1
    self.gotonext()
    return (''.join(aslist) + self.getdomain())
|
def getdomain(self):
    """Get the complete domain name from an address.

    Handles dotted atoms, domain literals ([...]), embedded comments,
    and linear whitespace.
    """
    sdlist = []
    while (self.pos < len(self.field)):
        if (self.field[self.pos] in self.LWS):
            self.pos += 1
        elif (self.field[self.pos] == '('):
            # Comments inside the domain are collected, not returned.
            self.commentlist.append(self.getcomment())
        elif (self.field[self.pos] == '['):
            sdlist.append(self.getdomainliteral())
        elif (self.field[self.pos] == '.'):
            self.pos += 1
            sdlist.append('.')
        elif (self.field[self.pos] in self.atomends):
            break
        else:
            sdlist.append(self.getatom())
    return ''.join(sdlist)
|
def getdelimited(self, beginchar, endchars, allowcomments=1):
    """Parse a header fragment delimited by special characters.

    `beginchar' is the start character for the fragment.  If self is
    not looking at an instance of `beginchar' then getdelimited returns
    the empty string.

    `endchars' is a sequence of allowable end-delimiting characters.
    Parsing stops when one of these is encountered.

    If `allowcomments' is non-zero, embedded RFC 2822 comments are
    allowed within the parsed fragment.
    """
    if self.field[self.pos] != beginchar:
        return ''
    pieces = ['']
    escaped = 0
    self.pos += 1
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if escaped == 1:
            # Previous char was a backslash: take this one literally.
            pieces.append(ch)
            escaped = 0
        elif ch in endchars:
            self.pos += 1
            break
        elif allowcomments and ch == '(':
            pieces.append(self.getcomment())
            # getcomment() already advanced self.pos.
            continue
        elif ch == '\\':
            escaped = 1
        else:
            pieces.append(ch)
        self.pos += 1
    return ''.join(pieces)
|
def getquote(self):
    """Get a quote-delimited fragment from self's field."""
    return self.getdelimited('"', '"\r', allowcomments=0)
|
def getcomment(self):
    """Get a parenthesis-delimited fragment from self's field."""
    return self.getdelimited('(', ')\r', allowcomments=1)
|
def getdomainliteral(self):
    """Parse an RFC 2822 domain-literal, keeping its brackets."""
    inner = self.getdelimited('[', ']\r', allowcomments=0)
    return '[%s]' % inner
|
def getatom(self, atomends=None):
    """Parse an RFC 2822 atom.

    Optional atomends specifies a different set of end token delimiters
    (the default is to use self.atomends).  This is used e.g. in
    getphraselist() since phrase endings must not include the `.'
    (which is legal in phrases).
    """
    if atomends is None:
        atomends = self.atomends
    chunks = ['']
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in atomends:
            break
        chunks.append(ch)
        self.pos += 1
    return ''.join(chunks)
|
def getphraselist(self):
    """Parse a sequence of RFC 2822 phrases.

    A phrase is a sequence of words, which are in turn either RFC 2822
    atoms or quoted-strings.  Phrases are canonicalized by squeezing
    all runs of continuous whitespace into one space.
    """
    words = []
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in self.LWS:
            self.pos += 1
        elif ch == '"':
            words.append(self.getquote())
        elif ch == '(':
            # Comments are collected separately, not part of the phrase.
            self.commentlist.append(self.getcomment())
        elif ch in self.phraseends:
            break
        else:
            words.append(self.getatom(self.phraseends))
    return words
|
def __init__(self, file, protocol=None):
    """This takes a file-like object for writing a pickle data stream.

    The optional protocol argument tells the pickler to use the given
    protocol; supported protocols are 0, 1, 2.  The default protocol is
    0, to be backwards compatible.  A negative protocol selects the
    highest protocol supported.

    The file parameter must have a write() method that accepts a single
    string argument.  It can thus be an open file object, a StringIO
    object, or any other custom object that meets this interface.
    """
    if protocol is None:
        protocol = 0
    if protocol < 0:
        protocol = HIGHEST_PROTOCOL
    elif not 0 <= protocol <= HIGHEST_PROTOCOL:
        raise ValueError('pickle protocol must be <= %d' % HIGHEST_PROTOCOL)
    self.write = file.write
    self.memo = {}
    self.proto = int(protocol)
    # Protocols >= 1 use the binary opcode set.
    self.bin = protocol >= 1
    self.fast = 0
|
def clear_memo(self):
    """Clears the pickler's "memo".

    The memo is the data structure that remembers which objects the
    pickler has already seen, so that shared or recursive objects are
    pickled by reference and not by value.  This method is useful when
    re-using picklers.
    """
    self.memo.clear()
|
def dump(self, obj):
    """Write a pickled representation of obj to the open file."""
    if (self.proto >= 2):
        # Protocol 2 streams begin with a PROTO opcode + version byte.
        self.write((PROTO + chr(self.proto)))
    self.save(obj)
    self.write(STOP)
|
def memoize(self, obj):
    """Store an object in the memo.

    No-op in "fast" mode, where memoization is disabled.
    """
    if self.fast:
        return
    assert id(obj) not in self.memo
    memo_len = len(self.memo)
    self.write(self.put(memo_len))
    # Keep a reference to obj so its id() stays valid for the
    # lifetime of the memo.
    self.memo[id(obj)] = (memo_len, obj)
|
def __init__(self, file):
    """This takes a file-like object for reading a pickle data stream.

    The protocol version of the pickle is detected automatically, so
    no proto argument is needed.

    The file-like object must have two methods, a read() method that
    takes an integer argument, and a readline() method that requires
    no arguments.  Both methods should return a string.
    """
    self.readline = file.readline
    self.read = file.read
    self.memo = {}
|
def load(self):
    """Read a pickled object representation from the open file.

    Return the reconstituted object hierarchy specified in the file.
    """
    # Unique sentinel marking group boundaries on the unpickling stack.
    self.mark = object()
    self.stack = []
    self.append = self.stack.append
    read = self.read
    dispatch = self.dispatch
    try:
        # Dispatch one opcode at a time until a STOP opcode raises _Stop.
        while True:
            key = read(1)
            dispatch[key](self)
    except _Stop as stop:
        return stop.value
|
def encode(self, input, errors='strict'):
    """Encodes the object input and returns a tuple (output
    object, length consumed).

    errors defines the error handling to apply.  It defaults to
    'strict' handling.

    The method may not store state in the Codec instance.  Use
    StreamCodec for codecs which have to keep state in order to make
    encoding/decoding efficient.

    The encoder must be able to handle zero length input and return an
    empty object of the output object type in this situation.
    Abstract: subclasses must override.
    """
    raise NotImplementedError
|
def decode(self, input, errors='strict'):
    """Decodes the object input and returns a tuple (output
    object, length consumed).

    input must be an object which provides the bf_getreadbuf buffer
    slot (e.g. strings, buffer objects, memory mapped files).

    errors defines the error handling to apply.  It defaults to
    'strict' handling.

    The method may not store state in the Codec instance.  The decoder
    must be able to handle zero length input and return an empty object
    of the output object type in this situation.
    Abstract: subclasses must override.
    """
    raise NotImplementedError
|
def __init__(self, errors='strict'):
    """Creates an IncrementalEncoder instance.

    The IncrementalEncoder may use different error handling schemes by
    providing the errors keyword argument.  See the module docstring
    for a list of possible values.
    """
    self.errors = errors
    # Input carried over between encode() calls (presumably used by
    # stateful subclasses -- not exercised here).
    self.buffer = ''
|
def encode(self, input, final=False):
    """Encodes input and returns the resulting object.

    Abstract: subclasses must override.
    """
    raise NotImplementedError
|
def getstate(self):
    """Return the current state of the encoder.

    The base encoder keeps no state, hence 0.
    """
    return 0
|
def __init__(self, errors='strict'):
    """Creates a IncrementalDecoder instance.

    The IncrementalDecoder may use different error handling schemes by
    providing the errors keyword argument.  See the module docstring
    for a list of possible values.
    """
    self.errors = errors
|
def decode(self, input, final=False):
    """Decodes input and returns the resulting object.

    Abstract: subclasses must override.
    """
    raise NotImplementedError
|
def getstate(self):
    """Return the current state of the decoder.

    This must be a (buffered_input, additional_state_info) tuple.
    buffered_input must be a bytes object containing bytes that were
    passed to decode() that have not yet been converted.
    additional_state_info must be a non-negative integer representing
    the state of the decoder WITHOUT yet having processed the contents
    of buffered_input.  In the initial state and after reset(),
    getstate() must return (b"", 0).
    """
    return ('', 0)
|
def __init__(self, stream, errors='strict'):
    """Creates a StreamWriter instance.

    stream must be a file-like object open for writing (binary) data.

    The StreamWriter may use different error handling schemes by
    providing the errors keyword argument.  These parameters are
    predefined:

     'strict' - raise a ValueError (or a subclass)
     'ignore' - ignore the character and continue with the next
     'replace' - replace with a suitable replacement character
     'xmlcharrefreplace' - Replace with the appropriate XML
                           character reference.
     'backslashreplace' - Replace with backslashed escape sequences
                          (only for encoding).

    The set of allowed parameter values can be extended via
    register_error.
    """
    self.stream = stream
    self.errors = errors
|
def write(self, object):
    """Writes the object's contents encoded to self.stream."""
    data, consumed = self.encode(object, self.errors)
    self.stream.write(data)
|
def writelines(self, list):
    """Writes the concatenated list of strings to the stream
    using .write().
    """
    self.write(''.join(list))
|
def reset(self):
    """Flush and reset the codec buffers used for keeping state.

    Calling this should leave the output in a clean state so fresh
    data can be appended without rescanning the whole stream.  The
    base class keeps no state, so nothing needs to be done.
    """
    return None
def __getattr__(self, name, getattr=getattr):
    """Delegate all other attribute lookups to the wrapped stream."""
    # The builtin getattr is bound as a default argument so the
    # lookup keeps working even during interpreter shutdown.
    return getattr(self.stream, name)
def __init__(self, stream, errors='strict'):
    """Create a StreamReader wrapping *stream*.

    stream must be a file-like object open for reading (binary)
    data.  The error handling scheme is chosen with *errors*;
    predefined values are 'strict', 'ignore' and 'replace'.
    Further values can be registered via codecs.register_error().
    """
    self.stream = stream
    self.errors = errors
    # Raw bytes read from the stream but not yet decoded.
    self.bytebuffer = ''
    # Decoded characters not yet handed out to the caller.
    self.charbuffer = ''
    # Pending decoded lines, managed by readline(); None when unused.
    self.linebuffer = None
def read(self, size=(-1), chars=(-1), firstline=False):
    """Decode data from self.stream and return the resulting object.

    chars     -- number of decoded characters to return at most;
                 fewer may come back if the stream runs dry.
                 Negative means no limit.
    size      -- approximate maximum number of bytes to read from
                 the stream per round for decoding purposes; -1
                 reads and decodes as much as possible.  It exists
                 to avoid decoding huge files in one step.
    firstline -- if true and a UnicodeDecodeError occurs after the
                 first line terminator, only the first line is
                 returned and the rest is kept for the next call.

    A greedy read strategy is used: as much data as the encoding
    and *size* allow is consumed, including any optional encoding
    endings or state markers present on the stream.
    """
    # Fold any lines stashed by readline() back into the character
    # buffer before doing bulk reads.
    if self.linebuffer:
        self.charbuffer = ''.join(self.linebuffer)
        self.linebuffer = None
    while True:
        # Stop as soon as the buffered characters satisfy the request.
        if chars < 0:
            if size < 0:
                if self.charbuffer:
                    break
            elif len(self.charbuffer) >= size:
                break
        elif len(self.charbuffer) >= chars:
            break
        # Pull fresh bytes and decode them together with leftovers
        # from the previous round.
        newdata = self.stream.read() if size < 0 else self.stream.read(size)
        data = self.bytebuffer + newdata
        try:
            newchars, decodedbytes = self.decode(data, self.errors)
        except UnicodeDecodeError as exc:
            if not firstline:
                raise
            # Decode up to the error; if that yields more than one
            # line, return the first and keep the rest buffered,
            # otherwise propagate the error.
            newchars, decodedbytes = self.decode(data[:exc.start], self.errors)
            if len(newchars.splitlines(True)) <= 1:
                raise
        # Remember trailing bytes the codec could not decode yet.
        self.bytebuffer = data[decodedbytes:]
        self.charbuffer += newchars
        if not newdata:
            break
    if chars < 0:
        # No limit: hand out everything buffered.
        result = self.charbuffer
        self.charbuffer = ''
    else:
        result = self.charbuffer[:chars]
        self.charbuffer = self.charbuffer[chars:]
    return result
def readline(self, size=None, keepends=True):
    """Read one line from the input stream and return it decoded.

    size, if given, is passed on as the size argument of read().
    When *keepends* is false the trailing line break is stripped.
    """
    # Fast path: hand out a line buffered by an earlier call.
    if self.linebuffer:
        line = self.linebuffer[0]
        del self.linebuffer[0]
        if len(self.linebuffer) == 1:
            # A single leftover entry collapses back into charbuffer.
            self.charbuffer = self.linebuffer[0]
            self.linebuffer = None
        if not keepends:
            line = line.splitlines(False)[0]
        return line
    readsize = size or 72
    line = ''
    while True:
        chunk = self.read(readsize, firstline=True)
        if chunk:
            # A chunk ending in '\r' may be half of a '\r\n' pair;
            # fetch one more character so it is not split in two.
            if chunk.endswith('\r'):
                chunk += self.read(size=1, chars=1)
        line += chunk
        lines = line.splitlines(True)
        if lines:
            if len(lines) > 1:
                # More than one line read: return the first, stash
                # the remainder for subsequent calls.
                line = lines[0]
                del lines[0]
                if len(lines) > 1:
                    lines[-1] += self.charbuffer
                    self.linebuffer = lines
                    self.charbuffer = None
                else:
                    self.charbuffer = lines[0] + self.charbuffer
                if not keepends:
                    line = line.splitlines(False)[0]
                break
            # Exactly one (possibly unterminated) line so far.
            full_line = lines[0]
            stripped_line = lines[0].splitlines(False)[0]
            if full_line != stripped_line:
                # The line is terminated: push anything beyond it
                # back into the character buffer and return it.
                self.charbuffer = ''.join(lines[1:]) + self.charbuffer
                line = full_line if keepends else stripped_line
                break
        if (not chunk) or (size is not None):
            # EOF reached, or the caller limited the read size.
            if line and not keepends:
                line = line.splitlines(False)[0]
            break
        # Grow the read size to amortize per-call overhead.
        if readsize < 8000:
            readsize *= 2
    return line
def readlines(self, sizehint=None, keepends=True):
    """Read all available input and return it as a list of lines.

    Line breaks come from the codec's decoder and are kept in the
    entries when *keepends* is true.  *sizehint* is accepted for
    API compatibility but ignored, since there is no efficient way
    to find the true end-of-line without decoding everything.
    """
    return self.read().splitlines(keepends)
def reset(self):
    """Reset the decode buffers used for keeping state.

    No stream repositioning takes place; this is primarily meant
    for recovering from decoding errors.
    """
    self.bytebuffer = ''
    self.charbuffer = u''
    self.linebuffer = None
def seek(self, offset, whence=0):
    """Reposition the underlying stream and reset the decode buffers.

    Buffered state would describe the old position, so it must be
    discarded after the seek.
    """
    self.stream.seek(offset, whence)
    self.reset()
def next(self):
    """Return the next decoded line, raising StopIteration at EOF."""
    line = self.readline()
    if not line:
        # An empty string from readline() means end of input.
        raise StopIteration
    return line
def __getattr__(self, name, getattr=getattr):
    """Delegate all other attribute lookups to the wrapped stream."""
    # The builtin getattr is bound as a default argument so the
    # lookup keeps working even during interpreter shutdown.
    return getattr(self.stream, name)
def __init__(self, stream, Reader, Writer, errors='strict'):
    """Create a StreamReaderWriter around *stream*.

    stream must be a Stream-like object.  Reader and Writer must be
    factory functions or classes providing the StreamReader resp.
    StreamWriter interface.  Error handling is done as defined for
    StreamWriter/StreamReader.
    """
    self.stream = stream
    # Reading and writing go through dedicated codec wrappers that
    # share the same underlying stream.
    self.reader = Reader(stream, errors)
    self.writer = Writer(stream, errors)
    self.errors = errors
def next(self):
    """Return the next decoded line from the underlying reader."""
    return self.reader.next()
def __getattr__(self, name, getattr=getattr):
    """Delegate all other attribute lookups to the wrapped stream."""
    # The builtin getattr is bound as a default argument so the
    # lookup keeps working even during interpreter shutdown.
    return getattr(self.stream, name)
def __init__(self, stream, encode, decode, Reader, Writer, errors='strict'):
    """Create a StreamRecoder implementing a two-way conversion.

    encode and decode work on the frontend (the input to .read()
    and the output of .write()), while Reader and Writer are
    factories working on the backend stream.  Unicode is used as
    the intermediate encoding, so e.g. transparent latin-1 <-> utf-8
    recodings are possible.

    stream must be a file-like object; encode/decode must adhere to
    the Codec interface; Reader/Writer must provide the
    StreamReader resp. StreamWriter interface.  Error handling is
    done as defined for StreamWriter/StreamReader.
    """
    self.stream = stream
    # Frontend codec functions.
    self.encode = encode
    self.decode = decode
    # Backend codec wrappers over the same stream.
    self.reader = Reader(stream, errors)
    self.writer = Writer(stream, errors)
    self.errors = errors
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.