desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
'Create a Windows process.
cmd: command to run
login: run as user 'Domain\nUser\nPassword' (domain, user name and
password separated by newlines)
hStdin, hStdout, hStderr:
handles for process I/O; default is caller\'s stdin,
stdout & stderr
show: wShowWindow (0=SW_HIDE, 1=SW_NORMAL, ...)
xy: window offset (x, y) of upper left corner in pixels
xySize: window size (width, height) in pixels
desktop: lpDesktop - name of desktop e.g. \'winsta0\default\'
None = inherit current desktop
\'\' = create new desktop if necessary
User calling login requires additional privileges:
Act as part of the operating system [not needed on Windows XP]
Increase quotas
Replace a process level token
Login string must EITHER be an administrator\'s account
(ordinary user can\'t access current desktop - see Microsoft
Q165194) OR use desktop=\'\' to run another desktop invisibly
(may be very slow to startup & finalize).'
def __init__(self, cmd, login=None, hStdin=None, hStdout=None, hStderr=None, show=1, xy=None, xySize=None, desktop=None):
    """Create a Windows process running `cmd`.

    login: optional "Domain\\nUser\\nPassword" string -- run as that user.
    hStdin, hStdout, hStderr: handles for process I/O; default is the
        caller's stdin/stdout/stderr.
    show: wShowWindow value (0=SW_HIDE, 1=SW_NORMAL, ...).
    xy: window offset (x, y) of the upper-left corner, in pixels.
    xySize: window size (width, height) in pixels.
    desktop: lpDesktop name; None inherits the current desktop,
        '' creates a new desktop if necessary.
    """
    si = win32process.STARTUPINFO()
    # BUGFIX: combine flag bits with OR. The original used XOR (^ / ^=),
    # which only works while the bit is not already set and would *clear*
    # a flag that happened to be set; OR expresses "set these bits".
    si.dwFlags = (win32con.STARTF_USESTDHANDLES | win32con.STARTF_USESHOWWINDOW)
    if (hStdin is None):
        si.hStdInput = win32api.GetStdHandle(win32api.STD_INPUT_HANDLE)
    else:
        si.hStdInput = hStdin
    if (hStdout is None):
        si.hStdOutput = win32api.GetStdHandle(win32api.STD_OUTPUT_HANDLE)
    else:
        si.hStdOutput = hStdout
    if (hStderr is None):
        si.hStdError = win32api.GetStdHandle(win32api.STD_ERROR_HANDLE)
    else:
        si.hStdError = hStderr
    si.wShowWindow = show
    if (xy is not None):
        (si.dwX, si.dwY) = xy
        si.dwFlags |= win32con.STARTF_USEPOSITION
    if (xySize is not None):
        (si.dwXSize, si.dwYSize) = xySize
        si.dwFlags |= win32con.STARTF_USESIZE
    if (desktop is not None):
        si.lpDesktop = desktop
    procArgs = (None, cmd, None, None, 1, win32process.CREATE_NEW_CONSOLE, None, None, si)
    if (login is not None):
        # Running as another user requires a token; impersonate around the
        # CreateProcessAsUser call and revert afterwards.
        hUser = logonUser(login)
        win32security.ImpersonateLoggedOnUser(hUser)
        procHandles = win32process.CreateProcessAsUser(hUser, *procArgs)
        win32security.RevertToSelf()
    else:
        procHandles = win32process.CreateProcess(*procArgs)
    (self.hProcess, self.hThread, self.PId, self.TId) = procHandles
|
'Wait for process to finish or for specified number of
milliseconds to elapse.'
def wait(self, mSec=None):
    """Wait for the process to exit, or for mSec milliseconds (None = forever)."""
    timeout = win32event.INFINITE if mSec is None else mSec
    return win32event.WaitForSingleObject(self.hProcess, timeout)
|
'Kill process. Try for an orderly shutdown via WM_CLOSE. If
still running after gracePeriod (5 sec. default), terminate.'
def kill(self, gracePeriod=5000):
    """Kill the process: try an orderly shutdown via WM_CLOSE first.

    gracePeriod: milliseconds to wait for the process to exit on its own
    before falling back to TerminateProcess (default 5 seconds).
    """
    # Post WM_CLOSE to every top-level window owned by this process.
    win32gui.EnumWindows(self.__close__, 0)
    if (self.wait(gracePeriod) != win32event.WAIT_OBJECT_0):
        win32process.TerminateProcess(self.hProcess, 0)
        # Brief pause to let the OS finish tearing the process down.
        win32api.Sleep(100)
|
'EnumWindows callback - sends WM_CLOSE to any window
owned by this process.'
def __close__(self, hwnd, dummy):
    """EnumWindows callback: post WM_CLOSE to windows owned by this process."""
    (threadId, processId) = win32process.GetWindowThreadProcessId(hwnd)
    if processId == self.PId:
        win32gui.PostMessage(hwnd, win32con.WM_CLOSE, 0, 0)
|
'Return process exit code.'
| def exitCode(self):
| return win32process.GetExitCodeProcess(self.hProcess)
|
'Should create a procedure called deleteme'
def help_nextset_setUp(self, cur):
    """Create a stored procedure `deleteme` returning two result sets:
    the row count of the booze table, then the names from booze."""
    prefix = self.table_prefix
    sql = ('\n create procedure deleteme as\n begin\n select count(*) from %sbooze\n select name from %sbooze\n end\n ' % (prefix, prefix))
    cur.execute(sql)
|
'If cleaning up is needed after nextSetTest'
def help_nextset_tearDown(self, cur):
    """Clean up after nextSetTest: drop the helper procedure, best-effort.

    Errors from the DROP are ignored, but only real errors: a bare
    ``except:`` (as before) would also swallow KeyboardInterrupt and
    SystemExit, so catch Exception instead.
    """
    try:
        cur.execute('drop procedure deleteme')
    except Exception:
        pass
|
'self.drivers should override this method to perform required setup
if any is necessary, such as creating the database.'
def setUp(self):
    """Driver-specific setup hook; the default does nothing.

    Subclasses override this to perform required setup, such as creating
    the test database.
    """
    pass
|
'self.drivers should override this method to perform required cleanup
if any is necessary, such as deleting the test database.
The default drops the tables that may be created.'
def tearDown(self):
    """Drop the tables the tests may have created, then close the connection.

    Drivers may override for extra cleanup (e.g. deleting the test database).
    """
    con = self._connect()
    try:
        cur = con.cursor()
        for ddl in (self.xddl1, self.xddl2):
            try:
                cur.execute(ddl)
                con.commit()
            except self.driver.Error:
                # The table may not exist if the test never created it.
                pass
    finally:
        con.close()
|
'Return a list of sql commands to setup the DB for the fetch
tests.'
| def _populate(self):
| populate = [("insert into %sbooze values ('%s')" % (self.table_prefix, s)) for s in self.samples]
return populate
|
'Should create a procedure called deleteme
that returns two result sets, first the
number of rows in booze then "name from booze"'
def help_nextset_setUp(self, cur):
    """Abstract helper: create a procedure `deleteme` returning two result
    sets (booze row count, then names). Drivers must override."""
    raise NotImplementedError('Helper not implemented')
|
'If cleaning up is needed after nextSetTest'
def help_nextset_tearDown(self, cur):
    """Abstract helper: clean up after nextSetTest. Drivers must override."""
    raise NotImplementedError('Helper not implemented')
|
'Returns a ComDate from a datetime in inputformat'
def COMDate(self, obj):
    """Abstract: return a COM date built from `obj` (subclass-specific input)."""
    raise NotImplementedError
|
'Returns an object of the wanted type from a ComDate'
def DateObjectFromCOMDate(self, comDate):
    """Abstract: return an object of the wanted type from a COM date."""
    raise NotImplementedError
|
'This function constructs an object holding a date value.'
def Date(self, year, month, day):
    """Abstract: construct an object holding a date value."""
    raise NotImplementedError
|
'This function constructs an object holding a time value.'
def Time(self, hour, minute, second):
    """Abstract: construct an object holding a time value."""
    raise NotImplementedError
|
'This function constructs an object holding a time stamp value.'
def Timestamp(self, year, month, day, hour, minute, second):
    """Abstract: construct an object holding a time stamp value."""
    raise NotImplementedError
|
'This function should return a string in the format \'YYYY-MM-dd HH:MM:SS:ms\' (ms optional)'
def DateObjectToIsoFormatString(self, obj):
    """Abstract: return 'YYYY-MM-dd HH:MM:SS:ms' (ms optional) for `obj`."""
    raise NotImplementedError
|
'Returns ticks since 1970'
def DateObjectFromCOMDate(self, comDate):
    """Convert a COM (OLE automation) date into a UTC time tuple.

    Accepts a datetime.datetime, an ADO DateTime, or a number.
    """
    # datetime objects convert directly.
    if isinstance(comDate, datetime.datetime):
        return comDate.timetuple()
    # ADO DateTime exposes its OLE-automation date via ToOADate().
    if isinstance(comDate, DateTime):
        oaDate = comDate.ToOADate()
    else:
        oaDate = float(comDate)
    # OLE dates count days from 1899-12-30; day 25569 is the Unix epoch.
    secondsperday = 86400
    return time.gmtime(secondsperday * (oaDate - 25569.0))
|
'close the underlying ADO Connection object,
rolling it back first if it supports transactions.'
def _closeAdoConnection(self):
    """Close the underlying ADO Connection, rolling back first if the
    provider supports transactions."""
    if self.supportsTransactions:
        # Discard any uncommitted work before closing.
        self.adoConn.RollbackTrans()
    self.adoConn.Close()
    if verbose:
        print ('adodbapi Closed connection at %X' % id(self))
|
'Close the connection now (rather than whenever __del__ is called).
The connection will be unusable from this point forward;
an Error (or subclass) exception will be raised if any operation is attempted with the connection.
The same applies to all cursor objects trying to use the connection.'
def close(self):
    """Close the connection now (rather than at __del__ time).

    The connection and all its cursors become unusable; further
    operations raise an Error subclass.
    """
    self.messages = []
    try:
        self._closeAdoConnection()
    except Exception as e:
        # Surface any COM failure as a DB-API InternalError.
        self._raiseConnectionError(InternalError, e)
    if (not onIronPython):
        # Balance the CoInitialize performed when the connection was opened.
        pythoncom.CoUninitialize()
|
'Commit any pending transaction to the database.
Note that if the database supports an auto-commit feature,
this must be initially off. An interface method may be provided to turn it back on.
Database modules that do not support transactions should implement this method with void functionality.'
def commit(self):
    """Commit the pending transaction (no-op when transactions are
    unsupported by the provider)."""
    self.messages = []
    try:
        if self.supportsTransactions:
            self.adoConn.CommitTrans()
            if (not (self.adoConn.Attributes & adXactCommitRetaining)):
                # Provider does not auto-start a new transaction after a
                # commit, so begin one explicitly.
                self.adoConn.BeginTrans()
    except Exception as e:
        self._raiseConnectionError(Error, e)
|
'In case a database does provide transactions this method causes the the database to roll back to
the start of any pending transaction. Closing a connection without committing the changes first will
cause an implicit rollback to be performed.
If the database does not support the functionality required by the method, the interface should
throw an exception in case the method is used.
The preferred approach is to not implement the method and thus have Python generate
an AttributeError in case the method is requested. This allows the programmer to check for database
capabilities using the standard hasattr() function.
For some dynamically configured interfaces it may not be appropriate to require dynamically making
the method available. These interfaces should then raise a NotSupportedError to indicate the
non-ability to perform the roll back when the method is invoked.'
def rollback(self):
    """Roll back the pending transaction.

    Raises NotSupportedError (via _raiseConnectionError) when the
    provider lacks transaction support.
    """
    self.messages = []
    if self.supportsTransactions:
        self.adoConn.RollbackTrans()
        if (not (self.adoConn.Attributes & adXactAbortRetaining)):
            # Provider does not auto-start a new transaction after an
            # abort, so begin one explicitly.
            self.adoConn.BeginTrans()
    else:
        self._raiseConnectionError(NotSupportedError, None)
|
'Return a new Cursor Object using the connection.'
def cursor(self):
    """Return a new Cursor object bound to this connection."""
    self.messages = []
    new_cursor = Cursor(self)
    return new_cursor
|
'Call a stored database procedure with the given name.
The sequence of parameters must contain one entry for each argument that the procedure expects.
The result of the call is returned as modified copy of the input sequence.
Input parameters are left untouched, output and input/output parameters replaced
with possibly new values.
The procedure may also provide a result set as output, which is
then available through the standard fetchXXX() methods.'
def callproc(self, procname, parameters=None):
    """Call a stored procedure; return the (possibly modified) parameters.

    A result set produced by the procedure is available via fetchXXX().
    """
    self.messages = []
    result = self._executeHelper(procname, True, parameters)
    return result
|
'Close the cursor now (rather than whenever __del__ is called).
The cursor will be unusable from this point forward; an Error (or subclass)
exception will be raised if any operation is attempted with the cursor.'
def close(self):
    """Close the cursor now; further operations on it will raise."""
    self.messages = []
    self.conn = None  # drop the back-reference to the connection
    recordset = self.rs
    if recordset and recordset.State != adStateClosed:
        recordset.Close()
    self.rs = None
|
'Prepare and execute a database operation (query or command).
Parameters may be provided as sequence or mapping and will be bound to variables in the operation.
Variables are specified in a database-specific notation
(see the module\'s paramstyle attribute for details). [5]
A reference to the operation will be retained by the cursor.
If the same operation object is passed in again, then the cursor
can optimize its behavior. This is most effective for algorithms
where the same operation is used, but different parameters are bound to it (many times).
For maximum efficiency when reusing an operation, it is best to use
the setinputsizes() method to specify the parameter types and sizes ahead of time.
It is legal for a parameter to not match the predefined information;
the implementation should compensate, possibly with a loss of efficiency.
The parameters may also be specified as list of tuples to e.g. insert multiple rows in
a single operation, but this kind of usage is depreciated: executemany() should be used instead.
Return values are not defined.
[5] The module will use the __getitem__ method of the parameters object to map either positions
(integers) or names (strings) to parameter values. This allows for both sequences and mappings
to be used as input.
The term "bound" refers to the process of binding an input value to a database execution buffer.
In practical terms, this means that the input value is directly used as a value in the operation.
The client should not be required to "escape" the value so that it can be used -- the value
should be equal to the actual database value.'
def execute(self, operation, parameters=None):
    """Prepare and execute a database operation (query or command).

    Parameters may be a sequence or a mapping; they are bound to
    variables in the operation. Return value is undefined.
    """
    self.messages = []
    # Delegate to the shared helper; False = not a stored procedure.
    self._executeHelper(operation, False, parameters)
|
'Prepare a database operation (query or command) and then execute it against all parameter sequences or mappings found in the sequence seq_of_parameters.
Return values are not defined.'
def executemany(self, operation, seq_of_parameters):
    """Execute `operation` once per parameter set in seq_of_parameters.

    Aggregates self.rowcount across executions; if any execution reports
    -1 (unknown), the total is reported as -1 as well.
    """
    self.messages = []
    total = 0
    countable = True
    for parameters in seq_of_parameters:
        self.execute(operation, parameters)
        if self.rowcount == -1:
            countable = False
        elif countable:
            total += self.rowcount
    self.rowcount = total if countable else -1
|
'Fetch rows from the recordset.
rows is None gets all (for fetchall).'
def _fetch(self, rows=None):
    """Fetch `rows` rows from the recordset (None means all, for fetchall).

    Returns a tuple of row tuples. On an exhausted recordset returns
    None when rows == 1 (fetchone contract) or [] otherwise. Raises via
    _raiseCursorError when the cursor/connection is unusable.
    """
    rs = self.rs
    if (self.conn == None):
        self._raiseCursorError(Error, None)
        return
    if ((not rs) or (rs.State == adStateClosed)):
        self._raiseCursorError(Error, None)
        return
    else:
        if ((rs.State == adStateClosed) or rs.BOF or rs.EOF):
            # Nothing left to read.
            if (rows == 1):
                return None
            else:
                return []
        if rows:
            ado_results = self.rs.GetRows(rows)
        else:
            # No count given: fetch everything remaining.
            ado_results = self.rs.GetRows()
        d = self.description
        returnList = []
        i = 0
        if onIronPython:
            # .NET GetRows yields a flat 2-D array indexed [column, row];
            # convert cell by cell using each column's type converter.
            type_codes = [descTuple[1] for descTuple in d]
            for j in range((len(ado_results) / len(d))):
                L = []
                for i in range(len(d)):
                    L.append(convertVariantToPython(ado_results[(i, j)], type_codes[i]))
                returnList.append(tuple(L))
            return tuple(returnList)
        else:
            # win32com GetRows returns a tuple of columns: convert each
            # column with its type converter, then transpose into rows.
            for descTuple in d:
                type_code = descTuple[1]
                returnList.append([convertVariantToPython(r, type_code) for r in ado_results[i]])
                i += 1
            return tuple(zip(*returnList))
|
'Fetch the next row of a query result set, returning a single sequence,
or None when no more data is available.
An Error (or subclass) exception is raised if the previous call to executeXXX()
did not produce any result set or no call was issued yet.'
def fetchone(self):
    """Return the next row of the result set, or None when exhausted."""
    self.messages = []
    rows = self._fetch(1)
    # _fetch(1) yields a one-row sequence, or a falsy value when exhausted.
    return rows[0] if rows else rows
|
'Fetch the next set of rows of a query result, returning a list of tuples. An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor\'s arraysize determines the number of rows to be fetched.
The method should try to fetch as many rows as indicated by the size parameter.
If this is not possible due to the specified number of rows not being available,
fewer rows may be returned.
An Error (or subclass) exception is raised if the previous call to executeXXX()
did not produce any result set or no call was issued yet.
Note there are performance considerations involved with the size parameter.
For optimal performance, it is usually best to use the arraysize attribute.
If the size parameter is used, then it is best for it to retain the same value from
one fetchmany() call to the next.'
def fetchmany(self, size=None):
    """Fetch the next `size` rows (default: self.arraysize) as a list."""
    self.messages = []
    batch = size
    if batch == None:
        # Fall back to the cursor's configured batch size.
        batch = self.arraysize
    return self._fetch(batch)
|
'Fetch all (remaining) rows of a query result, returning them as a sequence of sequences (e.g. a list of tuples).
Note that the cursor\'s arraysize attribute
can affect the performance of this operation.
An Error (or subclass) exception is raised if the previous call to executeXXX()
did not produce any result set or no call was issued yet.'
def fetchall(self):
    """Fetch all remaining rows of the query result."""
    self.messages = []
    # No row count means "everything remaining".
    return self._fetch()
|
'Make the cursor skip to the next available set, discarding any remaining rows from the current set.
If there are no more sets, the method returns None. Otherwise, it returns a true
value and subsequent calls to the fetch methods will return rows from the next result set.
An Error (or subclass) exception is raised if the previous call to executeXXX()
did not produce any result set or no call was issued yet.'
def nextset(self):
    """Skip to the next available result set.

    Returns True when another result set exists, None otherwise.
    Raises via _raiseCursorError when no prior execute produced a
    result set.
    """
    self.messages = []
    if (not self.conn):
        self._raiseCursorError(Error, None)
        return
    if (not self.rs):
        self._raiseCursorError(Error, None)
        return
    else:
        if onIronPython:
            try:
                rs = self.rs.NextRecordset()
            except TypeError:
                # IronPython signals "no more recordsets" with TypeError.
                rs = None
            except Error as exc:
                self._raiseCursorError(NotSupportedError, exc.args)
        else:
            # win32com returns a (recordset, records-affected) tuple.
            try:
                rsTuple = self.rs.NextRecordset()
            except pywintypes.com_error as exc:
                self._raiseCursorError(NotSupportedError, exc.args)
            rs = rsTuple[0]
        self._makeDescriptionFromRS(rs)
        if rs:
            return True
        return None
|
'don\'t make adoType a string :-)'
def __setitem__(self, adoType, cvtFn):
    """Register cvtFn for an ADO type code or an iterable of type codes.

    Note: do not pass a string -- it would be iterated character by
    character.
    """
    try:
        # An iterable of type codes registers the converter for each one.
        for code in adoType:
            dict.__setitem__(self, code, cvtFn)
    except TypeError:
        # A single (non-iterable) type code.
        dict.__setitem__(self, adoType, cvtFn)
|
'Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the \'bytes\' argument, a string of 16 bytes
in little-endian order as the \'bytes_le\' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the \'fields\' argument, or a single 128-bit integer as the \'int\'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID(\'{12345678-1234-5678-1234-567812345678}\')
UUID(\'12345678123456781234567812345678\')
UUID(\'urn:uuid:12345678-1234-5678-1234-567812345678\')
UUID(bytes=\'\x12\x34\x56\x78\'*4)
UUID(bytes_le=\'\x78\x56\x34\x12\x34\x12\x78\x56\' +
\'\x12\x34\x56\x78\x12\x34\x56\x78\')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of \'hex\', \'bytes\', \'bytes_le\', \'fields\', or \'int\' must
be given. The \'version\' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given \'hex\', \'bytes\', \'bytes_le\', \'fields\', or \'int\'.'
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, int=None, version=None):
    """Create a UUID from exactly one of hex, bytes, bytes_le, fields or int.

    hex: 32 hex digits (braces, hyphens and a urn:uuid: prefix allowed).
    bytes: 16 bytes big-endian; bytes_le: 16 bytes with the first three
    fields little-endian. fields: 6-tuple (time_low, time_mid,
    time_hi_version, clock_seq_hi_variant, clock_seq_low, node).
    int: a single 128-bit integer. version: if given (1..5), forces the
    RFC 4122 variant and version bits. (Python 2 code: long literals.)
    """
    if ([hex, bytes, bytes_le, fields, int].count(None) != 4):
        raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
    if (hex is not None):
        # Strip optional URN prefix, braces and hyphens before parsing.
        hex = hex.replace('urn:', '').replace('uuid:', '')
        hex = hex.strip('{}').replace('-', '')
        if (len(hex) != 32):
            raise ValueError('badly formed hexadecimal UUID string')
        int = long(hex, 16)
    if (bytes_le is not None):
        if (len(bytes_le) != 16):
            raise ValueError('bytes_le is not a 16-char string')
        # Byte-swap the three leading little-endian fields to big-endian.
        bytes = ((((((((bytes_le[3] + bytes_le[2]) + bytes_le[1]) + bytes_le[0]) + bytes_le[5]) + bytes_le[4]) + bytes_le[7]) + bytes_le[6]) + bytes_le[8:])
    if (bytes is not None):
        if (len(bytes) != 16):
            raise ValueError('bytes is not a 16-char string')
        int = long((('%02x' * 16) % tuple(map(ord, bytes))), 16)
    if (fields is not None):
        if (len(fields) != 6):
            raise ValueError('fields is not a 6-tuple')
        (time_low, time_mid, time_hi_version, clock_seq_hi_variant, clock_seq_low, node) = fields
        # Range-check each field against its RFC 4122 bit width.
        if (not (0 <= time_low < (1 << 32L))):
            raise ValueError('field 1 out of range (need a 32-bit value)')
        if (not (0 <= time_mid < (1 << 16L))):
            raise ValueError('field 2 out of range (need a 16-bit value)')
        if (not (0 <= time_hi_version < (1 << 16L))):
            raise ValueError('field 3 out of range (need a 16-bit value)')
        if (not (0 <= clock_seq_hi_variant < (1 << 8L))):
            raise ValueError('field 4 out of range (need an 8-bit value)')
        if (not (0 <= clock_seq_low < (1 << 8L))):
            raise ValueError('field 5 out of range (need an 8-bit value)')
        if (not (0 <= node < (1 << 48L))):
            raise ValueError('field 6 out of range (need a 48-bit value)')
        # Pack the six fields into the single 128-bit integer form.
        clock_seq = ((clock_seq_hi_variant << 8L) | clock_seq_low)
        int = (((((time_low << 96L) | (time_mid << 80L)) | (time_hi_version << 64L)) | (clock_seq << 48L)) | node)
    if (int is not None):
        if (not (0 <= int < (1 << 128L))):
            raise ValueError('int is out of range (need a 128-bit value)')
    if (version is not None):
        if (not (1 <= version <= 5)):
            raise ValueError('illegal version number')
        # Clear the two variant bits, then set variant RFC 4122 (10xx).
        int &= (~ (49152 << 48L))
        int |= (32768 << 48L)
        # Clear the 4-bit version nibble, then set the requested version.
        int &= (~ (61440 << 64L))
        int |= (version << 76L)
    # Write through __dict__ -- presumably bypassing an overridden
    # __setattr__ that makes instances immutable; verify in the class body.
    self.__dict__['int'] = int
|
'Create a new completer for the command line.
Completer([namespace]) -> completer instance.
If unspecified, the default namespace where completions are performed
is __main__ (technically, __main__.__dict__). Namespaces should be
given as dictionaries.
Completer instances should be used as the completion mechanism of
readline via the set_completer() call:
readline.set_completer(Completer(my_namespace).complete)'
def __init__(self, namespace=None):
    """Create a completer for the command line.

    namespace: dictionary in which completions are performed; defaults
    to __main__.__dict__. Raises TypeError for a non-dict namespace.
    """
    if (namespace and (not isinstance(namespace, dict))):
        # Py3-compatible raise form; the old `raise TypeError, msg`
        # syntax is Python 2 only. Message text unchanged.
        raise TypeError('namespace must be a dictionary')
    # Defer binding __main__.__dict__ until completion time so the
    # completer tracks whatever __main__ contains then.
    if (namespace is None):
        self.use_main_ns = 1
    else:
        self.use_main_ns = 0
        self.namespace = namespace
|
'Return the next possible completion for \'text\'.
This is called successively with state == 0, 1, 2, ... until it
returns None. The completion should begin with \'text\'.'
def complete(self, text, state):
    """Return the state-th completion for text, or None when exhausted.

    Called successively with state == 0, 1, 2, ... until None is
    returned; each completion begins with text.
    """
    if self.use_main_ns:
        # Re-bind each call so new names in __main__ are picked up.
        self.namespace = __main__.__dict__
    if state == 0:
        # First call for this text: compute and cache the match list.
        compute = self.attr_matches if '.' in text else self.global_matches
        self.matches = compute(text)
    try:
        return self.matches[state]
    except IndexError:
        return None
|
'Compute matches when text is a simple name.
Return a list of all keywords, built-in functions and names currently
defined in self.namespace that match.'
def global_matches(self, text):
    """Return keywords, builtins and namespace names starting with text."""
    import keyword
    matches = []
    # '__builtins__' is noise; pre-seed it so it is never offered.
    seen = {'__builtins__'}
    prefix_len = len(text)
    for kw in keyword.kwlist:
        if kw[:prefix_len] == text:
            seen.add(kw)
            matches.append(kw)
    for scope in [self.namespace, __builtin__.__dict__]:
        for name, value in scope.items():
            if name[:prefix_len] == text and name not in seen:
                seen.add(name)
                # Callables get a trailing '(' (or similar) appended.
                matches.append(self._callable_postfix(value, name))
    return matches
|
'Compute matches when text contains a dot.
Assuming the text is of the form NAME.NAME....[NAME], and is
evaluable in self.namespace, it will be evaluated and its attributes
(as revealed by dir()) are used as possible completions. (For class
instances, class members are also considered.)
WARNING: this can still invoke arbitrary C code, if an object
with a __getattr__ hook is evaluated.'
def attr_matches(self, text):
    """Compute completions for dotted text of the form NAME.NAME...[prefix].

    Evaluates everything before the last dot in self.namespace and uses
    the result's attributes (plus class members for instances) as
    candidates. WARNING: eval/getattr here can invoke arbitrary code via
    __getattr__ hooks.
    """
    import re
    m = re.match('(\\w+(\\.\\w+)*)\\.(\\w*)', text)
    if (not m):
        return []
    (expr, attr) = m.group(1, 3)
    try:
        thisobject = eval(expr, self.namespace)
    except Exception:
        # Anything unevaluable simply has no completions.
        return []
    words = set(dir(thisobject))
    words.discard('__builtins__')
    if hasattr(thisobject, '__class__'):
        words.add('__class__')
        words.update(get_class_members(thisobject.__class__))
    matches = []
    n = len(attr)
    for word in words:
        if (word[:n] == attr):
            try:
                val = getattr(thisobject, word)
            except Exception:
                # Attribute access can raise (properties/descriptors); skip.
                continue
            word = self._callable_postfix(val, ('%s.%s' % (expr, word)))
            matches.append(word)
    matches.sort()
    return matches
|
'Initialize the class instance and read the headers.'
def __init__(self, fp, seekable=1):
    """Initialize the instance and read the headers from file object fp.

    seekable=1 probes fp.tell(); a file that cannot tell/seek disables
    the start-of-headers / start-of-body offset bookkeeping.
    """
    if (seekable == 1):
        # Exercise tell() to discover whether fp really supports seeking.
        try:
            fp.tell()
        except (AttributeError, IOError):
            seekable = 0
    self.fp = fp
    self.seekable = seekable
    self.startofheaders = None  # file offset of the first header line
    self.startofbody = None  # file offset just past the header block
    if self.seekable:
        try:
            self.startofheaders = self.fp.tell()
        except IOError:
            self.seekable = 0
    self.readheaders()
    if self.seekable:
        try:
            self.startofbody = self.fp.tell()
        except IOError:
            self.seekable = 0
|
'Rewind the file to the start of the body (if seekable).'
def rewindbody(self):
    """Rewind the file to the start of the message body.

    Raises IOError when the underlying file is not seekable.
    """
    if (not self.seekable):
        # Py3-compatible raise form; the old `raise IOError, msg` syntax
        # is Python 2 only. Message text unchanged.
        raise IOError('unseekable file')
    self.fp.seek(self.startofbody)
|
'Read header lines.
Read header lines up to the entirely blank line that terminates them.
The (normally blank) line that ends the headers is skipped, but not
included in the returned list. If a non-header line ends the headers,
(which is an error), an attempt is made to backspace over it; it is
never included in the returned list.
The variable self.status is set to the empty string if all went well,
otherwise it is an error message. The variable self.headers is a
completely uninterpreted list of lines contained in the header (so
printing them will reproduce the header exactly as it appears in the
file).'
def readheaders(self):
    """Read header lines up to the blank line that terminates them.

    Sets self.status ('' on success, else an error message),
    self.headers (raw header lines), self.dict (name -> value) and
    self.unixfrom. A non-header line ending the headers is pushed back
    when possible and never included in self.headers.
    """
    self.dict = {}
    self.unixfrom = ''
    self.headers = lst = []
    self.status = ''
    headerseen = ''
    firstline = 1
    startofline = unread = tell = None
    # Prefer an unread() pushback hook; fall back to tell()/seek().
    if hasattr(self.fp, 'unread'):
        unread = self.fp.unread
    elif self.seekable:
        tell = self.fp.tell
    while 1:
        if tell:
            try:
                startofline = tell()
            except IOError:
                startofline = tell = None
                self.seekable = 0
        line = self.fp.readline()
        if (not line):
            self.status = 'EOF in headers'
            break
        # A Unix 'From ' envelope line may precede the headers proper.
        if (firstline and line.startswith('From ')):
            self.unixfrom = (self.unixfrom + line)
            continue
        firstline = 0
        # NOTE(review): ' DCTB ' below looks like an extraction-corrupted
        # ' \t' (space + tab, i.e. linear whitespace) -- confirm against
        # the upstream rfc822.py source.
        if (headerseen and (line[0] in ' DCTB ')):
            # Continuation line: fold into the previous header's value.
            lst.append(line)
            x = ((self.dict[headerseen] + '\n ') + line.strip())
            self.dict[headerseen] = x.strip()
            continue
        elif self.iscomment(line):
            continue
        elif self.islast(line):
            break
        headerseen = self.isheader(line)
        if headerseen:
            lst.append(line)
            self.dict[headerseen] = line[(len(headerseen) + 1):].strip()
            continue
        elif (headerseen is not None):
            # Not a header but one was seen recently; skip quietly.
            continue
        else:
            # Non-header line where a header was expected: record the
            # error and try to push the line back for the body reader.
            if (not self.dict):
                self.status = 'No headers'
            else:
                self.status = 'Non-header line where header expected'
            if unread:
                unread(line)
            elif tell:
                self.fp.seek(startofline)
            else:
                self.status = (self.status + '; bad seek')
            break
|
'Determine whether a given line is a legal header.
This method should return the header name, suitably canonicalized.
You may override this method in order to use Message parsing on tagged
data in RFC 2822-like formats with special header formats.'
def isheader(self, line):
    """Return the lowercased header name if line starts a header, else None."""
    colon = line.find(':')
    if colon > -1:
        return line[:colon].lower()
    return None
|
'Determine whether a line is a legal end of RFC 2822 headers.
You may override this method if your application wants to bend the
rules, e.g. to strip trailing whitespace, or to recognize MH template
separators (\'--------\'). For convenience (e.g. for code reading from
sockets) a line consisting of \r\n also matches.'
def islast(self, line):
    """Return True if line legally terminates the headers (a blank line)."""
    return line in _blanklines
|
'Determine whether a line should be skipped entirely.
You may override this method in order to use Message parsing on tagged
data in RFC 2822-like formats that support embedded comments or
free-text data.'
def iscomment(self, line):
    """Return True if the line should be skipped entirely; default: never."""
    return False
|
'Find all header lines matching a given header name.
Look through the list of headers and find all lines matching a given
header name (and their continuation lines). A list of the lines is
returned, without interpretation. If the header does not occur, an
empty list is returned. If the header occurs multiple times, all
occurrences are returned. Case is not important in the header name.'
def getallmatchingheaders(self, name):
    """Return all raw header lines matching name, with continuations.

    Case-insensitive; returns [] when the header does not occur.
    """
    target = name.lower() + ':'
    tlen = len(target)
    matching = []
    in_match = 0
    for line in self.headers:
        if line[:tlen].lower() == target:
            in_match = 1
        elif not line[:1].isspace():
            # A new non-continuation line ends the current match run.
            in_match = 0
        if in_match:
            matching.append(line)
    return matching
|
'Get the first header line matching name.
This is similar to getallmatchingheaders, but it returns only the
first matching header (and its continuation lines).'
def getfirstmatchingheader(self, name):
    """Return the first header line matching name plus its continuations."""
    target = name.lower() + ':'
    tlen = len(target)
    found = []
    in_match = 0
    for line in self.headers:
        if in_match:
            # A non-continuation line ends the first match.
            if not line[:1].isspace():
                break
        elif line[:tlen].lower() == target:
            in_match = 1
        if in_match:
            found.append(line)
    return found
|
'A higher-level interface to getfirstmatchingheader().
Return a string containing the literal text of the header but with the
keyword stripped. All leading, trailing and embedded whitespace is
kept in the string, however. Return None if the header does not
occur.'
def getrawheader(self, name):
    """Return the literal header text with the keyword stripped, or None.

    Leading, trailing and embedded whitespace is preserved.
    """
    lines = self.getfirstmatchingheader(name)
    if not lines:
        return None
    # Drop 'name:' from the first line; keep everything else verbatim.
    lines[0] = lines[0][len(name) + 1:]
    return ''.join(lines)
|
'Get the header value for a name.
This is the normal interface: it returns a stripped version of the
header value for a given header name, or None if it doesn\'t exist.
This uses the dictionary version which finds the *last* such header.'
def getheader(self, name, default=None):
    """Return the stripped value of the *last* header named name, or default."""
    key = name.lower()
    return self.dict.get(key, default)
|
'Get all values for a header.
This returns a list of values for headers given more than once; each
value in the result list is stripped in the same way as the result of
getheader(). If the header is not given, return an empty list.'
def getheaders(self, name):
    """Return one stripped value per occurrence of header name ([] if none)."""
    values = []
    current = ''
    have = 0
    for line in self.getallmatchingheaders(name):
        if line[0].isspace():
            # Continuation: fold into the value being accumulated.
            if current:
                current = '%s\n %s' % (current, line.strip())
            else:
                current = line.strip()
        else:
            if have:
                values.append(current)
            current = line[line.find(':') + 1:].strip()
            have = 1
    if have:
        values.append(current)
    return values
|
'Get a single address from a header, as a tuple.
An example return value:
(\'Guido van Rossum\', \'[email protected]\')'
def getaddr(self, name):
    """Return (realname, email) for the first address in header name.

    Returns (None, None) when the header has no addresses.
    """
    addresses = self.getaddrlist(name)
    return addresses[0] if addresses else (None, None)
|
'Get a list of addresses from a header.
Retrieves a list of addresses from a header, where each address is a
tuple as returned by getaddr(). Scans all named headers, so it works
properly with multiple To: or Cc: headers for example.'
def getaddrlist(self, name):
    """Return all addresses under header name as (realname, email) tuples.

    Scans every matching header (so multiple To:/Cc: lines work) and
    feeds the joined text to AddressList for parsing.
    """
    raw = []
    for h in self.getallmatchingheaders(name):
        # NOTE(review): ' DCTB ' looks like an extraction-corrupted ' \t'
        # (space + tab) -- confirm against the upstream rfc822.py source.
        if (h[0] in ' DCTB '):
            # Continuation line: keep as-is.
            raw.append(h)
        else:
            # New header line: separate from the previous one, then strip
            # the 'Name:' prefix.
            if raw:
                raw.append(', ')
            i = h.find(':')
            if (i > 0):
                addr = h[(i + 1):]
            raw.append(addr)
    alladdrs = ''.join(raw)
    a = AddressList(alladdrs)
    return a.addresslist
|
'Retrieve a date field from a header.
Retrieves a date field from the named header, returning a tuple
compatible with time.mktime().'
def getdate(self, name):
    """Return the named header parsed as a time tuple, or None if absent."""
    try:
        value = self[name]
    except KeyError:
        return None
    return parsedate(value)
|
'Retrieve a date field from a header as a 10-tuple.
The first 9 elements make up a tuple compatible with time.mktime(),
and the 10th is the offset of the poster\'s time zone from GMT/UTC.'
def getdate_tz(self, name):
    """Return the named header as a 10-tuple (time tuple + tz offset), or None."""
    try:
        value = self[name]
    except KeyError:
        return None
    return parsedate_tz(value)
|
'Get the number of headers in a message.'
def __len__(self):
    """Return the number of distinct headers in the message."""
    return len(self.dict)
|
'Get a specific header, as from a dictionary.'
def __getitem__(self, name):
    """Return the named header's value (case-insensitive); KeyError if absent."""
    key = name.lower()
    return self.dict[key]
|
'Set the value of a header.
Note: This is not a perfect inversion of __getitem__, because any
changed headers get stuck at the end of the raw-headers list rather
than where the altered header was.'
def __setitem__(self, name, value):
    """Replace all occurrences of header name with a single new value.

    Note: the new header is appended at the end of the raw header list,
    not where the old one was.
    """
    del self[name]  # remove any existing occurrences first
    self.dict[name.lower()] = value
    text = name + ': ' + value
    for part in text.split('\n'):
        self.headers.append(part + '\n')
|
'Delete all occurrences of a specific header, if it is present.'
def __delitem__(self, name):
    """Delete every occurrence of header name (and its continuation lines)."""
    name = name.lower()
    if name not in self.dict:
        return
    del self.dict[name]
    prefix = name + ':'
    plen = len(prefix)
    doomed = []
    in_match = 0
    for index, line in enumerate(self.headers):
        if line[:plen].lower() == prefix:
            in_match = 1
        elif not line[:1].isspace():
            # A new non-continuation header ends the match run.
            in_match = 0
        if in_match:
            doomed.append(index)
    # Delete back-to-front so earlier indices stay valid.
    for index in reversed(doomed):
        del self.headers[index]
|
'Determine whether a message contains the named header.'
def has_key(self, name):
    """Return whether the message contains the named header (case-insensitive)."""
    key = name.lower()
    return key in self.dict
|
'Determine whether a message contains the named header.'
def __contains__(self, name):
    """Support 'name in message' (case-insensitive header lookup)."""
    key = name.lower()
    return key in self.dict
|
'Get all of a message\'s header field names.'
def keys(self):
    """Return all of the message's header field names."""
    headers = self.dict
    return headers.keys()
|
'Get all of a message\'s header field values.'
def values(self):
    """Return all of the message's header field values."""
    headers = self.dict
    return headers.values()
|
'Get all of a message\'s headers.
Returns a list of name, value tuples.'
def items(self):
    """Return all headers as a collection of (name, value) pairs."""
    headers = self.dict
    return headers.items()
|
'Initialize a new instance.
`field\' is an unparsed address header field, containing one or more
addresses.'
def __init__(self, field):
    """Initialize a parser over `field`, an unparsed address header
    containing one or more addresses."""
    self.specials = '()<>@,:;."[]'  # characters that terminate a token
    self.pos = 0  # current parse position within field
    # NOTE(review): ' DCTB ' looks like an extraction-corrupted ' \t'
    # (space + tab, i.e. linear whitespace) -- confirm against the
    # upstream rfc822.py source.
    self.LWS = ' DCTB '
    self.CR = '\r\n'
    self.atomends = ((self.specials + self.LWS) + self.CR)
    # Phrases additionally allow '.' (e.g. in real names).
    self.phraseends = self.atomends.replace('.', '')
    self.field = field
    self.commentlist = []  # comments collected while scanning
|
'Parse up to the start of the next address.'
def gotonext(self):
    """Advance self.pos past whitespace and comments to the next token."""
    field = self.field
    while self.pos < len(field):
        ch = field[self.pos]
        if ch in (self.LWS + '\n\r'):
            self.pos = self.pos + 1
        elif ch == '(':
            # Comments are collected, not silently discarded.
            self.commentlist.append(self.getcomment())
        else:
            break
|
'Parse all addresses.
Returns a list containing all of the addresses.'
def getaddrlist(self):
    """Parse and return every address in the field as a flat list of tuples."""
    collected = []
    address = self.getaddress()
    while address:
        # getaddress returns a (possibly multi-element) list per call.
        collected += address
        address = self.getaddress()
    return collected
|
'Parse the next address.'
def getaddress(self):
    """Parse and return the next address as a list of (comment, addr) tuples.

    Returns [] when no address could be parsed at the current position.
    """
    self.commentlist = []
    self.gotonext()
    # Remember the position in case the phrase turns out to be the start
    # of a plain addr-spec and we must reparse from here.
    oldpos = self.pos
    oldcl = self.commentlist
    plist = self.getphraselist()
    self.gotonext()
    returnlist = []
    if (self.pos >= len(self.field)):
        # End of field: a bare phrase is itself the address.
        if plist:
            returnlist = [(' '.join(self.commentlist), plist[0])]
    elif (self.field[self.pos] in '.@'):
        # The "phrase" was really the local part of an addr-spec:
        # back up and reparse as an address.
        self.pos = oldpos
        self.commentlist = oldcl
        addrspec = self.getaddrspec()
        returnlist = [(' '.join(self.commentlist), addrspec)]
    elif (self.field[self.pos] == ':'):
        # RFC 2822 group: collect member addresses until ';'.
        returnlist = []
        fieldlen = len(self.field)
        self.pos += 1
        while (self.pos < len(self.field)):
            self.gotonext()
            if ((self.pos < fieldlen) and (self.field[self.pos] == ';')):
                self.pos += 1
                break
            returnlist = (returnlist + self.getaddress())
    elif (self.field[self.pos] == '<'):
        # Angle-bracketed route address; the phrase is the real name.
        routeaddr = self.getrouteaddr()
        if self.commentlist:
            returnlist = [((((' '.join(plist) + ' (') + ' '.join(self.commentlist)) + ')'), routeaddr)]
        else:
            returnlist = [(' '.join(plist), routeaddr)]
    elif plist:
        returnlist = [(' '.join(self.commentlist), plist[0])]
    elif (self.field[self.pos] in self.specials):
        # Skip a stray special character.
        self.pos += 1
    self.gotonext()
    # Consume a trailing comma separating this address from the next.
    if ((self.pos < len(self.field)) and (self.field[self.pos] == ',')):
        self.pos += 1
    return returnlist
|
'Parse a route address (Return-path value).
This method just skips all the route stuff and returns the addrspec.'
def getrouteaddr(self):
    """Parse a route address (Return-path value).

    Skips all the source-route stuff and returns just the addrspec.
    """
    if (self.field[self.pos] != '<'):
        return
    expectroute = 0
    self.pos += 1
    self.gotonext()
    adlist = ''
    while (self.pos < len(self.field)):
        if expectroute:
            # After '@' in a route: consume (and discard) a domain.
            self.getdomain()
            expectroute = 0
        elif (self.field[self.pos] == '>'):
            self.pos += 1
            break
        elif (self.field[self.pos] == '@'):
            self.pos += 1
            expectroute = 1
        elif (self.field[self.pos] == ':'):
            # ':' ends the route portion; the addrspec follows.
            self.pos += 1
        else:
            adlist = self.getaddrspec()
            self.pos += 1
            break
        self.gotonext()
    return adlist
|
'Parse an RFC 2822 addr-spec.'
def getaddrspec(self):
    """Parse an RFC 2822 addr-spec (local-part@domain)."""
    aslist = []
    self.gotonext()
    # Accumulate the local part: dots, quoted strings, and atoms.
    while (self.pos < len(self.field)):
        if (self.field[self.pos] == '.'):
            aslist.append('.')
            self.pos += 1
        elif (self.field[self.pos] == '"'):
            aslist.append(('"%s"' % self.getquote()))
        elif (self.field[self.pos] in self.atomends):
            break
        else:
            aslist.append(self.getatom())
    self.gotonext()
    if ((self.pos >= len(self.field)) or (self.field[self.pos] != '@')):
        # No domain part; return just the local part collected so far.
        return ''.join(aslist)
    aslist.append('@')
    self.pos += 1
    self.gotonext()
    return (''.join(aslist) + self.getdomain())
|
'Get the complete domain name from an address.'
def getdomain(self):
    """Get the complete domain name from an address."""
    sdlist = []
    while (self.pos < len(self.field)):
        if (self.field[self.pos] in self.LWS):
            self.pos += 1
        elif (self.field[self.pos] == '('):
            # Comments may appear inside a domain; collect them.
            self.commentlist.append(self.getcomment())
        elif (self.field[self.pos] == '['):
            sdlist.append(self.getdomainliteral())
        elif (self.field[self.pos] == '.'):
            self.pos += 1
            sdlist.append('.')
        elif (self.field[self.pos] in self.atomends):
            break
        else:
            sdlist.append(self.getatom())
    return ''.join(sdlist)
|
'Parse a header fragment delimited by special characters.
`beginchar\' is the start character for the fragment. If self is not
looking at an instance of `beginchar\' then getdelimited returns the
empty string.
`endchars\' is a sequence of allowable end-delimiting characters.
Parsing stops when one of these is encountered.
If `allowcomments\' is non-zero, embedded RFC 2822 comments are allowed
within the parsed fragment.'
def getdelimited(self, beginchar, endchars, allowcomments=1):
    """Parse a header fragment delimited by special characters.

    beginchar is the start character; if self is not looking at it,
    return the empty string.  endchars is a sequence of allowed end
    delimiters.  If allowcomments is non-zero, embedded RFC 2822
    comments are permitted within the fragment.
    """
    if (self.field[self.pos] != beginchar):
        return ''
    slist = ['']
    quote = 0
    self.pos += 1
    while (self.pos < len(self.field)):
        if (quote == 1):
            # Previous char was a backslash: take this one literally.
            slist.append(self.field[self.pos])
            quote = 0
        elif (self.field[self.pos] in endchars):
            self.pos += 1
            break
        elif (allowcomments and (self.field[self.pos] == '(')):
            slist.append(self.getcomment())
            # getcomment() already advanced self.pos; skip the shared
            # increment below.
            continue
        elif (self.field[self.pos] == '\\'):
            quote = 1
        else:
            slist.append(self.field[self.pos])
        self.pos += 1
    return ''.join(slist)
|
'Get a quote-delimited fragment from self\'s field.'
def getquote(self):
    """Get a quote-delimited fragment from self's field."""
    return self.getdelimited('"', '"\r', 0)
|
'Get a parenthesis-delimited fragment from self\'s field.'
def getcomment(self):
    """Get a parenthesis-delimited fragment from self's field."""
    return self.getdelimited('(', ')\r', 1)
|
'Parse an RFC 2822 domain-literal.'
def getdomainliteral(self):
    """Parse an RFC 2822 domain-literal, returned with its brackets."""
    return ('[%s]' % self.getdelimited('[', ']\r', 0))
|
'Parse an RFC 2822 atom.
Optional atomends specifies a different set of end token delimiters
(the default is to use self.atomends). This is used e.g. in
getphraselist() since phrase endings must not include the `.\' (which
is legal in phrases).'
def getatom(self, atomends=None):
    """Collect and return an RFC 2822 atom.

    Optional atomends supplies an alternate set of terminating
    characters (default: self.atomends); getphraselist() uses this
    because '.' must not end a phrase word.
    """
    if atomends is None:
        atomends = self.atomends
    collected = []
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in atomends:
            break
        collected.append(ch)
        self.pos += 1
    return ''.join(collected)
|
'Parse a sequence of RFC 2822 phrases.
A phrase is a sequence of words, which are in turn either RFC 2822
atoms or quoted-strings. Phrases are canonicalized by squeezing all
runs of continuous whitespace into one space.'
def getphraselist(self):
    """Parse a sequence of RFC 2822 phrase words (atoms or quoted
    strings), skipping whitespace and diverting comments into
    self.commentlist.  Returns the list of words."""
    words = []
    while self.pos < len(self.field):
        ch = self.field[self.pos]
        if ch in self.LWS:
            self.pos += 1
        elif ch == '"':
            words.append(self.getquote())
        elif ch == '(':
            self.commentlist.append(self.getcomment())
        elif ch in self.phraseends:
            break
        else:
            words.append(self.getatom(self.phraseends))
    return words
|
'This takes a file-like object for writing a pickle data stream.
The optional protocol argument tells the pickler to use the
given protocol; supported protocols are 0, 1, 2. The default
protocol is 0, to be backwards compatible. (Protocol 0 is the
only protocol that can be written to a file opened in text
mode and read back successfully. When using a protocol higher
than 0, make sure the file is opened in binary mode, both when
pickling and unpickling.)
Protocol 1 is more efficient than protocol 0; protocol 2 is
more efficient than protocol 1.
Specifying a negative protocol version selects the highest
protocol version supported. The higher the protocol used, the
more recent the version of Python needed to read the pickle
produced.
The file parameter must have a write() method that accepts a single
string argument. It can thus be an open file object, a StringIO
object, or any other custom object that meets this interface.'
def __init__(self, file, protocol=None):
    """Take a file-like object for writing a pickle data stream.

    protocol selects the pickle protocol (0, 1, or 2); default is 0
    for backwards compatibility, and a negative value selects the
    highest supported protocol.  file only needs a write() method
    that accepts a single string argument.

    Raises ValueError for protocols above HIGHEST_PROTOCOL.
    """
    if (protocol is None):
        protocol = 0
    if (protocol < 0):
        protocol = HIGHEST_PROTOCOL
    elif (not (0 <= protocol <= HIGHEST_PROTOCOL)):
        raise ValueError(('pickle protocol must be <= %d' % HIGHEST_PROTOCOL))
    self.write = file.write
    self.memo = {}                # maps id(obj) -> (memo index, obj)
    self.proto = int(protocol)
    self.bin = (protocol >= 1)    # protocols >= 1 are binary formats
    self.fast = 0                 # non-zero disables memoization (see memoize)
|
'Clears the pickler\'s "memo".
The memo is the data structure that remembers which objects the
pickler has already seen, so that shared or recursive objects are
pickled by reference and not by value. This method is useful when
re-using picklers.'
def clear_memo(self):
    """Clear the pickler's memo.

    The memo remembers objects already pickled, so shared or
    recursive objects are stored by reference; clearing it is useful
    when re-using a pickler.
    """
    self.memo.clear()
|
'Write a pickled representation of obj to the open file.'
def dump(self, obj):
    """Write a pickled representation of obj to the open file."""
    if (self.proto >= 2):
        # Protocol 2+ streams start with a PROTO opcode plus the
        # protocol version byte.
        self.write((PROTO + chr(self.proto)))
    self.save(obj)
    self.write(STOP)
|
'Store an object in the memo.'
def memoize(self, obj):
    """Store obj in the memo and emit a put opcode for it.

    No-op when self.fast is set (fast mode disables memoization).
    """
    if self.fast:
        return
    key = id(obj)
    assert key not in self.memo
    index = len(self.memo)
    self.write(self.put(index))
    # Keep a reference to obj so its id() stays valid for the memo's
    # lifetime.
    self.memo[key] = (index, obj)
|
'This takes a file-like object for reading a pickle data stream.
The protocol version of the pickle is detected automatically, so no
proto argument is needed.
The file-like object must have two methods, a read() method that
takes an integer argument, and a readline() method that requires no
arguments. Both methods should return a string. Thus file-like
object can be a file object opened for reading, a StringIO object,
or any other custom object that meets this interface.'
def __init__(self, file):
    """Take a file-like object for reading a pickle data stream.

    file must provide a read() method taking an integer argument and
    a readline() method taking no arguments, both returning strings.
    The protocol version is detected automatically.
    """
    self.readline = file.readline
    self.read = file.read
    self.memo = {}
|
'Read a pickled object representation from the open file.
Return the reconstituted object hierarchy specified in the file.'
def load(self):
    """Read a pickled object representation from the open file.

    Return the reconstituted object hierarchy specified in the file.
    """
    # Unique sentinel object marking the start of a run of items on
    # the unpickling stack.
    self.mark = object()
    self.stack = []
    self.append = self.stack.append
    read = self.read
    dispatch = self.dispatch
    try:
        while 1:
            # Each opcode byte selects its handler from the dispatch
            # table; the handler for the stop opcode is expected to
            # raise _Stop carrying the final value.
            key = read(1)
            dispatch[key](self)
    except _Stop as stopinst:
        return stopinst.value
|
'Encodes the object input and returns a tuple (output
object, length consumed).
errors defines the error handling to apply. It defaults to
\'strict\' handling.
The method may not store state in the Codec instance. Use
StreamWriter for codecs which have to keep state in order to
make encoding efficient.
The encoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.'
def encode(self, input, errors='strict'):
    """Encode input and return (output object, length consumed).

    errors defines the error handling (default 'strict').  Must be
    overridden by subclasses; implementations must not store state on
    the Codec instance and must handle zero-length input.
    """
    raise NotImplementedError
|
'Decodes the object input and returns a tuple (output
object, length consumed).
input must be an object which provides the bf_getreadbuf
buffer slot. Python strings, buffer objects and memory
mapped files are examples of objects providing this slot.
errors defines the error handling to apply. It defaults to
\'strict\' handling.
The method may not store state in the Codec instance. Use
StreamReader for codecs which have to keep state in order to
make decoding efficient.
The decoder must be able to handle zero length input and
return an empty object of the output object type in this
situation.'
def decode(self, input, errors='strict'):
    """Decode input and return (output object, length consumed).

    errors defines the error handling (default 'strict').  Must be
    overridden by subclasses; implementations must not store state on
    the Codec instance and must handle zero-length input.
    """
    raise NotImplementedError
|
'Creates an IncrementalEncoder instance.
The IncrementalEncoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.'
def __init__(self, errors='strict'):
    """Create an IncrementalEncoder with the given error handling
    scheme (see the module docstring for allowed values)."""
    self.errors = errors
    # Scratch buffer; subclasses may use it to carry incomplete input
    # between encode() calls (unused by this base class).
    self.buffer = ''
|
'Encodes input and returns the resulting object.'
def encode(self, input, final=False):
    """Encode input (possibly a partial chunk) and return the
    resulting object.  Must be overridden by subclasses."""
    raise NotImplementedError
|
'Return the current state of the encoder.'
def getstate(self):
    """Return the current state of the encoder (0 means no state)."""
    return 0
|
'Creates a IncrementalDecoder instance.
The IncrementalDecoder may use different error handling schemes by
providing the errors keyword argument. See the module docstring
for a list of possible values.'
def __init__(self, errors='strict'):
    """Create an IncrementalDecoder with the given error handling
    scheme (see the module docstring for allowed values)."""
    self.errors = errors
|
'Decodes input and returns the resulting object.'
def decode(self, input, final=False):
    """Decode input (possibly a partial chunk) and return the
    resulting object.  Must be overridden by subclasses."""
    raise NotImplementedError
|
'Return the current state of the decoder.
This must be a (buffered_input, additional_state_info) tuple.
buffered_input must be a bytes object containing bytes that
were passed to decode() that have not yet been converted.
additional_state_info must be a non-negative integer
representing the state of the decoder WITHOUT yet having
processed the contents of buffered_input. In the initial state
and after reset(), getstate() must return (b"", 0).'
def getstate(self):
    """Return the decoder state as (buffered_input, state_int).

    buffered_input holds bytes passed to decode() but not yet
    converted; state_int describes the decoder state before those
    bytes.  The initial/reset state is ('', 0).
    """
    return ('', 0)
|
'Creates a StreamWriter instance.
stream must be a file-like object open for writing
(binary) data.
The StreamWriter may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
\'strict\' - raise a ValueError (or a subclass)
\'ignore\' - ignore the character and continue with the next
\'replace\'- replace with a suitable replacement character
\'xmlcharrefreplace\' - Replace with the appropriate XML
character reference.
\'backslashreplace\' - Replace with backslashed escape
sequences (only for encoding).
The set of allowed parameter values can be extended via
register_error.'
def __init__(self, stream, errors='strict'):
    """Create a StreamWriter.

    stream must be a file-like object open for writing (binary) data;
    errors selects the error handling scheme ('strict', 'ignore',
    'replace', 'xmlcharrefreplace', 'backslashreplace', or any value
    registered via register_error).
    """
    self.stream = stream
    self.errors = errors
|
'Writes the object\'s contents encoded to self.stream.'
def write(self, object):
    """Encode object with self.errors and write the encoded data to
    self.stream."""
    encoded, _consumed = self.encode(object, self.errors)
    self.stream.write(encoded)
|
'Writes the concatenated list of strings to the stream
using .write().'
def writelines(self, list):
    """Write the concatenated list of strings to the stream
    using write()."""
    self.write(''.join(list))
|
'Flushes and resets the codec buffers used for keeping state.
Calling this method should ensure that the data on the
output is put into a clean state, that allows appending
of new fresh data without having to rescan the whole
stream to recover state.'
def reset(self):
    """Flush and reset the codec buffers used for keeping state.

    The base class keeps no state, so this is a no-op; subclasses
    override it to leave the output in a clean state for appending
    fresh data.
    """
    pass
|
'Inherit all other methods from the underlying stream.'
def __getattr__(self, name, getattr=getattr):
    """Inherit all other methods from the underlying stream."""
    # getattr is bound as a default argument so the lookup uses a
    # local name.
    return getattr(self.stream, name)
|
'Creates a StreamReader instance.
stream must be a file-like object open for reading
(binary) data.
The StreamReader may use different error handling
schemes by providing the errors keyword argument. These
parameters are predefined:
\'strict\' - raise a ValueError (or a subclass)
\'ignore\' - ignore the character and continue with the next
\'replace\'- replace with a suitable replacement character;
The set of allowed parameter values can be extended via
register_error.'
def __init__(self, stream, errors='strict'):
    """Create a StreamReader.

    stream must be a file-like object open for reading (binary) data;
    errors selects the error handling scheme ('strict', 'ignore',
    'replace', or any value registered via register_error).
    """
    self.stream = stream
    self.errors = errors
    self.bytebuffer = ''    # undecoded bytes left over from the last read
    self.charbuffer = ''    # decoded characters not yet returned
    self.linebuffer = None  # cached list of split lines, or None
|
'Decodes data from the stream self.stream and returns the
resulting object.
chars indicates the number of characters to read from the
stream. read() will never return more than chars
characters, but it might return less, if there are not enough
characters available.
size indicates the approximate maximum number of bytes to
read from the stream for decoding purposes. The decoder
can modify this setting as appropriate. The default value
-1 indicates to read and decode as much as possible. size
is intended to prevent having to decode huge files in one
step.
If firstline is true, and a UnicodeDecodeError happens
after the first line terminator in the input only the first line
will be returned, the rest of the input will be kept until the
next call to read().
The method should use a greedy read strategy meaning that
it should read as much data as is allowed within the
definition of the encoding and the given size, e.g. if
optional encoding endings or state markers are available
on the stream, these should be read too.'
def read(self, size=(-1), chars=(-1), firstline=False):
    """Decode data from the stream and return the resulting object.

    chars caps the number of characters returned; size is an
    approximate maximum number of bytes to read for decoding (-1 for
    both means read/decode as much as possible).  If firstline is
    true and a UnicodeDecodeError occurs after the first line
    terminator, only the first line is returned and the rest is kept
    for the next call.
    """
    # Fold lines cached by an earlier readline() back into the
    # character buffer first.
    if self.linebuffer:
        self.charbuffer = ''.join(self.linebuffer)
        self.linebuffer = None
    while True:
        # Stop once enough characters have been accumulated.
        if (chars >= 0):
            if (len(self.charbuffer) >= chars):
                break
        elif (size >= 0):
            if (len(self.charbuffer) >= size):
                break
        # Read more raw data: everything, or roughly `size' bytes.
        if (size < 0):
            newdata = self.stream.read()
        else:
            newdata = self.stream.read(size)
        # Prepend undecoded bytes kept from the previous pass.
        data = (self.bytebuffer + newdata)
        try:
            (newchars, decodedbytes) = self.decode(data, self.errors)
        except UnicodeDecodeError as exc:
            if firstline:
                # Decode only up to the error; acceptable if at
                # least one complete line came out.
                (newchars, decodedbytes) = self.decode(data[:exc.start], self.errors)
                lines = newchars.splitlines(True)
                if (len(lines) <= 1):
                    raise
            else:
                raise
        # Keep trailing bytes of any incomplete sequence for later.
        self.bytebuffer = data[decodedbytes:]
        self.charbuffer += newchars
        if (not newdata):
            break
    if (chars < 0):
        # Return everything we have buffered.
        result = self.charbuffer
        self.charbuffer = ''
    else:
        result = self.charbuffer[:chars]
        self.charbuffer = self.charbuffer[chars:]
    return result
|
'Read one line from the input stream and return the
decoded data.
size, if given, is passed as size argument to the
read() method.'
def readline(self, size=None, keepends=True):
    """Read one line from the input stream and return the decoded data.

    size, if given, is passed as the size argument to read().
    """
    # If lines were cached by a previous call, serve the next one.
    if self.linebuffer:
        line = self.linebuffer[0]
        del self.linebuffer[0]
        if (len(self.linebuffer) == 1):
            # Revert to the charbuffer once only one fragment remains.
            self.charbuffer = self.linebuffer[0]
            self.linebuffer = None
        if (not keepends):
            line = line.splitlines(False)[0]
        return line
    readsize = (size or 72)
    line = ''
    while True:
        data = self.read(readsize, firstline=True)
        if data:
            # If data ends on '\r', read one extra character so a
            # following '\n' is not mistaken for a second line break.
            if data.endswith('\r'):
                data += self.read(size=1, chars=1)
        line += data
        lines = line.splitlines(True)
        if lines:
            if (len(lines) > 1):
                # More than one line decoded: return the first and
                # cache the remainder.
                line = lines[0]
                del lines[0]
                if (len(lines) > 1):
                    lines[(-1)] += self.charbuffer
                    self.linebuffer = lines
                    self.charbuffer = None
                else:
                    self.charbuffer = (lines[0] + self.charbuffer)
                if (not keepends):
                    line = line.splitlines(False)[0]
                break
            # Only one candidate line: check whether it actually ends
            # with a line terminator.
            line0withend = lines[0]
            line0withoutend = lines[0].splitlines(False)[0]
            if (line0withend != line0withoutend):
                self.charbuffer = (''.join(lines[1:]) + self.charbuffer)
                if keepends:
                    line = line0withend
                else:
                    line = line0withoutend
                break
        # End of input, or caller limited the read size: stop here.
        if ((not data) or (size is not None)):
            if (line and (not keepends)):
                line = line.splitlines(False)[0]
            break
        # Grow the chunk size (capped) to find the line end faster.
        if (readsize < 8000):
            readsize *= 2
    return line
|
'Read all lines available on the input stream
and return them as list of lines.
Line breaks are implemented using the codec\'s decoder
method and are included in the list entries.
sizehint, if given, is ignored since there is no efficient
way to finding the true end-of-line.'
def readlines(self, sizehint=None, keepends=True):
    """Read all remaining input and return it as a list of lines.

    Line breaks are produced by the codec's decoder and included in
    the entries when keepends is true.  sizehint is accepted for
    interface compatibility but ignored.
    """
    remaining = self.read()
    return remaining.splitlines(keepends)
|
'Resets the codec buffers used for keeping state.
Note that no stream repositioning should take place.
This method is primarily intended to be able to recover
from decoding errors.'
def reset(self):
    """Reset the codec buffers used for keeping state.

    No stream repositioning takes place; this is primarily intended
    for recovering from decoding errors.
    """
    self.bytebuffer = ''
    self.charbuffer = u''
    self.linebuffer = None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.