desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
@abstractmethod
def discard(self, value):
    """Remove an element.  Do not raise an exception if absent."""
    raise NotImplementedError
|
def remove(self, value):
    """Remove an element.  If not a member, raise a KeyError."""
    if value not in self:
        raise KeyError(value)
    self.discard(value)
|
def pop(self):
    """Return the popped value.  Raise KeyError if empty."""
    iterator = iter(self)
    try:
        item = next(iterator)
    except StopIteration:
        raise KeyError
    self.discard(item)
    return item
|
def clear(self):
    """This is slow (creates N new iterators!) but effective."""
    # Keep popping until the container signals emptiness via KeyError.
    try:
        while True:
            self.pop()
    except KeyError:
        pass
|
def CheckLocking(self):
    """This tests the improved concurrency with pysqlite 2.3.4. You needed
    to roll back con2 before you could commit con1."""
    if sqlite.sqlite_version_info < (3, 2, 2):
        return
    self.cur1.execute('create table test(i)')
    self.cur1.execute('insert into test(i) values (5)')
    # con1 holds an uncommitted write, so con2's insert must be refused.
    try:
        self.cur2.execute('insert into test(i) values (5)')
    except sqlite.OperationalError:
        pass
    except:
        self.fail('should have raised an OperationalError')
    else:
        self.fail('should have raised an OperationalError')
    # NOTE(review): checks that the commit is now possible.
    self.con1.commit()
|
def CheckRollbackCursorConsistency(self):
    """Checks if cursors on the connection are set into a "reset" state
    when a rollback is done on the connection."""
    con = sqlite.connect(':memory:')
    cur = con.cursor()
    cur.execute('create table test(x)')
    cur.execute('insert into test(x) values (5)')
    cur.execute('select 1 union select 2 union select 3')
    con.rollback()
    # BUG FIX: the old bare "except:" swallowed any unexpected exception type
    # and re-reported it as a generic failure (and bound an unused "e").
    # assertRaises surfaces a wrong exception type directly.
    self.assertRaises(sqlite.InterfaceError, cur.fetchall)
|
def CheckContextManager(self):
    """Can the connection be used as a context manager at all?"""
    # Entering and leaving the with-block must not raise.
    with self.con:
        pass
|
def CheckContextManagerCommit(self):
    """Is a commit called in the context manager?"""
    with self.con:
        self.con.execute("insert into test(c) values ('foo')")
    # Leaving the with-block committed, so a rollback must not undo the row.
    self.con.rollback()
    n = self.con.execute('select count(*) from test').fetchone()[0]
    self.assertEqual(n, 1)
|
def CheckContextManagerRollback(self):
    """Is a rollback called in the context manager?"""
    # did_rollback is flipped by a Connection subclass defined elsewhere in
    # this test module -- presumably in its rollback() override; verify there.
    global did_rollback
    self.assertEqual(did_rollback, False)
    try:
        with self.con:
            self.con.execute('insert into test(c) values (4)')
            self.con.execute('insert into test(c) values (4)')
    except sqlite.IntegrityError:
        pass
    self.assertEqual(did_rollback, True)
|
def CheckSqliteRowIter(self):
    """Checks if the row object is iterable"""
    self.con.row_factory = sqlite.Row
    row = self.con.execute('select 1 as a, 2 as b').fetchone()
    # Consuming the iterator is the whole test: it must not raise.
    for _ in row:
        pass
|
def CheckSqliteRowAsTuple(self):
    """Checks if the row object can be converted to a tuple"""
    self.con.row_factory = sqlite.Row
    row = self.con.execute('select 1 as a, 2 as b').fetchone()
    # The conversion itself is the test: it must not raise.
    t = tuple(row)
|
def CheckSqliteRowAsDict(self):
    """Checks if the row object can be correctly converted to a dictionary"""
    self.con.row_factory = sqlite.Row
    row = self.con.execute('select 1 as a, 2 as b').fetchone()
    d = dict(row)
    # Every column must survive the conversion with its value intact.
    self.assertEqual(d['a'], row['a'])
    self.assertEqual(d['b'], row['b'])
|
def CheckSqliteRowHashCmp(self):
    """Checks if the row object compares and hashes correctly"""
    self.con.row_factory = sqlite.Row
    row_1 = self.con.execute('select 1 as a, 2 as b').fetchone()
    row_2 = self.con.execute('select 1 as a, 2 as b').fetchone()
    row_3 = self.con.execute('select 1 as a, 3 as b').fetchone()

    # == / != must agree in both directions for equal and unequal rows.
    self.assertTrue(row_1 == row_1)
    self.assertTrue(row_1 == row_2)
    self.assertTrue(row_2 != row_3)

    self.assertFalse(row_1 != row_1)
    self.assertFalse(row_1 != row_2)
    self.assertFalse(row_2 == row_3)

    # Equal rows hash equal; distinct rows should differ.
    self.assertEqual(row_1, row_2)
    self.assertEqual(hash(row_1), hash(row_2))
    self.assertNotEqual(row_1, row_3)
    self.assertNotEqual(hash(row_1), hash(row_3))
|
def CheckCollationRegisterTwice(self):
    """Register two different collation functions under the same name.
    Verify that the last one is actually used."""
    con = sqlite.connect(':memory:')
    con.create_collation('mycoll', cmp)
    # Re-registering under the same name must replace the first function.
    con.create_collation('mycoll', lambda x, y: -cmp(x, y))
    result = con.execute("\n select x from (select 'a' as x union select 'b' as x) order by x collate mycoll\n ").fetchall()
    # The reversed comparator sorts 'b' before 'a'.
    if result[0][0] != 'b' or result[1][0] != 'a':
        self.fail('wrong collation function is used')
|
def CheckDeregisterCollation(self):
    """Register a collation, then deregister it. Make sure an error is raised if we try
    to use it."""
    con = sqlite.connect(':memory:')
    con.create_collation('mycoll', cmp)
    # Passing None deregisters the collation.
    con.create_collation('mycoll', None)
    try:
        con.execute("select 'a' as x union select 'b' as x order by x collate mycoll")
        self.fail('should have raised an OperationalError')
    except sqlite.OperationalError as e:
        if not e.args[0].startswith('no such collation sequence'):
            self.fail('wrong OperationalError raised')
|
def CheckProgressHandlerUsed(self):
    """Test that the progress handler is invoked once it is set."""
    con = sqlite.connect(':memory:')
    calls = []

    def progress():
        calls.append(None)
        return 0

    # With n=1 the handler runs for every opcode, so any statement fires it.
    con.set_progress_handler(progress, 1)
    con.execute('\n create table foo(a, b)\n ')
    self.assertTrue(calls)
|
def CheckOpcodeCount(self):
    """Test that the opcode argument is respected."""
    con = sqlite.connect(':memory:')
    progress_calls = []

    def progress():
        progress_calls.append(None)
        return 0

    con.set_progress_handler(progress, 1)
    curs = con.cursor()
    curs.execute('\n create table foo (a, b)\n ')
    first_count = len(progress_calls)
    # Rebinding the list is visible to progress() through the closure.
    progress_calls = []
    # Doubling the opcode interval must roughly halve the call count.
    con.set_progress_handler(progress, 2)
    curs.execute('\n create table bar (a, b)\n ')
    second_count = len(progress_calls)
    self.assertTrue(first_count > second_count)
|
def CheckCancelOperation(self):
    """Test that returning a non-zero value stops the operation in progress."""
    con = sqlite.connect(':memory:')
    calls = []

    def progress():
        calls.append(None)
        return 1  # non-zero aborts the running statement

    con.set_progress_handler(progress, 1)
    curs = con.cursor()
    self.assertRaises(sqlite.OperationalError, curs.execute, 'create table bar (a, b)')
|
def CheckClearHandler(self):
    """Test that setting the progress handler to None clears the previously set handler."""
    con = sqlite.connect(':memory:')
    # BUG FIX: the old code did "action = 1" inside progress(), which only
    # bound a variable local to the handler -- the outer "action" could never
    # change, so the final assertion could not detect a leaked handler.
    # Appending to a mutable list records invocations reliably.
    calls = []

    def progress():
        calls.append(1)
        return 0

    con.set_progress_handler(progress, 1)
    con.set_progress_handler(None, 1)
    con.execute('select 1 union select 2 union select 3').fetchall()
    self.assertEqual(len(calls), 0, 'progress handler was not cleared')
|
def CheckCommitAfterNoChanges(self):
    """A commit should also work when no changes were made to the database."""
    # Two consecutive commits: neither may raise even with nothing pending.
    self.cx.commit()
    self.cx.commit()
|
def CheckRollbackAfterNoChanges(self):
    """A rollback should also work when no changes were made to the database."""
    # Two consecutive rollbacks: neither may raise even with nothing pending.
    self.cx.rollback()
    self.cx.rollback()
|
def CheckRowcountSelect(self):
    """pysqlite does not know the rowcount of SELECT statements, because we
    don't fetch all rows after executing the select statement. The rowcount
    has thus to be -1."""
    self.cu.execute('select 5 union select 6')
    self.assertEqual(self.cu.rowcount, -1)
|
def CheckFetchmanyKwArg(self):
    """Checks if fetchmany works with keyword arguments"""
    self.cu.execute('select name from test')
    # size= must be accepted as a keyword, not just positionally.
    res = self.cu.fetchmany(size=100)
    self.assertEqual(len(res), 1)
|
def CheckNumber2(self):
    """Checks whether converter names are cut off at '(' characters"""
    self.cur.execute('insert into test(n2) values (5)')
    value = self.cur.execute('select n2 from test').fetchone()[0]
    # The declared type is something like "number(8)"; the converter lookup
    # must strip the "(8)" part and still find the float converter.
    self.assertEqual(type(value), float)
|
def CheckDeclTypeNotUsed(self):
    """Assures that the declared type is not used when PARSE_DECLTYPES
    is not set."""
    self.cur.execute('insert into test(x) values (?)', ('xxx',))
    self.cur.execute('select x from test')
    # The value must come back untouched by any declared-type converter.
    val = self.cur.fetchone()[0]
    self.assertEqual(val, 'xxx')
|
def CheckCursorDescriptionNoRow(self):
    """cursor.description should at least provide the column name(s), even if
    no row returned."""
    # "where 0 = 1" guarantees an empty result set.
    self.cur.execute('select * from test where 0 = 1')
    self.assertEqual(self.cur.description[0][0], 'x')
|
def CheckWorkaroundForBuggySqliteTransferBindings(self):
    """pysqlite would crash with older SQLite versions unless
    a workaround is implemented."""
    # create -> drop -> create of the same table triggered the crash.
    self.con.execute('create table foo(bar)')
    self.con.execute('drop table foo')
    self.con.execute('create table foo(bar)')
|
def CheckEmptyStatement(self):
    """pysqlite used to segfault with SQLite versions 3.5.x. These return NULL
    for "no-operation" statements"""
    # Executing an empty SQL string must be a harmless no-op.
    self.con.execute('')
|
def CheckUnicodeConnect(self):
    """With pysqlite 2.4.0 you needed to use a string or a APSW connection
    object for opening database connections.

    Formerly, both bytestrings and unicode strings used to work.

    Let's make sure unicode strings work in the future.
    """
    con = sqlite.connect(u':memory:')
    con.close()
|
def CheckTypeMapUsage(self):
    """pysqlite until 2.4.1 did not rebuild the row_cast_map when recompiling
    a statement. This test exhibits the problem."""
    SELECT = 'select * from foo'
    con = sqlite.connect(':memory:', detect_types=sqlite.PARSE_DECLTYPES)
    con.execute('create table foo(bar timestamp)')
    con.execute('insert into foo(bar) values (?)', (datetime.datetime.now(),))
    con.execute(SELECT)
    # Recreate the table with a different column type, then rerun the same
    # SELECT: the cached statement must pick up the new row_cast_map.
    con.execute('drop table foo')
    con.execute('create table foo(bar integer)')
    con.execute('insert into foo(bar) values (5)')
    con.execute(SELECT)
|
def CheckRegisterAdapter(self):
    """See issue 3312."""
    # An unhashable first argument must be rejected with TypeError.
    self.assertRaises(TypeError, sqlite.register_adapter, {}, None)
|
def CheckSetIsolationLevel(self):
    """See issue 3312."""
    con = sqlite.connect(':memory:')
    # A non-ASCII isolation level cannot be encoded and must be rejected
    # (UnicodeEncodeError under the Python 2 pysqlite this test targets).
    self.assertRaises(UnicodeEncodeError, setattr, con, 'isolation_level', u'\xe9')
|
def CheckCursorConstructorCallCheck(self):
    """Verifies that cursor methods check whether base class __init__ was called."""
    class BrokenCursor(sqlite.Cursor):
        def __init__(self, con):
            # Deliberately skips sqlite.Cursor.__init__.
            pass

    con = sqlite.connect(':memory:')
    cur = BrokenCursor(con)
    try:
        cur.execute('select 4+5').fetchall()
    except sqlite.ProgrammingError:
        pass
    except:
        self.fail('should have raised ProgrammingError')
    else:
        self.fail('should have raised ProgrammingError')
|
def CheckConnectionConstructorCallCheck(self):
    """Verifies that connection methods check whether base class __init__ was called."""
    class BrokenConnection(sqlite.Connection):
        def __init__(self, name):
            # Deliberately skips sqlite.Connection.__init__.
            pass

    con = BrokenConnection(':memory:')
    try:
        cur = con.cursor()
    except sqlite.ProgrammingError:
        pass
    except:
        self.fail('should have raised ProgrammingError')
    else:
        self.fail('should have raised ProgrammingError')
|
def CheckCursorRegistration(self):
    """Verifies that subclassed cursor classes are correctly registered with
    the connection object, too. (fetch-across-rollback problem)"""
    class Connection(sqlite.Connection):
        def cursor(self):
            return Cursor(self)

    class Cursor(sqlite.Cursor):
        def __init__(self, con):
            sqlite.Cursor.__init__(self, con)

    con = Connection(':memory:')
    cur = con.cursor()
    cur.execute('create table foo(x)')
    cur.executemany('insert into foo(x) values (?)', [(3,), (4,), (5,)])
    cur.execute('select x from foo')
    # The rollback must reset even cursors created via the subclass, so a
    # subsequent fetch has to fail.
    con.rollback()
    try:
        cur.fetchall()
    except sqlite.InterfaceError:
        pass
    except:
        self.fail('should have raised InterfaceError')
    else:
        self.fail('should have raised InterfaceError')
|
def CheckAutoCommit(self):
    """Verifies that creating a connection in autocommit mode works.
    2.5.3 introduced a regression so that these could no longer
    be created."""
    # isolation_level=None means autocommit; construction must not raise.
    sqlite.connect(':memory:', isolation_level=None)
|
def CheckPragmaAutocommit(self):
    """Verifies that running a PRAGMA statement that does an autocommit does
    work. This did not work in 2.5.3/2.5.4."""
    cur = self.con.cursor()
    cur.execute('create table foo(bar)')
    cur.execute('insert into foo(bar) values (5)')
    # The PRAGMA triggers an implicit commit; fetching must still work.
    cur.execute('pragma page_size')
    row = cur.fetchone()
|
def CheckSetDict(self):
    """See http://bugs.python.org/issue7478

    It was possible to successfully register callbacks that could not be
    hashed. Return codes of PyDict_SetItem were not checked properly.
    """
    class NotHashable:
        def __call__(self, *args, **kw):
            pass

        def __hash__(self):
            raise TypeError()

    var = NotHashable()
    # Each registration path must propagate the TypeError from hashing.
    self.assertRaises(TypeError, self.con.create_function, var)
    self.assertRaises(TypeError, self.con.create_aggregate, var)
    self.assertRaises(TypeError, self.con.set_authorizer, var)
    self.assertRaises(TypeError, self.con.set_progress_handler, var)
|
def CheckConnectionCall(self):
    """Call a connection with a non-string SQL request: check error handling
    of the statement constructor."""
    # A non-string "SQL" argument must raise sqlite.Warning (pysqlite 2.x).
    self.assertRaises(sqlite.Warning, self.con, 1)
|
def set_ok(self, cookie, request):
    """Return true if (and only if) cookie should be accepted from server.

    Currently, pre-expired cookies never get this far -- the CookieJar
    class deletes such cookies itself.
    """
    raise NotImplementedError()
|
def return_ok(self, cookie, request):
    """Return true if (and only if) cookie should be returned to server."""
    raise NotImplementedError()
|
def domain_return_ok(self, domain, request):
    """Return false if cookies should not be returned, given cookie domain."""
    # Base policy: accept every domain; subclasses may veto.
    return True
|
def path_return_ok(self, path, request):
    """Return false if cookies should not be returned, given cookie path."""
    # Base policy: accept every path; subclasses may veto.
    return True
|
def __init__(self, blocked_domains=None, allowed_domains=None,
             netscape=True, rfc2965=False,
             rfc2109_as_netscape=None,
             hide_cookie2=False,
             strict_domain=False,
             strict_rfc2965_unverifiable=True,
             strict_ns_unverifiable=False,
             strict_ns_domain=DomainLiberal,
             strict_ns_set_initial_dollar=False,
             strict_ns_set_path=False):
    """Constructor arguments should be passed as keyword arguments only."""
    self.netscape = netscape
    self.rfc2965 = rfc2965
    self.rfc2109_as_netscape = rfc2109_as_netscape
    self.hide_cookie2 = hide_cookie2
    self.strict_domain = strict_domain
    self.strict_rfc2965_unverifiable = strict_rfc2965_unverifiable
    self.strict_ns_unverifiable = strict_ns_unverifiable
    self.strict_ns_domain = strict_ns_domain
    self.strict_ns_set_initial_dollar = strict_ns_set_initial_dollar
    self.strict_ns_set_path = strict_ns_set_path
    # Domain lists are snapshotted as tuples so later caller-side mutation
    # of the passed sequences cannot change the policy.
    self._blocked_domains = tuple(blocked_domains) if blocked_domains is not None else ()
    if allowed_domains is not None:
        allowed_domains = tuple(allowed_domains)
    self._allowed_domains = allowed_domains
|
def blocked_domains(self):
    """Return the sequence of blocked domains (as a tuple)."""
    return self._blocked_domains
|
def set_blocked_domains(self, blocked_domains):
    """Set the sequence of blocked domains."""
    # Snapshot as a tuple so later mutation of the argument has no effect.
    self._blocked_domains = tuple(blocked_domains)
|
def allowed_domains(self):
    """Return None, or the sequence of allowed domains (as a tuple)."""
    return self._allowed_domains
|
def set_allowed_domains(self, allowed_domains):
    """Set the sequence of allowed domains, or None."""
    # None means "no allow-list"; any other sequence is snapshotted.
    if allowed_domains is not None:
        allowed_domains = tuple(allowed_domains)
    self._allowed_domains = allowed_domains
|
def set_ok(self, cookie, request):
    """If you override .set_ok(), be sure to call this method.  If it returns
    false, so should your subclass (assuming your subclass wants to be more
    strict about which cookies to accept).
    """
    _debug(' - checking cookie %s=%s', cookie.name, cookie.value)
    assert cookie.name is not None
    # Every per-aspect check (set_ok_version, set_ok_domain, ...) must pass.
    for check in ('version', 'verifiability', 'name', 'path', 'domain', 'port'):
        fn = getattr(self, 'set_ok_' + check)
        if not fn(cookie, request):
            return False
    return True
|
def return_ok(self, cookie, request):
    """If you override .return_ok(), be sure to call this method.  If it
    returns false, so should your subclass (assuming your subclass wants to
    be more strict about which cookies to return).
    """
    _debug(' - checking cookie %s=%s', cookie.name, cookie.value)
    # Every per-aspect check (return_ok_version, return_ok_domain, ...) must pass.
    for check in ('version', 'verifiability', 'secure', 'expires', 'port', 'domain'):
        fn = getattr(self, 'return_ok_' + check)
        if not fn(cookie, request):
            return False
    return True
|
def _cookies_for_request(self, request):
    """Return a list of cookies to be returned to server."""
    result = []
    # .keys() snapshots the domains so per-domain processing cannot be
    # disturbed by concurrent mutation of self._cookies.
    for domain in self._cookies.keys():
        result.extend(self._cookies_for_domain(domain, request))
    return result
|
def _cookie_attrs(self, cookies):
    """Return a list of cookie-attributes to be returned to server.

    like ['foo="bar"; $Path="/"', ...]

    The $Version attribute is also added when appropriate (currently only
    once per request).
    """
    # Add cookies in order of most specific (i.e. longest) path first.
    cookies.sort(key=lambda arg: len(arg.path), reverse=True)

    version_set = False
    attrs = []
    for cookie in cookies:
        version = cookie.version
        if not version_set:
            # $Version is emitted at most once, before the first cookie.
            version_set = True
            if version > 0:
                attrs.append('$Version=%s' % version)

        # Quote cookie value if necessary (RFC 2965 cookies only).
        if (cookie.value is not None) and self.non_word_re.search(cookie.value) and version > 0:
            value = self.quote_re.sub('\\\\\\1', cookie.value)
        else:
            value = cookie.value

        # Add the cookie itself; valueless cookies emit just the name.
        if cookie.value is None:
            attrs.append(cookie.name)
        else:
            attrs.append('%s=%s' % (cookie.name, value))
        if version > 0:
            if cookie.path_specified:
                attrs.append('$Path="%s"' % cookie.path)
            if cookie.domain.startswith('.'):
                domain = cookie.domain
                if (not cookie.domain_initial_dot) and domain.startswith('.'):
                    domain = domain[1:]
                attrs.append('$Domain="%s"' % domain)
            if cookie.port is not None:
                p = '$Port'
                if cookie.port_specified:
                    p = p + ('="%s"' % cookie.port)
                attrs.append(p)

    return attrs
|
def add_cookie_header(self, request):
    """Add correct Cookie: header to request (urllib2.Request object).

    The Cookie2 header is also added unless policy.hide_cookie2 is true.
    """
    _debug('add_cookie_header')
    self._cookies_lock.acquire()
    try:
        # Freeze "now" once so policy checks and the jar agree on the time.
        self._policy._now = self._now = int(time.time())

        cookies = self._cookies_for_request(request)
        attrs = self._cookie_attrs(cookies)
        if attrs:
            if not request.has_header('Cookie'):
                request.add_unredirected_header('Cookie', '; '.join(attrs))

        # if necessary, advertise that we know RFC 2965
        if (self._policy.rfc2965 and (not self._policy.hide_cookie2) and
                (not request.has_header('Cookie2'))):
            for cookie in cookies:
                if cookie.version != 1:
                    request.add_unredirected_header('Cookie2', '$Version="1"')
                    break
    finally:
        self._cookies_lock.release()

    self.clear_expired_cookies()
|
def _normalized_cookie_tuples(self, attrs_set):
    """Return list of tuples containing normalised cookie information.

    attrs_set is the list of lists of key,value pairs extracted from
    the Set-Cookie or Set-Cookie2 headers.

    Tuples are name, value, standard, rest, where name and value are the
    cookie name and value, standard is a dictionary containing the standard
    cookie-attributes (discard, secure, version, expires or max-age,
    domain, path and port) and rest is a dictionary containing the rest of
    the cookie-attributes.
    """
    cookie_tuples = []

    boolean_attrs = ('discard', 'secure')
    value_attrs = ('version', 'expires', 'max-age', 'domain', 'path', 'port', 'comment', 'commenturl')

    for cookie_attrs in attrs_set:
        name, value = cookie_attrs[0]

        # Build dictionary of standard cookie-attributes (standard) and
        # dictionary of other cookie-attributes (rest).
        max_age_set = False
        bad_cookie = False
        standard = {}
        rest = {}
        for k, v in cookie_attrs[1:]:
            lc = k.lower()
            # Don't lose case distinction for unknown fields.
            if (lc in value_attrs) or (lc in boolean_attrs):
                k = lc
            if (k in boolean_attrs) and (v is None):
                # boolean cookie-attribute is present, but has no value
                v = True
            if k in standard:
                # only first value is significant
                continue
            if k == 'domain':
                if v is None:
                    _debug(' missing value for domain attribute')
                    bad_cookie = True
                    break
                # RFC 2965 section 3.3.3
                v = v.lower()
            if k == 'expires':
                if max_age_set:
                    # Prefer max-age to expires (like Mozilla)
                    continue
                if v is None:
                    _debug(' missing or invalid value for expires attribute: treating as session cookie')
                    continue
            if k == 'max-age':
                max_age_set = True
                try:
                    v = int(v)
                except ValueError:
                    _debug(' missing or invalid (non-numeric) value for max-age attribute')
                    bad_cookie = True
                    break
                # convert RFC 2965 Max-Age to seconds since epoch
                k = 'expires'
                v = self._now + v
            if (k in value_attrs) or (k in boolean_attrs):
                if (v is None) and (k not in ('port', 'comment', 'commenturl')):
                    _debug(' missing value for %s attribute' % k)
                    bad_cookie = True
                    break
                standard[k] = v
            else:
                rest[k] = v

        if bad_cookie:
            continue

        cookie_tuples.append((name, value, standard, rest))

    return cookie_tuples
|
def make_cookies(self, response, request):
    """Return sequence of Cookie objects extracted from response object."""
    # get cookie-attributes for RFC 2965 and Netscape protocols
    headers = response.info()
    rfc2965_hdrs = headers.getheaders('Set-Cookie2')
    ns_hdrs = headers.getheaders('Set-Cookie')

    rfc2965 = self._policy.rfc2965
    netscape = self._policy.netscape

    # Quick exit when no relevant header / protocol combination applies.
    if ((not rfc2965_hdrs and not ns_hdrs) or
            (not ns_hdrs and not rfc2965) or
            (not rfc2965_hdrs and not netscape) or
            (not netscape and not rfc2965)):
        return []

    try:
        cookies = self._cookies_from_attrs_set(split_header_words(rfc2965_hdrs), request)
    except Exception:
        _warn_unhandled_exception()
        cookies = []

    if ns_hdrs and netscape:
        try:
            # RFC 2109 and Netscape cookies
            ns_cookies = self._cookies_from_attrs_set(parse_ns_headers(ns_hdrs), request)
        except Exception:
            _warn_unhandled_exception()
            ns_cookies = []
        self._process_rfc2109_cookies(ns_cookies)

        # When both protocols delivered the same cookie, keep the RFC 2965
        # version and drop the Netscape duplicate.
        if rfc2965:
            lookup = {}
            for cookie in cookies:
                lookup[(cookie.domain, cookie.path, cookie.name)] = None

            def no_matching_rfc2965(ns_cookie, lookup=lookup):
                key = (ns_cookie.domain, ns_cookie.path, ns_cookie.name)
                return key not in lookup
            ns_cookies = filter(no_matching_rfc2965, ns_cookies)

        if ns_cookies:
            cookies.extend(ns_cookies)

    return cookies
|
def set_cookie_if_ok(self, cookie, request):
    """Set a cookie if policy says it's OK to do so."""
    self._cookies_lock.acquire()
    try:
        # Keep policy and jar clocks in sync for this decision.
        self._policy._now = self._now = int(time.time())
        if self._policy.set_ok(cookie, request):
            self.set_cookie(cookie)
    finally:
        self._cookies_lock.release()
|
def set_cookie(self, cookie):
    """Set a cookie, without checking whether or not it should be set."""
    c = self._cookies
    self._cookies_lock.acquire()
    try:
        # Storage layout: _cookies[domain][path][name] -> cookie.
        c.setdefault(cookie.domain, {})
        c2 = c[cookie.domain]
        c2.setdefault(cookie.path, {})
        c3 = c2[cookie.path]
        c3[cookie.name] = cookie
    finally:
        self._cookies_lock.release()
|
def extract_cookies(self, response, request):
    """Extract cookies from response, where allowable given the request."""
    _debug('extract_cookies: %s', response.info())
    self._cookies_lock.acquire()
    try:
        # Keep policy and jar clocks in sync for the accept decisions.
        self._policy._now = self._now = int(time.time())
        for cookie in self.make_cookies(response, request):
            if self._policy.set_ok(cookie, request):
                _debug(' setting cookie: %s', cookie)
                self.set_cookie(cookie)
    finally:
        self._cookies_lock.release()
|
def clear(self, domain=None, path=None, name=None):
    """Clear some cookies.

    Invoking this method without arguments will clear all cookies.  If
    given a single argument, only cookies belonging to that domain will be
    removed.  If given two arguments, cookies belonging to the specified
    path within that domain are removed.  If given three arguments, then
    the cookie with the specified name, path and domain is removed.

    Raises KeyError if no matching cookie exists.
    """
    if name is not None:
        if domain is None or path is None:
            raise ValueError('domain and path must be given to remove a cookie by name')
        del self._cookies[domain][path][name]
    elif path is not None:
        if domain is None:
            raise ValueError('domain must be given to remove cookies by path')
        del self._cookies[domain][path]
    elif domain is not None:
        del self._cookies[domain]
    else:
        # No arguments: drop everything.
        self._cookies = {}
|
def clear_session_cookies(self):
    """Discard all session cookies.

    Note that the .save() method won't save session cookies anyway, unless
    you ask otherwise by passing a true ignore_discard argument.
    """
    self._cookies_lock.acquire()
    try:
        # Session cookies are those flagged "discard".
        for cookie in self:
            if cookie.discard:
                self.clear(cookie.domain, cookie.path, cookie.name)
    finally:
        self._cookies_lock.release()
|
def clear_expired_cookies(self):
    """Discard all expired cookies.

    You probably don't need to call this method: expired cookies are never
    sent back to the server (provided you're using DefaultCookiePolicy),
    this method is called by CookieJar itself every so often, and the
    .save() method won't save expired cookies anyway (unless you ask
    otherwise by passing a true ignore_expires argument).
    """
    self._cookies_lock.acquire()
    try:
        # Evaluate "now" once so all cookies are judged at the same instant.
        now = time.time()
        for cookie in self:
            if cookie.is_expired(now):
                self.clear(cookie.domain, cookie.path, cookie.name)
    finally:
        self._cookies_lock.release()
|
def __len__(self):
    """Return number of contained cookies."""
    # Count by iteration: the jar has no flat container to take len() of.
    return sum(1 for _cookie in self)
|
def __init__(self, filename=None, delayload=False, policy=None):
    """
    Cookies are NOT loaded from the named file until either the .load() or
    .revert() method is called.
    """
    CookieJar.__init__(self, policy)
    if filename is not None:
        # BUG FIX: a bare "except:" here turned even KeyboardInterrupt and
        # SystemExit into ValueError.  Only TypeError from the string
        # concatenation means "not string-like".
        try:
            filename + ''
        except TypeError:
            raise ValueError('filename must be string-like')
    self.filename = filename
    self.delayload = bool(delayload)
|
def save(self, filename=None, ignore_discard=False, ignore_expires=False):
    """Save cookies to a file."""
    # Abstract: concrete FileCookieJar subclasses supply the format.
    raise NotImplementedError()
|
def load(self, filename=None, ignore_discard=False, ignore_expires=False):
    """Load cookies from a file."""
    if filename is None:
        if self.filename is None:
            raise ValueError(MISSING_FILENAME_TEXT)
        filename = self.filename

    # The context manager guarantees the file is closed even if
    # _really_load raises.
    with open(filename) as f:
        self._really_load(f, filename, ignore_discard, ignore_expires)
|
def revert(self, filename=None, ignore_discard=False, ignore_expires=False):
    """Clear all cookies and reload cookies from a saved file.

    Raises LoadError (or IOError) if reversion is not successful; the
    object's state will not be altered if this happens.
    """
    if filename is None:
        if self.filename is None:
            raise ValueError(MISSING_FILENAME_TEXT)
        filename = self.filename

    self._cookies_lock.acquire()
    try:
        # Snapshot the jar so a failed load can restore it untouched.
        old_state = copy.deepcopy(self._cookies)
        self._cookies = {}
        try:
            self.load(filename, ignore_discard, ignore_expires)
        except (LoadError, IOError):
            self._cookies = old_state
            raise
    finally:
        self._cookies_lock.release()
|
def add_type(self, type, ext, strict=True):
    """Add a mapping between a type and an extension.

    When the extension is already known, the new
    type will replace the old one. When the type
    is already known the extension will be added
    to the list of known extensions.

    If strict is true, information will be added to
    list of standard types, else to the list of non-standard
    types.
    """
    # Forward map: extension -> type (last registration wins).
    self.types_map[strict][ext] = type
    # Inverse map: type -> list of extensions (no duplicates).
    exts = self.types_map_inv[strict].setdefault(type, [])
    if ext not in exts:
        exts.append(ext)
|
def guess_type(self, url, strict=True):
    """Guess the type of a file based on its URL.

    Return value is a tuple (type, encoding) where type is None if
    the type can't be guessed (no or unknown suffix) or a string
    of the form type/subtype, usable for a MIME Content-type
    header; and encoding is None for no encoding or the name of
    the program used to encode (e.g. compress or gzip).  The
    mappings are table driven.  Encoding suffixes are case
    sensitive; type suffixes are first tried case sensitive, then
    case insensitive.

    The suffixes .tgz, .taz and .tz (case sensitive!) are all
    mapped to '.tar.gz'.  (This is table-driven too, using the
    dictionary suffix_map.)

    Optional `strict' argument when False adds a bunch of commonly found,
    but non-standard types.
    """
    scheme, url = urllib.splittype(url)
    if scheme == 'data':
        # data URL syntax: "data:" [mediatype] [";base64"] "," data
        # mediatype defaults to "text/plain".
        comma = url.find(',')
        if comma < 0:
            # bad data URL
            return None, None
        semi = url.find(';', 0, comma)
        if semi >= 0:
            type = url[:semi]
        else:
            type = url[:comma]
        if '=' in type or '/' not in type:
            type = 'text/plain'
        # data URLs carry their own payload: never compressed.
        return type, None
    base, ext = posixpath.splitext(url)
    # Resolve composite suffixes like .tgz -> .tar.gz before anything else.
    while ext in self.suffix_map:
        base, ext = posixpath.splitext(base + self.suffix_map[ext])
    if ext in self.encodings_map:
        encoding = self.encodings_map[ext]
        base, ext = posixpath.splitext(base)
    else:
        encoding = None
    # Case-sensitive lookup first, then case-insensitive; strict map first.
    types_map = self.types_map[True]
    if ext in types_map:
        return types_map[ext], encoding
    elif ext.lower() in types_map:
        return types_map[ext.lower()], encoding
    elif strict:
        return None, encoding
    types_map = self.types_map[False]
    if ext in types_map:
        return types_map[ext], encoding
    elif ext.lower() in types_map:
        return types_map[ext.lower()], encoding
    else:
        return None, encoding
|
def guess_all_extensions(self, type, strict=True):
    """Guess the extensions for a file based on its MIME type.

    Return value is a list of strings giving the possible filename
    extensions, including the leading dot ('.').  The extension is not
    guaranteed to have been associated with any particular data stream,
    but would be mapped to the MIME type `type' by guess_type().

    Optional `strict' argument when false adds a bunch of commonly found,
    but non-standard types.
    """
    type = type.lower()
    # BUG FIX: the old code returned (and, with strict=False, appended to)
    # the internal types_map_inv list itself, so callers could corrupt the
    # shared mapping.  Work on a copy instead.
    extensions = list(self.types_map_inv[True].get(type, []))
    if not strict:
        for ext in self.types_map_inv[False].get(type, []):
            if ext not in extensions:
                extensions.append(ext)
    return extensions
|
def guess_extension(self, type, strict=True):
    """Guess the extension for a file based on its MIME type.

    Return value is a string giving a filename extension,
    including the leading dot ('.').  The extension is not
    guaranteed to have been associated with any particular data
    stream, but would be mapped to the MIME type `type' by
    guess_type().  If no extension can be guessed for `type', None
    is returned.

    Optional `strict' argument when false adds a bunch of commonly found,
    but non-standard types.
    """
    all_exts = self.guess_all_extensions(type, strict)
    # First candidate wins; None when nothing matched.
    return all_exts[0] if all_exts else None
|
def read(self, filename, strict=True):
    """Read a single mime.types-format file, specified by pathname.

    If strict is true, information will be added to
    list of standard types, else to the list of non-standard
    types.
    """
    # Delegate parsing to readfp; the with-block guarantees closure.
    with open(filename) as fp:
        self.readfp(fp, strict)
|
def readfp(self, fp, strict=True):
    """Read a single mime.types-format file.

    If strict is true, information will be added to
    list of standard types, else to the list of non-standard
    types.
    """
    while 1:
        line = fp.readline()
        if not line:
            break
        words = line.split()
        # Drop everything from the first '#'-starting word: it's a comment.
        for i, word in enumerate(words):
            if word[0] == '#':
                del words[i:]
                break
        if not words:
            continue
        # Format: one type followed by its suffixes (without leading dot).
        type, suffixes = words[0], words[1:]
        for suff in suffixes:
            self.add_type(type, '.' + suff, strict)
|
def read_windows_registry(self, strict=True):
    """Load the MIME types database from Windows registry.

    If strict is true, information will be added to
    list of standard types, else to the list of non-standard
    types.
    """
    # No-op when the _winreg module is unavailable (non-Windows).
    if not _winreg:
        return

    def enum_types(mimedb):
        # Yield each content-type key name representable in the default
        # encoding; silently skip the rest.
        i = 0
        while True:
            try:
                ctype = _winreg.EnumKey(mimedb, i)
            except EnvironmentError:
                break
            try:
                ctype = ctype.encode(default_encoding)
            except UnicodeEncodeError:
                pass
            else:
                yield ctype
            i += 1

    default_encoding = sys.getdefaultencoding()
    with _winreg.OpenKey(_winreg.HKEY_CLASSES_ROOT, 'MIME\\Database\\Content Type') as mimedb:
        for ctype in enum_types(mimedb):
            try:
                with _winreg.OpenKey(mimedb, ctype) as key:
                    suffix, datatype = _winreg.QueryValueEx(key, 'Extension')
            except EnvironmentError:
                continue
            # Only plain-string registry values carry a usable extension.
            if datatype != _winreg.REG_SZ:
                continue
            try:
                suffix = suffix.encode(default_encoding)
            except UnicodeEncodeError:
                continue
            self.add_type(ctype, suffix, strict)
|
def document(self, object, name=None, *args):
    """Generate documentation for an object.

    Dispatches to the doc* method that matches the object's kind and
    returns its result.
    """
    args = (object, name) + args
    # Descriptors first: they would be misclassified by the checks below.
    if inspect.isgetsetdescriptor(object) or inspect.ismemberdescriptor(object):
        return self.docdata(*args)
    try:
        if inspect.ismodule(object):
            return self.docmodule(*args)
        if inspect.isclass(object):
            return self.docclass(*args)
        if inspect.isroutine(object):
            return self.docroutine(*args)
    except AttributeError:
        # Fall through to the generic renderers below.
        pass
    if isinstance(object, property):
        return self.docproperty(*args)
    return self.docother(*args)
|
def fail(self, object, name=None, *args):
    """Raise an exception for unimplemented types."""
    message = ("don't know how to document object%s of type %s" %
               ((name and (' ' + repr(name))), type(object).__name__))
    # BUG FIX: `raise TypeError, message` is Python-2-only syntax; the
    # call form below is valid on both Python 2 and Python 3.
    raise TypeError(message)
|
def getdocloc(self, object):
    """Return the location of module docs or None."""
    try:
        file = inspect.getabsfile(object)
    except TypeError:
        # Built-in modules have no source file.
        file = '(built-in)'
    # Base URL for online docs; overridable via the PYTHONDOCS env var.
    docloc = os.environ.get('PYTHONDOCS', 'http://docs.python.org/library')
    # Root of the standard library for this installation.
    # NOTE(review): sys.version[0:3] assumes a single-digit minor version
    # (e.g. '2.7'); it would truncate '3.10' -- fine for the Python 2 era
    # this code targets, confirm if ported.
    basedir = os.path.join(sys.exec_prefix, 'lib', ('python' + sys.version[0:3]))
    # Link only true standard-library modules: either one of the listed
    # built-in module names, or a module living under basedir but outside
    # site-packages.  A couple of names are excluded explicitly.
    if (isinstance(object, type(os)) and ((object.__name__ in ('errno', 'exceptions', 'gc', 'imp', 'marshal', 'posix', 'signal', 'sys', 'thread', 'zipimport')) or (file.startswith(basedir) and (not file.startswith(os.path.join(basedir, 'site-packages'))))) and (object.__name__ not in ('xml.etree', 'test.pydoc_mod'))):
        if docloc.startswith('http://'):
            # Remote docs: URL path component per module.
            docloc = ('%s/%s' % (docloc.rstrip('/'), object.__name__))
        else:
            # Local docs directory: point at the module's HTML page.
            docloc = os.path.join(docloc, (object.__name__ + '.html'))
    else:
        docloc = None
    return docloc
|
def page(self, title, contents):
    """Format an HTML page."""
    template = ('\n<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">'
                '\n<html><head><title>Python: %s</title>'
                '\n</head><body bgcolor="#f0f0f8">\n%s\n</body></html>')
    return template % (title, contents)
|
def heading(self, title, fgcol, bgcol, extras=''):
    """Format a page heading."""
    # Single-row table: title on the left, optional extras on the right.
    template = ('\n<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="heading">\n'
                '<tr bgcolor="%s">\n<td valign=bottom> <br>\n'
                '<font color="%s" face="helvetica, arial"> <br>%s</font></td\n'
                '><td align=right valign=bottom\n'
                '><font color="%s" face="helvetica, arial">%s</font></td></tr></table>\n ')
    return template % (bgcol, fgcol, title, fgcol, (extras or ' '))
|
def section(self, title, fgcol, bgcol, contents, width=6, prelude='', marginalia=None, gap=' '):
    """Format a section with a heading."""
    if marginalia is None:
        # Default left margin: `width` spaces in a fixed-width font.
        marginalia = '<tt>' + ' ' * width + '</tt>'
    parts = ['<p>\n<table width="100%%" cellspacing=0 cellpadding=2 border=0 summary="section">\n<tr bgcolor="%s">\n<td colspan=3 valign=bottom> <br>\n<font color="%s" face="helvetica, arial">%s</font></td></tr>\n ' % (bgcol, fgcol, title)]
    if prelude:
        parts.append('\n<tr bgcolor="%s"><td rowspan=2>%s</td>\n<td colspan=2>%s</td></tr>\n<tr><td>%s</td>' % (bgcol, marginalia, prelude, gap))
    else:
        parts.append('\n<tr><td bgcolor="%s">%s</td><td>%s</td>' % (bgcol, marginalia, gap))
    parts.append('\n<td width="100%%">%s</td></tr></table>' % contents)
    return ''.join(parts)
|
def bigsection(self, title, *args):
    """Format a section with a big heading."""
    return self.section('<big><strong>%s</strong></big>' % title, *args)
|
def preformat(self, text):
    """Format literal preformatted text."""
    # expandtabs and replace are module-level helpers here (Python 2
    # pydoc style); replace() applies successive (old, new) pairs.
    text = self.escape(expandtabs(text))
    # Convert spacing and newlines into HTML for display inside <tt>.
    # NOTE(review): upstream pydoc uses '&nbsp;' entities in these pairs;
    # the plain spaces below look like a transcription artifact -- confirm
    # against the original source before relying on the output.
    return replace(text, '\n\n', '\n \n', '\n\n', '\n \n', ' ', ' ', '\n', '<br>\n')
|
def multicolumn(self, list, format, cols=4):
    """Format a list of items into a multi-column list."""
    pieces = []
    # Items fill column by column, `rows` items per column.
    rows = (len(list) + cols - 1) // cols
    colwidth = 100 // cols
    for col in range(cols):
        pieces.append('<td width="%d%%" valign=top>' % colwidth)
        for item in list[rows * col:rows * col + rows]:
            pieces.append(format(item) + '<br>\n')
        pieces.append('</td>')
    return '<table width="100%%" summary="list"><tr>%s</tr></table>' % ''.join(pieces)
|
def namelink(self, name, *dicts):
    """Make a link for an identifier, given name-to-URL mappings."""
    # First mapping that knows the name wins; otherwise return it bare.
    for mapping in dicts:
        if name in mapping:
            return '<a href="%s">%s</a>' % (mapping[name], name)
    return name
|
def classlink(self, object, modname):
    """Make a link for a class."""
    name = object.__name__
    module = sys.modules.get(object.__module__)
    # Link only when the class is reachable as a top-level name of its
    # defining module; otherwise show the plain (possibly dotted) name.
    if hasattr(module, name) and getattr(module, name) is object:
        return '<a href="%s.html#%s">%s</a>' % (module.__name__, name, classname(object, modname))
    return classname(object, modname)
|
def modulelink(self, object):
    """Make a link for a module."""
    name = object.__name__
    return '<a href="%s.html">%s</a>' % (name, name)
|
def modpkglink(self, data):
    """Make a link for a module or package to display in an index."""
    name, path, ispackage, shadowed = data
    if shadowed:
        # A shadowed module gets no link, only greyed-out text.
        return self.grey(name)
    url = ('%s.%s.html' % (path, name)) if path else ('%s.html' % name)
    text = ('<strong>%s</strong> (package)' % name) if ispackage else name
    return '<a href="%s">%s</a>' % (url, text)
|
def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
    """Mark up some plain text, given a context of symbols to look for.

    Each context dictionary maps object names to anchor names.  URLs and
    RFC/PEP references become hyperlinks; other identifiers are linked
    through namelink() using the supplied mappings.
    """
    escape = escape or self.escape
    results = []
    here = 0
    pattern = re.compile('\\b((http|ftp)://\\S+[\\w/]|RFC[- ]?(\\d+)|PEP[- ]?(\\d+)|(self\\.)?(\\w+))')
    while True:
        match = pattern.search(text, here)
        if not match:
            break
        (start, end) = match.span()
        # Text between matches passes through escaped but unlinked.
        results.append(escape(text[here:start]))
        (all, scheme, rfc, pep, selfdot, name) = match.groups()
        if scheme:
            # BUG FIX: quotes inside a URL must be entity-escaped so they
            # cannot terminate the href attribute (was a no-op
            # replace('"', '"'); upstream pydoc uses '&quot;').
            url = escape(all).replace('"', '&quot;')
            results.append('<a href="%s">%s</a>' % (url, url))
        elif rfc:
            url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
            results.append('<a href="%s">%s</a>' % (url, escape(all)))
        elif pep:
            url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
            results.append('<a href="%s">%s</a>' % (url, escape(all)))
        elif text[end:end + 1] == '(':
            # Identifier followed by '(' -- try methods, then functions,
            # then classes for an anchor.
            results.append(self.namelink(name, methods, funcs, classes))
        elif selfdot:
            results.append('self.<strong>%s</strong>' % name)
        else:
            results.append(self.namelink(name, classes))
        here = end
    results.append(escape(text[here:]))
    return ''.join(results)
|
def formattree(self, tree, modname, parent=None):
    """Produce HTML for a class tree as given by inspect.getclasstree()."""
    result = ''
    for entry in tree:
        if type(entry) is type(()):
            # (class, bases) pair: one <dt> line for the class itself.
            (c, bases) = entry
            result += '<dt><font face="helvetica, arial">'
            result += self.classlink(c, modname)
            # Show base classes unless the sole base is the parent node.
            if bases and bases != (parent,):
                parents = [self.classlink(base, modname) for base in bases]
                result += '(' + ', '.join(parents) + ')'
            result += '\n</font></dt>'
        elif type(entry) is type([]):
            # Nested list: subclasses of the preceding class.
            result += '<dd>\n%s</dd>\n' % self.formattree(entry, modname, c)
    return '<dl>\n%s</dl>\n' % result
|
def docmodule(self, object, name=None, mod=None, *ignored):
    """Produce HTML documentation for a module object."""
    name = object.__name__  # the passed-in name is deliberately ignored
    try:
        all = object.__all__
    except AttributeError:
        all = None
    # Build a breadcrumb of links for each package level of a dotted name
    # (split/join/strip are the Python 2 string-module helpers).
    parts = split(name, '.')
    links = []
    for i in range((len(parts) - 1)):
        links.append(('<a href="%s.html"><font color="#ffffff">%s</font></a>' % (join(parts[:(i + 1)], '.'), parts[i])))
    linkedname = join((links + parts[(-1):]), '.')
    head = ('<big><big><strong>%s</strong></big></big>' % linkedname)
    try:
        path = inspect.getabsfile(object)
        url = path
        if (sys.platform == 'win32'):
            import nturl2path
            url = nturl2path.pathname2url(path)
        filelink = ('<a href="file:%s">%s</a>' % (url, path))
    except TypeError:
        # Built-in modules have no file.
        filelink = '(built-in)'
    info = []
    if hasattr(object, '__version__'):
        version = str(object.__version__)
        # Strip RCS '$Revision: ... $' markers from version strings.
        if ((version[:11] == ('$' + 'Revision: ')) and (version[(-1):] == '$')):
            version = strip(version[11:(-1)])
        info.append(('version %s' % self.escape(version)))
    if hasattr(object, '__date__'):
        info.append(self.escape(str(object.__date__)))
    if info:
        head = (head + (' (%s)' % join(info, ', ')))
    docloc = self.getdocloc(object)
    if (docloc is not None):
        docloc = ('<br><a href="%(docloc)s">Module Docs</a>' % locals())
    else:
        docloc = ''
    result = self.heading(head, '#ffffff', '#7799ee', (('<a href=".">index</a><br>' + filelink) + docloc))
    modules = inspect.getmembers(object, inspect.ismodule)
    # Collect visible classes plus a name/value -> anchor map for links.
    (classes, cdict) = ([], {})
    for (key, value) in inspect.getmembers(object, inspect.isclass):
        if ((all is not None) or ((inspect.getmodule(value) or object) is object)):
            if visiblename(key, all, object):
                classes.append((key, value))
                cdict[key] = cdict[value] = ('#' + key)
    # Map base classes defined in other modules to external page anchors.
    for (key, value) in classes:
        for base in value.__bases__:
            (key, modname) = (base.__name__, base.__module__)
            module = sys.modules.get(modname)
            if ((modname != name) and module and hasattr(module, key)):
                if (getattr(module, key) is base):
                    if (not (key in cdict)):
                        cdict[key] = cdict[base] = ((modname + '.html#') + key)
    # Collect visible functions/routines with their anchors.
    (funcs, fdict) = ([], {})
    for (key, value) in inspect.getmembers(object, inspect.isroutine):
        if ((all is not None) or inspect.isbuiltin(value) or (inspect.getmodule(value) is object)):
            if visiblename(key, all, object):
                funcs.append((key, value))
                fdict[key] = ('#-' + key)
                if inspect.isfunction(value):
                    fdict[value] = fdict[key]
    data = []
    for (key, value) in inspect.getmembers(object, isdata):
        if visiblename(key, all, object):
            data.append((key, value))
    doc = self.markup(getdoc(object), self.preformat, fdict, cdict)
    doc = (doc and ('<tt>%s</tt>' % doc))
    result = (result + ('<p>%s</p>\n' % doc))
    if hasattr(object, '__path__'):
        # Package: list the modules it contains.
        modpkgs = []
        for (importer, modname, ispkg) in pkgutil.iter_modules(object.__path__):
            modpkgs.append((modname, name, ispkg, 0))
        modpkgs.sort()
        contents = self.multicolumn(modpkgs, self.modpkglink)
        result = (result + self.bigsection('Package Contents', '#ffffff', '#aa55cc', contents))
    elif modules:
        # Plain module: list the modules it imports.
        contents = self.multicolumn(modules, (lambda key_value, s=self: s.modulelink(key_value[1])))
        result = (result + self.bigsection('Modules', '#ffffff', '#aa55cc', contents))
    if classes:
        # Class tree followed by full documentation for each class.
        classlist = map((lambda key_value: key_value[1]), classes)
        contents = [self.formattree(inspect.getclasstree(classlist, 1), name)]
        for (key, value) in classes:
            contents.append(self.document(value, key, name, fdict, cdict))
        result = (result + self.bigsection('Classes', '#ffffff', '#ee77aa', join(contents)))
    if funcs:
        contents = []
        for (key, value) in funcs:
            contents.append(self.document(value, key, name, fdict, cdict))
        result = (result + self.bigsection('Functions', '#ffffff', '#eeaa77', join(contents)))
    if data:
        contents = []
        for (key, value) in data:
            contents.append(self.document(value, key))
        result = (result + self.bigsection('Data', '#ffffff', '#55aa55', join(contents, '<br>\n')))
    if hasattr(object, '__author__'):
        contents = self.markup(str(object.__author__), self.preformat)
        result = (result + self.bigsection('Author', '#ffffff', '#7799ee', contents))
    if hasattr(object, '__credits__'):
        contents = self.markup(str(object.__credits__), self.preformat)
        result = (result + self.bigsection('Credits', '#ffffff', '#7799ee', contents))
    return result
|
def docclass(self, object, name=None, mod=None, funcs={}, classes={}, *ignored):
    """Produce HTML documentation for a class object."""
    realname = object.__name__
    name = (name or realname)
    bases = object.__bases__
    contents = []
    push = contents.append

    class HorizontalRule:
        # Emits an <hr> before every section except the first one.
        def __init__(self):
            self.needone = 0
        def maybe(self):
            if self.needone:
                push('<hr>\n')
            self.needone = 1
    hr = HorizontalRule()

    # List the MRO when there is more than just the class and `object`.
    mro = deque(inspect.getmro(object))
    if (len(mro) > 2):
        hr.maybe()
        push('<dl><dt>Method resolution order:</dt>\n')
        for base in mro:
            push(('<dd>%s</dd>\n' % self.classlink(base, object.__module__)))
        push('</dl>\n')

    def spill(msg, attrs, predicate):
        # Document attrs matching predicate as routines; return the rest.
        (ok, attrs) = _split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for (name, kind, homecls, value) in ok:
                push(self.document(getattr(object, name), name, mod, funcs, classes, mdict, object))
                push('\n')
        return attrs

    def spilldescriptors(msg, attrs, predicate):
        # Document data descriptors; return the remaining attrs.
        (ok, attrs) = _split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for (name, kind, homecls, value) in ok:
                push(self._docdescriptor(name, value, mod))
        return attrs

    def spilldata(msg, attrs, predicate):
        # Document plain data attributes; return the remaining attrs.
        (ok, attrs) = _split_list(attrs, predicate)
        if ok:
            hr.maybe()
            push(msg)
            for (name, kind, homecls, value) in ok:
                base = self.docother(getattr(object, name), name, mod)
                # Only callables and data descriptors carry a usable doc.
                if (hasattr(value, '__call__') or inspect.isdatadescriptor(value)):
                    doc = getattr(value, '__doc__', None)
                else:
                    doc = None
                if (doc is None):
                    push(('<dl><dt>%s</dl>\n' % base))
                else:
                    doc = self.markup(getdoc(value), self.preformat, funcs, classes, mdict)
                    doc = ('<dd><tt>%s</tt>' % doc)
                    push(('<dl><dt>%s%s</dl>\n' % (base, doc)))
                push('\n')
        return attrs

    # Python 2: filter() returns a list here, which the code relies on.
    attrs = filter((lambda data: visiblename(data[0], obj=object)), classify_class_attrs(object))
    # Map attribute names (and values, when hashable) to page anchors.
    mdict = {}
    for (key, kind, homecls, value) in attrs:
        mdict[key] = anchor = ((('#' + name) + '-') + key)
        value = getattr(object, key)
        try:
            mdict[value] = anchor
        except TypeError:
            # Unhashable value; only the name gets an anchor.
            pass
    # Walk the MRO, documenting each class's own attributes in turn.
    while attrs:
        if mro:
            thisclass = mro.popleft()
        else:
            # MRO exhausted: group leftovers by their recorded home class.
            thisclass = attrs[0][2]
        (attrs, inherited) = _split_list(attrs, (lambda t: (t[2] is thisclass)))
        if (thisclass is __builtin__.object):
            # Skip attributes inherited straight from object.
            attrs = inherited
            continue
        elif (thisclass is object):
            tag = 'defined here'
        else:
            tag = ('inherited from %s' % self.classlink(thisclass, object.__module__))
        tag += ':<br>\n'
        try:
            attrs.sort(key=(lambda t: t[0]))
        except TypeError:
            # Python 2 fallback: older sort() without the key argument.
            attrs.sort((lambda t1, t2: cmp(t1[0], t2[0])))
        # Each spill* call consumes one category and returns the rest.
        attrs = spill(('Methods %s' % tag), attrs, (lambda t: (t[1] == 'method')))
        attrs = spill(('Class methods %s' % tag), attrs, (lambda t: (t[1] == 'class method')))
        attrs = spill(('Static methods %s' % tag), attrs, (lambda t: (t[1] == 'static method')))
        attrs = spilldescriptors(('Data descriptors %s' % tag), attrs, (lambda t: (t[1] == 'data descriptor')))
        attrs = spilldata(('Data and other attributes %s' % tag), attrs, (lambda t: (t[1] == 'data')))
        assert (attrs == [])
        attrs = inherited
    contents = ''.join(contents)
    if (name == realname):
        title = ('<a name="%s">class <strong>%s</strong></a>' % (name, realname))
    else:
        title = ('<strong>%s</strong> = <a name="%s">class %s</a>' % (name, name, realname))
    if bases:
        parents = []
        for base in bases:
            parents.append(self.classlink(base, object.__module__))
        title = (title + ('(%s)' % join(parents, ', ')))
    doc = self.markup(getdoc(object), self.preformat, funcs, classes, mdict)
    doc = (doc and ('<tt>%s<br> </tt>' % doc))
    return self.section(title, '#000000', '#ffc8d8', contents, 3, doc)
|
def formatvalue(self, object):
    """Format an argument default value as text."""
    return self.grey('=' + self.repr(object))
|
def docroutine(self, object, name=None, mod=None, funcs={}, classes={}, methods={}, cl=None):
    """Produce HTML documentation for a function or method object."""
    realname = object.__name__
    name = (name or realname)
    # Anchor is '<classname>-<name>' for methods, '-<name>' otherwise.
    anchor = ((((cl and cl.__name__) or '') + '-') + name)
    note = ''
    skipdocs = 0
    if inspect.ismethod(object):
        # Python 2 method attributes: im_class / im_self / im_func.
        imclass = object.im_class
        if cl:
            if (imclass is not cl):
                note = (' from ' + self.classlink(imclass, mod))
        elif (object.im_self is not None):
            note = (' method of %s instance' % self.classlink(object.im_self.__class__, mod))
        else:
            note = (' unbound %s method' % self.classlink(imclass, mod))
        # Document the underlying function object.
        object = object.im_func
    if (name == realname):
        title = ('<a name="%s"><strong>%s</strong></a>' % (anchor, realname))
    else:
        if (cl and (realname in cl.__dict__) and (cl.__dict__[realname] is object)):
            # The name is an alias within the same class: link back to the
            # original definition and skip duplicating its docstring.
            reallink = ('<a href="#%s">%s</a>' % (((cl.__name__ + '-') + realname), realname))
            skipdocs = 1
        else:
            reallink = realname
        title = ('<a name="%s"><strong>%s</strong></a> = %s' % (anchor, name, reallink))
    if inspect.isfunction(object):
        (args, varargs, varkw, defaults) = inspect.getargspec(object)
        argspec = inspect.formatargspec(args, varargs, varkw, defaults, formatvalue=self.formatvalue)
        if (realname == '<lambda>'):
            title = ('<strong>%s</strong> <em>lambda</em> ' % name)
            # Drop the surrounding parentheses for lambda display.
            argspec = argspec[1:(-1)]
    else:
        # Builtins and other callables have no introspectable signature.
        argspec = '(...)'
    decl = ((title + argspec) + (note and self.grey(('<font face="helvetica, arial">%s</font>' % note))))
    if skipdocs:
        return ('<dl><dt>%s</dt></dl>\n' % decl)
    else:
        doc = self.markup(getdoc(object), self.preformat, funcs, classes, methods)
        doc = (doc and ('<dd><tt>%s</tt></dd>' % doc))
        return ('<dl><dt>%s</dt>%s</dl>\n' % (decl, doc))
|
def docproperty(self, object, name=None, mod=None, cl=None):
    """Produce html documentation for a property."""
    # Properties render through the shared descriptor formatter.
    return self._docdescriptor(name, object, mod)
|
def docother(self, object, name=None, mod=None, *ignored):
    """Produce HTML documentation for a data object."""
    if name:
        lhs = '<strong>%s</strong> = ' % name
    else:
        lhs = ''
    return lhs + self.repr(object)
|
def docdata(self, object, name=None, mod=None, cl=None):
    """Produce html documentation for a data descriptor."""
    # Data descriptors render through the shared descriptor formatter.
    return self._docdescriptor(name, object, mod)
|
def index(self, dir, shadowed=None):
    """Generate an HTML index for a directory of modules."""
    if shadowed is None:
        shadowed = {}
    modpkgs = []
    for importer, name, ispkg in pkgutil.iter_modules([dir]):
        # A name already seen in an earlier directory is shadowed here.
        modpkgs.append((name, '', ispkg, name in shadowed))
        shadowed[name] = 1
    modpkgs.sort()
    contents = self.multicolumn(modpkgs, self.modpkglink)
    return self.bigsection(dir, '#ffffff', '#ee77aa', contents)
|
def bold(self, text):
    """Format a string in bold by overstriking."""
    # char + backspace + char is the classic terminal overstrike trick.
    return ''.join(ch + '\x08' + ch for ch in text)
|
def indent(self, text, prefix='    '):
    """Indent text by prepending a given prefix to each line."""
    if not text:
        return ''
    lines = [prefix + line for line in text.split('\n')]
    # Never leave trailing whitespace on the final line.
    if lines:
        lines[-1] = lines[-1].rstrip()
    return '\n'.join(lines)
|
def section(self, title, contents):
    """Format a section with a given heading."""
    body = self.indent(contents).rstrip()
    return self.bold(title) + '\n' + body + '\n\n'
|
def formattree(self, tree, modname, parent=None, prefix=''):
    """Render in text a class tree as returned by inspect.getclasstree()."""
    result = ''
    for entry in tree:
        if type(entry) is type(()):
            # (class, bases) pair: one line naming the class.
            (c, bases) = entry
            result += prefix + classname(c, modname)
            # Show base classes unless the sole base is the parent node.
            if bases and bases != (parent,):
                parents = [classname(base, modname) for base in bases]
                result += '(%s)' % ', '.join(parents)
            result += '\n'
        elif type(entry) is type([]):
            # Nested subclasses, indented one level deeper.
            result += self.formattree(entry, modname, c, prefix + '    ')
    return result
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.