desc (string, lengths 3–26.7k) | decl (string, lengths 11–7.89k) | bodies (string, lengths 8–553k) |
---|---|---|
'Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.'
| def assertRaisesRegexp(self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs):
| if (expected_regexp is not None):
expected_regexp = re.compile(expected_regexp)
context = _AssertRaisesContext(expected_exception, self, expected_regexp)
if (callable_obj is None):
return context
with context:
callable_obj(*args, **kwargs)
|
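A minimal usage sketch of the assertion above, assuming a standard Python 2.7 unittest environment: the callable can be passed in directly, or the method can be used as a context manager when callable_obj is omitted.

```python
import re
import unittest

class RaisesRegexpExample(unittest.TestCase):
    def test_callable_form(self):
        # The callable and its arguments are passed in directly.
        self.assertRaisesRegexp(ValueError, 'invalid literal',
                                int, 'not a number')

    def test_context_manager_form(self):
        # With callable_obj omitted, the returned context object is
        # used in a with-block; a compiled pattern works too.
        with self.assertRaisesRegexp(ValueError, re.compile('invalid literal')):
            int('not a number')

if __name__ == '__main__':
    unittest.main()
```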
'Fail the test unless the text matches the regular expression.'
| def assertRegexpMatches(self, text, expected_regexp, msg=None):
| if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if (not expected_regexp.search(text)):
msg = (msg or "Regexp didn't match")
msg = ('%s: %r not found in %r' % (msg, expected_regexp.pattern, text))
raise self.failureException(msg)
|
'Fail the test if the text matches the regular expression.'
| def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
| if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = (msg or 'Regexp matched')
msg = ('%s: %r matches %r in %r' % (msg, text[match.start():match.end()], unexpected_regexp.pattern, text))
raise self.failureException(msg)
|
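A short illustration of the two regexp assertions above, assuming Python 2.7's unittest:

```python
import unittest

class RegexpAssertionExample(unittest.TestCase):
    def test_found(self):
        # Passes: the pattern occurs somewhere in the text.
        self.assertRegexpMatches('Error code 404: not found', r'\d{3}')

    def test_not_found(self):
        # Passes: the pattern does not occur in the text.
        self.assertNotRegexpMatches('all systems nominal', r'\d{3}')

if __name__ == '__main__':
    unittest.main()
```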
'Called when the given test is about to be run'
| def startTest(self, test):
| self.testsRun += 1
self._mirrorOutput = False
self._setupStdout()
|
'Called when the given test has been run'
| def stopTest(self, test):
| self._restoreStdout()
self._mirrorOutput = False
|
'Called when an error has occurred. \'err\' is a tuple of values as
returned by sys.exc_info().'
| @failfast
def addError(self, test, err):
| self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
|
'Called when a failure has occurred. \'err\' is a tuple of values as
returned by sys.exc_info().'
| @failfast
def addFailure(self, test, err):
| self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
|
'Called when a test has completed successfully'
| def addSuccess(self, test):
| pass
|
'Called when a test is skipped.'
| def addSkip(self, test, reason):
| self.skipped.append((test, reason))
|
'Called when an expected failure/error occurred.'
| def addExpectedFailure(self, test, err):
| self.expectedFailures.append((test, self._exc_info_to_string(err, test)))
|
'Called when a test was expected to fail, but succeeded.'
| @failfast
def addUnexpectedSuccess(self, test):
| self.unexpectedSuccesses.append(test)
|
'Tells whether or not this result was a success'
| def wasSuccessful(self):
| return (len(self.failures) == len(self.errors) == 0)
|
'Indicates that the tests should be aborted'
| def stop(self):
| self.shouldStop = True
|
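The startTest/stopTest/add* hooks above are normally driven by a runner, but a TestResult can also be fed directly. A minimal sketch, assuming Python 2.7's unittest:

```python
import unittest

class Sample(unittest.TestCase):
    def test_ok(self):
        self.assertTrue(True)
    def test_bad(self):
        self.fail('deliberate failure')

result = unittest.TestResult()
suite = unittest.TestLoader().loadTestsFromTestCase(Sample)
suite.run(result)              # calls startTest/stopTest/addFailure internally

print result.testsRun          # 2
print len(result.failures)     # 1
print result.wasSuccessful()   # False, because a failure was recorded
```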
'Converts a sys.exc_info()-style tuple of values into a string.'
| def _exc_info_to_string(self, err, test):
| (exctype, value, tb) = err
while (tb and self._is_relevant_tb_level(tb)):
tb = tb.tb_next
if (exctype is test.failureException):
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if (not output.endswith('\n')):
output += '\n'
msgLines.append((STDOUT_LINE % output))
if error:
if (not error.endswith('\n')):
error += '\n'
msgLines.append((STDERR_LINE % error))
return ''.join(msgLines)
|
'Run the given test case or test suite.'
| def run(self, test):
| result = self._makeResult()
registerResult(result)
result.failfast = self.failfast
result.buffer = self.buffer
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if (startTestRun is not None):
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if (stopTestRun is not None):
stopTestRun()
stopTime = time.time()
timeTaken = (stopTime - startTime)
result.printErrors()
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln(('Ran %d test%s in %.3fs' % (run, (((run != 1) and 's') or ''), timeTaken)))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures, result.unexpectedSuccesses, result.skipped))
except AttributeError:
pass
else:
(expectedFails, unexpectedSuccesses, skipped) = results
infos = []
if (not result.wasSuccessful()):
self.stream.write('FAILED')
(failed, errored) = map(len, (result.failures, result.errors))
if failed:
infos.append(('failures=%d' % failed))
if errored:
infos.append(('errors=%d' % errored))
else:
self.stream.write('OK')
if skipped:
infos.append(('skipped=%d' % skipped))
if expectedFails:
infos.append(('expected failures=%d' % expectedFails))
if unexpectedSuccesses:
infos.append(('unexpected successes=%d' % unexpectedSuccesses))
if infos:
self.stream.writeln((' (%s)' % (', '.join(infos),)))
else:
self.stream.write('\n')
return result
|
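A sketch of the summary behaviour described above, using the stock TextTestRunner (Python 2.7 assumed):

```python
import unittest

class Demo(unittest.TestCase):
    def test_passes(self):
        self.assertEqual(2 + 2, 4)

    @unittest.skip('not relevant here')
    def test_skipped(self):
        pass

suite = unittest.TestLoader().loadTestsFromTestCase(Demo)
runner = unittest.TextTestRunner(verbosity=2)
result = runner.run(suite)
# Prints "Ran 2 tests in ...s" followed by "OK (skipped=1)" for this suite.
```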
'Return a suite of all test cases contained in testCaseClass'
| def loadTestsFromTestCase(self, testCaseClass):
| if issubclass(testCaseClass, suite.TestSuite):
raise TypeError('Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?')
testCaseNames = self.getTestCaseNames(testCaseClass)
if ((not testCaseNames) and hasattr(testCaseClass, 'runTest')):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
|
'Return a suite of all test cases contained in the given module'
| def loadTestsFromModule(self, module, use_load_tests=True):
| tests = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, type) and issubclass(obj, case.TestCase)):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if (use_load_tests and (load_tests is not None)):
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e, self.suiteClass)
return tests
|
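The use_load_tests branch implements the load_tests protocol: when a module defines load_tests(loader, tests, pattern), suite construction is delegated to it. A hedged sketch of a test module using the hook:

```python
import unittest

class MathTest(unittest.TestCase):
    def test_add(self):
        self.assertEqual(1 + 1, 2)

def load_tests(loader, standard_tests, pattern):
    # 'standard_tests' is the suite the loader already collected from this
    # module; it can be filtered, reordered, or extended before returning.
    suite = unittest.TestSuite()
    suite.addTests(standard_tests)
    return suite

if __name__ == '__main__':
    unittest.main()   # main() goes through loadTestsFromModule()
```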
'Return a suite of all test cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.'
| def loadTestsFromName(self, name, module=None):
| parts = name.split('.')
if (module is None):
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[(-1)]
if (not parts_copy):
raise
parts = parts[1:]
obj = module
for part in parts:
(parent, obj) = (obj, getattr(obj, part))
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif (isinstance(obj, type) and issubclass(obj, case.TestCase)):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and isinstance(parent, type) and issubclass(parent, case.TestCase)):
name = parts[(-1)]
inst = parent(name)
return self.suiteClass([inst])
elif isinstance(obj, suite.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, suite.TestSuite):
return test
elif isinstance(test, case.TestCase):
return self.suiteClass([test])
else:
raise TypeError(('calling %s returned %s, not a test' % (obj, test)))
else:
raise TypeError(("don't know how to make test from: %s" % obj))
|
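A sketch of the specifier styles the resolution loop above accepts. The dotted names are hypothetical and only illustrate module, class, and method resolution:

```python
import unittest

loader = unittest.TestLoader()

# Hypothetical dotted names -- substitute real, importable ones.
module_suite = loader.loadTestsFromName('mypackage.test_mod')
class_suite  = loader.loadTestsFromName('mypackage.test_mod.MyTest')
method_suite = loader.loadTestsFromName('mypackage.test_mod.MyTest.test_foo')

unittest.TextTestRunner().run(method_suite)
```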
'Return a suite of all test cases found using the given sequence
of string specifiers. See \'loadTestsFromName()\'.'
| def loadTestsFromNames(self, names, module=None):
| suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
|
'Return a sorted sequence of method names found within testCaseClass'
| def getTestCaseNames(self, testCaseClass):
| def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
return (attrname.startswith(prefix) and hasattr(getattr(testCaseClass, attrname), '__call__'))
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
|
'Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with \'__init__.py\') matches the
pattern then the package will be checked for a \'load_tests\' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package;
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().'
| def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
| set_implicit_top = False
if ((top_level_dir is None) and (self._top_level_dir is not None)):
top_level_dir = self._top_level_dir
elif (top_level_dir is None):
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if (not (top_level_dir in sys.path)):
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if (start_dir != top_level_dir):
is_not_importable = (not os.path.isfile(os.path.join(start_dir, '__init__.py')))
else:
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname(the_module.__file__))
if set_implicit_top:
self._top_level_dir = self._get_directory_containing_module(top_part)
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError(('Start directory is not importable: %r' % start_dir))
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
|
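A minimal discovery sketch, assuming tests live in importable packages below the current directory:

```python
import unittest

loader = unittest.TestLoader()
# Recursively collect every file matching 'test*.py' under the start
# directory; packages may override collection with a load_tests function.
suite = loader.discover(start_dir='.', pattern='test*.py')
unittest.TextTestRunner(verbosity=1).run(suite)
```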
'Used by discovery. Yields test suites it loads.'
| def _find_tests(self, start_dir, pattern):
| paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if (not VALID_MODULE_NAME.match(path)):
continue
if (not self._match_path(path, full_path, pattern)):
continue
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
(yield _make_failed_import_test(name, self.suiteClass))
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(os.path.realpath(mod_file))[0]
fullpath_noext = os.path.splitext(os.path.realpath(full_path))[0]
if (realpath.lower() != fullpath_noext.lower()):
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = '%r module incorrectly imported from %r. Expected %r. Is this module globally installed?'
raise ImportError((msg % (mod_name, module_dir, expected_dir)))
(yield self.loadTestsFromModule(module))
elif os.path.isdir(full_path):
if (not os.path.isfile(os.path.join(full_path, '__init__.py'))):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if (load_tests is None):
if (tests is not None):
(yield tests)
for test in self._find_tests(full_path, pattern):
(yield test)
else:
try:
(yield load_tests(self, tests, pattern))
except Exception as e:
(yield _make_failed_load_tests(package.__name__, e, self.suiteClass))
|
'Tests shortDescription() for a method with a docstring.'
| @unittest.skipIf((sys.flags.optimize >= 2), 'Docstrings are omitted with -O2 and above')
def testShortDescriptionWithOneLineDocstring(self):
| self.assertEqual(self.shortDescription(), 'Tests shortDescription() for a method with a docstring.')
|
'Tests shortDescription() for a method with a longer docstring.
This method ensures that only the first line of a docstring is
used in the short description, no matter how long the
whole thing is.'
| @unittest.skipIf((sys.flags.optimize >= 2), 'Docstrings are omitted with -O2 and above')
def testShortDescriptionWithMultiLineDocstring(self):
| self.assertEqual(self.shortDescription(), 'Tests shortDescription() for a method with a longer docstring.')
|
'Test undocumented method name synonyms.
Please do not use these method names in your own code.
This test confirms their continued existence and functionality
in order to avoid breaking existing code.'
| def testSynonymAssertMethodNames(self):
| self.assertNotEquals(3, 5)
self.assertEquals(3, 3)
self.assertAlmostEquals(2.0, 2.0)
self.assertNotAlmostEquals(3.0, 5.0)
self.assert_(True)
|
'Test fail* methods pending deprecation; they will warn in 3.2.
Do not use these methods. They will go away in 3.3.'
| def testPendingDeprecationMethodNames(self):
| with test_support.check_warnings():
self.failIfEqual(3, 5)
self.failUnlessEqual(3, 3)
self.failUnlessAlmostEqual(2.0, 2.0)
self.failIfAlmostEqual(3.0, 5.0)
self.failUnless(True)
self.failUnlessRaises(TypeError, (lambda _: (3.14 + u'spam')))
self.failIf(False)
|
'Tests getDescription() for a method with a docstring.'
| @unittest.skipIf((sys.flags.optimize >= 2), 'Docstrings are omitted with -O2 and above')
def testGetDescriptionWithOneLineDocstring(self):
| result = unittest.TextTestResult(None, True, 1)
self.assertEqual(result.getDescription(self), (('testGetDescriptionWithOneLineDocstring (' + __name__) + '.Test_TestResult)\nTests getDescription() for a method with a docstring.'))
|
'Tests getDescription() for a method with a longer docstring.
The second line of the docstring.'
| @unittest.skipIf((sys.flags.optimize >= 2), 'Docstrings are omitted with -O2 and above')
def testGetDescriptionWithMultiLineDocstring(self):
| result = unittest.TextTestResult(None, True, 1)
self.assertEqual(result.getDescription(self), (('testGetDescriptionWithMultiLineDocstring (' + __name__) + '.Test_TestResult)\nTests getDescription() for a method with a longer docstring.'))
|
'Run the tests without collecting errors in a TestResult'
| def debug(self):
| for test in self:
test.debug()
|
'Run the tests without collecting errors in a TestResult'
| def debug(self):
| debug = _DebugResult()
self.run(debug, True)
|
'A file object is its own iterator, for example iter(f) returns f
(unless f is closed). When a file is used as an iterator, typically
in a for loop (for example, for line in f: print line), the next()
method is called repeatedly. This method returns the next input line,
or raises StopIteration when EOF is hit.'
| def next(self):
| _complain_ifclosed(self.closed)
r = self.readline()
if (not r):
raise StopIteration
return r
|
'Free the memory buffer.'
| def close(self):
| if (not self.closed):
self.closed = True
del self.buf, self.pos
|
'Returns False because StringIO objects are not connected to a
tty-like device.'
| def isatty(self):
| _complain_ifclosed(self.closed)
return False
|
'Set the file\'s current position.
The mode argument is optional and defaults to 0 (absolute file
positioning); other values are 1 (seek relative to the current
position) and 2 (seek relative to the file\'s end).
There is no return value.'
| def seek(self, pos, mode=0):
| _complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if (mode == 1):
pos += self.pos
elif (mode == 2):
pos += self.len
self.pos = max(0, pos)
|
'Return the file\'s current position.'
| def tell(self):
| _complain_ifclosed(self.closed)
return self.pos
|
'Read at most size bytes from the file
(less if the read hits EOF before obtaining size bytes).
If the size argument is negative or omitted, read all data until EOF
is reached. The bytes are returned as a string object. An empty
string is returned when EOF is encountered immediately.'
| def read(self, n=(-1)):
| _complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
if ((n is None) or (n < 0)):
newpos = self.len
else:
newpos = min((self.pos + n), self.len)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
|
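The seek/tell/read trio above behaves like a regular file over an in-memory buffer. A small sketch, assuming Python 2's StringIO module:

```python
from StringIO import StringIO

f = StringIO('first line\nsecond line\n')
print f.read(5)     # 'first'  -- at most 5 bytes
print f.tell()      # 5
f.seek(0, 2)        # mode 2: relative to the end of the buffer
print f.tell()      # 23      -- total buffer length
f.seek(0)           # mode defaults to 0: absolute positioning
print f.readline()  # 'first line\n'
```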
'Read one entire line from the file.
A trailing newline character is kept in the string (but may be absent
when a file ends with an incomplete line). If the size argument is
present and non-negative, it is a maximum byte count (including the
trailing newline) and an incomplete line may be returned.
An empty string is returned only when EOF is encountered immediately.
Note: Unlike stdio\'s fgets(), the returned string contains null
characters (\'\0\') if they occurred in the input.'
| def readline(self, length=None):
| _complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
i = self.buf.find('\n', self.pos)
if (i < 0):
newpos = self.len
else:
newpos = (i + 1)
if ((length is not None) and (length >= 0)):
if ((self.pos + length) < newpos):
newpos = (self.pos + length)
r = self.buf[self.pos:newpos]
self.pos = newpos
return r
|
'Read until EOF using readline() and return a list containing the
lines thus read.
If the optional sizehint argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes are read
(possibly more, to accommodate a final whole line).'
| def readlines(self, sizehint=0):
| total = 0
lines = []
line = self.readline()
while line:
lines.append(line)
total += len(line)
if (0 < sizehint <= total):
break
line = self.readline()
return lines
|
'Truncate the file\'s size.
If the optional size argument is present, the file is truncated to
(at most) that size. The size defaults to the current position.
The current file position is not changed unless the position
is beyond the new file size.
If the specified size exceeds the file\'s current size, the
file remains unchanged.'
| def truncate(self, size=None):
| _complain_ifclosed(self.closed)
if (size is None):
size = self.pos
elif (size < 0):
raise IOError(EINVAL, 'Negative size not allowed')
elif (size < self.pos):
self.pos = size
self.buf = self.getvalue()[:size]
self.len = size
|
'Write a string to the file.
There is no return value.'
| def write(self, s):
| _complain_ifclosed(self.closed)
if (not s):
return
if (not isinstance(s, basestring)):
s = str(s)
spos = self.pos
slen = self.len
if (spos == slen):
self.buflist.append(s)
self.len = self.pos = (spos + len(s))
return
if (spos > slen):
self.buflist.append(('\x00' * (spos - slen)))
slen = spos
newpos = (spos + len(s))
if (spos < slen):
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = [self.buf[:spos], s, self.buf[newpos:]]
self.buf = ''
if (newpos > slen):
slen = newpos
else:
self.buflist.append(s)
slen = newpos
self.len = slen
self.pos = newpos
|
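write() overwrites in place at the current position and, when the position is past the end, pads the gap with NUL bytes, as the branches above show. A short sketch:

```python
from StringIO import StringIO

buf = StringIO()
buf.write('hello world')
buf.seek(0)
buf.write('HELLO')            # overwrites in place; nothing is inserted
print repr(buf.getvalue())    # 'HELLO world'
buf.seek(20)
buf.write('!')                # position past EOF: the gap is filled with '\x00'
print len(buf.getvalue())     # 21
```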
'Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. There
is no return value.
(The name is intended to match readlines(); writelines() does not add
line separators.)'
| def writelines(self, iterable):
| write = self.write
for line in iterable:
write(line)
|
'Flush the internal buffer'
| def flush(self):
| _complain_ifclosed(self.closed)
|
'Retrieve the entire contents of the "file" at any time before
the StringIO object\'s close() method is called.
The StringIO object can accept either Unicode or 8-bit strings,
but mixing the two may take some care. If both are used, 8-bit
strings that cannot be interpreted as 7-bit ASCII (that use the
8th bit) will cause a UnicodeError to be raised when getvalue()
is called.'
| def getvalue(self):
| _complain_ifclosed(self.closed)
if self.buflist:
self.buf += ''.join(self.buflist)
self.buflist = []
return self.buf
|
'real_value, coded_value = value_decode(STRING)
Called prior to setting a cookie\'s value from the network
representation. The VALUE is the value read from HTTP
header.
Override this function to modify the behavior of cookies.'
| def value_decode(self, val):
| return (val, val)
|
'real_value, coded_value = value_encode(VALUE)
Called prior to setting a cookie\'s value from the dictionary
representation. The VALUE is the value being assigned.
Override this function to modify the behavior of cookies.'
| def value_encode(self, val):
| strval = str(val)
return (strval, strval)
|
'Private method for setting a cookie\'s value'
| def __set(self, key, real_value, coded_value):
| M = self.get(key, Morsel())
M.set(key, real_value, coded_value)
dict.__setitem__(self, key, M)
|
'Dictionary style assignment.'
| def __setitem__(self, key, value):
| if isinstance(value, Morsel):
dict.__setitem__(self, key, value)
else:
(rval, cval) = self.value_encode(value)
self.__set(key, rval, cval)
|
'Return a string suitable for HTTP.'
| def output(self, attrs=None, header='Set-Cookie:', sep='\r\n'):
| result = []
items = self.items()
items.sort()
for (K, V) in items:
result.append(V.output(attrs, header))
return sep.join(result)
|
'Return a string suitable for JavaScript.'
| def js_output(self, attrs=None):
| result = []
items = self.items()
items.sort()
for (K, V) in items:
result.append(V.js_output(attrs))
return _nulljoin(result)
|
'Load cookies from a string (presumably HTTP_COOKIE) or
from a dictionary. Loading cookies from a dictionary \'d\'
is equivalent to calling:
map(Cookie.__setitem__, d.keys(), d.values())'
| def load(self, rawdata):
| if (type(rawdata) == type('')):
self.__ParseString(rawdata)
else:
for (k, v) in rawdata.items():
self[k] = v
return
|
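A small sketch tying together load(), dictionary-style assignment, and output(), assuming the Python 2 Cookie module's SimpleCookie:

```python
from Cookie import SimpleCookie

c = SimpleCookie()
c.load('session=abc123; theme=dark')   # parse an HTTP_COOKIE style string
c['lang'] = 'en'                       # dictionary-style assignment
c['lang']['path'] = '/'

# output() returns one "Set-Cookie:" line per morsel, sorted by key and
# joined with CRLF by default.
print c.output()
```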
'Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.'
| def acquire(self, blocking=1):
| me = _get_ident()
if (self.__owner == me):
self.__count = (self.__count + 1)
if __debug__:
self._note('%s.acquire(%s): recursive success', self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note('%s.acquire(%s): initial success', self, blocking)
elif __debug__:
self._note('%s.acquire(%s): failure', self, blocking)
return rc
|
'Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.'
| def release(self):
| if (self.__owner != _get_ident()):
raise RuntimeError('cannot release un-acquired lock')
self.__count = count = (self.__count - 1)
if (not count):
self.__owner = None
self.__block.release()
if __debug__:
self._note('%s.release(): final release', self)
elif __debug__:
self._note('%s.release(): non-final release', self)
|
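A sketch of the recursion-level bookkeeping described above: the owning thread may acquire the lock again, and every acquire() must be balanced by a release() before other threads can take ownership.

```python
import threading

lock = threading.RLock()

def nested_update():
    with lock:          # recursion level 1
        with lock:      # same thread: level 2, returns immediately
            pass
    # both levels released here; other threads may now acquire the lock

t = threading.Thread(target=nested_update)
t.start()
t.join()
```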
'Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notifyAll() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.'
| def wait(self, timeout=None):
| if (not self._is_owned()):
raise RuntimeError('cannot wait on un-acquired lock')
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try:
if (timeout is None):
waiter.acquire()
if __debug__:
self._note('%s.wait(): got it', self)
else:
endtime = (_time() + timeout)
delay = 0.0005
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = (endtime - _time())
if (remaining <= 0):
break
delay = min((delay * 2), remaining, 0.05)
_sleep(delay)
if (not gotit):
if __debug__:
self._note('%s.wait(%s): timed out', self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
elif __debug__:
self._note('%s.wait(%s): got it', self, timeout)
finally:
self._acquire_restore(saved_state)
|
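A minimal producer/consumer sketch for wait()/notify(). The lock must be held around both calls, and the while-loop re-checks the predicate after waking, as the docstrings above require.

```python
import threading
import collections

items = collections.deque()
cond = threading.Condition()

def consumer():
    with cond:
        while not items:        # re-check the predicate after every wakeup
            cond.wait()
        print 'got', items.popleft()

def producer():
    with cond:                  # notify() requires the lock to be held
        items.append('work item')
        cond.notify()

t = threading.Thread(target=consumer)
t.start()
producer()
t.join()
```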
'Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.'
| def notify(self, n=1):
| if (not self._is_owned()):
raise RuntimeError('cannot notify on un-acquired lock')
__waiters = self.__waiters
waiters = __waiters[:n]
if (not waiters):
if __debug__:
self._note('%s.notify(): no waiters', self)
return
self._note('%s.notify(): notifying %d waiter%s', self, n, (((n != 1) and 's') or ''))
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
|
'Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.'
| def notifyAll(self):
| self.notify(len(self.__waiters))
|
'Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.'
| def acquire(self, blocking=1):
| rc = False
with self.__cond:
while (self.__value == 0):
if (not blocking):
break
if __debug__:
self._note('%s.acquire(%s): blocked waiting, value=%s', self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = (self.__value - 1)
if __debug__:
self._note('%s.acquire: success, value=%s', self, self.__value)
rc = True
return rc
|
'Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.'
| def release(self):
| with self.__cond:
self.__value = (self.__value + 1)
if __debug__:
self._note('%s.release: success, value=%s', self, self.__value)
self.__cond.notify()
|
'Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.'
| def release(self):
| with self._Semaphore__cond:
if (self._Semaphore__value >= self._initial_value):
raise ValueError('Semaphore released too many times')
self._Semaphore__value += 1
self._Semaphore__cond.notify()
|
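A sketch of the bounded variant: acquire() decrements the counter (blocking, or failing non-blockingly, at zero) and a release() past the initial value raises ValueError.

```python
import threading

sem = threading.BoundedSemaphore(2)   # at most two concurrent holders

sem.acquire()
sem.acquire()
print sem.acquire(False)              # False: non-blocking acquire fails at zero
sem.release()
sem.release()
try:
    sem.release()                     # one release too many
except ValueError as e:
    print 'rejected:', e
```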
'Return true if and only if the internal flag is true.'
| def isSet(self):
| return self.__flag
|
'Set the internal flag to true.
All threads waiting for the flag to become true are awakened. Threads
that call wait() once the flag is true will not block at all.'
| def set(self):
| with self.__cond:
self.__flag = True
self.__cond.notify_all()
|
'Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.'
| def clear(self):
| with self.__cond:
self.__flag = False
|
'Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.'
| def wait(self, timeout=None):
| with self.__cond:
if (not self.__flag):
self.__cond.wait(timeout)
return self.__flag
|
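A short sketch of set()/wait(): since wait() returns the internal flag, a False result distinguishes a timeout from a genuine set() (Python 2.7 behaviour, matching the body above).

```python
import threading
import time

ready = threading.Event()

def worker():
    if ready.wait(timeout=5.0):   # blocks until set() or 5 seconds elapse
        print 'flag was set'
    else:
        print 'timed out'

t = threading.Thread(target=worker)
t.start()
time.sleep(0.1)
ready.set()                       # wakes the waiting worker
t.join()
```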
'This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.'
| def __init__(self, group=None, target=None, name=None, args=(), kwargs=None, verbose=None):
| assert (group is None), 'group argument must be None for now'
_Verbose.__init__(self, verbose)
if (kwargs is None):
kwargs = {}
self.__target = target
self.__name = str((name or _newname()))
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
self.__stderr = _sys.stderr
|
'Start the thread\'s activity.
It must be called at most once per thread object. It arranges for the
object\'s run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.'
| def start(self):
| if (not self.__initialized):
raise RuntimeError('thread.__init__() not called')
if self.__started.is_set():
raise RuntimeError('threads can only be started once')
if __debug__:
self._note('%s.start(): starting thread', self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
|
'Method representing the thread\'s activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object\'s constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.'
| def run(self):
| try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
del self.__target, self.__args, self.__kwargs
|
'Remove current thread from the dict of currently running threads.'
| def __delete(self):
| try:
with _active_limbo_lock:
del _active[_get_ident()]
except KeyError:
if ('dummy_threading' not in _sys.modules):
raise
|
'Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started; attempting to do so raises the same
exception.'
| def join(self, timeout=None):
| if (not self.__initialized):
raise RuntimeError('Thread.__init__() not called')
if (not self.__started.is_set()):
raise RuntimeError('cannot join thread before it is started')
if (self is current_thread()):
raise RuntimeError('cannot join current thread')
if __debug__:
if (not self.__stopped):
self._note('%s.join(): waiting until thread stops', self)
self.__block.acquire()
try:
if (timeout is None):
while (not self.__stopped):
self.__block.wait()
if __debug__:
self._note('%s.join(): thread stopped', self)
else:
deadline = (_time() + timeout)
while (not self.__stopped):
delay = (deadline - _time())
if (delay <= 0):
if __debug__:
self._note('%s.join(): timed out', self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note('%s.join(): thread stopped', self)
finally:
self.__block.release()
|
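A sketch of the constructor arguments and the start()/join() lifecycle documented above. The target and URL are placeholders; nothing is actually downloaded.

```python
import threading

def fetch(url, timeout=10):
    print 'pretending to fetch', url, 'with timeout', timeout

t = threading.Thread(target=fetch,
                     name='fetcher-1',
                     args=('http://example.com',),
                     kwargs={'timeout': 5})
t.start()            # run() will call fetch(*args, **kwargs) in a new thread
t.join(timeout=30)   # join() returns None; use isAlive() to detect a timeout
if t.isAlive():
    print 'join() timed out'
```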
'A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.'
| @property
def name(self):
| assert self.__initialized, 'Thread.__init__() not called'
return self.__name
|
'Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.'
| @property
def ident(self):
| assert self.__initialized, 'Thread.__init__() not called'
return self.__ident
|
'Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.'
| def isAlive(self):
| assert self.__initialized, 'Thread.__init__() not called'
return (self.__started.is_set() and (not self.__stopped))
|
'A boolean value indicating whether this thread is a daemon thread (True) or not (False).
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.'
| @property
def daemon(self):
| assert self.__initialized, 'Thread.__init__() not called'
return self.__daemonic
|
'Stop the timer if it hasn\'t finished yet'
| def cancel(self):
| self.finished.set()
|
'_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\tbar\n\nbaz"
becomes " foo bar baz".'
| def _munge_whitespace(self, text):
| if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, _unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
|
'_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
\'Look,\', \' \', \'goof-\', \'ball\', \' \', \'--\', \' \',
\'use\', \' \', \'the\', \' \', \'-b\', \' \', \'option!\'
if break_on_hyphens is True, or in:
\'Look,\', \' \', \'goof-ball\', \' \', \'--\', \' \',
\'use\', \' \', \'the\', \' \', \'-b\', \' \', \'option!\'
otherwise.'
| def _split(self, text):
| if isinstance(text, _unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
elif self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = filter(None, chunks)
return chunks
|
'_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in \'chunks\'. Eg. when the
original text contains "... foo.\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.'
| def _fix_sentence_endings(self, chunks):
| i = 0
patsearch = self.sentence_end_re.search
while (i < (len(chunks) - 1)):
if ((chunks[(i + 1)] == ' ') and patsearch(chunks[i])):
chunks[(i + 1)] = ' '
i += 2
else:
i += 1
|
'_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.'
| def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
| if (width < 1):
space_left = 1
else:
space_left = (width - cur_len)
if self.break_long_words:
cur_line.append(reversed_chunks[(-1)][:space_left])
reversed_chunks[(-1)] = reversed_chunks[(-1)][space_left:]
elif (not cur_line):
cur_line.append(reversed_chunks.pop())
|
'_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length \'self.width\' or less. (If \'break_long_words\' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo \'break_long_words\'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.'
| def _wrap_chunks(self, chunks):
| lines = []
if (self.width <= 0):
raise ValueError(('invalid width %r (must be > 0)' % self.width))
chunks.reverse()
while chunks:
cur_line = []
cur_len = 0
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
width = (self.width - len(indent))
if (self.drop_whitespace and (chunks[(-1)].strip() == '') and lines):
del chunks[(-1)]
while chunks:
l = len(chunks[(-1)])
if ((cur_len + l) <= width):
cur_line.append(chunks.pop())
cur_len += l
else:
break
if (chunks and (len(chunks[(-1)]) > width)):
self._handle_long_word(chunks, cur_line, cur_len, width)
if (self.drop_whitespace and cur_line and (cur_line[(-1)].strip() == '')):
del cur_line[(-1)]
if cur_line:
lines.append((indent + ''.join(cur_line)))
return lines
|
'wrap(text : string) -> [string]
Reformat the single paragraph in \'text\' so it fits in lines of
no more than \'self.width\' columns, and return a list of wrapped
lines. Tabs in \'text\' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.'
| def wrap(self, text):
| text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
|
'fill(text : string) -> string
Reformat the single paragraph in \'text\' to fit in lines of no
more than \'self.width\' columns, and return a new string
containing the entire wrapped paragraph.'
| def fill(self, text):
| return '\n'.join(self.wrap(text))
|
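A brief sketch of wrap() versus fill(), including the indent options handled in _wrap_chunks() above:

```python
import textwrap

paragraph = ("The wrap() method returns a list of lines, while fill() "
             "joins the same lines with newlines into a single string.")

wrapper = textwrap.TextWrapper(width=30,
                               initial_indent='* ',
                               subsequent_indent='  ')
for line in wrapper.wrap(paragraph):
    print line

print textwrap.fill(paragraph, width=40)
```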
'Mark up some plain text, given a context of symbols to look for.
Each context dictionary maps object names to anchor names.'
| def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
| escape = (escape or self.escape)
results = []
here = 0
pattern = re.compile('\\b((http|ftp)://\\S+[\\w/]|RFC[- ]?(\\d+)|PEP[- ]?(\\d+)|(self\\.)?((?:\\w|\\.)+))\\b')
while 1:
match = pattern.search(text, here)
if (not match):
break
(start, end) = match.span()
results.append(escape(text[here:start]))
(all, scheme, rfc, pep, selfdot, name) = match.groups()
if scheme:
url = escape(all).replace('"', '&quot;')
results.append(('<a href="%s">%s</a>' % (url, url)))
elif rfc:
url = ('http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc))
results.append(('<a href="%s">%s</a>' % (url, escape(all))))
elif pep:
url = ('http://www.python.org/dev/peps/pep-%04d/' % int(pep))
results.append(('<a href="%s">%s</a>' % (url, escape(all))))
elif (text[end:(end + 1)] == '('):
results.append(self.namelink(name, methods, funcs, classes))
elif selfdot:
results.append(('self.<strong>%s</strong>' % name))
else:
results.append(self.namelink(name, classes))
here = end
results.append(escape(text[here:]))
return ''.join(results)
|
'Produce HTML documentation for a function or method object.'
| def docroutine(self, object, name, mod=None, funcs={}, classes={}, methods={}, cl=None):
| anchor = ((((cl and cl.__name__) or '') + '-') + name)
note = ''
title = ('<a name="%s"><strong>%s</strong></a>' % (self.escape(anchor), self.escape(name)))
if inspect.ismethod(object):
(args, varargs, varkw, defaults) = inspect.getargspec(object.im_func)
argspec = inspect.formatargspec(args[1:], varargs, varkw, defaults, formatvalue=self.formatvalue)
elif inspect.isfunction(object):
(args, varargs, varkw, defaults) = inspect.getargspec(object)
argspec = inspect.formatargspec(args, varargs, varkw, defaults, formatvalue=self.formatvalue)
else:
argspec = '(...)'
if isinstance(object, tuple):
argspec = (object[0] or argspec)
docstring = (object[1] or '')
else:
docstring = pydoc.getdoc(object)
decl = ((title + argspec) + (note and self.grey(('<font face="helvetica, arial">%s</font>' % note))))
doc = self.markup(docstring, self.preformat, funcs, classes, methods)
doc = (doc and ('<dd><tt>%s</tt></dd>' % doc))
return ('<dl><dt>%s</dt>%s</dl>\n' % (decl, doc))
|
'Produce HTML documentation for an XML-RPC server.'
| def docserver(self, server_name, package_documentation, methods):
| fdict = {}
for (key, value) in methods.items():
fdict[key] = ('#-' + key)
fdict[value] = fdict[key]
server_name = self.escape(server_name)
head = ('<big><big><strong>%s</strong></big></big>' % server_name)
result = self.heading(head, '#ffffff', '#7799ee')
doc = self.markup(package_documentation, self.preformat, fdict)
doc = (doc and ('<tt>%s</tt>' % doc))
result = (result + ('<p>%s</p>\n' % doc))
contents = []
method_items = sorted(methods.items())
for (key, value) in method_items:
contents.append(self.docroutine(value, key, funcs=fdict))
result = (result + self.bigsection('Methods', '#ffffff', '#eeaa77', pydoc.join(contents)))
return result
|
'Set the HTML title of the generated server documentation'
| def set_server_title(self, server_title):
| self.server_title = server_title
|
'Set the name of the generated HTML server documentation'
| def set_server_name(self, server_name):
| self.server_name = server_name
|
'Set the documentation string for the entire server.'
| def set_server_documentation(self, server_documentation):
| self.server_documentation = server_documentation
|
'generate_html_documentation() => html documentation for the server
Generates HTML documentation for the server using introspection for
installed functions and instances that do not implement the
_dispatch method. Alternatively, instances can choose to implement
the _get_method_argstring(method_name) method to provide the
argument string used in the documentation and the
_methodHelp(method_name) method to provide the help text used
in the documentation.'
| def generate_html_documentation(self):
| methods = {}
for method_name in self.system_listMethods():
if (method_name in self.funcs):
method = self.funcs[method_name]
elif (self.instance is not None):
method_info = [None, None]
if hasattr(self.instance, '_get_method_argstring'):
method_info[0] = self.instance._get_method_argstring(method_name)
if hasattr(self.instance, '_methodHelp'):
method_info[1] = self.instance._methodHelp(method_name)
method_info = tuple(method_info)
if (method_info != (None, None)):
method = method_info
elif (not hasattr(self.instance, '_dispatch')):
try:
method = resolve_dotted_attribute(self.instance, method_name)
except AttributeError:
method = method_info
else:
method = method_info
else:
assert 0, 'Could not find method in self.functions and no instance installed'
methods[method_name] = method
documenter = ServerHTMLDoc()
documentation = documenter.docserver(self.server_name, self.server_documentation, methods)
return documenter.page(self.server_title, documentation)
|
'Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.'
| def do_GET(self):
| if (not self.is_rpc_path_valid()):
self.report_404()
return
response = self.server.generate_html_documentation()
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.send_header('Content-length', str(len(response)))
self.end_headers()
self.wfile.write(response)
|
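A sketch of wiring these pieces together with DocXMLRPCServer: GET requests then return the HTML produced by generate_html_documentation(). The host, port, and registered function are placeholders.

```python
from DocXMLRPCServer import DocXMLRPCServer

def add(x, y):
    """Add two numbers and return the result."""
    return x + y

server = DocXMLRPCServer(('localhost', 8000), logRequests=False)
server.set_server_title('Demo service')
server.set_server_name('demo')
server.set_server_documentation('A tiny XML-RPC service used for illustration.')
server.register_function(add)
server.register_introspection_functions()
# Browsing to http://localhost:8000/ now returns the generated HTML page.
server.serve_forever()
```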
'Handles the HTTP GET request.
Interpret all HTTP GET requests as requests for server
documentation.'
| def handle_get(self):
| response = self.generate_html_documentation()
print 'Content-Type: text/html'
print ('Content-Length: %d' % len(response))
print
sys.stdout.write(response)
|
'Read at least wtd bytes (or until EOF)'
| def read(self, totalwtd):
| decdata = ''
wtd = totalwtd
while (wtd > 0):
if self.eof:
return decdata
wtd = (((wtd + 2) // 3) * 4)
data = self.ifp.read(wtd)
while 1:
try:
(decdatacur, self.eof) = binascii.a2b_hqx(data)
break
except binascii.Incomplete:
pass
newdata = self.ifp.read(1)
if (not newdata):
raise Error, 'Premature EOF on binhex file'
data = (data + newdata)
decdata = (decdata + decdatacur)
wtd = (totalwtd - len(decdata))
if ((not decdata) and (not self.eof)):
raise Error, 'Premature EOF on binhex file'
return decdata
|
'Initialize and reset this instance.'
| def __init__(self, verbose=0):
| self.verbose = verbose
self.reset()
|
'Reset this instance. Loses all unprocessed data.'
| def reset(self):
| self.__starttag_text = None
self.rawdata = ''
self.stack = []
self.lasttag = '???'
self.nomoretags = 0
self.literal = 0
markupbase.ParserBase.reset(self)
|
'Enter literal mode (CDATA) till EOF.
Intended for derived classes only.'
| def setnomoretags(self):
| self.nomoretags = self.literal = 1
|
'Enter literal mode (CDATA).
Intended for derived classes only.'
| def setliteral(self, *args):
| self.literal = 1
|
'Feed some data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include \'\n\'). (This just saves the text,
all the processing is done by goahead().)'
| def feed(self, data):
| self.rawdata = (self.rawdata + data)
self.goahead(0)
|
'Handle the remaining data.'
| def close(self):
| self.goahead(1)
|
'Convert character reference, may be overridden.'
| def convert_charref(self, name):
| try:
n = int(name)
except ValueError:
return
if (not (0 <= n <= 127)):
return
return self.convert_codepoint(n)
|
'Handle character reference, no need to override.'
| def handle_charref(self, name):
| replacement = self.convert_charref(name)
if (replacement is None):
self.unknown_charref(name)
else:
self.handle_data(replacement)
|
'Convert entity references.
As an alternative to overriding this method; one can tailor the
results by setting up the self.entitydefs mapping appropriately.'
| def convert_entityref(self, name):
| table = self.entitydefs
if (name in table):
return table[name]
else:
return
|
'Handle entity references, no need to override.'
| def handle_entityref(self, name):
| replacement = self.convert_entityref(name)
if (replacement is None):
self.unknown_entityref(name)
else:
self.handle_data(replacement)
|
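A sketch of a small SGMLParser subclass relying on the charref/entityref handling above; per convert_entityref()'s docstring, extra entities are supplied through the entitydefs mapping rather than by overriding methods.

```python
import sgmllib

class TextExtractor(sgmllib.SGMLParser):
    def __init__(self):
        sgmllib.SGMLParser.__init__(self)
        self.pieces = []
        # convert_entityref() consults this mapping.
        self.entitydefs = {'amp': '&', 'copy': '(c)'}

    def handle_data(self, data):
        self.pieces.append(data)

p = TextExtractor()
p.feed('Copy &copy; 2010 &#65;BC')
p.close()
print ''.join(p.pieces)   # 'Copy (c) 2010 ABC'
```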
'Create the generator for message flattening.
outfp is the output file-like object for writing the message to. It
must have a write() method.
Optional mangle_from_ is a flag that, when True (the default), escapes
From_ lines in the body of the message by putting a `>\' in front of
them.
Optional maxheaderlen specifies the longest length for a non-continued
header. When a header line is longer (in characters, with tabs
expanded to 8 spaces) than maxheaderlen, the header will split as
defined in the Header class. Set maxheaderlen to zero to disable
header wrapping. The default is 78, as recommended (but not required)
by RFC 2822.'
| def __init__(self, outfp, mangle_from_=True, maxheaderlen=78):
| self._fp = outfp
self._mangle_from_ = mangle_from_
self._maxheaderlen = maxheaderlen
|
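A sketch of the flattening workflow the constructor above sets up; with mangle_from_ left at its default, body lines beginning with "From " are escaped to ">From ". Addresses are placeholders.

```python
from cStringIO import StringIO
from email.mime.text import MIMEText
from email.generator import Generator

msg = MIMEText('first line\nFrom here, this line starts with "From ".')
msg['Subject'] = 'Generator demo'
msg['From'] = 'alice@example.com'
msg['To'] = 'bob@example.com'

out = StringIO()
gen = Generator(out, mangle_from_=True, maxheaderlen=78)
gen.flatten(msg)            # serialize the message into the output buffer
print out.getvalue()        # the second body line now reads '>From here, ...'
```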