desc (string, 3–26.7k chars) | decl (string, 11–7.89k chars) | bodies (string, 8–553k chars)
---|---|---|
'A tuple-specific equality assertion.
Args:
tuple1: The first tuple to compare.
tuple2: The second tuple to compare.
msg: Optional message to use on failure instead of a list of
differences.'
| def assertTupleEqual(self, tuple1, tuple2, msg=None):
| self.assertSequenceEqual(tuple1, tuple2, msg, seq_type=tuple)
|
'A set-specific equality assertion.
Args:
set1: The first set to compare.
set2: The second set to compare.
msg: Optional message to use on failure instead of a list of
differences.
assertSetEqual uses duck-typing to support
different types of sets, and is optimized for sets specifically
(parameters must support a difference method).'
| def assertSetEqual(self, set1, set2, msg=None):
| try:
difference1 = set1.difference(set2)
except TypeError as e:
self.fail(('invalid type when attempting set difference: %s' % e))
except AttributeError as e:
self.fail(('first argument does not support set difference: %s' % e))
try:
difference2 = set2.difference(set1)
except TypeError as e:
self.fail(('invalid type when attempting set difference: %s' % e))
except AttributeError as e:
self.fail(('second argument does not support set difference: %s' % e))
if (not (difference1 or difference2)):
return
lines = []
if difference1:
lines.append('Items in the first set but not the second:')
for item in difference1:
lines.append(repr(item))
if difference2:
lines.append('Items in the second set but not the first:')
for item in difference2:
lines.append(repr(item))
standardMsg = '\n'.join(lines)
self.fail(self._formatMessage(msg, standardMsg))
|
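A minimal usage sketch for the set assertion above (assuming a standard unittest.TestCase subclass; the duck-typed difference() calls mean a frozenset works just as well as a set):

    import unittest

    class SetAssertions(unittest.TestCase):
        def test_equal_sets(self):
            # Passes: both operands only need a difference() method.
            self.assertSetEqual(set([1, 2, 3]), frozenset([3, 2, 1]))

        def test_unequal_sets(self):
            # Fails, listing 3 under 'Items in the first set but not the second:'.
            self.assertSetEqual(set([1, 2, 3]), set([1, 2]))

    if __name__ == '__main__':
        unittest.main()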
'Just like self.assertTrue(a in b), but with a nicer default message.'
| def assertIn(self, member, container, msg=None):
| if (member not in container):
standardMsg = ('%s not found in %s' % (safe_repr(member), safe_repr(container)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a not in b), but with a nicer default message.'
| def assertNotIn(self, member, container, msg=None):
| if (member in container):
standardMsg = ('%s unexpectedly found in %s' % (safe_repr(member), safe_repr(container)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a is b), but with a nicer default message.'
| def assertIs(self, expr1, expr2, msg=None):
| if (expr1 is not expr2):
standardMsg = ('%s is not %s' % (safe_repr(expr1), safe_repr(expr2)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a is not b), but with a nicer default message.'
| def assertIsNot(self, expr1, expr2, msg=None):
| if (expr1 is expr2):
standardMsg = ('unexpectedly identical: %s' % (safe_repr(expr1),))
self.fail(self._formatMessage(msg, standardMsg))
|
'Checks whether actual is a superset of expected.'
| def assertDictContainsSubset(self, expected, actual, msg=None):
| missing = []
mismatched = []
for (key, value) in expected.iteritems():
if (key not in actual):
missing.append(key)
elif (value != actual[key]):
mismatched.append(('%s, expected: %s, actual: %s' % (safe_repr(key), safe_repr(value), safe_repr(actual[key]))))
if (not (missing or mismatched)):
return
standardMsg = ''
if missing:
standardMsg = ('Missing: %s' % ','.join((safe_repr(m) for m in missing)))
if mismatched:
if standardMsg:
standardMsg += '; '
standardMsg += ('Mismatched values: %s' % ','.join(mismatched))
self.fail(self._formatMessage(msg, standardMsg))
|
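The iteritems() call marks this implementation as Python 2; a rough usage sketch for assertDictContainsSubset (present on unittest.TestCase in Python 2.7, deprecated in later versions):

    import unittest

    class SubsetExample(unittest.TestCase):
        def test_subset(self):
            actual = {'a': 1, 'b': 2, 'c': 3}
            # Passes: every expected key is present with the expected value.
            self.assertDictContainsSubset({'a': 1, 'c': 3}, actual)
            # Would fail with "Missing: 'z'" in the standard message:
            # self.assertDictContainsSubset({'z': 9}, actual)

    if __name__ == '__main__':
        unittest.main()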
'An unordered sequence specific comparison. It asserts that
expected_seq and actual_seq contain the same elements. It is
the equivalent of::
self.assertEqual(sorted(expected_seq), sorted(actual_seq))
Raises with an error message listing which elements of expected_seq
are missing from actual_seq and vice versa if any.
Asserts that each element has the same count in both sequences.
Example:
- [0, 1, 1] and [1, 0, 1] compare equal.
- [0, 0, 1] and [0, 1] compare unequal.'
| def assertItemsEqual(self, expected_seq, actual_seq, msg=None):
| try:
expected = sorted(expected_seq)
actual = sorted(actual_seq)
except TypeError:
expected = list(expected_seq)
actual = list(actual_seq)
(missing, unexpected) = unorderable_list_difference(expected, actual, ignore_duplicate=False)
else:
return self.assertSequenceEqual(expected, actual, msg=msg)
errors = []
if missing:
errors.append(('Expected, but missing:\n %s' % safe_repr(missing)))
if unexpected:
errors.append(('Unexpected, but present:\n %s' % safe_repr(unexpected)))
if errors:
standardMsg = '\n'.join(errors)
self.fail(self._formatMessage(msg, standardMsg))
|
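A sketch of the counting behaviour described in the docstring (assertItemsEqual is the Python 2.7 name; Python 3 renamed it assertCountEqual):

    import unittest

    class ItemsExample(unittest.TestCase):
        def test_same_counts(self):
            # Order is ignored, but per-element counts must match.
            self.assertItemsEqual([0, 1, 1], [1, 0, 1])

        def test_different_counts(self):
            # Fails: the first sequence has two zeros, the second only one.
            self.assertItemsEqual([0, 0, 1], [0, 1])

    if __name__ == '__main__':
        unittest.main()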
'Assert that two multi-line strings are equal.'
| def assertMultiLineEqual(self, first, second, msg=None):
| self.assertTrue(isinstance(first, basestring), 'First argument is not a string')
self.assertTrue(isinstance(second, basestring), 'Second argument is not a string')
if (first != second):
standardMsg = ('%s != %s' % (safe_repr(first, True), safe_repr(second, True)))
diff = ('\n' + ''.join(difflib.ndiff(first.splitlines(True), second.splitlines(True))))
standardMsg = self._truncateMessage(standardMsg, diff)
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a < b), but with a nicer default message.'
| def assertLess(self, a, b, msg=None):
| if (not (a < b)):
standardMsg = ('%s not less than %s' % (safe_repr(a), safe_repr(b)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a <= b), but with a nicer default message.'
| def assertLessEqual(self, a, b, msg=None):
| if (not (a <= b)):
standardMsg = ('%s not less than or equal to %s' % (safe_repr(a), safe_repr(b)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a > b), but with a nicer default message.'
| def assertGreater(self, a, b, msg=None):
| if (not (a > b)):
standardMsg = ('%s not greater than %s' % (safe_repr(a), safe_repr(b)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Just like self.assertTrue(a >= b), but with a nicer default message.'
| def assertGreaterEqual(self, a, b, msg=None):
| if (not (a >= b)):
standardMsg = ('%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b)))
self.fail(self._formatMessage(msg, standardMsg))
|
'Same as self.assertTrue(obj is None), with a nicer default message.'
| def assertIsNone(self, obj, msg=None):
| if (obj is not None):
standardMsg = ('%s is not None' % (safe_repr(obj),))
self.fail(self._formatMessage(msg, standardMsg))
|
'Included for symmetry with assertIsNone.'
| def assertIsNotNone(self, obj, msg=None):
| if (obj is None):
standardMsg = 'unexpectedly None'
self.fail(self._formatMessage(msg, standardMsg))
|
'Same as self.assertTrue(isinstance(obj, cls)), with a nicer
default message.'
| def assertIsInstance(self, obj, cls, msg=None):
| if (not isinstance(obj, cls)):
standardMsg = ('%s is not an instance of %r' % (safe_repr(obj), cls))
self.fail(self._formatMessage(msg, standardMsg))
|
'Included for symmetry with assertIsInstance.'
| def assertNotIsInstance(self, obj, cls, msg=None):
| if isinstance(obj, cls):
standardMsg = ('%s is an instance of %r' % (safe_repr(obj), cls))
self.fail(self._formatMessage(msg, standardMsg))
|
'Asserts that the message in a raised exception matches a regexp.
Args:
expected_exception: Exception class expected to be raised.
expected_regexp: Regexp (re pattern object or string) expected
to be found in error message.
callable_obj: Function to be called.
args: Extra args.
kwargs: Extra kwargs.'
| def assertRaisesRegexp(self, expected_exception, expected_regexp, callable_obj=None, *args, **kwargs):
| if (callable_obj is None):
return _AssertRaisesContext(expected_exception, self, expected_regexp)
try:
callable_obj(*args, **kwargs)
except expected_exception as exc_value:
if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if (not expected_regexp.search(str(exc_value))):
raise self.failureException(('"%s" does not match "%s"' % (expected_regexp.pattern, str(exc_value))))
else:
if hasattr(expected_exception, '__name__'):
excName = expected_exception.__name__
else:
excName = str(expected_exception)
raise self.failureException(('%s not raised' % excName))
|
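A usage sketch covering both calling conventions, with a callable or as a context manager when callable_obj is omitted (assertRaisesRegexp is the Python 2.7 spelling; Python 3 renamed it assertRaisesRegex):

    import unittest

    class RaisesExample(unittest.TestCase):
        def test_with_callable(self):
            self.assertRaisesRegexp(ValueError, r'invalid literal', int, 'not a number')

        def test_as_context_manager(self):
            with self.assertRaisesRegexp(ZeroDivisionError, r'division'):
                1 / 0

    if __name__ == '__main__':
        unittest.main()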
'Fail the test unless the text matches the regular expression.'
| def assertRegexpMatches(self, text, expected_regexp, msg=None):
| if isinstance(expected_regexp, basestring):
expected_regexp = re.compile(expected_regexp)
if (not expected_regexp.search(text)):
msg = (msg or "Regexp didn't match")
msg = ('%s: %r not found in %r' % (msg, expected_regexp.pattern, text))
raise self.failureException(msg)
|
'Fail the test if the text matches the regular expression.'
| def assertNotRegexpMatches(self, text, unexpected_regexp, msg=None):
| if isinstance(unexpected_regexp, basestring):
unexpected_regexp = re.compile(unexpected_regexp)
match = unexpected_regexp.search(text)
if match:
msg = (msg or 'Regexp matched')
msg = ('%s: %r matches %r in %r' % (msg, text[match.start():match.end()], unexpected_regexp.pattern, text))
raise self.failureException(msg)
|
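A short sketch of the two regexp assertions above (Python 2.7 names; later renamed assertRegex and assertNotRegex):

    import unittest

    class RegexpExample(unittest.TestCase):
        def test_matches(self):
            self.assertRegexpMatches('Ran 3 tests in 0.002s', r'Ran \d+ tests')

        def test_does_not_match(self):
            self.assertNotRegexpMatches('OK', r'FAILED')

    if __name__ == '__main__':
        unittest.main()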
'Called when the given test is about to be run'
| def startTest(self, test):
| self.testsRun += 1
self._mirrorOutput = False
if self.buffer:
if (self._stderr_buffer is None):
self._stderr_buffer = StringIO()
self._stdout_buffer = StringIO()
sys.stdout = self._stdout_buffer
sys.stderr = self._stderr_buffer
|
'Called when the given test has been run'
| def stopTest(self, test):
| if self.buffer:
if self._mirrorOutput:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if (not output.endswith('\n')):
output += '\n'
self._original_stdout.write((STDOUT_LINE % output))
if error:
if (not error.endswith('\n')):
error += '\n'
self._original_stderr.write((STDERR_LINE % error))
sys.stdout = self._original_stdout
sys.stderr = self._original_stderr
self._stdout_buffer.seek(0)
self._stdout_buffer.truncate()
self._stderr_buffer.seek(0)
self._stderr_buffer.truncate()
self._mirrorOutput = False
|
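startTest and stopTest implement output buffering: when result.buffer is set, stdout and stderr are swapped for StringIO buffers on entry, and stopTest replays the captured output only if the test set _mirrorOutput (i.e. failed or errored). A rough sketch of enabling it through the standard runner:

    import unittest

    class Noisy(unittest.TestCase):
        def test_quiet_on_success(self):
            print('hidden, because this test passes')

        def test_shown_on_failure(self):
            print('replayed, because this test fails')
            self.fail('boom')

    if __name__ == '__main__':
        # buffer=True makes the runner set result.buffer, so the two hooks
        # above capture output and only echo it for the failing test.
        unittest.main(buffer=True)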
'Called when an error has occurred. \'err\' is a tuple of values as
returned by sys.exc_info().'
| @failfast
def addError(self, test, err):
| self.errors.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
|
'Called when an error has occurred. \'err\' is a tuple of values as
returned by sys.exc_info().'
| @failfast
def addFailure(self, test, err):
| self.failures.append((test, self._exc_info_to_string(err, test)))
self._mirrorOutput = True
|
'Called when a test has completed successfully'
| def addSuccess(self, test):
| pass
|
'Called when a test is skipped.'
| def addSkip(self, test, reason):
| self.skipped.append((test, reason))
|
'Called when an expected failure/error occurred.'
| def addExpectedFailure(self, test, err):
| self.expectedFailures.append((test, self._exc_info_to_string(err, test)))
|
'Called when a test was expected to fail, but succeeded.'
| @failfast
def addUnexpectedSuccess(self, test):
| self.unexpectedSuccesses.append(test)
|
'Tells whether or not this result was a success'
| def wasSuccessful(self):
| return ((len(self.failures) + len(self.errors)) == 0)
|
'Indicates that the tests should be aborted'
| def stop(self):
| self.shouldStop = True
|
'Converts a sys.exc_info()-style tuple of values into a string.'
| def _exc_info_to_string(self, err, test):
| (exctype, value, tb) = err
while (tb and self._is_relevant_tb_level(tb)):
tb = tb.tb_next
if (exctype is test.failureException):
length = self._count_relevant_tb_levels(tb)
msgLines = traceback.format_exception(exctype, value, tb, length)
else:
msgLines = traceback.format_exception(exctype, value, tb)
if self.buffer:
output = sys.stdout.getvalue()
error = sys.stderr.getvalue()
if output:
if (not output.endswith('\n')):
output += '\n'
msgLines.append((STDOUT_LINE % output))
if error:
if (not error.endswith('\n')):
error += '\n'
msgLines.append((STDERR_LINE % error))
return ''.join(msgLines)
|
'Run the given test case or test suite.'
| def run(self, test):
| result = self._makeResult()
result.failfast = self.failfast
result.buffer = self.buffer
registerResult(result)
startTime = time.time()
startTestRun = getattr(result, 'startTestRun', None)
if (startTestRun is not None):
startTestRun()
try:
test(result)
finally:
stopTestRun = getattr(result, 'stopTestRun', None)
if (stopTestRun is not None):
stopTestRun()
else:
result.printErrors()
stopTime = time.time()
timeTaken = (stopTime - startTime)
if hasattr(result, 'separator2'):
self.stream.writeln(result.separator2)
run = result.testsRun
self.stream.writeln(('Ran %d test%s in %.3fs' % (run, (((run != 1) and 's') or ''), timeTaken)))
self.stream.writeln()
expectedFails = unexpectedSuccesses = skipped = 0
try:
results = map(len, (result.expectedFailures, result.unexpectedSuccesses, result.skipped))
(expectedFails, unexpectedSuccesses, skipped) = results
except AttributeError:
pass
infos = []
if (not result.wasSuccessful()):
self.stream.write('FAILED')
(failed, errored) = map(len, (result.failures, result.errors))
if failed:
infos.append(('failures=%d' % failed))
if errored:
infos.append(('errors=%d' % errored))
else:
self.stream.write('OK')
if skipped:
infos.append(('skipped=%d' % skipped))
if expectedFails:
infos.append(('expected failures=%d' % expectedFails))
if unexpectedSuccesses:
infos.append(('unexpected successes=%d' % unexpectedSuccesses))
if infos:
self.stream.writeln((' (%s)' % (', '.join(infos),)))
else:
self.stream.write('\n')
return result
|
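A minimal sketch of driving run() directly with a hand-built suite (standard unittest API):

    import unittest

    class Smoke(unittest.TestCase):
        def test_truth(self):
            self.assertTrue(True)

    if __name__ == '__main__':
        suite = unittest.TestLoader().loadTestsFromTestCase(Smoke)
        result = unittest.TextTestRunner(verbosity=2).run(suite)
        # result is a TestResult; wasSuccessful() is True when failures + errors == 0.
        print(result.wasSuccessful())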
'Return a suite of all test cases contained in testCaseClass'
| def loadTestsFromTestCase(self, testCaseClass):
| if issubclass(testCaseClass, suite.TestSuite):
raise TypeError('Test cases should not be derived from TestSuite. Maybe you meant to derive from TestCase?')
testCaseNames = self.getTestCaseNames(testCaseClass)
if ((not testCaseNames) and hasattr(testCaseClass, 'runTest')):
testCaseNames = ['runTest']
loaded_suite = self.suiteClass(map(testCaseClass, testCaseNames))
return loaded_suite
|
'Return a suite of all test cases contained in the given module'
| def loadTestsFromModule(self, module, use_load_tests=True):
| tests = []
for name in dir(module):
obj = getattr(module, name)
if (isinstance(obj, type) and issubclass(obj, unittest.TestCase)):
tests.append(self.loadTestsFromTestCase(obj))
load_tests = getattr(module, 'load_tests', None)
tests = self.suiteClass(tests)
if (use_load_tests and (load_tests is not None)):
try:
return load_tests(self, tests, None)
except Exception as e:
return _make_failed_load_tests(module.__name__, e, self.suiteClass)
return tests
|
'Return a suite of all test cases given a string specifier.
The name may resolve either to a module, a test case class, a
test method within a test case class, or a callable object which
returns a TestCase or TestSuite instance.
The method optionally resolves the names relative to a given module.'
| def loadTestsFromName(self, name, module=None):
| parts = name.split('.')
if (module is None):
parts_copy = parts[:]
while parts_copy:
try:
module = __import__('.'.join(parts_copy))
break
except ImportError:
del parts_copy[(-1)]
if (not parts_copy):
raise
parts = parts[1:]
obj = module
for part in parts:
(parent, obj) = (obj, getattr(obj, part))
if isinstance(obj, types.ModuleType):
return self.loadTestsFromModule(obj)
elif (isinstance(obj, type) and issubclass(obj, unittest.TestCase)):
return self.loadTestsFromTestCase(obj)
elif (isinstance(obj, types.UnboundMethodType) and isinstance(parent, type) and issubclass(parent, case.TestCase)):
return self.suiteClass([parent(obj.__name__)])
elif isinstance(obj, unittest.TestSuite):
return obj
elif hasattr(obj, '__call__'):
test = obj()
if isinstance(test, unittest.TestSuite):
return test
elif isinstance(test, unittest.TestCase):
return self.suiteClass([test])
else:
raise TypeError(('calling %s returned %s, not a test' % (obj, test)))
else:
raise TypeError(("don't know how to make test from: %s" % obj))
|
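loadTestsFromName imports the longest importable prefix of the dotted name and then walks the remaining attributes; a rough sketch (the project and test names here are hypothetical):

    import unittest

    loader = unittest.TestLoader()
    # Any of these specifiers works, per the docstring above:
    #   a module         -> 'myproject.tests'
    #   a TestCase class -> 'myproject.tests.SmokeTests'
    #   a single method  -> 'myproject.tests.SmokeTests.test_login'
    suite = loader.loadTestsFromName('myproject.tests.SmokeTests.test_login')
    unittest.TextTestRunner().run(suite)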
'Return a suite of all test cases found using the given sequence
of string specifiers. See \'loadTestsFromName()\'.'
| def loadTestsFromNames(self, names, module=None):
| suites = [self.loadTestsFromName(name, module) for name in names]
return self.suiteClass(suites)
|
'Return a sorted sequence of method names found within testCaseClass'
| def getTestCaseNames(self, testCaseClass):
| def isTestMethod(attrname, testCaseClass=testCaseClass, prefix=self.testMethodPrefix):
return (attrname.startswith(prefix) and hasattr(getattr(testCaseClass, attrname), '__call__'))
testFnNames = filter(isTestMethod, dir(testCaseClass))
if self.sortTestMethodsUsing:
testFnNames.sort(key=_CmpToKey(self.sortTestMethodsUsing))
return testFnNames
|
'Find and return all test modules from the specified start
directory, recursing into subdirectories to find them. Only test files
that match the pattern will be loaded. (Using shell style pattern
matching.)
All test modules must be importable from the top level of the project.
If the start directory is not the top level directory then the top
level directory must be specified separately.
If a test package name (directory with \'__init__.py\') matches the
pattern then the package will be checked for a \'load_tests\' function. If
this exists then it will be called with loader, tests, pattern.
If load_tests exists then discovery does *not* recurse into the package,
load_tests is responsible for loading all tests in the package.
The pattern is deliberately not stored as a loader attribute so that
packages can continue discovery themselves. top_level_dir is stored so
load_tests does not need to pass this argument in to loader.discover().'
| def discover(self, start_dir, pattern='test*.py', top_level_dir=None):
| set_implicit_top = False
if ((top_level_dir is None) and (self._top_level_dir is not None)):
top_level_dir = self._top_level_dir
elif (top_level_dir is None):
set_implicit_top = True
top_level_dir = start_dir
top_level_dir = os.path.abspath(top_level_dir)
if (not (top_level_dir in sys.path)):
sys.path.insert(0, top_level_dir)
self._top_level_dir = top_level_dir
is_not_importable = False
if os.path.isdir(os.path.abspath(start_dir)):
start_dir = os.path.abspath(start_dir)
if (start_dir != top_level_dir):
is_not_importable = (not os.path.isfile(os.path.join(start_dir, '__init__.py')))
else:
try:
__import__(start_dir)
except ImportError:
is_not_importable = True
else:
the_module = sys.modules[start_dir]
top_part = start_dir.split('.')[0]
start_dir = os.path.abspath(os.path.dirname(the_module.__file__))
if set_implicit_top:
self._top_level_dir = os.path.abspath(os.path.dirname(os.path.dirname(sys.modules[top_part].__file__)))
sys.path.remove(top_level_dir)
if is_not_importable:
raise ImportError(('Start directory is not importable: %r' % start_dir))
tests = list(self._find_tests(start_dir, pattern))
return self.suiteClass(tests)
|
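A sketch of discovery from code and from the command line (the tests/ layout is hypothetical; with this Python 2-era loader every package from the top level down needs an __init__.py):

    import unittest

    # Programmatic discovery: load files matching test*.py under ./tests.
    loader = unittest.TestLoader()
    suite = loader.discover(start_dir='tests', pattern='test*.py', top_level_dir='.')
    unittest.TextTestRunner(verbosity=2).run(suite)

    # Command-line equivalent:
    #   python -m unittest discover -s tests -p 'test*.py' -t .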
'Used by discovery. Yields test suites it loads.'
| def _find_tests(self, start_dir, pattern):
| paths = os.listdir(start_dir)
for path in paths:
full_path = os.path.join(start_dir, path)
if os.path.isfile(full_path):
if (not VALID_MODULE_NAME.match(path)):
continue
if (not self._match_path(path, full_path, pattern)):
continue
name = self._get_name_from_path(full_path)
try:
module = self._get_module_from_name(name)
except:
(yield _make_failed_import_test(name, self.suiteClass))
else:
mod_file = os.path.abspath(getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if (realpath.lower() != fullpath_noext.lower()):
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = '%r module incorrectly imported from %r. Expected %r. Is this module globally installed?'
raise ImportError((msg % (mod_name, module_dir, expected_dir)))
(yield self.loadTestsFromModule(module))
elif os.path.isdir(full_path):
if (not os.path.isfile(os.path.join(full_path, '__init__.py'))):
continue
load_tests = None
tests = None
if fnmatch(path, pattern):
name = self._get_name_from_path(full_path)
package = self._get_module_from_name(name)
load_tests = getattr(package, 'load_tests', None)
tests = self.loadTestsFromModule(package, use_load_tests=False)
if (load_tests is None):
if (tests is not None):
(yield tests)
for test in self._find_tests(full_path, pattern):
(yield test)
else:
try:
(yield load_tests(self, tests, pattern))
except Exception as e:
(yield _make_failed_load_tests(package.__name__, e, self.suiteClass))
|
'Run the tests without collecting errors in a TestResult'
| def debug(self):
| for test in self:
test.debug()
|
'Run the tests without collecting errors in a TestResult'
| def debug(self):
| debug = _DebugResult()
self._wrapped_run(debug, True)
self._tearDownPreviousClass(None, debug)
self._handleModuleTearDown(debug)
|
'Concatenating a safe byte string with another safe byte string or safe
unicode string is safe. Otherwise, the result is no longer safe.'
| def __add__(self, rhs):
| t = super(SafeBytes, self).__add__(rhs)
if isinstance(rhs, SafeText):
return SafeText(t)
elif isinstance(rhs, SafeBytes):
return SafeBytes(t)
return t
|
'Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the \'method\'
argument.'
| def _proxy_method(self, *args, **kwargs):
| method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
|
'Concatenating a safe unicode string with another safe byte string or
safe unicode string is safe. Otherwise, the result is no longer safe.'
| def __add__(self, rhs):
| t = super(SafeText, self).__add__(rhs)
if isinstance(rhs, SafeData):
return SafeText(t)
return t
|
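These __add__ overrides are what keep concatenation of marked-safe strings safe in Django; a small sketch, assuming django.utils.safestring is available:

    from django.utils.safestring import mark_safe, SafeData

    s = mark_safe(u'<b>bold</b>') + mark_safe(u' &amp; safe')
    isinstance(s, SafeData)       # True: safe + safe stays safe

    mixed = mark_safe(u'<b>bold</b>') + u'<i>unsafe</i>'
    isinstance(mixed, SafeData)   # False: mixing with a plain string drops safety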
'Wrap a call to a normal unicode method up so that we return safe
results. The method that is being wrapped is passed in the \'method\'
argument.'
| def _proxy_method(self, *args, **kwargs):
| method = kwargs.pop('method')
data = method(self, *args, **kwargs)
if isinstance(data, bytes):
return SafeBytes(data)
else:
return SafeText(data)
|
'Resolve strings to objects using standard import and attribute
syntax.'
| def resolve(self, s):
| name = s.split('.')
used = name.pop(0)
try:
found = self.importer(used)
for frag in name:
used += ('.' + frag)
try:
found = getattr(found, frag)
except AttributeError:
self.importer(used)
found = getattr(found, frag)
return found
except ImportError:
(e, tb) = sys.exc_info()[1:]
v = ValueError(('Cannot resolve %r: %s' % (s, e)))
(v.__cause__, v.__traceback__) = (e, tb)
raise v
|
'Default converter for the ext:// protocol.'
| def ext_convert(self, value):
| return self.resolve(value)
|
'Default converter for the cfg:// protocol.'
| def cfg_convert(self, value):
| rest = value
m = self.WORD_PATTERN.match(rest)
if (m is None):
raise ValueError(('Unable to convert %r' % value))
else:
rest = rest[m.end():]
d = self.config[m.groups()[0]]
while rest:
m = self.DOT_PATTERN.match(rest)
if m:
d = d[m.groups()[0]]
else:
m = self.INDEX_PATTERN.match(rest)
if m:
idx = m.groups()[0]
if (not self.DIGIT_PATTERN.match(idx)):
d = d[idx]
else:
try:
n = int(idx)
d = d[n]
except TypeError:
d = d[idx]
if m:
rest = rest[m.end():]
else:
raise ValueError(('Unable to convert %r at %r' % (value, rest)))
return d
|
'Convert values to an appropriate type. dicts, lists and tuples are
replaced by their converting alternatives. Strings are checked to
see if they have a conversion format and are converted if they do.'
| def convert(self, value):
| if ((not isinstance(value, ConvertingDict)) and isinstance(value, dict)):
value = ConvertingDict(value)
value.configurator = self
elif ((not isinstance(value, ConvertingList)) and isinstance(value, list)):
value = ConvertingList(value)
value.configurator = self
elif ((not isinstance(value, ConvertingTuple)) and isinstance(value, tuple)):
value = ConvertingTuple(value)
value.configurator = self
elif isinstance(value, six.string_types):
m = self.CONVERT_PATTERN.match(value)
if m:
d = m.groupdict()
prefix = d['prefix']
converter = self.value_converters.get(prefix, None)
if converter:
suffix = d['suffix']
converter = getattr(self, converter)
value = converter(suffix)
return value
|
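These converter hooks are what make the ext:// and cfg:// prefixes work inside logging.config.dictConfig: ext:// hands the suffix to resolve() for an import, while cfg:// looks a value up elsewhere in the same config. A small sketch using the documented ext://sys.stdout form:

    import logging
    import logging.config

    logging.config.dictConfig({
        'version': 1,
        'formatters': {'plain': {'format': '%(levelname)s %(message)s'}},
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                # ext:// -> ext_convert() -> resolve('sys.stdout')
                'stream': 'ext://sys.stdout',
                'formatter': 'plain',
            },
        },
        'root': {'level': 'INFO', 'handlers': ['console']},
    })
    logging.getLogger(__name__).info('configured via ext://')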
'Configure an object with a user-supplied factory.'
| def configure_custom(self, config):
| c = config.pop('()')
if ((not hasattr(c, '__call__')) and hasattr(types, 'ClassType') and (type(c) != types.ClassType)):
c = self.resolve(c)
props = config.pop('.', None)
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
result = c(**kwargs)
if props:
for (name, value) in props.items():
setattr(result, name, value)
return result
|
'Utility function which converts lists to tuples.'
| def as_tuple(self, value):
| if isinstance(value, list):
value = tuple(value)
return value
|
'Do the configuration.'
| def configure(self):
| config = self.config
if ('version' not in config):
raise ValueError("dictionary doesn't specify a version")
if (config['version'] != 1):
raise ValueError(('Unsupported version: %s' % config['version']))
incremental = config.pop('incremental', False)
EMPTY_DICT = {}
logging._acquireLock()
try:
if incremental:
handlers = config.get('handlers', EMPTY_DICT)
if (sys.version_info[:2] == (2, 7)):
for name in handlers:
if (name not in logging._handlers):
raise ValueError(('No handler found with name %r' % name))
else:
try:
handler = logging._handlers[name]
handler_config = handlers[name]
level = handler_config.get('level', None)
if level:
handler.setLevel(_checkLevel(level))
except StandardError as e:
raise ValueError(('Unable to configure handler %r: %s' % (name, e)))
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
try:
self.configure_logger(name, loggers[name], True)
except StandardError as e:
raise ValueError(('Unable to configure logger %r: %s' % (name, e)))
root = config.get('root', None)
if root:
try:
self.configure_root(root, True)
except StandardError as e:
raise ValueError(('Unable to configure root logger: %s' % e))
else:
disable_existing = config.pop('disable_existing_loggers', True)
logging._handlers.clear()
del logging._handlerList[:]
formatters = config.get('formatters', EMPTY_DICT)
for name in formatters:
try:
formatters[name] = self.configure_formatter(formatters[name])
except StandardError as e:
raise ValueError(('Unable to configure formatter %r: %s' % (name, e)))
filters = config.get('filters', EMPTY_DICT)
for name in filters:
try:
filters[name] = self.configure_filter(filters[name])
except StandardError as e:
raise ValueError(('Unable to configure filter %r: %s' % (name, e)))
handlers = config.get('handlers', EMPTY_DICT)
for name in sorted(handlers):
try:
handler = self.configure_handler(handlers[name])
handler.name = name
handlers[name] = handler
except StandardError as e:
raise ValueError(('Unable to configure handler %r: %s' % (name, e)))
root = logging.root
existing = list(root.manager.loggerDict)
existing.sort()
child_loggers = []
loggers = config.get('loggers', EMPTY_DICT)
for name in loggers:
if (name in existing):
i = existing.index(name)
prefixed = (name + '.')
pflen = len(prefixed)
num_existing = len(existing)
i = (i + 1)
while ((i < num_existing) and (existing[i][:pflen] == prefixed)):
child_loggers.append(existing[i])
i = (i + 1)
existing.remove(name)
try:
self.configure_logger(name, loggers[name])
except StandardError as e:
raise ValueError(('Unable to configure logger %r: %s' % (name, e)))
for log in existing:
logger = root.manager.loggerDict[log]
if (log in child_loggers):
logger.level = logging.NOTSET
logger.handlers = []
logger.propagate = True
elif disable_existing:
logger.disabled = True
root = config.get('root', None)
if root:
try:
self.configure_root(root)
except StandardError as e:
raise ValueError(('Unable to configure root logger: %s' % e))
finally:
logging._releaseLock()
|
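configure() is the engine behind logging.config.dictConfig; in the non-incremental branch it rebuilds formatters, filters and handlers, wires up the named loggers, and finally the root logger. A sketch exercising the loggers section (the 'myapp.db' logger name is just an example):

    import logging
    import logging.config

    logging.config.dictConfig({
        'version': 1,                       # required, and must be 1
        'disable_existing_loggers': False,  # keep loggers created before this call
        'handlers': {
            'console': {'class': 'logging.StreamHandler', 'level': 'DEBUG'},
        },
        'loggers': {
            # configure_logger() is called once per entry here
            'myapp.db': {'level': 'WARNING', 'handlers': ['console'], 'propagate': False},
        },
        'root': {'level': 'INFO', 'handlers': ['console']},
    })

    logging.getLogger('myapp.db').warning('only WARNING and above gets through')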
'Configure a formatter from a dictionary.'
| def configure_formatter(self, config):
| if ('()' in config):
factory = config['()']
try:
result = self.configure_custom(config)
except TypeError as te:
if ("'format'" not in str(te)):
raise
config['fmt'] = config.pop('format')
config['()'] = factory
result = self.configure_custom(config)
else:
fmt = config.get('format', None)
dfmt = config.get('datefmt', None)
result = logging.Formatter(fmt, dfmt)
return result
|
'Configure a filter from a dictionary.'
| def configure_filter(self, config):
| if ('()' in config):
result = self.configure_custom(config)
else:
name = config.get('name', '')
result = logging.Filter(name)
return result
|
'Add filters to a filterer from a list of names.'
| def add_filters(self, filterer, filters):
| for f in filters:
try:
filterer.addFilter(self.config['filters'][f])
except StandardError as e:
raise ValueError(('Unable to add filter %r: %s' % (f, e)))
|
'Configure a handler from a dictionary.'
| def configure_handler(self, config):
| formatter = config.pop('formatter', None)
if formatter:
try:
formatter = self.config['formatters'][formatter]
except StandardError as e:
raise ValueError(('Unable to set formatter %r: %s' % (formatter, e)))
level = config.pop('level', None)
filters = config.pop('filters', None)
if ('()' in config):
c = config.pop('()')
if ((not hasattr(c, '__call__')) and hasattr(types, 'ClassType') and (type(c) != types.ClassType)):
c = self.resolve(c)
factory = c
else:
klass = self.resolve(config.pop('class'))
if (issubclass(klass, logging.handlers.MemoryHandler) and ('target' in config)):
try:
config['target'] = self.config['handlers'][config['target']]
except StandardError as e:
raise ValueError(('Unable to set target handler %r: %s' % (config['target'], e)))
elif (issubclass(klass, logging.handlers.SMTPHandler) and ('mailhost' in config)):
config['mailhost'] = self.as_tuple(config['mailhost'])
elif (issubclass(klass, logging.handlers.SysLogHandler) and ('address' in config)):
config['address'] = self.as_tuple(config['address'])
factory = klass
kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
try:
result = factory(**kwargs)
except TypeError as te:
if ("'stream'" not in str(te)):
raise
kwargs['strm'] = kwargs.pop('stream')
result = factory(**kwargs)
if formatter:
result.setFormatter(formatter)
if (level is not None):
result.setLevel(_checkLevel(level))
if filters:
self.add_filters(result, filters)
return result
|
'Add handlers to a logger from a list of names.'
| def add_handlers(self, logger, handlers):
| for h in handlers:
try:
logger.addHandler(self.config['handlers'][h])
except StandardError as e:
raise ValueError(('Unable to add handler %r: %s' % (h, e)))
|
'Perform configuration which is common to root and non-root loggers.'
| def common_logger_config(self, logger, config, incremental=False):
| level = config.get('level', None)
if (level is not None):
logger.setLevel(_checkLevel(level))
if (not incremental):
for h in logger.handlers[:]:
logger.removeHandler(h)
handlers = config.get('handlers', None)
if handlers:
self.add_handlers(logger, handlers)
filters = config.get('filters', None)
if filters:
self.add_filters(logger, filters)
|
'Configure a non-root logger from a dictionary.'
| def configure_logger(self, name, config, incremental=False):
| logger = logging.getLogger(name)
self.common_logger_config(logger, config, incremental)
propagate = config.get('propagate', None)
if (propagate is not None):
logger.propagate = propagate
|
'Configure a root logger from a dictionary.'
| def configure_root(self, config, incremental=False):
| root = logging.getLogger()
self.common_logger_config(root, config, incremental)
|
'Returns a copy of this object.'
| def copy(self):
| return self.__copy__()
|
'Returns something like
"{\'key1\': \'val1\', \'key2\': \'val2\', \'key3\': \'val3\'}"
instead of the generic "<object meta-data>" inherited from object.'
| def __str__(self):
| return str(dict(self.items()))
|
'Returns something like
MergeDict({\'key1\': \'val1\', \'key2\': \'val2\'}, {\'key3\': \'val3\'})
instead of generic "<object meta-data>" inherited from object.'
| def __repr__(self):
| dictreprs = ', '.join((repr(d) for d in self.dicts))
return ('%s(%s)' % (self.__class__.__name__, dictreprs))
|
'Returns the value of the item at the given zero-based index.'
| def value_for_index(self, index):
| warnings.warn('SortedDict.value_for_index is deprecated', PendingDeprecationWarning, stacklevel=2)
return self[self.keyOrder[index]]
|
'Inserts the key, value pair before the item with the given index.'
| def insert(self, index, key, value):
| warnings.warn('SortedDict.insert is deprecated', PendingDeprecationWarning, stacklevel=2)
if (key in self.keyOrder):
n = self.keyOrder.index(key)
del self.keyOrder[n]
if (n < index):
index -= 1
self.keyOrder.insert(index, key)
super(SortedDict, self).__setitem__(key, value)
|
'Returns a copy of this object.'
| def copy(self):
| return self.__class__(self)
|
'Replaces the normal dict.__repr__ with a version that returns the keys
in their sorted order.'
| def __repr__(self):
| return ('{%s}' % ', '.join([('%r: %r' % (k, v)) for (k, v) in six.iteritems(self)]))
|
'Returns the last data value for this key, or [] if it\'s an empty list;
raises KeyError if not found.'
| def __getitem__(self, key):
| try:
list_ = super(MultiValueDict, self).__getitem__(key)
except KeyError:
raise MultiValueDictKeyError(('Key %r not found in %r' % (key, self)))
try:
return list_[(-1)]
except IndexError:
return []
|
'Returns the last data value for the passed key. If key doesn\'t exist
or value is an empty list, then default is returned.'
| def get(self, key, default=None):
| try:
val = self[key]
except KeyError:
return default
if (val == []):
return default
return val
|
'Returns the list of values for the passed key. If key doesn\'t exist,
then a default value is returned.'
| def getlist(self, key, default=None):
| try:
return super(MultiValueDict, self).__getitem__(key)
except KeyError:
if (default is None):
return []
return default
|
'Appends an item to the internal list associated with key.'
| def appendlist(self, key, value):
| self.setlistdefault(key).append(value)
|
'Yields (key, value) pairs, where value is the last item in the list
associated with the key.'
| def _iteritems(self):
| for key in self:
(yield (key, self[key]))
|
'Yields (key, list) pairs.'
| def _iterlists(self):
| return six.iteritems(super(MultiValueDict, self))
|
'Yield the last value on every key list.'
| def _itervalues(self):
| for key in self:
(yield self[key])
|
'Returns a shallow copy of this object.'
| def copy(self):
| return copy.copy(self)
|
'update() extends rather than replaces existing key lists.
Also accepts keyword args.'
| def update(self, *args, **kwargs):
| if (len(args) > 1):
raise TypeError(('update expected at most 1 arguments, got %d' % len(args)))
if args:
other_dict = args[0]
if isinstance(other_dict, MultiValueDict):
for (key, value_list) in other_dict.lists():
self.setlistdefault(key).extend(value_list)
else:
try:
for (key, value) in other_dict.items():
self.setlistdefault(key).append(value)
except TypeError:
raise ValueError('MultiValueDict.update() takes either a MultiValueDict or dictionary')
for (key, value) in six.iteritems(kwargs):
self.setlistdefault(key).append(value)
|
'Returns current object as a dict with singular values.'
| def dict(self):
| return dict(((key, self[key]) for key in self))
|
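The preceding methods belong to Django's MultiValueDict (django.utils.datastructures), which keeps a list per key but hands back the last item by default; a short sketch, assuming Django is installed:

    from django.utils.datastructures import MultiValueDict

    d = MultiValueDict({'name': ['Adrian', 'Simon'], 'position': ['Developer']})
    d['name']            # 'Simon' -- __getitem__ returns the last value
    d.getlist('name')    # ['Adrian', 'Simon']
    d.appendlist('name', 'Jacob')
    d.update({'position': 'Manager'})   # extends the list rather than replacing it
    d.getlist('position')               # ['Developer', 'Manager']
    d.dict()                            # {'name': 'Jacob', 'position': 'Manager'}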
'Retrieves the real value after stripping the prefix string (if
present). If the prefix is present, pass the value through self.func
before returning, otherwise return the raw value.'
| def __getitem__(self, key):
| if key.startswith(self.prefix):
use_func = True
key = key[len(self.prefix):]
else:
use_func = False
value = super(DictWrapper, self).__getitem__(key)
if use_func:
return self.func(value)
return value
|
'Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)'
| def has_leading_dir(self, paths):
| common_prefix = None
for path in paths:
(prefix, rest) = self.split_leading_dir(path)
if (not prefix):
return False
elif (common_prefix is None):
common_prefix = prefix
elif (prefix != common_prefix):
return False
return True
|
'Convenience method for adding an element with no children'
| def addQuickElement(self, name, contents=None, attrs=None):
| if (attrs is None):
attrs = {}
self.startElement(name, attrs)
if (contents is not None):
self.characters(contents)
self.endElement(name)
|
'Returns the text truncated to be no longer than the specified number
of characters.
Takes an optional argument of what should be used to notify that the
string has been truncated, defaulting to a translatable string of an
ellipsis (...).'
| def chars(self, num, truncate=None):
| length = int(num)
text = unicodedata.normalize(u'NFC', self._wrapped)
truncate_len = length
for char in self.add_truncation_text(u'', truncate):
if (not unicodedata.combining(char)):
truncate_len -= 1
if (truncate_len == 0):
break
s_len = 0
end_index = None
for (i, char) in enumerate(text):
if unicodedata.combining(char):
continue
s_len += 1
if ((end_index is None) and (s_len > truncate_len)):
end_index = i
if (s_len > length):
return self.add_truncation_text(text[:(end_index or 0)], truncate)
return text
|
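chars() counts user-perceived characters (combining marks are skipped) and charges the truncation text against the limit; together with words() below it makes up Django's django.utils.text.Truncator. A rough sketch, assuming Django is available:

    from django.utils.text import Truncator

    t = Truncator('The quick brown fox jumped over the lazy dog.')
    t.chars(20)            # at most 20 user-perceived chars, ellipsis included
    t.words(4)             # first four words plus the truncation text
    t.words(4, html=True)  # HTML-aware variant: re-closes tags left open by the cut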
'Truncates a string after a certain number of words. Takes an optional
argument of what should be used to notify that the string has been
truncated, defaulting to ellipsis (...).'
| def words(self, num, truncate=None, html=False):
| length = int(num)
if html:
return self._html_words(length, truncate)
return self._text_words(length, truncate)
|
'Truncates a string after a certain number of words.
Newlines in the string will be stripped.'
| def _text_words(self, length, truncate):
| words = self._wrapped.split()
if (len(words) > length):
words = words[:length]
return self.add_truncation_text(u' '.join(words), truncate)
return u' '.join(words)
|
'Truncates HTML to a certain number of words (not counting tags and
comments). Closes opened tags if they were correctly closed in the
given HTML.
Newlines in the HTML are preserved.'
| def _html_words(self, length, truncate):
| if (length <= 0):
return u''
html4_singlets = (u'br', u'col', u'link', u'base', u'img', u'param', u'area', u'hr', u'input')
pos = 0
end_text_pos = 0
words = 0
open_tags = []
while (words <= length):
m = re_words.search(self._wrapped, pos)
if (not m):
break
pos = m.end(0)
if m.group(1):
words += 1
if (words == length):
end_text_pos = pos
continue
tag = re_tag.match(m.group(0))
if ((not tag) or end_text_pos):
continue
(closing_tag, tagname, self_closing) = tag.groups()
tagname = tagname.lower()
if (self_closing or (tagname in html4_singlets)):
pass
elif closing_tag:
try:
i = open_tags.index(tagname)
except ValueError:
pass
else:
open_tags = open_tags[(i + 1):]
else:
open_tags.insert(0, tagname)
if (words <= length):
return self._wrapped
out = self._wrapped[:end_text_pos]
truncate_text = self.add_truncation_text(u'', truncate)
if truncate_text:
out += truncate_text
for tag in open_tags:
out += (u'</%s>' % tag)
return out
|
'Constructs a new Node. If no connector is given, the default will be
used.
Warning: You probably don\'t want to pass in the \'negated\' parameter. It
is NOT the same as constructing a node and calling negate() on the
result.'
| def __init__(self, children=None, connector=None, negated=False):
| self.children = ((children and children[:]) or [])
self.connector = (connector or self.default)
self.subtree_parents = []
self.negated = negated
|
'This is called to create a new instance of this class when we need new
Nodes (or subclasses) in the internal code in this class. Normally, it
just shadows __init__(). However, subclasses with an __init__ signature
that is not an extension of Node.__init__ might need to implement this
method to allow a Node to create a new instance of them (if they have
any extra setting up to do).'
| def _new_instance(cls, children=None, connector=None, negated=False):
| obj = Node(children, connector, negated)
obj.__class__ = cls
return obj
|
'Utility method used by copy.deepcopy().'
| def __deepcopy__(self, memodict):
| obj = Node(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = copy.deepcopy(self.children, memodict)
obj.subtree_parents = copy.deepcopy(self.subtree_parents, memodict)
return obj
|
'The size of a node is the number of children it has.'
| def __len__(self):
| return len(self.children)
|
'For truth value testing.'
| def __bool__(self):
| return bool(self.children)
|
'Returns True if \'other\' is a direct child of this instance.'
| def __contains__(self, other):
| return (other in self.children)
|
'Adds a new node to the tree. If the conn_type is the same as the root\'s
current connector type, the node is added to the first level.
Otherwise, the whole tree is pushed down one level and a new root
connector is created, connecting the existing tree and the new node.'
| def add(self, node, conn_type):
| if ((node in self.children) and (conn_type == self.connector)):
return
if (len(self.children) < 2):
self.connector = conn_type
if (self.connector == conn_type):
if (isinstance(node, Node) and ((node.connector == conn_type) or (len(node) == 1))):
self.children.extend(node.children)
else:
self.children.append(node)
else:
obj = self._new_instance(self.children, self.connector, self.negated)
self.connector = conn_type
self.children = [obj, node]
|
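Node is the tree structure behind Django's Q objects (django.utils.tree); with the add() shown above, a node with the same connector is merged in place, while a different connector pushes the existing children down one level. A small illustrative sketch (the 'AND'/'OR' connector strings are just examples; subclasses define their own):

    from django.utils.tree import Node

    root = Node(children=['a'], connector='AND')
    root.add('b', 'AND')   # same connector: appended at the first level -> AND(a, b)
    len(root)              # 2
    root.add('c', 'OR')    # different connector: tree is pushed down one level,
                           # root becomes OR(AND(a, b), c)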
'Negate the sense of the root connector. This reorganises the children
so that the current node has a single child: a negated node containing
all the previous children. This slightly odd construction makes adding
new children behave more intuitively.
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.'
| def negate(self):
| self.children = [self._new_instance(self.children, self.connector, (not self.negated))]
self.connector = self.default
|
'Sets up internal state so that new nodes are added to a subtree of the
current node. The conn_type specifies how the sub-tree is joined to the
existing children.'
| def start_subtree(self, conn_type):
| if (len(self.children) == 1):
self.connector = conn_type
elif (self.connector != conn_type):
self.children = [self._new_instance(self.children, self.connector, self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(self.__class__(self.children, self.connector, self.negated))
self.connector = self.default
self.negated = False
self.children = []
|
'Closes off the most recently unmatched start_subtree() call.
This puts the current state into a node of the parent tree and returns
the current instance's state to be the parent.'
| def end_subtree(self):
| obj = self.subtree_parents.pop()
node = self.__class__(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children
self.children.append(node)
|
'Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).'
| def lex(self, text):
| end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while (start < end):
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
(yield (tok.name, toktext))
if tok.next:
state = tok.next
break
self.state = state
|
'Must be implemented by subclasses to initialise the wrapped object.'
| def _setup(self):
| raise NotImplementedError
|
'Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.'
| def __init__(self, func):
| self.__dict__['_setupfunc'] = func
_super(SimpleLazyObject, self).__init__()
|
'\'a.m.\' or \'p.m.\''
| def a(self):
| if (self.data.hour > 11):
return _(u'p.m.')
return _(u'a.m.')
|
'\'AM\' or \'PM\''
| def A(self):
| if (self.data.hour > 11):
return _(u'PM')
return _(u'AM')
|
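These two methods back the 'a' and 'A' format characters of Django's dateformat module; a quick sketch, assuming django.utils.dateformat:

    import datetime
    from django.utils.dateformat import format

    format(datetime.datetime(2024, 1, 1, 9, 30), 'a')   # 'a.m.'
    format(datetime.datetime(2024, 1, 1, 21, 30), 'A')  # 'PM'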
'Swatch Internet time'
| def B(self):
| raise NotImplementedError
|