desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
def test_keywords_in_subclass(self):
    """SF bug #1486663 -- constructing a set subclass with keyword
    arguments used to erroneously raise a TypeError."""
    SetSubclassWithKeywordArgs(newarg=1)
|
def randomlist(self, n):
    """Helper: return a list of n random numbers drawn from self.gen."""
    return [self.gen.random() for _ in xrange(n)]
|
'If UseForeignDTD is passed True and a document without an external
entity reference is parsed, ExternalEntityRefHandler is first called
with None for the public and system ids.'
| def test_use_foreign_dtd(self):
| handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse("<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
handler_call_args[:] = []
parser = expat.ParserCreate()
parser.UseForeignDTD()
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse("<?xml version='1.0'?><element/>")
self.assertEqual(handler_call_args, [(None, None)])
|
'If UseForeignDTD is passed True and a document with an external
entity reference is parsed, ExternalEntityRefHandler is called with
the public and system ids from the document.'
| def test_ignore_use_foreign_dtd(self):
| handler_call_args = []
def resolve_entity(context, base, system_id, public_id):
handler_call_args.append((public_id, system_id))
return 1
parser = expat.ParserCreate()
parser.UseForeignDTD(True)
parser.SetParamEntityParsing(expat.XML_PARAM_ENTITY_PARSING_ALWAYS)
parser.ExternalEntityRefHandler = resolve_entity
parser.Parse("<?xml version='1.0'?><!DOCTYPE foo PUBLIC 'bar' 'baz'><element/>")
self.assertEqual(handler_call_args, [('bar', 'baz')])
|
def testMultipleLHS(self):
    """Test multiple targets on the left hand side.

    Each snippet is parsed with the old compiler package and must yield
    Module -> Stmt -> Assign; it is then compiled and executed to check
    the assignment really binds a == 1 and b == 2.
    """
    snippets = ['a, b = 1, 2', '(a, b) = 1, 2', '((a, b), c) = (1, 2), 3']
    for snippet in snippets:
        tree = transformer.parse(snippet)
        self.assertIsInstance(tree, ast.Module)
        stmt = tree.getChildNodes()[0]
        self.assertIsInstance(stmt, ast.Stmt)
        assign = stmt.getChildNodes()[0]
        self.assertIsInstance(assign, ast.Assign)
        code = compile(snippet, '<string>', 'single')
        namespace = {}
        # Function-call form of exec works on Python 2.7 and 3.x alike,
        # unlike the old `exec code in namespace` statement.
        exec(code, namespace)
        assert namespace['a'] == 1
        assert namespace['b'] == 2
|
def assert_is_copy(self, obj, objcopy, msg=None):
    """Utility method to verify if two objects are copies of each other.

    Checks equality and exact type identity; where the objects carry a
    __dict__ or __slots__, also checks the contents match without the
    underlying storage being shared.
    """
    if msg is None:
        msg = '{!r} is not a copy of {!r}'.format(obj, objcopy)
    self.assertEqual(obj, objcopy, msg=msg)
    self.assertIs(type(obj), type(objcopy), msg=msg)
    if hasattr(obj, '__dict__'):
        self.assertDictEqual(obj.__dict__, objcopy.__dict__, msg=msg)
        self.assertIsNot(obj.__dict__, objcopy.__dict__, msg=msg)
    if hasattr(obj, '__slots__'):
        self.assertListEqual(obj.__slots__, objcopy.__slots__, msg=msg)
        for slot in obj.__slots__:
            self.assertEqual(hasattr(obj, slot), hasattr(objcopy, slot), msg=msg)
            self.assertEqual(getattr(obj, slot, None), getattr(objcopy, slot, None), msg=msg)
|
def setUp(self):
    """Make a copy of sys.path and snapshot the os/sys state mutated by
    the tests, so tearDown can restore it."""
    super(TestSysConfig, self).setUp()
    self.sys_path = sys.path[:]
    self.makefile = None
    # Swap os.uname for a controllable stand-in, remembering the real one.
    if hasattr(os, 'uname'):
        self.uname = os.uname
        self._uname = os.uname()
    else:
        self.uname = None
        self._uname = None
    os.uname = self._get_uname
    # Snapshot everything that tearDown puts back.
    self.name = os.name
    self.platform = sys.platform
    self.version = sys.version
    self.sep = os.sep
    self.join = os.path.join
    self.isabs = os.path.isabs
    self.splitdrive = os.path.splitdrive
    self._config_vars = copy(sysconfig._CONFIG_VARS)
    self.old_environ = deepcopy(os.environ)
|
def tearDown(self):
    """Restore sys.path and every piece of os/sys state saved in setUp."""
    sys.path[:] = self.sys_path
    if self.makefile is not None:
        os.unlink(self.makefile)
    self._cleanup_testfn()
    if self.uname is not None:
        os.uname = self.uname
    else:
        del os.uname
    os.name = self.name
    sys.platform = self.platform
    sys.version = self.version
    os.sep = self.sep
    os.path.join = self.join
    os.path.isabs = self.isabs
    os.path.splitdrive = self.splitdrive
    sysconfig._CONFIG_VARS = copy(self._config_vars)
    # Put back any changed values first...
    for key, value in self.old_environ.items():
        if os.environ.get(key) != value:
            os.environ[key] = value
    # ...then drop keys added during the test.  Iterate over a snapshot:
    # deleting entries while iterating the live mapping is unsafe.
    for key in list(os.environ.keys()):
        if key not in self.old_environ:
            del os.environ[key]
    super(TestSysConfig, self).tearDown()
|
@contextlib.contextmanager
def mocked_select_module(self):
    """Mocks the select.select() call to raise EINTR for first call."""
    real_select = select.select
    call_count = [0]  # mutable cell: no nonlocal on Python 2
    def eintr_once(*args):
        call_count[0] += 1
        if call_count[0] == 1:
            raise select.error(errno.EINTR, os.strerror(errno.EINTR))
        return real_select(*args)
    select.select = eintr_once
    try:
        yield select.select
    finally:
        # Always undo the monkeypatch, even if the caller raised.
        select.select = real_select
|
def setUp(self):
    """Setup of a temp file to use for testing, then open it via urlopen."""
    self.text = 'test_urllib: %s\n' % self.__class__.__name__
    f = file(test_support.TESTFN, 'wb')
    try:
        f.write(self.text)
    finally:
        f.close()
    self.pathname = test_support.TESTFN
    self.returned_obj = urllib.urlopen('file:%s' % self.pathname)
|
def tearDown(self):
    """Shut down the open object and remove the backing temp file."""
    self.returned_obj.close()
    os.remove(test_support.TESTFN)
|
def createNewTempFile(self, data=''):
    """Create a new temporary file containing `data`, register it for
    deletion during test tear-down, and return its absolute path.

    The old version closed the file twice and, if os.fdopen() failed,
    its `finally` clause touched an unbound name (the NameError was then
    hidden by a bare `except`).  Register first, then write with a
    single guaranteed close.
    """
    new_fd, new_path = tempfile.mkstemp()
    self.registerFileForCleanUp(new_path)
    new_file = os.fdopen(new_fd, 'wb')
    try:
        new_file.write(data)
    finally:
        new_file.close()
    return new_path
|
def help_inputtype(self, given, test_type):
    """Helper method for testing different input types.

    `given` must lead to only the pairs 1st->1, 2nd->2, 3rd->3.  The
    test cannot assume anything about order: the docs make no guarantee
    and dictionary input has none.
    """
    result = urllib.urlencode(given)
    # Every pair must appear somewhere in the encoded string.
    for pair in ('1st=1', '2nd=2', '3rd=3'):
        self.assertIn(pair, result, 'testing %s: %s not found in %s' % (test_type, pair, result))
    # Exactly two separators...
    self.assertEqual(result.count('&'), 2, "testing %s: expected 2 '&'s; got %s" % (test_type, result.count('&')))
    # ...each sitting between a value digit and a key digit.
    amp_location = result.index('&')
    on_amp_left = result[amp_location - 1]
    on_amp_right = result[amp_location + 1]
    self.assertTrue(on_amp_left.isdigit() and on_amp_right.isdigit(), "testing %s: '&' not located in proper place in %s" % (test_type, result))
    expected_len = (5 * 3) + 2
    self.assertEqual(len(result), expected_len, 'testing %s: unexpected number of characters: %s != %s' % (test_type, len(result), expected_len))
|
def assertParseOK(self, args, expected_opts, expected_positional_args):
    """Assert the options are what we expected when parsing arguments.

    Otherwise, fail with a nicely formatted message.

    Keyword arguments:
    args -- A list of arguments to parse with OptionParser.
    expected_opts -- The options expected.
    expected_positional_args -- The positional arguments expected.

    Returns the options and positional args for further testing.
    """
    options, positional_args = self.parser.parse_args(args)
    optdict = vars(options)
    # NOTE: the message templates pull names out of locals(), so the
    # local variable names above must stay as they are.
    self.assertEqual(optdict, expected_opts, '\nOptions are %(optdict)s.\nShould be %(expected_opts)s.\nArgs were %(args)s.' % locals())
    self.assertEqual(positional_args, expected_positional_args, '\nPositional arguments are %(positional_args)s.\nShould be %(expected_positional_args)s.\nArgs were %(args)s.' % locals())
    return (options, positional_args)
|
def assertRaises(self, func, args, kwargs, expected_exception, expected_message):
    """Assert that the expected exception is raised when calling a
    function, and that the right error message is included with
    that exception.

    Arguments:
    func -- the function to call
    args -- positional arguments to `func`
    kwargs -- keyword arguments to `func`
    expected_exception -- exception that should be raised
    expected_message -- expected exception message (or pattern
                        if a compiled regex object)

    Returns the exception raised for further testing.
    """
    args = () if args is None else args
    kwargs = {} if kwargs is None else kwargs
    try:
        func(*args, **kwargs)
    except expected_exception as err:
        actual_message = str(err)
        if isinstance(expected_message, retype):
            # A compiled regex: match the message against the pattern.
            self.assertTrue(expected_message.search(actual_message), "expected exception message pattern:\n/%s/\nactual exception message:\n'''%s'''\n" % (expected_message.pattern, actual_message))
        else:
            self.assertEqual(actual_message, expected_message, "expected exception message:\n'''%s'''\nactual exception message:\n'''%s'''\n" % (expected_message, actual_message))
        return err
    else:
        self.fail('expected exception %(expected_exception)s not raised\ncalled %(func)r\nwith args %(args)r\nand kwargs %(kwargs)r\n' % locals())
|
def assertParseFail(self, cmdline_args, expected_output):
    """Assert the parser fails with the expected message.

    Caller must ensure that self.parser is an InterceptingOptionParser.
    """
    try:
        self.parser.parse_args(cmdline_args)
    except InterceptedError as err:
        self.assertEqual(err.error_message, expected_output)
    else:
        # The original used assertFalse() on a truthy string as an
        # obscure way to fail unconditionally; fail() states the intent.
        self.fail('expected parse failure')
|
def assertOutput(self, cmdline_args, expected_output, expected_status=0, expected_error=None):
    """Assert the parser prints the expected output on stdout.

    parse_args() is expected to terminate via InterceptedError, whose
    exit status and message are checked against `expected_status` and
    `expected_error`.
    """
    save_stdout = sys.stdout
    encoding = getattr(save_stdout, 'encoding', None)
    try:
        try:
            # Capture stdout, preserving the original stream's encoding.
            sys.stdout = StringIO()
            if encoding:
                sys.stdout.encoding = encoding
            self.parser.parse_args(cmdline_args)
        finally:
            output = sys.stdout.getvalue()
            sys.stdout = save_stdout
    except InterceptedError as err:
        self.assertTrue(type(output) is types.StringType, 'expected output to be an ordinary string, not %r' % type(output))
        if output != expected_output:
            self.fail("expected: \n'''\n" + expected_output + "'''\nbut got \n'''\n" + output + "'''")
        self.assertEqual(err.exit_status, expected_status)
        self.assertEqual(err.exit_message, expected_error)
    else:
        # assertFalse('...') on a truthy string was an obscure way to
        # force a failure; fail() is explicit.
        self.fail('expected parser.exit()')
|
def assertTypeError(self, func, expected_message, *args):
    """Assert that TypeError with `expected_message` is raised when
    executing func(*args)."""
    self.assertRaises(func, args, None, TypeError, expected_message)
|
'doctest monkeypatches linecache to enable inspection'
| def test_proceed_with_fake_filename(self):
| (fn, source) = ('<test>', 'def x(): pass\n')
getlines = linecache.getlines
def monkey(filename, module_globals=None):
if (filename == fn):
return source.splitlines(True)
else:
return getlines(filename, module_globals)
linecache.getlines = monkey
try:
ns = {}
exec compile(source, fn, 'single') in ns
inspect.getsource(ns['x'])
finally:
linecache.getlines = getlines
|
def _classify_test(self, newstyle):
    """Helper for testing that classify_class_attrs finds a bunch of
    different kinds of attributes on a given class."""
    if newstyle:
        base = object
    else:
        class base:
            pass

    class A(base):
        def s():
            pass
        s = staticmethod(s)

        def c(cls):
            pass
        c = classmethod(c)

        def getp(self):
            pass
        p = property(getp)

        def m(self):
            pass

        def m1(self):
            pass

        datablob = '1'
        dd = _BrokenDataDescriptor()
        md = _BrokenMethodDescriptor()

    def check(cls, expected):
        # expected: (attr name, classified kind, defining class, message).
        found = attrs_wo_objs(cls)
        for attr_name, kind, home, complaint in expected:
            self.assertIn((attr_name, kind, home), found, complaint)

    check(A, [('s', 'static method', A, 'missing static method'),
              ('c', 'class method', A, 'missing class method'),
              ('p', 'property', A, 'missing property'),
              ('m', 'method', A, 'missing plain method'),
              ('m1', 'method', A, 'missing plain method'),
              ('datablob', 'data', A, 'missing data'),
              ('md', 'method', A, 'missing method descriptor'),
              ('dd', 'data', A, 'missing data descriptor')])

    class B(A):
        def m(self):
            pass

    check(B, [('s', 'static method', A, 'missing static method'),
              ('c', 'class method', A, 'missing class method'),
              ('p', 'property', A, 'missing property'),
              ('m', 'method', B, 'missing plain method'),
              ('m1', 'method', A, 'missing plain method'),
              ('datablob', 'data', A, 'missing data'),
              ('md', 'method', A, 'missing method descriptor'),
              ('dd', 'data', A, 'missing data descriptor')])

    class C(A):
        def m(self):
            pass

        def c(self):
            pass

    check(C, [('s', 'static method', A, 'missing static method'),
              ('c', 'method', C, 'missing plain method'),
              ('p', 'property', A, 'missing property'),
              ('m', 'method', C, 'missing plain method'),
              ('m1', 'method', A, 'missing plain method'),
              ('datablob', 'data', A, 'missing data'),
              ('md', 'method', A, 'missing method descriptor'),
              ('dd', 'data', A, 'missing data descriptor')])

    class D(B, C):
        def m1(self):
            pass

    found = attrs_wo_objs(D)
    self.assertIn(('s', 'static method', A), found, 'missing static method')
    # MRO differs between old- and new-style classes, so the defining
    # class of 'c' depends on which flavour we are exercising.
    if newstyle:
        self.assertIn(('c', 'method', C), found, 'missing plain method')
    else:
        self.assertIn(('c', 'class method', A), found, 'missing class method')
    for entry, complaint in [(('p', 'property', A), 'missing property'),
                             (('m', 'method', B), 'missing plain method'),
                             (('m1', 'method', D), 'missing plain method'),
                             (('datablob', 'data', A), 'missing data'),
                             (('md', 'method', A), 'missing method descriptor'),
                             (('dd', 'data', A), 'missing data descriptor')]:
        self.assertIn(entry, found, complaint)
|
def test_classify_oldstyle(self):
    """classify_class_attrs finds static methods, class methods,
    properties, normal methods, and data attributes on an old-style
    class."""
    self._classify_test(False)
|
def test_classify_newstyle(self):
    """Just like test_classify_oldstyle, but for a new-style class."""
    self._classify_test(True)
|
def makeCallable(self, signature):
    """Create a function that returns its locals(), excluding the
    autogenerated '.1', '.2', etc. tuple param names (if any)."""
    with check_py3k_warnings(('tuple parameter unpacking has been removed', SyntaxWarning), quiet=True):
        template = 'lambda %s: dict(i for i in locals().items() if not is_tuplename(i[0]))'
        return eval(template % signature, {'is_tuplename': self.is_tuplename})
|
@skipUnless(zlib, 'requires zlib')
def test_low_compression(self):
    """Check for cases where compressed data is larger than original."""
    with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_DEFLATED) as zipfp:
        zipfp.writestr('strfile', '12')
    # Reading the member back must still yield the original bytes.
    with zipfile.ZipFile(TESTFN2, 'r', zipfile.ZIP_DEFLATED) as zipfp:
        with zipfp.open('strfile') as member:
            self.assertEqual(member.read(1), '1')
            self.assertEqual(member.read(1), '2')
|
def test_append_to_zip_file(self):
    """Test appending to an existing zipfile."""
    with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
        zipfp.write(TESTFN, TESTFN)
    # Reopen in append mode and add a second member.
    with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_STORED) as zipfp:
        zipfp.writestr('strfile', self.data)
        self.assertEqual(zipfp.namelist(), [TESTFN, 'strfile'])
|
def test_append_to_non_zip_file(self):
    """Test appending to an existing file that is not a zipfile."""
    prefix = 'I am not a ZipFile!' * 10
    with open(TESTFN2, 'wb') as f:
        f.write(prefix)
    with zipfile.ZipFile(TESTFN2, 'a', zipfile.ZIP_STORED) as zipfp:
        zipfp.write(TESTFN, TESTFN)
    # The archive must start right after the non-zip prefix.
    with open(TESTFN2, 'rb') as f:
        f.seek(len(prefix))
        with zipfile.ZipFile(f, 'r') as zipfp:
            self.assertEqual(zipfp.namelist(), [TESTFN])
|
def test_write_default_name(self):
    """Check that calling ZipFile.write without arcname specified
    produces the expected result."""
    with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
        zipfp.write(TESTFN)
        # With no arcname, the source path doubles as the member name.
        with open(TESTFN, 'r') as fid:
            self.assertEqual(zipfp.read(TESTFN), fid.read())
|
@skipUnless(zlib, 'requires zlib')
def test_per_file_compression(self):
    """Check that files within a Zip archive can have different
    compression options."""
    with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
        zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
        zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
        sinfo = zipfp.getinfo('storeme')
        dinfo = zipfp.getinfo('deflateme')
        self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
        self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
|
def test_write_to_readonly(self):
    """Check that trying to call write() on a readonly ZipFile object
    raises a RuntimeError."""
    with zipfile.ZipFile(TESTFN2, mode='w') as zipfp:
        zipfp.writestr('somefile.txt', 'bogus')
    with zipfile.ZipFile(TESTFN2, mode='r') as zipfp:
        self.assertRaises(RuntimeError, zipfp.write, TESTFN)
|
def test_close(self):
    """Check that the zipfile is closed after the 'with' block."""
    with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
        for fpath, fdata in SMALL_TEST_DATA:
            zipfp.writestr(fpath, fdata)
        self.assertTrue(zipfp.fp is not None, 'zipfp is not open')
    self.assertTrue(zipfp.fp is None, 'zipfp is not closed')
    with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
        self.assertTrue(zipfp.fp is not None, 'zipfp is not open')
    self.assertTrue(zipfp.fp is None, 'zipfp is not closed')
|
def test_close_on_exception(self):
    """Check that the zipfile is closed if an exception is raised in the
    'with' block."""
    with zipfile.ZipFile(TESTFN2, 'w') as zipfp:
        for fpath, fdata in SMALL_TEST_DATA:
            zipfp.writestr(fpath, fdata)
    try:
        with zipfile.ZipFile(TESTFN2, 'r') as zipfp2:
            raise zipfile.BadZipfile()
    except zipfile.BadZipfile:
        # The context manager must have closed the archive on the way out.
        self.assertTrue(zipfp2.fp is None, 'zipfp is not closed')
|
def test_is_zip_erroneous_file(self):
    """Check that is_zipfile() correctly identifies non-zip files.

    The original mixed assertFalse(chk) with assertTrue(not chk); all
    negative checks now use assertFalse consistently.
    """
    # Check by file name.
    with open(TESTFN, 'w') as fp:
        fp.write('this is not a legal zip file\n')
    self.assertFalse(zipfile.is_zipfile(TESTFN))
    # Check by file object.
    with open(TESTFN, 'rb') as fp:
        self.assertFalse(zipfile.is_zipfile(fp))
    # Check by in-memory file object, both mid-stream and rewound.
    fp = StringIO()
    fp.write('this is not a legal zip file\n')
    self.assertFalse(zipfile.is_zipfile(fp))
    fp.seek(0, 0)
    self.assertFalse(zipfile.is_zipfile(fp))
|
'Check that zipfiles with missing bytes at the end raise BadZipFile.'
| def test_damaged_zipfile(self):
| fp = io.BytesIO()
with zipfile.ZipFile(fp, mode='w') as zipf:
zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
zipfiledata = fp.getvalue()
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, fp)
|
def test_is_zip_valid_file(self):
    """Check that is_zipfile() correctly identifies zip files."""
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    # By file name.
    self.assertTrue(zipfile.is_zipfile(TESTFN))
    # By open file object.
    with open(TESTFN, 'rb') as fp:
        self.assertTrue(zipfile.is_zipfile(fp))
        fp.seek(0, 0)
        zip_contents = fp.read()
    # By in-memory file object, both mid-stream and rewound.
    fp = StringIO()
    fp.write(zip_contents)
    self.assertTrue(zipfile.is_zipfile(fp))
    fp.seek(0, 0)
    self.assertTrue(zipfile.is_zipfile(fp))
|
def test_closed_zip_raises_RuntimeError(self):
    """Verify that testzip() doesn't swallow inappropriate exceptions."""
    data = StringIO()
    with zipfile.ZipFile(data, mode='w') as zipf:
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    # The archive is now closed: every operation must raise RuntimeError.
    self.assertRaises(RuntimeError, zipf.read, 'foo.txt')
    self.assertRaises(RuntimeError, zipf.open, 'foo.txt')
    self.assertRaises(RuntimeError, zipf.testzip)
    self.assertRaises(RuntimeError, zipf.writestr, 'bogus.txt', 'bogus')
    with open(TESTFN, 'w') as fid:
        fid.write('zipfile test data')
    self.assertRaises(RuntimeError, zipf.write, TESTFN)
|
def test_bad_constructor_mode(self):
    """Check that bad modes passed to ZipFile constructor are caught."""
    self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, 'q')
|
def test_bad_open_mode(self):
    """Check that bad modes passed to ZipFile.open are caught."""
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    with zipfile.ZipFile(TESTFN, mode='r') as zipf:
        # A normal read must work; a bogus mode must be rejected.
        zipf.read('foo.txt')
        self.assertRaises(RuntimeError, zipf.open, 'foo.txt', 'q')
|
def test_read0(self):
    """Check that calling read(0) on a ZipExtFile object returns an empty
    string and doesn't advance file pointer."""
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
        with zipf.open('foo.txt') as f:
            # Repeated zero-length reads must not move the position.
            for _ in xrange(FIXEDTEST_SIZE):
                self.assertEqual(f.read(0), '')
            self.assertEqual(f.read(), 'O, for a Muse of Fire!')
|
def test_open_non_existent_item(self):
    """Check that attempting to call open() for an item that doesn't
    exist in the archive raises a KeyError.

    (The old docstring claimed RuntimeError, but the assertion below --
    matching ZipFile.getinfo() behaviour -- expects KeyError.)
    """
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        self.assertRaises(KeyError, zipf.open, 'foo.txt', 'r')
|
def test_bad_compression_mode(self):
    """Check that bad compression methods passed to ZipFile.open are
    caught."""
    self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, 'w', -1)
|
def test_null_byte_in_filename(self):
    """Check that a filename containing a null byte is properly
    terminated."""
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        zipf.writestr('foo.txt\x00qqq', 'O, for a Muse of Fire!')
        # Everything from the NUL onwards must be dropped.
        self.assertEqual(zipf.namelist(), ['foo.txt'])
|
'Check that ZIP internal structure sizes are calculated correctly.'
| def test_struct_sizes(self):
| self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
|
def test_comments(self):
    """Check that comments on the archive are handled properly."""
    # The default comment is empty and round-trips as such.
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        self.assertEqual(zipf.comment, '')
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    with zipfile.ZipFile(TESTFN, mode='r') as zipf:
        self.assertEqual(zipf.comment, '')
    # A short comment round-trips.
    comment = 'Bravely taking to his feet, he beat a very brave retreat.'
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        zipf.comment = comment
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    with zipfile.ZipFile(TESTFN, mode='r') as zipf:
        self.assertEqual(zipf.comment, comment)
    # A comment of the maximum length (2**16 - 1 bytes) round-trips.
    comment2 = ''.join(['%d' % ((i ** 3) % 10) for i in xrange((1 << 16) - 1)])
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        zipf.comment = comment2
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    with zipfile.ZipFile(TESTFN, mode='r') as zipf:
        self.assertEqual(zipf.comment, comment2)
    # An over-long comment is truncated, with a warning.
    with zipfile.ZipFile(TESTFN, mode='w') as zipf:
        with check_warnings(('', UserWarning)):
            zipf.comment = comment2 + 'oops'
        zipf.writestr('foo.txt', 'O, for a Muse of Fire!')
    with zipfile.ZipFile(TESTFN, mode='r') as zipf:
        self.assertEqual(zipf.comment, comment2)
|
def check_testzip_with_bad_crc(self, compression):
    """Tests that files with bad CRCs return their name from testzip."""
    blob = self.zips_with_bad_crc[compression]
    with zipfile.ZipFile(io.BytesIO(blob), mode='r') as zipf:
        self.assertEqual('afile', zipf.testzip())
|
def check_read_with_bad_crc(self, compression):
    """Tests that files with bad CRCs raise a BadZipfile exception when read."""
    blob = self.zips_with_bad_crc[compression]
    # Whole-member read.
    with zipfile.ZipFile(io.BytesIO(blob), mode='r') as zipf:
        self.assertRaises(zipfile.BadZipfile, zipf.read, 'afile')
    # Read through a member file object.
    with zipfile.ZipFile(io.BytesIO(blob), mode='r') as zipf:
        with zipf.open('afile', 'r') as corrupt_file:
            self.assertRaises(zipfile.BadZipfile, corrupt_file.read)
    # Incremental read in tiny chunks.
    with zipfile.ZipFile(io.BytesIO(blob), mode='r') as zipf:
        with zipf.open('afile', 'r') as corrupt_file:
            corrupt_file.MIN_READ_SIZE = 2
            with self.assertRaises(zipfile.BadZipfile):
                while corrupt_file.read(2):
                    pass
|
def test_zipfile_with_short_extra_field(self):
    """If an extra field in the header is less than 4 bytes, skip it."""
    # Hand-crafted archive whose local header carries a 2-byte extra field.
    zipdata = 'PK\x03\x04\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x03\x00abc\x00\x00\x00APK\x01\x02\x14\x03\x14\x00\x00\x00\x00\x00\x93\x9b\xad@\x8b\x9e\xd9\xd3\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00abc\x00\x00PK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x003\x00\x00\x00%\x00\x00\x00\x00\x00'
    with zipfile.ZipFile(io.BytesIO(zipdata), 'r') as zipf:
        self.assertIsNone(zipf.testzip())
|
'$ matches the end of string, and just before the terminating'
| def test_dollar_matches_twice(self):
| pattern = re.compile('$')
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
pattern = re.compile('$', re.MULTILINE)
self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a#\nb#\n#')
self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
self.assertEqual(pattern.sub('#', '\n'), '#\n#')
|
def get_request(self):
    """BaseHTTPServer method, overridden to put a timeout on the
    accepted connection."""
    connection, client_address = self.socket.accept()
    connection.settimeout(10.0)
    return (connection, client_address)
|
def stop(self):
    """Stops the webserver if it's currently running."""
    # Signal the serve loop to exit, then wait for the thread to finish.
    self._stop = True
    self.join()
|
def handle_request(self, request_handler):
    """Performs digest authentication on the given HTTP request
    handler.  Returns True if authentication was successful, False
    otherwise.

    If no users have been set, then digest auth is effectively
    disabled and this method will always return True.
    """
    if len(self._users) == 0:
        return True
    # Guard clauses: anything missing or unknown yields a challenge.
    if 'Proxy-Authorization' not in request_handler.headers:
        return self._return_auth_challenge(request_handler)
    auth_dict = self._create_auth_dict(request_handler.headers['Proxy-Authorization'])
    if auth_dict['username'] not in self._users:
        return self._return_auth_challenge(request_handler)
    password = self._users[auth_dict['username']]
    if auth_dict.get('nonce') not in self._nonces:
        return self._return_auth_challenge(request_handler)
    # Each nonce is single-use.
    self._nonces.remove(auth_dict['nonce'])
    # Accept a response computed against either the full or short path.
    # (Deliberately no short-circuit: mirrors the original's flag loop.)
    auth_validated = False
    for path in [request_handler.path, request_handler.short_path]:
        if self._validate_auth(auth_dict, password, request_handler.command, path):
            auth_validated = True
    if not auth_validated:
        return self._return_auth_challenge(request_handler)
    return True
|
def __init__(self, examples, globs, name, filename, lineno, docstring):
    """Create a new DocTest containing the given examples.  The
    DocTest's globals are initialized with a copy of `globs`."""
    assert not isinstance(examples, basestring), 'DocTest no longer accepts str; use DocTestParser instead'
    self.examples = examples
    self.docstring = docstring
    # Private copy, so running examples cannot mutate the caller's dict.
    self.globs = globs.copy()
    self.name = name
    self.filename = filename
    self.lineno = lineno
|
def parse(self, string, name='<string>'):
    """Divide the given string into examples and intervening text,
    and return them as a list of alternating Examples and strings.
    Line numbers for the Examples are 0-based.  The optional
    argument `name` is a name identifying this string, and is only
    used for error messages.
    """
    string = string.expandtabs()
    # Strip the common leading indentation from every line.
    min_indent = self._min_indent(string)
    if min_indent > 0:
        string = '\n'.join([line[min_indent:] for line in string.split('\n')])
    output = []
    charno, lineno = 0, 0
    for m in self._EXAMPLE_RE.finditer(string):
        # Text preceding this example.
        output.append(string[charno:m.start()])
        lineno += string.count('\n', charno, m.start())
        source, options, want, exc_msg = self._parse_example(m, name, lineno)
        if not self._IS_BLANK_OR_COMMENT(source):
            output.append(Example(source, want, exc_msg,
                                  lineno=lineno,
                                  indent=min_indent + len(m.group('indent')),
                                  options=options))
        lineno += string.count('\n', m.start(), m.end())
        charno = m.end()
    # Trailing text after the last example.
    output.append(string[charno:])
    return output
|
def get_doctest(self, string, globs, name, filename, lineno):
    """Extract all doctest examples from the given string, and
    collect them into a `DocTest` object.

    `globs`, `name`, `filename`, and `lineno` are attributes for
    the new `DocTest` object.  See the documentation for `DocTest`
    for more information.
    """
    examples = self.get_examples(string, name)
    return DocTest(examples, globs, name, filename, lineno, string)
|
def get_examples(self, string, name='<string>'):
    """Extract all doctest examples from the given string, and return
    them as a list of `Example` objects.  Line numbers are 0-based,
    because it's most common in doctests that nothing interesting
    appears on the same line as opening triple-quote, and so the first
    interesting line is called "line 1" then.

    The optional argument `name` is a name identifying this
    string, and is only used for error messages.
    """
    return [piece for piece in self.parse(string, name)
            if isinstance(piece, Example)]
|
def _parse_example(self, m, name, lineno):
    """Given a regular expression match from `_EXAMPLE_RE` (`m`),
    return a pair `(source, want)`, where `source` is the matched
    example's source code (with prompts and indentation stripped);
    and `want` is the example's expected output (with indentation
    stripped).

    `name` is the string's name, and `lineno` is the line number
    where the example starts; both are used for error messages.
    """
    indent = len(m.group('indent'))
    # Strip the prompts ('>>> '/'... ') and indentation from the source.
    source_lines = m.group('source').split('\n')
    self._check_prompt_blank(source_lines, indent, name, lineno)
    self._check_prefix(source_lines[1:], ' ' * indent + '.', name, lineno)
    source = '\n'.join([line[indent + 4:] for line in source_lines])
    # Strip indentation (and a trailing blank line, if any) from the
    # expected output.
    want = m.group('want')
    want_lines = want.split('\n')
    if len(want_lines) > 1 and re.match(' *$', want_lines[-1]):
        del want_lines[-1]
    self._check_prefix(want_lines, ' ' * indent, name, lineno + len(source_lines))
    want = '\n'.join([line[indent:] for line in want_lines])
    # If `want` contains a traceback message, extract it.
    m = self._EXCEPTION_RE.match(want)
    exc_msg = m.group('msg') if m else None
    options = self._find_options(source, name, lineno)
    return (source, options, want, exc_msg)
|
def _find_options(self, source, name, lineno):
    """Return a dictionary containing option overrides extracted from
    option directives in the given source string.

    `name` is the string's name, and `lineno` is the line number
    where the example starts; both are used for error messages.
    """
    options = {}
    for m in self._OPTION_DIRECTIVE_RE.finditer(source):
        option_strings = m.group(1).replace(',', ' ').split()
        for option in option_strings:
            # Each directive must be '+NAME' or '-NAME' for a known flag.
            if option[0] not in '+-' or option[1:] not in OPTIONFLAGS_BY_NAME:
                raise ValueError('line %r of the doctest for %s has an invalid option: %r' % (lineno + 1, name, option))
            flag = OPTIONFLAGS_BY_NAME[option[1:]]
            options[flag] = (option[0] == '+')
    if options and self._IS_BLANK_OR_COMMENT(source):
        raise ValueError('line %r of the doctest for %s has an option directive on a line with no example: %r' % (lineno, name, source))
    return options
|
def _min_indent(self, s):
    """Return the minimum indentation of any non-blank line in `s`."""
    indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
    # An all-blank (or empty) string has no indentation to speak of.
    return min(indents) if indents else 0
|
def _check_prompt_blank(self, lines, indent, name, lineno):
    """Given the lines of a source string (including prompts and
    leading indentation), check to make sure that every prompt is
    followed by a space character.  If any line is not followed by
    a space character, then raise ValueError.
    """
    for i, line in enumerate(lines):
        # indent+3 is the character just past the '>>>'/'...' prompt;
        # it must be a blank whenever anything follows the prompt.
        if len(line) >= indent + 4 and line[indent + 3] != ' ':
            raise ValueError('line %r of the docstring for %s lacks blank after %s: %r' % (lineno + i + 1, name, line[indent:indent + 3], line))
|
def _check_prefix(self, lines, prefix, name, lineno):
    """Check that every line in the given list starts with the given
    prefix; if any line does not, then raise a ValueError.
    """
    for i, line in enumerate(lines):
        # Empty lines are exempt from the prefix requirement.
        if line and not line.startswith(prefix):
            raise ValueError('line %r of the docstring for %s has inconsistent leading whitespace: %r' % (lineno + i + 1, name, line))
|
'Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.'
def __init__(self, verbose=False, parser=DocTestParser(), recurse=True, exclude_empty=True):
    """Create a new doctest finder.

    `parser` creates the DocTest objects (anything matching the DocTest
    constructor signature).  With `recurse=False`, `find` examines only
    the given object, not contained objects.  With `exclude_empty=False`,
    `find` also yields tests for objects with empty docstrings.
    """
    # NOTE: the parser default is evaluated once at def time, so all
    # finders built with the default share one DocTestParser instance.
    self._parser = parser
    self._verbose = verbose
    self._recurse = recurse
    self._exclude_empty = exclude_empty
|
'Return a list of the DocTests that are defined by the given
object\'s docstring, or by any of its contained objects\'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object\'s module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module\'s `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.'
def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
    """Return a sorted list of the DocTests defined by `obj`'s docstring
    and (recursively) by the docstrings of its contained objects.

    `module` is used as the default namespace source and to filter out
    objects imported from elsewhere; `module=False` suppresses module
    detection entirely.  The test globals are `globs` (default: the
    module dict, or {}) updated with `extraglobs`; a fresh copy is made
    so tests cannot pollute the caller's namespace.
    """
    # Default the test name to the object's __name__.
    if (name is None):
        name = getattr(obj, '__name__', None)
        if (name is None):
            raise ValueError(("DocTestFinder.find: name must be given when obj.__name__ doesn't exist: %r" % (type(obj),)))
    if (module is False):
        module = None
    elif (module is None):
        module = inspect.getmodule(obj)
    # Try to read the object's source file, for docstring line numbers.
    try:
        file = (inspect.getsourcefile(obj) or inspect.getfile(obj))
        if (module is not None):
            # Pass the module globals so linecache can use __loader__
            # (e.g. for modules loaded from zip files).
            source_lines = linecache.getlines(file, module.__dict__)
        else:
            source_lines = linecache.getlines(file)
        if (not source_lines):
            source_lines = None
    except TypeError:
        source_lines = None
    # Build the globals namespace the tests will run in.
    if (globs is None):
        if (module is None):
            globs = {}
        else:
            globs = module.__dict__.copy()
    else:
        globs = globs.copy()
    if (extraglobs is not None):
        globs.update(extraglobs)
    if ('__name__' not in globs):
        globs['__name__'] = '__main__'
    # Recursively collect tests, then sort for a deterministic order.
    tests = []
    self._find(tests, obj, name, module, source_lines, globs, {})
    tests.sort()
    return tests
|
'Return true if the given object is defined in the given
module.'
def _from_module(self, module, object):
    """Return true if `object` is (believed to be) defined in `module`.

    `module=None` accepts every object.  Falls back from inspect-based
    detection to __module__/globals comparisons.
    """
    if (module is None):
        return True
    elif (inspect.getmodule(object) is not None):
        return (module is inspect.getmodule(object))
    elif inspect.isfunction(object):
        # Python 2: a function's func_globals is its defining module dict.
        return (module.__dict__ is object.func_globals)
    elif inspect.isclass(object):
        return (module.__name__ == object.__module__)
    elif hasattr(object, '__module__'):
        return (module.__name__ == object.__module__)
    elif isinstance(object, property):
        # Properties carry no module information; accept them.
        return True
    else:
        raise ValueError('object must be a class or function')
|
'Find tests for the given object and any contained objects, and
add them to `tests`.'
def _find(self, tests, obj, name, module, source_lines, globs, seen):
    """Find tests for `obj` and any contained objects; append to `tests`.

    `seen` maps id(obj) -> 1 and guards against revisiting the same
    object (cycles, aliases).
    """
    if self._verbose:
        print ('Finding tests in %s' % name)
    # Skip objects we have already processed.
    if (id(obj) in seen):
        return
    seen[id(obj)] = 1
    # A test for obj's own docstring, if it has one.
    test = self._get_test(obj, name, module, globs, source_lines)
    if (test is not None):
        tests.append(test)
    # Recurse into a module's functions and classes.
    if (inspect.ismodule(obj) and self._recurse):
        for (valname, val) in obj.__dict__.items():
            valname = ('%s.%s' % (name, valname))
            if ((inspect.isfunction(val) or inspect.isclass(val)) and self._from_module(module, val)):
                self._find(tests, val, valname, module, source_lines, globs, seen)
    # Recurse into a module's explicit __test__ dictionary.
    if (inspect.ismodule(obj) and self._recurse):
        for (valname, val) in getattr(obj, '__test__', {}).items():
            if (not isinstance(valname, basestring)):
                raise ValueError(('DocTestFinder.find: __test__ keys must be strings: %r' % (type(valname),)))
            if (not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, basestring))):
                raise ValueError(('DocTestFinder.find: __test__ values must be strings, functions, methods, classes, or modules: %r' % (type(val),)))
            valname = ('%s.__test__.%s' % (name, valname))
            self._find(tests, val, valname, module, source_lines, globs, seen)
    # Recurse into a class's methods, nested classes and properties.
    if (inspect.isclass(obj) and self._recurse):
        for (valname, val) in obj.__dict__.items():
            # Unwrap static/class methods to the underlying function.
            if isinstance(val, staticmethod):
                val = getattr(obj, valname)
            if isinstance(val, classmethod):
                val = getattr(obj, valname).im_func
            if ((inspect.isfunction(val) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)):
                valname = ('%s.%s' % (name, valname))
                self._find(tests, val, valname, module, source_lines, globs, seen)
|
'Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.'
def _get_test(self, obj, name, module, globs, source_lines):
    """Return a DocTest for `obj`'s docstring, or None.

    Returns None when the docstring is empty and empty docstrings are
    excluded.  A plain string `obj` is treated as being its own
    docstring.
    """
    if isinstance(obj, basestring):
        docstring = obj
    else:
        try:
            if (obj.__doc__ is None):
                docstring = ''
            else:
                docstring = obj.__doc__
                if (not isinstance(docstring, basestring)):
                    docstring = str(docstring)
        except (TypeError, AttributeError):
            docstring = ''
    # Locate the docstring within the source file (may be None).
    lineno = self._find_lineno(obj, source_lines)
    if (self._exclude_empty and (not docstring)):
        return None
    if (module is None):
        filename = None
    else:
        filename = getattr(module, '__file__', module.__name__)
        # Point at the .py source instead of the compiled file.
        # NOTE(review): flattened source is ambiguous about whether this
        # check nests under the else; nesting assumed so filename is
        # never None here — confirm against upstream doctest.
        if (filename[(-4):] in ('.pyc', '.pyo')):
            filename = filename[:(-1)]
    return self._parser.get_doctest(docstring, globs, name, filename, lineno)
|
'Return a line number of the given object\'s docstring. Note:
this method assumes that the object has a docstring.'
def _find_lineno(self, obj, source_lines):
    """Return the 0-based line number of `obj`'s docstring, or None.

    Assumes `obj` actually has a docstring.
    """
    lineno = None
    # A module docstring starts at the top of the file.
    if inspect.ismodule(obj):
        lineno = 0
    # Class objects carry no line-number attribute, so grep the source.
    if inspect.isclass(obj):
        if (source_lines is None):
            return None
        pat = re.compile(('^\\s*class\\s*%s\\b' % getattr(obj, '__name__', '-')))
        for (i, line) in enumerate(source_lines):
            if pat.match(line):
                lineno = i
                break
    # For callables, unwrap down to the code object, which records its
    # first line number.
    if inspect.ismethod(obj):
        obj = obj.im_func
    if inspect.isfunction(obj):
        obj = obj.func_code
    if inspect.istraceback(obj):
        obj = obj.tb_frame
    if inspect.isframe(obj):
        obj = obj.f_code
    if inspect.iscode(obj):
        lineno = (getattr(obj, 'co_firstlineno', None) - 1)
    # Advance from the definition line to the docstring's opening quote.
    if (lineno is not None):
        if (source_lines is None):
            return (lineno + 1)
        pat = re.compile('(^|.*:)\\s*\\w*("|\')')
        for lineno in range(lineno, len(source_lines)):
            if pat.match(source_lines[lineno]):
                return lineno
    # Couldn't determine a line number.
    return None
|
'Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg \'verbose\' prints lots of stuff if true,
only failures if false; by default, it\'s true iff \'-v\' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.'
def __init__(self, checker=None, verbose=None, optionflags=0):
    """Create a new test runner.

    `checker` compares expected vs. actual output (OutputChecker by
    default).  `verbose` defaults to whether '-v' appears in sys.argv.
    `optionflags` controls output comparison and failure reporting.
    """
    self._checker = (checker or OutputChecker())
    if (verbose is None):
        verbose = ('-v' in sys.argv)
    self._verbose = verbose
    self.optionflags = optionflags
    self.original_optionflags = optionflags
    # Cumulative statistics across all run() calls.
    self.tries = 0
    self.failures = 0
    self._name2ft = {}  # test name -> (failures, tries)
    # Fake stdout used to capture the examples' output.
    self._fakeout = _SpoofOut()
|
'Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)'
def report_start(self, out, test, example):
    """In verbose mode, announce the example that is about to run."""
    if not self._verbose:
        return
    message = 'Trying:\n' + _indent(example.source)
    if example.want:
        message += 'Expecting:\n' + _indent(example.want)
    else:
        message += 'Expecting nothing\n'
    out(message)
|
'Report that the given example ran successfully. (Only
displays a message if verbose=True)'
def report_success(self, out, test, example, got):
    """In verbose mode, report that the example passed."""
    if not self._verbose:
        return
    out('ok\n')
|
'Report that the given example failed.'
def report_failure(self, out, test, example, got):
    """Report a failing example: failure header plus want/got difference."""
    header = self._failure_header(test, example)
    difference = self._checker.output_difference(example, got, self.optionflags)
    out(header + difference)
|
'Report that the given example raised an unexpected exception.'
def report_unexpected_exception(self, out, test, example, exc_info):
    """Report an example that raised an exception doctest did not expect."""
    header = self._failure_header(test, example)
    tb = _indent(_exception_traceback(exc_info))
    out(header + 'Exception raised:\n' + tb)
|
'Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.'
def __run(self, test, compileflags, out):
    """Run every example in `test`, reporting via the report_* methods.

    Examples execute in the namespace `test.globs`, compiled with
    `compileflags`.  Per-example option directives adjust
    self.optionflags for that example only.  Returns
    TestResults(failures, tries).
    """
    failures = tries = 0
    original_optionflags = self.optionflags
    (SUCCESS, FAILURE, BOOM) = range(3)  # possible example outcomes
    check = self._checker.check_output
    for (examplenum, example) in enumerate(test.examples):
        # With REPORT_ONLY_FIRST_FAILURE, suppress reporting after the
        # first failure (examples still run and are counted).
        quiet = ((self.optionflags & REPORT_ONLY_FIRST_FAILURE) and (failures > 0))
        # Merge this example's option directives into the run flags.
        self.optionflags = original_optionflags
        if example.options:
            for (optionflag, val) in example.options.items():
                if val:
                    self.optionflags |= optionflag
                else:
                    self.optionflags &= (~ optionflag)
        if (self.optionflags & SKIP):
            continue
        tries += 1
        if (not quiet):
            self.report_start(out, test, example)
        # Fake filename so tracebacks point at the failing example.
        filename = ('<doctest %s[%d]>' % (test.name, examplenum))
        try:
            # 'single' mode echoes expression values like the REPL does.
            exec compile(example.source, filename, 'single', compileflags, 1) in test.globs
            self.debugger.set_continue()
            exception = None
        except KeyboardInterrupt:
            raise
        except:
            exception = sys.exc_info()
            self.debugger.set_continue()
        got = self._fakeout.getvalue()  # the example's actual output
        self._fakeout.truncate(0)
        outcome = FAILURE
        # Decide the outcome: compare output, or the exception message.
        if (exception is None):
            if check(example.want, got, self.optionflags):
                outcome = SUCCESS
        else:
            exc_info = sys.exc_info()
            exc_msg = traceback.format_exception_only(*exc_info[:2])[(-1)]
            if (not quiet):
                got += _exception_traceback(exc_info)
            if (example.exc_msg is None):
                # The example did not expect an exception at all.
                outcome = BOOM
            elif check(example.exc_msg, exc_msg, self.optionflags):
                outcome = SUCCESS
            elif (self.optionflags & IGNORE_EXCEPTION_DETAIL):
                # Retry the match with exception details stripped.
                if check(_strip_exception_details(example.exc_msg), _strip_exception_details(exc_msg), self.optionflags):
                    outcome = SUCCESS
        # Report and tally the outcome.
        if (outcome is SUCCESS):
            if (not quiet):
                self.report_success(out, test, example, got)
        elif (outcome is FAILURE):
            if (not quiet):
                self.report_failure(out, test, example, got)
            failures += 1
        elif (outcome is BOOM):
            if (not quiet):
                self.report_unexpected_exception(out, test, example, exc_info)
            failures += 1
        else:
            assert False, ('unknown outcome', outcome)
    # Restore the option flags and record totals for summarize().
    self.optionflags = original_optionflags
    self.__record_outcome(test, failures, tries)
    return TestResults(failures, tries)
|
'Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.'
def __record_outcome(self, test, f, t):
    """Fold `f` failures out of `t` tries for `test` into the per-name
    and runner-wide totals."""
    prev_f, prev_t = self._name2ft.get(test.name, (0, 0))
    self._name2ft[test.name] = (prev_f + f, prev_t + t)
    self.failures += f
    self.tries += t
|
'Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.'
def run(self, test, compileflags=None, out=None, clear_globs=True):
    """Run `test`, writing results with `out` (default: real stdout).

    Temporarily redirects sys.stdout (to capture example output),
    pdb.set_trace (so debugging writes to the real stdout),
    linecache.getlines (so tracebacks can show example source) and
    sys.displayhook; all four are restored in the finally block.
    With `clear_globs` (the default), test.globs is emptied afterwards
    to help garbage collection.
    """
    self.test = test
    if (compileflags is None):
        compileflags = _extract_future_flags(test.globs)
    save_stdout = sys.stdout
    if (out is None):
        out = save_stdout.write
    sys.stdout = self._fakeout
    # Route pdb through a debugger bound to the *real* stdout.
    save_set_trace = pdb.set_trace
    self.debugger = _OutputRedirectingPdb(save_stdout)
    self.debugger.reset()
    pdb.set_trace = self.debugger.set_trace
    # Patch linecache so example source is visible in tracebacks.
    self.save_linecache_getlines = linecache.getlines
    linecache.getlines = self.__patched_linecache_getlines
    # Ensure the default displayhook is in effect while examples run.
    save_displayhook = sys.displayhook
    sys.displayhook = sys.__displayhook__
    try:
        return self.__run(test, compileflags, out)
    finally:
        sys.stdout = save_stdout
        pdb.set_trace = save_set_trace
        linecache.getlines = self.save_linecache_getlines
        sys.displayhook = save_displayhook
        if clear_globs:
            test.globs.clear()
|
'Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner\'s verbosity is used.'
def summarize(self, verbose=None):
    """Print a summary of all tests run so far and return
    TestResults(total_failures, total_tries).

    `verbose` controls the level of detail; it defaults to the
    runner's own verbosity.  Failures are always shown.
    """
    if (verbose is None):
        verbose = self._verbose
    notests = []
    passed = []
    failed = []
    totalt = totalf = 0
    # Bucket every recorded test name by its outcome.
    for x in self._name2ft.items():
        (name, (f, t)) = x
        assert (f <= t)
        totalt += t
        totalf += f
        if (t == 0):
            notests.append(name)
        elif (f == 0):
            passed.append((name, t))
        else:
            failed.append(x)
    if verbose:
        if notests:
            print len(notests), 'items had no tests:'
            notests.sort()
            for thing in notests:
                print ' ', thing
        if passed:
            print len(passed), 'items passed all tests:'
            passed.sort()
            for (thing, count) in passed:
                print (' %3d tests in %s' % (count, thing))
    # Failures are reported even when not verbose.
    if failed:
        print self.DIVIDER
        print len(failed), 'items had failures:'
        failed.sort()
        for (thing, (f, t)) in failed:
            print (' %3d of %3d in %s' % (f, t, thing))
    if verbose:
        print totalt, 'tests in', len(self._name2ft), 'items.'
        print (totalt - totalf), 'passed and', totalf, 'failed.'
    if totalf:
        print '***Test Failed***', totalf, 'failures.'
    elif verbose:
        print 'Test passed.'
    return TestResults(totalf, totalt)
|
'Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.'
def check_output(self, want, got, optionflags):
    """Return True iff `got` matches `want` under `optionflags`.

    Identical strings always match; depending on the flags, True/1
    equivalence, <BLANKLINE> handling, whitespace normalization and
    ellipsis matching are also tried, in that order.
    """
    # An exact match is always accepted.
    if got == want:
        return True
    # Unless disabled, "True"/"False" output matches expected "1"/"0".
    if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
        if (got, want) in (('True\n', '1\n'), ('False\n', '0\n')):
            return True
    # Unless disabled, replace <BLANKLINE> in `want` and blank lines in
    # `got`, then retry the exact comparison.
    if not (optionflags & DONT_ACCEPT_BLANKLINE):
        want = re.sub('(?m)^%s\\s*?$' % re.escape(BLANKLINE_MARKER), '', want)
        got = re.sub('(?m)^\\s*?$', '', got)
        if got == want:
            return True
    # Optionally collapse every run of whitespace before comparing.
    if optionflags & NORMALIZE_WHITESPACE:
        got = ' '.join(got.split())
        want = ' '.join(want.split())
        if got == want:
            return True
    # Optionally let "..." in `want` match any substring of `got`.
    if optionflags & ELLIPSIS:
        if _ellipsis_match(want, got):
            return True
    # Nothing matched.
    return False
|
'Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.'
def output_difference(self, example, got, optionflags):
    """Return a human-readable description of how `got` differs from
    the example's expected output, honouring the REPORT_* flags."""
    want = example.want
    # Show blank output lines with the <BLANKLINE> marker.
    if (not (optionflags & DONT_ACCEPT_BLANKLINE)):
        got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
    # Use a real diff only when it is likely to be readable.
    if self._do_a_fancy_diff(want, got, optionflags):
        want_lines = want.splitlines(True)
        got_lines = got.splitlines(True)
        if (optionflags & REPORT_UDIFF):
            diff = difflib.unified_diff(want_lines, got_lines, n=2)
            diff = list(diff)[2:]  # drop the ---/+++ header lines
            kind = 'unified diff with -expected +actual'
        elif (optionflags & REPORT_CDIFF):
            diff = difflib.context_diff(want_lines, got_lines, n=2)
            diff = list(diff)[2:]  # drop the ***/--- header lines
            kind = 'context diff with expected followed by actual'
        elif (optionflags & REPORT_NDIFF):
            engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
            diff = list(engine.compare(want_lines, got_lines))
            kind = 'ndiff with -expected +actual'
        else:
            assert 0, 'Bad diff option'
        # Remove trailing whitespace on diff output.
        diff = [(line.rstrip() + '\n') for line in diff]
        return (('Differences (%s):\n' % kind) + _indent(''.join(diff)))
    # Otherwise, just show expected and actual outputs.
    if (want and got):
        return ('Expected:\n%sGot:\n%s' % (_indent(want), _indent(got)))
    elif want:
        return ('Expected:\n%sGot nothing\n' % _indent(want))
    elif got:
        return ('Expected nothing\nGot:\n%s' % _indent(got))
    else:
        return 'Expected nothing\nGot nothing\n'
|
'Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest(\'>>> raise KeyError\n42\',
... {}, \'foo\', \'foo.py\', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
\'42\n\'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
KeyError
If the output doesn\'t match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest(\'\'\'
... >>> x = 1
... >>> x
... 2
... \'\'\', {}, \'foo\', \'foo.py\', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
\'2\n\'
and the actual output:
>>> failure.got
\'1\n\''
def debug(self):
    """Run the test case without result collection and without catching
    exceptions, so failures surface as DocTestFailure /
    UnexpectedException for post-mortem debugging."""
    self.setUp()
    runner = DebugRunner(optionflags=self._dt_optionflags, checker=self._dt_checker, verbose=False)
    # clear_globs=False so the namespace can be inspected afterwards.
    runner.run(self._dt_test, clear_globs=False)
    self.tearDown()
|
'val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123'
def __init__(self, val):
    """val -> _TestClass object with associated value val.

    >>> t = _TestClass(123)
    >>> print t.get()
    123
    """
    self.val = val
|
'square() -> square TestClass\'s associated value
>>> _TestClass(13).square().get()
169'
def square(self):
    """square() -> square TestClass's associated value, returning self.

    >>> _TestClass(13).square().get()
    169
    """
    self.val **= 2
    return self
|
'get() -> return TestClass\'s associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42'
def get(self):
    """get() -> return TestClass's associated value.

    >>> x = _TestClass(-42)
    >>> print x.get()
    -42
    """
    return self.val
|
'Many methods we can just pass through to the DB object.
(See below)'
def __getattr__(self, name):
    # Delegate any attribute not found here to the wrapped DB object.
    return getattr(self.db, name)
|
'Some methods we can just pass through to the cursor object. (See below)'
def __getattr__(self, name):
    # Delegate any attribute not found here to the wrapped cursor object.
    return getattr(self.dbc, name)
|
'bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome Berkeley DB directory.
Use keyword arguments when calling this constructor.'
def __init__(self, filename, dbhome, create=0, truncate=0, mode=384, recover=0, dbflags=0):
    """bsdTableDB(filename, dbhome, create=0, truncate=0, mode=0600)

    Open database name in the dbhome Berkeley DB directory.
    Use keyword arguments when calling this constructor.
    """
    self.db = None
    myflags = db.DB_THREAD
    if create:
        myflags |= db.DB_CREATE
    flagsforenv = ((((db.DB_INIT_MPOOL | db.DB_INIT_LOCK) | db.DB_INIT_LOG) | db.DB_INIT_TXN) | dbflags)
    # DB_AUTO_COMMIT is not available in every Berkeley DB version.
    try:
        dbflags |= db.DB_AUTO_COMMIT
    except AttributeError:
        pass
    if recover:
        flagsforenv = (flagsforenv | db.DB_RECOVER)
    self.env = db.DBEnv()
    # Enable automatic deadlock detection in the environment.
    self.env.set_lk_detect(db.DB_LOCK_DEFAULT)
    self.env.open(dbhome, (myflags | flagsforenv))
    if truncate:
        myflags |= db.DB_TRUNCATE
    self.db = db.DB(self.env)
    self.db.set_get_returns_none(1)
    # Allow duplicate key entries (needed by this table layout).
    self.db.set_flags(db.DB_DUP)
    self.db.open(filename, db.DB_BTREE, (dbflags | myflags), mode)
    self.dbfilename = filename
    if (sys.version_info[0] >= 3):
        # On Python 3, wrap the DB and cursor so the rest of this module
        # can keep using str keys/values; bytes are round-tripped via
        # the iso8859-1 (latin-1) codec.
        class cursor_py3k(object, ):
            def __init__(self, dbcursor):
                self._dbcursor = dbcursor
            def close(self):
                return self._dbcursor.close()
            def set_range(self, search):
                v = self._dbcursor.set_range(bytes(search, 'iso8859-1'))
                if (v is not None):
                    v = (v[0].decode('iso8859-1'), v[1].decode('iso8859-1'))
                return v
            def __next__(self):
                v = getattr(self._dbcursor, 'next')()
                if (v is not None):
                    v = (v[0].decode('iso8859-1'), v[1].decode('iso8859-1'))
                return v
        class db_py3k(object, ):
            def __init__(self, db):
                self._db = db
            def cursor(self, txn=None):
                return cursor_py3k(self._db.cursor(txn=txn))
            def has_key(self, key, txn=None):
                return getattr(self._db, 'has_key')(bytes(key, 'iso8859-1'), txn=txn)
            def put(self, key, value, flags=0, txn=None):
                key = bytes(key, 'iso8859-1')
                if (value is not None):
                    value = bytes(value, 'iso8859-1')
                return self._db.put(key, value, flags=flags, txn=txn)
            def put_bytes(self, key, value, txn=None):
                # Store raw bytes (e.g. pickles) without text decoding.
                key = bytes(key, 'iso8859-1')
                return self._db.put(key, value, txn=txn)
            def get(self, key, txn=None, flags=0):
                key = bytes(key, 'iso8859-1')
                v = self._db.get(key, txn=txn, flags=flags)
                if (v is not None):
                    v = v.decode('iso8859-1')
                return v
            def get_bytes(self, key, txn=None, flags=0):
                # Fetch raw bytes (e.g. pickles) without text decoding.
                key = bytes(key, 'iso8859-1')
                return self._db.get(key, txn=txn, flags=flags)
            def delete(self, key, txn=None):
                key = bytes(key, 'iso8859-1')
                return self._db.delete(key, txn=txn)
            def close(self):
                return self._db.close()
        self.db = db_py3k(self.db)
    else:
        pass  # Python 2: use the raw DB object directly
    # Initialize the table-names list if this is a new database.
    txn = self.env.txn_begin()
    try:
        if (not getattr(self.db, 'has_key')(_table_names_key, txn)):
            getattr(self.db, 'put_bytes', self.db.put)(_table_names_key, pickle.dumps([], 1), txn=txn)
    except:
        txn.abort()
        raise
    else:
        txn.commit()
    # Cache of table name -> column list, filled lazily.
    self.__tablecolumns = {}
|
'Print the database to stdout for debugging'
def _db_print(self):
    """Print the raw database contents to stdout, for debugging."""
    print '******** Printing raw database for debugging ********'
    cur = self.db.cursor()
    try:
        (key, data) = cur.first()
        while 1:
            print repr({key: data})
            next = cur.next()  # NOTE: shadows the builtin 'next'
            if next:
                (key, data) = next
            else:
                cur.close()
                return
    except db.DBNotFoundError:
        # Empty database: first() raised; just close the cursor.
        cur.close()
|
'CreateTable(table, columns) - Create a new table in the database.
raises TableDBError if it already exists or for other DB errors.'
def CreateTable(self, table, columns):
    """CreateTable(table, columns) - Create a new table in the database.

    Raises ValueError for names containing reserved metastrings,
    TableAlreadyExists if the table exists, TableDBError for other
    DB errors.
    """
    assert isinstance(columns, list)
    txn = None
    try:
        # Validate names up front; metastrings would corrupt key layout.
        if contains_metastrings(table):
            raise ValueError('bad table name: contains reserved metastrings')
        for column in columns:
            if contains_metastrings(column):
                raise ValueError('bad column name: contains reserved metastrings')
        columnlist_key = _columns_key(table)
        if getattr(self.db, 'has_key')(columnlist_key):
            raise TableAlreadyExists, 'table already exists'
        txn = self.env.txn_begin()
        # Store the table's column info (pickled, protocol 1).
        getattr(self.db, 'put_bytes', self.db.put)(columnlist_key, pickle.dumps(columns, 1), txn=txn)
        # Add the table name to the table list (DB_RMW locks for write).
        tablelist = pickle.loads(getattr(self.db, 'get_bytes', self.db.get)(_table_names_key, txn=txn, flags=db.DB_RMW))
        tablelist.append(table)
        # Delete first, since the DB was opened with DB_DUP.
        self.db.delete(_table_names_key, txn=txn)
        getattr(self.db, 'put_bytes', self.db.put)(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
        txn.commit()
        txn = None
    except db.DBError as dberror:
        if txn:
            txn.abort()
        # dberror[1]/dberror.args[1] is the error message string.
        if (sys.version_info < (2, 6)):
            raise TableDBError, dberror[1]
        else:
            raise TableDBError, dberror.args[1]
|
'Return a list of columns in the given table.
[] if the table doesn\'t exist.'
def ListTableColumns(self, table):
    """Return a list of columns in the given table.
    [] if the table doesn't exist.
    """
    assert isinstance(table, str)
    if contains_metastrings(table):
        raise ValueError, 'bad table name: contains reserved metastrings'
    columnlist_key = _columns_key(table)
    if (not getattr(self.db, 'has_key')(columnlist_key)):
        return []
    pickledcolumnlist = getattr(self.db, 'get_bytes', self.db.get)(columnlist_key)
    if pickledcolumnlist:
        return pickle.loads(pickledcolumnlist)
    else:
        return []
|
'Return a list of tables in this database.'
def ListTables(self):
    """Return a list of table names in this database.

    Bug fix: this previously called ``self.db.get_get`` — a method that
    exists on neither the raw DB object nor the py3k wrapper — so it
    always raised AttributeError.  It now uses the same
    ``get_bytes``/``get`` fallback idiom used throughout this class to
    fetch the pickled table-name list.
    """
    pickledtablelist = getattr(self.db, 'get_bytes', self.db.get)(_table_names_key)
    if pickledtablelist:
        return pickle.loads(pickledtablelist)
    else:
        return []
|
'CreateOrExtendTable(table, columns)
Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.'
def CreateOrExtendTable(self, table, columns):
    """CreateOrExtendTable(table, columns)

    Create a new table in the database.

    If a table of this name already exists, extend it to have any
    additional columns present in the given list as well as
    all of its current columns.
    """
    assert isinstance(columns, list)
    try:
        self.CreateTable(table, columns)
    except TableAlreadyExists:
        # The table already exists: merge in any new columns.
        txn = None
        try:
            columnlist_key = _columns_key(table)
            txn = self.env.txn_begin()
            # Load the current column list (DB_RMW locks for write).
            oldcolumnlist = pickle.loads(getattr(self.db, 'get_bytes', self.db.get)(columnlist_key, txn=txn, flags=db.DB_RMW))
            # Hash the old names for O(1) membership tests below.
            oldcolumnhash = {}
            for c in oldcolumnlist:
                oldcolumnhash[c] = c
            # New list = old columns plus any not already present,
            # preserving the original order.
            newcolumnlist = copy.copy(oldcolumnlist)
            for c in columns:
                if (not (c in oldcolumnhash)):
                    newcolumnlist.append(c)
            # Store the extended column list only if it changed.
            if (newcolumnlist != oldcolumnlist):
                # Delete first, since the DB was opened with DB_DUP.
                self.db.delete(columnlist_key, txn=txn)
                getattr(self.db, 'put_bytes', self.db.put)(columnlist_key, pickle.dumps(newcolumnlist, 1), txn=txn)
            txn.commit()
            txn = None
            # Refresh the in-memory column cache.
            self.__load_column_info(table)
        except db.DBError as dberror:
            if txn:
                txn.abort()
            if (sys.version_info < (2, 6)):
                raise TableDBError, dberror[1]
            else:
                raise TableDBError, dberror.args[1]
|
'initialize the self.__tablecolumns dict'
def __load_column_info(self, table):
    """Initialize self.__tablecolumns[table] from the stored (pickled)
    column metadata; raise TableDBError for an unknown table."""
    try:
        tcolpickles = getattr(self.db, 'get_bytes', self.db.get)(_columns_key(table))
    except db.DBNotFoundError:
        raise TableDBError, ('unknown table: %r' % (table,))
    if (not tcolpickles):
        raise TableDBError, ('unknown table: %r' % (table,))
    self.__tablecolumns[table] = pickle.loads(tcolpickles)
|
'Create a new unique row identifier'
def __new_rowid(self, table, txn):
    """Create a new unique row identifier.

    Generates random ids until one can be inserted with
    DB_NOOVERWRITE, which atomically reserves it in the database.
    """
    unique = 0
    while (not unique):
        # Build a random _rowid_str_len-byte id.
        blist = []
        for x in xrange(_rowid_str_len):
            blist.append(random.randint(0, 255))
        newid = struct.pack(('B' * _rowid_str_len), *blist)
        if (sys.version_info[0] >= 3):
            # The py3k wrapper expects str keys.
            newid = newid.decode('iso8859-1')
        # DB_NOOVERWRITE makes the reservation atomic: an existing key
        # raises DBKeyExistError and we retry with a new random id.
        try:
            self.db.put(_rowid_key(table, newid), None, txn=txn, flags=db.DB_NOOVERWRITE)
        except db.DBKeyExistError:
            pass
        else:
            unique = 1
    return newid
|
'Insert(table, datadict) - Insert a new row into the table
using the keys+values from rowdict as the column values.'
def Insert(self, table, rowdict):
    """Insert(table, datadict) - Insert a new row into the table
    using the keys+values from rowdict as the column values.
    """
    txn = None
    try:
        if (not getattr(self.db, 'has_key')(_columns_key(table))):
            raise TableDBError, 'unknown table'
        # Ensure the column cache is loaded, then validate the row.
        if (not (table in self.__tablecolumns)):
            self.__load_column_info(table)
        for column in rowdict.keys():
            if (not self.__tablecolumns[table].count(column)):
                raise TableDBError, ('unknown column: %r' % (column,))
        # Reserve a unique row id within the transaction.
        txn = self.env.txn_begin()
        rowid = self.__new_rowid(table, txn=txn)
        # Store each column value under its per-column data key.
        for (column, dataitem) in rowdict.items():
            self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
        txn.commit()
        txn = None
    except db.DBError as dberror:
        # NOTE(review): if the error occurs before `rowid` is bound
        # (e.g. in txn_begin), the delete below would raise NameError —
        # presumably unreachable in practice; confirm before relying on
        # this cleanup path.
        info = sys.exc_info()
        if txn:
            txn.abort()
            self.db.delete(_rowid_key(table, rowid))
        # Re-raise as TableDBError, preserving the original traceback.
        if (sys.version_info < (2, 6)):
            raise TableDBError, dberror[1], info[2]
        else:
            raise TableDBError, dberror.args[1], info[2]
|
'Modify(table, conditions={}, mappings={}) - Modify items in rows matching \'conditions\' using mapping functions in \'mappings\'
* table - the table name
* conditions - a dictionary keyed on column names containing
a condition callable expecting the data string as an
argument and returning a boolean.
* mappings - a dictionary keyed on column names containing a
condition callable expecting the data string as an argument and
returning the new string for that column.'
def Modify(self, table, conditions={}, mappings={}):
    """Modify(table, conditions={}, mappings={}) - Modify items in rows
    matching 'conditions' using mapping functions in 'mappings'.

    * conditions - dict of column name -> predicate over the stored
      data string.
    * mappings - dict of column name -> function taking the current
      data string (or None if absent) and returning the new string;
      returning None leaves the column deleted.

    NOTE: the default {} arguments are shared across calls but are
    only read here, never mutated.
    """
    try:
        matching_rowids = self.__Select(table, [], conditions)
        # Modify only the columns named in `mappings`.
        columns = mappings.keys()
        for rowid in matching_rowids.keys():
            txn = None
            try:
                # One transaction per (row, column) update.
                for column in columns:
                    txn = self.env.txn_begin()
                    try:
                        dataitem = self.db.get(_data_key(table, column, rowid), txn=txn)
                        self.db.delete(_data_key(table, column, rowid), txn=txn)
                    except db.DBNotFoundError:
                        # Column was absent for this row; map from None.
                        dataitem = None
                    dataitem = mappings[column](dataitem)
                    if (dataitem is not None):
                        self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
                    txn.commit()
                    txn = None
            except:
                if txn:
                    txn.abort()
                raise
    except db.DBError as dberror:
        if (sys.version_info < (2, 6)):
            raise TableDBError, dberror[1]
        else:
            raise TableDBError, dberror.args[1]
|
'Delete(table, conditions) - Delete items matching the given
conditions from the table.
* conditions - a dictionary keyed on column names containing
condition functions expecting the data string as an
argument and returning a boolean.'
def Delete(self, table, conditions={}):
    """Delete(table, conditions) - Delete items matching the given
    conditions from the table.

    * conditions - dict of column name -> predicate over the stored
      data string.  (The shared default {} is only read, never mutated.)
    """
    try:
        matching_rowids = self.__Select(table, [], conditions)
        # Delete the row's data from every column, plus its rowid key.
        columns = self.__tablecolumns[table]
        for rowid in matching_rowids.keys():
            txn = None
            try:
                txn = self.env.txn_begin()
                for column in columns:
                    # A missing per-column key is not an error.
                    try:
                        self.db.delete(_data_key(table, column, rowid), txn=txn)
                    except db.DBNotFoundError:
                        pass
                try:
                    self.db.delete(_rowid_key(table, rowid), txn=txn)
                except db.DBNotFoundError:
                    pass
                txn.commit()
                txn = None
            except db.DBError as dberror:
                if txn:
                    txn.abort()
                raise
    except db.DBError as dberror:
        if (sys.version_info < (2, 6)):
            raise TableDBError, dberror[1]
        else:
            raise TableDBError, dberror.args[1]
|
'Select(table, columns, conditions) - retrieve specific row data
Returns a list of row column->value mapping dictionaries.
* columns - a list of which column data to return. If
columns is None, all columns will be returned.
* conditions - a dictionary keyed on column names
containing callable conditions expecting the data string as an
argument and returning a boolean.'
def Select(self, table, columns, conditions={}):
    """Select(table, columns, conditions) - retrieve specific row data

    Returns a list of row column->value mapping dictionaries.
    * columns - list of columns to return; None means all columns.
    * conditions - dict of column name -> predicate over the stored
      data string.
    """
    try:
        if (not (table in self.__tablecolumns)):
            self.__load_column_info(table)
        if (columns is None):
            columns = self.__tablecolumns[table]
        matching_rowids = self.__Select(table, columns, conditions)
    except db.DBError as dberror:
        if (sys.version_info < (2, 6)):
            raise TableDBError, dberror[1]
        else:
            raise TableDBError, dberror.args[1]
    # Return the per-row dicts (rowids themselves are dropped).
    return matching_rowids.values()
|
def __Select(self, table, columns, conditions):
    """__Select() - Used to implement Select and Delete (above).

    Returns a dictionary keyed on rowids containing dicts holding the
    row data for columns listed in the columns param that match the
    given conditions.

    * conditions is a dictionary keyed on column names containing
      callable conditions expecting the data string as an argument and
      returning a boolean.
    """
    if table not in self.__tablecolumns:
        self.__load_column_info(table)
    if columns is None:
        # BUG FIX: the original read self.tablecolumns (missing the
        # name-mangled double underscore), which does not exist and
        # raised AttributeError whenever this branch executed.
        columns = self.__tablecolumns[table]

    # Every requested column and every condition column must exist.
    for column in list(columns) + list(conditions.keys()):
        if not self.__tablecolumns[table].count(column):
            raise TableDBError('unknown column: %r' % (column,))

    matching_rowids = {}   # rowid -> {column: value} for candidate rows
    rejected_rowids = {}   # rowids already known to fail a condition

    def cmp_conditions(atuple, btuple):
        # Order conditions so the most selective run first: exact
        # matches, then the longest prefix / LIKE strings.
        a = atuple[1]
        b = btuple[1]
        if type(a) is type(b):
            def cmp(a, b):
                if a == b:
                    return 0
                if a < b:
                    return -1
                return 1
            if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
                # longest prefix first
                return cmp(len(b.prefix), len(a.prefix))
            if isinstance(a, LikeCond) and isinstance(b, LikeCond):
                # longest likestr first
                return cmp(len(b.likestr), len(a.likestr))
            return 0
        if isinstance(a, ExactCond):
            return -1
        if isinstance(b, ExactCond):
            return 1
        if isinstance(a, PrefixCond):
            return -1
        if isinstance(b, PrefixCond):
            return 1
        return 0

    if sys.version_info < (2, 6):
        conditionlist = conditions.items()
        conditionlist.sort(cmp_conditions)
    else:
        # list.sort(cmp=...) is gone in 2.6+/3.x; insertion-sort with
        # the same comparator to preserve the exact ordering semantics.
        conditionlist = []
        for i in conditions.items():
            for j, k in enumerate(conditionlist):
                r = cmp_conditions(k, i)
                if r == 1:
                    conditionlist.insert(j, i)
                    break
            else:
                conditionlist.append(i)

    # Walk the column-data records for each conditioned column, pruning
    # rows as soon as any condition rejects them.
    cur = self.db.cursor()
    column_num = -1
    for column, condition in conditionlist:
        column_num = column_num + 1
        searchkey = _search_col_data_key(table, column)
        # Only keep the data if the caller asked for this column.
        if column in columns:
            savethiscolumndata = 1
        else:
            savethiscolumndata = 0
        try:
            (key, data) = cur.set_range(searchkey)
            while key[:len(searchkey)] == searchkey:
                # The rowid is the fixed-length suffix of the key.
                rowid = key[-_rowid_str_len:]
                if rowid not in rejected_rowids:
                    if (not condition) or condition(data):
                        if rowid not in matching_rowids:
                            matching_rowids[rowid] = {}
                        if savethiscolumndata:
                            matching_rowids[rowid][column] = data
                    else:
                        if rowid in matching_rowids:
                            del matching_rowids[rowid]
                        rejected_rowids[rowid] = rowid
                (key, data) = cur.next()
        except db.DBError as dberror:
            # Running off the end of the column's keys is expected.
            if dberror.args[0] != db.DB_NOTFOUND:
                raise
            continue
    cur.close()
    del rejected_rowids

    # Fetch any requested columns not collected during the scan above.
    if len(columns) > 0:
        for rowid, rowdata in matching_rowids.items():
            for column in columns:
                if column in rowdata:
                    continue
                try:
                    rowdata[column] = self.db.get(
                        _data_key(table, column, rowid))
                except db.DBError as dberror:
                    if sys.version_info < (2, 6):
                        if dberror[0] != db.DB_NOTFOUND:
                            raise
                    elif dberror.args[0] != db.DB_NOTFOUND:
                        raise
                    # Missing column data for this row maps to None.
                    rowdata[column] = None
    return matching_rowids
|
def Drop(self, table):
    """Remove an entire table (column list, data and rowids) from the db."""
    txn = None
    cur = None
    try:
        txn = self.env.txn_begin()
        # Remove the table's column list record.
        self.db.delete(_columns_key(table), txn=txn)
        cur = self.db.cursor(txn)
        # Delete every column-data record belonging to the table.
        table_key = _search_all_data_key(table)
        while 1:
            try:
                (key, data) = cur.set_range(table_key)
            except db.DBNotFoundError:
                break
            if key[:len(table_key)] != table_key:
                break
            cur.delete()
        # Delete every rowid record belonging to the table.
        table_key = _search_rowid_key(table)
        while 1:
            try:
                (key, data) = cur.set_range(table_key)
            except db.DBNotFoundError:
                break
            if key[:len(table_key)] != table_key:
                break
            cur.delete()
        cur.close()
        cur = None
        # Drop the table from the pickled list of known table names.
        tablelist = pickle.loads(getattr(self.db, 'get_bytes', self.db.get)(
            _table_names_key, txn=txn, flags=db.DB_RMW))
        try:
            tablelist.remove(table)
        except ValueError:
            # Table name was not in the list; nothing to remove.
            pass
        self.db.delete(_table_names_key, txn=txn)
        getattr(self.db, 'put_bytes', self.db.put)(
            _table_names_key, pickle.dumps(tablelist, 1), txn=txn)
        txn.commit()
        txn = None
        # Forget cached column info for the dropped table.
        if table in self.__tablecolumns:
            del self.__tablecolumns[table]
    except db.DBError as dberror:
        # FIX: close the cursor before aborting; Berkeley DB requires all
        # cursors opened within a transaction to be closed before the
        # transaction is resolved, and the original leaked it here.
        if cur:
            cur.close()
        if txn:
            txn.abort()
        raise TableDBError(dberror.args[1])
|
def mk(self, key):
    """Turn *key* into the key type this db expects: the string itself on
    Python 2, iso8859-1 encoded bytes on Python 3."""
    return key if sys.version_info[0] < 3 else bytes(key, 'iso8859-1')
|
def test02_WithSource(self):
    """A Recno file that is given a "backing source file" is essentially a
    simple ASCII file.  Normally each record is delimited by \\n and so is
    just a line in the file, but a different record delimiter can be set.

    Verifies that appended records appear in the backing file after a
    sync, and that in-place record updates are reflected there too.
    """
    homeDir = get_new_environment_path()
    self.homeDir = homeDir
    source = os.path.join(homeDir, 'test_recno.txt')
    if not os.path.isdir(homeDir):
        os.mkdir(homeDir)
    # Create an empty backing source file.  FIX: use a context manager
    # instead of a bare open()/close() pair.
    with open(source, 'w') as f:
        pass
    d = db.DB()
    # Set the delimiter twice to exercise both the int and char forms.
    d.set_re_delim(10)
    d.set_re_delim('\n')
    d.set_re_source(source)
    d.open(self.filename, db.DB_RECNO, db.DB_CREATE)
    data = 'The quick brown fox jumped over the lazy dog'.split()
    for datum in data:
        d.append(datum)
    d.sync()
    d.close()
    # The backing file should now contain one record per line.
    with open(source, 'r') as f:
        text = f.read()
    text = text.strip()
    if verbose:
        # FIX: parenthesized print works identically on py2 and py3.
        print(text)
        print(data)
        print(text.split('\n'))
    self.assertEqual(text.split('\n'), data)
    # Reopen (without DB_CREATE) and rewrite two records in place.
    d = db.DB()
    d.set_re_source(source)
    d.open(self.filename, db.DB_RECNO)
    d[3] = 'reddish-brown'
    d[8] = 'comatose'
    d.sync()
    d.close()
    with open(source, 'r') as f:
        text = f.read()
    text = text.strip()
    if verbose:
        print(text)
        print(text.split('\n'))
    self.assertEqual(
        text.split('\n'),
        'The quick reddish-brown fox jumped over the comatose dog'.split())
|
def verifyStderr(self, method, successRe):
    """Call method() while capturing sys.stderr output internally and
    call self.fail() if successRe.search() does not match the stderr
    output.  This is used to test for uncatchable exceptions."""
    saved = sys.stderr
    sys.stderr = StringIO()
    try:
        method()
    finally:
        # Always restore the real stderr, even if method() raised.
        captured = sys.stderr
        sys.stderr = saved
    output = captured.getvalue()
    if not successRe.search(output):
        self.fail('unexpected stderr output:\n' + output)
    if sys.version_info < (3, 0):
        # Clear Python 2 traceback globals to break reference cycles.
        sys.exc_traceback = sys.last_traceback = None
|
'Call method() while capturing sys.stderr output internally and
call self.fail() if successRe.search() does not match the stderr
output. This is used to test for uncatchable exceptions.'
| def verifyStderr(self, method, successRe):
| stdErr = sys.stderr
# Redirect stderr into an in-memory buffer for the duration of the call.
sys.stderr = StringIO()
try:
    method()
finally:
    # Always restore the real stderr, even if method() raised.
    temp = sys.stderr
    sys.stderr = stdErr
errorOut = temp.getvalue()
# The captured output must match successRe, otherwise the test fails.
if (not successRe.search(errorOut)):
    self.fail(('unexpected stderr output:\n' + errorOut))
if (sys.version_info < (3, 0)):
    # Clear Python 2 traceback globals to break reference cycles.
    sys.exc_traceback = sys.last_traceback = None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.