desc (string, lengths 3–26.7k) | decl (string, lengths 11–7.89k) | bodies (string, lengths 8–553k) |
---|---|---|
'Ensure select_related together with only on a proxy model behaves
as expected. See #17876.'
| def test_defer_proxy(self):
| related = Secondary.objects.create(first='x1', second='x2')
ChildProxy.objects.create(name='p1', value='xx', related=related)
children = ChildProxy.objects.all().select_related().only('id', 'name')
self.assertEqual(len(children), 1)
child = children[0]
self.assert_delayed(child, 2)
self.assertEqual(child.name, 'p1')
self.assertEqual(child.value, 'xx')
|
'When an inherited model is fetched from the DB, its PK is also fetched.
When getting the PK of the parent model, it is useful to reuse the
already-fetched parent PK if it is available. Tests that this is done.'
| def test_defer_inheritance_pk_chaining(self):
| s1 = Secondary.objects.create(first='x1', second='y1')
bc = BigChild.objects.create(name='b1', value='foo', related=s1, other='bar')
bc_deferred = BigChild.objects.only('name').get(pk=bc.pk)
with self.assertNumQueries(0):
bc_deferred.id
self.assertEqual(bc_deferred.pk, bc_deferred.id)
|
'Regression test for #6045: references to other models can be unicode
strings, provided they are directly convertible to ASCII.'
| def test_m2m_with_unicode_reference(self):
| m1 = UnicodeReferenceModel.objects.create()
m2 = UnicodeReferenceModel.objects.create()
m2.others.add(m1)
m2.save()
list(m2.others.all())
|
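For context, here is a minimal sketch of the kind of model the #6045 regression above exercises: the ManyToManyField target is given as a unicode string rather than a class reference. The model name comes from the test; the exact field definition is an assumption.

```python
# Editorial sketch, not the original test models: a self-referential M2M whose
# target is spelled as a unicode string, which #6045 requires to keep working.
from django.db import models

class UnicodeReferenceModel(models.Model):
    others = models.ManyToManyField(u"UnicodeReferenceModel")
```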
'Ensure that a lookup query containing non-fields raises the proper
exception.'
| def test_nonfield_lookups(self):
| with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah=99)
with self.assertRaises(FieldError):
Article.objects.filter(headline__blahblah__exact=99)
with self.assertRaises(FieldError):
Article.objects.filter(blahblah=99)
|
'Ensure that genuine field names don\'t collide with built-in lookup
types (\'year\', \'gt\', \'range\', \'in\' etc.).
Refs #11670.'
| def test_lookup_collision(self):
| season_2009 = Season.objects.create(year=2009, gt=111)
season_2009.games.create(home=u'Houston Astros', away=u'St. Louis Cardinals')
season_2010 = Season.objects.create(year=2010, gt=222)
season_2010.games.create(home=u'Houston Astros', away=u'Chicago Cubs')
season_2010.games.create(home=u'Houston Astros', away=u'Milwaukee Brewers')
season_2010.games.create(home=u'Houston Astros', away=u'St. Louis Cardinals')
season_2011 = Season.objects.create(year=2011, gt=333)
season_2011.games.create(home=u'Houston Astros', away=u'St. Louis Cardinals')
season_2011.games.create(home=u'Houston Astros', away=u'Milwaukee Brewers')
hunter_pence = Player.objects.create(name=u'Hunter Pence')
hunter_pence.games = Game.objects.filter(season__year__in=[2009, 2010])
pudge = Player.objects.create(name=u'Ivan Rodriquez')
pudge.games = Game.objects.filter(season__year=2009)
pedro_feliz = Player.objects.create(name=u'Pedro Feliz')
pedro_feliz.games = Game.objects.filter(season__year__in=[2011])
johnson = Player.objects.create(name=u'Johnson')
johnson.games = Game.objects.filter(season__year__in=[2011])
self.assertEqual(Game.objects.filter(season__year=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__year__exact=2010).count(), 3)
self.assertEqual(Game.objects.filter(season__gt=222).count(), 3)
self.assertEqual(Game.objects.filter(season__gt__exact=222).count(), 3)
self.assertEqual(Game.objects.filter(season__year=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__year__exact=2011).count(), 2)
self.assertEqual(Game.objects.filter(season__gt=333).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__exact=333).count(), 2)
self.assertEqual(Game.objects.filter(season__year__gt=2010).count(), 2)
self.assertEqual(Game.objects.filter(season__gt__gt=222).count(), 2)
self.assertEqual(Game.objects.filter(season__year__in=[2010, 2011]).count(), 5)
self.assertEqual(Game.objects.filter(season__year__gt=2009).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__in=[222, 333]).count(), 5)
self.assertEqual(Game.objects.filter(season__gt__gt=111).count(), 5)
self.assertEqual(Player.objects.filter(games__season__year=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2009).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__exact=111).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year__exact=2010).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__gt__exact=222).distinct().count(), 1)
self.assertEqual(Player.objects.filter(games__season__year=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__exact=2011).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt=333).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__year__gt=2010).distinct().count(), 2)
self.assertEqual(Player.objects.filter(games__season__gt__gt=222).distinct().count(), 2)
|
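To make the lookup-collision row easier to follow, here is a hedged sketch of the models it appears to assume. The names (`Season.year`, `Season.gt`, the `games` reverse relation, `Player.games`) are inferred from the test body; field types, `related_name`s, and other options are assumptions.

```python
# Hypothetical models for the lookup-collision test; the real module may differ.
from django.db import models

class Season(models.Model):
    year = models.PositiveIntegerField()
    gt = models.IntegerField(null=True, blank=True)  # deliberately shadows the 'gt' lookup name

class Game(models.Model):
    season = models.ForeignKey(Season, related_name='games')
    home = models.CharField(max_length=100)
    away = models.CharField(max_length=100)

class Player(models.Model):
    name = models.CharField(max_length=100)
    games = models.ManyToManyField(Game, related_name='players', blank=True)
```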
'QuerySet.distinct(\'field\', ...) works'
| @skipUnlessDBFeature('can_distinct_on_fields')
def test_basic_distinct_on(self):
| qsets = (
    (Staff.objects.distinct().order_by('name'),
     ['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>']),
    (Staff.objects.distinct('name').order_by('name'),
     ['<Staff: p1>', '<Staff: p2>', '<Staff: p3>']),
    (Staff.objects.distinct('organisation').order_by('organisation', 'name'),
     ['<Staff: p1>', '<Staff: p1>']),
    (Staff.objects.distinct('name', 'organisation').order_by('name', 'organisation'),
     ['<Staff: p1>', '<Staff: p1>', '<Staff: p2>', '<Staff: p3>']),
    (Celebrity.objects.filter(fan__in=[self.fan1, self.fan2, self.fan3]).distinct('name').order_by('name'),
     ['<Celebrity: c1>', '<Celebrity: c2>']),
    ((Celebrity.objects.filter(fan__in=[self.fan1, self.fan2]).distinct('name').order_by('name')
      | Celebrity.objects.filter(fan__in=[self.fan3]).distinct('name').order_by('name')),
     ['<Celebrity: c1>', '<Celebrity: c2>']),
    (StaffTag.objects.distinct('staff', 'tag'),
     ['<StaffTag: t1 -> p1>']),
    (Tag.objects.order_by('parent__pk', 'pk').distinct('parent'),
     ['<Tag: t2>', '<Tag: t4>', '<Tag: t1>']),
    (StaffTag.objects.select_related('staff').distinct('staff__name').order_by('staff__name'),
     ['<StaffTag: t1 -> p1>']),
    (Staff.objects.distinct('id').order_by('id', 'coworkers__name').values_list('id', 'coworkers__name'),
     [str_prefix("(1, %(_)s'p2')"), str_prefix("(2, %(_)s'p1')"),
      str_prefix("(3, %(_)s'p1')"), '(4, None)']),
)
for (qset, expected) in qsets:
self.assertQuerysetEqual(qset, expected)
self.assertEqual(qset.count(), len(expected))
base_qs = Celebrity.objects.all()
self.assertRaisesMessage(AssertionError, 'Cannot combine queries with different distinct fields.', lambda: base_qs.distinct('id') & base_qs.distinct('name'))
c1 = Celebrity.objects.distinct('greatest_fan__id', 'greatest_fan__fan_of')
self.assertIn('OUTER JOIN', str(c1.query))
c2 = c1.distinct('pk')
self.assertNotIn('OUTER JOIN', str(c2.query))
|
'Test the {% localtime %} templatetag and related filters.'
| @requires_tz_support
def test_localtime_templatetag_and_filters(self):
| datetimes = {
    u'utc': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC),
    u'eat': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT),
    u'ict': datetime.datetime(2011, 9, 1, 17, 20, 30, tzinfo=ICT),
    u'naive': datetime.datetime(2011, 9, 1, 13, 20, 30),
}
templates = {
    u'notag': Template(u'{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}'),
    u'noarg': Template(u'{% load tz %}{% localtime %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}'),
    u'on': Template(u'{% load tz %}{% localtime on %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}'),
    u'off': Template(u'{% load tz %}{% localtime off %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:ICT }}{% endlocaltime %}'),
}
def t(*result):
return u'|'.join((datetimes[key].isoformat() for key in result))
results = {
    u'utc': {
        u'notag': t(u'eat', u'eat', u'utc', u'ict'),
        u'noarg': t(u'eat', u'eat', u'utc', u'ict'),
        u'on': t(u'eat', u'eat', u'utc', u'ict'),
        u'off': t(u'utc', u'eat', u'utc', u'ict'),
    },
    u'eat': {
        u'notag': t(u'eat', u'eat', u'utc', u'ict'),
        u'noarg': t(u'eat', u'eat', u'utc', u'ict'),
        u'on': t(u'eat', u'eat', u'utc', u'ict'),
        u'off': t(u'eat', u'eat', u'utc', u'ict'),
    },
    u'ict': {
        u'notag': t(u'eat', u'eat', u'utc', u'ict'),
        u'noarg': t(u'eat', u'eat', u'utc', u'ict'),
        u'on': t(u'eat', u'eat', u'utc', u'ict'),
        u'off': t(u'ict', u'eat', u'utc', u'ict'),
    },
    u'naive': {
        u'notag': t(u'naive', u'eat', u'utc', u'ict'),
        u'noarg': t(u'naive', u'eat', u'utc', u'ict'),
        u'on': t(u'naive', u'eat', u'utc', u'ict'),
        u'off': t(u'naive', u'eat', u'utc', u'ict'),
    },
}
for (k1, dt) in six.iteritems(datetimes):
for (k2, tpl) in six.iteritems(templates):
ctx = Context({u'dt': dt, u'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, (u'%s / %s: %r != %r' % (k1, k2, actual, expected)))
results[u'utc'][u'notag'] = t(u'utc', u'eat', u'utc', u'ict')
results[u'ict'][u'notag'] = t(u'ict', u'eat', u'utc', u'ict')
with self.settings(USE_TZ=False):
for (k1, dt) in six.iteritems(datetimes):
for (k2, tpl) in six.iteritems(templates):
ctx = Context({u'dt': dt, u'ICT': ICT})
actual = tpl.render(ctx)
expected = results[k1][k2]
self.assertEqual(actual, expected, (u'%s / %s: %r != %r' % (k1, k2, actual, expected)))
|
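The timezone rows above and below reference `UTC`, `EAT`, and `ICT` tzinfo constants without defining them. A minimal sketch of such constants follows; only the offsets (+00:00, +03:00, +07:00) are implied by the expected output strings, and the helper class is an editorial assumption rather than the original test module's code.

```python
import datetime

class FixedOffset(datetime.tzinfo):
    """A fixed-offset timezone with no DST (sketch only)."""
    def __init__(self, minutes, name):
        self._offset = datetime.timedelta(minutes=minutes)
        self._name = name

    def utcoffset(self, dt):
        return self._offset

    def tzname(self, dt):
        return self._name

    def dst(self, dt):
        return datetime.timedelta(0)

UTC = FixedOffset(0, 'UTC')    # +00:00
EAT = FixedOffset(180, 'EAT')  # +03:00, e.g. Africa/Nairobi
ICT = FixedOffset(420, 'ICT')  # +07:00, e.g. Asia/Bangkok
```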
'Test the |localtime, |utc, and |timezone filters with pytz.'
| @skipIf((pytz is None), u'this test requires pytz')
def test_localtime_filters_with_pytz(self):
| tpl = Template(u'{% load tz %}{{ dt|localtime }}|{{ dt|utc }}')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 12, 20, 30)})
with self.settings(TIME_ZONE=u'Europe/Paris'):
self.assertEqual(tpl.render(ctx), u'2011-09-01T12:20:30+02:00|2011-09-01T10:20:30+00:00')
tpl = Template(u'{% load tz %}{{ dt|timezone:tz }}')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), u'tz': pytz.timezone(u'Europe/Paris')})
self.assertEqual(tpl.render(ctx), u'2011-09-01T12:20:30+02:00')
tpl = Template(u"{% load tz %}{{ dt|timezone:'Europe/Paris' }}")
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), u'tz': pytz.timezone(u'Europe/Paris')})
self.assertEqual(tpl.render(ctx), u'2011-09-01T12:20:30+02:00')
|
'Test the |localtime, |utc, and |timezone filters on bad inputs.'
| def test_localtime_filters_do_not_raise_exceptions(self):
| tpl = Template(u'{% load tz %}{{ dt }}|{{ dt|localtime }}|{{ dt|utc }}|{{ dt|timezone:tz }}')
with self.settings(USE_TZ=True):
ctx = Context({u'dt': None, u'tz': ICT})
self.assertEqual(tpl.render(ctx), u'None|||')
ctx = Context({u'dt': u'not a date', u'tz': ICT})
self.assertEqual(tpl.render(ctx), u'not a date|||')
tpl = Template(u'{% load tz %}{{ dt|timezone:tz }}')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), u'tz': None})
self.assertEqual(tpl.render(ctx), u'')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 13, 20, 30), u'tz': u'not a tz'})
self.assertEqual(tpl.render(ctx), u'')
|
'Test the {% timezone %} templatetag.'
| @requires_tz_support
def test_timezone_templatetag(self):
| tpl = Template(u'{% load tz %}{{ dt }}|{% timezone tz1 %}{{ dt }}|{% timezone tz2 %}{{ dt }}{% endtimezone %}{% endtimezone %}')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 10, 20, 30, tzinfo=UTC), u'tz1': ICT, u'tz2': None})
self.assertEqual(tpl.render(ctx), u'2011-09-01T13:20:30+03:00|2011-09-01T17:20:30+07:00|2011-09-01T13:20:30+03:00')
|
'Test the {% timezone %} templatetag with pytz.'
| @skipIf((pytz is None), u'this test requires pytz')
def test_timezone_templatetag_with_pytz(self):
| tpl = Template(u'{% load tz %}{% timezone tz %}{{ dt }}{% endtimezone %}')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), u'tz': pytz.timezone(u'Europe/Paris')})
self.assertEqual(tpl.render(ctx), u'2011-09-01T12:20:30+02:00')
ctx = Context({u'dt': datetime.datetime(2011, 9, 1, 13, 20, 30, tzinfo=EAT), u'tz': u'Europe/Paris'})
self.assertEqual(tpl.render(ctx), u'2011-09-01T12:20:30+02:00')
|
'Test the {% get_current_timezone %} templatetag.'
| @skipIf(sys.platform.startswith(u'win'), u'Windows uses non-standard time zone names')
def test_get_current_timezone_templatetag(self):
| tpl = Template(u'{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}')
self.assertEqual(tpl.render(Context()), (u'Africa/Nairobi' if pytz else u'EAT'))
with timezone.override(UTC):
self.assertEqual(tpl.render(Context()), u'UTC')
tpl = Template(u'{% load tz %}{% timezone tz %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}')
self.assertEqual(tpl.render(Context({u'tz': ICT})), u'+0700')
with timezone.override(UTC):
self.assertEqual(tpl.render(Context({u'tz': ICT})), u'+0700')
|
'Test the {% get_current_timezone %} templatetag with pytz.'
| @skipIf((pytz is None), u'this test requires pytz')
def test_get_current_timezone_templatetag_with_pytz(self):
| tpl = Template(u'{% load tz %}{% get_current_timezone as time_zone %}{{ time_zone }}')
with timezone.override(pytz.timezone(u'Europe/Paris')):
self.assertEqual(tpl.render(Context()), u'Europe/Paris')
tpl = Template(u"{% load tz %}{% timezone 'Europe/Paris' %}{% get_current_timezone as time_zone %}{% endtimezone %}{{ time_zone }}")
self.assertEqual(tpl.render(Context()), u'Europe/Paris')
|
'Test the django.core.context_processors.tz template context processor.'
| @skipIf(sys.platform.startswith(u'win'), u'Windows uses non-standard time zone names')
def test_tz_template_context_processor(self):
| tpl = Template(u'{{ TIME_ZONE }}')
self.assertEqual(tpl.render(Context()), u'')
self.assertEqual(tpl.render(RequestContext(HttpRequest())), (u'Africa/Nairobi' if pytz else u'EAT'))
|
'The MyPerson model should generate the same database queries as the
Person model (when the same manager is used in each case).'
| def test_same_manager_queries(self):
| my_person_sql = MyPerson.other.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
person_sql = Person.objects.order_by(u'name').query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
self.assertEqual(my_person_sql, person_sql)
|
'The StatusPerson model should have its own table (it uses ORM-level
inheritance).'
| def test_inheretance_new_table(self):
| sp_sql = StatusPerson.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
p_sql = Person.objects.all().query.get_compiler(DEFAULT_DB_ALIAS).as_sql()
self.assertNotEqual(sp_sql, p_sql)
|
'Creating a Person makes them accessible through the MyPerson proxy.'
| def test_basic_proxy(self):
| person = Person.objects.create(name=u'Foo McBar')
self.assertEqual(len(Person.objects.all()), 1)
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(MyPerson.objects.get(name=u'Foo McBar').id, person.id)
self.assertFalse(MyPerson.objects.get(id=person.id).has_special_name())
|
'Person is not proxied by StatusPerson subclass.'
| def test_no_proxy(self):
| Person.objects.create(name=u'Foo McBar')
self.assertEqual(list(StatusPerson.objects.all()), [])
|
'A new MyPerson also shows up as a standard Person.'
| def test_basic_proxy_reverse(self):
| MyPerson.objects.create(name=u'Bazza del Frob')
self.assertEqual(len(MyPerson.objects.all()), 1)
self.assertEqual(len(Person.objects.all()), 1)
LowerStatusPerson.objects.create(status=u'low', name=u'homer')
lsps = [lsp.name for lsp in LowerStatusPerson.objects.all()]
self.assertEqual(lsps, [u'homer'])
|
'Correct type when querying a proxy of proxy'
| def test_correct_type_proxy_of_proxy(self):
| Person.objects.create(name=u'Foo McBar')
MyPerson.objects.create(name=u'Bazza del Frob')
LowerStatusPerson.objects.create(status=u'low', name=u'homer')
pp = sorted([mpp.name for mpp in MyPersonProxy.objects.all()])
self.assertEqual(pp, [u'Bazza del Frob', u'Foo McBar', u'homer'])
|
'Proxy models are included in the ancestors for a model\'s DoesNotExist
and MultipleObjectsReturned'
| def test_proxy_included_in_ancestors(self):
| Person.objects.create(name=u'Foo McBar')
MyPerson.objects.create(name=u'Bazza del Frob')
LowerStatusPerson.objects.create(status=u'low', name=u'homer')
max_id = Person.objects.aggregate(max_id=models.Max(u'id'))[u'max_id']
self.assertRaises(Person.DoesNotExist, MyPersonProxy.objects.get, name=u'Zathras')
self.assertRaises(Person.MultipleObjectsReturned, MyPersonProxy.objects.get, id__lt=(max_id + 1))
self.assertRaises(Person.DoesNotExist, StatusPerson.objects.get, name=u'Zathras')
sp1 = StatusPerson.objects.create(name=u'Bazza Jr.')
sp2 = StatusPerson.objects.create(name=u'Foo Jr.')
max_id = Person.objects.aggregate(max_id=models.Max(u'id'))[u'max_id']
self.assertRaises(Person.MultipleObjectsReturned, StatusPerson.objects.get, id__lt=(max_id + 1))
|
'All base classes must be non-abstract'
| def test_abc(self):
| def build_abc():
class NoAbstract(Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_abc)
|
'The proxy must actually have one concrete base class'
| def test_no_cbc(self):
| def build_no_cbc():
class TooManyBases(Person, Abstract):
class Meta:
proxy = True
self.assertRaises(TypeError, build_no_cbc)
|
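The two rows above (`test_abc`, `test_no_cbc`) enforce the structural rules for proxy models: a proxy needs exactly one concrete model base, and any abstract bases must not contribute fields. The sketch below illustrates those rules; `Person` and `Abstract` stand in for the models the tests import, and the field details are assumptions.

```python
from django.db import models

class Person(models.Model):
    name = models.CharField(max_length=50)

class Abstract(models.Model):
    data = models.CharField(max_length=10)  # an abstract base that adds a field

    class Meta:
        abstract = True

class MyPerson(Person):  # valid: exactly one concrete base, no new fields
    class Meta:
        proxy = True

# Both of the following raise TypeError at class-creation time, which is what
# test_abc and test_no_cbc assert:
#
#   class NoAbstract(Abstract):            # no concrete model to proxy
#       class Meta:
#           proxy = True
#
#   class TooManyBases(Person, Abstract):  # extra base contributes fields
#       class Meta:
#           proxy = True
```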
'Test save signals for proxy models'
| def test_proxy_model_signals(self):
| output = []
def make_handler(model, event):
def _handler(*args, **kwargs):
output.append((u'%s %s save' % (model, event)))
return _handler
h1 = make_handler(u'MyPerson', u'pre')
h2 = make_handler(u'MyPerson', u'post')
h3 = make_handler(u'Person', u'pre')
h4 = make_handler(u'Person', u'post')
signals.pre_save.connect(h1, sender=MyPerson)
signals.post_save.connect(h2, sender=MyPerson)
signals.pre_save.connect(h3, sender=Person)
signals.post_save.connect(h4, sender=Person)
dino = MyPerson.objects.create(name=u'dino')
self.assertEqual(output, [u'MyPerson pre save', u'MyPerson post save'])
output = []
h5 = make_handler(u'MyPersonProxy', u'pre')
h6 = make_handler(u'MyPersonProxy', u'post')
signals.pre_save.connect(h5, sender=MyPersonProxy)
signals.post_save.connect(h6, sender=MyPersonProxy)
dino = MyPersonProxy.objects.create(name=u'pebbles')
self.assertEqual(output, [u'MyPersonProxy pre save', u'MyPersonProxy post save'])
signals.pre_save.disconnect(h1, sender=MyPerson)
signals.post_save.disconnect(h2, sender=MyPerson)
signals.pre_save.disconnect(h3, sender=Person)
signals.post_save.disconnect(h4, sender=Person)
signals.pre_save.disconnect(h5, sender=MyPersonProxy)
signals.post_save.disconnect(h6, sender=MyPersonProxy)
|
'Proxy objects can be deleted'
| def test_proxy_delete(self):
| User.objects.create(name=u'Bruce')
u2 = UserProxy.objects.create(name=u'George')
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, [u'Bruce', u'George'])
u2.delete()
resp = [u.name for u in UserProxy.objects.all()]
self.assertEqual(resp, [u'Bruce'])
|
'We can still use `select_related()` to include related models in our
querysets.'
| def test_select_related(self):
| country = Country.objects.create(name=u'Australia')
state = State.objects.create(name=u'New South Wales', country=country)
resp = [s.name for s in State.objects.select_related()]
self.assertEqual(resp, [u'New South Wales'])
resp = [s.name for s in StateProxy.objects.select_related()]
self.assertEqual(resp, [u'New South Wales'])
self.assertEqual(StateProxy.objects.get(name=u'New South Wales').name, u'New South Wales')
resp = StateProxy.objects.select_related().get(name=u'New South Wales')
self.assertEqual(resp.name, u'New South Wales')
|
'Registering a new serializer populates the full registry. Refs #14823'
| def test_register(self):
| serializers.register_serializer(u'json3', u'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertIn(u'json3', public_formats)
self.assertIn(u'json2', public_formats)
self.assertIn(u'xml', public_formats)
|
'Unregistering a serializer doesn\'t cause the registry to be repopulated. Refs #14823'
| def test_unregister(self):
| serializers.unregister_serializer(u'xml')
serializers.register_serializer(u'json3', u'django.core.serializers.json')
public_formats = serializers.get_public_serializer_formats()
self.assertNotIn(u'xml', public_formats)
self.assertIn(u'json3', public_formats)
|
'Requesting a list of serializer formats populates the registry'
| def test_builtin_serializers(self):
| all_formats = set(serializers.get_serializer_formats())
public_formats = set(serializers.get_public_serializer_formats())
self.assertIn(u'xml', all_formats)
self.assertIn(u'xml', public_formats)
self.assertIn(u'json2', all_formats)
self.assertIn(u'json2', public_formats)
self.assertIn(u'python', all_formats)
self.assertNotIn(u'python', public_formats)
|
'Tests that basic serialization works.'
| def test_serialize(self):
| serial_str = serializers.serialize(self.serializer_name, Article.objects.all())
self.assertTrue(self._validate_output(serial_str))
|
'Tests that serialized content can be deserialized.'
| def test_serializer_roundtrip(self):
| serial_str = serializers.serialize(self.serializer_name, Article.objects.all())
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(len(models), 2)
|
'Tests the ability to create new objects by
modifying serialized content.'
| def test_altering_serialized_output(self):
| old_headline = u'Poker has no place on ESPN'
new_headline = u'Poker has no place on television'
serial_str = serializers.serialize(self.serializer_name, Article.objects.all())
serial_str = serial_str.replace(old_headline, new_headline)
models = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertTrue(Article.objects.filter(headline=old_headline))
self.assertFalse(Article.objects.filter(headline=new_headline))
for model in models:
model.save()
self.assertTrue(Article.objects.filter(headline=new_headline))
self.assertFalse(Article.objects.filter(headline=old_headline))
|
'Tests that if you use your own primary key field
(such as a OneToOneField), it doesn\'t appear in the
serialized field list - it replaces the pk identifier.'
| def test_one_to_one_as_pk(self):
| profile = AuthorProfile(author=self.joe, date_of_birth=datetime(1970, 1, 1))
profile.save()
serial_str = serializers.serialize(self.serializer_name, AuthorProfile.objects.all())
self.assertFalse(self._get_field_values(serial_str, u'author'))
for obj in serializers.deserialize(self.serializer_name, serial_str):
self.assertEqual(obj.object.pk, self._comparison_value(self.joe.pk))
|
'Tests that output can be restricted to a subset of fields'
| def test_serialize_field_subset(self):
| valid_fields = (u'headline', u'pub_date')
invalid_fields = (u'author', u'categories')
serial_str = serializers.serialize(self.serializer_name, Article.objects.all(), fields=valid_fields)
for field_name in invalid_fields:
self.assertFalse(self._get_field_values(serial_str, field_name))
for field_name in valid_fields:
self.assertTrue(self._get_field_values(serial_str, field_name))
|
'Tests that unicode makes the roundtrip intact'
| def test_serialize_unicode(self):
| actor_name = u'Za\u017c\xf3\u0142\u0107'
movie_title = u'G\u0119\u015bl\u0105 ja\u017a\u0144'
ac = Actor(name=actor_name)
mv = Movie(title=movie_title, actor=ac)
ac.save()
mv.save()
serial_str = serializers.serialize(self.serializer_name, [mv])
self.assertEqual(self._get_field_values(serial_str, u'title')[0], movie_title)
self.assertEqual(self._get_field_values(serial_str, u'actor')[0], actor_name)
obj_list = list(serializers.deserialize(self.serializer_name, serial_str))
mv_obj = obj_list[0].object
self.assertEqual(mv_obj.title, movie_title)
|
'Ensure no superfluous queries are made when serializing ForeignKeys.
Refs #17602.'
| def test_serialize_superfluous_queries(self):
| ac = Actor(name=u'Actor name')
ac.save()
mv = Movie(title=u'Movie title', actor_id=ac.pk)
mv.save()
with self.assertNumQueries(0):
serial_str = serializers.serialize(self.serializer_name, [mv])
|
'Tests that serialized data with no primary key results
in a model instance with no id'
| def test_serialize_with_null_pk(self):
| category = Category(name=u'Reference')
serial_str = serializers.serialize(self.serializer_name, [category])
pk_value = self._get_pk_values(serial_str)[0]
self.assertFalse(pk_value)
cat_obj = list(serializers.deserialize(self.serializer_name, serial_str))[0].object
self.assertEqual(cat_obj.id, None)
|
'Tests that float values serialize and deserialize intact'
| def test_float_serialization(self):
| sc = Score(score=3.4)
sc.save()
serial_str = serializers.serialize(self.serializer_name, [sc])
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.score, Approximate(3.4, places=1))
|
'Tests that custom fields serialize and deserialize intact'
| def test_custom_field_serialization(self):
| team_str = u'Spartak Moskva'
player = Player()
player.name = u'Soslan Djanaev'
player.rank = 1
player.team = Team(team_str)
player.save()
serial_str = serializers.serialize(self.serializer_name, Player.objects.all())
team = self._get_field_values(serial_str, u'team')
self.assertTrue(team)
self.assertEqual(team[0], team_str)
deserial_objs = list(serializers.deserialize(self.serializer_name, serial_str))
self.assertEqual(deserial_objs[0].object.team.to_string(), player.team.to_string())
|
'Tests that year values before 1000 AD are properly formatted'
| def test_pre_1000ad_date(self):
| a = Article.objects.create(author=self.jane, headline=u'Nobody remembers the early years', pub_date=datetime(1, 2, 3, 4, 5, 6))
serial_str = serializers.serialize(self.serializer_name, [a])
date_values = self._get_field_values(serial_str, u'pub_date')
self.assertEqual(date_values[0].replace(u'T', u' '), u'0001-02-03 04:05:06')
|
'Tests that serialized strings without PKs
can be turned into models'
| def test_pkless_serialized_strings(self):
| deserial_objs = list(serializers.deserialize(self.serializer_name, self.pkless_str))
for obj in deserial_objs:
self.assertFalse(obj.object.id)
obj.save()
self.assertEqual(Category.objects.all().count(), 4)
|
'Tests that object ids can be referenced before they are
defined in the serialization data.'
| def test_forward_refs(self):
| transaction.enter_transaction_management()
transaction.managed(True)
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled():
for obj in objs:
obj.save()
transaction.commit()
transaction.leave_transaction_management()
for model_cls in (Category, Author, Article):
self.assertEqual(model_cls.objects.all().count(), 1)
art_obj = Article.objects.all()[0]
self.assertEqual(art_obj.categories.all().count(), 1)
self.assertEqual(art_obj.author.name, u'Agnes')
|
'ModelForm test of unique_together constraint'
| def test_unique_together(self):
| form = PriceForm({u'price': u'6.00', u'quantity': u'1'})
self.assertTrue(form.is_valid())
form.save()
form = PriceForm({u'price': u'6.00', u'quantity': u'1'})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 1)
self.assertEqual(form.errors[u'__all__'], [u'Price with this Price and Quantity already exists.'])
|
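The `test_unique_together` row relies on a `Price` model and `PriceForm` that are not shown here. A hedged sketch follows; the field types and the `unique_together` declaration are inferred from the submitted data and the expected error message, and the explicit `fields` option is an editorial addition.

```python
from django import forms
from django.db import models

class Price(models.Model):
    price = models.DecimalField(max_digits=10, decimal_places=2)
    quantity = models.PositiveIntegerField()

    class Meta:
        unique_together = (('price', 'quantity'),)

class PriceForm(forms.ModelForm):
    class Meta:
        model = Price
        fields = ('price', 'quantity')
```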
'Test for primary_key being in the form and failing validation.'
| def test_explicitpk_unspecified(self):
| form = ExplicitPKForm({u'key': u'', u'desc': u''})
self.assertFalse(form.is_valid())
|
'Ensure keys and blank character strings are tested for uniqueness.'
| def test_explicitpk_unique(self):
| form = ExplicitPKForm({u'key': u'key1', u'desc': u''})
self.assertTrue(form.is_valid())
form.save()
form = ExplicitPKForm({u'key': u'key1', u'desc': u''})
self.assertFalse(form.is_valid())
self.assertEqual(len(form.errors), 3)
self.assertEqual(form.errors[u'__all__'], [u'Explicit pk with this Key and Desc already exists.'])
self.assertEqual(form.errors[u'desc'], [u'Explicit pk with this Desc already exists.'])
self.assertEqual(form.errors[u'key'], [u'Explicit pk with this Key already exists.'])
|
'Execute the passed query against the passed model and check the output'
| def assertSuccessfulRawQuery(self, model, query, expected_results, expected_annotations=(), params=[], translations=None):
| results = list(model.objects.raw(query, params=params, translations=translations))
self.assertProcessed(model, results, expected_results, expected_annotations)
self.assertAnnotations(results, expected_annotations)
|
'Compare the results of a raw query against expected results'
| def assertProcessed(self, model, results, orig, expected_annotations=()):
| self.assertEqual(len(results), len(orig))
for (index, item) in enumerate(results):
orig_item = orig[index]
for annotation in expected_annotations:
setattr(orig_item, *annotation)
for field in model._meta.fields:
self.assertEqual(getattr(item, field.attname), getattr(orig_item, field.attname))
self.assertEqual(type(getattr(item, field.attname)), type(getattr(orig_item, field.attname)))
|
'Check that the results of a raw query contain no annotations'
| def assertNoAnnotations(self, results):
| self.assertAnnotations(results, ())
|
'Check that the passed raw query results contain the expected
annotations'
| def assertAnnotations(self, results, expected_annotations):
| if expected_annotations:
for (index, result) in enumerate(results):
(annotation, value) = expected_annotations[index]
self.assertTrue(hasattr(result, annotation))
self.assertEqual(getattr(result, annotation), value)
|
'Basic test of raw query with a simple database query'
| def testSimpleRawQuery(self):
| query = 'SELECT * FROM raw_query_author'
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
|
'Raw queries are lazy: they aren\'t actually executed until they\'re
iterated over.'
| def testRawQueryLazy(self):
| q = Author.objects.raw('SELECT * FROM raw_query_author')
self.assertTrue((q.query.cursor is None))
list(q)
self.assertTrue((q.query.cursor is not None))
|
'Test of a simple raw query against a model containing a foreign key'
| def testFkeyRawQuery(self):
| query = 'SELECT * FROM raw_query_book'
books = Book.objects.all()
self.assertSuccessfulRawQuery(Book, query, books)
|
'Test of a simple raw query against a model containing a field with
db_column defined.'
| def testDBColumnHandler(self):
| query = 'SELECT * FROM raw_query_coffee'
coffees = Coffee.objects.all()
self.assertSuccessfulRawQuery(Coffee, query, coffees)
|
'Test of a raw query\'s tolerance for columns being returned in any
order'
| def testOrderHandler(self):
| selects = ('dob, last_name, first_name, id', 'last_name, dob, first_name, id', 'first_name, last_name, dob, id')
for select in selects:
query = ('SELECT %s FROM raw_query_author' % select)
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors)
|
'Test of raw query\'s optional ability to translate unexpected result
column names to specific model fields'
| def testTranslations(self):
| query = 'SELECT first_name AS first, last_name AS last, dob, id FROM raw_query_author'
translations = {'first': 'first_name', 'last': 'last_name'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
|
'Test passing optional query parameters'
| def testParams(self):
| query = 'SELECT * FROM raw_query_author WHERE first_name = %s'
author = Author.objects.all()[2]
params = [author.first_name]
results = list(Author.objects.raw(query, params=params))
self.assertProcessed(Author, results, [author])
self.assertNoAnnotations(results)
self.assertEqual(len(results), 1)
|
'Test of a simple raw query against a model containing a m2m field'
| def testManyToMany(self):
| query = 'SELECT * FROM raw_query_reviewer'
reviewers = Reviewer.objects.all()
self.assertSuccessfulRawQuery(Reviewer, query, reviewers)
|
'Test to ensure that extra translations are ignored.'
| def testExtraConversions(self):
| query = 'SELECT * FROM raw_query_author'
translations = {'something': 'else'}
authors = Author.objects.all()
self.assertSuccessfulRawQuery(Author, query, authors, translations=translations)
|
'Test that an unknown command raises CommandError'
| def test_explode(self):
| self.assertRaises(CommandError, management.call_command, ('explode',))
|
'Exception raised in a command should raise CommandError with
call_command, but SystemExit when run from command line'
| def test_system_exit(self):
| with self.assertRaises(CommandError):
management.call_command('dance', example='raise')
old_stderr = sys.stderr
sys.stderr = err = StringIO()
try:
with self.assertRaises(SystemExit):
management.ManagementUtility(['manage.py', 'dance', '--example=raise']).execute()
finally:
sys.stderr = old_stderr
self.assertIn('CommandError', err.getvalue())
|
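The `test_system_exit` row exercises a test-only `dance` management command that raises when asked to. What follows is a hypothetical sketch of such a command (the option name is taken from the test invocation, the error message is invented), using the optparse-based `option_list` API that matches the era of this code.

```python
from optparse import make_option

from django.core.management.base import BaseCommand, CommandError

class Command(BaseCommand):
    help = "Test command that can be told to fail (sketch only)."
    option_list = BaseCommand.option_list + (
        make_option('--example', dest='example'),
    )

    def handle(self, *args, **options):
        if options.get('example') == 'raise':
            # Surfaces as CommandError via call_command(), and as SystemExit
            # plus a message on stderr when run through ManagementUtility.
            raise CommandError("Raised deliberately for the test")
        self.stdout.write("All is well.\n")
```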
'Make sure that an add form that is filled out but marked for deletion
doesn\'t cause validation errors.'
| def test_add_form_deletion_when_invalid(self):
| PoetFormSet = modelformset_factory(Poet, can_delete=True)
data = {u'form-TOTAL_FORMS': u'1', u'form-INITIAL_FORMS': u'0', u'form-MAX_NUM_FORMS': u'0', u'form-0-id': u'', u'form-0-name': (u'x' * 1000)}
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 0)
data[u'form-0-DELETE'] = u'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
|
'Make sure that a change form that is filled out but marked for deletion
doesn\'t cause validation errors.'
| def test_change_form_deletion_when_invalid(self):
| PoetFormSet = modelformset_factory(Poet, can_delete=True)
poet = Poet.objects.create(name=u'test')
data = {u'form-TOTAL_FORMS': u'1', u'form-INITIAL_FORMS': u'1', u'form-MAX_NUM_FORMS': u'0', u'form-0-id': six.text_type(poet.id), u'form-0-name': (u'x' * 1000)}
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), False)
self.assertEqual(Poet.objects.count(), 1)
data[u'form-0-DELETE'] = u'on'
formset = PoetFormSet(data, queryset=Poet.objects.all())
self.assertEqual(formset.is_valid(), True)
formset.save()
self.assertEqual(Poet.objects.count(), 0)
|
'Test that model_formset respects the fields and exclude parameters of a
custom form'
| def test_custom_form(self):
| class PostForm1(forms.ModelForm):
class Meta:
model = Post
fields = (u'title', u'posted')
class PostForm2(forms.ModelForm):
class Meta:
model = Post
exclude = (u'subtitle',)
PostFormSet = modelformset_factory(Post, form=PostForm1)
formset = PostFormSet()
self.assertFalse((u'subtitle' in formset.forms[0].fields))
PostFormSet = modelformset_factory(Post, form=PostForm2)
formset = PostFormSet()
self.assertFalse((u'subtitle' in formset.forms[0].fields))
|
'Field instances have a `__lt__` comparison function to define an
ordering based on their creation. Prior to #17851 this ordering
comparison relied on the now unsupported `__cmp__` and assumed the
compared objects were both Field instances, raising `AttributeError`
when it should have returned `NotImplemented`.'
| def test_field_ordering(self):
| f1 = Field()
f2 = Field(auto_created=True)
f3 = Field()
self.assertTrue((f2 < f1))
self.assertTrue((f3 > f1))
self.assertFalse((f1 == None))
self.assertFalse((f2 in (None, 1, u'')))
|
'Test that ugettext_lazy objects work when saving model instances
through various methods. Refs #10498.'
| def test_create_relation_with_ugettext_lazy(self):
| notlazy = u'test'
lazy = ugettext_lazy(notlazy)
reporter = Article.objects.create(headline=lazy, pub_date=datetime.now())
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
article.headline = lazy
article.save()
self.assertEqual(article.headline, notlazy)
Article.objects.update(headline=lazy)
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
Article.objects.all().delete()
Article.objects.bulk_create([Article(headline=lazy, pub_date=datetime.now())])
article = Article.objects.get()
self.assertEqual(article.headline, notlazy)
|
'Test cases can load fixture objects into models defined in packages'
| def testClassFixtures(self):
| self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), [u'Django conquers world!', u'Copyright is fine the way it is', u'Poker has no place on ESPN'], (lambda a: a.headline))
|
'Fixtures can load initial data into models defined in packages'
| def test_initial_data(self):
| self.assertQuerysetEqual(Book.objects.all(), [u'Achieving self-awareness of Python programs'], (lambda a: a.name))
|
'Fixtures can load data into models defined in packages'
| def test_loaddata(self):
| management.call_command(u'loaddata', u'fixture1.json', verbosity=0, commit=False)
self.assertQuerysetEqual(Article.objects.all(), [u'Time to reform copyright', u'Poker has no place on ESPN'], (lambda a: a.headline))
management.call_command(u'loaddata', u'fixture2.json', verbosity=0, commit=False)
self.assertQuerysetEqual(Article.objects.all(), [u'Django conquers world!', u'Copyright is fine the way it is', u'Poker has no place on ESPN'], (lambda a: a.headline))
management.call_command(u'loaddata', u'unknown.json', verbosity=0, commit=False)
self.assertQuerysetEqual(Article.objects.all(), [u'Django conquers world!', u'Copyright is fine the way it is', u'Poker has no place on ESPN'], (lambda a: a.headline))
|
'Check that test case has installed 3 fixture objects'
| def testClassFixtures(self):
| self.assertEqual(Article.objects.count(), 3)
self.assertQuerysetEqual(Article.objects.all(), ['<Article: Django conquers world!>', '<Article: Copyright is fine the way it is>', '<Article: Poker has no place on ESPN>'])
|
'Verifies that loading a fixture which contains an invalid object
outputs an error message which contains the pk of the object
that triggered the error.'
| def test_loaddata_error_message(self):
| if (connection.vendor == 'mysql'):
connection.cursor().execute("SET sql_mode = 'TRADITIONAL'")
with self.assertRaises(IntegrityError) as cm:
management.call_command('loaddata', 'invalid.json', verbosity=0, commit=False)
self.assertIn('Could not load fixtures.Article(pk=1):', cm.exception.args[0])
|
'Helper to create a complete tree.'
| def create_tree(self, stringtree):
| names = stringtree.split()
models = [Domain, Kingdom, Phylum, Klass, Order, Family, Genus, Species]
assert (len(names) == len(models)), (names, models)
parent = None
for (name, model) in zip(names, models):
try:
obj = model.objects.get(name=name)
except model.DoesNotExist:
obj = model(name=name)
if parent:
setattr(obj, parent.__class__.__name__.lower(), parent)
obj.save()
parent = obj
|
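`create_tree` above walks a fixed chain of taxonomy models and relies on each level having a ForeignKey attribute named after the lowercased class of the previous level, which is what the `setattr(obj, parent.__class__.__name__.lower(), parent)` line uses. A sketch of the assumed chain, with field details invented:

```python
from django.db import models

class Domain(models.Model):
    name = models.CharField(max_length=50)

class Kingdom(models.Model):
    name = models.CharField(max_length=50)
    domain = models.ForeignKey(Domain)

# ...and so on down the chain, each level pointing at the one above it:
# Phylum(kingdom), Klass(phylum), Order(klass), Family(order),
# Genus(family), Species(genus).
```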
'Normally, accessing FKs doesn\'t fill in related objects'
| def test_access_fks_without_select_related(self):
| with self.assertNumQueries(8):
fly = Species.objects.get(name=u'melanogaster')
domain = fly.genus.family.order.klass.phylum.kingdom.domain
self.assertEqual(domain.name, u'Eukaryota')
|
'A select_related() call will fill in those related objects without any
extra queries'
| def test_access_fks_with_select_related(self):
| with self.assertNumQueries(1):
person = Species.objects.select_related(depth=10).get(name=u'sapiens')
domain = person.genus.family.order.klass.phylum.kingdom.domain
self.assertEqual(domain.name, u'Eukaryota')
|
'select_related() also of course applies to entire lists, not just
items. This test verifies the expected behavior without select_related.'
| def test_list_without_select_related(self):
| with self.assertNumQueries(9):
world = Species.objects.all()
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [u'Amanitacae', u'Drosophilidae', u'Fabaceae', u'Hominidae'])
|
'select_related() also of course applies to entire lists, not just
items. This test verifies the expected behavior with select_related.'
| def test_list_with_select_related(self):
| with self.assertNumQueries(1):
world = Species.objects.all().select_related()
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [u'Amanitacae', u'Drosophilidae', u'Fabaceae', u'Hominidae'])
|
'The "depth" argument to select_related() will stop the descent at a
particular level.'
| def test_depth(self, depth=1, expected=7):
| with self.assertNumQueries(expected):
pea = Species.objects.select_related(depth=depth).get(name=u'sativum')
self.assertEqual(pea.genus.family.order.klass.phylum.kingdom.domain.name, u'Eukaryota')
|
'The "depth" argument to select_related() will stop the descent at a
particular level. This tests a larger depth value.'
| def test_larger_depth(self):
| self.test_depth(depth=5, expected=3)
|
'The "depth" argument to select_related() will stop the descent at a
particular level. This can be used on lists as well.'
| def test_list_with_depth(self):
| with self.assertNumQueries(5):
world = Species.objects.all().select_related(depth=2)
orders = [o.genus.family.order.name for o in world]
self.assertEqual(sorted(orders), [u'Agaricales', u'Diptera', u'Fabales', u'Primates'])
|
'The optional fields passed to select_related() control which related
models we pull in. This allows for smaller queries and can act as an
alternative to (or in addition to) the depth parameter.
In this case, we explicitly say to select the \'genus\' and
\'genus.family\' models, leading to the same number of queries as before.'
| def test_certain_fields(self):
| with self.assertNumQueries(1):
world = Species.objects.select_related(u'genus__family')
families = [o.genus.family.name for o in world]
self.assertEqual(sorted(families), [u'Amanitacae', u'Drosophilidae', u'Fabaceae', u'Hominidae'])
|
'In this case, we explicitly say to select the \'genus\' and
\'genus.family\' models, leading to the same number of queries as before.'
| def test_more_certain_fields(self):
| with self.assertNumQueries(2):
world = Species.objects.filter(genus__name=u'Amanita').select_related(u'genus__family')
orders = [o.genus.family.order.name for o in world]
self.assertEqual(orders, [u'Agaricales'])
|
'Make sure an unsaved object\'s related managers always return an instance
of the same class that the manager\'s `get_query_set` returns. Refs #19652.'
| def test_related_manager(self):
| rel_qs = RelatedObject().objs.all()
self.assertIsInstance(rel_qs, ObjectQuerySet)
with self.assertNumQueries(0):
self.assertFalse(rel_qs.exists())
|
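The `test_related_manager` row depends on a reverse relation named `objs` whose manager builds a custom `ObjectQuerySet` via `get_query_set()` (the pre-1.6 spelling). A hypothetical sketch of such a manager/queryset pair, with all names inferred from the test body:

```python
from django.db import models
from django.db.models.query import QuerySet

class ObjectQuerySet(QuerySet):
    pass

class ObjectManager(models.Manager):
    def get_query_set(self):
        return ObjectQuerySet(self.model, using=self._db)

class RelatedObject(models.Model):
    pass

class Object(models.Model):
    related = models.ForeignKey(RelatedObject, related_name='objs')
    objects = ObjectManager()
```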
'Ensure that \'pk\' works as an ordering option in Meta.
Refs #8291.'
| def test_order_by_pk(self):
| a1 = ArticlePKOrdering.objects.create(pk=1, headline='Article 1', pub_date=datetime(2005, 7, 26))
a2 = ArticlePKOrdering.objects.create(pk=2, headline='Article 2', pub_date=datetime(2005, 7, 27))
a3 = ArticlePKOrdering.objects.create(pk=3, headline='Article 3', pub_date=datetime(2005, 7, 27))
a4 = ArticlePKOrdering.objects.create(pk=4, headline='Article 4', pub_date=datetime(2005, 7, 28))
self.assertQuerysetEqual(ArticlePKOrdering.objects.all(), ['Article 4', 'Article 3', 'Article 2', 'Article 1'], attrgetter('headline'))
|
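A short sketch of the `Meta.ordering` declaration that the `test_order_by_pk` row exercises; the expected headline order (4, 3, 2, 1) implies a descending `'-pk'` ordering, while the field definitions are assumptions.

```python
from django.db import models

class ArticlePKOrdering(models.Model):
    headline = models.CharField(max_length=100)
    pub_date = models.DateTimeField()

    class Meta:
        ordering = ('-pk',)  # 'pk' is valid in Meta.ordering (refs #8291)
```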
'The default behavior is to autocommit after each save() action.'
| @skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
| self.assertRaises(Exception, self.create_a_reporter_then_fail, 'Alice', 'Smith')
self.assertEqual(Reporter.objects.count(), 1)
|
'The autocommit decorator works exactly the same as the default behavior.'
| @skipUnlessDBFeature('supports_transactions')
def test_autocommit_decorator(self):
| autocomitted_create_then_fail = transaction.autocommit(self.create_a_reporter_then_fail)
self.assertRaises(Exception, autocomitted_create_then_fail, 'Alice', 'Smith')
self.assertEqual(Reporter.objects.count(), 1)
|
'The autocommit decorator also works with a using argument.'
| @skipUnlessDBFeature('supports_transactions')
def test_autocommit_decorator_with_using(self):
| autocomitted_create_then_fail = transaction.autocommit(using='default')(self.create_a_reporter_then_fail)
self.assertRaises(Exception, autocomitted_create_then_fail, 'Alice', 'Smith')
self.assertEqual(Reporter.objects.count(), 1)
|
'With the commit_on_success decorator, the transaction is only committed
if the function doesn\'t throw an exception.'
| @skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
| committed_on_success = transaction.commit_on_success(self.create_a_reporter_then_fail)
self.assertRaises(Exception, committed_on_success, 'Dirk', 'Gently')
self.assertEqual(Reporter.objects.count(), 0)
|
'The commit_on_success decorator also works with a using argument.'
| @skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
| using_committed_on_success = transaction.commit_on_success(using='default')(self.create_a_reporter_then_fail)
self.assertRaises(Exception, using_committed_on_success, 'Dirk', 'Gently')
self.assertEqual(Reporter.objects.count(), 0)
|
'If there aren\'t any exceptions, the data will get saved.'
| @skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
| Reporter.objects.create(first_name='Alice', last_name='Smith')
remove_comitted_on_success = transaction.commit_on_success(self.remove_a_reporter)
remove_comitted_on_success('Alice')
self.assertEqual(list(Reporter.objects.all()), [])
|
'You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.'
| @skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
| manually_managed = transaction.commit_manually(self.manually_managed)
manually_managed()
self.assertEqual(Reporter.objects.count(), 1)
|
'If you forget, you\'ll get bad errors.'
| @skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
| manually_managed_mistake = transaction.commit_manually(self.manually_managed_mistake)
self.assertRaises(transaction.TransactionManagementError, manually_managed_mistake)
|
'The commit_manually function also works with a using argument.'
| @skipUnlessDBFeature('supports_transactions')
def test_manually_managed_with_using(self):
| using_manually_managed_mistake = transaction.commit_manually(using='default')(self.manually_managed_mistake)
self.assertRaises(transaction.TransactionManagementError, using_manually_managed_mistake)
|
'Regression for #11900: If a function wrapped by commit_on_success
writes a transaction that can\'t be committed, that transaction should
be rolled back. The bug is only visible using the psycopg2 backend,
though the fix is generally a good idea.'
| @skipUnlessDBFeature('requires_rollback_on_dirty_transaction')
def test_bad_sql(self):
| execute_bad_sql = transaction.commit_on_success(self.execute_bad_sql)
self.assertRaises(IntegrityError, execute_bad_sql)
transaction.rollback()
|
'The default behavior is to autocommit after each save() action.'
| @skipUnlessDBFeature('supports_transactions')
def test_autocommit(self):
| with self.assertRaises(Exception):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
|
'The autocommit context manager works exactly the same as the default
behavior.'
| @skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager(self):
| with self.assertRaises(Exception):
with transaction.autocommit():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
|
'The autocommit context manager also works with a using argument.'
| @skipUnlessDBFeature('supports_transactions')
def test_autocommit_context_manager_with_using(self):
| with self.assertRaises(Exception):
with transaction.autocommit(using='default'):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 1)
|
'With the commit_on_success context manager, the transaction is only
committed if the block doesn\'t throw an exception.'
| @skipUnlessDBFeature('supports_transactions')
def test_commit_on_success(self):
| with self.assertRaises(Exception):
with transaction.commit_on_success():
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
|
'The commit_on_success context manager also works with a using argument.'
| @skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_with_using(self):
| with self.assertRaises(Exception):
with transaction.commit_on_success(using='default'):
self.create_reporter_and_fail()
self.assertEqual(Reporter.objects.count(), 0)
|
'If there aren\'t any exceptions, the data will get saved.'
| @skipUnlessDBFeature('supports_transactions')
def test_commit_on_success_succeed(self):
| Reporter.objects.create(first_name='Alice', last_name='Smith')
with transaction.commit_on_success():
Reporter.objects.filter(first_name='Alice').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
|
'You can manually manage transactions if you really want to, but you
have to remember to commit/rollback.'
| @skipUnlessDBFeature('supports_transactions')
def test_manually_managed(self):
| with transaction.commit_manually():
Reporter.objects.create(first_name='Libby', last_name='Holtzman')
transaction.commit()
self.assertEqual(Reporter.objects.count(), 1)
|
'If you forget, you\'ll get bad errors.'
| @skipUnlessDBFeature('supports_transactions')
def test_manually_managed_mistake(self):
| with self.assertRaises(transaction.TransactionManagementError):
with transaction.commit_manually():
Reporter.objects.create(first_name='Scott', last_name='Browning')
|