Dataset schema (column, type, value range):

column          type            values
repo_name       stringlengths   5 to 92
path            stringlengths   4 to 232
copies          stringclasses   19 values
size            stringlengths   4 to 7
content         stringlengths   721 to 1.04M
license         stringclasses   15 values
hash            int64           -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean       float64         6.51 to 99.9
line_max        int64           15 to 997
alpha_frac      float64         0.25 to 0.97
autogenerated   bool            1 class
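The schema lists per-file statistics alongside each source file. A minimal sketch of how such statistics could be derived is below; it assumes (not confirmed by the schema itself) that line_mean is the average line length in characters, line_max the longest line, and alpha_frac the fraction of alphabetic characters in the file.

# Hypothetical helper illustrating the assumed meaning of the statistic columns.
def file_stats(source: str) -> dict:
    lines = source.splitlines() or ['']
    lengths = [len(line) for line in lines]
    return {
        'line_mean': sum(lengths) / len(lengths),          # average line length
        'line_max': max(lengths),                          # longest line
        'alpha_frac': sum(c.isalpha() for c in source) / max(len(source), 1),
    }

# Example: statistics for a two-line snippet.
print(file_stats("def f(x):\n    return x * 2\n"))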
repo_name: JesusMtnez/devexperto-challenge
path: jesusmtnez/python/koans/koans/about_generators.py
copies: 1
size: 4606
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Written in place of AboutBlocks in the Ruby Koans
#
# Note: Both blocks and generators use a yield keyword, but they behave
# a lot differently
#

from runner.koan import *


class AboutGenerators(Koan):

    def test_generating_values_on_the_fly(self):
        result = list()
        bacon_generator = (n + ' bacon' for n in ['crunchy','veggie','danish'])

        for bacon in bacon_generator:
            result.append(bacon)

        self.assertEqual(['crunchy bacon', 'veggie bacon', 'danish bacon'], result)

    def test_generators_are_different_to_list_comprehensions(self):
        num_list = [x*2 for x in range(1,3)]
        num_generator = (x*2 for x in range(1,3))

        self.assertEqual(2, num_list[0])

        # A generator has to be iterated through.
        with self.assertRaises(TypeError):
            num = num_generator[0]

        self.assertEqual(2, list(num_generator)[0])

        # Both list comprehensions and generators can be iterated though. However, a generator
        # function is only called on the first iteration. The values are generated on the fly
        # instead of stored.
        #
        # Generators are more memory friendly, but less versatile

    def test_generator_expressions_are_a_one_shot_deal(self):
        dynamite = ('Boom!' for n in range(3))

        attempt1 = list(dynamite)
        attempt2 = list(dynamite)

        self.assertEqual(['Boom!', 'Boom!', 'Boom!'], list(attempt1))
        self.assertEqual(list(), list(attempt2))

    # ------------------------------------------------------------------

    def simple_generator_method(self):
        yield 'peanut'
        yield 'butter'
        yield 'and'
        yield 'jelly'

    def test_generator_method_will_yield_values_during_iteration(self):
        result = list()
        for item in self.simple_generator_method():
            result.append(item)
        self.assertEqual(['peanut', 'butter', 'and', 'jelly'], result)

    def test_coroutines_can_take_arguments(self):
        result = self.simple_generator_method()
        self.assertEqual('peanut', next(result))
        self.assertEqual('butter', next(result))
        result.close()

    # ------------------------------------------------------------------

    def square_me(self, seq):
        for x in seq:
            yield x * x

    def test_generator_method_with_parameter(self):
        result = self.square_me(range(2,5))
        self.assertEqual([4, 9, 16], list(result))

    # ------------------------------------------------------------------

    def sum_it(self, seq):
        value = 0
        for num in seq:
            # The local state of 'value' will be retained between iterations
            value += num
            yield value

    def test_generator_keeps_track_of_local_variables(self):
        result = self.sum_it(range(2,5))
        self.assertEqual([2, 5, 9], list(result))

    # ------------------------------------------------------------------

    def generator_with_coroutine(self):
        result = yield
        yield result

    def test_generators_can_take_coroutines(self):
        generator = self.generator_with_coroutine()

        # THINK ABOUT IT:
        # Why is this line necessary?
        #
        # Hint: Read the "Specification: Sending Values into Generators"
        # section of http://www.python.org/dev/peps/pep-0342/
        next(generator)

        self.assertEqual(3, generator.send(1 + 2))

    def test_before_sending_a_value_to_a_generator_next_must_be_called(self):
        generator = self.generator_with_coroutine()

        try:
            generator.send(1+2)
        except TypeError as ex:
            ex2 = ex

        self.assertRegex(ex2.args[0], "can't send non-None value to a just-started generator")

    # ------------------------------------------------------------------

    def yield_tester(self):
        value = yield
        if value:
            yield value
        else:
            yield 'no value'

    def test_generators_can_see_if_they_have_been_called_with_a_value(self):
        generator = self.yield_tester()
        next(generator)
        self.assertEqual('with value', generator.send('with value'))

        generator2 = self.yield_tester()
        next(generator2)
        self.assertEqual('no value', next(generator2))

    def test_send_none_is_equivalent_to_next(self):
        generator = self.yield_tester()
        next(generator)
        # 'next(generator)' is exactly equivalent to 'generator.send(None)'
        self.assertEqual('no value', generator.send(None))
license: mit
hash: 4,035,224,991,255,879,000
line_mean: 30.986111
line_max: 94
alpha_frac: 0.566001
autogenerated: false
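The koans file above notes that generators are more memory friendly than list comprehensions because values are produced on the fly instead of stored. A small sketch illustrating that point, assuming CPython (exact sizes vary by version and platform):

# The list stores every element up front; the generator object stays a fixed,
# small size and produces values lazily as it is iterated.
import sys

squares_list = [x * x for x in range(100_000)]   # all 100,000 results held in memory
squares_gen = (x * x for x in range(100_000))    # results computed on demand

print(sys.getsizeof(squares_list))  # typically hundreds of kilobytes
print(sys.getsizeof(squares_gen))   # typically on the order of 100-200 bytes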
repo_name: shh-dlce/pulotu
path: website/apps/survey/migrations/0050_auto__del_unique_question_simplified_question.py
copies: 1
size: 13852
content:
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Removing unique constraint on 'Question', fields ['simplified_question'] db.delete_unique('questions', ['simplified_question']) def backwards(self, orm): # Adding unique constraint on 'Question', fields ['simplified_question'] db.create_unique('questions', ['simplified_question']) models = { u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, u'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, u'core.culture': { 'Meta': {'ordering': "['culture']", 'object_name': 'Culture', 'db_table': "'cultures'"}, 'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'coder': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 
'blank': 'True'}), 'culture': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'db_index': 'True'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'fact': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'languages': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['core.Language']", 'symmetrical': 'False', 'blank': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}) }, u'core.language': { 'Meta': {'ordering': "['language']", 'unique_together': "(('isocode', 'language'),)", 'object_name': 'Language', 'db_table': "'languages'"}, 'abvdcode': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'classification': ('django.db.models.fields.TextField', [], {}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'isocode': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '3', 'db_index': 'True'}), 'language': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}) }, u'core.section': { 'Meta': {'ordering': "['id']", 'object_name': 'Section', 'db_table': "'sections'"}, 'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'section': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}) }, u'core.source': { 'Meta': {'ordering': "['author', 'year']", 'unique_together': "(['author', 'year'],)", 'object_name': 'Source', 'db_table': "'sources'", 'index_together': "[['author', 'year']]"}, 'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'author': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'bibtex': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'reference': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '1000'}), 'year': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'null': 'True', 'blank': 'True'}) }, u'survey.floatresponse': { 'Meta': {'object_name': 'FloatResponse', 'db_table': "'responses_floats'", '_ormbases': [u'survey.Response']}, 'response': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 
'True'}) }, u'survey.integerresponse': { 'Meta': {'object_name': 'IntegerResponse', 'db_table': "'responses_integers'", '_ormbases': [u'survey.Response']}, 'response': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'}) }, u'survey.optionquestion': { 'Meta': {'object_name': 'OptionQuestion', 'db_table': "'questions_option'", '_ormbases': [u'survey.Question']}, 'options': ('django.db.models.fields.TextField', [], {}), u'question_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Question']", 'unique': 'True', 'primary_key': 'True'}) }, u'survey.optionresponse': { 'Meta': {'object_name': 'OptionResponse', 'db_table': "'responses_options'", '_ormbases': [u'survey.Response']}, 'response': ('django.db.models.fields.CharField', [], {'max_length': '3', 'null': 'True', 'blank': 'True'}), u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'}), 'response_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}) }, u'survey.question': { 'Meta': {'object_name': 'Question', 'db_table': "'questions'"}, 'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'editor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'information': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'number': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.question_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}), 'question': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}), 'response_type': ('django.db.models.fields.CharField', [], {'default': "'Int'", 'max_length': '6'}), 'section': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Section']"}), 'simplified_question': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, u'survey.response': { 'Meta': {'unique_together': "(('question', 'culture'),)", 'object_name': 'Response', 'db_table': "'responses'"}, 'added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'author': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}), 'codersnotes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'culture': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Culture']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'missing': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'page1': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}), 'page2': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}), 'page3': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}), 'page4': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}), 'page5': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 
'True', 'blank': 'True'}), 'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'polymorphic_survey.response_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}), 'question': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['survey.Question']"}), 'source1': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source1'", 'null': 'True', 'to': u"orm['core.Source']"}), 'source2': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source2'", 'null': 'True', 'to': u"orm['core.Source']"}), 'source3': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source3'", 'null': 'True', 'to': u"orm['core.Source']"}), 'source4': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source4'", 'null': 'True', 'to': u"orm['core.Source']"}), 'source5': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'source5'", 'null': 'True', 'to': u"orm['core.Source']"}), 'uncertainty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'survey.textresponse': { 'Meta': {'object_name': 'TextResponse', 'db_table': "'responses_texts'", '_ormbases': [u'survey.Response']}, 'response': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), u'response_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['survey.Response']", 'unique': 'True', 'primary_key': 'True'}) } } complete_apps = ['survey']
license: apache-2.0
hash: 9,183,734,929,371,984,000
line_mean: 83.98773
line_max: 198
alpha_frac: 0.552989
autogenerated: false
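The South migration above drops the unique constraint on Question.simplified_question with db.delete_unique. For orientation, a hedged sketch of how the same schema change might be expressed as a present-day Django migration; the field arguments are read from the frozen ORM definition above (CharField, max_length 255, null=True, blank=True), while the app label and the predecessor migration name are assumptions:

# Hypothetical modern equivalent of the South migration shown above.
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('survey', '0049_previous_migration'),  # hypothetical predecessor
    ]

    operations = [
        # Redefining the field without unique=True drops the unique constraint.
        migrations.AlterField(
            model_name='question',
            name='simplified_question',
            field=models.CharField(max_length=255, null=True, blank=True),
        ),
    ]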
repo_name: tangentlabs/django-oscar-fancypages
path: oscar_fancypages/fancypages/migrations/0001_initial.py
copies: 1
size: 56653
content:
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): depends_on = ( ('assets', '0001_initial'), ('catalogue', '0009_auto__add_field_product_rating'), ('promotions', '0001_initial'), ('offer', '0001_initial'), ) def forwards(self, orm): # Adding model 'FancyPage' db.create_table('fancypages_fancypage', ( ('category_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['catalogue.Category'], unique=True, primary_key=True)), ('page_type', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='pages', null=True, to=orm['fancypages.PageType'])), ('keywords', self.gf('django.db.models.fields.CharField')(max_length=255, blank=True)), ('status', self.gf('django.db.models.fields.CharField')(default=u'draft', max_length=15)), ('date_visible_start', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('date_visible_end', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), )) db.send_create_signal('fancypages', ['FancyPage']) # Adding M2M table for field visibility_types on 'FancyPage' db.create_table('fancypages_fancypage_visibility_types', ( ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)), ('fancypage', models.ForeignKey(orm['fancypages.fancypage'], null=False)), ('visibilitytype', models.ForeignKey(orm['fancypages.visibilitytype'], null=False)) )) db.create_unique('fancypages_fancypage_visibility_types', ['fancypage_id', 'visibilitytype_id']) # Adding model 'PageType' db.create_table('fancypages_pagetype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=128)), ('template_name', self.gf('django.db.models.fields.CharField')(max_length=255)), )) db.send_create_signal('fancypages', ['PageType']) # Adding model 'VisibilityType' db.create_table('fancypages_visibilitytype', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=128)), ('slug', self.gf('django.db.models.fields.SlugField')(max_length=128, null=True, blank=True)), )) db.send_create_signal('fancypages', ['VisibilityType']) # Adding model 'Container' db.create_table('fancypages_container', ( ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('name', self.gf('django.db.models.fields.SlugField')(max_length=50, blank=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100, blank=True)), ('content_type', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['contenttypes.ContentType'], null=True)), ('object_id', self.gf('django.db.models.fields.PositiveIntegerField')(null=True)), )) db.send_create_signal('fancypages', ['Container']) # Adding unique constraint on 'Container', fields ['name', 'content_type', 'object_id'] db.create_unique('fancypages_container', ['name', 'content_type_id', 'object_id']) # Adding model 'OrderedContainer' db.create_table('fancypages_orderedcontainer', ( ('container_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.Container'], unique=True, primary_key=True)), ('display_order', self.gf('django.db.models.fields.PositiveIntegerField')()), )) db.send_create_signal('fancypages', ['OrderedContainer']) # Adding model 'ContentBlock' db.create_table('fancypages_contentblock', ( 
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('container', self.gf('django.db.models.fields.related.ForeignKey')(related_name='blocks', to=orm['fancypages.Container'])), ('display_order', self.gf('django.db.models.fields.PositiveIntegerField')()), )) db.send_create_signal('fancypages', ['ContentBlock']) # Adding model 'TextBlock' db.create_table('fancypages_textblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('text', self.gf('django.db.models.fields.TextField')(default='Your text goes here.')), )) db.send_create_signal('fancypages', ['TextBlock']) # Adding model 'TitleTextBlock' db.create_table('fancypages_titletextblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(default='Your title goes here.', max_length=100)), ('text', self.gf('django.db.models.fields.TextField')(default='Your text goes here.')), )) db.send_create_signal('fancypages', ['TitleTextBlock']) # Adding model 'ImageBlock' db.create_table('fancypages_imageblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)), ('alt_text', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)), ('link', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_asset', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='image_blocks', null=True, to=orm['assets.ImageAsset'])), )) db.send_create_signal('fancypages', ['ImageBlock']) # Adding model 'ImageAndTextBlock' db.create_table('fancypages_imageandtextblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)), ('alt_text', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)), ('link', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_asset', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='image_text_blocks', null=True, to=orm['assets.ImageAsset'])), ('text', self.gf('django.db.models.fields.CharField')(default='Your text goes here.', max_length=2000)), )) db.send_create_signal('fancypages', ['ImageAndTextBlock']) # Adding model 'CarouselBlock' db.create_table('fancypages_carouselblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('image_1', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_1', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_2', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_2', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_3', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_3', 
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_4', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_4', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_5', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_5', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_6', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_6', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_7', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_7', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_8', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_8', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_9', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_9', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), ('image_10', self.gf('fancypages.assets.fields.AssetKey')(blank=True, related_name='+', null=True, to=orm['assets.ImageAsset'])), ('link_url_10', self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True)), )) db.send_create_signal('fancypages', ['CarouselBlock']) # Adding model 'PageNavigationBlock' db.create_table('fancypages_pagenavigationblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), )) db.send_create_signal('fancypages', ['PageNavigationBlock']) # Adding model 'PrimaryNavigationBlock' db.create_table('fancypages_primarynavigationblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), )) db.send_create_signal('fancypages', ['PrimaryNavigationBlock']) # Adding model 'TabBlock' db.create_table('fancypages_tabblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), )) db.send_create_signal('fancypages', ['TabBlock']) # Adding model 'TwoColumnLayoutBlock' db.create_table('fancypages_twocolumnlayoutblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('left_width', self.gf('django.db.models.fields.PositiveIntegerField')(default=6, max_length=3)), )) db.send_create_signal('fancypages', ['TwoColumnLayoutBlock']) # Adding model 'ThreeColumnLayoutBlock' db.create_table('fancypages_threecolumnlayoutblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), )) db.send_create_signal('fancypages', ['ThreeColumnLayoutBlock']) # Adding model 'FourColumnLayoutBlock' db.create_table('fancypages_fourcolumnlayoutblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], 
unique=True, primary_key=True)), )) db.send_create_signal('fancypages', ['FourColumnLayoutBlock']) # Adding model 'VideoBlock' db.create_table('fancypages_videoblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('source', self.gf('django.db.models.fields.CharField')(max_length=50)), ('video_code', self.gf('django.db.models.fields.CharField')(max_length=50)), )) db.send_create_signal('fancypages', ['VideoBlock']) # Adding model 'TwitterBlock' db.create_table('fancypages_twitterblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('username', self.gf('django.db.models.fields.CharField')(max_length=50)), ('max_tweets', self.gf('django.db.models.fields.PositiveIntegerField')(default=5)), )) db.send_create_signal('fancypages', ['TwitterBlock']) # Adding model 'SingleProductBlock' db.create_table('fancypages_singleproductblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('product', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catalogue.Product'], null=True)), )) db.send_create_signal('fancypages', ['SingleProductBlock']) # Adding model 'HandPickedProductsPromotionBlock' db.create_table('fancypages_handpickedproductspromotionblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('promotion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['promotions.HandPickedProductList'], null=True)), )) db.send_create_signal('fancypages', ['HandPickedProductsPromotionBlock']) # Adding model 'AutomaticProductsPromotionBlock' db.create_table('fancypages_automaticproductspromotionblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('promotion', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['promotions.AutomaticProductList'], null=True)), )) db.send_create_signal('fancypages', ['AutomaticProductsPromotionBlock']) # Adding model 'OfferBlock' db.create_table('fancypages_offerblock', ( ('contentblock_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['fancypages.ContentBlock'], unique=True, primary_key=True)), ('offer', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['offer.ConditionalOffer'], null=True)), )) db.send_create_signal('fancypages', ['OfferBlock']) def backwards(self, orm): # Removing unique constraint on 'Container', fields ['name', 'content_type', 'object_id'] db.delete_unique('fancypages_container', ['name', 'content_type_id', 'object_id']) # Deleting model 'FancyPage' db.delete_table('fancypages_fancypage') # Removing M2M table for field visibility_types on 'FancyPage' db.delete_table('fancypages_fancypage_visibility_types') # Deleting model 'PageType' db.delete_table('fancypages_pagetype') # Deleting model 'VisibilityType' db.delete_table('fancypages_visibilitytype') # Deleting model 'Container' db.delete_table('fancypages_container') # Deleting model 'OrderedContainer' db.delete_table('fancypages_orderedcontainer') # Deleting model 'ContentBlock' db.delete_table('fancypages_contentblock') # Deleting model 'TextBlock' db.delete_table('fancypages_textblock') # Deleting model 'TitleTextBlock' 
db.delete_table('fancypages_titletextblock') # Deleting model 'ImageBlock' db.delete_table('fancypages_imageblock') # Deleting model 'ImageAndTextBlock' db.delete_table('fancypages_imageandtextblock') # Deleting model 'CarouselBlock' db.delete_table('fancypages_carouselblock') # Deleting model 'PageNavigationBlock' db.delete_table('fancypages_pagenavigationblock') # Deleting model 'PrimaryNavigationBlock' db.delete_table('fancypages_primarynavigationblock') # Deleting model 'TabBlock' db.delete_table('fancypages_tabblock') # Deleting model 'TwoColumnLayoutBlock' db.delete_table('fancypages_twocolumnlayoutblock') # Deleting model 'ThreeColumnLayoutBlock' db.delete_table('fancypages_threecolumnlayoutblock') # Deleting model 'FourColumnLayoutBlock' db.delete_table('fancypages_fourcolumnlayoutblock') # Deleting model 'VideoBlock' db.delete_table('fancypages_videoblock') # Deleting model 'TwitterBlock' db.delete_table('fancypages_twitterblock') # Deleting model 'SingleProductBlock' db.delete_table('fancypages_singleproductblock') # Deleting model 'HandPickedProductsPromotionBlock' db.delete_table('fancypages_handpickedproductspromotionblock') # Deleting model 'AutomaticProductsPromotionBlock' db.delete_table('fancypages_automaticproductspromotionblock') # Deleting model 'OfferBlock' db.delete_table('fancypages_offerblock') models = { 'assets.imageasset': { 'Meta': {'object_name': 'ImageAsset'}, 'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'default': "''"}), 'height': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'width': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}) }, 'auth.group': { 'Meta': {'object_name': 'Group'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, 'auth.permission': { 'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', 
[], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'catalogue.attributeentity': { 'Meta': {'object_name': 'AttributeEntity'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}), 'type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'entities'", 'to': "orm['catalogue.AttributeEntityType']"}) }, 'catalogue.attributeentitytype': { 'Meta': {'object_name': 'AttributeEntityType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255', 'blank': 'True'}) }, 'catalogue.attributeoption': { 'Meta': {'object_name': 'AttributeOption'}, 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'options'", 'to': "orm['catalogue.AttributeOptionGroup']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'option': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'catalogue.attributeoptiongroup': { 'Meta': {'object_name': 'AttributeOptionGroup'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}) }, 'catalogue.category': { 'Meta': {'ordering': "['full_name']", 'object_name': 'Category'}, 'depth': ('django.db.models.fields.PositiveIntegerField', [], {}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}), 'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}) }, 'catalogue.option': { 'Meta': {'object_name': 'Option'}, 'code': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'Required'", 
'max_length': '128'}) }, 'catalogue.product': { 'Meta': {'ordering': "['-date_created']", 'object_name': 'Product'}, 'attributes': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.ProductAttribute']", 'through': "orm['catalogue.ProductAttributeValue']", 'symmetrical': 'False'}), 'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Category']", 'through': "orm['catalogue.ProductCategory']", 'symmetrical': 'False'}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_discountable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'variants'", 'null': 'True', 'to': "orm['catalogue.Product']"}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.ProductClass']", 'null': 'True'}), 'product_options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'rating': ('django.db.models.fields.FloatField', [], {'null': 'True'}), 'recommended_products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Product']", 'symmetrical': 'False', 'through': "orm['catalogue.ProductRecommendation']", 'blank': 'True'}), 'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'relations'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'score': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'db_index': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}), 'status': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'upc': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, 'catalogue.productattribute': { 'Meta': {'ordering': "['code']", 'object_name': 'ProductAttribute'}, 'code': ('django.db.models.fields.SlugField', [], {'max_length': '128'}), 'entity_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntityType']", 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'option_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOptionGroup']", 'null': 'True', 'blank': 'True'}), 'product_class': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'attributes'", 'null': 'True', 'to': "orm['catalogue.ProductClass']"}), 'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'type': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '20'}) }, 'catalogue.productattributevalue': { 'Meta': {'object_name': 'ProductAttributeValue'}, 'attribute': ('django.db.models.fields.related.ForeignKey', [], {'to': 
"orm['catalogue.ProductAttribute']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'attribute_values'", 'to': "orm['catalogue.Product']"}), 'value_boolean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'value_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}), 'value_entity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeEntity']", 'null': 'True', 'blank': 'True'}), 'value_float': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}), 'value_integer': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}), 'value_option': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.AttributeOption']", 'null': 'True', 'blank': 'True'}), 'value_richtext': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'value_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}) }, 'catalogue.productcategory': { 'Meta': {'ordering': "['-is_canonical']", 'object_name': 'ProductCategory'}, 'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Category']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_canonical': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'catalogue.productclass': { 'Meta': {'ordering': "['name']", 'object_name': 'ProductClass'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'options': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['catalogue.Option']", 'symmetrical': 'False', 'blank': 'True'}), 'requires_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '128'}), 'track_stock': ('django.db.models.fields.BooleanField', [], {'default': 'True'}) }, 'catalogue.productrecommendation': { 'Meta': {'object_name': 'ProductRecommendation'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'primary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'primary_recommendations'", 'to': "orm['catalogue.Product']"}), 'ranking': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}), 'recommendation': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'contenttypes.contenttype': { 'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"}, 'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'fancypages.automaticproductspromotionblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'AutomaticProductsPromotionBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 
'True', 'primary_key': 'True'}), 'promotion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.AutomaticProductList']", 'null': 'True'}) }, 'fancypages.carouselblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'CarouselBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'image_1': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_10': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_2': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_3': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_4': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_5': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_6': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_7': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_8': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'image_9': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'link_url_1': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_10': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_2': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_3': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_4': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_5': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_6': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_7': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_8': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'link_url_9': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}) }, 'fancypages.container': { 'Meta': {'unique_together': "(('name', 'content_type', 'object_id'),)", 'object_name': 'Container'}, 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], 
{'null': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}) }, 'fancypages.contentblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'ContentBlock'}, 'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blocks'", 'to': "orm['fancypages.Container']"}), 'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, 'fancypages.fancypage': { 'Meta': {'ordering': "['full_name']", 'object_name': 'FancyPage', '_ormbases': ['catalogue.Category']}, 'category_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['catalogue.Category']", 'unique': 'True', 'primary_key': 'True'}), 'date_visible_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'date_visible_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'page_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'to': "orm['fancypages.PageType']"}), 'status': ('django.db.models.fields.CharField', [], {'default': "u'draft'", 'max_length': '15'}), 'visibility_types': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['fancypages.VisibilityType']", 'symmetrical': 'False'}) }, 'fancypages.fourcolumnlayoutblock': { 'Meta': {'object_name': 'FourColumnLayoutBlock'}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}) }, 'fancypages.handpickedproductspromotionblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'HandPickedProductsPromotionBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'promotion': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.HandPickedProductList']", 'null': 'True'}) }, 'fancypages.imageandtextblock': { 'Meta': {'object_name': 'ImageAndTextBlock', '_ormbases': ['fancypages.ContentBlock']}, 'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_text_blocks'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'text': ('django.db.models.fields.CharField', [], {'default': "'Your text goes here.'", 'max_length': '2000'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}) }, 'fancypages.imageblock': { 'Meta': {'object_name': 'ImageBlock', '_ormbases': ['fancypages.ContentBlock']}, 'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 
'related_name': "'image_blocks'", 'null': 'True', 'to': "orm['assets.ImageAsset']"}), 'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}), 'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}) }, 'fancypages.offerblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'OfferBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'offer': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.ConditionalOffer']", 'null': 'True'}) }, 'fancypages.orderedcontainer': { 'Meta': {'object_name': 'OrderedContainer', '_ormbases': ['fancypages.Container']}, 'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.Container']", 'unique': 'True', 'primary_key': 'True'}), 'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}) }, 'fancypages.pagenavigationblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'PageNavigationBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}) }, 'fancypages.pagetype': { 'Meta': {'object_name': 'PageType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}), 'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}) }, 'fancypages.primarynavigationblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'PrimaryNavigationBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}) }, 'fancypages.singleproductblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'SingleProductBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']", 'null': 'True'}) }, 'fancypages.tabblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'TabBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}) }, 'fancypages.textblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'TextBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"}) }, 'fancypages.threecolumnlayoutblock': { 'Meta': {'object_name': 'ThreeColumnLayoutBlock'}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}) }, 'fancypages.titletextblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'TitleTextBlock', 
'_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"}), 'title': ('django.db.models.fields.CharField', [], {'default': "'Your title goes here.'", 'max_length': '100'}) }, 'fancypages.twitterblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'TwitterBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'max_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}), 'username': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'fancypages.twocolumnlayoutblock': { 'Meta': {'object_name': 'TwoColumnLayoutBlock'}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'left_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '6', 'max_length': '3'}) }, 'fancypages.videoblock': { 'Meta': {'ordering': "['display_order']", 'object_name': 'VideoBlock', '_ormbases': ['fancypages.ContentBlock']}, 'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}), 'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}), 'video_code': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, 'fancypages.visibilitytype': { 'Meta': {'object_name': 'VisibilityType'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}) }, 'offer.benefit': { 'Meta': {'object_name': 'Benefit'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_affected_items': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}) }, 'offer.condition': { 'Meta': {'object_name': 'Condition'}, 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}), 'range': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Range']", 'null': 'True', 'blank': 'True'}), 'type': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'value': ('oscar.models.fields.PositiveDecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}) }, 'offer.conditionaloffer': { 'Meta': {'ordering': "['-priority']", 'object_name': 'ConditionalOffer'}, 'benefit': 
('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Benefit']"}), 'condition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['offer.Condition']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'end_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'max_basket_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'max_discount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}), 'max_global_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'max_user_applications': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'num_applications': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'num_orders': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'offer_type': ('django.db.models.fields.CharField', [], {'default': "'Site'", 'max_length': '128'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}), 'redirect_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'blank': 'True'}), 'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'unique': 'True', 'null': 'True'}), 'start_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}), 'status': ('django.db.models.fields.CharField', [], {'default': "'Open'", 'max_length': '64'}), 'total_discount': ('django.db.models.fields.DecimalField', [], {'default': "'0.00'", 'max_digits': '12', 'decimal_places': '2'}) }, 'offer.range': { 'Meta': {'object_name': 'Range'}, 'classes': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'classes'", 'blank': 'True', 'to': "orm['catalogue.ProductClass']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'excluded_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'excludes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'included_categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Category']"}), 'included_products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'includes'", 'blank': 'True', 'to': "orm['catalogue.Product']"}), 'includes_all_products': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}), 'proxy_class': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'}) }, 'promotions.automaticproductlist': { 'Meta': {'object_name': 'AutomaticProductList'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': 
('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'method': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'num_products': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '4'}) }, 'promotions.handpickedproductlist': { 'Meta': {'object_name': 'HandPickedProductList'}, 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'link_text': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}), 'link_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'products': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['catalogue.Product']", 'null': 'True', 'through': "orm['promotions.OrderedProduct']", 'blank': 'True'}) }, 'promotions.keywordpromotion': { 'Meta': {'object_name': 'KeywordPromotion'}, 'clicks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'filter': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'keyword': ('django.db.models.fields.CharField', [], {'max_length': '200'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'position': ('django.db.models.fields.CharField', [], {'max_length': '100'}) }, 'promotions.orderedproduct': { 'Meta': {'ordering': "('display_order',)", 'object_name': 'OrderedProduct'}, 'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'list': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['promotions.HandPickedProductList']"}), 'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalogue.Product']"}) }, 'promotions.pagepromotion': { 'Meta': {'object_name': 'PagePromotion'}, 'clicks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}), 'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'display_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}), 'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}), 'page_url': ('oscar.models.fields.ExtendedURLField', [], {'max_length': '128', 'db_index': 
'True'}), 'position': ('django.db.models.fields.CharField', [], {'max_length': '100'}) } } complete_apps = ['fancypages']
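# Illustrative sketch (not part of the original migration): the frozen ``models``
# dict above is South's snapshot of the schema at this point in history. South
# rebuilds fake model classes from it and hands them to the migration's
# forwards()/backwards() methods through the ``orm`` argument, so a data
# migration against this snapshot would look roughly like:
#
#     def forwards(self, orm):
#         for block in orm['fancypages.TextBlock'].objects.all():
#             block.text = block.text.strip()
#             block.save()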
bsd-3-clause
-8,242,269,794,335,194,000
77.575589
222
0.590172
false
SublimeText-Markdown/MarkdownEditing
open_page.py
1
1363
import sublime, sublime_plugin
import os, string
import re

try:
    from MarkdownEditing.wiki_page import *
except ImportError:
    from wiki_page import *

try:
    from MarkdownEditing.mdeutils import *
except ImportError:
    from mdeutils import *


class OpenPageCommand(MDETextCommand):

    def is_visible(self):
        """Return True if cursor is on a wiki page reference."""
        for sel in self.view.sel():
            scopes = self.view.scope_name(sel.b).split(" ")
            if 'meta.link.wiki.markdown' in scopes:
                return True
        return False

    def run(self, edit):
        print("Running OpenPageCommand")
        wiki_page = WikiPage(self.view)

        sel_region = self.get_selected()
        if sel_region:
            wiki_page.select_word_at_cursor()

            region = sublime.Region(sel_region.begin(), sel_region.begin())
            file_list = wiki_page.find_matching_files(region)

            if len(file_list) > 1:
                wiki_page.show_quick_list(file_list)
            else:
                name = wiki_page.identify_page_at_cursor()
                wiki_page.select_page(name)

    def get_selected(self):
        selection = self.view.sel()
        for region in selection:
            return region
        return None
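# Illustrative usage sketch (not part of the plugin source above): Sublime Text
# derives the command name "open_page" from the OpenPageCommand class, so the
# command can be triggered programmatically on a Markdown view, e.g. from the
# console or from another plugin. The key binding shown is an assumption about
# how a user might wire it up, not something MarkdownEditing necessarily ships.
#
#     view.run_command("open_page")
#
#     # hypothetical .sublime-keymap entry:
#     # { "keys": ["ctrl+alt+o"], "command": "open_page" }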
mit
-2,559,636,679,433,864,700
24.211538
75
0.568599
false
fedora-modularity/meta-test-family
moduleframework/tests/generic/dockerlint.py
1
6137
# -*- coding: utf-8 -*- # # Meta test family (MTF) is a tool to test components of a modular Fedora: # https://docs.pagure.org/modularity/ # Copyright (C) 2017 Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # he Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Authors: Jan Scotka <[email protected]> # from moduleframework.avocado_testers import container_avocado_test class DockerfileLinterInContainer(container_avocado_test.ContainerAvocadoTest): """ :avocado: enable :avocado: tags=sanity,rhel,fedora,docker,docker_lint_inside_test,generic """ def _file_to_check(self, doc_file_list): test_failed = False for doc in doc_file_list: exit_status = self.run("test -e %s" % doc, ignore_status=True).exit_status if int(exit_status) == 0: self.log.debug("%s doc file exists in container" % doc) test_failed = True return test_failed def test_all_nodocs(self): self.start() all_docs = self.run("rpm -qad", verbose=False).stdout test_failed = self._file_to_check(all_docs.split('\n')) msg = "Documentation files exist in container. They are installed in the base image or by RUN commands." if test_failed: self.log.warn(msg) self.assertTrue(True, msg=msg) def test_installed_docs(self): """ This test checks whether no docs are installed by RUN dnf command :return: FAILED in case we found some docs PASS in case there is no doc file found """ self.start() # Double brackets has to by used because of trans_dict. # 'EXCEPTION MTF: ', 'Command is formatted by using trans_dict. # If you want to use brackets { } in your code, please use {{ }}. installed_pkgs = self.run("rpm -qa --qf '%{{NAME}}\n'", verbose=False).stdout defined_pkgs = self.backend.getPackageList() list_pkg = set(installed_pkgs).intersection(set(defined_pkgs)) test_failed = False docu_pkg = [] for pkg in list_pkg: pkg_doc = self.run("rpm -qd %s" % pkg, verbose=False).stdout if self._file_to_check(pkg_doc.split('\n')): docu_pkg.append(pkg) test_failed = True self.assertFalse(test_failed, msg="There is documentation installed for packages: %s" % ','.join(docu_pkg)) def _check_container_files(self, exts, pkg_mgr): found_files = False file_list = [] for ext in exts: dir_with_ext = "/var/cache/{pkg_mgr}/**/*.{ext}".format(pkg_mgr=pkg_mgr, ext=ext) # Some images does not contain find command and therefore we have to use for or ls. ret = self.run('shopt -s globstar && for i in {dir}; do printf "%s\\n" "$i" ; done'.format( dir=dir_with_ext), ignore_status=True) # we did not find any file with an extension. # TODO I don't how to detect failure or empty files. 
if ret.stdout.strip() == dir_with_ext: continue file_list.extend(ret.stdout.split('\n')) if self._file_to_check(file_list): found_files = True return found_files def _dnf_clean_all(self): """ Function checks if files with relevant extensions exist in /var/cache/dnf directory :return: True if at least one file exists False if no file exists """ exts = ["solv", "solvx", "xml.gz", "rpm"] return self._check_container_files(exts, "dnf") def _yum_clean_all(self): """ Function checks if files with relevant extensions exist in /var/cache/dnf directory :return: True if at least one file exists False if no file exists """ # extensions are taken from https://github.com/rpm-software-management/yum/blob/master/yum/__init__.py#L2854 exts = ['rpm', 'sqlite', 'sqlite.bz2', 'xml.gz', 'asc', 'mirrorlist.txt', 'cachecookie', 'xml'] return self._check_container_files(exts, "yum") def test_docker_clean_all(self): """ This test checks if `dnf/yum clean all` was called in image :return: return True if clean all is called return False if clean all is not called """ self.start() # Detect distro in image distro = self.run("cat /etc/os-release").stdout if 'Fedora' in distro: self.assertFalse(self._dnf_clean_all(), msg="`dnf clean all` is not present in Dockerfile.") else: self.assertFalse(self._yum_clean_all(), msg="`yum clean all` is not present in Dockerfile.") class DockerLint(container_avocado_test.ContainerAvocadoTest): """ :avocado: enable :avocado: tags=sanity,rhel,fedora,docker,docker_labels_inspect_test """ def testLabels(self): """ Function tests whether labels are set in modulemd YAML file properly. :return: """ llabels = self.getConfigModule().get('labels') if llabels is None or len(llabels) == 0: self.log.info("No labels defined in config to check") self.cancel() for key in self.getConfigModule()['labels']: print(self.getConfigModule()['labels'][key]) aaa = self.checkLabel(key, self.getConfigModule()['labels'][key]) self.assertTrue(aaa, msg="Label %s is not set properly in modulemd YAML file." % key)
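# Illustrative sketch (not part of the original module): the cache check used by
# _check_container_files above, pulled out as a standalone helper. The helper
# name is made up for this example; it only shows how the bash command for one
# package manager / extension pair is assembled.
def example_cache_glob_cmd(pkg_mgr, ext):
    """Return a bash snippet listing leftover cache files for one extension."""
    pattern = "/var/cache/{pkg_mgr}/**/*.{ext}".format(pkg_mgr=pkg_mgr, ext=ext)
    # globstar lets ** recurse into subdirectories; printf inside a for loop
    # avoids relying on `find`, which some minimal images do not ship.
    return ('shopt -s globstar && '
            'for i in {pattern}; do printf "%s\\n" "$i"; done'.format(pattern=pattern))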
gpl-3.0
7,287,457,387,612,954,000
41.324138
116
0.617403
false
amitjamadagni/sympy
sympy/physics/quantum/state.py
2
28699
"""Dirac notation for states.""" from sympy import (cacheit, conjugate, Expr, Function, integrate, oo, sqrt, Tuple) from sympy.printing.pretty.stringpict import prettyForm, stringPict from sympy.physics.quantum.qexpr import QExpr, dispatch_method __all__ = [ 'KetBase', 'BraBase', 'StateBase', 'State', 'Ket', 'Bra', 'TimeDepState', 'TimeDepBra', 'TimeDepKet', 'Wavefunction' ] #----------------------------------------------------------------------------- # States, bras and kets. #----------------------------------------------------------------------------- # ASCII brackets _lbracket = "<" _rbracket = ">" _straight_bracket = "|" # Unicode brackets # MATHEMATICAL ANGLE BRACKETS _lbracket_ucode = u"\u27E8" _rbracket_ucode = u"\u27E9" # LIGHT VERTICAL BAR _straight_bracket_ucode = u"\u2758" # Other options for unicode printing of <, > and | for Dirac notation. # LEFT-POINTING ANGLE BRACKET # _lbracket = u"\u2329" # _rbracket = u"\u232A" # LEFT ANGLE BRACKET # _lbracket = u"\u3008" # _rbracket = u"\u3009" # VERTICAL LINE # _straight_bracket = u"\u007C" class StateBase(QExpr): """Abstract base class for general abstract states in quantum mechanics. All other state classes defined will need to inherit from this class. It carries the basic structure for all other states such as dual, _eval_adjoint and label. This is an abstract base class and you should not instantiate it directly, instead use State. """ @classmethod def _operators_to_state(self, ops, **options): """ Returns the eigenstate instance for the passed operators. This method should be overridden in subclasses. It will handle being passed either an Operator instance or set of Operator instances. It should return the corresponding state INSTANCE or simply raise a NotImplementedError. See cartesian.py for an example. """ raise NotImplementedError("Cannot map operators to states in this class. Method not implemented!") def _state_to_operators(self, op_classes, **options): """ Returns the operators which this state instance is an eigenstate of. This method should be overridden in subclasses. It will be called on state instances and be passed the operator classes that we wish to make into instances. The state instance will then transform the classes appropriately, or raise a NotImplementedError if it cannot return operator instances. See cartesian.py for examples, """ raise NotImplementedError( "Cannot map this state to operators. 
Method not implemented!") @property def operators(self): """Return the operator(s) that this state is an eigenstate of""" from operatorset import state_to_operators # import internally to avoid circular import errors return state_to_operators(self) def _enumerate_state(self, num_states, **options): raise NotImplementedError("Cannot enumerate this state!") def _represent_default_basis(self, **options): return self._represent(basis=self.operators) #------------------------------------------------------------------------- # Dagger/dual #------------------------------------------------------------------------- @property def dual(self): """Return the dual state of this one.""" return self.dual_class()._new_rawargs(self.hilbert_space, *self.args) @classmethod def dual_class(self): """Return the class used to construt the dual.""" raise NotImplementedError( 'dual_class must be implemented in a subclass' ) def _eval_adjoint(self): """Compute the dagger of this state using the dual.""" return self.dual #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- def _pretty_brackets(self, height, use_unicode=True): # Return pretty printed brackets for the state # Ideally, this could be done by pform.parens but it does not support the angled < and > # Setup for unicode vs ascii if use_unicode: lbracket, rbracket = self.lbracket_ucode, self.rbracket_ucode slash, bslash, vert = u'\u2571', u'\u2572', u'\u2502' else: lbracket, rbracket = self.lbracket, self.rbracket slash, bslash, vert = '/', '\\', '|' # If height is 1, just return brackets if height == 1: return stringPict(lbracket), stringPict(rbracket) # Make height even height += (height % 2) brackets = [] for bracket in lbracket, rbracket: # Create left bracket if bracket in set([_lbracket, _lbracket_ucode]): bracket_args = [ ' ' * (height//2 - i - 1) + slash for i in range(height // 2)] bracket_args.extend( [ ' ' * i + bslash for i in range(height // 2)]) # Create right bracket elif bracket in set([_rbracket, _rbracket_ucode]): bracket_args = [ ' ' * i + bslash for i in range(height // 2)] bracket_args.extend([ ' ' * ( height//2 - i - 1) + slash for i in range(height // 2)]) # Create straight bracket elif bracket in set([_straight_bracket, _straight_bracket_ucode]): bracket_args = [vert for i in range(height)] else: raise ValueError(bracket) brackets.append( stringPict('\n'.join(bracket_args), baseline=height//2)) return brackets def _sympystr(self, printer, *args): contents = self._print_contents(printer, *args) return '%s%s%s' % (self.lbracket, contents, self.rbracket) def _pretty(self, printer, *args): from sympy.printing.pretty.stringpict import prettyForm # Get brackets pform = self._print_contents_pretty(printer, *args) lbracket, rbracket = self._pretty_brackets( pform.height(), printer._use_unicode) # Put together state pform = prettyForm(*pform.left(lbracket)) pform = prettyForm(*pform.right(rbracket)) return pform def _latex(self, printer, *args): contents = self._print_contents_latex(printer, *args) # The extra {} brackets are needed to get matplotlib's latex # rendered to render this properly. return '{%s%s%s}' % (self.lbracket_latex, contents, self.rbracket_latex) class KetBase(StateBase): """Base class for Kets. This class defines the dual property and the brackets for printing. This is an abstract base class and you should not instantiate it directly, instead use Ket. 
""" lbracket = _straight_bracket rbracket = _rbracket lbracket_ucode = _straight_bracket_ucode rbracket_ucode = _rbracket_ucode lbracket_latex = r'\left|' rbracket_latex = r'\right\rangle ' @classmethod def default_args(self): return ("psi",) @classmethod def dual_class(self): return BraBase def __mul__(self, other): """KetBase*other""" from sympy.physics.quantum.operator import OuterProduct if isinstance(other, BraBase): return OuterProduct(self, other) else: return Expr.__mul__(self, other) def __rmul__(self, other): """other*KetBase""" from sympy.physics.quantum.innerproduct import InnerProduct if isinstance(other, BraBase): return InnerProduct(other, self) else: return Expr.__rmul__(self, other) #------------------------------------------------------------------------- # _eval_* methods #------------------------------------------------------------------------- def _eval_innerproduct(self, bra, **hints): """Evaluate the inner product betweeen this ket and a bra. This is called to compute <bra|ket>, where the ket is ``self``. This method will dispatch to sub-methods having the format:: ``def _eval_innerproduct_BraClass(self, **hints):`` Subclasses should define these methods (one for each BraClass) to teach the ket how to take inner products with bras. """ return dispatch_method(self, '_eval_innerproduct', bra, **hints) def _apply_operator(self, op, **options): """Apply an Operator to this Ket. This method will dispatch to methods having the format:: ``def _apply_operator_OperatorName(op, **options):`` Subclasses should define these methods (one for each OperatorName) to teach the Ket how operators act on it. Parameters ========== op : Operator The Operator that is acting on the Ket. options : dict A dict of key/value pairs that control how the operator is applied to the Ket. """ return dispatch_method(self, '_apply_operator', op, **options) class BraBase(StateBase): """Base class for Bras. This class defines the dual property and the brackets for printing. This is an abstract base class and you should not instantiate it directly, instead use Bra. 
""" lbracket = _lbracket rbracket = _straight_bracket lbracket_ucode = _lbracket_ucode rbracket_ucode = _straight_bracket_ucode lbracket_latex = r'\left\langle ' rbracket_latex = r'\right|' @classmethod def _operators_to_state(self, ops, **options): state = self.dual_class().operators_to_state(ops, **options) return state.dual def _state_to_operators(self, op_classes, **options): return self.dual._state_to_operators(op_classes, **options) def _enumerate_state(self, num_states, **options): dual_states = self.dual._enumerate_state(num_states, **options) return map(lambda x: x.dual, dual_states) @classmethod def default_args(self): return self.dual_class().default_args() @classmethod def dual_class(self): return KetBase def __mul__(self, other): """BraBase*other""" from sympy.physics.quantum.innerproduct import InnerProduct if isinstance(other, KetBase): return InnerProduct(self, other) else: return Expr.__mul__(self, other) def __rmul__(self, other): """other*BraBase""" from sympy.physics.quantum.operator import OuterProduct if isinstance(other, KetBase): return OuterProduct(other, self) else: return Expr.__rmul__(self, other) def _represent(self, **options): """A default represent that uses the Ket's version.""" from sympy.physics.quantum.dagger import Dagger return Dagger(self.dual._represent(**options)) class State(StateBase): """General abstract quantum state used as a base class for Ket and Bra.""" pass class Ket(State, KetBase): """A general time-independent Ket in quantum mechanics. Inherits from State and KetBase. This class should be used as the base class for all physical, time-independent Kets in a system. This class and its subclasses will be the main classes that users will use for expressing Kets in Dirac notation [1]_. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time. Examples ======== Create a simple Ket and looking at its properties:: >>> from sympy.physics.quantum import Ket, Bra >>> from sympy import symbols, I >>> k = Ket('psi') >>> k |psi> >>> k.hilbert_space H >>> k.is_commutative False >>> k.label (psi,) Ket's know about their associated bra:: >>> k.dual <psi| >>> k.dual_class() <class 'sympy.physics.quantum.state.Bra'> Take a linear combination of two kets:: >>> k0 = Ket(0) >>> k1 = Ket(1) >>> 2*I*k0 - 4*k1 2*I*|0> - 4*|1> Compound labels are passed as tuples:: >>> n, m = symbols('n,m') >>> k = Ket(n,m) >>> k |nm> References ========== .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation """ @classmethod def dual_class(self): return Bra class Bra(State, BraBase): """A general time-independent Bra in quantum mechanics. Inherits from State and BraBase. A Bra is the dual of a Ket [1]_. This class and its subclasses will be the main classes that users will use for expressing Bras in Dirac notation. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time. 
Examples ======== Create a simple Bra and look at its properties:: >>> from sympy.physics.quantum import Ket, Bra >>> from sympy import symbols, I >>> b = Bra('psi') >>> b <psi| >>> b.hilbert_space H >>> b.is_commutative False Bra's know about their dual Ket's:: >>> b.dual |psi> >>> b.dual_class() <class 'sympy.physics.quantum.state.Ket'> Like Kets, Bras can have compound labels and be manipulated in a similar manner:: >>> n, m = symbols('n,m') >>> b = Bra(n,m) - I*Bra(m,n) >>> b -I*<mn| + <nm| Symbols in a Bra can be substituted using ``.subs``:: >>> b.subs(n,m) <mm| - I*<mm| References ========== .. [1] http://en.wikipedia.org/wiki/Bra-ket_notation """ @classmethod def dual_class(self): return Ket #----------------------------------------------------------------------------- # Time dependent states, bras and kets. #----------------------------------------------------------------------------- class TimeDepState(StateBase): """Base class for a general time-dependent quantum state. This class is used as a base class for any time-dependent state. The main difference between this class and the time-independent state is that this class takes a second argument that is the time in addition to the usual label argument. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. """ #------------------------------------------------------------------------- # Initialization #------------------------------------------------------------------------- @classmethod def default_args(self): return ("psi", "t") #------------------------------------------------------------------------- # Properties #------------------------------------------------------------------------- @property def label(self): """The label of the state.""" return self.args[:-1] @property def time(self): """The time of the state.""" return self.args[-1] #------------------------------------------------------------------------- # Printing #------------------------------------------------------------------------- def _print_time(self, printer, *args): return printer._print(self.time, *args) _print_time_repr = _print_time _print_time_latex = _print_time def _print_time_pretty(self, printer, *args): pform = printer._print(self.time, *args) return pform def _print_contents(self, printer, *args): label = self._print_label(printer, *args) time = self._print_time(printer, *args) return '%s;%s' % (label, time) def _print_label_repr(self, printer, *args): label = self._print_sequence(self.label, ',', printer, *args) time = self._print_time_repr(printer, *args) return '%s,%s' % (label, time) def _print_contents_pretty(self, printer, *args): label = self._print_label_pretty(printer, *args) time = self._print_time_pretty(printer, *args) return printer._print_seq((label, time), delimiter=';') def _print_contents_latex(self, printer, *args): label = self._print_sequence( self.label, self._label_separator, printer, *args) time = self._print_time_latex(printer, *args) return '%s;%s' % (label, time) class TimeDepKet(TimeDepState, KetBase): """General time-dependent Ket in quantum mechanics. This inherits from ``TimeDepState`` and ``KetBase`` and is the main class that should be used for Kets that vary with time. Its dual is a ``TimeDepBra``. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. 
This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. Examples ======== Create a TimeDepKet and look at its attributes:: >>> from sympy.physics.quantum import TimeDepKet >>> k = TimeDepKet('psi', 't') >>> k |psi;t> >>> k.time t >>> k.label (psi,) >>> k.hilbert_space H TimeDepKets know about their dual bra:: >>> k.dual <psi;t| >>> k.dual_class() <class 'sympy.physics.quantum.state.TimeDepBra'> """ @classmethod def dual_class(self): return TimeDepBra class TimeDepBra(TimeDepState, BraBase): """General time-dependent Bra in quantum mechanics. This inherits from TimeDepState and BraBase and is the main class that should be used for Bras that vary with time. Its dual is a TimeDepBra. Parameters ========== args : tuple The list of numbers or parameters that uniquely specify the ket. This will usually be its symbol or its quantum numbers. For time-dependent state, this will include the time as the final argument. Examples ======== >>> from sympy.physics.quantum import TimeDepBra >>> from sympy import symbols, I >>> b = TimeDepBra('psi', 't') >>> b <psi;t| >>> b.time t >>> b.label (psi,) >>> b.hilbert_space H >>> b.dual |psi;t> """ @classmethod def dual_class(self): return TimeDepKet class Wavefunction(Function): """Class for representations in continuous bases This class takes an expression and coordinates in its constructor. It can be used to easily calculate normalizations and probabilities. Parameters ========== expr : Expr The expression representing the functional form of the w.f. coords : Symbol or tuple The coordinates to be integrated over, and their bounds Examples ======== Particle in a box, specifying bounds in the more primitive way of using Piecewise: >>> from sympy import Symbol, Piecewise, pi, N >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x = Symbol('x', real=True) >>> n = 1 >>> L = 1 >>> g = Piecewise((0, x < 0), (0, x > L), (sqrt(2//L)*sin(n*pi*x/L), True)) >>> f = Wavefunction(g, x) >>> f.norm 1 >>> f.is_normalized True >>> p = f.prob() >>> p(0) 0 >>> p(L) 0 >>> p(0.5) 2 >>> p(0.85*L) 2*sin(0.85*pi)**2 >>> N(p(0.85*L)) 0.412214747707527 Additionally, you can specify the bounds of the function and the indices in a more compact way: >>> from sympy import symbols, pi, diff >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm 1 >>> f(L+1) 0 >>> f(L-1) sqrt(2)*sin(pi*n*(L - 1)/L)/sqrt(L) >>> f(-1) 0 >>> f(0.85) sqrt(2)*sin(0.85*pi*n/L)/sqrt(L) >>> f(0.85, n=1, L=1) sqrt(2)*sin(0.85*pi) >>> f.is_commutative False All arguments are automatically sympified, so you can define the variables as strings rather than symbols: >>> expr = x**2 >>> f = Wavefunction(expr, 'x') >>> type(f.variables[0]) <class 'sympy.core.symbol.Symbol'> Derivatives of Wavefunctions will return Wavefunctions: >>> diff(f, x) Wavefunction(2*x, x) """ #Any passed tuples for coordinates and their bounds need to be #converted to Tuples before Function's constructor is called, to #avoid errors from calling is_Float in the constructor def __new__(cls, *args, **options): new_args = [None for i in args] ct = 0 for arg in args: if isinstance(arg, tuple): new_args[ct] = Tuple(*arg) else: new_args[ct] = arg ct += 1 return super(Function, cls).__new__(cls, *new_args, **options) def __call__(self, 
*args, **options): var = self.variables if len(args) != len(var): raise NotImplementedError( "Incorrect number of arguments to function!") ct = 0 #If the passed value is outside the specified bounds, return 0 for v in var: lower, upper = self.limits[v] #Do the comparison to limits only if the passed symbol is actually #a symbol present in the limits; #Had problems with a comparison of x > L if isinstance(args[ct], Expr) and \ not (lower in args[ct].free_symbols or upper in args[ct].free_symbols): continue if args[ct] < lower or args[ct] > upper: return 0 ct += 1 expr = self.expr #Allows user to make a call like f(2, 4, m=1, n=1) for symbol in list(expr.free_symbols): if str(symbol) in options.keys(): val = options[str(symbol)] expr = expr.subs(symbol, val) return expr.subs(zip(var, args)) def _eval_derivative(self, symbol): expr = self.expr deriv = expr._eval_derivative(symbol) return Wavefunction(deriv, *self.args[1:]) def _eval_conjugate(self): return Wavefunction(conjugate(self.expr), *self.args[1:]) def _eval_transpose(self): return self @property def free_symbols(self): return self.expr.free_symbols @property def is_commutative(self): """ Override Function's is_commutative so that order is preserved in represented expressions """ return False @classmethod def eval(self, *args): return None @property def variables(self): """ Return the coordinates which the wavefunction depends on Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x,y = symbols('x,y') >>> f = Wavefunction(x*y, x, y) >>> f.variables (x, y) >>> g = Wavefunction(x*y, x) >>> g.variables (x,) """ var = [g[0] if isinstance(g, Tuple) else g for g in self._args[1:]] return tuple(var) @property def limits(self): """ Return the limits of the coordinates which the w.f. depends on If no limits are specified, defaults to ``(-oo, oo)``. Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x, y = symbols('x, y') >>> f = Wavefunction(x**2, (x, 0, 1)) >>> f.limits {x: (0, 1)} >>> f = Wavefunction(x**2, x) >>> f.limits {x: (-oo, oo)} >>> f = Wavefunction(x**2 + y**2, x, (y, -1, 2)) >>> f.limits {x: (-oo, oo), y: (-1, 2)} """ limits = [(g[1], g[2]) if isinstance(g, Tuple) else (-oo, oo) for g in self._args[1:]] return dict(zip(self.variables, tuple(limits))) @property def expr(self): """ Return the expression which is the functional form of the Wavefunction Examples ======== >>> from sympy.physics.quantum.state import Wavefunction >>> from sympy import symbols >>> x, y = symbols('x, y') >>> f = Wavefunction(x**2, x) >>> f.expr x**2 """ return self._args[0] @property def is_normalized(self): """ Returns true if the Wavefunction is properly normalized Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.is_normalized True """ return (self.norm == 1.0) @property @cacheit def norm(self): """ Return the normalization of the specified functional form. This function integrates over the coordinates of the Wavefunction, with the bounds specified. 
Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', positive=True) >>> n = symbols('n', integer=True) >>> g = sqrt(2/L)*sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm 1 >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.norm sqrt(2)*sqrt(L)/2 """ exp = self.expr*conjugate(self.expr) var = self.variables limits = self.limits for v in var: curr_limits = limits[v] exp = integrate(exp, (v, curr_limits[0], curr_limits[1])) return sqrt(exp) def normalize(self): """ Return a normalized version of the Wavefunction Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', real=True) >>> n = symbols('n', integer=True) >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.normalize() Wavefunction(sqrt(2)*sin(pi*n*x/L)/sqrt(L), (x, 0, L)) """ const = self.norm if const == oo: raise NotImplementedError("The function is not normalizable!") else: return Wavefunction((const)**(-1)*self.expr, *self.args[1:]) def prob(self): """ Return the absolute magnitude of the w.f., `|\psi(x)|^2` Examples ======== >>> from sympy import symbols, pi >>> from sympy.functions import sqrt, sin >>> from sympy.physics.quantum.state import Wavefunction >>> x, L = symbols('x,L', real=True) >>> n = symbols('n', integer=True) >>> g = sin(n*pi*x/L) >>> f = Wavefunction(g, (x, 0, L)) >>> f.prob() Wavefunction(sin(pi*n*x/L)**2, x) """ return Wavefunction(self.expr*conjugate(self.expr), *self.variables)
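# Illustrative sketch (not part of the original module): the __mul__/__rmul__
# hooks defined on KetBase and BraBase above are what make Dirac-style products
# build the right expression types, e.g. (output shown as typically printed):
#
#     >>> from sympy.physics.quantum import Ket, Bra
#     >>> Bra('b') * Ket('k')      # dispatches to InnerProduct
#     <b|k>
#     >>> Ket('k') * Bra('b')      # dispatches to OuterProduct
#     |k><b|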
bsd-3-clause
1,310,228,906,220,686,800
29.209474
106
0.545245
false
edljk/Mosek.jl
deps/src/mosek/7/tools/examples/fusion/python/TrafficNetworkModel.py
1
4935
# # Copyright: Copyright (c) MOSEK ApS, Denmark. All rights reserved. # # File: TrafficNetworkModel.py # # Purpose: Demonstrates a traffix network problem as a conic quadratic problem. # # Source: Robert Fourer, "Convexity Checking in Large-Scale Optimization", # OR 53 --- Nottingham 6-8 September 2011. # # The problem: # Given a directed graph representing a traffic network # with one source and one sink, we have for each arc an # associated capacity, base travel time and a # sensitivity. Travel time along a specific arc increases # as the flow approaches the capacity. # # Given a fixed inflow we now wish to find the # configuration that minimizes the average travel time. from mosek.fusion import * import sys class TrafficNetworkError(Exception): pass class TrafficNetworkModel(Model): def __init__(self, numberOfNodes, source_idx, sink_idx, arc_i, arc_j, arcSensitivity, arcCapacity, arcBaseTravelTime, T): Model.__init__(self,"Traffic Network") finished = False try: n = numberOfNodes narcs = len(arc_i) NxN = NDSet(n, n) sens = Matrix.sparse(n, n, arc_i, arc_j, arcSensitivity) cap = Matrix.sparse(n, n, arc_i, arc_j, arcCapacity) basetime = Matrix.sparse(n, n, arc_i, arc_j, arcBaseTravelTime) e = Matrix.sparse(n, n, arc_i, arc_j, [ 1.0 ] * narcs) e_e = Matrix.sparse(n,n, [ sink_idx ],[ source_idx ], [ 1.0 ]); cs_inv_matrix = \ Matrix.sparse(n, n, arc_i, arc_j, [ 1.0 / (arcSensitivity[i] * arcCapacity[i]) for i in range(narcs)]) s_inv_matrix = \ Matrix.sparse(n, n, arc_i, arc_j, [ 1.0 / arcSensitivity[i] for i in range(narcs)]) self.__flow = self.variable("traffic_flow", NxN, Domain.greaterThan(0.0)) x = self.__flow; t = self.variable("travel_time" , NxN, Domain.greaterThan(0.0)) d = self.variable("d", NxN, Domain.greaterThan(0.0)) z = self.variable("z", NxN, Domain.greaterThan(0.0)) # Set the objective: self.objective("Average travel time", ObjectiveSense.Minimize, Expr.mul(1.0/T, Expr.add(Expr.dot(basetime,x), Expr.dot(e,d)))) # Set up constraints # Constraint (1a) numnz = len(arcSensitivity) v = Variable.stack([ [ d.index(arc_i[i],arc_j[i]), z.index(arc_i[i],arc_j[i]), x.index(arc_i[i],arc_j[i]) ] for i in range(narcs) ]) self.constraint("(1a)",v, Domain.inRotatedQCone(narcs,3)) # Constraint (1b) self.constraint("(1b)", Expr.sub(Expr.add(Expr.mulElm(z,e), Expr.mulElm(x,cs_inv_matrix)), s_inv_matrix), Domain.equalsTo(0.0)) # Constraint (2) self.constraint("(2)", Expr.sub(Expr.add(Expr.mulDiag(x, e.transpose()), Expr.mulDiag(x, e_e.transpose())), Expr.add(Expr.mulDiag(x.transpose(), e), Expr.mulDiag(x.transpose(), e_e))), Domain.equalsTo(0.0)) # Constraint (3) self.constraint("(3)",x.index(sink_idx, source_idx), Domain.equalsTo(T)) finished = True finally: if not finished: self.__del__() # Return the solution. We do this the easy and inefficeint way: # We fetch the whole NxN array og values, a lot of which are # zeros. def getFlow(self): return self.__flow.level() def main(args): n = 4 arc_i = [ 0, 0, 2, 1, 2 ] arc_j = [ 1, 2, 1, 3, 3 ] arc_base = [ 4.0, 1.0, 2.0, 1.0, 6.0 ] arc_cap = [ 10.0, 12.0, 20.0, 15.0, 10.0 ] arc_sens = [ 0.1, 0.7, 0.9, 0.5, 0.1 ] T = 20.0 source_idx = 0 sink_idx = 3 with TrafficNetworkModel(n, source_idx, sink_idx, arc_i, arc_j, arc_sens, arc_cap, arc_base, T) as M: M.solve() flow = M.getFlow() print("Optimal flow:") for i,j in zip(arc_i,arc_j): print "\tflow node%d->node%d = %f" % (i,j, flow[i * n + j]) main(sys.argv[1:])
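# Illustrative note (not part of the original example): constraint (1a) stacks
# the triple (d_ij, z_ij, x_ij) for every arc and places each row in a rotated
# quadratic cone. In MOSEK Fusion, Domain.inRotatedQCone(narcs, 3) means
# 2*d*z >= x**2 with d, z >= 0 for each arc; combined with constraint (1b),
# which ties z to the remaining arc capacity, d acts as an epigraph variable
# for the nonlinear congestion term, and the objective averages
# basetime*x + e*d over the fixed inflow T.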
mit
7,939,681,877,127,627,000
35.286765
94
0.473759
false
hzlf/openbroadcast
website/cms/tests/forms.py
1
5969
# -*- coding: utf-8 -*- from __future__ import with_statement from cms.admin import forms from cms.admin.forms import PageUserForm from cms.api import create_page, create_page_user from cms.forms.fields import PageSelectFormField, SuperLazyIterator from cms.forms.utils import (get_site_choices, get_page_choices, update_site_and_page_choices) from cms.test_utils.testcases import CMSTestCase from cms.test_utils.util.context_managers import SettingsOverride from django.contrib.auth.models import User from django.contrib.sites.models import Site from django.core.cache import cache class Mock_PageSelectFormField(PageSelectFormField): def __init__(self, required=False): # That's to have a proper mock object, without having to resort # to dirtier tricks. We want to test *just* compress here. self.required = required self.error_messages = {} self.error_messages['invalid_page'] = 'Invalid_page' class FormsTestCase(CMSTestCase): def setUp(self): cache.clear() def test_get_site_choices(self): result = get_site_choices() self.assertEquals(result, []) def test_get_page_choices(self): result = get_page_choices() self.assertEquals(result, [('', '----')]) def test_get_site_choices_without_moderator(self): with SettingsOverride(CMS_MODERATOR=False): result = get_site_choices() self.assertEquals(result, []) def test_get_site_choices_without_moderator_with_superuser(self): with SettingsOverride(CMS_MODERATOR=False): # boilerplate (creating a page) user_super = User(username="super", is_staff=True, is_active=True, is_superuser=True) user_super.set_password("super") user_super.save() with self.login_user_context(user_super): create_page("home", "nav_playground.html", "en", created_by=user_super) # The proper test result = get_site_choices() self.assertEquals(result, [(1,'example.com')]) def test_compress_function_raises_when_page_is_none(self): raised = False try: fake_field = Mock_PageSelectFormField(required=True) data_list = (0, None) #(site_id, page_id) dsite-id is not used fake_field.compress(data_list) self.fail('compress function didn\'t raise!') except forms.ValidationError: raised = True self.assertTrue(raised) def test_compress_function_returns_none_when_not_required(self): fake_field = Mock_PageSelectFormField(required=False) data_list = (0, None) #(site_id, page_id) dsite-id is not used result = fake_field.compress(data_list) self.assertEquals(result, None) def test_compress_function_returns_none_when_no_data_list(self): fake_field = Mock_PageSelectFormField(required=False) data_list = None result = fake_field.compress(data_list) self.assertEquals(result, None) def test_compress_function_gets_a_page_when_one_exists(self): # boilerplate (creating a page) user_super = User(username="super", is_staff=True, is_active=True, is_superuser=True) user_super.set_password("super") user_super.save() with self.login_user_context(user_super): home_page = create_page("home", "nav_playground.html", "en", created_by=user_super) # The actual test fake_field = Mock_PageSelectFormField() data_list = (0, home_page.pk) #(site_id, page_id) dsite-id is not used result = fake_field.compress(data_list) self.assertEquals(home_page,result) def test_update_site_and_page_choices(self): with SettingsOverride(CMS_MODERATOR=False): Site.objects.all().delete() site = Site.objects.create(domain='http://www.django-cms.org', name='Django CMS') page1 = create_page('Page 1', 'nav_playground.html', 'en', site=site) page2 = create_page('Page 2', 'nav_playground.html', 'de', site=site) page3 = create_page('Page 3', 'nav_playground.html', 
'en', site=site, parent=page1) # enforce the choices to be cast to a list site_choices, page_choices = [list(bit) for bit in update_site_and_page_choices('en')] self.assertEqual(page_choices, [ ('', '----'), (site.name, [ (page1.pk, 'Page 1'), (page3.pk, '&nbsp;&nbsp;Page 3'), (page2.pk, 'Page 2'), ]) ]) self.assertEqual(site_choices, [(site.pk, site.name)]) def test_superlazy_iterator_behaves_properly_for_sites(self): normal_result = get_site_choices() lazy_result = SuperLazyIterator(get_site_choices) self.assertEquals(normal_result, list(lazy_result)) def test_superlazy_iterator_behaves_properly_for_pages(self): normal_result = get_page_choices() lazy_result = SuperLazyIterator(get_page_choices) self.assertEquals(normal_result, list(lazy_result)) def test_page_user_form_initial(self): myuser = User.objects.create_superuser("myuser", "[email protected]", "myuser") user = create_page_user(myuser, myuser, grant_all=True) puf = PageUserForm(instance=user) names = ['can_add_page', 'can_change_page', 'can_delete_page', 'can_add_pageuser', 'can_change_pageuser', 'can_delete_pageuser', 'can_add_pagepermission', 'can_change_pagepermission', 'can_delete_pagepermission'] for name in names: self.assertTrue(puf.initial.get(name, False))
gpl-3.0
302,999,438,502,626,100
43.544776
98
0.608645
false
ProgressivePlanning/mongoengine
tests/queryset/queryset.py
1
162928
# -*- coding: utf-8 -*- import sys sys.path[0:0] = [""] import unittest import uuid from nose.plugins.skip import SkipTest from datetime import datetime, timedelta import pymongo from pymongo.errors import ConfigurationError from pymongo.read_preferences import ReadPreference from bson import ObjectId, DBRef from mongoengine import * from mongoengine.connection import get_connection, get_db from mongoengine.python_support import PY3, IS_PYMONGO_3 from mongoengine.context_managers import query_counter, switch_db from mongoengine.queryset import (QuerySet, QuerySetManager, MultipleObjectsReturned, DoesNotExist, queryset_manager) from mongoengine.errors import InvalidQueryError __all__ = ("QuerySetTest",) class db_ops_tracker(query_counter): def get_ops(self): ignore_query = {"ns": {"$ne": "%s.system.indexes" % self.db.name}} return list(self.db.system.profile.find(ignore_query)) def skip_older_mongodb(f): def _inner(*args, **kwargs): connection = get_connection() info = connection.test.command('buildInfo') mongodb_version = tuple([int(i) for i in info['version'].split('.')]) if mongodb_version < (2, 6): raise SkipTest("Need MongoDB version 2.6+") return f(*args, **kwargs) _inner.__name__ = f.__name__ _inner.__doc__ = f.__doc__ return _inner def skip_pymongo3(f): def _inner(*args, **kwargs): if IS_PYMONGO_3: raise SkipTest("Useless with PyMongo 3+") return f(*args, **kwargs) _inner.__name__ = f.__name__ _inner.__doc__ = f.__doc__ return _inner class QuerySetTest(unittest.TestCase): def setUp(self): connect(db='mongoenginetest') connect(db='mongoenginetest2', alias='test2') class PersonMeta(EmbeddedDocument): weight = IntField() class Person(Document): name = StringField() age = IntField() person_meta = EmbeddedDocumentField(PersonMeta) meta = {'allow_inheritance': True} Person.drop_collection() self.PersonMeta = PersonMeta self.Person = Person def test_initialisation(self): """Ensure that a QuerySet is correctly initialised by QuerySetManager. """ self.assertTrue(isinstance(self.Person.objects, QuerySet)) self.assertEqual(self.Person.objects._collection.name, self.Person._get_collection_name()) self.assertTrue(isinstance(self.Person.objects._collection, pymongo.collection.Collection)) def test_cannot_perform_joins_references(self): class BlogPost(Document): author = ReferenceField(self.Person) author2 = GenericReferenceField() def test_reference(): list(BlogPost.objects(author__name="test")) self.assertRaises(InvalidQueryError, test_reference) def test_generic_reference(): list(BlogPost.objects(author2__name="test")) def test_find(self): """Ensure that a query returns a valid set of results. 
""" self.Person(name="User A", age=20).save() self.Person(name="User B", age=30).save() # Find all people in the collection people = self.Person.objects self.assertEqual(people.count(), 2) results = list(people) self.assertTrue(isinstance(results[0], self.Person)) self.assertTrue(isinstance(results[0].id, (ObjectId, str, unicode))) self.assertEqual(results[0].name, "User A") self.assertEqual(results[0].age, 20) self.assertEqual(results[1].name, "User B") self.assertEqual(results[1].age, 30) # Use a query to filter the people found to just person1 people = self.Person.objects(age=20) self.assertEqual(people.count(), 1) person = people.next() self.assertEqual(person.name, "User A") self.assertEqual(person.age, 20) # Test limit people = list(self.Person.objects.limit(1)) self.assertEqual(len(people), 1) self.assertEqual(people[0].name, 'User A') # Test skip people = list(self.Person.objects.skip(1)) self.assertEqual(len(people), 1) self.assertEqual(people[0].name, 'User B') person3 = self.Person(name="User C", age=40) person3.save() # Test slice limit people = list(self.Person.objects[:2]) self.assertEqual(len(people), 2) self.assertEqual(people[0].name, 'User A') self.assertEqual(people[1].name, 'User B') # Test slice skip people = list(self.Person.objects[1:]) self.assertEqual(len(people), 2) self.assertEqual(people[0].name, 'User B') self.assertEqual(people[1].name, 'User C') # Test slice limit and skip people = list(self.Person.objects[1:2]) self.assertEqual(len(people), 1) self.assertEqual(people[0].name, 'User B') # Test slice limit and skip cursor reset qs = self.Person.objects[1:2] # fetch then delete the cursor qs._cursor qs._cursor_obj = None people = list(qs) self.assertEqual(len(people), 1) self.assertEqual(people[0].name, 'User B') people = list(self.Person.objects[1:1]) self.assertEqual(len(people), 0) # Test slice out of range people = list(self.Person.objects[80000:80001]) self.assertEqual(len(people), 0) # Test larger slice __repr__ self.Person.objects.delete() for i in xrange(55): self.Person(name='A%s' % i, age=i).save() self.assertEqual(self.Person.objects.count(), 55) self.assertEqual("Person object", "%s" % self.Person.objects[0]) self.assertEqual( "[<Person: Person object>, <Person: Person object>]", "%s" % self.Person.objects[1:3]) self.assertEqual( "[<Person: Person object>, <Person: Person object>]", "%s" % self.Person.objects[51:53]) # Test only after limit self.assertEqual(self.Person.objects().limit(2).only('name')[0].age, None) # Test only after skip self.assertEqual(self.Person.objects().skip(2).only('name')[0].age, None) def test_find_one(self): """Ensure that a query using find_one returns a valid result. 
""" person1 = self.Person(name="User A", age=20) person1.save() person2 = self.Person(name="User B", age=30) person2.save() # Retrieve the first person from the database person = self.Person.objects.first() self.assertTrue(isinstance(person, self.Person)) self.assertEqual(person.name, "User A") self.assertEqual(person.age, 20) # Use a query to filter the people found to just person2 person = self.Person.objects(age=30).first() self.assertEqual(person.name, "User B") person = self.Person.objects(age__lt=30).first() self.assertEqual(person.name, "User A") # Use array syntax person = self.Person.objects[0] self.assertEqual(person.name, "User A") person = self.Person.objects[1] self.assertEqual(person.name, "User B") self.assertRaises(IndexError, self.Person.objects.__getitem__, 2) # Find a document using just the object id person = self.Person.objects.with_id(person1.id) self.assertEqual(person.name, "User A") self.assertRaises( InvalidQueryError, self.Person.objects(name="User A").with_id, person1.id) def test_find_only_one(self): """Ensure that a query using ``get`` returns at most one result. """ # Try retrieving when no objects exists self.assertRaises(DoesNotExist, self.Person.objects.get) self.assertRaises(self.Person.DoesNotExist, self.Person.objects.get) person1 = self.Person(name="User A", age=20) person1.save() person2 = self.Person(name="User B", age=30) person2.save() # Retrieve the first person from the database self.assertRaises(MultipleObjectsReturned, self.Person.objects.get) self.assertRaises(self.Person.MultipleObjectsReturned, self.Person.objects.get) # Use a query to filter the people found to just person2 person = self.Person.objects.get(age=30) self.assertEqual(person.name, "User B") person = self.Person.objects.get(age__lt=30) self.assertEqual(person.name, "User A") def test_find_array_position(self): """Ensure that query by array position works. 
""" class Comment(EmbeddedDocument): name = StringField() class Post(EmbeddedDocument): comments = ListField(EmbeddedDocumentField(Comment)) class Blog(Document): tags = ListField(StringField()) posts = ListField(EmbeddedDocumentField(Post)) Blog.drop_collection() Blog.objects.create(tags=['a', 'b']) self.assertEqual(Blog.objects(tags__0='a').count(), 1) self.assertEqual(Blog.objects(tags__0='b').count(), 0) self.assertEqual(Blog.objects(tags__1='a').count(), 0) self.assertEqual(Blog.objects(tags__1='b').count(), 1) Blog.drop_collection() comment1 = Comment(name='testa') comment2 = Comment(name='testb') post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) blog1 = Blog.objects.create(posts=[post1, post2]) blog2 = Blog.objects.create(posts=[post2, post1]) blog = Blog.objects(posts__0__comments__0__name='testa').get() self.assertEqual(blog, blog1) query = Blog.objects(posts__1__comments__1__name='testb') self.assertEqual(query.count(), 2) query = Blog.objects(posts__1__comments__1__name='testa') self.assertEqual(query.count(), 0) query = Blog.objects(posts__0__comments__1__name='testa') self.assertEqual(query.count(), 0) Blog.drop_collection() def test_none(self): class A(Document): s = StringField() A.drop_collection() A().save() self.assertEqual(list(A.objects.none()), []) self.assertEqual(list(A.objects.none().all()), []) def test_chaining(self): class A(Document): s = StringField() class B(Document): ref = ReferenceField(A) boolfield = BooleanField(default=False) A.drop_collection() B.drop_collection() a1 = A(s="test1").save() a2 = A(s="test2").save() B(ref=a1, boolfield=True).save() # Works q1 = B.objects.filter(ref__in=[a1, a2], ref=a1)._query # Doesn't work q2 = B.objects.filter(ref__in=[a1, a2]) q2 = q2.filter(ref=a1)._query self.assertEqual(q1, q2) a_objects = A.objects(s='test1') query = B.objects(ref__in=a_objects) query = query.filter(boolfield=True) self.assertEqual(query.count(), 1) def test_update_write_concern(self): """Test that passing write_concern works""" self.Person.drop_collection() write_concern = {"fsync": True} author = self.Person.objects.create(name='Test User') author.save(write_concern=write_concern) result = self.Person.objects.update( set__name='Ross', write_concern={"w": 1}) self.assertEqual(result, 1) result = self.Person.objects.update( set__name='Ross', write_concern={"w": 0}) self.assertEqual(result, None) result = self.Person.objects.update_one( set__name='Test User', write_concern={"w": 1}) self.assertEqual(result, 1) result = self.Person.objects.update_one( set__name='Test User', write_concern={"w": 0}) self.assertEqual(result, None) def test_update_update_has_a_value(self): """Test to ensure that update is passed a value to update to""" self.Person.drop_collection() author = self.Person(name='Test User') author.save() def update_raises(): self.Person.objects(pk=author.pk).update({}) def update_one_raises(): self.Person.objects(pk=author.pk).update_one({}) self.assertRaises(OperationError, update_raises) self.assertRaises(OperationError, update_one_raises) def test_update_array_position(self): """Ensure that updating by array position works. Check update() and update_one() can take syntax like: set__posts__1__comments__1__name="testc" Check that it only works for ListFields. 
""" class Comment(EmbeddedDocument): name = StringField() class Post(EmbeddedDocument): comments = ListField(EmbeddedDocumentField(Comment)) class Blog(Document): tags = ListField(StringField()) posts = ListField(EmbeddedDocumentField(Post)) Blog.drop_collection() comment1 = Comment(name='testa') comment2 = Comment(name='testb') post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) Blog.objects.create(posts=[post1, post2]) Blog.objects.create(posts=[post2, post1]) # Update all of the first comments of second posts of all blogs Blog.objects().update(set__posts__1__comments__0__name="testc") testc_blogs = Blog.objects(posts__1__comments__0__name="testc") self.assertEqual(testc_blogs.count(), 2) Blog.drop_collection() Blog.objects.create(posts=[post1, post2]) Blog.objects.create(posts=[post2, post1]) # Update only the first blog returned by the query Blog.objects().update_one( set__posts__1__comments__1__name="testc") testc_blogs = Blog.objects(posts__1__comments__1__name="testc") self.assertEqual(testc_blogs.count(), 1) # Check that using this indexing syntax on a non-list fails def non_list_indexing(): Blog.objects().update(set__posts__1__comments__0__name__1="asdf") self.assertRaises(InvalidQueryError, non_list_indexing) Blog.drop_collection() def test_update_using_positional_operator(self): """Ensure that the list fields can be updated using the positional operator.""" class Comment(EmbeddedDocument): by = StringField() votes = IntField() class BlogPost(Document): title = StringField() comments = ListField(EmbeddedDocumentField(Comment)) BlogPost.drop_collection() c1 = Comment(by="joe", votes=3) c2 = Comment(by="jane", votes=7) BlogPost(title="ABC", comments=[c1, c2]).save() BlogPost.objects(comments__by="jane").update(inc__comments__S__votes=1) post = BlogPost.objects.first() self.assertEqual(post.comments[1].by, 'jane') self.assertEqual(post.comments[1].votes, 8) def test_update_using_positional_operator_matches_first(self): # Currently the $ operator only applies to the first matched item in # the query class Simple(Document): x = ListField() Simple.drop_collection() Simple(x=[1, 2, 3, 2]).save() Simple.objects(x=2).update(inc__x__S=1) simple = Simple.objects.first() self.assertEqual(simple.x, [1, 3, 3, 2]) Simple.drop_collection() # You can set multiples Simple.drop_collection() Simple(x=[1, 2, 3, 4]).save() Simple(x=[2, 3, 4, 5]).save() Simple(x=[3, 4, 5, 6]).save() Simple(x=[4, 5, 6, 7]).save() Simple.objects(x=3).update(set__x__S=0) s = Simple.objects() self.assertEqual(s[0].x, [1, 2, 0, 4]) self.assertEqual(s[1].x, [2, 0, 4, 5]) self.assertEqual(s[2].x, [0, 4, 5, 6]) self.assertEqual(s[3].x, [4, 5, 6, 7]) # Using "$unset" with an expression like this "array.$" will result in # the array item becoming None, not being removed. Simple.drop_collection() Simple(x=[1, 2, 3, 4, 3, 2, 3, 4]).save() Simple.objects(x=3).update(unset__x__S=1) simple = Simple.objects.first() self.assertEqual(simple.x, [1, 2, None, 4, 3, 2, 3, 4]) # Nested updates arent supported yet.. 
def update_nested(): Simple.drop_collection() Simple(x=[{'test': [1, 2, 3, 4]}]).save() Simple.objects(x__test=2).update(set__x__S__test__S=3) self.assertEqual(simple.x, [1, 2, 3, 4]) self.assertRaises(OperationError, update_nested) Simple.drop_collection() def test_update_using_positional_operator_embedded_document(self): """Ensure that the embedded documents can be updated using the positional operator.""" class Vote(EmbeddedDocument): score = IntField() class Comment(EmbeddedDocument): by = StringField() votes = EmbeddedDocumentField(Vote) class BlogPost(Document): title = StringField() comments = ListField(EmbeddedDocumentField(Comment)) BlogPost.drop_collection() c1 = Comment(by="joe", votes=Vote(score=3)) c2 = Comment(by="jane", votes=Vote(score=7)) BlogPost(title="ABC", comments=[c1, c2]).save() BlogPost.objects(comments__by="joe").update( set__comments__S__votes=Vote(score=4)) post = BlogPost.objects.first() self.assertEqual(post.comments[0].by, 'joe') self.assertEqual(post.comments[0].votes.score, 4) def test_update_min_max(self): class Scores(Document): high_score = IntField() low_score = IntField() scores = Scores(high_score=800, low_score=200) scores.save() Scores.objects(id=scores.id).update(min__low_score=150) self.assertEqual(Scores.objects(id=scores.id).get().low_score, 150) Scores.objects(id=scores.id).update(min__low_score=250) self.assertEqual(Scores.objects(id=scores.id).get().low_score, 150) def test_updates_can_have_match_operators(self): class Comment(EmbeddedDocument): content = StringField() name = StringField(max_length=120) vote = IntField() class Post(Document): title = StringField(required=True) tags = ListField(StringField()) comments = ListField(EmbeddedDocumentField("Comment")) Post.drop_collection() comm1 = Comment(content="very funny indeed", name="John S", vote=1) comm2 = Comment(content="kind of funny", name="Mark P", vote=0) Post(title='Fun with MongoEngine', tags=['mongodb', 'mongoengine'], comments=[comm1, comm2]).save() Post.objects().update_one(pull__comments__vote__lt=1) self.assertEqual(1, len(Post.objects.first().comments)) def test_mapfield_update(self): """Ensure that the MapField can be updated.""" class Member(EmbeddedDocument): gender = StringField() age = IntField() class Club(Document): members = MapField(EmbeddedDocumentField(Member)) Club.drop_collection() club = Club() club.members['John'] = Member(gender="M", age=13) club.save() Club.objects().update( set__members={"John": Member(gender="F", age=14)}) club = Club.objects().first() self.assertEqual(club.members['John'].gender, "F") self.assertEqual(club.members['John'].age, 14) def test_dictfield_update(self): """Ensure that the DictField can be updated.""" class Club(Document): members = DictField() club = Club() club.members['John'] = dict(gender="M", age=13) club.save() Club.objects().update( set__members={"John": dict(gender="F", age=14)}) club = Club.objects().first() self.assertEqual(club.members['John']['gender'], "F") self.assertEqual(club.members['John']['age'], 14) def test_update_results(self): self.Person.drop_collection() result = self.Person(name="Bob", age=25).update( upsert=True, full_result=True) self.assertTrue(isinstance(result, dict)) self.assertTrue("upserted" in result) self.assertFalse(result["updatedExisting"]) bob = self.Person.objects.first() result = bob.update(set__age=30, full_result=True) self.assertTrue(isinstance(result, dict)) self.assertTrue(result["updatedExisting"]) self.Person(name="Bob", age=20).save() result = 
self.Person.objects(name="Bob").update( set__name="bobby", multi=True) self.assertEqual(result, 2) def test_update_validate(self): class EmDoc(EmbeddedDocument): str_f = StringField() class Doc(Document): str_f = StringField() dt_f = DateTimeField() cdt_f = ComplexDateTimeField() ed_f = EmbeddedDocumentField(EmDoc) self.assertRaises(ValidationError, Doc.objects().update, str_f=1, upsert=True) self.assertRaises(ValidationError, Doc.objects().update, dt_f="datetime", upsert=True) self.assertRaises(ValidationError, Doc.objects().update, ed_f__str_f=1, upsert=True) def test_update_related_models( self ): class TestPerson( Document ): name = StringField() class TestOrganization( Document ): name = StringField() owner = ReferenceField( TestPerson ) TestPerson.drop_collection() TestOrganization.drop_collection() p = TestPerson( name='p1' ) p.save() o = TestOrganization( name='o1' ) o.save() o.owner = p p.name = 'p2' self.assertEqual( o._get_changed_fields(), [ 'owner' ] ) self.assertEqual( p._get_changed_fields(), [ 'name' ] ) o.save() self.assertEqual( o._get_changed_fields(), [] ) self.assertEqual( p._get_changed_fields(), [ 'name' ] ) # Fails; it's empty # This will do NOTHING at all, even though we changed the name p.save() p.reload() self.assertEqual( p.name, 'p2' ) # Fails; it's still `p1` def test_upsert(self): self.Person.drop_collection() self.Person.objects( pk=ObjectId(), name="Bob", age=30).update(upsert=True) bob = self.Person.objects.first() self.assertEqual("Bob", bob.name) self.assertEqual(30, bob.age) def test_upsert_one(self): self.Person.drop_collection() bob = self.Person.objects(name="Bob", age=30).upsert_one() self.assertEqual("Bob", bob.name) self.assertEqual(30, bob.age) bob.name = "Bobby" bob.save() bobby = self.Person.objects(name="Bobby", age=30).upsert_one() self.assertEqual("Bobby", bobby.name) self.assertEqual(30, bobby.age) self.assertEqual(bob.id, bobby.id) def test_set_on_insert(self): self.Person.drop_collection() self.Person.objects(pk=ObjectId()).update( set__name='Bob', set_on_insert__age=30, upsert=True) bob = self.Person.objects.first() self.assertEqual("Bob", bob.name) self.assertEqual(30, bob.age) def test_save_and_only_on_fields_with_default(self): class Embed(EmbeddedDocument): field = IntField() class B(Document): meta = {'collection': 'b'} field = IntField(default=1) embed = EmbeddedDocumentField(Embed, default=Embed) embed_no_default = EmbeddedDocumentField(Embed) # Creating {field : 2, embed : {field: 2}, embed_no_default: {field: 2}} val = 2 embed = Embed() embed.field = val record = B() record.field = val record.embed = embed record.embed_no_default = embed record.save() # Checking it was saved correctly record.reload() self.assertEqual(record.field, 2) self.assertEqual(record.embed_no_default.field, 2) self.assertEqual(record.embed.field, 2) # Request only the _id field and save clone = B.objects().only('id').first() clone.save() # Reload the record and see that the embed data is not lost record.reload() self.assertEqual(record.field, 2) self.assertEqual(record.embed_no_default.field, 2) self.assertEqual(record.embed.field, 2) def test_bulk_insert(self): """Ensure that bulk insert works """ class Comment(EmbeddedDocument): name = StringField() class Post(EmbeddedDocument): comments = ListField(EmbeddedDocumentField(Comment)) class Blog(Document): title = StringField(unique=True) tags = ListField(StringField()) posts = ListField(EmbeddedDocumentField(Post)) Blog.drop_collection() # get MongoDB version info connection = get_connection() info = 
connection.test.command('buildInfo') mongodb_version = tuple([int(i) for i in info['version'].split('.')]) # Recreates the collection self.assertEqual(0, Blog.objects.count()) with query_counter() as q: self.assertEqual(q, 0) comment1 = Comment(name='testa') comment2 = Comment(name='testb') post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) blogs = [] for i in xrange(1, 100): blogs.append(Blog(title="post %s" % i, posts=[post1, post2])) Blog.objects.insert(blogs, load_bulk=False) if mongodb_version < (2, 6): self.assertEqual(q, 1) else: # profiling logs each doc now in the bulk op self.assertEqual(q, 99) Blog.drop_collection() Blog.ensure_indexes() with query_counter() as q: self.assertEqual(q, 0) Blog.objects.insert(blogs) if mongodb_version < (2, 6): self.assertEqual(q, 2) # 1 for insert, and 1 for in bulk fetch else: # 99 for insert, and 1 for in bulk fetch self.assertEqual(q, 100) Blog.drop_collection() comment1 = Comment(name='testa') comment2 = Comment(name='testb') post1 = Post(comments=[comment1, comment2]) post2 = Post(comments=[comment2, comment2]) blog1 = Blog(title="code", posts=[post1, post2]) blog2 = Blog(title="mongodb", posts=[post2, post1]) blog1, blog2 = Blog.objects.insert([blog1, blog2]) self.assertEqual(blog1.title, "code") self.assertEqual(blog2.title, "mongodb") self.assertEqual(Blog.objects.count(), 2) # test handles people trying to upsert def throw_operation_error(): blogs = Blog.objects Blog.objects.insert(blogs) self.assertRaises(OperationError, throw_operation_error) # Test can insert new doc new_post = Blog(title="code123", id=ObjectId()) Blog.objects.insert(new_post) # test handles other classes being inserted def throw_operation_error_wrong_doc(): class Author(Document): pass Blog.objects.insert(Author()) self.assertRaises(OperationError, throw_operation_error_wrong_doc) def throw_operation_error_not_a_document(): Blog.objects.insert("HELLO WORLD") self.assertRaises(OperationError, throw_operation_error_not_a_document) Blog.drop_collection() blog1 = Blog(title="code", posts=[post1, post2]) blog1 = Blog.objects.insert(blog1) self.assertEqual(blog1.title, "code") self.assertEqual(Blog.objects.count(), 1) Blog.drop_collection() blog1 = Blog(title="code", posts=[post1, post2]) obj_id = Blog.objects.insert(blog1, load_bulk=False) self.assertEqual(obj_id.__class__.__name__, 'ObjectId') Blog.drop_collection() post3 = Post(comments=[comment1, comment1]) blog1 = Blog(title="foo", posts=[post1, post2]) blog2 = Blog(title="bar", posts=[post2, post3]) blog3 = Blog(title="baz", posts=[post1, post2]) Blog.objects.insert([blog1, blog2]) def throw_operation_error_not_unique(): Blog.objects.insert([blog2, blog3]) self.assertRaises(NotUniqueError, throw_operation_error_not_unique) self.assertEqual(Blog.objects.count(), 2) Blog.objects.insert([blog2, blog3], write_concern={"w": 0, 'continue_on_error': True}) self.assertEqual(Blog.objects.count(), 3) def test_get_changed_fields_query_count(self): class Person(Document): name = StringField() owns = ListField(ReferenceField('Organization')) projects = ListField(ReferenceField('Project')) class Organization(Document): name = StringField() owner = ReferenceField('Person') employees = ListField(ReferenceField('Person')) class Project(Document): name = StringField() Person.drop_collection() Organization.drop_collection() Project.drop_collection() r1 = Project(name="r1").save() r2 = Project(name="r2").save() r3 = Project(name="r3").save() p1 = Person(name="p1", projects=[r1, r2]).save() p2 = 
Person(name="p2", projects=[r2, r3]).save() o1 = Organization(name="o1", employees=[p1]).save() with query_counter() as q: self.assertEqual(q, 0) fresh_o1 = Organization.objects.get(id=o1.id) self.assertEqual(1, q) fresh_o1._get_changed_fields() self.assertEqual(1, q) with query_counter() as q: self.assertEqual(q, 0) fresh_o1 = Organization.objects.get(id=o1.id) fresh_o1.save() # No changes, does nothing self.assertEqual(q, 1) with query_counter() as q: self.assertEqual(q, 0) fresh_o1 = Organization.objects.get(id=o1.id) fresh_o1.save(cascade=False) # No changes, does nothing self.assertEqual(q, 1) with query_counter() as q: self.assertEqual(q, 0) fresh_o1 = Organization.objects.get(id=o1.id) fresh_o1.employees.append(p2) # Dereferences fresh_o1.save(cascade=False) # Saves self.assertEqual(q, 3) @skip_pymongo3 def test_slave_okay(self): """Ensures that a query can take slave_okay syntax. Useless with PyMongo 3+ as well as with MongoDB 3+. """ person1 = self.Person(name="User A", age=20) person1.save() person2 = self.Person(name="User B", age=30) person2.save() # Retrieve the first person from the database person = self.Person.objects.slave_okay(True).first() self.assertTrue(isinstance(person, self.Person)) self.assertEqual(person.name, "User A") self.assertEqual(person.age, 20) @skip_older_mongodb @skip_pymongo3 def test_cursor_args(self): """Ensures the cursor args can be set as expected """ p = self.Person.objects # Check default self.assertEqual(p._cursor_args, {'snapshot': False, 'slave_okay': False, 'timeout': True}) p = p.snapshot(False).slave_okay(False).timeout(False) self.assertEqual(p._cursor_args, {'snapshot': False, 'slave_okay': False, 'timeout': False}) p = p.snapshot(True).slave_okay(False).timeout(False) self.assertEqual(p._cursor_args, {'snapshot': True, 'slave_okay': False, 'timeout': False}) p = p.snapshot(True).slave_okay(True).timeout(False) self.assertEqual(p._cursor_args, {'snapshot': True, 'slave_okay': True, 'timeout': False}) p = p.snapshot(True).slave_okay(True).timeout(True) self.assertEqual(p._cursor_args, {'snapshot': True, 'slave_okay': True, 'timeout': True}) def test_repeated_iteration(self): """Ensure that QuerySet rewinds itself one iteration finishes. """ self.Person(name='Person 1').save() self.Person(name='Person 2').save() queryset = self.Person.objects people1 = [person for person in queryset] people2 = [person for person in queryset] # Check that it still works even if iteration is interrupted. for person in queryset: break people3 = [person for person in queryset] self.assertEqual(people1, people2) self.assertEqual(people1, people3) def test_repr(self): """Test repr behavior isnt destructive""" class Doc(Document): number = IntField() def __repr__(self): return "<Doc: %s>" % self.number Doc.drop_collection() for i in xrange(1000): Doc(number=i).save() docs = Doc.objects.order_by('number') self.assertEqual(docs.count(), 1000) docs_string = "%s" % docs self.assertTrue("Doc: 0" in docs_string) self.assertEqual(docs.count(), 1000) self.assertTrue('(remaining elements truncated)' in "%s" % docs) # Limit and skip docs = docs[1:4] self.assertEqual('[<Doc: 1>, <Doc: 2>, <Doc: 3>]', "%s" % docs) self.assertEqual(docs.count(with_limit_and_skip=True), 3) for doc in docs: self.assertEqual('.. queryset mid-iteration ..', repr(docs)) def test_regex_query_shortcuts(self): """Ensure that contains, startswith, endswith, etc work. 
""" person = self.Person(name='Guido van Rossum') person.save() # Test contains obj = self.Person.objects(name__contains='van').first() self.assertEqual(obj, person) obj = self.Person.objects(name__contains='Van').first() self.assertEqual(obj, None) # Test icontains obj = self.Person.objects(name__icontains='Van').first() self.assertEqual(obj, person) # Test startswith obj = self.Person.objects(name__startswith='Guido').first() self.assertEqual(obj, person) obj = self.Person.objects(name__startswith='guido').first() self.assertEqual(obj, None) # Test istartswith obj = self.Person.objects(name__istartswith='guido').first() self.assertEqual(obj, person) # Test endswith obj = self.Person.objects(name__endswith='Rossum').first() self.assertEqual(obj, person) obj = self.Person.objects(name__endswith='rossuM').first() self.assertEqual(obj, None) # Test iendswith obj = self.Person.objects(name__iendswith='rossuM').first() self.assertEqual(obj, person) # Test exact obj = self.Person.objects(name__exact='Guido van Rossum').first() self.assertEqual(obj, person) obj = self.Person.objects(name__exact='Guido van rossum').first() self.assertEqual(obj, None) obj = self.Person.objects(name__exact='Guido van Rossu').first() self.assertEqual(obj, None) # Test iexact obj = self.Person.objects(name__iexact='gUIDO VAN rOSSUM').first() self.assertEqual(obj, person) obj = self.Person.objects(name__iexact='gUIDO VAN rOSSU').first() self.assertEqual(obj, None) # Test unsafe expressions person = self.Person(name='Guido van Rossum [.\'Geek\']') person.save() obj = self.Person.objects(name__icontains='[.\'Geek').first() self.assertEqual(obj, person) def test_not(self): """Ensure that the __not operator works as expected. """ alice = self.Person(name='Alice', age=25) alice.save() obj = self.Person.objects(name__iexact='alice').first() self.assertEqual(obj, alice) obj = self.Person.objects(name__not__iexact='alice').first() self.assertEqual(obj, None) def test_filter_chaining(self): """Ensure filters can be chained together. 
""" class Blog(Document): id = StringField(unique=True, primary_key=True) class BlogPost(Document): blog = ReferenceField(Blog) title = StringField() is_published = BooleanField() published_date = DateTimeField() @queryset_manager def published(doc_cls, queryset): return queryset(is_published=True) Blog.drop_collection() BlogPost.drop_collection() blog_1 = Blog(id="1") blog_2 = Blog(id="2") blog_3 = Blog(id="3") blog_1.save() blog_2.save() blog_3.save() blog_post_1 = BlogPost(blog=blog_1, title="Blog Post #1", is_published=True, published_date=datetime(2010, 1, 5, 0, 0, 0)) blog_post_2 = BlogPost(blog=blog_2, title="Blog Post #2", is_published=True, published_date=datetime(2010, 1, 6, 0, 0, 0)) blog_post_3 = BlogPost(blog=blog_3, title="Blog Post #3", is_published=True, published_date=datetime(2010, 1, 7, 0, 0, 0)) blog_post_1.save() blog_post_2.save() blog_post_3.save() # find all published blog posts before 2010-01-07 published_posts = BlogPost.published() published_posts = published_posts.filter( published_date__lt=datetime(2010, 1, 7, 0, 0, 0)) self.assertEqual(published_posts.count(), 2) blog_posts = BlogPost.objects blog_posts = blog_posts.filter(blog__in=[blog_1, blog_2]) blog_posts = blog_posts.filter(blog=blog_3) self.assertEqual(blog_posts.count(), 0) BlogPost.drop_collection() Blog.drop_collection() def assertSequence(self, qs, expected): qs = list(qs) expected = list(expected) self.assertEqual(len(qs), len(expected)) for i in xrange(len(qs)): self.assertEqual(qs[i], expected[i]) def test_ordering(self): """Ensure default ordering is applied and can be overridden. """ class BlogPost(Document): title = StringField() published_date = DateTimeField() meta = { 'ordering': ['-published_date'] } BlogPost.drop_collection() blog_post_1 = BlogPost(title="Blog Post #1", published_date=datetime(2010, 1, 5, 0, 0, 0)) blog_post_2 = BlogPost(title="Blog Post #2", published_date=datetime(2010, 1, 6, 0, 0, 0)) blog_post_3 = BlogPost(title="Blog Post #3", published_date=datetime(2010, 1, 7, 0, 0, 0)) blog_post_1.save() blog_post_2.save() blog_post_3.save() # get the "first" BlogPost using default ordering # from BlogPost.meta.ordering expected = [blog_post_3, blog_post_2, blog_post_1] self.assertSequence(BlogPost.objects.all(), expected) # override default ordering, order BlogPosts by "published_date" qs = BlogPost.objects.order_by("+published_date") expected = [blog_post_1, blog_post_2, blog_post_3] self.assertSequence(qs, expected) def test_clear_ordering(self): """ Ensure that the default ordering can be cleared by calling order_by(). """ class BlogPost(Document): title = StringField() published_date = DateTimeField() meta = { 'ordering': ['-published_date'] } BlogPost.drop_collection() with db_ops_tracker() as q: BlogPost.objects.filter(title='whatever').first() self.assertEqual(len(q.get_ops()), 1) self.assertEqual( q.get_ops()[0]['query']['$orderby'], {u'published_date': -1}) with db_ops_tracker() as q: BlogPost.objects.filter(title='whatever').order_by().first() self.assertEqual(len(q.get_ops()), 1) print q.get_ops()[0]['query'] self.assertFalse('$orderby' in q.get_ops()[0]['query']) def test_no_ordering_for_get(self): """ Ensure that Doc.objects.get doesn't use any ordering. 
""" class BlogPost(Document): title = StringField() published_date = DateTimeField() meta = { 'ordering': ['-published_date'] } BlogPost.objects.create( title='whatever', published_date=datetime.utcnow()) with db_ops_tracker() as q: BlogPost.objects.get(title='whatever') self.assertEqual(len(q.get_ops()), 1) self.assertFalse('$orderby' in q.get_ops()[0]['query']) # Ordering should be ignored for .get even if we set it explicitly with db_ops_tracker() as q: BlogPost.objects.order_by('-title').get(title='whatever') self.assertEqual(len(q.get_ops()), 1) self.assertFalse('$orderby' in q.get_ops()[0]['query']) def test_find_embedded(self): """Ensure that an embedded document is properly returned from a query. """ class User(EmbeddedDocument): name = StringField() class BlogPost(Document): content = StringField() author = EmbeddedDocumentField(User) BlogPost.drop_collection() post = BlogPost(content='Had a good coffee today...') post.author = User(name='Test User') post.save() result = BlogPost.objects.first() self.assertTrue(isinstance(result.author, User)) self.assertEqual(result.author.name, 'Test User') BlogPost.drop_collection() def test_find_dict_item(self): """Ensure that DictField items may be found. """ class BlogPost(Document): info = DictField() BlogPost.drop_collection() post = BlogPost(info={'title': 'test'}) post.save() post_obj = BlogPost.objects(info__title='test').first() self.assertEqual(post_obj.id, post.id) BlogPost.drop_collection() def test_exec_js_query(self): """Ensure that queries are properly formed for use in exec_js. """ class BlogPost(Document): hits = IntField() published = BooleanField() BlogPost.drop_collection() post1 = BlogPost(hits=1, published=False) post1.save() post2 = BlogPost(hits=1, published=True) post2.save() post3 = BlogPost(hits=1, published=True) post3.save() js_func = """ function(hitsField) { var count = 0; db[collection].find(query).forEach(function(doc) { count += doc[hitsField]; }); return count; } """ # Ensure that normal queries work c = BlogPost.objects(published=True).exec_js(js_func, 'hits') self.assertEqual(c, 2) c = BlogPost.objects(published=False).exec_js(js_func, 'hits') self.assertEqual(c, 1) BlogPost.drop_collection() def test_exec_js_field_sub(self): """Ensure that field substitutions occur properly in exec_js functions. 
""" class Comment(EmbeddedDocument): content = StringField(db_field='body') class BlogPost(Document): name = StringField(db_field='doc-name') comments = ListField(EmbeddedDocumentField(Comment), db_field='cmnts') BlogPost.drop_collection() comments1 = [Comment(content='cool'), Comment(content='yay')] post1 = BlogPost(name='post1', comments=comments1) post1.save() comments2 = [Comment(content='nice stuff')] post2 = BlogPost(name='post2', comments=comments2) post2.save() code = """ function getComments() { var comments = []; db[collection].find(query).forEach(function(doc) { var docComments = doc[~comments]; for (var i = 0; i < docComments.length; i++) { comments.push({ 'document': doc[~name], 'comment': doc[~comments][i][~comments.content] }); } }); return comments; } """ sub_code = BlogPost.objects._sub_js_fields(code) code_chunks = ['doc["cmnts"];', 'doc["doc-name"],', 'doc["cmnts"][i]["body"]'] for chunk in code_chunks: self.assertTrue(chunk in sub_code) results = BlogPost.objects.exec_js(code) expected_results = [ {u'comment': u'cool', u'document': u'post1'}, {u'comment': u'yay', u'document': u'post1'}, {u'comment': u'nice stuff', u'document': u'post2'}, ] self.assertEqual(results, expected_results) # Test template style code = "{{~comments.content}}" sub_code = BlogPost.objects._sub_js_fields(code) self.assertEqual("cmnts.body", sub_code) BlogPost.drop_collection() def test_delete(self): """Ensure that documents are properly deleted from the database. """ self.Person(name="User A", age=20).save() self.Person(name="User B", age=30).save() self.Person(name="User C", age=40).save() self.assertEqual(self.Person.objects.count(), 3) self.Person.objects(age__lt=30).delete() self.assertEqual(self.Person.objects.count(), 2) self.Person.objects.delete() self.assertEqual(self.Person.objects.count(), 0) def test_reverse_delete_rule_cascade(self): """Ensure cascading deletion of referring documents from the database. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, reverse_delete_rule=CASCADE) BlogPost.drop_collection() me = self.Person(name='Test User') me.save() someoneelse = self.Person(name='Some-one Else') someoneelse.save() BlogPost(content='Watching TV', author=me).save() BlogPost(content='Chilling out', author=me).save() BlogPost(content='Pro Testing', author=someoneelse).save() self.assertEqual(3, BlogPost.objects.count()) self.Person.objects(name='Test User').delete() self.assertEqual(1, BlogPost.objects.count()) def test_reverse_delete_rule_cascade_on_abstract_document(self): """Ensure cascading deletion of referring documents from the database does not fail on abstract document. 
""" class AbstractBlogPost(Document): meta = {'abstract': True} author = ReferenceField(self.Person, reverse_delete_rule=CASCADE) class BlogPost(AbstractBlogPost): content = StringField() BlogPost.drop_collection() me = self.Person(name='Test User') me.save() someoneelse = self.Person(name='Some-one Else') someoneelse.save() BlogPost(content='Watching TV', author=me).save() BlogPost(content='Chilling out', author=me).save() BlogPost(content='Pro Testing', author=someoneelse).save() self.assertEqual(3, BlogPost.objects.count()) self.Person.objects(name='Test User').delete() self.assertEqual(1, BlogPost.objects.count()) def test_reverse_delete_rule_cascade_cycle(self): """Ensure reference cascading doesn't loop if reference graph isn't a tree """ class Dummy(Document): reference = ReferenceField('self', reverse_delete_rule=CASCADE) base = Dummy().save() other = Dummy(reference=base).save() base.reference = other base.save() base.delete() self.assertRaises(DoesNotExist, base.reload) self.assertRaises(DoesNotExist, other.reload) def test_reverse_delete_rule_cascade_complex_cycle(self): """Ensure reference cascading doesn't loop if reference graph isn't a tree """ class Category(Document): name = StringField() class Dummy(Document): reference = ReferenceField('self', reverse_delete_rule=CASCADE) cat = ReferenceField(Category, reverse_delete_rule=CASCADE) cat = Category(name='cat').save() base = Dummy(cat=cat).save() other = Dummy(reference=base).save() other2 = Dummy(reference=other).save() base.reference = other base.save() cat.delete() self.assertRaises(DoesNotExist, base.reload) self.assertRaises(DoesNotExist, other.reload) self.assertRaises(DoesNotExist, other2.reload) def test_reverse_delete_rule_cascade_self_referencing(self): """Ensure self-referencing CASCADE deletes do not result in infinite loop """ class Category(Document): name = StringField() parent = ReferenceField('self', reverse_delete_rule=CASCADE) Category.drop_collection() num_children = 3 base = Category(name='Root') base.save() # Create a simple parent-child tree for i in range(num_children): child_name = 'Child-%i' % i child = Category(name=child_name, parent=base) child.save() for i in range(num_children): child_child_name = 'Child-Child-%i' % i child_child = Category(name=child_child_name, parent=child) child_child.save() tree_size = 1 + num_children + (num_children * num_children) self.assertEqual(tree_size, Category.objects.count()) self.assertEqual(num_children, Category.objects(parent=base).count()) # The delete should effectively wipe out the Category collection # without resulting in infinite parent-child cascade recursion base.delete() self.assertEqual(0, Category.objects.count()) def test_reverse_delete_rule_nullify(self): """Ensure nullification of references to deleted documents. 
""" class Category(Document): name = StringField() class BlogPost(Document): content = StringField() category = ReferenceField(Category, reverse_delete_rule=NULLIFY) BlogPost.drop_collection() Category.drop_collection() lameness = Category(name='Lameness') lameness.save() post = BlogPost(content='Watching TV', category=lameness) post.save() self.assertEqual(1, BlogPost.objects.count()) self.assertEqual('Lameness', BlogPost.objects.first().category.name) Category.objects.delete() self.assertEqual(1, BlogPost.objects.count()) self.assertEqual(None, BlogPost.objects.first().category) def test_reverse_delete_rule_nullify_on_abstract_document(self): """Ensure nullification of references to deleted documents when reference is on an abstract document. """ class AbstractBlogPost(Document): meta = {'abstract': True} author = ReferenceField(self.Person, reverse_delete_rule=NULLIFY) class BlogPost(AbstractBlogPost): content = StringField() BlogPost.drop_collection() me = self.Person(name='Test User') me.save() someoneelse = self.Person(name='Some-one Else') someoneelse.save() BlogPost(content='Watching TV', author=me).save() self.assertEqual(1, BlogPost.objects.count()) self.assertEqual(me, BlogPost.objects.first().author) self.Person.objects(name='Test User').delete() self.assertEqual(1, BlogPost.objects.count()) self.assertEqual(None, BlogPost.objects.first().author) def test_reverse_delete_rule_deny(self): """Ensure deletion gets denied on documents that still have references to them. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, reverse_delete_rule=DENY) BlogPost.drop_collection() self.Person.drop_collection() me = self.Person(name='Test User') me.save() post = BlogPost(content='Watching TV', author=me) post.save() self.assertRaises(OperationError, self.Person.objects.delete) def test_reverse_delete_rule_deny_on_abstract_document(self): """Ensure deletion gets denied on documents that still have references to them, when reference is on an abstract document. """ class AbstractBlogPost(Document): meta = {'abstract': True} author = ReferenceField(self.Person, reverse_delete_rule=DENY) class BlogPost(AbstractBlogPost): content = StringField() BlogPost.drop_collection() me = self.Person(name='Test User') me.save() BlogPost(content='Watching TV', author=me).save() self.assertEqual(1, BlogPost.objects.count()) self.assertRaises(OperationError, self.Person.objects.delete) def test_reverse_delete_rule_pull(self): """Ensure pulling of references to deleted documents. """ class BlogPost(Document): content = StringField() authors = ListField(ReferenceField(self.Person, reverse_delete_rule=PULL)) BlogPost.drop_collection() self.Person.drop_collection() me = self.Person(name='Test User') me.save() someoneelse = self.Person(name='Some-one Else') someoneelse.save() post = BlogPost(content='Watching TV', authors=[me, someoneelse]) post.save() another = BlogPost(content='Chilling Out', authors=[someoneelse]) another.save() someoneelse.delete() post.reload() another.reload() self.assertEqual(post.authors, [me]) self.assertEqual(another.authors, []) def test_reverse_delete_rule_pull_on_abstract_documents(self): """Ensure pulling of references to deleted documents when reference is defined on an abstract document.. 
""" class AbstractBlogPost(Document): meta = {'abstract': True} authors = ListField(ReferenceField(self.Person, reverse_delete_rule=PULL)) class BlogPost(AbstractBlogPost): content = StringField() BlogPost.drop_collection() self.Person.drop_collection() me = self.Person(name='Test User') me.save() someoneelse = self.Person(name='Some-one Else') someoneelse.save() post = BlogPost(content='Watching TV', authors=[me, someoneelse]) post.save() another = BlogPost(content='Chilling Out', authors=[someoneelse]) another.save() someoneelse.delete() post.reload() another.reload() self.assertEqual(post.authors, [me]) self.assertEqual(another.authors, []) def test_delete_with_limits(self): class Log(Document): pass Log.drop_collection() for i in xrange(10): Log().save() Log.objects()[3:5].delete() self.assertEqual(8, Log.objects.count()) def test_delete_with_limit_handles_delete_rules(self): """Ensure cascading deletion of referring documents from the database. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, reverse_delete_rule=CASCADE) BlogPost.drop_collection() me = self.Person(name='Test User') me.save() someoneelse = self.Person(name='Some-one Else') someoneelse.save() BlogPost(content='Watching TV', author=me).save() BlogPost(content='Chilling out', author=me).save() BlogPost(content='Pro Testing', author=someoneelse).save() self.assertEqual(3, BlogPost.objects.count()) self.Person.objects()[:1].delete() self.assertEqual(1, BlogPost.objects.count()) def test_limit_with_write_concern_0(self): p1 = self.Person(name="User Z", age=20).save() del_result = p1.delete(w=0) self.assertEqual(None, del_result) def test_reference_field_find(self): """Ensure cascading deletion of referring documents from the database. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person) BlogPost.drop_collection() self.Person.drop_collection() me = self.Person(name='Test User').save() BlogPost(content="test 123", author=me).save() self.assertEqual(1, BlogPost.objects(author=me).count()) self.assertEqual(1, BlogPost.objects(author=me.pk).count()) self.assertEqual(1, BlogPost.objects(author="%s" % me.pk).count()) self.assertEqual(1, BlogPost.objects(author__in=[me]).count()) self.assertEqual(1, BlogPost.objects(author__in=[me.pk]).count()) self.assertEqual( 1, BlogPost.objects(author__in=["%s" % me.pk]).count()) def test_reference_field_find_dbref(self): """Ensure cascading deletion of referring documents from the database. """ class BlogPost(Document): content = StringField() author = ReferenceField(self.Person, dbref=True) BlogPost.drop_collection() self.Person.drop_collection() me = self.Person(name='Test User').save() BlogPost(content="test 123", author=me).save() self.assertEqual(1, BlogPost.objects(author=me).count()) self.assertEqual(1, BlogPost.objects(author=me.pk).count()) self.assertEqual(1, BlogPost.objects(author="%s" % me.pk).count()) self.assertEqual(1, BlogPost.objects(author__in=[me]).count()) self.assertEqual(1, BlogPost.objects(author__in=[me.pk]).count()) self.assertEqual( 1, BlogPost.objects(author__in=["%s" % me.pk]).count()) def test_update(self): """Ensure that atomic updates work properly. 
""" class BlogPost(Document): name = StringField() title = StringField() hits = IntField() tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(name="Test Post", hits=5, tags=['test']) post.save() BlogPost.objects.update(set__hits=10) post.reload() self.assertEqual(post.hits, 10) BlogPost.objects.update_one(inc__hits=1) post.reload() self.assertEqual(post.hits, 11) BlogPost.objects.update_one(dec__hits=1) post.reload() self.assertEqual(post.hits, 10) BlogPost.objects.update(push__tags='mongo') post.reload() self.assertTrue('mongo' in post.tags) BlogPost.objects.update_one(push_all__tags=['db', 'nosql']) post.reload() self.assertTrue('db' in post.tags and 'nosql' in post.tags) tags = post.tags[:-1] BlogPost.objects.update(pop__tags=1) post.reload() self.assertEqual(post.tags, tags) BlogPost.objects.update_one(add_to_set__tags='unique') BlogPost.objects.update_one(add_to_set__tags='unique') post.reload() self.assertEqual(post.tags.count('unique'), 1) self.assertNotEqual(post.hits, None) BlogPost.objects.update_one(unset__hits=1) post.reload() self.assertEqual(post.hits, None) BlogPost.drop_collection() def test_update_push_and_pull_add_to_set(self): """Ensure that the 'pull' update operation works correctly. """ class BlogPost(Document): slug = StringField() tags = ListField(StringField()) BlogPost.drop_collection() post = BlogPost(slug="test") post.save() BlogPost.objects.filter(id=post.id).update(push__tags="code") post.reload() self.assertEqual(post.tags, ["code"]) BlogPost.objects.filter(id=post.id).update( push_all__tags=["mongodb", "code"]) post.reload() self.assertEqual(post.tags, ["code", "mongodb", "code"]) BlogPost.objects(slug="test").update(pull__tags="code") post.reload() self.assertEqual(post.tags, ["mongodb"]) BlogPost.objects(slug="test").update( pull_all__tags=["mongodb", "code"]) post.reload() self.assertEqual(post.tags, []) BlogPost.objects(slug="test").update( __raw__={"$addToSet": {"tags": {"$each": ["code", "mongodb", "code"]}}}) post.reload() self.assertEqual(post.tags, ["code", "mongodb"]) def test_add_to_set_each(self): class Item(Document): name = StringField(required=True) description = StringField(max_length=50) parents = ListField(ReferenceField('self')) Item.drop_collection() item = Item(name='test item').save() parent_1 = Item(name='parent 1').save() parent_2 = Item(name='parent 2').save() item.update(add_to_set__parents=[parent_1, parent_2, parent_1]) item.reload() self.assertEqual([parent_1, parent_2], item.parents) def test_pull_nested(self): class Collaborator(EmbeddedDocument): user = StringField() def __unicode__(self): return '%s' % self.user class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = ListField(EmbeddedDocumentField(Collaborator)) Site.drop_collection() c = Collaborator(user='Esteban') s = Site(name="test", collaborators=[c]).save() Site.objects(id=s.id).update_one(pull__collaborators__user='Esteban') self.assertEqual(Site.objects.first().collaborators, []) def pull_all(): Site.objects(id=s.id).update_one( pull_all__collaborators__user=['Ross']) self.assertRaises(InvalidQueryError, pull_all) def test_pull_from_nested_embedded(self): class User(EmbeddedDocument): name = StringField() def __unicode__(self): return '%s' % self.name class Collaborator(EmbeddedDocument): helpful = ListField(EmbeddedDocumentField(User)) unhelpful = ListField(EmbeddedDocumentField(User)) class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = 
EmbeddedDocumentField(Collaborator) Site.drop_collection() c = User(name='Esteban') f = User(name='Frank') s = Site(name="test", collaborators=Collaborator( helpful=[c], unhelpful=[f])).save() Site.objects(id=s.id).update_one(pull__collaborators__helpful=c) self.assertEqual(Site.objects.first().collaborators['helpful'], []) Site.objects(id=s.id).update_one( pull__collaborators__unhelpful={'name': 'Frank'}) self.assertEqual(Site.objects.first().collaborators['unhelpful'], []) def pull_all(): Site.objects(id=s.id).update_one( pull_all__collaborators__helpful__name=['Ross']) self.assertRaises(InvalidQueryError, pull_all) def test_pull_from_nested_mapfield(self): class Collaborator(EmbeddedDocument): user = StringField() def __unicode__(self): return '%s' % self.user class Site(Document): name = StringField(max_length=75, unique=True, required=True) collaborators = MapField( ListField(EmbeddedDocumentField(Collaborator))) Site.drop_collection() c = Collaborator(user='Esteban') f = Collaborator(user='Frank') s = Site(name="test", collaborators={'helpful': [c], 'unhelpful': [f]}) s.save() Site.objects(id=s.id).update_one( pull__collaborators__helpful__user='Esteban') self.assertEqual(Site.objects.first().collaborators['helpful'], []) Site.objects(id=s.id).update_one( pull__collaborators__unhelpful={'user': 'Frank'}) self.assertEqual(Site.objects.first().collaborators['unhelpful'], []) def pull_all(): Site.objects(id=s.id).update_one( pull_all__collaborators__helpful__user=['Ross']) self.assertRaises(InvalidQueryError, pull_all) def test_update_one_pop_generic_reference(self): class BlogTag(Document): name = StringField(required=True) class BlogPost(Document): slug = StringField() tags = ListField(ReferenceField(BlogTag), required=True) BlogPost.drop_collection() BlogTag.drop_collection() tag_1 = BlogTag(name='code') tag_1.save() tag_2 = BlogTag(name='mongodb') tag_2.save() post = BlogPost(slug="test", tags=[tag_1]) post.save() post = BlogPost(slug="test-2", tags=[tag_1, tag_2]) post.save() self.assertEqual(len(post.tags), 2) BlogPost.objects(slug="test-2").update_one(pop__tags=-1) post.reload() self.assertEqual(len(post.tags), 1) BlogPost.drop_collection() BlogTag.drop_collection() def test_editting_embedded_objects(self): class BlogTag(EmbeddedDocument): name = StringField(required=True) class BlogPost(Document): slug = StringField() tags = ListField(EmbeddedDocumentField(BlogTag), required=True) BlogPost.drop_collection() tag_1 = BlogTag(name='code') tag_2 = BlogTag(name='mongodb') post = BlogPost(slug="test", tags=[tag_1]) post.save() post = BlogPost(slug="test-2", tags=[tag_1, tag_2]) post.save() self.assertEqual(len(post.tags), 2) BlogPost.objects(slug="test-2").update_one(set__tags__0__name="python") post.reload() self.assertEqual(post.tags[0].name, 'python') BlogPost.objects(slug="test-2").update_one(pop__tags=-1) post.reload() self.assertEqual(len(post.tags), 1) BlogPost.drop_collection() def test_set_list_embedded_documents(self): class Author(EmbeddedDocument): name = StringField() class Message(Document): title = StringField() authors = ListField(EmbeddedDocumentField('Author')) Message.drop_collection() message = Message(title="hello", authors=[Author(name="Harry")]) message.save() Message.objects(authors__name="Harry").update_one( set__authors__S=Author(name="Ross")) message = message.reload() self.assertEqual(message.authors[0].name, "Ross") Message.objects(authors__name="Ross").update_one( set__authors=[Author(name="Harry"), Author(name="Ross"), Author(name="Adam")]) message = 
message.reload() self.assertEqual(message.authors[0].name, "Harry") self.assertEqual(message.authors[1].name, "Ross") self.assertEqual(message.authors[2].name, "Adam") def test_reload_embedded_docs_instance(self): class SubDoc(EmbeddedDocument): val = IntField() class Doc(Document): embedded = EmbeddedDocumentField(SubDoc) doc = Doc(embedded=SubDoc(val=0)).save() doc.reload() self.assertEqual(doc.pk, doc.embedded._instance.pk) def test_reload_list_embedded_docs_instance(self): class SubDoc(EmbeddedDocument): val = IntField() class Doc(Document): embedded = ListField(EmbeddedDocumentField(SubDoc)) doc = Doc(embedded=[SubDoc(val=0)]).save() doc.reload() self.assertEqual(doc.pk, doc.embedded[0]._instance.pk) def test_order_by(self): """Ensure that QuerySets may be ordered. """ self.Person(name="User B", age=40).save() self.Person(name="User A", age=20).save() self.Person(name="User C", age=30).save() names = [p.name for p in self.Person.objects.order_by('-age')] self.assertEqual(names, ['User B', 'User C', 'User A']) names = [p.name for p in self.Person.objects.order_by('+age')] self.assertEqual(names, ['User A', 'User C', 'User B']) names = [p.name for p in self.Person.objects.order_by('age')] self.assertEqual(names, ['User A', 'User C', 'User B']) ages = [p.age for p in self.Person.objects.order_by('-name')] self.assertEqual(ages, [30, 40, 20]) def test_order_by_optional(self): class BlogPost(Document): title = StringField() published_date = DateTimeField(required=False) BlogPost.drop_collection() blog_post_3 = BlogPost(title="Blog Post #3", published_date=datetime(2010, 1, 6, 0, 0, 0)) blog_post_2 = BlogPost(title="Blog Post #2", published_date=datetime(2010, 1, 5, 0, 0, 0)) blog_post_4 = BlogPost(title="Blog Post #4", published_date=datetime(2010, 1, 7, 0, 0, 0)) blog_post_1 = BlogPost(title="Blog Post #1", published_date=None) blog_post_3.save() blog_post_1.save() blog_post_4.save() blog_post_2.save() expected = [blog_post_1, blog_post_2, blog_post_3, blog_post_4] self.assertSequence(BlogPost.objects.order_by('published_date'), expected) self.assertSequence(BlogPost.objects.order_by('+published_date'), expected) expected.reverse() self.assertSequence(BlogPost.objects.order_by('-published_date'), expected) def test_order_by_list(self): class BlogPost(Document): title = StringField() published_date = DateTimeField(required=False) BlogPost.drop_collection() blog_post_1 = BlogPost(title="A", published_date=datetime(2010, 1, 6, 0, 0, 0)) blog_post_2 = BlogPost(title="B", published_date=datetime(2010, 1, 6, 0, 0, 0)) blog_post_3 = BlogPost(title="C", published_date=datetime(2010, 1, 7, 0, 0, 0)) blog_post_2.save() blog_post_3.save() blog_post_1.save() qs = BlogPost.objects.order_by('published_date', 'title') expected = [blog_post_1, blog_post_2, blog_post_3] self.assertSequence(qs, expected) qs = BlogPost.objects.order_by('-published_date', '-title') expected.reverse() self.assertSequence(qs, expected) def test_order_by_chaining(self): """Ensure that an order_by query chains properly and allows .only() """ self.Person(name="User B", age=40).save() self.Person(name="User A", age=20).save() self.Person(name="User C", age=30).save() only_age = self.Person.objects.order_by('-age').only('age') names = [p.name for p in only_age] ages = [p.age for p in only_age] # The .only('age') clause should mean that all names are None self.assertEqual(names, [None, None, None]) self.assertEqual(ages, [40, 30, 20]) qs = self.Person.objects.all().order_by('-age') qs = qs.limit(10) ages = [p.age for p in qs] 
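# limit() applied after order_by() should leave the descending age order intact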
self.assertEqual(ages, [40, 30, 20]) qs = self.Person.objects.all().limit(10) qs = qs.order_by('-age') ages = [p.age for p in qs] self.assertEqual(ages, [40, 30, 20]) qs = self.Person.objects.all().skip(0) qs = qs.order_by('-age') ages = [p.age for p in qs] self.assertEqual(ages, [40, 30, 20]) def test_confirm_order_by_reference_wont_work(self): """Ordering by reference is not possible. Use map / reduce.. or denormalise""" class Author(Document): author = ReferenceField(self.Person) Author.drop_collection() person_a = self.Person(name="User A", age=20) person_a.save() person_b = self.Person(name="User B", age=40) person_b.save() person_c = self.Person(name="User C", age=30) person_c.save() Author(author=person_a).save() Author(author=person_b).save() Author(author=person_c).save() names = [ a.author.name for a in Author.objects.order_by('-author__age')] self.assertEqual(names, ['User A', 'User B', 'User C']) def test_map_reduce(self): """Ensure map/reduce is both mapping and reducing. """ class BlogPost(Document): title = StringField() tags = ListField(StringField(), db_field='post-tag-list') BlogPost.drop_collection() BlogPost(title="Post #1", tags=['music', 'film', 'print']).save() BlogPost(title="Post #2", tags=['music', 'film']).save() BlogPost(title="Post #3", tags=['film', 'photography']).save() map_f = """ function() { this[~tags].forEach(function(tag) { emit(tag, 1); }); } """ reduce_f = """ function(key, values) { var total = 0; for(var i=0; i<values.length; i++) { total += values[i]; } return total; } """ # run a map/reduce operation spanning all posts results = BlogPost.objects.map_reduce(map_f, reduce_f, "myresults") results = list(results) self.assertEqual(len(results), 4) music = list(filter(lambda r: r.key == "music", results))[0] self.assertEqual(music.value, 2) film = list(filter(lambda r: r.key == "film", results))[0] self.assertEqual(film.value, 3) BlogPost.drop_collection() def test_map_reduce_with_custom_object_ids(self): """Ensure that QuerySet.map_reduce works properly with custom primary keys. 
""" class BlogPost(Document): title = StringField(primary_key=True) tags = ListField(StringField()) post1 = BlogPost(title="Post #1", tags=["mongodb", "mongoengine"]) post2 = BlogPost(title="Post #2", tags=["django", "mongodb"]) post3 = BlogPost(title="Post #3", tags=["hitchcock films"]) post1.save() post2.save() post3.save() self.assertEqual(BlogPost._fields['title'].db_field, '_id') self.assertEqual(BlogPost._meta['id_field'], 'title') map_f = """ function() { emit(this._id, 1); } """ # reduce to a list of tag ids and counts reduce_f = """ function(key, values) { var total = 0; for(var i=0; i<values.length; i++) { total += values[i]; } return total; } """ results = BlogPost.objects.map_reduce(map_f, reduce_f, "myresults") results = list(results) self.assertEqual(results[0].object, post1) self.assertEqual(results[1].object, post2) self.assertEqual(results[2].object, post3) BlogPost.drop_collection() def test_map_reduce_custom_output(self): """ Test map/reduce custom output """ register_connection('test2', 'mongoenginetest2') class Family(Document): id = IntField( primary_key=True) log = StringField() class Person(Document): id = IntField( primary_key=True) name = StringField() age = IntField() family = ReferenceField(Family) Family.drop_collection() Person.drop_collection() # creating first family f1 = Family(id=1, log="Trav 02 de Julho") f1.save() # persons of first family Person(id=1, family=f1, name=u"Wilson Jr", age=21).save() Person(id=2, family=f1, name=u"Wilson Father", age=45).save() Person(id=3, family=f1, name=u"Eliana Costa", age=40).save() Person(id=4, family=f1, name=u"Tayza Mariana", age=17).save() # creating second family f2 = Family(id=2, log="Av prof frasc brunno") f2.save() # persons of second family Person(id=5, family=f2, name="Isabella Luanna", age=16).save() Person(id=6, family=f2, name="Sandra Mara", age=36).save() Person(id=7, family=f2, name="Igor Gabriel", age=10).save() # creating third family f3 = Family(id=3, log="Av brazil") f3.save() # persons of thrird family Person(id=8, family=f3, name="Arthur WA", age=30).save() Person(id=9, family=f3, name="Paula Leonel", age=25).save() # executing join map/reduce map_person = """ function () { emit(this.family, { totalAge: this.age, persons: [{ name: this.name, age: this.age }]}); } """ map_family = """ function () { emit(this._id, { totalAge: 0, persons: [] }); } """ reduce_f = """ function (key, values) { var family = {persons: [], totalAge: 0}; values.forEach(function(value) { if (value.persons) { value.persons.forEach(function (person) { family.persons.push(person); family.totalAge += person.age; }); } }); return family; } """ cursor = Family.objects.map_reduce( map_f=map_family, reduce_f=reduce_f, output={'replace': 'family_map', 'db_alias': 'test2'}) # start a map/reduce cursor.next() results = Person.objects.map_reduce( map_f=map_person, reduce_f=reduce_f, output={'reduce': 'family_map', 'db_alias': 'test2'}) results = list(results) collection = get_db('test2').family_map self.assertEqual( collection.find_one({'_id': 1}), { '_id': 1, 'value': { 'persons': [ {'age': 21, 'name': u'Wilson Jr'}, {'age': 45, 'name': u'Wilson Father'}, {'age': 40, 'name': u'Eliana Costa'}, {'age': 17, 'name': u'Tayza Mariana'}], 'totalAge': 123} }) self.assertEqual( collection.find_one({'_id': 2}), { '_id': 2, 'value': { 'persons': [ {'age': 16, 'name': u'Isabella Luanna'}, {'age': 36, 'name': u'Sandra Mara'}, {'age': 10, 'name': u'Igor Gabriel'}], 'totalAge': 62} }) self.assertEqual( collection.find_one({'_id': 3}), { '_id': 3, 
'value': { 'persons': [ {'age': 30, 'name': u'Arthur WA'}, {'age': 25, 'name': u'Paula Leonel'}], 'totalAge': 55} }) def test_map_reduce_finalize(self): """Ensure that map, reduce, and finalize run and introduce "scope" by simulating "hotness" ranking with Reddit algorithm. """ from time import mktime class Link(Document): title = StringField(db_field='bpTitle') up_votes = IntField() down_votes = IntField() submitted = DateTimeField(db_field='sTime') Link.drop_collection() now = datetime.utcnow() # Note: Test data taken from a custom Reddit homepage on # Fri, 12 Feb 2010 14:36:00 -0600. Link ordering should # reflect order of insertion below, but is not influenced # by insertion order. Link(title="Google Buzz auto-followed a woman's abusive ex ...", up_votes=1079, down_votes=553, submitted=now - timedelta(hours=4)).save() Link(title="We did it! Barbie is a computer engineer.", up_votes=481, down_votes=124, submitted=now - timedelta(hours=2)).save() Link(title="This Is A Mosquito Getting Killed By A Laser", up_votes=1446, down_votes=530, submitted=now - timedelta(hours=13)).save() Link(title="Arabic flashcards land physics student in jail.", up_votes=215, down_votes=105, submitted=now - timedelta(hours=6)).save() Link(title="The Burger Lab: Presenting, the Flood Burger", up_votes=48, down_votes=17, submitted=now - timedelta(hours=5)).save() Link(title="How to see polarization with the naked eye", up_votes=74, down_votes=13, submitted=now - timedelta(hours=10)).save() map_f = """ function() { emit(this[~id], {up_delta: this[~up_votes] - this[~down_votes], sub_date: this[~submitted].getTime() / 1000}) } """ reduce_f = """ function(key, values) { data = values[0]; x = data.up_delta; // calculate time diff between reddit epoch and submission sec_since_epoch = data.sub_date - reddit_epoch; // calculate 'Y' if(x > 0) { y = 1; } else if (x = 0) { y = 0; } else { y = -1; } // calculate 'Z', the maximal value if(Math.abs(x) >= 1) { z = Math.abs(x); } else { z = 1; } return {x: x, y: y, z: z, t_s: sec_since_epoch}; } """ finalize_f = """ function(key, value) { // f(sec_since_epoch,y,z) = // log10(z) + ((y*sec_since_epoch) / 45000) z_10 = Math.log(value.z) / Math.log(10); weight = z_10 + ((value.y * value.t_s) / 45000); return weight; } """ # provide the reddit epoch (used for ranking) as a variable available # to all phases of the map/reduce operation: map, reduce, and finalize. reddit_epoch = mktime(datetime(2005, 12, 8, 7, 46, 43).timetuple()) scope = {'reddit_epoch': reddit_epoch} # run a map/reduce operation across all links. ordering is set # to "-value", which orders the "weight" value returned from # "finalize_f" in descending order. results = Link.objects.order_by("-value") results = results.map_reduce(map_f, reduce_f, "myresults", finalize_f=finalize_f, scope=scope) results = list(results) # assert troublesome Buzz article is ranked 1st self.assertTrue(results[0].object.title.startswith("Google Buzz")) # assert laser vision is ranked last self.assertTrue(results[-1].object.title.startswith("How to see")) Link.drop_collection() def test_item_frequencies(self): """Ensure that item frequencies are properly generated from lists. 
""" class BlogPost(Document): hits = IntField() tags = ListField(StringField(), db_field='blogTags') BlogPost.drop_collection() BlogPost(hits=1, tags=['music', 'film', 'actors', 'watch']).save() BlogPost(hits=2, tags=['music', 'watch']).save() BlogPost(hits=2, tags=['music', 'actors']).save() def test_assertions(f): f = dict((key, int(val)) for key, val in f.items()) self.assertEqual( set(['music', 'film', 'actors', 'watch']), set(f.keys())) self.assertEqual(f['music'], 3) self.assertEqual(f['actors'], 2) self.assertEqual(f['watch'], 2) self.assertEqual(f['film'], 1) exec_js = BlogPost.objects.item_frequencies('tags') map_reduce = BlogPost.objects.item_frequencies('tags', map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) # Ensure query is taken into account def test_assertions(f): f = dict((key, int(val)) for key, val in f.items()) self.assertEqual(set(['music', 'actors', 'watch']), set(f.keys())) self.assertEqual(f['music'], 2) self.assertEqual(f['actors'], 1) self.assertEqual(f['watch'], 1) exec_js = BlogPost.objects(hits__gt=1).item_frequencies('tags') map_reduce = BlogPost.objects( hits__gt=1).item_frequencies('tags', map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) # Check that normalization works def test_assertions(f): self.assertAlmostEqual(f['music'], 3.0 / 8.0) self.assertAlmostEqual(f['actors'], 2.0 / 8.0) self.assertAlmostEqual(f['watch'], 2.0 / 8.0) self.assertAlmostEqual(f['film'], 1.0 / 8.0) exec_js = BlogPost.objects.item_frequencies('tags', normalize=True) map_reduce = BlogPost.objects.item_frequencies( 'tags', normalize=True, map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) # Check item_frequencies works for non-list fields def test_assertions(f): self.assertEqual(set([1, 2]), set(f.keys())) self.assertEqual(f[1], 1) self.assertEqual(f[2], 2) exec_js = BlogPost.objects.item_frequencies('hits') map_reduce = BlogPost.objects.item_frequencies('hits', map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) BlogPost.drop_collection() def test_item_frequencies_on_embedded(self): """Ensure that item frequencies are properly generated from lists. 
""" class Phone(EmbeddedDocument): number = StringField() class Person(Document): name = StringField() phone = EmbeddedDocumentField(Phone) Person.drop_collection() doc = Person(name="Guido") doc.phone = Phone(number='62-3331-1656') doc.save() doc = Person(name="Marr") doc.phone = Phone(number='62-3331-1656') doc.save() doc = Person(name="WP Junior") doc.phone = Phone(number='62-3332-1656') doc.save() def test_assertions(f): f = dict((key, int(val)) for key, val in f.items()) self.assertEqual( set(['62-3331-1656', '62-3332-1656']), set(f.keys())) self.assertEqual(f['62-3331-1656'], 2) self.assertEqual(f['62-3332-1656'], 1) exec_js = Person.objects.item_frequencies('phone.number') map_reduce = Person.objects.item_frequencies( 'phone.number', map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) # Ensure query is taken into account def test_assertions(f): f = dict((key, int(val)) for key, val in f.items()) self.assertEqual(set(['62-3331-1656']), set(f.keys())) self.assertEqual(f['62-3331-1656'], 2) exec_js = Person.objects( phone__number='62-3331-1656').item_frequencies('phone.number') map_reduce = Person.objects( phone__number='62-3331-1656').item_frequencies('phone.number', map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) # Check that normalization works def test_assertions(f): self.assertEqual(f['62-3331-1656'], 2.0 / 3.0) self.assertEqual(f['62-3332-1656'], 1.0 / 3.0) exec_js = Person.objects.item_frequencies( 'phone.number', normalize=True) map_reduce = Person.objects.item_frequencies( 'phone.number', normalize=True, map_reduce=True) test_assertions(exec_js) test_assertions(map_reduce) def test_item_frequencies_null_values(self): class Person(Document): name = StringField() city = StringField() Person.drop_collection() Person(name="Wilson Snr", city="CRB").save() Person(name="Wilson Jr").save() freq = Person.objects.item_frequencies('city') self.assertEqual(freq, {'CRB': 1.0, None: 1.0}) freq = Person.objects.item_frequencies('city', normalize=True) self.assertEqual(freq, {'CRB': 0.5, None: 0.5}) freq = Person.objects.item_frequencies('city', map_reduce=True) self.assertEqual(freq, {'CRB': 1.0, None: 1.0}) freq = Person.objects.item_frequencies( 'city', normalize=True, map_reduce=True) self.assertEqual(freq, {'CRB': 0.5, None: 0.5}) def test_item_frequencies_with_null_embedded(self): class Data(EmbeddedDocument): name = StringField() class Extra(EmbeddedDocument): tag = StringField() class Person(Document): data = EmbeddedDocumentField(Data, required=True) extra = EmbeddedDocumentField(Extra) Person.drop_collection() p = Person() p.data = Data(name="Wilson Jr") p.save() p = Person() p.data = Data(name="Wesley") p.extra = Extra(tag="friend") p.save() ot = Person.objects.item_frequencies('extra.tag', map_reduce=False) self.assertEqual(ot, {None: 1.0, u'friend': 1.0}) ot = Person.objects.item_frequencies('extra.tag', map_reduce=True) self.assertEqual(ot, {None: 1.0, u'friend': 1.0}) def test_item_frequencies_with_0_values(self): class Test(Document): val = IntField() Test.drop_collection() t = Test() t.val = 0 t.save() ot = Test.objects.item_frequencies('val', map_reduce=True) self.assertEqual(ot, {0: 1}) ot = Test.objects.item_frequencies('val', map_reduce=False) self.assertEqual(ot, {0: 1}) def test_item_frequencies_with_False_values(self): class Test(Document): val = BooleanField() Test.drop_collection() t = Test() t.val = False t.save() ot = Test.objects.item_frequencies('val', map_reduce=True) self.assertEqual(ot, {False: 1}) ot = 
Test.objects.item_frequencies('val', map_reduce=False) self.assertEqual(ot, {False: 1}) def test_item_frequencies_normalize(self): class Test(Document): val = IntField() Test.drop_collection() for i in xrange(50): Test(val=1).save() for i in xrange(20): Test(val=2).save() freqs = Test.objects.item_frequencies( 'val', map_reduce=False, normalize=True) self.assertEqual(freqs, {1: 50.0 / 70, 2: 20.0 / 70}) freqs = Test.objects.item_frequencies( 'val', map_reduce=True, normalize=True) self.assertEqual(freqs, {1: 50.0 / 70, 2: 20.0 / 70}) def test_average(self): """Ensure that field can be averaged correctly. """ self.Person(name='person', age=0).save() self.assertEqual(int(self.Person.objects.average('age')), 0) ages = [23, 54, 12, 94, 27] for i, age in enumerate(ages): self.Person(name='test%s' % i, age=age).save() avg = float(sum(ages)) / (len(ages) + 1) # take into account the 0 self.assertAlmostEqual(int(self.Person.objects.average('age')), avg) self.assertAlmostEqual( int(self.Person.objects.aggregate_average('age')), avg ) self.Person(name='ageless person').save() self.assertEqual(int(self.Person.objects.average('age')), avg) self.assertEqual( int(self.Person.objects.aggregate_average('age')), avg ) # dot notation self.Person( name='person meta', person_meta=self.PersonMeta(weight=0)).save() self.assertAlmostEqual( int(self.Person.objects.average('person_meta.weight')), 0) self.assertAlmostEqual( int(self.Person.objects.aggregate_average('person_meta.weight')), 0 ) for i, weight in enumerate(ages): self.Person( name='test meta%i', person_meta=self.PersonMeta(weight=weight)).save() self.assertAlmostEqual( int(self.Person.objects.average('person_meta.weight')), avg ) self.assertAlmostEqual( int(self.Person.objects.aggregate_average('person_meta.weight')), avg ) self.Person(name='test meta none').save() self.assertEqual( int(self.Person.objects.average('person_meta.weight')), avg ) self.assertEqual( int(self.Person.objects.aggregate_average('person_meta.weight')), avg ) # test summing over a filtered queryset over_50 = [a for a in ages if a >= 50] avg = float(sum(over_50)) / len(over_50) self.assertEqual( self.Person.objects.filter(age__gte=50).average('age'), avg ) self.assertEqual( self.Person.objects.filter(age__gte=50).aggregate_average('age'), avg ) def test_sum(self): """Ensure that field can be summed over correctly. 
""" ages = [23, 54, 12, 94, 27] for i, age in enumerate(ages): self.Person(name='test%s' % i, age=age).save() self.assertEqual(self.Person.objects.sum('age'), sum(ages)) self.assertEqual( self.Person.objects.aggregate_sum('age'), sum(ages) ) self.Person(name='ageless person').save() self.assertEqual(self.Person.objects.sum('age'), sum(ages)) self.assertEqual( self.Person.objects.aggregate_sum('age'), sum(ages) ) for i, age in enumerate(ages): self.Person(name='test meta%s' % i, person_meta=self.PersonMeta(weight=age)).save() self.assertEqual( self.Person.objects.sum('person_meta.weight'), sum(ages) ) self.assertEqual( self.Person.objects.aggregate_sum('person_meta.weight'), sum(ages) ) self.Person(name='weightless person').save() self.assertEqual(self.Person.objects.sum('age'), sum(ages)) self.assertEqual( self.Person.objects.aggregate_sum('age'), sum(ages) ) # test summing over a filtered queryset self.assertEqual( self.Person.objects.filter(age__gte=50).sum('age'), sum([a for a in ages if a >= 50]) ) self.assertEqual( self.Person.objects.filter(age__gte=50).aggregate_sum('age'), sum([a for a in ages if a >= 50]) ) def test_embedded_average(self): class Pay(EmbeddedDocument): value = DecimalField() class Doc(Document): name = StringField() pay = EmbeddedDocumentField( Pay) Doc.drop_collection() Doc(name=u"Wilson Junior", pay=Pay(value=150)).save() Doc(name=u"Isabella Luanna", pay=Pay(value=530)).save() Doc(name=u"Tayza mariana", pay=Pay(value=165)).save() Doc(name=u"Eliana Costa", pay=Pay(value=115)).save() self.assertEqual( Doc.objects.average('pay.value'), 240) def test_embedded_array_average(self): class Pay(EmbeddedDocument): values = ListField(DecimalField()) class Doc(Document): name = StringField() pay = EmbeddedDocumentField( Pay) Doc.drop_collection() Doc(name=u"Wilson Junior", pay=Pay(values=[150, 100])).save() Doc(name=u"Isabella Luanna", pay=Pay(values=[530, 100])).save() Doc(name=u"Tayza mariana", pay=Pay(values=[165, 100])).save() Doc(name=u"Eliana Costa", pay=Pay(values=[115, 100])).save() self.assertEqual( Doc.objects.average('pay.values'), 170) def test_array_average(self): class Doc(Document): values = ListField(DecimalField()) Doc.drop_collection() Doc(values=[150, 100]).save() Doc(values=[530, 100]).save() Doc(values=[165, 100]).save() Doc(values=[115, 100]).save() self.assertEqual( Doc.objects.average('values'), 170) def test_embedded_sum(self): class Pay(EmbeddedDocument): value = DecimalField() class Doc(Document): name = StringField() pay = EmbeddedDocumentField( Pay) Doc.drop_collection() Doc(name=u"Wilson Junior", pay=Pay(value=150)).save() Doc(name=u"Isabella Luanna", pay=Pay(value=530)).save() Doc(name=u"Tayza mariana", pay=Pay(value=165)).save() Doc(name=u"Eliana Costa", pay=Pay(value=115)).save() self.assertEqual( Doc.objects.sum('pay.value'), 960) def test_embedded_array_sum(self): class Pay(EmbeddedDocument): values = ListField(DecimalField()) class Doc(Document): name = StringField() pay = EmbeddedDocumentField( Pay) Doc.drop_collection() Doc(name=u"Wilson Junior", pay=Pay(values=[150, 100])).save() Doc(name=u"Isabella Luanna", pay=Pay(values=[530, 100])).save() Doc(name=u"Tayza mariana", pay=Pay(values=[165, 100])).save() Doc(name=u"Eliana Costa", pay=Pay(values=[115, 100])).save() self.assertEqual( Doc.objects.sum('pay.values'), 1360) def test_array_sum(self): class Doc(Document): values = ListField(DecimalField()) Doc.drop_collection() Doc(values=[150, 100]).save() Doc(values=[530, 100]).save() Doc(values=[165, 100]).save() Doc(values=[115, 100]).save() 
self.assertEqual( Doc.objects.sum('values'), 1360) def test_distinct(self): """Ensure that the QuerySet.distinct method works. """ self.Person(name='Mr Orange', age=20).save() self.Person(name='Mr White', age=20).save() self.Person(name='Mr Orange', age=30).save() self.Person(name='Mr Pink', age=30).save() self.assertEqual(set(self.Person.objects.distinct('name')), set(['Mr Orange', 'Mr White', 'Mr Pink'])) self.assertEqual(set(self.Person.objects.distinct('age')), set([20, 30])) self.assertEqual(set(self.Person.objects(age=30).distinct('name')), set(['Mr Orange', 'Mr Pink'])) def test_distinct_handles_references(self): class Foo(Document): bar = ReferenceField("Bar") class Bar(Document): text = StringField() Bar.drop_collection() Foo.drop_collection() bar = Bar(text="hi") bar.save() foo = Foo(bar=bar) foo.save() self.assertEqual(Foo.objects.distinct("bar"), [bar]) @skip_older_mongodb def test_text_indexes(self): class News(Document): title = StringField() content = StringField() is_active = BooleanField(default=True) meta = {'indexes': [ {'fields': ['$title', "$content"], 'default_language': 'portuguese', 'weight': {'title': 10, 'content': 2} } ]} News.drop_collection() info = News.objects._collection.index_information() self.assertTrue('title_text_content_text' in info) self.assertTrue('textIndexVersion' in info['title_text_content_text']) News(title="Neymar quebrou a vertebra", content="O Brasil sofre com a perda de Neymar").save() News(title="Brasil passa para as quartas de finais", content="Com o brasil nas quartas de finais teremos um " "jogo complicado com a alemanha").save() count = News.objects.search_text( "neymar", language="portuguese").count() self.assertEqual(count, 1) count = News.objects.search_text( "brasil -neymar").count() self.assertEqual(count, 1) News(title=u"As eleições no Brasil já estão em planejamento", content=u"A candidata dilma roussef já começa o teu planejamento", is_active=False).save() new = News.objects(is_active=False).search_text( "dilma", language="pt").first() query = News.objects(is_active=False).search_text( "dilma", language="pt")._query self.assertEqual( query, {'$text': { '$search': 'dilma', '$language': 'pt'}, 'is_active': False}) self.assertEqual(new.is_active, False) self.assertTrue('dilma' in new.content) self.assertTrue('planejamento' in new.title) query = News.objects.search_text("candidata") self.assertEqual(query._search_text, "candidata") new = query.first() self.assertTrue(isinstance(new.get_text_score(), float)) # count query = News.objects.search_text('brasil').order_by('$text_score') self.assertEqual(query._search_text, "brasil") self.assertEqual(query.count(), 3) self.assertEqual(query._query, {'$text': {'$search': 'brasil'}}) cursor_args = query._cursor_args if not IS_PYMONGO_3: cursor_args_fields = cursor_args['fields'] else: cursor_args_fields = cursor_args['projection'] self.assertEqual( cursor_args_fields, {'_text_score': {'$meta': 'textScore'}}) text_scores = [i.get_text_score() for i in query] self.assertEqual(len(text_scores), 3) self.assertTrue(text_scores[0] > text_scores[1]) self.assertTrue(text_scores[1] > text_scores[2]) max_text_score = text_scores[0] # get item item = News.objects.search_text( 'brasil').order_by('$text_score').first() self.assertEqual(item.get_text_score(), max_text_score) @skip_older_mongodb def test_distinct_handles_references_to_alias(self): register_connection('testdb', 'mongoenginetest2') class Foo(Document): bar = ReferenceField("Bar") meta = {'db_alias': 'testdb'} class Bar(Document): text = 
StringField() meta = {'db_alias': 'testdb'} Bar.drop_collection() Foo.drop_collection() bar = Bar(text="hi") bar.save() foo = Foo(bar=bar) foo.save() self.assertEqual(Foo.objects.distinct("bar"), [bar]) def test_distinct_handles_db_field(self): """Ensure that distinct resolves field name to db_field as expected. """ class Product(Document): product_id = IntField(db_field='pid') Product.drop_collection() Product(product_id=1).save() Product(product_id=2).save() Product(product_id=1).save() self.assertEqual(set(Product.objects.distinct('product_id')), set([1, 2])) self.assertEqual(set(Product.objects.distinct('pid')), set([1, 2])) Product.drop_collection() def test_distinct_ListField_EmbeddedDocumentField(self): class Author(EmbeddedDocument): name = StringField() class Book(Document): title = StringField() authors = ListField(EmbeddedDocumentField(Author)) Book.drop_collection() mark_twain = Author(name="Mark Twain") john_tolkien = Author(name="John Ronald Reuel Tolkien") book = Book(title="Tom Sawyer", authors=[mark_twain]).save() book = Book( title="The Lord of the Rings", authors=[john_tolkien]).save() book = Book( title="The Stories", authors=[mark_twain, john_tolkien]).save() authors = Book.objects.distinct("authors") self.assertEqual(authors, [mark_twain, john_tolkien]) def test_distinct_ListField_EmbeddedDocumentField_EmbeddedDocumentField(self): class Continent(EmbeddedDocument): continent_name = StringField() class Country(EmbeddedDocument): country_name = StringField() continent = EmbeddedDocumentField(Continent) class Author(EmbeddedDocument): name = StringField() country = EmbeddedDocumentField(Country) class Book(Document): title = StringField() authors = ListField(EmbeddedDocumentField(Author)) Book.drop_collection() europe = Continent(continent_name='europe') asia = Continent(continent_name='asia') scotland = Country(country_name="Scotland", continent=europe) tibet = Country(country_name="Tibet", continent=asia) mark_twain = Author(name="Mark Twain", country=scotland) john_tolkien = Author(name="John Ronald Reuel Tolkien", country=tibet) book = Book(title="Tom Sawyer", authors=[mark_twain]).save() book = Book( title="The Lord of the Rings", authors=[john_tolkien]).save() book = Book( title="The Stories", authors=[mark_twain, john_tolkien]).save() country_list = Book.objects.distinct("authors.country") self.assertEqual(country_list, [scotland, tibet]) continent_list = Book.objects.distinct("authors.country.continent") self.assertEqual(continent_list, [europe, asia]) def test_distinct_ListField_ReferenceField(self): class Bar(Document): text = StringField() class Foo(Document): bar = ReferenceField('Bar') bar_lst = ListField(ReferenceField('Bar')) Bar.drop_collection() Foo.drop_collection() bar_1 = Bar(text="hi") bar_1.save() bar_2 = Bar(text="bye") bar_2.save() foo = Foo(bar=bar_1, bar_lst=[bar_1, bar_2]) foo.save() self.assertEqual(Foo.objects.distinct("bar_lst"), [bar_1, bar_2]) def test_custom_manager(self): """Ensure that custom QuerySetManager instances work as expected. 
""" class BlogPost(Document): tags = ListField(StringField()) deleted = BooleanField(default=False) date = DateTimeField(default=datetime.now) @queryset_manager def objects(cls, qryset): opts = {"deleted": False} return qryset(**opts) @queryset_manager def music_posts(doc_cls, queryset, deleted=False): return queryset(tags='music', deleted=deleted).order_by('date') BlogPost.drop_collection() post1 = BlogPost(tags=['music', 'film']).save() post2 = BlogPost(tags=['music']).save() post3 = BlogPost(tags=['film', 'actors']).save() post4 = BlogPost(tags=['film', 'actors', 'music'], deleted=True).save() self.assertEqual([p.id for p in BlogPost.objects()], [post1.id, post2.id, post3.id]) self.assertEqual([p.id for p in BlogPost.music_posts()], [post1.id, post2.id]) self.assertEqual([p.id for p in BlogPost.music_posts(True)], [post4.id]) BlogPost.drop_collection() def test_custom_manager_overriding_objects_works(self): class Foo(Document): bar = StringField(default='bar') active = BooleanField(default=False) @queryset_manager def objects(doc_cls, queryset): return queryset(active=True) @queryset_manager def with_inactive(doc_cls, queryset): return queryset(active=False) Foo.drop_collection() Foo(active=True).save() Foo(active=False).save() self.assertEqual(1, Foo.objects.count()) self.assertEqual(1, Foo.with_inactive.count()) Foo.with_inactive.first().delete() self.assertEqual(0, Foo.with_inactive.count()) self.assertEqual(1, Foo.objects.count()) def test_inherit_objects(self): class Foo(Document): meta = {'allow_inheritance': True} active = BooleanField(default=True) @queryset_manager def objects(klass, queryset): return queryset(active=True) class Bar(Foo): pass Bar.drop_collection() Bar.objects.create(active=False) self.assertEqual(0, Bar.objects.count()) def test_inherit_objects_override(self): class Foo(Document): meta = {'allow_inheritance': True} active = BooleanField(default=True) @queryset_manager def objects(klass, queryset): return queryset(active=True) class Bar(Foo): @queryset_manager def objects(klass, queryset): return queryset(active=False) Bar.drop_collection() Bar.objects.create(active=False) self.assertEqual(0, Foo.objects.count()) self.assertEqual(1, Bar.objects.count()) def test_query_value_conversion(self): """Ensure that query values are properly converted when necessary. """ class BlogPost(Document): author = ReferenceField(self.Person) BlogPost.drop_collection() person = self.Person(name='test', age=30) person.save() post = BlogPost(author=person) post.save() # Test that query may be performed by providing a document as a value # while using a ReferenceField's name - the document should be # converted to an DBRef, which is legal, unlike a Document object post_obj = BlogPost.objects(author=person).first() self.assertEqual(post.id, post_obj.id) # Test that lists of values work when using the 'in', 'nin' and 'all' post_obj = BlogPost.objects(author__in=[person]).first() self.assertEqual(post.id, post_obj.id) BlogPost.drop_collection() def test_update_value_conversion(self): """Ensure that values used in updates are converted before use. 
""" class Group(Document): members = ListField(ReferenceField(self.Person)) Group.drop_collection() user1 = self.Person(name='user1') user1.save() user2 = self.Person(name='user2') user2.save() group = Group() group.save() Group.objects(id=group.id).update(set__members=[user1, user2]) group.reload() self.assertTrue(len(group.members) == 2) self.assertEqual(group.members[0].name, user1.name) self.assertEqual(group.members[1].name, user2.name) Group.drop_collection() def test_dict_with_custom_baseclass(self): """Ensure DictField working with custom base clases. """ class Test(Document): testdict = DictField() Test.drop_collection() t = Test(testdict={'f': 'Value'}) t.save() self.assertEqual( Test.objects(testdict__f__startswith='Val').count(), 1) self.assertEqual(Test.objects(testdict__f='Value').count(), 1) Test.drop_collection() class Test(Document): testdict = DictField(basecls=StringField) t = Test(testdict={'f': 'Value'}) t.save() self.assertEqual(Test.objects(testdict__f='Value').count(), 1) self.assertEqual( Test.objects(testdict__f__startswith='Val').count(), 1) Test.drop_collection() def test_bulk(self): """Ensure bulk querying by object id returns a proper dict. """ class BlogPost(Document): title = StringField() BlogPost.drop_collection() post_1 = BlogPost(title="Post #1") post_2 = BlogPost(title="Post #2") post_3 = BlogPost(title="Post #3") post_4 = BlogPost(title="Post #4") post_5 = BlogPost(title="Post #5") post_1.save() post_2.save() post_3.save() post_4.save() post_5.save() ids = [post_1.id, post_2.id, post_5.id] objects = BlogPost.objects.in_bulk(ids) self.assertEqual(len(objects), 3) self.assertTrue(post_1.id in objects) self.assertTrue(post_2.id in objects) self.assertTrue(post_5.id in objects) self.assertTrue(objects[post_1.id].title == post_1.title) self.assertTrue(objects[post_2.id].title == post_2.title) self.assertTrue(objects[post_5.id].title == post_5.title) BlogPost.drop_collection() def tearDown(self): self.Person.drop_collection() def test_custom_querysets(self): """Ensure that custom QuerySet classes may be used. """ class CustomQuerySet(QuerySet): def not_empty(self): return self.count() > 0 class Post(Document): meta = {'queryset_class': CustomQuerySet} Post.drop_collection() self.assertTrue(isinstance(Post.objects, CustomQuerySet)) self.assertFalse(Post.objects.not_empty()) Post().save() self.assertTrue(Post.objects.not_empty()) Post.drop_collection() def test_custom_querysets_set_manager_directly(self): """Ensure that custom QuerySet classes may be used. """ class CustomQuerySet(QuerySet): def not_empty(self): return self.count() > 0 class CustomQuerySetManager(QuerySetManager): queryset_class = CustomQuerySet class Post(Document): objects = CustomQuerySetManager() Post.drop_collection() self.assertTrue(isinstance(Post.objects, CustomQuerySet)) self.assertFalse(Post.objects.not_empty()) Post().save() self.assertTrue(Post.objects.not_empty()) Post.drop_collection() def test_custom_querysets_managers_directly(self): """Ensure that custom QuerySet classes may be used. 
""" class CustomQuerySetManager(QuerySetManager): @staticmethod def get_queryset(doc_cls, queryset): return queryset(is_published=True) class Post(Document): is_published = BooleanField(default=False) published = CustomQuerySetManager() Post.drop_collection() Post().save() Post(is_published=True).save() self.assertEqual(Post.objects.count(), 2) self.assertEqual(Post.published.count(), 1) Post.drop_collection() def test_custom_querysets_inherited(self): """Ensure that custom QuerySet classes may be used. """ class CustomQuerySet(QuerySet): def not_empty(self): return self.count() > 0 class Base(Document): meta = {'abstract': True, 'queryset_class': CustomQuerySet} class Post(Base): pass Post.drop_collection() self.assertTrue(isinstance(Post.objects, CustomQuerySet)) self.assertFalse(Post.objects.not_empty()) Post().save() self.assertTrue(Post.objects.not_empty()) Post.drop_collection() def test_custom_querysets_inherited_direct(self): """Ensure that custom QuerySet classes may be used. """ class CustomQuerySet(QuerySet): def not_empty(self): return self.count() > 0 class CustomQuerySetManager(QuerySetManager): queryset_class = CustomQuerySet class Base(Document): meta = {'abstract': True} objects = CustomQuerySetManager() class Post(Base): pass Post.drop_collection() self.assertTrue(isinstance(Post.objects, CustomQuerySet)) self.assertFalse(Post.objects.not_empty()) Post().save() self.assertTrue(Post.objects.not_empty()) Post.drop_collection() def test_count_limit_and_skip(self): class Post(Document): title = StringField() Post.drop_collection() for i in xrange(10): Post(title="Post %s" % i).save() self.assertEqual(5, Post.objects.limit(5).skip(5).count(with_limit_and_skip=True)) self.assertEqual( 10, Post.objects.limit(5).skip(5).count(with_limit_and_skip=False)) def test_count_and_none(self): """Test count works with None()""" class MyDoc(Document): pass MyDoc.drop_collection() for i in xrange(0, 10): MyDoc().save() self.assertEqual(MyDoc.objects.count(), 10) self.assertEqual(MyDoc.objects.none().count(), 0) def test_count_list_embedded(self): class B(EmbeddedDocument): c = StringField() class A(Document): b = ListField(EmbeddedDocumentField(B)) self.assertEqual(A.objects(b=[{'c': 'c'}]).count(), 0) def test_call_after_limits_set(self): """Ensure that re-filtering after slicing works """ class Post(Document): title = StringField() Post.drop_collection() Post(title="Post 1").save() Post(title="Post 2").save() posts = Post.objects.all()[0:1] self.assertEqual(len(list(posts())), 1) Post.drop_collection() def test_order_then_filter(self): """Ensure that ordering still works after filtering. 
""" class Number(Document): n = IntField() Number.drop_collection() n2 = Number.objects.create(n=2) n1 = Number.objects.create(n=1) self.assertEqual(list(Number.objects), [n2, n1]) self.assertEqual(list(Number.objects.order_by('n')), [n1, n2]) self.assertEqual(list(Number.objects.order_by('n').filter()), [n1, n2]) Number.drop_collection() def test_clone(self): """Ensure that cloning clones complex querysets """ class Number(Document): n = IntField() Number.drop_collection() for i in xrange(1, 101): t = Number(n=i) t.save() test = Number.objects test2 = test.clone() self.assertFalse(test == test2) self.assertEqual(test.count(), test2.count()) test = test.filter(n__gt=11) test2 = test.clone() self.assertFalse(test == test2) self.assertEqual(test.count(), test2.count()) test = test.limit(10) test2 = test.clone() self.assertFalse(test == test2) self.assertEqual(test.count(), test2.count()) Number.drop_collection() def test_using(self): """Ensure that switching databases for a queryset is possible """ class Number2(Document): n = IntField() Number2.drop_collection() with switch_db(Number2, 'test2') as Number2: Number2.drop_collection() for i in range(1, 10): t = Number2(n=i) t.switch_db('test2') t.save() self.assertEqual(len(Number2.objects.using('test2')), 9) def test_unset_reference(self): class Comment(Document): text = StringField() class Post(Document): comment = ReferenceField(Comment) Comment.drop_collection() Post.drop_collection() comment = Comment.objects.create(text='test') post = Post.objects.create(comment=comment) self.assertEqual(post.comment, comment) Post.objects.update(unset__comment=1) post.reload() self.assertEqual(post.comment, None) Comment.drop_collection() Post.drop_collection() def test_order_works_with_custom_db_field_names(self): class Number(Document): n = IntField(db_field='number') Number.drop_collection() n2 = Number.objects.create(n=2) n1 = Number.objects.create(n=1) self.assertEqual(list(Number.objects), [n2, n1]) self.assertEqual(list(Number.objects.order_by('n')), [n1, n2]) Number.drop_collection() def test_order_works_with_primary(self): """Ensure that order_by and primary work. """ class Number(Document): n = IntField(primary_key=True) Number.drop_collection() Number(n=1).save() Number(n=2).save() Number(n=3).save() numbers = [n.n for n in Number.objects.order_by('-n')] self.assertEqual([3, 2, 1], numbers) numbers = [n.n for n in Number.objects.order_by('+n')] self.assertEqual([1, 2, 3], numbers) Number.drop_collection() def test_ensure_index(self): """Ensure that manual creation of indexes works. """ class Comment(Document): message = StringField() meta = {'allow_inheritance': True} Comment.ensure_index('message') info = Comment.objects._collection.index_information() info = [(value['key'], value.get('unique', False), value.get('sparse', False)) for key, value in info.iteritems()] self.assertTrue(([('_cls', 1), ('message', 1)], False, False) in info) def test_where(self): """Ensure that where clauses work. 
""" class IntPair(Document): fielda = IntField() fieldb = IntField() IntPair.objects._collection.remove() a = IntPair(fielda=1, fieldb=1) b = IntPair(fielda=1, fieldb=2) c = IntPair(fielda=2, fieldb=1) a.save() b.save() c.save() query = IntPair.objects.where('this[~fielda] >= this[~fieldb]') self.assertEqual( 'this["fielda"] >= this["fieldb"]', query._where_clause) results = list(query) self.assertEqual(2, len(results)) self.assertTrue(a in results) self.assertTrue(c in results) query = IntPair.objects.where('this[~fielda] == this[~fieldb]') results = list(query) self.assertEqual(1, len(results)) self.assertTrue(a in results) query = IntPair.objects.where( 'function() { return this[~fielda] >= this[~fieldb] }') self.assertEqual( 'function() { return this["fielda"] >= this["fieldb"] }', query._where_clause) results = list(query) self.assertEqual(2, len(results)) self.assertTrue(a in results) self.assertTrue(c in results) def invalid_where(): list(IntPair.objects.where(fielda__gte=3)) self.assertRaises(TypeError, invalid_where) def test_scalar(self): class Organization(Document): name = StringField() class User(Document): name = StringField() organization = ObjectIdField() User.drop_collection() Organization.drop_collection() whitehouse = Organization(name="White House") whitehouse.save() User(name="Bob Dole", organization=whitehouse.id).save() # Efficient way to get all unique organization names for a given # set of users (Pretend this has additional filtering.) user_orgs = set(User.objects.scalar('organization')) orgs = Organization.objects(id__in=user_orgs).scalar('name') self.assertEqual(list(orgs), ['White House']) # Efficient for generating listings, too. orgs = Organization.objects.scalar('name').in_bulk(list(user_orgs)) user_map = User.objects.scalar('name', 'organization') user_listing = [(user, orgs[org]) for user, org in user_map] self.assertEqual([("Bob Dole", "White House")], user_listing) def test_scalar_simple(self): class TestDoc(Document): x = IntField() y = BooleanField() TestDoc.drop_collection() TestDoc(x=10, y=True).save() TestDoc(x=20, y=False).save() TestDoc(x=30, y=True).save() plist = list(TestDoc.objects.scalar('x', 'y')) self.assertEqual(len(plist), 3) self.assertEqual(plist[0], (10, True)) self.assertEqual(plist[1], (20, False)) self.assertEqual(plist[2], (30, True)) class UserDoc(Document): name = StringField() age = IntField() UserDoc.drop_collection() UserDoc(name="Wilson Jr", age=19).save() UserDoc(name="Wilson", age=43).save() UserDoc(name="Eliana", age=37).save() UserDoc(name="Tayza", age=15).save() ulist = list(UserDoc.objects.scalar('name', 'age')) self.assertEqual(ulist, [ (u'Wilson Jr', 19), (u'Wilson', 43), (u'Eliana', 37), (u'Tayza', 15)]) ulist = list(UserDoc.objects.scalar('name').order_by('age')) self.assertEqual(ulist, [ (u'Tayza'), (u'Wilson Jr'), (u'Eliana'), (u'Wilson')]) def test_scalar_embedded(self): class Profile(EmbeddedDocument): name = StringField() age = IntField() class Locale(EmbeddedDocument): city = StringField() country = StringField() class Person(Document): profile = EmbeddedDocumentField(Profile) locale = EmbeddedDocumentField(Locale) Person.drop_collection() Person(profile=Profile(name="Wilson Jr", age=19), locale=Locale(city="Corumba-GO", country="Brazil")).save() Person(profile=Profile(name="Gabriel Falcao", age=23), locale=Locale(city="New York", country="USA")).save() Person(profile=Profile(name="Lincoln de souza", age=28), locale=Locale(city="Belo Horizonte", country="Brazil")).save() Person(profile=Profile(name="Walter 
cruz", age=30), locale=Locale(city="Brasilia", country="Brazil")).save() self.assertEqual( list(Person.objects.order_by( 'profile__age').scalar('profile__name')), [u'Wilson Jr', u'Gabriel Falcao', u'Lincoln de souza', u'Walter cruz']) ulist = list(Person.objects.order_by('locale.city') .scalar('profile__name', 'profile__age', 'locale__city')) self.assertEqual(ulist, [(u'Lincoln de souza', 28, u'Belo Horizonte'), (u'Walter cruz', 30, u'Brasilia'), (u'Wilson Jr', 19, u'Corumba-GO'), (u'Gabriel Falcao', 23, u'New York')]) def test_scalar_decimal(self): from decimal import Decimal class Person(Document): name = StringField() rating = DecimalField() Person.drop_collection() Person(name="Wilson Jr", rating=Decimal('1.0')).save() ulist = list(Person.objects.scalar('name', 'rating')) self.assertEqual(ulist, [(u'Wilson Jr', Decimal('1.0'))]) def test_scalar_reference_field(self): class State(Document): name = StringField() class Person(Document): name = StringField() state = ReferenceField(State) State.drop_collection() Person.drop_collection() s1 = State(name="Goias") s1.save() Person(name="Wilson JR", state=s1).save() plist = list(Person.objects.scalar('name', 'state')) self.assertEqual(plist, [(u'Wilson JR', s1)]) def test_scalar_generic_reference_field(self): class State(Document): name = StringField() class Person(Document): name = StringField() state = GenericReferenceField() State.drop_collection() Person.drop_collection() s1 = State(name="Goias") s1.save() Person(name="Wilson JR", state=s1).save() plist = list(Person.objects.scalar('name', 'state')) self.assertEqual(plist, [(u'Wilson JR', s1)]) def test_scalar_db_field(self): class TestDoc(Document): x = IntField() y = BooleanField() TestDoc.drop_collection() TestDoc(x=10, y=True).save() TestDoc(x=20, y=False).save() TestDoc(x=30, y=True).save() plist = list(TestDoc.objects.scalar('x', 'y')) self.assertEqual(len(plist), 3) self.assertEqual(plist[0], (10, True)) self.assertEqual(plist[1], (20, False)) self.assertEqual(plist[2], (30, True)) def test_scalar_primary_key(self): class SettingValue(Document): key = StringField(primary_key=True) value = StringField() SettingValue.drop_collection() s = SettingValue(key="test", value="test value") s.save() val = SettingValue.objects.scalar('key', 'value') self.assertEqual(list(val), [('test', 'test value')]) def test_scalar_cursor_behaviour(self): """Ensure that a query returns a valid set of results. 
""" person1 = self.Person(name="User A", age=20) person1.save() person2 = self.Person(name="User B", age=30) person2.save() # Find all people in the collection people = self.Person.objects.scalar('name') self.assertEqual(people.count(), 2) results = list(people) self.assertEqual(results[0], "User A") self.assertEqual(results[1], "User B") # Use a query to filter the people found to just person1 people = self.Person.objects(age=20).scalar('name') self.assertEqual(people.count(), 1) person = people.next() self.assertEqual(person, "User A") # Test limit people = list(self.Person.objects.limit(1).scalar('name')) self.assertEqual(len(people), 1) self.assertEqual(people[0], 'User A') # Test skip people = list(self.Person.objects.skip(1).scalar('name')) self.assertEqual(len(people), 1) self.assertEqual(people[0], 'User B') person3 = self.Person(name="User C", age=40) person3.save() # Test slice limit people = list(self.Person.objects[:2].scalar('name')) self.assertEqual(len(people), 2) self.assertEqual(people[0], 'User A') self.assertEqual(people[1], 'User B') # Test slice skip people = list(self.Person.objects[1:].scalar('name')) self.assertEqual(len(people), 2) self.assertEqual(people[0], 'User B') self.assertEqual(people[1], 'User C') # Test slice limit and skip people = list(self.Person.objects[1:2].scalar('name')) self.assertEqual(len(people), 1) self.assertEqual(people[0], 'User B') people = list(self.Person.objects[1:1].scalar('name')) self.assertEqual(len(people), 0) # Test slice out of range people = list(self.Person.objects.scalar('name')[80000:80001]) self.assertEqual(len(people), 0) # Test larger slice __repr__ self.Person.objects.delete() for i in xrange(55): self.Person(name='A%s' % i, age=i).save() self.assertEqual(self.Person.objects.scalar('name').count(), 55) self.assertEqual( "A0", "%s" % self.Person.objects.order_by('name').scalar('name').first()) self.assertEqual( "A0", "%s" % self.Person.objects.scalar('name').order_by('name')[0]) if PY3: self.assertEqual( "['A1', 'A2']", "%s" % self.Person.objects.order_by('age').scalar('name')[1:3]) self.assertEqual("['A51', 'A52']", "%s" % self.Person.objects.order_by( 'age').scalar('name')[51:53]) else: self.assertEqual("[u'A1', u'A2']", "%s" % self.Person.objects.order_by( 'age').scalar('name')[1:3]) self.assertEqual("[u'A51', u'A52']", "%s" % self.Person.objects.order_by( 'age').scalar('name')[51:53]) # with_id and in_bulk person = self.Person.objects.order_by('name').first() self.assertEqual("A0", "%s" % self.Person.objects.scalar('name').with_id(person.id)) pks = self.Person.objects.order_by('age').scalar('pk')[1:3] if PY3: self.assertEqual("['A1', 'A2']", "%s" % sorted( self.Person.objects.scalar('name').in_bulk(list(pks)).values())) else: self.assertEqual("[u'A1', u'A2']", "%s" % sorted( self.Person.objects.scalar('name').in_bulk(list(pks)).values())) def test_elem_match(self): class Foo(EmbeddedDocument): shape = StringField() color = StringField() thick = BooleanField() meta = {'allow_inheritance': False} class Bar(Document): foo = ListField(EmbeddedDocumentField(Foo)) meta = {'allow_inheritance': False} Bar.drop_collection() b1 = Bar(foo=[Foo(shape="square", color="purple", thick=False), Foo(shape="circle", color="red", thick=True)]) b1.save() b2 = Bar(foo=[Foo(shape="square", color="red", thick=True), Foo(shape="circle", color="purple", thick=False)]) b2.save() b3 = Bar(foo=[Foo(shape="square", thick=True), Foo(shape="circle", color="purple", thick=False)]) b3.save() ak = list( Bar.objects(foo__match={'shape': "square", 
"color": "purple"})) self.assertEqual([b1], ak) ak = list( Bar.objects(foo__elemMatch={'shape': "square", "color": "purple"})) self.assertEqual([b1], ak) ak = list(Bar.objects(foo__match=Foo(shape="square", color="purple"))) self.assertEqual([b1], ak) ak = list( Bar.objects(foo__elemMatch={'shape': "square", "color__exists": True})) self.assertEqual([b1, b2], ak) ak = list( Bar.objects(foo__match={'shape': "square", "color__exists": True})) self.assertEqual([b1, b2], ak) ak = list( Bar.objects(foo__elemMatch={'shape': "square", "color__exists": False})) self.assertEqual([b3], ak) ak = list( Bar.objects(foo__match={'shape': "square", "color__exists": False})) self.assertEqual([b3], ak) def test_upsert_includes_cls(self): """Upserts should include _cls information for inheritable classes """ class Test(Document): test = StringField() Test.drop_collection() Test.objects(test='foo').update_one(upsert=True, set__test='foo') self.assertFalse('_cls' in Test._collection.find_one()) class Test(Document): meta = {'allow_inheritance': True} test = StringField() Test.drop_collection() Test.objects(test='foo').update_one(upsert=True, set__test='foo') self.assertTrue('_cls' in Test._collection.find_one()) def test_update_upsert_looks_like_a_digit(self): class MyDoc(DynamicDocument): pass MyDoc.drop_collection() self.assertEqual(1, MyDoc.objects.update_one(upsert=True, inc__47=1)) self.assertEqual(MyDoc.objects.get()['47'], 1) def test_dictfield_key_looks_like_a_digit(self): """Only should work with DictField even if they have numeric keys.""" class MyDoc(Document): test = DictField() MyDoc.drop_collection() doc = MyDoc(test={'47': 1}) doc.save() self.assertEqual(MyDoc.objects.only('test__47').get().test['47'], 1) def test_read_preference(self): class Bar(Document): txt = StringField() meta = { 'indexes': [ 'txt' ] } Bar.drop_collection() bars = list(Bar.objects(read_preference=ReadPreference.PRIMARY)) self.assertEqual([], bars) if not IS_PYMONGO_3: error_class = ConfigurationError else: error_class = TypeError self.assertRaises(error_class, Bar.objects, read_preference='Primary') # read_preference as a kwarg bars = Bar.objects(read_preference=ReadPreference.SECONDARY_PREFERRED) self.assertEqual( bars._read_preference, ReadPreference.SECONDARY_PREFERRED) self.assertEqual(bars._cursor._Cursor__read_preference, ReadPreference.SECONDARY_PREFERRED) # read_preference as a query set method bars = Bar.objects.read_preference(ReadPreference.SECONDARY_PREFERRED) self.assertEqual( bars._read_preference, ReadPreference.SECONDARY_PREFERRED) self.assertEqual(bars._cursor._Cursor__read_preference, ReadPreference.SECONDARY_PREFERRED) # read_preference after skip bars = Bar.objects.skip(1) \ .read_preference(ReadPreference.SECONDARY_PREFERRED) self.assertEqual( bars._read_preference, ReadPreference.SECONDARY_PREFERRED) self.assertEqual(bars._cursor._Cursor__read_preference, ReadPreference.SECONDARY_PREFERRED) # read_preference after limit bars = Bar.objects.limit(1) \ .read_preference(ReadPreference.SECONDARY_PREFERRED) self.assertEqual( bars._read_preference, ReadPreference.SECONDARY_PREFERRED) self.assertEqual(bars._cursor._Cursor__read_preference, ReadPreference.SECONDARY_PREFERRED) # read_preference after order_by bars = Bar.objects.order_by('txt') \ .read_preference(ReadPreference.SECONDARY_PREFERRED) self.assertEqual( bars._read_preference, ReadPreference.SECONDARY_PREFERRED) self.assertEqual(bars._cursor._Cursor__read_preference, ReadPreference.SECONDARY_PREFERRED) # read_preference after hint bars = 
Bar.objects.hint([('txt', 1)]) \ .read_preference(ReadPreference.SECONDARY_PREFERRED) self.assertEqual( bars._read_preference, ReadPreference.SECONDARY_PREFERRED) self.assertEqual(bars._cursor._Cursor__read_preference, ReadPreference.SECONDARY_PREFERRED) def test_json_simple(self): class Embedded(EmbeddedDocument): string = StringField() class Doc(Document): string = StringField() embedded_field = EmbeddedDocumentField(Embedded) Doc.drop_collection() Doc(string="Hi", embedded_field=Embedded(string="Hi")).save() Doc(string="Bye", embedded_field=Embedded(string="Bye")).save() Doc().save() json_data = Doc.objects.to_json(sort_keys=True, separators=(',', ':')) doc_objects = list(Doc.objects) self.assertEqual(doc_objects, Doc.objects.from_json(json_data)) def test_json_complex(self): if pymongo.version_tuple[0] <= 2 and pymongo.version_tuple[1] <= 3: raise SkipTest("Need pymongo 2.4 as has a fix for DBRefs") class EmbeddedDoc(EmbeddedDocument): pass class Simple(Document): pass class Doc(Document): string_field = StringField(default='1') int_field = IntField(default=1) float_field = FloatField(default=1.1) boolean_field = BooleanField(default=True) datetime_field = DateTimeField(default=datetime.now) embedded_document_field = EmbeddedDocumentField( EmbeddedDoc, default=lambda: EmbeddedDoc()) list_field = ListField(default=lambda: [1, 2, 3]) dict_field = DictField(default=lambda: {"hello": "world"}) objectid_field = ObjectIdField(default=ObjectId) reference_field = ReferenceField( Simple, default=lambda: Simple().save()) map_field = MapField(IntField(), default=lambda: {"simple": 1}) decimal_field = DecimalField(default=1.0) complex_datetime_field = ComplexDateTimeField(default=datetime.now) url_field = URLField(default="http://mongoengine.org") dynamic_field = DynamicField(default=1) generic_reference_field = GenericReferenceField( default=lambda: Simple().save()) sorted_list_field = SortedListField(IntField(), default=lambda: [1, 2, 3]) email_field = EmailField(default="[email protected]") geo_point_field = GeoPointField(default=lambda: [1, 2]) sequence_field = SequenceField() uuid_field = UUIDField(default=uuid.uuid4) generic_embedded_document_field = GenericEmbeddedDocumentField( default=lambda: EmbeddedDoc()) Simple.drop_collection() Doc.drop_collection() Doc().save() json_data = Doc.objects.to_json() doc_objects = list(Doc.objects) self.assertEqual(doc_objects, Doc.objects.from_json(json_data)) def test_as_pymongo(self): from decimal import Decimal class User(Document): id = ObjectIdField('_id') name = StringField() age = IntField() price = DecimalField() User.drop_collection() User(name="Bob Dole", age=89, price=Decimal('1.11')).save() User(name="Barack Obama", age=51, price=Decimal('2.22')).save() results = User.objects.only('id', 'name').as_pymongo() self.assertEqual(sorted(results[0].keys()), sorted(['_id', 'name'])) users = User.objects.only('name', 'price').as_pymongo() results = list(users) self.assertTrue(isinstance(results[0], dict)) self.assertTrue(isinstance(results[1], dict)) self.assertEqual(results[0]['name'], 'Bob Dole') self.assertEqual(results[0]['price'], 1.11) self.assertEqual(results[1]['name'], 'Barack Obama') self.assertEqual(results[1]['price'], 2.22) # Test coerce_types users = User.objects.only( 'name', 'price').as_pymongo(coerce_types=True) results = list(users) self.assertTrue(isinstance(results[0], dict)) self.assertTrue(isinstance(results[1], dict)) self.assertEqual(results[0]['name'], 'Bob Dole') self.assertEqual(results[0]['price'], Decimal('1.11')) 
self.assertEqual(results[1]['name'], 'Barack Obama') self.assertEqual(results[1]['price'], Decimal('2.22')) def test_as_pymongo_json_limit_fields(self): class User(Document): email = EmailField(unique=True, required=True) password_hash = StringField( db_field='password_hash', required=True) password_salt = StringField( db_field='password_salt', required=True) User.drop_collection() User(email="[email protected]", password_salt="SomeSalt", password_hash="SomeHash").save() serialized_user = User.objects.exclude( 'password_salt', 'password_hash').as_pymongo()[0] self.assertEqual(set(['_id', 'email']), set(serialized_user.keys())) serialized_user = User.objects.exclude( 'id', 'password_salt', 'password_hash').to_json() self.assertEqual('[{"email": "[email protected]"}]', serialized_user) serialized_user = User.objects.exclude( 'password_salt').only('email').as_pymongo()[0] self.assertEqual(set(['email']), set(serialized_user.keys())) serialized_user = User.objects.exclude( 'password_salt').only('email').to_json() self.assertEqual('[{"email": "[email protected]"}]', serialized_user) def test_no_dereference(self): class Organization(Document): name = StringField() class User(Document): name = StringField() organization = ReferenceField(Organization) User.drop_collection() Organization.drop_collection() whitehouse = Organization(name="White House").save() User(name="Bob Dole", organization=whitehouse).save() qs = User.objects() self.assertTrue(isinstance(qs.first().organization, Organization)) self.assertFalse(isinstance(qs.no_dereference().first().organization, Organization)) self.assertFalse(isinstance(qs.no_dereference().get().organization, Organization)) self.assertTrue(isinstance(qs.first().organization, Organization)) def test_no_dereference_embedded_doc(self): class User(Document): name = StringField() class Member(EmbeddedDocument): name = StringField() user = ReferenceField(User) class Organization(Document): name = StringField() members = ListField(EmbeddedDocumentField(Member)) ceo = ReferenceField(User) member = EmbeddedDocumentField(Member) admin = ListField(ReferenceField(User)) Organization.drop_collection() User.drop_collection() user = User(name="Flash") user.save() member = Member(name="Flash", user=user) company = Organization(name="Mongo Inc", ceo=user, member=member) company.admin.append(user) company.members.append(member) company.save() result = Organization.objects().no_dereference().first() self.assertTrue(isinstance(result.admin[0], (DBRef, ObjectId))) self.assertTrue(isinstance(result.member.user, (DBRef, ObjectId))) self.assertTrue(isinstance(result.members[0].user, (DBRef, ObjectId))) def test_cached_queryset(self): class Person(Document): name = StringField() Person.drop_collection() for i in xrange(100): Person(name="No: %s" % i).save() with query_counter() as q: self.assertEqual(q, 0) people = Person.objects [x for x in people] self.assertEqual(100, len(people._result_cache)) import platform if platform.python_implementation() != "PyPy": # PyPy evaluates __len__ when iterating with list comprehensions while CPython does not. # This may be a bug in PyPy (PyPy/#1802) but it does not affect # the behavior of MongoEngine. 
self.assertEqual(None, people._len) self.assertEqual(q, 1) list(people) self.assertEqual(100, people._len) # Caused by list calling len self.assertEqual(q, 1) people.count(with_limit_and_skip=True) # count is cached self.assertEqual(q, 1) def test_no_cached_queryset(self): class Person(Document): name = StringField() Person.drop_collection() for i in xrange(100): Person(name="No: %s" % i).save() with query_counter() as q: self.assertEqual(q, 0) people = Person.objects.no_cache() [x for x in people] self.assertEqual(q, 1) list(people) self.assertEqual(q, 2) people.count() self.assertEqual(q, 3) def test_cache_not_cloned(self): class User(Document): name = StringField() def __unicode__(self): return self.name User.drop_collection() User(name="Alice").save() User(name="Bob").save() users = User.objects.all().order_by('name') self.assertEqual("%s" % users, "[<User: Alice>, <User: Bob>]") self.assertEqual(2, len(users._result_cache)) users = users.filter(name="Bob") self.assertEqual("%s" % users, "[<User: Bob>]") self.assertEqual(1, len(users._result_cache)) def test_no_cache(self): """Ensure you can add meta data to file""" class Noddy(Document): fields = DictField() Noddy.drop_collection() for i in xrange(100): noddy = Noddy() for j in range(20): noddy.fields["key" + str(j)] = "value " + str(j) noddy.save() docs = Noddy.objects.no_cache() counter = len([1 for i in docs]) self.assertEqual(counter, 100) self.assertEqual(len(list(docs)), 100) self.assertRaises(TypeError, lambda: len(docs)) with query_counter() as q: self.assertEqual(q, 0) list(docs) self.assertEqual(q, 1) list(docs) self.assertEqual(q, 2) def test_nested_queryset_iterator(self): # Try iterating the same queryset twice, nested. names = ['Alice', 'Bob', 'Chuck', 'David', 'Eric', 'Francis', 'George'] class User(Document): name = StringField() def __unicode__(self): return self.name User.drop_collection() for name in names: User(name=name).save() users = User.objects.all().order_by('name') outer_count = 0 inner_count = 0 inner_total_count = 0 with query_counter() as q: self.assertEqual(q, 0) self.assertEqual(users.count(with_limit_and_skip=True), 7) for i, outer_user in enumerate(users): self.assertEqual(outer_user.name, names[i]) outer_count += 1 inner_count = 0 # Calling len might disrupt the inner loop if there are bugs self.assertEqual(users.count(with_limit_and_skip=True), 7) for j, inner_user in enumerate(users): self.assertEqual(inner_user.name, names[j]) inner_count += 1 inner_total_count += 1 # inner loop should always be executed seven times self.assertEqual(inner_count, 7) # outer loop should be executed seven times total self.assertEqual(outer_count, 7) # inner loop should be executed fourtynine times total self.assertEqual(inner_total_count, 7 * 7) self.assertEqual(q, 2) def test_no_sub_classes(self): class A(Document): x = IntField() y = IntField() meta = {'allow_inheritance': True} class B(A): z = IntField() class C(B): zz = IntField() A.drop_collection() A(x=10, y=20).save() A(x=15, y=30).save() B(x=20, y=40).save() B(x=30, y=50).save() C(x=40, y=60).save() self.assertEqual(A.objects.no_sub_classes().count(), 2) self.assertEqual(A.objects.count(), 5) self.assertEqual(B.objects.no_sub_classes().count(), 2) self.assertEqual(B.objects.count(), 3) self.assertEqual(C.objects.no_sub_classes().count(), 1) self.assertEqual(C.objects.count(), 1) for obj in A.objects.no_sub_classes(): self.assertEqual(obj.__class__, A) for obj in B.objects.no_sub_classes(): self.assertEqual(obj.__class__, B) for obj in 
C.objects.no_sub_classes(): self.assertEqual(obj.__class__, C) def test_query_reference_to_custom_pk_doc(self): class A(Document): id = StringField(unique=True, primary_key=True) class B(Document): a = ReferenceField(A) A.drop_collection() B.drop_collection() a = A.objects.create(id='custom_id') b = B.objects.create(a=a) self.assertEqual(B.objects.count(), 1) self.assertEqual(B.objects.get(a=a).a, a) self.assertEqual(B.objects.get(a=a.id).a, a) def test_cls_query_in_subclassed_docs(self): class Animal(Document): name = StringField() meta = { 'allow_inheritance': True } class Dog(Animal): pass class Cat(Animal): pass self.assertEqual(Animal.objects(name='Charlie')._query, { 'name': 'Charlie', '_cls': {'$in': ('Animal', 'Animal.Dog', 'Animal.Cat')} }) self.assertEqual(Dog.objects(name='Charlie')._query, { 'name': 'Charlie', '_cls': 'Animal.Dog' }) self.assertEqual(Cat.objects(name='Charlie')._query, { 'name': 'Charlie', '_cls': 'Animal.Cat' }) def test_can_have_field_same_name_as_query_operator(self): class Size(Document): name = StringField() class Example(Document): size = ReferenceField(Size) Size.drop_collection() Example.drop_collection() instance_size = Size(name="Large").save() Example(size=instance_size).save() self.assertEqual(Example.objects(size=instance_size).count(), 1) self.assertEqual(Example.objects(size__in=[instance_size]).count(), 1) def test_cursor_in_an_if_stmt(self): class Test(Document): test_field = StringField() Test.drop_collection() queryset = Test.objects if queryset: raise AssertionError('Empty cursor returns True') test = Test() test.test_field = 'test' test.save() queryset = Test.objects if not test: raise AssertionError('Cursor has data and returned False') queryset.next() if not queryset: raise AssertionError('Cursor has data and it must returns True,' ' even in the last item.') def test_bool_performance(self): class Person(Document): name = StringField() Person.drop_collection() for i in xrange(100): Person(name="No: %s" % i).save() with query_counter() as q: if Person.objects: pass self.assertEqual(q, 1) op = q.db.system.profile.find({"ns": {"$ne": "%s.system.indexes" % q.db.name}})[0] self.assertEqual(op['nreturned'], 1) def test_bool_with_ordering(self): class Person(Document): name = StringField() Person.drop_collection() Person(name="Test").save() qs = Person.objects.order_by('name') with query_counter() as q: if qs: pass op = q.db.system.profile.find({"ns": {"$ne": "%s.system.indexes" % q.db.name}})[0] self.assertFalse('$orderby' in op['query'], 'BaseQuerySet cannot use orderby in if stmt') with query_counter() as p: for x in qs: pass op = p.db.system.profile.find({"ns": {"$ne": "%s.system.indexes" % q.db.name}})[0] self.assertTrue('$orderby' in op['query'], 'BaseQuerySet cannot remove orderby in for loop') def test_bool_with_ordering_from_meta_dict(self): class Person(Document): name = StringField() meta = { 'ordering': ['name'] } Person.drop_collection() Person(name="B").save() Person(name="C").save() Person(name="A").save() with query_counter() as q: if Person.objects: pass op = q.db.system.profile.find({"ns": {"$ne": "%s.system.indexes" % q.db.name}})[0] self.assertFalse('$orderby' in op['query'], 'BaseQuerySet must remove orderby from meta in boolen test') self.assertEqual(Person.objects.first().name, 'A') self.assertTrue(Person.objects._has_data(), 'Cursor has data and returned False') def test_queryset_aggregation_framework(self): class Person(Document): name = StringField() age = IntField() Person.drop_collection() p1 = Person(name="Isabella 
Luanna", age=16) p1.save() p2 = Person(name="Wilson Junior", age=21) p2.save() p3 = Person(name="Sandra Mara", age=37) p3.save() data = Person.objects(age__lte=22).aggregate( {'$project': {'name': {'$toUpper': '$name'}}} ) self.assertEqual(list(data), [ {'_id': p1.pk, 'name': "ISABELLA LUANNA"}, {'_id': p2.pk, 'name': "WILSON JUNIOR"} ]) data = Person.objects(age__lte=22).order_by('-name').aggregate( {'$project': {'name': {'$toUpper': '$name'}}} ) self.assertEqual(list(data), [ {'_id': p2.pk, 'name': "WILSON JUNIOR"}, {'_id': p1.pk, 'name': "ISABELLA LUANNA"} ]) data = Person.objects( age__gte=17, age__lte=40).order_by('-age').aggregate( {'$group': { '_id': None, 'total': {'$sum': 1}, 'avg': {'$avg': '$age'} } } ) self.assertEqual(list(data), [ {'_id': None, 'avg': 29, 'total': 2} ]) def test_delete_count(self): [self.Person(name="User {0}".format(i), age=i * 10).save() for i in xrange(1, 4)] self.assertEqual(self.Person.objects().delete(), 3) # test ordinary QuerySey delete count [self.Person(name="User {0}".format(i), age=i * 10).save() for i in xrange(1, 4)] self.assertEqual(self.Person.objects().skip(1).delete(), 2) # test Document delete with existing documents self.Person.objects().delete() self.assertEqual(self.Person.objects().skip(1).delete(), 0) # test Document delete without existing documents def test_max_time_ms(self): # 778: max_time_ms can get only int or None as input self.assertRaises(TypeError, self.Person.objects(name="name").max_time_ms, "not a number") def test_subclass_field_query(self): class Animal(Document): is_mamal = BooleanField() meta = dict(allow_inheritance=True) class Cat(Animal): whiskers_length = FloatField() class ScottishCat(Cat): folded_ears = BooleanField() Animal.drop_collection() Animal(is_mamal=False).save() Cat(is_mamal=True, whiskers_length=5.1).save() ScottishCat(is_mamal=True, folded_ears=True).save() self.assertEquals(Animal.objects(folded_ears=True).count(), 1) self.assertEquals(Animal.objects(whiskers_length=5.1).count(), 1) def test_loop_via_invalid_id_does_not_crash(self): class Person(Document): name = StringField() Person.objects.delete() Person._get_collection().update({"name": "a"}, {"$set": {"_id": ""}}, upsert=True) for p in Person.objects(): self.assertEqual(p.name, 'a') def test_last_field_name_like_operator(self): class EmbeddedItem(EmbeddedDocument): type = StringField() class Doc(Document): item = EmbeddedDocumentField(EmbeddedItem) Doc.drop_collection() doc = Doc(item=EmbeddedItem(type="axe")) doc.save() self.assertEqual(1, Doc.objects(item__type__="axe").count()) if __name__ == '__main__': unittest.main()
mit
-371,470,882,911,775,600
32.161409
118
0.567382
false
eahrold/SysOps
observy/notifications/SlackNotification.py
1
1532
#!/usr/bin/env python
import subprocess
import json
import urllib2, urllib

from notifications import HookableNotifications


class SlackNotification(HookableNotifications):
    """Slack Notification class"""

    _webhook_service_name = 'slack'

    def __init__(self, errors):
        super(SlackNotification, self).__init__(errors)

    def send(self):
        print "Sending slack notifications"
        for error in self.errors:
            message = error['message']
            status_code = error['status_code']

            # Status code 3 is treated as a notice; everything else is an alert.
            icon_emoji = ":fire_engine:" if status_code == 3 else ":fire:"
            username = "server-notice" if status_code == 3 else "server-alert"

            host_info = self.host_info()
            full_message = "Alert from %s: %s at %s" % (host_info['host'],
                                                        message,
                                                        self.timestamp())

            payload = {
                "text": full_message,
                "icon_emoji": icon_emoji,
                "username": username,
            }

            # Post the payload as JSON to every configured webhook for this service.
            for webhook in self.webhooks():
                try:
                    req = urllib2.Request(webhook)
                    req.add_header('Content-Type', 'application/json')
                    urllib2.urlopen(req, json.dumps(payload))
                except Exception:
                    # A failing webhook should not stop the remaining notifications.
                    pass
mit
8,007,633,411,876,338,000
31.595745
78
0.490862
false
Delosari/dazer
bin/lib/ssp_functions/ssp_Hector_Fit3D_my.py
1
2446
#!/usr/bin/python
import sys
import numpy as np
from numpy import float_
from numpy import absolute as abs
from numpy import random as ran
import matplotlib
from scipy.signal.signaltools import convolve2d
from scipy.interpolate.interpolate import interp1d

def A_l(Rv, l):
    # Extinction ratio a(x) + b(x)/Rv for wavelength l given in Angstroms.
    l = l/10000.  # Angstroms to microns
    x = 1/l
    if x > 1.1:
        y = (x-1.82)
        ax = 1+0.17699*y-0.50447*y**2-0.02427*y**3+0.72085*y**4+0.01979*y**5-0.77530*y**6+0.32999*y**7
        bx = 1.41338*y+2.28305*y**2+1.07233*y**3-5.38434*y**4-0.62251*y**5+5.30260*y**6-2.09002*y**7
    else:
        ax = 0.574*x**1.61
        bx = -0.527*x**1.61
    Arat = ax+bx/Rv
    return Arat

def median_filter(box, arra):
    # Running median with window half-width 'box'; the edges fall back to a shrinking window.
    if box == 2*int(box/2.):
        box = box+1
    val = arra
    # print val.shape, box
    for i in range(box, (len(val)+1-box)):
        tmp = np.zeros(2*box)
        for jk in range(0, 2*box):
            tmp[jk] = arra[i-box+jk]
        val[i] = np.median(tmp)
    for i in range(1, box):
        effec_box = i
        tmp = np.zeros(2*effec_box)
        for jk in range(0, 2*effec_box):
            tmp[jk] = arra[i-effec_box+jk]
        val[i] = np.median(tmp)
    for i in range(len(val)+1-box, len(val)):
        effec_box = len(val)-i+1
        tmp = np.zeros(2*effec_box)
        for jk in range(0, 2*effec_box):
            tmp[jk] = arra[i-effec_box+jk-1]
        val[i] = np.median(tmp)
    val[0] = val[1]
    return val

def median_box(box, arra):
    # Median of consecutive boxes of width 2*box; returns the binned array.
    if box == 2*int(box/2.):
        box = box+1
    in_val = arra
    out_val = []
    for i in range(box, len(in_val)+1-box, 2*box):
        tmp = np.zeros(2*box)
        for j in range(0, 2*box):
            tmp[j] = arra[i-box+j]
        out_val.extend([np.median(tmp)])
    out_val = np.array(out_val)
    return out_val

def sycall(comand):
    # Run a shell command given as a single whitespace-separated string.
    from subprocess import call
    line = comand.split(" ")
    fcomand = []
    fcomand.extend(line)
    call(fcomand)

def stats(data):
    # Basic statistics: mean, mean+std, median, min, max, std, mean+std.
    out = np.zeros(7)
    out[0] = np.mean(data)
    out[1] = np.mean(data)+np.std(data)
    out[2] = np.median(data)
    out[3] = np.amin(data)
    out[4] = np.amax(data)
    out[5] = np.std(data)
    out[6] = np.mean(data)+np.std(data)
    return out

def mycall(comand, alias_py='python'):
    # Run a python script through the given interpreter alias, echoing the command first.
    from subprocess import call
    line = comand.split(" ")
    fcomand = [alias_py]
    fcomand.extend(line)
    linp = ''
    nx = len(fcomand)
    for i in range(1, nx):
        linp = linp+fcomand[i]+" "
    print linp
    call(fcomand)
    print "DONE"
mit
1,560,906,364,386,569,200
25.311828
100
0.572772
false
KarlParkinson/practice
dataStructures/hashTable.py
1
2520
class HashTable:
    def __init__(self, size):
        self.size = size
        self.keys = [None]*size
        self.data = [None]*size

    def put(self, key, data):
        hashValue = self._hash(key)
        if (self.keys[hashValue] == None):
            # no collision, found empty slot, so insert
            self.keys[hashValue] = key
            self.data[hashValue] = data
        elif (self.keys[hashValue] == key):
            # no collision, found spot, replace old data
            self.data[hashValue] = data
        else:
            hashValue = self._rehash(hashValue)
            while (self.keys[hashValue] != None and self.keys[hashValue] != key and self.keys[hashValue] != 'deleted'):
                hashValue = self._rehash(hashValue)
            if (self.keys[hashValue] == None or self.keys[hashValue] == 'deleted'):
                # found empty slot, insert data
                self.keys[hashValue] = key
                self.data[hashValue] = data
            else:
                # found slot, replace data
                self.data[hashValue] = data

    def get(self, key):
        hashValue = self._hash(key)
        found = False
        stop = False
        startPos = hashValue
        while (self.keys[hashValue] != None and not found and not stop):
            if (self.keys[hashValue] == key):
                found = True
            else:
                hashValue = self._rehash(hashValue)
                if (hashValue == startPos):
                    stop = True
        if (found):
            return self.data[hashValue]
        else:
            return None

    def delete(self, key):
        hashValue = self._hash(key)
        found = False
        stop = False
        startPos = hashValue
        while (self.keys[hashValue] != None and not found and not stop):
            if (self.keys[hashValue] == key):
                found = True
            else:
                hashValue = self._rehash(hashValue)
                if (hashValue == startPos):
                    stop = True
        if (found):
            self.keys[hashValue] = 'deleted'
            self.data[hashValue] = None
        else:
            return False

    def _hash(self, key):
        return key % self.size

    def _rehash(self, hashValue):
        return (hashValue+1) % self.size


h = HashTable(11)
h.put(1, 3)
h.put(12, 5)
h.put(23, 78)
print h.keys
h.delete(12)
print h.get(1)
print h.get(23)
print h.keys
h.put(34, 35)
print h.keys
print h.data
#h.put(5,6)
#h.put(7,9)
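# Illustrative check (editor's addition, assumed usage; not part of the
# original script). The table uses open addressing with linear probing
# (_rehash steps by 1 modulo the table size) and a 'deleted' marker so that
# probe chains survive removals. With size 11, keys 1, 12 and 23 all hash to
# slot 1 and probing places them in slots 1, 2 and 3; deleting 12 leaves a
# 'deleted' marker that a later put of key 34 (also hashing to slot 1) reuses.
demo = HashTable(11)
for k, v in [(1, 'a'), (12, 'b'), (23, 'c')]:
    demo.put(k, v)
demo.delete(12)
demo.put(34, 'd')   # lands in the slot freed by deleting 12
print demo.keys     # expected: [None, 1, 34, 23, None, ...]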
mit
2,377,790,933,382,312,000
27.965517
119
0.518651
false
mbodenhamer/syn
syn/tree/b/tests/test_tree.py
1
8401
from operator import attrgetter
from nose.tools import assert_raises
from syn.tree.b import Tree, Node, TreeError, do_nothing, identity
from syn.base.b import check_idempotence, Attr
from syn.base_utils import get_typename
from syn.tree.b.tests.test_node import Tst2, tree_node_from_nested_list,\
    tree_node_from_nested_list_types

#-------------------------------------------------------------------------------
# Tree

#-----------------------------------------------------------
# Tree Test 1

def tree_tst_1(treecls):
    cls = treecls._attrs.types['root'].type
    clsname = get_typename(cls)

    n1 = cls(_name='n1', _id=0)
    n2 = cls(_name='n2', _id=1)
    n3 = cls(_name='n3', _id=2)
    n4 = cls(_name='n4', _id=3)
    n5 = cls(_name='n5', _id=4)
    n6 = cls(_name='n6', _id=5)

    n1.add_child(n2)
    n1.add_child(n3)
    assert list(n1.siblings()) == []
    assert list(n2.siblings()) == [n3]
    assert list(n3.siblings()) == [n2]

    obj = treecls(n1)
    check_idempotence(obj)

    assert obj.nodes == [n1, n2, n3]
    assert obj.node_types == [clsname]
    assert obj.id_dict == {0: n1, 1: n2, 2: n3}
    assert obj.type_dict == {clsname: [n1, n2, n3]}

    assert_raises(TreeError, obj.add_node, n3)
    assert_raises(TreeError, obj.add_node, n4, parent=n5)

    obj.add_node(n4, parent=n3)
    assert n4 in obj.nodes
    assert n4 in n3._children
    assert n4._parent is n3

    assert_raises(TreeError, obj.add_node, n5, parent_id=100)
    obj.add_node(n5, parent_id=1)
    assert n5 in obj.nodes
    assert n5 in n2._children
    assert n5._parent is n2

    obj.add_node(n6)
    assert n6 in obj.nodes
    assert n6 in n5._children
    assert n6._parent is n5

    assert obj.nodes == [n1, n2, n3, n4, n5, n6]
    assert obj.node_types == [clsname]
    assert obj.id_dict == {0: n1, 1: n2, 2: n3, 3: n4, 4: n5, 5: n6}
    assert obj.type_dict == {clsname: [n1, n2, n3, n4, n5, n6]}

    for _id, node in enumerate([n1, n2, n3, n4, n5, n6]):
        assert obj.get_node_by_id(_id) == obj._get_node_by_id(_id)
        assert obj.get_node_by_id(_id) == node
    assert obj.get_node_by_id(100) is None
    assert obj.get_node_by_id(-1) is None

    assert_raises(TypeError, obj.depth_first, FooType=do_nothing)
    assert_raises(TypeError, obj._check_search_kwarg_types, {Tst2: do_nothing})
    assert_raises(TypeError, obj._check_search_kwarg_types, {0: do_nothing})

    assert list(n1.descendants()) == [n2, n5, n6, n3, n4]

    accum = []
    def collect(node):
        accum.append(node._id)

    obj.depth_first(collect)
    assert accum == [0, 1, 4, 5, 2, 3]

    accum = []
    obj.depth_first(**{clsname: collect})
    assert accum == [0, 1, 4, 5, 2, 3]

    accum = []
    obj.search_rootward(collect)
    assert accum == [0]

    accum = []
    obj.search_rootward(**{'current_node': n5, clsname: collect})
    assert accum == [4, 1, 0]

    def stop(node):
        return node._id == 3

    def get(node):
        return node._name

    assert obj.depth_first(stop_test=stop, _return=get) == 'n4'
    assert obj.search_rootward(stop_test=stop, _return=get) is None
    assert obj.search_rootward(current_node=n4, stop_test=stop, _return=get) == 'n4'
    assert obj.search_rootward(current_node=n6, stop_test=stop, _return=get) is None

    n7 = cls(_name='n7', _id=6)
    n8 = cls(_name='n8', _id=7)
    n7.add_child(n8)

    obj.replace_node(n5, n7)
    assert n2._children == [n7]
    assert n7._parent is n2
    assert n7._children == [n8]
    assert n8._parent is n7
    assert n5._children == [n6]
    assert n6._parent is n5
    assert n5._parent is None

    assert obj.nodes == [n1, n2, n3, n4, n7, n8]
    assert obj.node_types == [clsname]
    assert obj.id_dict == {0: n1, 1: n2, 2: n3, 3: n4, 6: n7, 7: n8}
    assert obj.type_dict == {clsname: [n1, n2, n3, n4, n7, n8]}

    assert_raises(TreeError, obj.remove_node, n5)
    assert_raises(TreeError, obj.replace_node, n5, n7)

    obj.remove_node(n2)
    assert n1._children == [n3]
    assert n2._parent is None
    assert n2._children == [n7]
    assert n7._parent is n2

    assert obj.nodes == [n1, n3, n4]
    assert obj.node_types == [clsname]
    assert obj.id_dict == {0: n1, 2: n3, 3: n4}
    assert obj.type_dict == {clsname: [n1, n3, n4]}

    assert_raises(TreeError, obj.replace_node, n1, n7)
    assert_raises(TreeError, obj.replace_node, n3, n7)

    obj.replace_node(n1, n2)
    assert n1._children == [n3]
    assert n3._parent is n1
    assert obj.root is n2
    assert obj.nodes == [n2, n7, n8]
    assert obj.node_types == [clsname]
    assert obj.id_dict == {1: n2, 6: n7, 7: n8}
    assert obj.type_dict == {clsname: [n2, n7, n8]}

    obj.rebuild()
    assert obj.root is n2
    assert obj.nodes == [n2, n7, n8]
    assert obj.node_types == [clsname]
    assert obj.id_dict == {1: n2, 6: n7, 7: n8}
    assert obj.type_dict == {clsname: [n2, n7, n8]}

    obj.remove_node(n2)
    assert obj.root is None
    assert obj.nodes == []
    assert obj.node_types == []
    assert obj.id_dict == {}
    assert obj.type_dict == {}

#-----------------------------------------------------------
# Tree Test 2

def tree_tst_2(treecls):
    from syn.base_utils import seq_list_nested

    b = 3
    d = 4  # 121 nodes
    # d = 6 # 1093 nodes
    # d = 7 # 3280 nodes
    # d = 8 # Almost 10,000 nodes

    lst, N = seq_list_nested(b, d, top_level=False)
    root = tree_node_from_nested_list(lst[0], lst[1])
    assert isinstance(root, Node)

    tree1 = treecls(root)
    base_id = 0
    check_idempotence(tree1)

    assert len(tree1.nodes) == N
    assert tree1.node_types == ['Tst1',]
    assert sorted(tree1.id_dict.keys()) == list(range(base_id, base_id + N))
    assert list(tree1.type_dict.keys()) == ['Tst1']
    assert sorted(tree1.type_dict['Tst1'], key=attrgetter('_id')) == \
        sorted(tree1.nodes, key=attrgetter('_id'))

    accum = []
    def collect(node):
        accum.append(node.value)

    tree1.depth_first(collect)
    assert sum(accum) == sum(range(1, N+1))

    while tree1.root._children:
        tree1.remove_node(tree1.root._children[0])
    assert tree1.nodes == [tree1.root]
    assert tree1.root._children == []

    mod = 4
    base_id = 0
    sproot = tree_node_from_nested_list_types(lst[0], lst[1], mod)
    tree2 = Tree(sproot)

    assert len(tree2.nodes) == N
    assert tree2.node_types == ['Tst1', 'Tst2']
    assert sorted(tree2.id_dict.keys()) == list(range(base_id, base_id+N))
    assert sorted(tree2.type_dict.keys()) == sorted(['Tst1', 'Tst2'])
    assert sorted(tree2.type_dict['Tst1'] + tree2.type_dict['Tst2'],
                  key=attrgetter('_id')) == \
        sorted(tree2.nodes, key=attrgetter('_id'))

    accum = []
    tree2.depth_first(collect)
    assert sum(accum) == sum(range(1, N+1))

    accum = []
    tree2.depth_first(Tst2=collect)
    if N % mod != 0:
        assert sum(accum) == sum(range(mod, N, mod))

#-----------------------------------------------------------
# Tree

def test_tree():
    # Test that inequal roots mean inequal Trees
    n1 = Node()
    n2 = Node(_id=2)
    t1 = Tree(n1)
    t2 = Tree(n2)
    assert n1 != n2
    assert t1 != t2

    t3 = Tree()
    t3.validate()
    assert t3 == t1

    # In-depth tree tests
    tree_tst_1(Tree)  # basic tree operations
    tree_tst_2(Tree)  # test with a moderate/large number of nodes

    # Miscellaneous tests
    assert identity(5) == 5

    n3 = Node(_id=3)
    t2.add_node(n3, parent=n2)
    n3._parent = None
    assert_raises(TreeError, t2.remove_node, n3)
    assert_raises(TreeError, t2.replace_node, n3, n1)

#-------------------------------------------------------------------------------
# Test root node validation

rnv_accum = []

class Root1(Node):
    def validate(self):
        super(Root1, self).validate()
        rnv_accum.append(1)

class RNVTree(Tree):
    _attrs = dict(root = Attr(Root1))

def test_root_validation():
    RNVTree(Root1())
    assert sum(rnv_accum) == len(rnv_accum) == 1  # just a sanity check

#-------------------------------------------------------------------------------

if __name__ == '__main__': # pragma: no cover
    from syn.base_utils import run_all_tests
    run_all_tests(globals(), verbose=True, print_errors=False)
mit
5,143,387,071,253,162,000
27.770548
80
0.562195
false
buzz/flicks
flicksapp/management/commands/import_amc_xml.py
1
6230
from datetime import datetime import re import elementtree.ElementTree as et from django.core.management.base import BaseCommand from flicksapp.models import Movie, Country, Person, Genre, File imdb_regex = re.compile("http://.*imdb.com/title/tt0*(\d+)") imdb_regex2 = re.compile("http://.*imdb.com/Title\?0*(\d+)") def parse_imdb(f): """Parse imdb number out of imdb url. Skip field if not possible.""" global imdb_regex, imdb_regex2 r = imdb_regex.match(f) try: return int(r.groups()[0]) except AttributeError: r = imdb_regex2.match(f) return int(r.groups()[0]) class Command(BaseCommand): args = '<FILE>' help = 'Imports AMC XML file' def handle(self, *args, **options): # load xml file try: xml_doc = et.parse(args[0]) except IndexError: self.stdout.write('No file given\n') return except IOError: self.stdout.write("Could not open file: %s" % args[0]) return # add movies movies = xml_doc.findall("//Movie") for i, movie in enumerate(movies): a = movie.attrib # keep track of imported fields fields = {} new_movie = Movie() try: new_movie.id = int(a["Number"]) new_movie.title = a["OriginalTitle"].strip() except KeyError: self.stdout.write( "Panic! Could not extract Number nor OriginalTitle." + "Skipping title: %s\n" % a) continue new_movie.save() # or relations cannot be assigned # if we can extract imdb id we leave most other fields # empty that can be filled by imdb try: url = a["URL"] new_movie.imdb_id = parse_imdb(url) fields['imdb_id'] = True except (KeyError, AttributeError): # if imdb id is not present we need to copy other # fields fields['imdb_id'] = False if url and len(url) > 2: new_movie.notes = "URL: %s\n" % url.strip() fields['notes'] = True # director try: director_name = a["Director"].strip() try: p = Person.objects.get(name=director_name) except Person.DoesNotExist: # ok we have to fill imdb person ourselves in some cases if director_name == 'David Lynch': imdb_id = 186 elif director_name == 'Carsten Walter': imdb_id = None elif director_name == 'Roger Sommer': imdb_id = None elif director_name == 'Dieter Rhode': imdb_id = None else: raise Exception( "Panic! 
Manually assign imdb id for person " + "'%s' (%s)\n" % (director_name, new_movie.title)) p = Person(imdb_id=imdb_id, name=director_name) p.save() new_movie.directors.add(p) fields['directors'] = True except KeyError: fields['directors'] = False # country try: country_name = a["Country"].strip() c, created = Country.objects.get_or_create( name=country_name) c.save() new_movie.countries.add(c) fields['countries'] = True except KeyError: fields['countries'] = False # category try: genre_name = a["Category"].strip() g, created = Genre.objects.get_or_create( name=genre_name) g.save() new_movie.genres.add(g) fields['genres'] = True except KeyError: fields['genres'] = False # year try: new_movie.year = int(a["Year"].strip()) fields['year'] = True except (KeyError, ValueError): fields['year'] = False # runtime try: new_movie.runtime = int(a["Length"].strip()) fields['runtime'] = True except (KeyError, ValueError): fields['runtime'] = False # plot (description) try: new_movie.plot = a["Description"].strip() fields['plot'] = True except (KeyError, ValueError): fields['plot'] = False # always import non-imdb fields # seen (checked) try: checked = a["Checked"] if checked == 'True': seen = True elif checked == 'False': seen = False else: raise ValueError() new_movie.seen = seen fields['seen'] = True except (KeyError, ValueError): fields['seen'] = False # date added try: new_movie.added_on = datetime.strptime(a["Date"], '%m/%d/%Y') fields['added_on'] = True except (KeyError, ValueError): fields['added_on'] = False # finally save movie new_movie.save() # log import imported = ' '.join([f for f in fields.keys() if fields[f]]) not_imported = ' '.join( [('-%s' % f) for f in fields.keys() if not fields[f]]) self.stdout.write( "Imported '%s' (%s %s)\n" % (new_movie.title, imported, not_imported))
gpl-2.0
6,874,207,777,880,569,000
37.9375
80
0.440931
false
telerainbow/randgame
randgame.py
1
3031
import random, cmd, sys


class randgame(cmd.Cmd):
    intro = "this is randgame"
    prompt = "randgame # "
    players = ["asdf", "foo"]
    turn = 0
    rounds = 0
    active = False
    settings = {"no_two": 0, "voice": 0}

    def do_f(self, arg):
        print self.last_player

    def do_set(self, arg):
        'set settings. see \"list settings\" for available options'
        if arg == "" or len(arg.split()) != 2:
            print "*** syntax: set <key> <value>, where value may be 0 or 1"
            return
        setting, value = arg.split()
        if setting not in self.settings.keys():
            print '*** unrecognized setting. available settings: {0}'.format(", ".join(self.settings.keys()))
            return
        if value not in ['0', '1']:
            print "*** value must be 0 or 1"
            return
        self.settings[setting] = int(value)

    def do_start(self, arg):
        'starts the game'
        if self.active == False:
            print "Game Started! glhf"
            self.active = True
            self.shuffle()
            self.do_next(1)
        else:
            print "*** Game already started! use \"next\" instead"

    def do_next(self, arg):
        'shows next player'
        if self.active == False:
            print "*** No active game, use \"start\" first"
            return
        print "#"*50
        print
        print "{0} Player is {1}".format(("First" if arg == 1 else "Next"), self.players[self.turn])
        print
        print "#"*50
        self.turn += 1
        if self.turn == len(self.players):
            self.turn = 0
            self.shuffle()
            self.rounds += 1

    def do_end(self, arg):
        'ends current game (but does not exit)'
        if self.active != True:
            print "*** no active game!"
            return
        self.turn = 0
        self.active = False
        print "game ended after {0} rounds!".format(self.rounds)
        self.rounds = 0

    def shuffle(self):
        if self.settings["no_two"] == 0:
            random.shuffle(self.players)
        else:
            last_player = self.players.pop()
            random.shuffle(self.players)
            self.players.insert(random.randint(1, len(self.players)), last_player)

    def do_addplayer(self, arg):
        'add a player to the game'
        if self.active == True:
            print "*** can't add player during active game"
            return
        if arg != "":
            if arg not in self.players:
                self.players.append(arg)
            else:
                print "*** player already added, please specify a different name"
        else:
            print "*** please specify a name"

    def do_remove(self, arg):
        'remove a player from the game'
        if self.active == True:
            print "*** can't remove player during game"
            return
        try:
            self.players.remove(arg)
            print "removed player {0} from game".format(arg)
        except ValueError:
            print "*** player not in game (check spelling?)"

    def do_list(self, arg):
        'list settings or list players'
        if arg == "settings":
            print "settings: "
            for key in self.settings:
                print "\t{0}\t{1}".format(key, self.settings[key])
        elif arg == "players":
            print ", ".join(map(str, self.players))
        else:
            print "*** \"list settings\" or \"list players\""

    def do_q(self, arg):
        'exit the program'
        return True

    def do_exit(self, arg):
        'exit the program'
        return True


if __name__ == '__main__':
    randgame().cmdloop()
gpl-2.0
6,003,976,552,822,596,000
23.642276
100
0.626196
false
MatKallada/nbgrader
nbgrader/tests/apps/test_nbgrader_fetch.py
2
1347
import os

from nbgrader.tests import run_command
from nbgrader.tests.apps.base import BaseTestApp


class TestNbGraderFetch(BaseTestApp):

    def _release(self, assignment, exchange):
        self._copy_file("files/test.ipynb", "release/ps1/p1.ipynb")
        run_command([
            "nbgrader", "release", assignment,
            "--NbGraderConfig.course_id=abc101",
            "--TransferApp.exchange_directory={}".format(exchange)
        ])

    def _fetch(self, assignment, exchange, flags=None, retcode=0):
        cmd = [
            "nbgrader", "fetch", assignment,
            "--course", "abc101",
            "--TransferApp.exchange_directory={}".format(exchange)
        ]

        if flags is not None:
            cmd.extend(flags)

        run_command(cmd, retcode=retcode)

    def test_help(self):
        """Does the help display without error?"""
        run_command(["nbgrader", "fetch", "--help-all"])

    def test_fetch(self, exchange):
        self._release("ps1", exchange)
        self._fetch("ps1", exchange)
        assert os.path.isfile("ps1/p1.ipynb")

        # make sure it fails if the assignment already exists
        self._fetch("ps1", exchange, retcode=1)

        # make sure it fails even if the assignment is incomplete
        os.remove("ps1/p1.ipynb")
        self._fetch("ps1", exchange, retcode=1)
bsd-3-clause
4,670,988,268,560,823,000
30.325581
67
0.602821
false
Meriipu/quodlibet
quodlibet/browsers/soundcloud/api.py
1
10780
# Copyright 2016 Nick Boultbee # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. from datetime import datetime from urllib.parse import urlencode from gi.repository import GObject, Gio, Soup from quodlibet import util, config from quodlibet.util import website from quodlibet.util.dprint import print_w, print_d from quodlibet.util.http import download_json, download from .library import SoundcloudFile from .util import json_callback, Wrapper, sanitise_tag, DEFAULT_BITRATE, EPOCH class RestApi(GObject.Object): """Semi-generic REST API client, using libsoup / `http.py`""" def __init__(self, root): super().__init__() self._cancellable = Gio.Cancellable.new() self.root = root def _default_params(self): return {} def _get(self, path, callback, **kwargs): args = self._default_params() args.update(kwargs) msg = Soup.Message.new('GET', self._url(path, args)) download_json(msg, self._cancellable, callback, None) def _post(self, path, callback, **kwargs): args = self._default_params() args.update(kwargs) msg = Soup.Message.new('POST', self._url(path)) post_body = urlencode(args) if not isinstance(post_body, bytes): post_body = post_body.encode("ascii") msg.set_request('application/x-www-form-urlencoded', Soup.MemoryUse.COPY, post_body) download_json(msg, self._cancellable, callback, None) def _put(self, path, callback, **kwargs): args = self._default_params() args.update(kwargs) msg = Soup.Message.new('PUT', self._url(path)) body = urlencode(args) if not isinstance(body, bytes): body = body.encode("ascii") msg.set_request('application/x-www-form-urlencoded', Soup.MemoryUse.COPY, body) download_json(msg, self._cancellable, callback, None) def _delete(self, path, callback, **kwargs): args = self._default_params() args.update(kwargs) # Turns out the SC API doesn't mind body arguments for DELETEs, # and as it's neater and slightly more secure, let's do that. 
body = urlencode(args) if not isinstance(body, bytes): body = body.encode("ascii") msg = Soup.Message.new('DELETE', self._url(path)) msg.set_request('application/x-www-form-urlencoded', Soup.MemoryUse.COPY, body) download(msg, self._cancellable, callback, None, try_decode=True) def _url(self, path, args=None): path = "%s%s" % (self.root, path) return "%s?%s" % (path, urlencode(args)) if args else path class SoundcloudApiClient(RestApi): __CLIENT_SECRET = 'ca2b69301bd1f73985a9b47224a2a239' __CLIENT_ID = '5acc74891941cfc73ec8ee2504be6617' API_ROOT = "https://api.soundcloud.com" REDIRECT_URI = 'https://quodlibet.github.io/callbacks/soundcloud.html' PAGE_SIZE = 150 MIN_DURATION_SECS = 120 COUNT_TAGS = {'%s_count' % t for t in ('playback', 'download', 'likes', 'favoritings', 'download', 'comments')} __gsignals__ = { 'fetch-success': (GObject.SignalFlags.RUN_LAST, None, (object,)), 'fetch-failure': (GObject.SignalFlags.RUN_LAST, None, (object,)), 'songs-received': (GObject.SignalFlags.RUN_LAST, None, (object,)), 'comments-received': (GObject.SignalFlags.RUN_LAST, None, (int, object,)), 'authenticated': (GObject.SignalFlags.RUN_LAST, None, (object,)), } def __init__(self): print_d("Starting Soundcloud API...") super().__init__(self.API_ROOT) self.access_token = config.get("browsers", "soundcloud_token", None) self.online = bool(self.access_token) self.user_id = config.get("browsers", "soundcloud_user_id", None) if not self.user_id: self._get_me() self.username = None def _default_params(self): params = {'client_id': self.__CLIENT_ID} if self.access_token: params["oauth_token"] = self.access_token return params def authenticate_user(self): # create client object with app credentials if self.access_token: print_d("Ignoring saved Soundcloud token...") # redirect user to authorize URL website(self._authorize_url) def log_out(self): print_d("Destroying access token...") self.access_token = None self.save_auth() self.online = False def get_token(self, code): print_d("Getting access token...") options = { 'grant_type': 'authorization_code', 'redirect_uri': self.REDIRECT_URI, 'client_id': self.__CLIENT_ID, 'client_secret': self.__CLIENT_SECRET, 'code': code, } self._post('/oauth2/token', self._receive_token, **options) @json_callback def _receive_token(self, json): self.access_token = json['access_token'] print_d("Got an access token: %s" % self.access_token) self.save_auth() self.online = True self._get_me() def _get_me(self): self._get('/me', self._receive_me) @json_callback def _receive_me(self, json): self.username = json['username'] self.user_id = json['id'] self.emit('authenticated', Wrapper(json)) def get_tracks(self, params): merged = { "q": "", "limit": self.PAGE_SIZE, "duration[from]": self.MIN_DURATION_SECS * 1000, } for k, v in params.items(): delim = " " if k == 'q' else "," merged[k] = delim.join(list(v)) print_d("Getting tracks: params=%s" % merged) self._get('/tracks', self._on_track_data, **merged) @json_callback def _on_track_data(self, json): songs = list(filter(None, [self._audiofile_for(r) for r in json])) self.emit('songs-received', songs) def get_favorites(self): self._get('/me/favorites', self._on_track_data, limit=self.PAGE_SIZE) def get_my_tracks(self): self._get('/me/tracks', self._on_track_data, limit=self.PAGE_SIZE) def get_comments(self, track_id): self._get('/tracks/%s/comments' % track_id, self._receive_comments, limit=200) @json_callback def _receive_comments(self, json): print_d("Got comments: %s" % json) if json and len(json): # Should all be the same track... 
track_id = json[0]["track_id"] self.emit('comments-received', track_id, json) def save_auth(self): config.set("browsers", "soundcloud_token", self.access_token or "") config.set("browsers", "soundcloud_user_id", self.user_id or "") def put_favorite(self, track_id): print_d("Saving track %s as favorite" % track_id) url = '/me/favorites/%s' % track_id self._put(url, self._on_favorited) def remove_favorite(self, track_id): print_d("Deleting favorite for %s" % track_id) url = '/me/favorites/%s' % track_id self._delete(url, self._on_favorited) @json_callback def _on_favorited(self, json): print_d("Successfully updated favorite: %s" % json) def _audiofile_for(self, response): r = Wrapper(response) d = r.data dl = d.get("downloadable", False) and d.get("download_url", None) try: url = dl or r.stream_url except AttributeError as e: print_w("Unusable result (%s) from SC: %s" % (e, d)) return None uri = SoundcloudApiClient._add_secret(url) song = SoundcloudFile(uri=uri, track_id=r.id, favorite=d.get("user_favorite", False), client=self) def get_utc_date(s): parts = s.split() dt = datetime.strptime(" ".join(parts[:-1]), "%Y/%m/%d %H:%M:%S") return int((dt - EPOCH).total_seconds()) def put_time(tag, r, attr): try: song[tag] = get_utc_date(r[attr]) except KeyError: pass def put_date(tag, r, attr): try: parts = r[attr].split() dt = datetime.strptime(" ".join(parts[:-1]), "%Y/%m/%d %H:%M:%S") song[tag] = dt.strftime("%Y-%m-%d") except KeyError: pass def put_counts(tags): for tag in tags: try: song["~#%s" % tag] = int(r[tag]) except KeyError: pass try: song.update(title=r.title, artist=r.user["username"], soundcloud_user_id=str(r.user_id), website=r.permalink_url, genre=u"\n".join(r.genre and r.genre.split(",") or [])) if dl: song.update(format=r.original_format) song["~#bitrate"] = r.original_content_size * 8 / r.duration else: song["~#bitrate"] = DEFAULT_BITRATE if r.description: song["comment"] = sanitise_tag(r.description) song["~#length"] = int(r.duration) / 1000 art_url = r.artwork_url if art_url: song["artwork_url"] = ( art_url.replace("-large.", "-t500x500.")) put_time("~#mtime", r, "last_modified") put_date("date", r, "created_at") put_counts(self.COUNT_TAGS) plays = d.get("user_playback_count", 0) if plays: song["~#playcount"] = plays # print_d("Got song: %s" % song) except Exception as e: print_w("Couldn't parse a song from %s (%r). " "Had these tags:\n %s" % (r, e, song.keys())) return song @classmethod def _add_secret(cls, stream_url): return "%s?client_id=%s" % (stream_url, cls.__CLIENT_ID) @util.cached_property def _authorize_url(self): url = '%s/connect' % (self.API_ROOT,) options = { 'scope': 'non-expiring', 'client_id': self.__CLIENT_ID, 'response_type': 'code', 'redirect_uri': self.REDIRECT_URI } return '%s?%s' % (url, urlencode(options))
gpl-2.0
-444,583,630,564,009,100
35.542373
79
0.552041
false
rockfruit/bika.lims
bika/lims/browser/referencesample.py
1
17439
# This file is part of Bika LIMS # # Copyright 2011-2016 by it's authors. # Some rights reserved. See LICENSE.txt, AUTHORS.txt. from AccessControl import getSecurityManager from bika.lims.browser import BrowserView from bika.lims import bikaMessageFactory as _ from bika.lims.utils import t from bika.lims.browser.bika_listing import BikaListingView from bika.lims.utils import isActive from bika.lims.browser.analyses import AnalysesView from datetime import datetime from operator import itemgetter from plone.app.layout.globals.interfaces import IViewView from Products.Archetypes.config import REFERENCE_CATALOG from Products.ATContentTypes.utils import DT2dt from Products.CMFCore.utils import getToolByName from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile from zope.component import getMultiAdapter from zope.interface import implements import json, plone class ViewView(BrowserView): """ Reference Sample View """ implements(IViewView) template = ViewPageTemplateFile("templates/referencesample_view.pt") def __init__(self, context, request): BrowserView.__init__(self, context, request) self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png" def __call__(self): rc = getToolByName(self.context, REFERENCE_CATALOG) self.results = {} # {category_title: listofdicts} for r in self.context.getReferenceResults(): service = rc.lookupObject(r['uid']) cat = service.getCategoryTitle() if cat not in self.results: self.results[cat] = [] r['service'] = service self.results[cat].append(r) self.categories = self.results.keys() self.categories.sort() return self.template() class ReferenceAnalysesViewView(BrowserView): """ View of Reference Analyses linked to the Reference Sample. """ implements(IViewView) template = ViewPageTemplateFile("templates/referencesample_analyses.pt") def __init__(self, context, request): super(ReferenceAnalysesViewView, self).__init__(context, request) self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png" self.title = self.context.translate(_("Reference Analyses")) self.description = "" self._analysesview = None def __call__(self): return self.template() def get_analyses_table(self): """ Returns the table of Reference Analyses """ return self.get_analyses_view().contents_table() def get_analyses_view(self): if not self._analysesview: # Creates the Analyses View if not exists yet self._analysesview = ReferenceAnalysesView(self.context, self.request) self._analysesview.allow_edit = False self._analysesview.show_select_column = False self._analysesview.show_workflow_action_buttons = False self._analysesview.form_id = "%s_qcanalyses" % self.context.UID() self._analysesview.review_states[0]['transitions'] = [{}] return self._analysesview def getReferenceSampleId(self): return self.context.id; def get_analyses_json(self): return self.get_analyses_view().get_analyses_json() class ReferenceAnalysesView(AnalysesView): """ Reference Analyses on this sample """ implements(IViewView) def __init__(self, context, request): AnalysesView.__init__(self, context, request) self.catalog = 'bika_analysis_catalog' self.contentFilter = {'portal_type':'ReferenceAnalysis', 'path': {'query':"/".join(self.context.getPhysicalPath()), 'level':0}} self.show_select_row = False self.show_sort_column = False self.show_select_column = False self.allow_edit = False self.columns = { 'id': {'title': _('ID'), 'toggle':False}, 'getReferenceAnalysesGroupID': {'title': _('QC Sample ID'), 'toggle': True}, 'Category': {'title': _('Category'), 
'toggle': True}, 'Service': {'title': _('Service'), 'toggle':True}, 'Worksheet': {'title': _('Worksheet'), 'toggle':True}, 'Method': { 'title': _('Method'), 'sortable': False, 'toggle': True}, 'Instrument': { 'title': _('Instrument'), 'sortable': False, 'toggle': True}, 'Result': {'title': _('Result'), 'toggle':True}, 'Captured': {'title': _('Captured'), 'toggle':True}, 'Uncertainty': {'title': _('+-'), 'toggle':True}, 'DueDate': {'title': _('Due Date'), 'index': 'getDueDate', 'toggle':True}, 'retested': {'title': _('Retested'), 'type':'boolean', 'toggle':True}, 'state_title': {'title': _('State'), 'toggle':True}, } self.review_states = [ {'id':'default', 'title': _('All'), 'contentFilter':{}, 'transitions': [], 'columns':['id', 'getReferenceAnalysesGroupID', 'Category', 'Service', 'Worksheet', 'Method', 'Instrument', 'Result', 'Captured', 'Uncertainty', 'DueDate', 'state_title'], }, ] self.anjson = {} def isItemAllowed(self, obj): allowed = super(ReferenceAnalysesView, self).isItemAllowed(obj) return allowed if not allowed else obj.getResult() != '' def folderitem(self, obj, item, index): item = super(ReferenceAnalysesView, self).folderitem(obj, item, index) if not item: return None service = obj.getService() item['Category'] = service.getCategoryTitle() item['Service'] = service.Title() item['Captured'] = self.ulocalized_time(obj.getResultCaptureDate()) brefs = obj.getBackReferences("WorksheetAnalysis") item['Worksheet'] = brefs and brefs[0].Title() or '' # The following item keywords are required for the # JSON return value below, which is used to render graphs. # they are not actually used in the table rendering. item['Keyword'] = service.getKeyword() item['Unit'] = service.getUnit() self.addToJSON(obj, service, item) return item def addToJSON(self, analysis, service, item): """ Adds an analysis item to the self.anjson dict that will be used after the page is rendered to generate a QC Chart """ parent = analysis.aq_parent qcid = parent.id serviceref = "%s (%s)" % (item['Service'], item['Keyword']) trows = self.anjson.get(serviceref, {}) anrows = trows.get(qcid, []) anid = '%s.%s' % (item['getReferenceAnalysesGroupID'], item['id']) rr = parent.getResultsRangeDict() uid = service.UID() if uid in rr: specs = rr.get(uid, None) smin = float(specs.get('min', 0)) smax = float(specs.get('max', 0)) error = float(specs.get('error', 0)) target = float(specs.get('result', 0)) result = float(item['Result']) error_amount = ((target / 100) * error) if target > 0 else 0 upper = smax + error_amount lower = smin - error_amount anrow = {'date': item['Captured'], 'min': smin, 'max': smax, 'target': target, 'error': error, 'erroramount': error_amount, 'upper': upper, 'lower': lower, 'result': result, 'unit': item['Unit'], 'id': item['uid']} anrows.append(anrow) trows[qcid] = anrows self.anjson[serviceref] = trows def get_analyses_json(self): return json.dumps(self.anjson) class ReferenceResultsView(BikaListingView): """ """ def __init__(self, context, request): super(ReferenceResultsView, self).__init__(context, request) bsc = getToolByName(context, 'bika_setup_catalog') self.title = self.context.translate(_("Reference Values")) self.description = self.context.translate(_( "Click on Analysis Categories (against shaded background) " "to see Analysis Services in each category. Enter minimum " "and maximum values to indicate a valid results range. " "Any result outside this range will raise an alert. 
" "The % Error field allows for an % uncertainty to be " "considered when evaluating results against minimum and " "maximum values. A result out of range but still in range " "if the % error is taken into consideration, will raise a " "less severe alert.")) self.contentFilter = {} self.context_actions = {} self.show_sort_column = False self.show_select_row = False self.show_workflow_action_buttons = False self.show_select_column = False self.pagesize = 999999 self.columns = { 'Service': {'title': _('Service')}, 'result': {'title': _('Result')}, 'min': {'title': _('Min')}, 'max': {'title': _('Max')}, } self.review_states = [ {'id':'default', 'title': _('All'), 'contentFilter':{}, 'columns': ['Service', 'result', 'min', 'max']}, ] def folderitems(self): items = [] uc = getToolByName(self.context, 'uid_catalog') # not using <self.contentsMethod=bsc> for x in self.context.getReferenceResults(): service = uc(UID=x['uid'])[0].getObject() item = { 'obj': self.context, 'id': x['uid'], 'uid': x['uid'], 'result': x['result'], 'min': x['min'], 'max': x['max'], 'title': service.Title(), 'Service': service.Title(), 'type_class': 'contenttype-ReferenceResult', 'url': service.absolute_url(), 'relative_url': service.absolute_url(), 'view_url': self.context.absolute_url() + "/results", 'replace': {}, 'before': {}, 'after': {}, 'choices':{}, 'class': {}, 'state_class': 'state-active', 'allow_edit': [], } item['replace']['Service'] = "<a href='%s'>%s</a>" % \ (service.absolute_url(), service.Title()) items.append(item) items = sorted(items, key = itemgetter('Service')) return items class ReferenceSamplesView(BikaListingView): """Main reference samples folder view """ def __init__(self, context, request): super(ReferenceSamplesView, self).__init__(context, request) portal = getToolByName(context, 'portal_url').getPortalObject() self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png" self.title = self.context.translate(_("Reference Samples")) self.catalog = 'bika_catalog' self.contentFilter = {'portal_type': 'ReferenceSample', 'sort_on':'id', 'sort_order': 'reverse', 'path':{"query": ["/"], "level" : 0 }, } self.context_actions = {} self.show_select_column = True request.set('disable_border', 1) self.columns = { 'ID': { 'title': _('ID'), 'index': 'id'}, 'Title': { 'title': _('Title'), 'index': 'sortable_title', 'toggle':True}, 'Supplier': { 'title': _('Supplier'), 'toggle':True, 'attr': 'aq_parent.Title', 'replace_url': 'aq_parent.absolute_url'}, 'Manufacturer': { 'title': _('Manufacturer'), 'toggle': True, 'attr': 'getManufacturer.Title', 'replace_url': 'getManufacturer.absolute_url'}, 'Definition': { 'title': _('Reference Definition'), 'toggle':True, 'attr': 'getReferenceDefinition.Title', 'replace_url': 'getReferenceDefinition.absolute_url'}, 'DateSampled': { 'title': _('Date Sampled'), 'index': 'getDateSampled', 'toggle':True}, 'DateReceived': { 'title': _('Date Received'), 'index': 'getDateReceived', 'toggle':True}, 'DateOpened': { 'title': _('Date Opened'), 'toggle':True}, 'ExpiryDate': { 'title': _('Expiry Date'), 'index': 'getExpiryDate', 'toggle':True}, 'state_title': { 'title': _('State'), 'toggle':True}, } self.review_states = [ {'id':'default', 'title': _('Current'), 'contentFilter':{'review_state':'current'}, 'columns': ['ID', 'Title', 'Supplier', 'Manufacturer', 'Definition', 'DateSampled', 'DateReceived', 'DateOpened', 'ExpiryDate']}, {'id':'expired', 'title': _('Expired'), 'contentFilter':{'review_state':'expired'}, 'columns': ['ID', 'Title', 'Supplier', 
'Manufacturer', 'Definition', 'DateSampled', 'DateReceived', 'DateOpened', 'ExpiryDate']}, {'id':'disposed', 'title': _('Disposed'), 'contentFilter':{'review_state':'disposed'}, 'columns': ['ID', 'Title', 'Supplier', 'Manufacturer', 'Definition', 'DateSampled', 'DateReceived', 'DateOpened', 'ExpiryDate']}, {'id':'all', 'title': _('All'), 'contentFilter':{}, 'columns': ['ID', 'Title', 'Supplier', 'Manufacturer', 'Definition', 'DateSampled', 'DateReceived', 'DateOpened', 'ExpiryDate', 'state_title']}, ] def folderitem(self, obj, item, index): workflow = getToolByName(obj, 'portal_workflow') if item.get('review_state', 'current') == 'current': # Check expiry date exdate = obj.getExpiryDate() if exdate: expirydate = DT2dt(exdate).replace(tzinfo=None) if (datetime.today() > expirydate): # Trigger expiration workflow.doActionFor(obj, 'expire') item['review_state'] = 'expired' item['obj'] = obj if self.contentFilter.get('review_state', '') \ and item.get('review_state', '') == 'expired': # This item must be omitted from the list return None item['ID'] = obj.id item['DateSampled'] = self.ulocalized_time(obj.getDateSampled(), long_format=True) item['DateReceived'] = self.ulocalized_time(obj.getDateReceived()) item['DateOpened'] = self.ulocalized_time(obj.getDateOpened()) item['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate()) after_icons = '' if obj.getBlank(): after_icons += "<img\ src='%s/++resource++bika.lims.images/blank.png' \ title='%s'>" % (self.portal_url, t(_('Blank'))) if obj.getHazardous(): after_icons += "<img\ src='%s/++resource++bika.lims.images/hazardous.png' \ title='%s'>" % (self.portal_url, t(_('Hazardous'))) item['replace']['ID'] = "<a href='%s/base_view'>%s</a>&nbsp;%s" % \ (item['url'], item['ID'], after_icons) return item
agpl-3.0
343,148,943,437,828,700
39.089655
93
0.495843
false
predatell/python-oauth2
oauth2/__init__.py
1
29176
""" The MIT License Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ import base64 import urllib import time import random import urlparse import hmac import binascii import httplib2 try: from urlparse import parse_qs parse_qs # placate pyflakes except ImportError: # fall back for Python 2.5 from cgi import parse_qs try: from hashlib import sha1 sha = sha1 except ImportError: # hashlib was added in Python 2.5 import sha import _version __version__ = _version.__version__ OAUTH_VERSION = '1.0' # Hi Blaine! HTTP_METHOD = 'GET' SIGNATURE_METHOD = 'PLAINTEXT' class Error(RuntimeError): """Generic exception class.""" def __init__(self, message='OAuth error occurred.'): self._message = message @property def message(self): """A hack to get around the deprecation errors in 2.6.""" return self._message def __str__(self): return self._message class MissingSignature(Error): pass def build_authenticate_header(realm=''): """Optional WWW-Authenticate header (401 error)""" return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} def build_xoauth_string(url, consumer, token=None): """Build an XOAUTH string for use in SMTP/IMPA authentication.""" request = Request.from_consumer_and_token(consumer, token, "GET", url) signing_method = SignatureMethod_HMAC_SHA1() request.sign_request(signing_method, consumer, token) params = [] for k, v in sorted(request.iteritems()): if v is not None: params.append('%s="%s"' % (k, escape(v))) return "%s %s %s" % ("GET", url, ','.join(params)) def to_unicode(s): """ Convert to unicode, raise exception with instructive error message if s is not unicode, ascii, or utf-8. """ if not isinstance(s, unicode): if not isinstance(s, str): raise TypeError('You are required to pass either unicode or string here, not: %r (%s)' % (type(s), s)) try: s = s.decode('utf-8') except UnicodeDecodeError, le: raise TypeError('You are required to pass either a unicode object or a utf-8 string here. You passed a Python string object which contained non-utf-8: %r. The UnicodeDecodeError that resulted from attempting to interpret it as utf-8 was: %s' % (s, le,)) return s def to_utf8(s): return to_unicode(s).encode('utf-8') def to_unicode_if_string(s): if isinstance(s, basestring): return to_unicode(s) else: return s def to_utf8_if_string(s): if isinstance(s, basestring): return to_utf8(s) else: return s def to_unicode_optional_iterator(x): """ Raise TypeError if x is a str containing non-utf8 bytes or if x is an iterable which contains such a str. 
""" if isinstance(x, basestring): return to_unicode(x) try: l = list(x) except TypeError, e: assert 'is not iterable' in str(e) return x else: return [ to_unicode(e) for e in l ] def to_utf8_optional_iterator(x): """ Raise TypeError if x is a str or if x is an iterable which contains a str. """ if isinstance(x, basestring): return to_utf8(x) try: l = list(x) except TypeError, e: assert 'is not iterable' in str(e) return x else: return [ to_utf8_if_string(e) for e in l ] def escape(s): """Escape a URL including any /.""" return urllib.quote(s.encode('utf-8'), safe='~') def generate_timestamp(): """Get seconds since epoch (UTC).""" return int(time.time()) def generate_nonce(length=8): """Generate pseudorandom number.""" return ''.join([str(random.randint(0, 9)) for i in range(length)]) def generate_verifier(length=8): """Generate pseudorandom number.""" return ''.join([str(random.randint(0, 9)) for i in range(length)]) class Consumer(object): """A consumer of OAuth-protected services. The OAuth consumer is a "third-party" service that wants to access protected resources from an OAuth service provider on behalf of an end user. It's kind of the OAuth client. Usually a consumer must be registered with the service provider by the developer of the consumer software. As part of that process, the service provider gives the consumer a *key* and a *secret* with which the consumer software can identify itself to the service. The consumer will include its key in each request to identify itself, but will use its secret only when signing requests, to prove that the request is from that particular registered consumer. Once registered, the consumer can then use its consumer credentials to ask the service provider for a request token, kicking off the OAuth authorization process. """ key = None secret = None def __init__(self, key, secret): self.key = key self.secret = secret if self.key is None or self.secret is None: raise ValueError("Key and secret must be set.") def __str__(self): data = {'oauth_consumer_key': self.key, 'oauth_consumer_secret': self.secret} return urllib.urlencode(data) class Token(object): """An OAuth credential used to request authorization or a protected resource. Tokens in OAuth comprise a *key* and a *secret*. The key is included in requests to identify the token being used, but the secret is used only in the signature, to prove that the requester is who the server gave the token to. When first negotiating the authorization, the consumer asks for a *request token* that the live user authorizes with the service provider. The consumer then exchanges the request token for an *access token* that can be used to access protected resources. """ key = None secret = None callback = None callback_confirmed = None verifier = None def __init__(self, key, secret): self.key = key self.secret = secret if self.key is None or self.secret is None: raise ValueError("Key and secret must be set.") def set_callback(self, callback): self.callback = callback self.callback_confirmed = 'true' def set_verifier(self, verifier=None): if verifier is not None: self.verifier = verifier else: self.verifier = generate_verifier() def get_callback_url(self): if self.callback and self.verifier: # Append the oauth_verifier. 
parts = urlparse.urlparse(self.callback) scheme, netloc, path, params, query, fragment = parts[:6] if query: query = '%s&oauth_verifier=%s' % (query, self.verifier) else: query = 'oauth_verifier=%s' % self.verifier return urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) return self.callback def to_string(self): """Returns this token as a plain string, suitable for storage. The resulting string includes the token's secret, so you should never send or store this string where a third party can read it. """ data = { 'oauth_token': self.key, 'oauth_token_secret': self.secret, } if self.callback_confirmed is not None: data['oauth_callback_confirmed'] = self.callback_confirmed return urllib.urlencode(data) @staticmethod def from_string(s): """Deserializes a token from a string like one returned by `to_string()`.""" if not len(s): raise ValueError("Invalid parameter string.") params = parse_qs(s, keep_blank_values=False) if not len(params): raise ValueError("Invalid parameter string.") try: key = params['oauth_token'][0] except Exception: raise ValueError("'oauth_token' not found in OAuth request.") try: secret = params['oauth_token_secret'][0] except Exception: raise ValueError("'oauth_token_secret' not found in " "OAuth request.") token = Token(key, secret) try: token.callback_confirmed = params['oauth_callback_confirmed'][0] except KeyError: pass # 1.0, no callback confirmed. return token def __str__(self): return self.to_string() def setter(attr): name = attr.__name__ def getter(self): try: return self.__dict__[name] except KeyError: raise AttributeError(name) def deleter(self): del self.__dict__[name] return property(getter, attr, deleter) class Request(dict): """The parameters and information for an HTTP request, suitable for authorizing with OAuth credentials. When a consumer wants to access a service's protected resources, it does so using a signed HTTP request identifying itself (the consumer) with its key, and providing an access token authorized by the end user to access those resources. """ version = OAUTH_VERSION def __init__(self, method=HTTP_METHOD, url=None, parameters=None, body='', is_form_encoded=False): if url is not None: self.url = to_unicode(url) self.method = method if parameters is not None: for k, v in parameters.iteritems(): k = to_unicode(k) v = to_unicode_optional_iterator(v) self[k] = v self.body = body self.is_form_encoded = is_form_encoded @setter def url(self, value): self.__dict__['url'] = value if value is not None: scheme, netloc, path, params, query, fragment = urlparse.urlparse(value) # Exclude default port numbers. if scheme == 'http' and netloc[-3:] == ':80': netloc = netloc[:-3] elif scheme == 'https' and netloc[-4:] == ':443': netloc = netloc[:-4] if scheme not in ('http', 'https'): raise ValueError("Unsupported URL %s (%s)." % (value, scheme)) # Normalized URL excludes params, query, and fragment. 
self.normalized_url = urlparse.urlunparse((scheme, netloc, path, None, None, None)) else: self.normalized_url = None self.__dict__['url'] = None @setter def method(self, value): self.__dict__['method'] = value.upper() def _get_timestamp_nonce(self): return self['oauth_timestamp'], self['oauth_nonce'] def get_nonoauth_parameters(self): """Get any non-OAuth parameters.""" return dict([(k, v) for k, v in self.iteritems() if not k.startswith('oauth_')]) def to_header(self, realm=''): """Serialize as a header for an HTTPAuth request.""" oauth_params = ((k, v) for k, v in self.items() if k.startswith('oauth_')) stringy_params = ((k, escape(str(v))) for k, v in oauth_params) header_params = ('%s="%s"' % (k, v) for k, v in stringy_params) params_header = ', '.join(header_params) auth_header = 'OAuth realm="%s"' % realm if params_header: auth_header = "%s, %s" % (auth_header, params_header) return {'Authorization': auth_header} def to_postdata(self): """Serialize as post data for a POST request.""" d = {} for k, v in self.iteritems(): d[k.encode('utf-8')] = to_utf8_optional_iterator(v) # tell urlencode to deal with sequence values and map them correctly # to resulting querystring. for example self["k"] = ["v1", "v2"] will # result in 'k=v1&k=v2' and not k=%5B%27v1%27%2C+%27v2%27%5D return urllib.urlencode(d, True).replace('+', '%20') def to_url(self): """Serialize as a URL for a GET request.""" base_url = urlparse.urlparse(self.url) try: query = base_url.query except AttributeError: # must be python <2.5 query = base_url[4] query = parse_qs(query) if 'oauth_signature' in query: del(query['oauth_signature']) for k, v in self.items(): if not k in query: query.setdefault(k, []).append(v) try: scheme = base_url.scheme netloc = base_url.netloc path = base_url.path params = base_url.params fragment = base_url.fragment except AttributeError: # must be python <2.5 scheme = base_url[0] netloc = base_url[1] path = base_url[2] params = base_url[3] fragment = base_url[5] url = (scheme, netloc, path, params, urllib.urlencode(query, True), fragment) return urlparse.urlunparse(url) def get_parameter(self, parameter): ret = self.get(parameter) if ret is None: raise Error('Parameter not found: %s' % parameter) return ret def get_normalized_parameters(self): """Return a string that contains the parameters that must be signed.""" items = [] query = urlparse.urlparse(self.url)[4] url_items = self._split_url_string(query).items() for key, value in self.iteritems(): if key == 'oauth_signature' or key in query: continue # 1.0a/9.1.1 states that kvp must be sorted by key, then by value, # so we unpack sequence values into multiple items for sorting. 
if isinstance(value, basestring): items.append((to_utf8_if_string(key), to_utf8(value))) else: try: value = list(value) except TypeError, e: assert 'is not iterable' in str(e) items.append((to_utf8_if_string(key), to_utf8_if_string(value))) else: items.extend((to_utf8_if_string(key), to_utf8_if_string(item)) for item in value) # Include any query string parameters from the provided URL url_items = [(to_utf8(k), to_utf8(v)) for k, v in url_items if k != 'oauth_signature' ] items.extend(url_items) items.sort() encoded_str = urllib.urlencode(items) # Encode signature parameters per Oauth Core 1.0 protocol # spec draft 7, section 3.6 # (http://tools.ietf.org/html/draft-hammer-oauth-07#section-3.6) # Spaces must be encoded with "%20" instead of "+" return encoded_str.replace('+', '%20').replace('%7E', '~') def sign_request(self, signature_method, consumer, token): """Set the signature parameter to the result of sign.""" if not self.is_form_encoded: # according to # http://oauth.googlecode.com/svn/spec/ext/body_hash/1.0/oauth-bodyhash.html # section 4.1.1 "OAuth Consumers MUST NOT include an # oauth_body_hash parameter on requests with form-encoded # request bodies." self['oauth_body_hash'] = base64.b64encode(sha(self.body).digest()) if 'oauth_consumer_key' not in self: self['oauth_consumer_key'] = consumer.key if token and 'oauth_token' not in self: self['oauth_token'] = token.key self['oauth_signature_method'] = signature_method.name self['oauth_signature'] = signature_method.sign(self, consumer, token) @classmethod def make_timestamp(cls): """Get seconds since epoch (UTC).""" return str(int(time.time())) @classmethod def make_nonce(cls): """Generate pseudorandom number.""" return str(random.randint(0, 100000000)) @classmethod def from_request(cls, http_method, http_url, headers=None, parameters=None, query_string=None): """Combines multiple parameter sources.""" if parameters is None: parameters = {} # Headers if headers and 'Authorization' in headers: auth_header = headers['Authorization'] # Check that the authorization header is OAuth. if auth_header[:6] == 'OAuth ': auth_header = auth_header[6:] try: # Get the parameters from the header. header_params = cls._split_header(auth_header) parameters.update(header_params) except: raise Error('Unable to parse OAuth parameters from ' 'Authorization header.') # GET or POST query string. if query_string: query_params = cls._split_url_string(query_string) parameters.update(query_params) # URL parameters. 
param_str = urlparse.urlparse(http_url)[4] # query url_params = cls._split_url_string(param_str) parameters.update(url_params) if parameters: return cls(http_method, http_url, parameters) return None @classmethod def from_consumer_and_token(cls, consumer, token=None, http_method=HTTP_METHOD, http_url=None, parameters=None, body='', is_form_encoded=False): if not parameters: parameters = {} defaults = { 'oauth_consumer_key': consumer.key, 'oauth_timestamp': cls.make_timestamp(), 'oauth_nonce': cls.make_nonce(), 'oauth_version': cls.version, } defaults.update(parameters) parameters = defaults if token: parameters['oauth_token'] = token.key if token.verifier: parameters['oauth_verifier'] = token.verifier return Request(http_method, http_url, parameters, body=body, is_form_encoded=is_form_encoded) @classmethod def from_token_and_callback(cls, token, callback=None, http_method=HTTP_METHOD, http_url=None, parameters=None): if not parameters: parameters = {} parameters['oauth_token'] = token.key if callback: parameters['oauth_callback'] = callback return cls(http_method, http_url, parameters) @staticmethod def _split_header(header): """Turn Authorization: header into parameters.""" params = {} parts = header.split(',') for param in parts: # Ignore realm parameter. if param.find('realm') > -1: continue # Remove whitespace. param = param.strip() # Split key-value. param_parts = param.split('=', 1) # Remove quotes and unescape the value. params[param_parts[0]] = urllib.unquote(param_parts[1].strip('\"')) return params @staticmethod def _split_url_string(param_str): """Turn URL string into parameters.""" parameters = parse_qs(param_str.encode('utf-8'), keep_blank_values=True) for k, v in parameters.iteritems(): parameters[k] = urllib.unquote(v[0]) return parameters class Client(httplib2.Http): """OAuthClient is a worker to attempt to execute a request.""" def __init__(self, consumer, token=None, cache=None, timeout=None, proxy_info=None): if consumer is not None and not isinstance(consumer, Consumer): raise ValueError("Invalid consumer.") if token is not None and not isinstance(token, Token): raise ValueError("Invalid token.") self.consumer = consumer self.token = token self.method = SignatureMethod_HMAC_SHA1() httplib2.Http.__init__(self, cache=cache, timeout=timeout, proxy_info=proxy_info) def set_signature_method(self, method): if not isinstance(method, SignatureMethod): raise ValueError("Invalid signature method.") self.method = method def request(self, uri, method="GET", body='', headers=None, redirections=httplib2.DEFAULT_MAX_REDIRECTS, connection_type=None): DEFAULT_POST_CONTENT_TYPE = 'application/x-www-form-urlencoded' if not isinstance(headers, dict): headers = {} if method == "POST": headers['Content-Type'] = headers.get('Content-Type', DEFAULT_POST_CONTENT_TYPE) is_form_encoded = \ headers.get('Content-Type') == 'application/x-www-form-urlencoded' if is_form_encoded and body: parameters = parse_qs(body) else: parameters = None req = Request.from_consumer_and_token(self.consumer, token=self.token, http_method=method, http_url=uri, parameters=parameters, body=body, is_form_encoded=is_form_encoded) req.sign_request(self.method, self.consumer, self.token) schema, rest = urllib.splittype(uri) if rest.startswith('//'): hierpart = '//' else: hierpart = '' host, rest = urllib.splithost(rest) realm = schema + ':' + hierpart + host if is_form_encoded: body = req.to_postdata() elif method == "GET": uri = req.to_url() else: headers.update(req.to_header(realm=realm)) return httplib2.Http.request(self, 
uri, method=method, body=body, headers=headers, redirections=redirections, connection_type=connection_type) class Server(object): """A skeletal implementation of a service provider, providing protected resources to requests from authorized consumers. This class implements the logic to check requests for authorization. You can use it with your web server or web framework to protect certain resources with OAuth. """ timestamp_threshold = 300 # In seconds, five minutes. version = OAUTH_VERSION signature_methods = None def __init__(self, signature_methods=None): self.signature_methods = signature_methods or {} def add_signature_method(self, signature_method): self.signature_methods[signature_method.name] = signature_method return self.signature_methods def verify_request(self, request, consumer, token): """Verifies an api call and checks all the parameters.""" self._check_version(request) self._check_signature(request, consumer, token) parameters = request.get_nonoauth_parameters() return parameters def build_authenticate_header(self, realm=''): """Optional support for the authenticate header.""" return {'WWW-Authenticate': 'OAuth realm="%s"' % realm} def _check_version(self, request): """Verify the correct version of the request for this server.""" version = self._get_version(request) if version and version != self.version: raise Error('OAuth version %s not supported.' % str(version)) def _get_version(self, request): """Return the version of the request for this server.""" try: version = request.get_parameter('oauth_version') except: version = OAUTH_VERSION return version def _get_signature_method(self, request): """Figure out the signature with some defaults.""" try: signature_method = request.get_parameter('oauth_signature_method') except: signature_method = SIGNATURE_METHOD try: # Get the signature method object. signature_method = self.signature_methods[signature_method] except: signature_method_names = ', '.join(self.signature_methods.keys()) raise Error('Signature method %s not supported try one of the following: %s' % (signature_method, signature_method_names)) return signature_method def _get_verifier(self, request): return request.get_parameter('oauth_verifier') def _check_signature(self, request, consumer, token): timestamp, nonce = request._get_timestamp_nonce() self._check_timestamp(timestamp) signature_method = self._get_signature_method(request) try: signature = request.get_parameter('oauth_signature') except: raise MissingSignature('Missing oauth_signature.') # Validate the signature. valid = signature_method.check(request, consumer, token, signature) if not valid: key, base = signature_method.signing_base(request, consumer, token) raise Error('Invalid signature. Expected signature base ' 'string: %s' % base) def _check_timestamp(self, timestamp): """Verify that timestamp is recentish.""" timestamp = int(timestamp) now = int(time.time()) lapsed = now - timestamp if lapsed > self.timestamp_threshold: raise Error('Expired timestamp: given %d and now %s has a ' 'greater difference than threshold %d' % (timestamp, now, self.timestamp_threshold)) class SignatureMethod(object): """A way of signing requests. The OAuth protocol lets consumers and service providers pick a way to sign requests. This interface shows the methods expected by the other `oauth` modules for signing requests. Subclass it and implement its methods to provide a new way to sign requests. """ def signing_base(self, request, consumer, token): """Calculates the string that needs to be signed. 
This method returns a 2-tuple containing the starting key for the signing and the message to be signed. The latter may be used in error messages to help clients debug their software. """ raise NotImplementedError def sign(self, request, consumer, token): """Returns the signature for the given request, based on the consumer and token also provided. You should use your implementation of `signing_base()` to build the message to sign. Otherwise it may be less useful for debugging. """ raise NotImplementedError def check(self, request, consumer, token, signature): """Returns whether the given signature is the correct signature for the given consumer and token signing the given request.""" built = self.sign(request, consumer, token) return built == signature class SignatureMethod_HMAC_SHA1(SignatureMethod): name = 'HMAC-SHA1' def signing_base(self, request, consumer, token): if not hasattr(request, 'normalized_url') or request.normalized_url is None: raise ValueError("Base URL for request is not set.") sig = ( escape(request.method), escape(request.normalized_url), escape(request.get_normalized_parameters()), ) key = '%s&' % escape(consumer.secret) if token: key += escape(token.secret) raw = '&'.join(sig) return key, raw def sign(self, request, consumer, token): """Builds the base signature string.""" key, raw = self.signing_base(request, consumer, token) hashed = hmac.new(key, raw, sha) # Calculate the digest base 64. return binascii.b2a_base64(hashed.digest())[:-1] class SignatureMethod_PLAINTEXT(SignatureMethod): name = 'PLAINTEXT' def signing_base(self, request, consumer, token): """Concatenates the consumer key and secret with the token's secret.""" sig = '%s&' % escape(consumer.secret) if token: sig = sig + escape(token.secret) return sig, sig def sign(self, request, consumer, token): key, raw = self.signing_base(request, consumer, token) return raw
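# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Builds and signs a two-legged request with the classes above. The URL, key and
# secret are placeholders, and Consumer(key=..., secret=...) is assumed to match the
# constructor defined earlier in this module.
if __name__ == '__main__':
    _consumer = Consumer(key='consumer-key', secret='consumer-secret')
    _req = Request.from_consumer_and_token(
        _consumer, token=None, http_method='GET',
        http_url='https://api.example.com/resource', parameters={'q': 'test'})
    _req.sign_request(SignatureMethod_HMAC_SHA1(), _consumer, None)
    # The signed OAuth parameters can then be sent as an Authorization header.
    print _req.to_header()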
mit
-5,403,328,534,905,743,000
32.807648
265
0.609199
false
uxlsl/uxlsl.github.io
demo/code/test/xiyanghui.py
1
1481
import requests

infos = requests.get('https://job.xiyanghui.com/api/q1/json').json()


def build(n, parent, dic):
    # Index one node (and, recursively, its children) by id, remembering its parent.
    dic[n["id"]] = {"name": n["name"], "parent": parent}
    for i in n.get("children", []):
        build(i, n["id"], dic)


def builds(infos, dic):
    for i in infos:
        build(i, -1, dic)


def check(dic, id):
    # Walk up the parent chain and return the full path to the node.
    if id not in dic:
        return 'not found'
    lst = []
    while id != -1:
        lst.append(dic[id]['name'])
        id = dic[id]["parent"]
    return '>'.join(lst[::-1])


dic = {}
builds(infos, dic)
print(check(dic, 1120))
print(check(dic, 2221))

##############################
# Based on the exchange-rate API, implement an SDK class providing a method that
# takes a currency and a price and returns the corresponding real-time price in CNY.
rates = requests.get('https://app-cdn.2q10.com/api/v2/currency').json()


class RateConverter:
    @staticmethod
    def convertToCNY(s):
        # Map currency symbols onto their ISO codes.
        small = {j: i for i, j in [('USD', '$'), ('GBP', '£'), ('EUR', '€'), ('HKD', 'HK$'), ('JPY', '¥')]}
        coin = ''
        num = 0
        for index, c in enumerate(s):
            if c.isdigit():
                coin = s[:index]
                num = float(s[index:].replace(',', ''))
                if coin in small:
                    coin = small[coin]
                return num / rates['rates'][coin] * rates['rates']['CNY']
        return -1


for i in ['$1,999.00', 'HKD2399', 'EUR499.99', '€499.99']:
    print('input {}, output {}'.format(i, RateConverter.convertToCNY(i)))
mit
2,871,237,075,796,517,400
23.75
100
0.501805
false
openfisca/openfisca-qt
openfisca_qt/plugins/scenario/graph.py
1
26654
# -*- coding:utf-8 -*- # Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul """ openFisca, Logiciel libre de simulation du système socio-fiscal français Copyright © 2011 Clément Schaff, Mahdi Ben Jelloul This file is part of openFisca. openFisca is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. openFisca is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with openFisca. If not, see <http://www.gnu.org/licenses/>. """ from __future__ import division import locale import os from matplotlib.lines import Line2D from matplotlib.patches import Rectangle, FancyArrow from matplotlib.ticker import FuncFormatter import numpy as np #from openfisca_core import model from ...gui.baseconfig import get_translation from ...gui.config import get_icon from ...gui.qt.compat import (to_qvariant, getsavefilename) from ...gui.qt.QtCore import ( QAbstractItemModel, QModelIndex, Qt, SIGNAL, QSize, QString, ) from ...gui.qt.QtGui import ( QColor, QVBoxLayout, QDialog, QMessageBox, QTreeView, QIcon, QPixmap, QHBoxLayout, QPushButton, ) from ...gui.utils.qthelpers import create_action from ...gui.views.ui_graph import Ui_Graph from .. import OpenfiscaPluginWidget from ..utils import OutNode _ = get_translation('openfisca_qt') locale.setlocale(locale.LC_ALL, '') class GraphFormater(QDialog): def __init__(self, data, mode, parent = None): super(GraphFormater, self).__init__(parent) self.setObjectName(u'Affichage') self.setWindowTitle(u'Options du graphique') self.data = data self.parent = parent view = QTreeView(self) view.setIndentation(10) self.model = DataModel(data, mode, self) view.setModel(self.model) VLayout = QVBoxLayout() HLayout = QHBoxLayout() allBtn = QPushButton(u'Tout cocher') noneBtn = QPushButton(u'Tout décocher') HLayout.addWidget(allBtn) HLayout.addWidget(noneBtn) self.setLayout(VLayout) VLayout.addLayout(HLayout) VLayout.addWidget(view) self.connect(self.model, SIGNAL('dataChanged(QModelIndex, QModelIndex)'), self.updateGraph) self.connect(allBtn, SIGNAL('clicked()'), self.checkAll) self.connect(noneBtn, SIGNAL('clicked()'), self.checkNone) def checkAll(self): self.data.setLeavesVisible() self.updateGraph() self.model.reset() def checkNone(self): self.data.hideAll() self.updateGraph() self.model.reset() def updateGraph(self): self.parent.updateGraph2() def colorIcon(color): r, g, b = color qcolor = QColor(r, g, b) size = QSize(22,22) pixmap = QPixmap(size) pixmap.fill(qcolor) return QIcon(pixmap) class DataModel(QAbstractItemModel): def __init__(self, root, mode, parent=None): super(DataModel, self).__init__(parent) self._rootNode = root self.mode = mode def rowCount(self, parent): if not parent.isValid(): parentNode = self._rootNode else: parentNode = self.getNode(parent) return parentNode.childCount() def columnCount(self, parent): return 1 def data(self, index, role = Qt.DisplayRole): if not index.isValid(): return None node = self.getNode(index) if role == Qt.DisplayRole or role == Qt.EditRole: return to_qvariant(node.desc) if role == Qt.DecorationRole: return colorIcon(node.color) if role == Qt.CheckStateRole: return to_qvariant(2*(node.visible>=1)) def setData(self, index, value, 
role = Qt.EditRole): if not index.isValid(): return None node = self.getNode(index) if role == Qt.CheckStateRole: if not(node.parent == self._rootNode): first_index = self.createIndex(node.parent.row(), 0, node.parent) else: first_sibling = node.parent.children[0] first_index = self.createIndex(first_sibling.row(), 0, first_sibling) last_sibling = node.parent.children[-1] last_index = self.createIndex(last_sibling.row(), 0, last_sibling) if self.mode == 'bareme': if node.visible>=1: node.visible = 0 else: node.visible = 1 else: if node.visible>=1: node.setHidden() else: node.setVisible() self.dataChanged.emit(first_index, last_index) return True return False def headerData(self, section, orientation, role): if role == Qt.DisplayRole: if section == 0: return u"Variable" def flags(self, index): node = self.getNode(index) if np.any(node.vals != 0): return Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable else: return Qt.ItemIsSelectable """Should return the parent of the node with the given QModelIndex""" def parent(self, index): node = self.getNode(index) parentNode = node.parent if parentNode == self._rootNode: return QModelIndex() return self.createIndex(parentNode.row(), 0, parentNode) """Should return a QModelIndex that corresponds to the given row, column and parent node""" def index(self, row, column, parent): parentNode = self.getNode(parent) childItem = parentNode.child(row) if childItem: return self.createIndex(row, column, childItem) else: return QModelIndex() def getNode(self, index): if index.isValid(): node = index.internalPointer() if node: return node return self._rootNode class ScenarioGraphWidget(OpenfiscaPluginWidget, Ui_Graph): """ Scenario Graph Widget """ CONF_SECTION = 'composition' def __init__(self, parent = None): super(ScenarioGraphWidget, self).__init__(parent) self.setupUi(self) self._parent = parent self.mplwidget.mpl_connect('pick_event', self.on_pick) self.mplwidget.mpl_connect('motion_notify_event', self.pick) self.connect(self.option_btn, SIGNAL('clicked()'), self.set_option) self.connect(self.taux_btn, SIGNAL('stateChanged(int)'), self.set_taux) self.connect(self.hidelegend_btn, SIGNAL('toggled(bool)'), self.hide_legend) self.taux = False self.legend = True self.simulation = None self.setLayout(self.verticalLayout) self.initialize_plugin() #------ Public API --------------------------------------------- def set_taux(self, value): """ Switch on/off the tax rates view Parameters ---------- value : bool If True, switch to tax rates view """ if value: self.taux = True else: self.taux = False self.updateGraph2() def hide_legend(self, value): if value: self.legend = False else: self.legend = True self.updateGraph2() def set_option(self): ''' Sets graph options ''' try: mode = self.simulation.mode except: mode = 'bareme' gf = GraphFormater(self.data, mode, self) gf.exec_() def pick(self, event): if not event.xdata is None and not event.ydata is None: self.mplwidget.figure.pick(event) else: self.setToolTip("") def on_pick(self, event): label = event.artist._label self.setToolTip(label) def updateGraph(self, scenario): """ Update the graph according to simulation """ self.scenario = scenario print scenario # TODO: link the decompsotion with parameters data = OutNode.create_from_scenario_decomposition_json( scenario = scenario, simulation = None, decomposiiton_json = None ) dataDefault = data # TODO: data_default reforme = scenario.reforme = False # TODO: fix this mode = scenario.mode = "castype" # TODO: "castype" ou "bareme" x_axis = scenario.x_axis = 
"sal" # TODO change this too self.data = data self.dataDefault = dataDefault self.data.setLeavesVisible() data['revdisp'].visible = 1 if mode == 'bareme': # TODO: make this country-totals specific for rev in ['salsuperbrut', 'salbrut', 'chobrut', 'rstbrut']: try: data[rev].setHidden() except: pass if reforme: data.hideAll() self.populate_absBox(x_axis, mode) for axe in self.main.composition.XAXIS_PROPERTIES.itervalues(): if axe['name'] == x_axis: self.graph_x_axis = axe['typ_tot_default'] break self.updateGraph2() def updateGraph2(self): ax = self.mplwidget.axes ax.clear() currency = self.main.tax_benefit_system.CURRENCY mode = self.scenario.mode reforme = self.scenario.reforme if mode == 'castype': drawWaterfall(self.data, ax) else: if self.taux: drawTaux(self.data, ax, self.graph_x_axis, reforme, self.dataDefault) else: drawBareme(self.data, ax, self.graph_x_axis, reforme, self.dataDefault, self.legend, currency = currency) self.mplwidget.draw() def populate_absBox(self, x_axis, mode): self.disconnect(self.absBox, SIGNAL('currentIndexChanged(int)'), self.x_axis_changed) self.absBox.clear() if mode == 'castype': self.absBox.setEnabled(False) self.taux_btn.setEnabled(False) self.hidelegend_btn.setEnabled(False) return self.taux_btn.setEnabled(True) self.absBox.setEnabled(True) self.hidelegend_btn.setEnabled(True) for axe in model.x_axes.itervalues(): if axe.name == x_axis: typ_revs_labels = axe.typ_tot.values() typ_revs = axe.typ_tot.keys() self.absBox.addItems(typ_revs_labels) # TODO: get label from description self.absBox.setCurrentIndex(typ_revs.index(axe.typ_tot_default)) self.connect(self.absBox, SIGNAL('currentIndexChanged(int)'), self.x_axis_changed) return def x_axis_changed(self): mode = self.simulation.mode if mode == "bareme": text = self.absBox.currentText() for axe in self.main.composition.XAXIS_PROPERTIES.itervalues(): for key, label in axe.typ_tot.iteritems(): if text == label: self.graph_x_axis = key self.updateGraph2() return def save_figure(self, *args): filetypes = self.mplwidget.get_supported_filetypes_grouped() sorted_filetypes = filetypes.items() sorted_filetypes.sort() default_filetype = self.mplwidget.get_default_filetype() output_dir = self.get_option('graph/export_dir') start = os.path.join(output_dir, 'image.') + default_filetype filters = [] selectedFilter = None for name, exts in sorted_filetypes: exts_list = " ".join(['*.%s' % ext for ext in exts]) filtre = '%s (%s)' % (name, exts_list) if default_filetype in exts: selectedFilter = filtre filters.append(filtre) filters = ';;'.join(filters) fname, format = getsavefilename( self, _("Save image"), start, filters, selectedFilter) # "Enregistrer l'image" if fname: output_dir = os.path.dirname(str(fname)) self.main.composition.set_option('graph/export_dir', output_dir) try: self.mplwidget.print_figure( fname ) except Exception, e: QMessageBox.critical( self, _("Error when saving image"), str(e), QMessageBox.Ok, QMessageBox.NoButton) #------ OpenfiscaPluginMixin API --------------------------------------------- #------ OpenfiscaPluginWidget API --------------------------------------------- def get_plugin_title(self): """ Return plugin title Note: after some thinking, it appears that using a method is more flexible here than using a class attribute """ return _("Test case graphic") def get_plugin_icon(self): """ Return plugin icon (QIcon instance) Note: this is required for plugins creating a main window (see OpenfiscaPluginMixin.create_mainwindow) and for configuration dialog widgets creation """ return 
get_icon('OpenFisca22.png') def get_plugin_actions(self): """ Return a list of actions related to plugin Note: these actions will be enabled when plugin's dockwidget is visible and they will be disabled when it's hidden """ self.save_action = create_action( self, _("Save &graph"), icon = 'filesave.png', tip = _("Save test case graph"), triggered = self.save_figure ) self.register_shortcut( self.save_action, context = "Graph", name =_("Save test case graph"), default = "Ctrl+G" ) self.file_menu_actions = [self.save_action] self.main.file_menu_actions += self.file_menu_actions return self.file_menu_actions def register_plugin(self): """ Register plugin in OpenFisca's main window """ self.main.add_dockwidget(self) def refresh_plugin(self): ''' Update Graph ''' self.updateGraph(self.main.scenario) def closing_plugin(self, cancelable=False): """ Perform actions before parent main window is closed Return True or False whether the plugin may be closed immediately or not Note: returned value is ignored if *cancelable* is False """ return True def draw_simulation_bareme(simulation, ax, graph_x_axis = None, legend = False, position = 1): """ Draws a bareme on matplotlib.axes.Axes """ reforme = simulation.reforme alter = (simulation.alternative_scenario is not None) currency = self.main.tax_benefit_system.CURRENCY simulation.compute() data = simulation.data data_default = simulation.data_default data.setLeavesVisible() data_default.setLeavesVisible() if graph_x_axis is None: graph_x_axis = 'sal' if not alter: drawBareme(data, ax, graph_x_axis, reforme, data_default, legend, currecncy = currency) else: drawBaremeCompareHouseholds(data, ax, graph_x_axis, data_default, legend, currecny = currency, position = position) def draw_simulation_taux(simulation, ax, graph_x_axis = None, legend = True): """ Draws a bareme on matplotlib.axes.Axes object ax """ reforme = simulation.reforme or (simulation.alternative_scenario is not None) simulation.compute() data, data_default = simulation.data, simulation.data_default data.setLeavesVisible() data_default.setLeavesVisible() if graph_x_axis is None: graph_x_axis = 'sal' drawTaux(data, ax, graph_x_axis, reforme, data_default, legend = legend) def draw_simulation_waterfall(simulation, ax): """ Draws a waterfall on matplotlib.axes.Axes object ax """ data, data_default = simulation.compute() del data_default data.setLeavesVisible() drawWaterfall(data, ax) def drawWaterfall(data, ax, currency = None): ax.figure.subplots_adjust(bottom = 0.15, right = 0.95, top = 0.95, left = 0.1) barwidth = 0.8 number = [0] patches = [] codes = [] shortnames = [] def drawNode(node, prv): prev = prv + 0 val = node.vals[0] bot = prev for child in node.children: drawNode(child, prev) prev += child.vals[0] if (val != 0) and node.visible: r, g, b = node.color arrow = FancyArrow( number[0] + barwidth / 2, bot, 0, val, width = barwidth, fc = (r / 255, g / 255, b / 255), linewidth = 0.5, edgecolor = 'black', label = node.desc, picker = True, length_includes_head = True, head_width = barwidth, head_length = abs(val / 15), ) arrow.top = bot + max(0, val) arrow.absci = number[0] + 0 # a = Rectangle((number[0], bot), barwidth, val, fc = node.color, linewidth = 0.5, edgecolor = 'black', label = node.desc, picker = True) arrow.value = round(val) patches.append(arrow) codes.append(node.code) shortnames.append(node.shortname) number[0] += 1 prv = 0 drawNode(data, prv) for patch in patches: ax.add_patch(patch) n = len(patches) abscisses = np.arange(n) xlim = (- barwidth * 0.5, n - 1 + barwidth * 1.5) 
ax.hold(True) ax.plot(xlim, [0, 0], color = 'black') ax.set_xticklabels(shortnames, rotation = '45') ax.set_xticks(abscisses + barwidth / 2) ax.set_xlim((-barwidth / 2, n - 1 + barwidth * 1.5)) ticks = ax.get_xticklines() for tick in ticks: tick.set_visible(False) for rect in patches: x = rect.absci y = rect.top val = u'{} {}'.format(int(rect.value), currency) width = barwidth if rect.value >= 0: col = 'black' else: col = 'red' ax.text(x + width / 2, y + 1, val, horizontalalignment = 'center', verticalalignment = 'bottom', color= col, weight = 'bold') m, M = ax.get_ylim() ax.set_ylim((m, 1.05 * M)) def drawBareme(data, axes, x_axis, reform = False, reference_data = None, legend = True, currency = None, legend_position = 2): ''' Draws bareme ''' if reference_data is None: reference_data = data axes.figure.subplots_adjust(bottom = 0.09, top = 0.95, left = 0.11, right = 0.95) if reform: prefix = 'Variation ' else: prefix = '' axes.hold(True) x_axis_data = reference_data[x_axis] n_points = len(x_axis_data.vals) xlabel = x_axis_data.desc axes.set_xlabel(xlabel) axes.set_ylabel(prefix + u"{} ({} par an)".format(data.code, currency)) axes.set_xlim(np.amin(x_axis_data.vals), np.amax(x_axis_data.vals)) if not reform: axes.set_ylim(np.amin(x_axis_data.vals), np.amax(x_axis_data.vals)) axes.plot(x_axis_data.vals, np.zeros(n_points), color = 'black', label = 'x_axis') def drawNode(node, prv): prev = prv + 0 if np.any(node.vals != 0) and node.visible: r, g, b = node.color col = (r / 255, g / 255, b / 255) if node.typevar == 2: a = axes.plot( x_axis_data.vals, node.vals, color = col, linewidth = 2, label = prefix + node.desc, ) else: a = axes.fill_between( x_axis_data.vals, prev + node.vals, prev, color = col, linewidth = 0.2, edgecolor = 'black', picker = True, ) a.set_label(prefix + node.desc) for child in node.children: drawNode(child, prev) prev += child.vals prv = np.zeros(n_points) drawNode(data, prv) if legend: createLegend(axes, position = legend_position) def drawBaremeCompareHouseholds(data, ax, x_axis, dataDefault = None, legend = True , currency = "", position = 2): ''' Draws bareme ''' if dataDefault is None: raise Exception('drawBaremeCompareHouseHolds: dataDefault must be defined') ax.figure.subplots_adjust(bottom = 0.09, top = 0.95, left = 0.11, right = 0.95) prefix = 'Variation ' ax.hold(True) xdata = dataDefault[x_axis] NMEN = len(xdata.vals) xlabel = xdata.desc ax.set_xlabel(xlabel) ax.set_ylabel(prefix + u"Revenu disponible (" + currency + " par an)") ax.set_xlim(np.amin(xdata.vals), np.amax(xdata.vals)) ax.plot(xdata.vals, np.zeros(NMEN), color = 'black', label = 'x_axis') code_list = ['af', 'cf', 'ars', 'rsa', 'aefa', 'psa', 'logt', 'irpp', 'ppe', 'revdisp'] def drawNode(node, prv): minimum = 0 maximum = 0 prev = prv + 0 # if np.any(node.vals != 0) and node.visible and node.code != 'root' and node.code in code_list: if np.any(node.vals != 0) and node.code != 'root' and node.code in code_list: node.visible = True r, g, b = node.color col = (r / 255, g / 255, b / 255) if node.typevar == 2: a = ax.plot(xdata.vals, node.vals, color = col, linewidth = 2, label = prefix + node.desc) else: a = ax.fill_between(xdata.vals, prev + node.vals, prev, color = col, linewidth = 0.2, edgecolor = 'black', picker = True) a.set_label(prefix + node.desc) for child in node.children: drawNode(child, prev) prev += child.vals minimum = min([np.amin(prev), minimum]) maximum = max([np.amax(prev), maximum]) return minimum, maximum * 1.1 prv = np.zeros(NMEN) minimum, maximum = drawNode(data, prv) 
ax.set_ylim(minimum, maximum) if legend: createLegend(ax, position = position) def drawBaremeCompareHouseholds2(data, ax, x_axis, dataDefault = None, legend = True, currency = "", position = 2): ''' Draws bareme ''' if dataDefault is None: raise Exception('drawBaremeCompareHouseHolds: dataDefault must be defined') ax.figure.subplots_adjust(bottom = 0.09, top = 0.95, left = 0.11, right = 0.95) prefix = 'Variation ' ax.hold(True) xdata = dataDefault[x_axis] NMEN = len(xdata.vals) xlabel = xdata.desc ax.set_xlabel(xlabel) ax.set_ylabel(prefix + u"Revenu disponible (" + currency + " par an)") ax.set_xlim(np.amin(xdata.vals), np.amax(xdata.vals)) ax.plot(xdata.vals, np.zeros(NMEN), color = 'black', label = 'x_axis') node_list = ['af', 'cf', 'ars', 'rsa', 'aefa', 'psa', 'logt', 'irpp', 'ppe', 'revdisp'] prv = np.zeros(NMEN) for nod in node_list: node = data[nod] prev = prv + 0 r, g, b = node.color col = (r / 255, g / 255, b / 255) if node.typevar == 2: a = ax.plot(xdata.vals, node.vals, color = col, linewidth = 2, label = prefix + node.desc) else: a = ax.fill_between(xdata.vals, prev + node.vals, prev, color = col, linewidth = 0.2, edgecolor = 'black', picker = True) a.set_label(prefix + node.desc) prv += node.vals if legend: createLegend(ax, position = position) def percentFormatter(x, pos=0): return '%1.0f%%' % (x) def drawTaux(data, ax, x_axis, reforme = False, dataDefault = None, legend = True): ''' Draws marginal and average tax rates ''' if dataDefault is None: dataDefault = data print "x_axis :", x_axis # TODO: the following is an ugly fix which is not general enough if x_axis == "rev_cap_brut": typ_rev = 'superbrut' elif x_axis == "rev_cap_net": typ_rev = 'net' elif x_axis == "fon": typ_rev = 'brut' else: for typrev, vars in model.REVENUES_CATEGORIES.iteritems(): if x_axis in vars: typ_rev = typrev RB = RevTot(dataDefault, typ_rev) xdata = dataDefault[x_axis] RD = dataDefault['revdisp'].vals div = RB*(RB != 0) + (RB == 0) taumoy = (1 - RD / div) * 100 taumar = 100 * (1 - (RD[:-1]-RD[1:]) / (RB[:-1]-RB[1:])) ax.hold(True) ax.set_xlim(np.amin(xdata.vals), np.amax(xdata.vals)) ax.set_ylabel(r"$\left(1 - \frac{RevDisponible}{RevInitial} \right)\ et\ \left(1 - \frac{d (RevDisponible)}{d (RevInitial)}\right)$") ax.set_ylabel(r"$\left(1 - \frac{RevDisponible}{RevInitial} \right)\ et\ \left(1 - \frac{d (RevDisponible)}{d (RevInitial)}\right)$") ax.plot(xdata.vals, taumoy, label = u"Taux moyen d'imposition", linewidth = 2) ax.plot(xdata.vals[1:], taumar, label = u"Taux marginal d'imposition", linewidth = 2) ax.set_ylim(0,100) ax.yaxis.set_major_formatter(FuncFormatter(percentFormatter)) if legend: createLegend(ax) def createLegend(ax, position = 2): ''' Creates legend ''' p = [] l = [] for collec in ax.collections: if collec._visible: p.insert(0, Rectangle((0, 0), 1, 1, fc = collec._facecolors[0], linewidth = 0.5, edgecolor = 'black' )) l.insert(0, collec._label) for line in ax.lines: if line._visible and (line._label != 'x_axis'): p.insert(0, Line2D([0,1],[.5,.5],color = line._color)) l.insert(0, line._label) ax.legend(p,l, loc= position, prop = {'size':'medium'}) def RevTot(data, typrev): ''' Computes total revenues by type with definition is country specific ''' dct = model.REVENUES_CATEGORIES first = True try: for var in dct[typrev]: if first: out = data[var].vals.copy() # WARNING: Copy is needed to avoid pointers problems (do not remove this line)!!!! 
first = False else: out += data[var].vals return out except: raise Exception("typrev is %s but typrev should be one of the following: %s" %(str(typrev), str(dct.keys())) )
agpl-3.0
-2,987,348,892,159,805,400
33.338918
148
0.576875
false
JustinSGray/pyCycle
pycycle/elements/test/test_turbine_od.py
1
7060
import numpy as np import unittest import os from openmdao.api import Problem, Group from openmdao.utils.assert_utils import assert_near_equal from pycycle.mp_cycle import Cycle from pycycle.thermo.cea.species_data import janaf from pycycle.elements.turbine import Turbine from pycycle.elements.combustor import Combustor from pycycle.elements.flow_start import FlowStart from pycycle.maps.lpt2269 import LPT2269 fpath = os.path.dirname(os.path.realpath(__file__)) ref_data = np.loadtxt(fpath + "/reg_data/turbineOD1.csv", delimiter=",", skiprows=1) header = [ 'turb.PRdes', 'turb.effDes', 'shaft.Nmech', 'burn.FAR', 'burn.Fl_I.W', 'burn.Fl_I.Pt', 'burn.Fl_I.Tt', 'burn.Fl_I.ht', 'burn.Fl_I.s', 'burn.Fl_I.MN', 'burn.Fl_I.V', 'burn.Fl_I.A', 'burn.Fl_I.Ps', 'burn.Fl_I.Ts', 'burn.Fl_I.hs', 'turb.Fl_I.W', 'turb.Fl_I.Pt', 'turb.Fl_I.Tt', 'turb.Fl_I.ht', 'turb.Fl_I.s', 'turb.Fl_I.MN', 'turb.Fl_I.V', 'turb.Fl_I.A', 'turb.Fl_I.Ps', 'turb.Fl_I.Ts', 'turb.Fl_I.hs', 'turb.Fl_O.W', 'turb.Fl_O.Pt', 'turb.Fl_O.Tt', 'turb.Fl_O.ht', 'turb.Fl_O.s', 'turb.Fl_O.MN', 'turb.Fl_O.V', 'turb.Fl_O.A', 'turb.Fl_O.Ps', 'turb.Fl_O.Ts', 'turb.Fl_O.hs', 'turb.PR', 'turb.eff', 'turb.Np', 'turb.Wp', 'turb.pwr', 'turb.PRmap', 'turb.effMap', 'turb.NpMap', 'turb.WpMap', 'turb.s_WpDes', 'turb.s_PRdes', 'turb.s_effDes', 'turb.s_NpDes'] h_map = dict(((v_name, i) for i, v_name in enumerate(header))) class TurbineODTestCase(unittest.TestCase): def setUp(self): self.prob = Problem() cycle = self.prob.model = Cycle() cycle.options['thermo_method'] = 'CEA' cycle.options['thermo_data'] = janaf cycle.options['design'] = False cycle.add_subsystem('flow_start', FlowStart()) cycle.add_subsystem('burner', Combustor(fuel_type="JP-7")) cycle.add_subsystem('turbine', Turbine( map_data=LPT2269)) cycle.set_input_defaults('burner.Fl_I:FAR', .01, units=None) cycle.set_input_defaults('turbine.Nmech', 1000., units='rpm'), cycle.set_input_defaults('flow_start.P', 17., units='psi'), cycle.set_input_defaults('flow_start.T', 500.0, units='degR'), cycle.set_input_defaults('flow_start.W', 0., units='lbm/s'), cycle.set_input_defaults('turbine.area', 150., units='inch**2') cycle.pyc_connect_flow("flow_start.Fl_O", "burner.Fl_I") cycle.pyc_connect_flow("burner.Fl_O", "turbine.Fl_I") self.prob.set_solver_print(level=-1) self.prob.setup(check=False) def test_case1(self): # 6 cases to check against for i, data in enumerate(ref_data): # input turbine variables self.prob['turbine.s_Wp'] = data[h_map['turb.s_WpDes']] self.prob['turbine.s_eff'] = data[h_map['turb.s_effDes']] self.prob['turbine.s_PR'] = data[h_map['turb.s_PRdes']] self.prob['turbine.s_Np'] = data[h_map['turb.s_NpDes']] self.prob['turbine.map.NpMap']= data[h_map['turb.NpMap']] self.prob['turbine.map.PRmap']= data[h_map['turb.PRmap']] # input flowstation variables self.prob['flow_start.P'] = data[h_map['burn.Fl_I.Pt']] self.prob['flow_start.T'] = data[h_map['burn.Fl_I.Tt']] self.prob['flow_start.W'] = data[h_map['burn.Fl_I.W']] self.prob['turbine.PR'] = data[h_map['turb.PR']] # input shaft variable self.prob['turbine.Nmech'] = data[h_map['shaft.Nmech']] # input burner variable self.prob['burner.Fl_I:FAR'] = data[h_map['burn.FAR']] self.prob['turbine.area'] = data[h_map['turb.Fl_O.A']] self.prob.run_model() print('---- Test Case', i, ' ----') print("corrParams --") print("Wp", self.prob['turbine.Wp'][0], data[h_map['turb.Wp']]) print("Np", self.prob['turbine.Np'][0], data[h_map['turb.Np']]) print("flowConv---") print("PR ", self.prob['turbine.PR'][0], data[h_map['turb.PR']]) print("mapInputs---") 
print("NpMap", self.prob['turbine.map.readMap.NpMap'][0], data[h_map['turb.NpMap']]) print("PRmap", self.prob['turbine.map.readMap.PRmap'][0], data[h_map['turb.PRmap']]) print("readMap --") print( "effMap", self.prob['turbine.map.scaledOutput.effMap'][0], data[ h_map['turb.effMap']]) print( "WpMap", self.prob['turbine.map.scaledOutput.WpMap'][0], data[ h_map['turb.WpMap']]) print("Scaled output --") print("eff", self.prob['turbine.eff'][0], data[h_map['turb.eff']]) tol = 1.0e-3 print() npss = data[h_map['burn.Fl_I.Pt']] pyc = self.prob['flow_start.Fl_O:tot:P'][0] print('Pt in:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['burn.Fl_I.s']] pyc = self.prob['flow_start.Fl_O:tot:S'][0] print('S in:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.Fl_O.W']] pyc = self.prob['turbine.Fl_O:stat:W'][0] print('W in:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.Fl_O.ht']] - data[h_map['turb.Fl_I.ht']] pyc = self.prob['turbine.Fl_O:tot:h'][0] - self.prob['burner.Fl_O:tot:h'][0] print('delta h:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.Fl_I.s']] pyc = self.prob['burner.Fl_O:tot:S'][0] print('S in:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.Fl_O.s']] pyc = self.prob['turbine.Fl_O:tot:S'][0] print('S out:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.pwr']] pyc = self.prob['turbine.power'][0] print('Power:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.Fl_O.Pt']] pyc = self.prob['turbine.Fl_O:tot:P'][0] print('Pt out:', npss, pyc) assert_near_equal(pyc, npss, tol) # these fail downstream of combustor npss = data[h_map['turb.Fl_O.Ps']] pyc = self.prob['turbine.Fl_O:stat:P'][0] print('Ps out:', npss, pyc) assert_near_equal(pyc, npss, tol) npss = data[h_map['turb.Fl_O.Ts']] pyc = self.prob['turbine.Fl_O:stat:T'][0] print('Ts out:', npss, pyc) assert_near_equal(pyc, npss, tol) print("") print() if __name__ == "__main__": unittest.main()
apache-2.0
5,855,211,299,384,399,000
31.534562
96
0.525779
false
Superjom/NeuralNetworks
apps/126/validate.py
1
4599
#!/usr/bin/env python # -*- coding: utf-8 -*- ''' Created on March 3, 2014 @author: Chunwei Yan @ PKU @mail: [email protected] ''' from __future__ import division import sys import theano import math import numpy from theano import scalar as T import cPickle as pickle import argparse sys.path.append('../..') from models.stacked_autoencoder import StackedAutoEncoder from dataset import Dataset as DenoDataset def load_dataset(dataset_ph): ''' test if the file in pickle format predict if the file in csv format ''' dataset_ph = dataset_ph if dataset_ph.endswith('.pk'): with open(dataset_ph) as f: dataset = pickle.load(f) else: print '!!\tdataset is in csv format' print '!!!\tattention: validator will ignore the first line' deno_dataset = DenoDataset(dataset_ph) records = deno_dataset.load_records_to_norm_float() dataset = (records, None) return dataset def load_model(path): ''' load pretrained StackedAutoencoder object from a file ''' with open(path, 'rb') as f: model = pickle.load(f) return model class Validator(object): ''' given some records and predict label ''' def __init__(self, dataset, model): self.dataset = dataset self.model = model self._init() def _init(self): try: train_fn, self.predict_fn = self.model.compile_finetune_funcs() except: self.predict_fn = self.model.compile_predict_fn() def predict(self): res = [] records,labels = self.dataset n_records = records.shape[0] for i in range(n_records): x = records[i:i+1] #print 'x:', x y = self.predict_fn(x)[0] #print 'y:', y, labels[i] res.append(y) return res def batch_predict(self): ''' predict by batch ''' records,labels = self.dataset n_records = records.shape[0] batch_size = 40 n_batches = int(math.ceil(n_records/batch_size)) res = [] for i in xrange(n_batches): x = records[i*batch_size:(i+1) * batch_size] #print 'x', x # to fix a bug x_size = x.shape[0] if x_size < batch_size: #print 'x_size < batch_size', x_size, batch_size x = records[-batch_size:] y_preds = self.predict_fn(x)[0] y_preds = y_preds[-x_size:] else: y_preds = self.predict_fn(x)[0] #print 'y_preds', y_preds for y in y_preds: res.append(y) #res.append(y_preds) return res def validate(self): records,labels = self.dataset labels = list(labels) n_records = records.shape[0] #res = self.batch_predict() res = self.predict() #print 'predict res', res num = 0 #print 'labels', labels print 'len res labels', len(res), len(labels) for i in xrange(n_records): if res[i] == labels[i]: num += 1.0 #num = len(filter(lambda x:x, res == labels)) #print 'num', num c_rate = num/n_records print 'Correct rate:', c_rate print 'Error rate:', 1 - c_rate return c_rate if __name__ == '__main__': parser = argparse.ArgumentParser( description = "predict and validate") parser.add_argument('-d', action='store', dest='dataset_ph', help='path to dataset' ) parser.add_argument('-t', action='store', dest='task', help='task: validate or predict', ) parser.add_argument('-m', action='store', dest='model_ph', help='path of model file', ) parser.add_argument('-f', action='store', dest='topath', help='path of output file' ) if len(sys.argv) == 1: parser.print_help() exit(-1) args = parser.parse_args() dataset = load_dataset(args.dataset_ph) model = load_model(args.model_ph) validator = Validator( dataset = dataset, model = model, ) # task if args.task == 'predict': res = validator.batch_predict() print 'predict %d labels' % len(res) with open(args.topath, 'w') as f: f.write( '\n'.join([str(s) for s in res])) elif args.task == 'validate': validator.validate() else: print 'unrecognized task: "%s"' % args.task 
# TODO to file?
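# --- Hypothetical invocations (added for illustration; the file paths are placeholders) ---
#   python validate.py -d dataset.pk -m model.pk -t validate
#   python validate.py -d records.csv -m model.pk -t predict -f predictions.txt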
apache-2.0
-510,086,034,167,619,840
25.738372
75
0.543379
false
azure-satellite/pyunite
pyunite/option.py
1
3196
import re
from itertools import imap
from collections import namedtuple

import funcy as fn

option = namedtuple('option', ['name', 'value'])

# Options can be specified in the PyUnite command line. They are merged into a
# state that uniquely identifies a PyUnite buffer.
default_options = dict(
    # Scope of a PyUnite buffer:
    #   - global: Buffer is global. The quickfix list behaves like this.
    #   - tabpage: Buffer is tab local.
    #   - window: Buffer is window local. The location list behaves like this.
    # Notice that there could actually be more than one PyUnite buffer per
    # scope if other PyUnite buffers in the same scope are marked as
    # replaceable.
    scope='tabpage',
    # Whether to quit if another PyUnite in the same scope is being opened
    replace=True,
    # Height if horizontal split. Width if vertical split. Zero means don't
    # resize the window.
    size=0,
    # Split vertically instead of horizontally
    vsplit=False,
    # Direction of the window split. See
    # https://technotales.wordpress.com/2010/04/29/vim-splits-a-guide-to-doing-exactly-what-you-want/
    direction='leftabove',
    # Don't open window when there are no candidates
    close_on_empty=False,
    # Steal focus from current window
    focus_on_open=False,
    # Close window after performing an action on a candidate
    close_on_action=False,
    # Leave window after performing an action on a candidate
    leave_on_action=False,
)


class Option(object):
    def __init__(self, name, value):
        self.name = name
        self.value = value
        # Command-line spelling of the option, used in the error messages below.
        original_name = '-' + re.sub('_', '-', name)
        error_msg = 'Option "{}" is not recognized'.format(original_name)
        assert name in default_options, error_msg
        expected_type = type(default_options[name])
        error_msg = 'Expected value of {} for option "{}"'.format(str(expected_type), original_name)
        assert type(value) == expected_type, error_msg
        if name == 'scope':
            scopes = ['global', 'tabpage', 'window']
            error = 'Option "-scope" has to be one of {}'.format(str(scopes))
            assert value in scopes, error
        if name == 'direction':
            directions = ['topleft', 'botright', 'leftabove', 'rightbelow']
            error = 'Option "-direction" has to be one of {}'.format(str(directions))
            assert value in directions, error

    def format_option(self):
        # Render the option back into its command-line form.
        name = re.sub('_', '-', self.name)
        if isinstance(default_options[self.name], bool):
            if self.value:
                return '-{}'.format(name)
            else:
                return '-no-{}'.format(name)
        else:
            return '-{}={}'.format(name, self.value)


def parse_option(string):
    if '=' not in string:
        string += '='
    name, value = string.split('=', 1)
    if value == '':
        value = False if name.startswith('-no-') else True
    else:
        # Try to evaluate literals such as numbers; fall back to the raw string.
        value = fn.silent(eval)(value) or value
    name = re.sub('-', '_', re.sub('^(-no-|-)', '', name))
    return Option(name=name, value=value)


def format_options(options):
    return fn.iflatten(imap(Option.format_option, options))
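# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Shows how command-line fragments map onto Option objects; the values are illustrative.
if __name__ == '__main__':
    assert parse_option('-scope=window').value == 'window'
    assert parse_option('-vsplit').value is True
    assert parse_option('-no-replace').value is False
    assert parse_option('-size=30').value == 30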
bsd-3-clause
6,447,050,693,128,628,000
35.318182
102
0.623279
false
suryakencana/niimanga
niimanga/views/main.py
1
4364
""" # Copyright (c) 04 2015 | surya # 21/04/15 [email protected] # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # main.py """ from niimanga.configs.view import ZHandler from niimanga.libs.crawlable import CrawlAble from niimanga.models.manga import Manga from pyramid.view import view_config class MainView(ZHandler): @view_config(route_name='home', renderer='layouts/home.html') @CrawlAble() def home_view(self): return {'project': 'moori'} @view_config(route_name='url_popular', renderer='layouts/home.html') @CrawlAble() def popular_view(self): return {'project': 'moori'} @view_config(route_name='url_latest', renderer='layouts/home.html') @CrawlAble() def latest_view(self): return {'project': 'moori'} @view_config(route_name='url_series', renderer='layouts/series.html') @CrawlAble() def series_view(self): _ = self.R slug = _.matchdict.get('seriesSlug', "No Title") print(slug) qry = Manga.query manga = qry.filter(Manga.slug == slug.strip()).first() if manga is not None: filename = '/'.join([manga.id, manga.thumb]) thumb = _.storage.url(filename) aka = manga.aka artists = manga.get_artist() authors = manga.get_authors() description = manga.description name = manga.title last = Manga.last_chapter(manga.id) last_chapter = ' '.join([str(last.chapter), last.title]) return dict( aka=aka, url='/manga/{slug}'.format(slug=slug), thumb_url=thumb, artists=artists, authors=authors, description=description, name=name, last_chapter=last_chapter ) return {'project': 'moori'} @view_config(route_name='url_chapter', renderer='layouts/chapter.html') @CrawlAble() def chapter_view(self): _ = self.R slug = _.matchdict.get('seriesSlug', "No Title") chap_slug = _.matchdict.get('chapterSlug', "No Title") # cari manga by slug manga = Manga.query.filter(Manga.slug == slug).first() if manga is not None: filename = '/'.join([manga.id, manga.thumb]) thumb = _.storage.url(filename) aka = manga.aka artists = manga.get_artist() authors = manga.get_authors() description = manga.description name = manga.title last = Manga.last_chapter(manga.id) last_chapter = ' '.join([str(last.chapter), last.title]) # cari chapter manga chapter = manga.get_chapter(manga, chap_slug) return dict( aka=aka, url='/chapter/{slug}/{chap}'.format(slug=slug, chap=chap_slug), thumb_url=thumb, artists=artists, authors=authors, description=description, name=' '.join([name, '|', 'Ch.', str(chapter.chapter).replace('.0', ''), chapter.title]), last_chapter=last_chapter ) return {'project': 'moori'} @view_config(route_name='url_search', renderer='layouts/home.html') @CrawlAble() def search_view(self): return {'project': 'moori'} @view_config(route_name='url_genre', renderer='layouts/home.html') @CrawlAble() def genre_view(self): return {'project': 'moori'} @view_config(context='pyramid.exceptions.NotFound', renderer='layouts/404.html') def not_found_view(self): return {'project': 'moori'}
lgpl-3.0
-2,011,897,003,556,208,400
34.201613
105
0.592805
false
Saevon/Recipes
python/ticket_semaphore.py
1
2045
import contextlib
import multiprocessing
import time
import ctypes


class Ticket():
    ''' A ticket from a TicketSemaphore '''

    def __init__(self, ticketer, size):
        self.size = size
        self.ticketer = ticketer

    def release(self, *args, **kwargs):
        ''' Releases this ticket from the owning ticketer '''
        self.ticketer.release(self, *args, **kwargs)


class TicketSemaphore():
    ''' Semaphore that allows grabbing different sizes of product

    ticketer = TicketSemaphore(10)
    ticket = ticketer.acquire(size=3)
    ticket.release()

    with ticketer(size=3):
        pass
    '''

    def __init__(self, size):
        # Shared counter of remaining capacity; starts out fully available.
        self.available = multiprocessing.Value(ctypes.c_int, size)
        self.size = size
        self.lock = multiprocessing.Condition()

    def acquire(self, timeout=None, size=1):
        ''' Grabs a ticket of the given size, or returns False on timeout '''
        time_left = None
        if timeout is not None:
            start = time.time()
            time_left = timeout

        if not self.lock.acquire(timeout=time_left):
            # We've run out of time before even getting the internal lock
            return False

        try:
            # Wait until there is enough space
            while self.available.value < size:
                if timeout is not None:
                    time_left = timeout - (time.time() - start)
                    if time_left <= 0:
                        # We've run out of time
                        return False
                self.lock.wait(timeout=time_left)

            # The ticket is ours!
            self.available.value -= size
            return Ticket(self, size)
        finally:
            self.lock.release()

    def release(self, ticket):
        ''' Releases the given ticket '''
        with self.lock:
            self.available.value += ticket.size
            if self.available.value > self.size:
                raise OverflowError("Too many tickets returned")
            # Wake up any waiters now that capacity has been returned
            self.lock.notify_all()

    def __call__(self, **kwargs):
        ''' ContextManager with arguments '''
        @contextlib.contextmanager
        def with_ticket_lock():
            ticket = None
            try:
                ticket = self.acquire(**kwargs)
                yield ticket
            finally:
                if ticket:
                    ticket.release()

        return with_ticket_lock()
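# --- Minimal usage sketch (added for illustration; not part of the original recipe) ---
# A small, single-process demonstration of the semantics described in the docstring;
# the capacity of 10 and the ticket sizes are illustrative only.
if __name__ == '__main__':
    ticketer = TicketSemaphore(10)
    ticket = ticketer.acquire(size=3)        # leaves 7 units available
    assert ticketer.available.value == 7
    ticket.release()                         # back to full capacity
    assert ticketer.available.value == 10
    with ticketer(size=4):                   # context-manager form
        assert ticketer.available.value == 6
    assert ticketer.available.value == 10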
mit
-3,672,569,619,838,448,000
24.5625
64
0.556479
false
alvarofe/cassle
handlers/pin.py
1
1816
from handlers import handlers
from handlers import handler
from conf import config, debug_logger
from handlers.base import BaseHandler
from db.database import PinDB
import logging
from notification.event_notification import MITMNotification
import base64

logger = logging.getLogger(__name__)


#TODO rewrite this handler to do it properly
@handler(handlers, isHandler=config.V_PINNING)
class Pinning(BaseHandler):

    name = "pinning"

    def __init__(self, cert, ocsp):
        super(Pinning, self).__init__(cert, ocsp)
        self.on_certificate(cert)

    def on_certificate(self, cert):
        name = cert.subject_common_name()
        issuer_name = cert.issuer_common_name()
        query = db.get(name)
        if query is None:
            debug_logger.debug(
                "\t[-] You have not pinned this certificate %s" % name)
            return
        try:
            spki = cert.hash_spki(deep=1, algorithm="sha256")
            spki = base64.b64encode(spki)
        except:
            logger.error("Getting spki of the intermediate CA %s" % name)
            return
        try:
            issuers = query["issuers"]
            for i in issuers[issuer_name]:
                if spki == i:
                    debug_logger.debug("\t[+] pin correct %s " % name)
                    return
            logger.info("\t[-] Pin does not match %s" % name)
            debug_logger.debug("\t[-] Pin does not match %s" % name)
            MITMNotification.notify(
                title="Pinning", message=cert.subject_common_name())
        except:
            MITMNotification.notify(
                title="Pinning", message="Issuer different")
            debug_logger.debug("\t[-] issuer with different name %s" % name)


db = PinDB(config.DB_NAME, "pinning")
gpl-3.0
-4,842,815,281,196,532,000
29.779661
76
0.5837
false
kcarnold/counterfactual-lm
code/tokenization.py
1
2976
import re
import string

from nltk.tokenize import RegexpTokenizer, PunktSentenceTokenizer

WORD_RE = re.compile(r'\w+(?:[\',:]\w+)*')
END_PUNCT = set('.,?!:')


def token_spans(text):
    for match in re.finditer(r'[^-/\s]+', text):
        start, end = match.span()
        token_match = WORD_RE.search(text, start, end)
        if token_match is not None:
            span = token_match.span()
            yield span
            tok_end = span[1]
            if tok_end < end and text[tok_end] in END_PUNCT:
                yield tok_end, tok_end + 1


START_DOC = '<D>'
START_PARA = '<P>'
START_SENT = '<S>'
END_SENT = '</S>'

paragraph_re = re.compile(r'(?:[ ]*[^\s][^\n]*[\n]?)+')
paragraph_tokenizer = RegexpTokenizer(paragraph_re)
sentence_tokenizer = PunktSentenceTokenizer()


def tokenize(doc):
    res = [START_DOC]
    afters = []
    end_of_prev_para = 0
    for para_start, para_end in paragraph_tokenizer.span_tokenize(doc):
        afters.append(doc[end_of_prev_para:para_start])
        para = doc[para_start:para_end]
        end_of_prev_para = para_end
        end_of_prev_sentence = 0
        res.append(START_PARA)
        for sent_start, sent_end in sentence_tokenizer.span_tokenize(para):
            sent = para[sent_start:sent_end]
            tspans = list(token_spans(sent))
            if not tspans:
                continue
            afters.append(para[end_of_prev_sentence:sent_start])
            end_of_prev_sentence = sent_end
            res.append(START_SENT)
            end_of_prev_token = 0
            for tok_start, tok_end in tspans:
                afters.append(sent[end_of_prev_token:tok_start])
                res.append(sent[tok_start:tok_end])
                end_of_prev_token = tok_end
            res.append(END_SENT)
            afters.append(sent[end_of_prev_token:])
        end_of_prev_para -= len(para) - end_of_prev_sentence
    afters.append(doc[end_of_prev_para:])
    return res, afters


def tokenize_mid_document(doc_so_far):
    if len(doc_so_far.strip()) == 0:
        return [START_DOC, START_PARA, START_SENT, ''], ['', '', '', '']
    tok_list, afters = tokenize(doc_so_far)
    if doc_so_far.endswith('\n\n'):
        # starting a new paragraph
        if tok_list[-1] in [START_PARA, START_SENT]:
            print("Huh? Ended with double-newlines but also with start-of-para?", repr(tok_list[-5:]))
        tok_list += [START_PARA, START_SENT, '']
        afters += ['', '', '']
    else:
        assert tok_list[-1] == END_SENT
        if tok_list[-2] in '.?!':
            # Real EOS
            tok_list += [START_SENT, '']
            afters += ['', '']
        elif doc_so_far[-1] in string.whitespace:
            # The last EOS was spurious, but we're not mid-word.
            tok_list[-1] = ""
        else:
            # The last EOS was spurious, but we ARE mid-word.
            tok_list.pop(-1)
            after = afters.pop(-1)
            afters[-1] += after
    return tok_list, afters
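# --- Minimal usage sketch (added for illustration; not part of the original module) ---
# Shows the shape of the (tokens, afters) output on a tiny two-paragraph document.
if __name__ == '__main__':
    toks, afters = tokenize("Hello there. How are you?\n\nNew paragraph.")
    # toks interleaves structural markers with word/punctuation tokens, roughly:
    # ['<D>', '<P>', '<S>', 'Hello', 'there', '.', '</S>', '<S>', 'How', ..., '<P>', ...]
    print(toks)
    # afters holds the inter-token text needed to reassemble the original document.
    print(afters)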
mit
1,327,267,331,160,409,600
34.011765
102
0.552755
false
uclouvain/OSIS-Louvain
program_management/tests/ddd/repositories/test_persist_tree.py
1
4221
############################################################################## # # OSIS stands for Open Student Information System. It's an application # designed to manage the core business of higher education institutions, # such as universities, faculties, institutes and professional schools. # The core business involves the administration of students, teachers, # courses, programs and so on. # # Copyright (C) 2015-2020 Université catholique de Louvain (http://www.uclouvain.be) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of this license - GNU General Public License - is available # at the root of the source code of this program. If not, # see http://www.gnu.org/licenses/. # ############################################################################## from django.test import TestCase from base.models.group_element_year import GroupElementYear from base.tests.factories.academic_year import AcademicYearFactory from base.tests.factories.education_group_year import TrainingFactory, GroupFactory from base.tests.factories.learning_unit_year import LearningUnitYearFactory from program_management.ddd.domain.node import NodeEducationGroupYear, NodeLearningUnitYear from program_management.ddd.repositories import persist_tree from program_management.tests.ddd.factories.program_tree import ProgramTreeFactory class TestSaveTree(TestCase): def setUp(self): academic_year = AcademicYearFactory(current=True) training = TrainingFactory(academic_year=academic_year) common_core = GroupFactory(academic_year=academic_year) learning_unit_year = LearningUnitYearFactory(academic_year=academic_year) self.root_node = NodeEducationGroupYear( node_id=training.pk, acronym=training.acronym, title=training.title, year=training.academic_year.year ) self.common_core_node = NodeEducationGroupYear( node_id=common_core.pk, acronym=common_core.acronym, title=common_core.title, year=common_core.academic_year.year ) self.learning_unit_year_node = NodeLearningUnitYear( node_id=learning_unit_year.pk, acronym=learning_unit_year.acronym, title=learning_unit_year.specific_title, year=learning_unit_year.academic_year.year ) def test_case_tree_persist_from_scratch(self): self.common_core_node.add_child(self.learning_unit_year_node) self.root_node.add_child(self.common_core_node) tree = ProgramTreeFactory(root_node=self.root_node) persist_tree.persist(tree) self.assertEquals(GroupElementYear.objects.all().count(), 2) def test_case_tree_persist_with_some_existing_part(self): self.root_node.add_child(self.common_core_node) tree = ProgramTreeFactory(root_node=self.root_node) persist_tree.persist(tree) self.assertEquals(GroupElementYear.objects.all().count(), 1) # Append UE to common core self.common_core_node.add_child(self.learning_unit_year_node) persist_tree.persist(tree) self.assertEquals(GroupElementYear.objects.all().count(), 2) def test_case_tree_persist_after_detach_element(self): self.root_node.add_child(self.common_core_node) tree = ProgramTreeFactory(root_node=self.root_node) persist_tree.persist(tree) self.assertEquals(GroupElementYear.objects.all().count(), 1) 
path_to_detach = "|".join([str(self.root_node.pk), str(self.common_core_node.pk)]) tree.detach_node(path_to_detach) persist_tree.persist(tree) self.assertEquals(GroupElementYear.objects.all().count(), 0)
agpl-3.0
-9,200,787,776,018,081,000
43.893617
91
0.682701
false
derdmitry/socraticqs2
mysite/psa/migrations/0003_auto_20150420_0308.py
1
1092
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('default', '0001_initial'),
        ('psa', '0002_usersession'),
    ]

    operations = [
        migrations.CreateModel(
            name='SecondaryEmail',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('email', models.EmailField(max_length=75, verbose_name=b'Secondary Email')),
                ('provider', models.ForeignKey(to='default.UserSocialAuth')),
                ('user', models.ForeignKey(related_name='secondary', to=settings.AUTH_USER_MODEL)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='secondaryemail',
            unique_together=set([('provider', 'email')]),
        ),
    ]
apache-2.0
-710,191,695,645,804,500
32.090909
114
0.576923
false
lilydjwg/you-get
src/you_get/extractors/tumblr.py
1
4239
#!/usr/bin/env python __all__ = ['tumblr_download'] from ..common import * from .universal import * from .dailymotion import dailymotion_download from .vimeo import vimeo_download from .vine import vine_download def tumblr_download(url, output_dir='.', merge=True, info_only=False, **kwargs): if re.match(r'https?://\d+\.media\.tumblr\.com/', url): universal_download(url, output_dir, merge=merge, info_only=info_only) return html = parse.unquote(get_html(url)).replace('\/', '/') feed = r1(r'<meta property="og:type" content="tumblr-feed:(\w+)" />', html) if feed in ['photo', 'photoset', 'entry'] or feed is None: # try to extract photos page_title = r1(r'<meta name="description" content="([^"\n]+)', html) or \ r1(r'<meta property="og:description" content="([^"\n]+)', html) or \ r1(r'<title>([^<\n]*)', html) urls = re.findall(r'(https?://[^;"&]+/tumblr_[^;"]+_\d+\.jpg)', html) +\ re.findall(r'(https?://[^;"&]+/tumblr_[^;"]+_\d+\.png)', html) +\ re.findall(r'(https?://[^;"&]+/tumblr_[^";]+_\d+\.gif)', html) tuggles = {} for url in urls: filename = parse.unquote(url.split('/')[-1]) title = '.'.join(filename.split('.')[:-1]) tumblr_id = r1(r'^tumblr_(.+)_\d+$', title) quality = int(r1(r'^tumblr_.+_(\d+)$', title)) ext = filename.split('.')[-1] size = int(get_head(url)['Content-Length']) if tumblr_id not in tuggles or tuggles[tumblr_id]['quality'] < quality: tuggles[tumblr_id] = { 'title': title, 'url': url, 'quality': quality, 'ext': ext, 'size': size, } if tuggles: size = sum([tuggles[t]['size'] for t in tuggles]) print_info(site_info, page_title, None, size) if not info_only: for t in tuggles: title = tuggles[t]['title'] ext = tuggles[t]['ext'] size = tuggles[t]['size'] url = tuggles[t]['url'] print_info(site_info, title, ext, size) download_urls([url], title, ext, size, output_dir=output_dir) return # feed == 'audio' or feed == 'video' or feed is None # try to extract video / audio real_url = r1(r'source src=\\x22([^\\]+)\\', html) if not real_url: real_url = r1(r'audio_file=([^&]+)&', html) if real_url: real_url = real_url + '?plead=please-dont-download-this-or-our-lawyers-wont-let-us-host-audio' if not real_url: real_url = r1(r'<source src="([^"]*)"', html) if not real_url: iframe_url = r1(r'<iframe[^>]+src=[\'"]([^\'"]*)[\'"]', html) if iframe_url[:2] == '//': iframe_url = 'http:' + iframe_url if re.search(r'player\.vimeo\.com', iframe_url): vimeo_download(iframe_url, output_dir, merge=merge, info_only=info_only, referer='http://tumblr.com/', **kwargs) return elif re.search(r'dailymotion\.com', iframe_url): dailymotion_download(iframe_url, output_dir, merge=merge, info_only=info_only, **kwargs) return elif re.search(r'vine\.co', iframe_url): vine_download(iframe_url, output_dir, merge=merge, info_only=info_only, **kwargs) return else: iframe_html = get_content(iframe_url) real_url = r1(r'<source src="([^"]*)"', iframe_html) title = unescape_html(r1(r'<meta property="og:title" content="([^"]*)" />', html) or r1(r'<meta property="og:description" content="([^"]*)" />', html) or r1(r'<title>([^<\n]*)', html) or url.split("/")[4]).replace('\n', '') type, ext, size = url_info(real_url) print_info(site_info, title, type, size) if not info_only: download_urls([real_url], title, ext, size, output_dir, merge = merge) site_info = "Tumblr.com" download = tumblr_download download_playlist = playlist_not_supported('tumblr')
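# --- Hypothetical usage (added for illustration; the post URL is a placeholder) ---
# tumblr_download('http://staff.tumblr.com/post/1234567890', output_dir='.', info_only=True)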
mit
514,626,946,501,440,450
42.255102
106
0.513565
false
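A minimal usage sketch for the extractor above, based only on the visible signature of tumblr_download; the post URL is a placeholder and the you-get package must be importable.

from you_get.extractors.tumblr import tumblr_download

url = 'https://staff.tumblr.com/post/123456789'  # placeholder post URL

# Probe the post first (info_only prints title/size without downloading),
# then fetch it into a local directory.
tumblr_download(url, info_only=True)
tumblr_download(url, output_dir='downloads', merge=True)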
mkoistinen/aldryn-newsblog
docs/conf.py
1
8602
# -*- coding: utf-8 -*- # # Aldryn Newsblog documentation build configuration file, created by # sphinx-quickstart on Wed Dec 10 15:42:58 2014. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Aldryn Newsblog' copyright = u'2014, Tom Berger' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.0.1' # The full version, including alpha/beta/rc tags. release = '0.0.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['build'] # The reST default role (used for this markup: `text`) to use for all # documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. #keep_warnings = False # -- Options for HTML output ---------------------------------------------- # on_rtd is whether we are on readthedocs.org on_rtd = os.environ.get('READTHEDOCS', None) == 'True' if not on_rtd: # only import and set the theme if we're building docs locally try: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] except: html_theme = 'default' # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. 
# html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. #html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'AldrynNewsblogdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'a4paper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'AldrynNewsblog.tex', u'Aldryn Newsblog Documentation', u'Tom Berger', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'aldrynnewsblog', u'Aldryn Newsblog Documentation', [u'Tom Berger'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'AldrynNewsblog', u'Aldryn Newsblog Documentation', u'Tom Berger', 'AldrynNewsblog', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. #texinfo_no_detailmenu = False
bsd-3-clause
-5,511,913,359,570,509,000
30.977695
79
0.706231
false
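A hedged sketch of building the documentation this conf.py configures: the source directory follows the file's path (docs/), while the output directory and the availability of the sphinx-build CLI are assumptions.

import subprocess

# Equivalent to running: sphinx-build -b html docs docs/build
subprocess.run(['sphinx-build', '-b', 'html', 'docs', 'docs/build'], check=True)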
mikeckennedy/consuming_services_python_demos
services/consuming_services_apis/consuming_services_apis/api/blog_soap.py
1
17656
from datetime import datetime from pyramid.httpexceptions import exception_response from pyramid.view import view_config from pyramid.response import Response from xml.etree import ElementTree from consuming_services_apis import Post from consuming_services_apis.data.memory_db import MemoryDb @view_config(route_name='soap') def blog_posts(request): print("Processing {} request from {} for the SOAP service: {}, ua: {}".format( # noqa request.method, get_ip(request), request.url, request.user_agent )) if "WSDL" in request.GET or "wsdl" in request.GET: return Response(body=build_wsdl(request), content_type='application/xml') # noqa action = request.headers.get('Soapaction').replace('http://tempuri.org/', '').lower().strip("\"") # noqa if action == 'getpost': body = clean_namespaces(request.body.decode('utf-8')) dom = ElementTree.fromstring(body) return get_post_response(dom, request) if action == 'allposts': return all_post_response(request) if action == 'createpost': body = clean_namespaces(request.body.decode('utf-8')) print("CREATE VIA:" + body) dom = ElementTree.fromstring(body) return create_post(dom, request) if action == 'updatepost': body = clean_namespaces(request.body.decode('utf-8')) print("UPDATE VIA:" + body) dom = ElementTree.fromstring(body) return update_post(dom, request) if action == 'deletepost': body = clean_namespaces(request.body.decode('utf-8')) dom = ElementTree.fromstring(body) return delete_post_response(dom, request) print("BODY: {}".format(request.body.decode('utf-8'))) return Response("<TEST />") def all_post_response(request): posts = MemoryDb.get_posts(get_ip(request)) post_template = """ <Post> <Id>{}</Id> <Title>{}</Title> <Published>{}</Published> <Content>{}</Content> <ViewCount>{}</ViewCount> </Post>""" posts_fragments = [ post_template.format(p.id, p.title, p.published, p.content, p.view_count) # noqa for p in posts ] resp_xml = """<?xml version="1.0" encoding="utf-8"?> <soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <AllPostsResponse xmlns="http://tempuri.org/"> <AllPostsResult> {} </AllPostsResult> </AllPostsResponse> </soap:Body> </soap:Envelope>""".format("\n".join(posts_fragments)) # noqa return Response(body=resp_xml, content_type='text/xml') def get_post_response(dom, request): id_text = dom.find('Body/GetPost/id').text post = MemoryDb.get_post(id_text, get_ip(request)) if not post: raise exception_response(404) resp_xml = """<?xml version="1.0" encoding="utf-8"?> <soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <GetPostResponse xmlns="http://tempuri.org/"> <GetPostResult> <Id>{}</Id> <Title>{}</Title> <Published>{}</Published> <Content>{}</Content> <ViewCount>{}</ViewCount> </GetPostResult> </GetPostResponse> </soap:Body> </soap:Envelope>""".format(post.id, post.title, post.published, post.content, post.view_count) # noqa return Response(body=resp_xml, content_type='text/xml') def delete_post_response(dom, request): id_text = dom.find('Body/DeletePost/id').text post = MemoryDb.get_post(id_text, get_ip(request)) if not post: raise exception_response(404) if MemoryDb.is_post_read_only(post.id): raise exception_response(403) MemoryDb.delete_post(post, get_ip(request)) resp_xml = """<?xml version="1.0" encoding="utf-8"?> <soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" 
xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <DeletePostResponse xmlns="http://tempuri.org/" /> </soap:Body> </soap:Envelope>""" # noqa return Response(body=resp_xml, content_type='text/xml') def create_post(dom, request): title = dom.find('Body/CreatePost/title').text content = dom.find('Body/CreatePost/content').text view_count = int(dom.find('Body/CreatePost/viewCount').text) now = datetime.now() published = "{}-{}-{}".format(now.year, str(now.month).zfill(2), str(now.day).zfill(2)) # noqa post = Post( title, content, view_count, published ) trim_post_size(post) MemoryDb.add_post(post, get_ip(request)) resp_xml = """<?xml version="1.0" encoding="utf-8"?> <soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <CreatePostResponse xmlns="http://tempuri.org/"> <CreatePostResult> <Id>{}</Id> <Title>{}</Title> <Published>{}</Published> <Content>{}</Content> <ViewCount>{}</ViewCount> </CreatePostResult> </CreatePostResponse> </soap:Body> </soap:Envelope>""".format(post.id, post.title, post.published, post.content, post.view_count) # noqa return Response(body=resp_xml, content_type='text/xml') def update_post(dom, request): post_id = dom.find('Body/UpdatePost/id').text post = MemoryDb.get_post(post_id, get_ip(request)) if not post: raise exception_response(404) if MemoryDb.is_post_read_only(post_id): raise exception_response(403) post.title = dom.find('Body/UpdatePost/title').text post.content = dom.find('Body/UpdatePost/content').text post.view_count = int(dom.find('Body/UpdatePost/viewCount').text) resp_xml = """<?xml version="1.0" encoding="utf-8"?> <soap:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> <soap:Body> <UpdatePostResponse xmlns="http://tempuri.org/"> <UpdatePostResult> <Id>{}</Id> <Title>{}</Title> <Published>{}</Published> <Content>{}</Content> <ViewCount>{}</ViewCount> </UpdatePostResult> </UpdatePostResponse> </soap:Body> </soap:Envelope>""".format(post.id, post.title, post.published, post.content, post.view_count) # noqa return Response(body=resp_xml, content_type='text/xml') def get_ip(request): # The real IP is stripped by nginx and the direct request # looks like a call from localhost. I've configured nginx # to pass the IP it sees under the header X-Real-IP. 
proxy_pass_real_ip = request.headers.get('X-Real-IP') if proxy_pass_real_ip: return proxy_pass_real_ip elif request.remote_addr: return request.remote_addr else: return request.client_addr def clean_namespaces(body): return ( body.replace('SOAP-ENV:', '') .replace('xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa .replace('xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"', '') # noqa .replace('xmlns:ns0="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa .replace('xmlns:ns1="http://tempuri.org/"', '') .replace('xmlns:ns0="http://tempuri.org/"', '') .replace('xmlns:ns1="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa .replace('xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"', '') # noqa .replace('soap:', '') .replace('xmlns:xsd="http://www.w3.org/2001/XMLSchema"', '') .replace('xmlns="http://tempuri.org/"', '') .replace('SOAP-ENV:', '') .replace('ns0:', '') .replace('ns1:', '')) # noqa def build_wsdl(request): wsdl = """ <wsdl:definitions xmlns:tm="http://microsoft.com/wsdl/mime/textMatching/" xmlns:soapenc="http://schemas.xmlsoap.org/soap/encoding/" xmlns:mime="http://schemas.xmlsoap.org/wsdl/mime/" xmlns:tns="http://tempuri.org/" xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" xmlns:s="http://www.w3.org/2001/XMLSchema" xmlns:soap12="http://schemas.xmlsoap.org/wsdl/soap12/" xmlns:http="http://schemas.xmlsoap.org/wsdl/http/" xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" targetNamespace="http://tempuri.org/"> <wsdl:types> <s:schema elementFormDefault="qualified" targetNamespace="http://tempuri.org/"> <s:element name="AllPosts"> <s:complexType/> </s:element> <s:element name="AllPostsResponse"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="AllPostsResult" type="tns:ArrayOfPost"/> </s:sequence> </s:complexType> </s:element> <s:complexType name="ArrayOfPost"> <s:sequence> <s:element minOccurs="0" maxOccurs="unbounded" name="Post" nillable="true" type="tns:Post"/> </s:sequence> </s:complexType> <s:complexType name="Post"> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="Id" type="s:string"/> <s:element minOccurs="0" maxOccurs="1" name="Title" type="s:string"/> <s:element minOccurs="1" maxOccurs="1" name="Published" type="s:string"/> <s:element minOccurs="0" maxOccurs="1" name="Content" type="s:string"/> <s:element minOccurs="1" maxOccurs="1" name="ViewCount" type="s:int"/> </s:sequence> </s:complexType> <s:element name="GetPost"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="id" type="s:string"/> </s:sequence> </s:complexType> </s:element> <s:element name="GetPostResponse"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="GetPostResult" type="tns:Post"/> </s:sequence> </s:complexType> </s:element> <s:element name="CreatePost"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="title" type="s:string"/> <s:element minOccurs="0" maxOccurs="1" name="content" type="s:string"/> <s:element minOccurs="1" maxOccurs="1" name="viewCount" type="s:int"/> </s:sequence> </s:complexType> </s:element> <s:element name="CreatePostResponse"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="CreatePostResult" type="tns:Post"/> </s:sequence> </s:complexType> </s:element> <s:element name="UpdatePost"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="id" type="s:string"/> <s:element minOccurs="0" maxOccurs="1" name="title" type="s:string"/> <s:element minOccurs="0" maxOccurs="1" name="content" type="s:string"/> 
<s:element minOccurs="1" maxOccurs="1" name="viewCount" type="s:int"/> </s:sequence> </s:complexType> </s:element> <s:element name="UpdatePostResponse"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="UpdatePostResult" type="tns:Post"/> </s:sequence> </s:complexType> </s:element> <s:element name="DeletePost"> <s:complexType> <s:sequence> <s:element minOccurs="0" maxOccurs="1" name="id" type="s:string"/> </s:sequence> </s:complexType> </s:element> <s:element name="DeletePostResponse"> <s:complexType/> </s:element> </s:schema> </wsdl:types> <wsdl:message name="AllPostsSoapIn"> <wsdl:part name="parameters" element="tns:AllPosts"/> </wsdl:message> <wsdl:message name="AllPostsSoapOut"> <wsdl:part name="parameters" element="tns:AllPostsResponse"/> </wsdl:message> <wsdl:message name="GetPostSoapIn"> <wsdl:part name="parameters" element="tns:GetPost"/> </wsdl:message> <wsdl:message name="GetPostSoapOut"> <wsdl:part name="parameters" element="tns:GetPostResponse"/> </wsdl:message> <wsdl:message name="CreatePostSoapIn"> <wsdl:part name="parameters" element="tns:CreatePost"/> </wsdl:message> <wsdl:message name="CreatePostSoapOut"> <wsdl:part name="parameters" element="tns:CreatePostResponse"/> </wsdl:message> <wsdl:message name="UpdatePostSoapIn"> <wsdl:part name="parameters" element="tns:UpdatePost"/> </wsdl:message> <wsdl:message name="UpdatePostSoapOut"> <wsdl:part name="parameters" element="tns:UpdatePostResponse"/> </wsdl:message> <wsdl:message name="DeletePostSoapIn"> <wsdl:part name="parameters" element="tns:DeletePost"/> </wsdl:message> <wsdl:message name="DeletePostSoapOut"> <wsdl:part name="parameters" element="tns:DeletePostResponse"/> </wsdl:message> <wsdl:portType name="BlogSoap"> <wsdl:operation name="AllPosts"> <wsdl:input message="tns:AllPostsSoapIn"/> <wsdl:output message="tns:AllPostsSoapOut"/> </wsdl:operation> <wsdl:operation name="GetPost"> <wsdl:input message="tns:GetPostSoapIn"/> <wsdl:output message="tns:GetPostSoapOut"/> </wsdl:operation> <wsdl:operation name="CreatePost"> <wsdl:input message="tns:CreatePostSoapIn"/> <wsdl:output message="tns:CreatePostSoapOut"/> </wsdl:operation> <wsdl:operation name="UpdatePost"> <wsdl:input message="tns:UpdatePostSoapIn"/> <wsdl:output message="tns:UpdatePostSoapOut"/> </wsdl:operation> <wsdl:operation name="DeletePost"> <wsdl:input message="tns:DeletePostSoapIn"/> <wsdl:output message="tns:DeletePostSoapOut"/> </wsdl:operation> </wsdl:portType> <wsdl:binding name="BlogSoap" type="tns:BlogSoap"> <soap:binding transport="http://schemas.xmlsoap.org/soap/http"/> <wsdl:operation name="AllPosts"> <soap:operation soapAction="http://tempuri.org/AllPosts" style="document"/> <wsdl:input> <soap:body use="literal"/> </wsdl:input> <wsdl:output> <soap:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="GetPost"> <soap:operation soapAction="http://tempuri.org/GetPost" style="document"/> <wsdl:input> <soap:body use="literal"/> </wsdl:input> <wsdl:output> <soap:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="CreatePost"> <soap:operation soapAction="http://tempuri.org/CreatePost" style="document"/> <wsdl:input> <soap:body use="literal"/> </wsdl:input> <wsdl:output> <soap:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="UpdatePost"> <soap:operation soapAction="http://tempuri.org/UpdatePost" style="document"/> <wsdl:input> <soap:body use="literal"/> </wsdl:input> <wsdl:output> <soap:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation 
name="DeletePost"> <soap:operation soapAction="http://tempuri.org/DeletePost" style="document"/> <wsdl:input> <soap:body use="literal"/> </wsdl:input> <wsdl:output> <soap:body use="literal"/> </wsdl:output> </wsdl:operation> </wsdl:binding> <wsdl:binding name="BlogSoap12" type="tns:BlogSoap"> <soap12:binding transport="http://schemas.xmlsoap.org/soap/http"/> <wsdl:operation name="AllPosts"> <soap12:operation soapAction="http://tempuri.org/AllPosts" style="document"/> <wsdl:input> <soap12:body use="literal"/> </wsdl:input> <wsdl:output> <soap12:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="GetPost"> <soap12:operation soapAction="http://tempuri.org/GetPost" style="document"/> <wsdl:input> <soap12:body use="literal"/> </wsdl:input> <wsdl:output> <soap12:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="CreatePost"> <soap12:operation soapAction="http://tempuri.org/CreatePost" style="document"/> <wsdl:input> <soap12:body use="literal"/> </wsdl:input> <wsdl:output> <soap12:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="UpdatePost"> <soap12:operation soapAction="http://tempuri.org/UpdatePost" style="document"/> <wsdl:input> <soap12:body use="literal"/> </wsdl:input> <wsdl:output> <soap12:body use="literal"/> </wsdl:output> </wsdl:operation> <wsdl:operation name="DeletePost"> <soap12:operation soapAction="http://tempuri.org/DeletePost" style="document"/> <wsdl:input> <soap12:body use="literal"/> </wsdl:input> <wsdl:output> <soap12:body use="literal"/> </wsdl:output> </wsdl:operation> </wsdl:binding> <wsdl:service name="Blog"> <wsdl:port name="BlogSoap" binding="tns:BlogSoap"> <soap:address location="{0}/soap"/> </wsdl:port> <wsdl:port name="BlogSoap12" binding="tns:BlogSoap12"> <soap12:address location="{0}/soap"/> </wsdl:port> </wsdl:service> </wsdl:definitions>""".format(request.host_url) # noqa return wsdl def trim_post_size(post): text_limit = 500 if post.content and len(post.content) > text_limit: post.content = post.content[:500] if post.title and len(post.title) > text_limit: post.title = post.title[:500] if post.published and len(post.published) > text_limit: post.published = post.published[:500]
mit
3,373,341,063,547,171,000
35.706861
503
0.632023
false
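A hedged client-side sketch for the SOAP view above, using only the standard library: the endpoint host and post id are placeholders, and the envelope plus Soapaction header mirror what blog_posts() parses after clean_namespaces() strips the prefixes.

import urllib.request

SOAP_URL = 'http://localhost:6543/soap'  # placeholder endpoint

envelope = """<?xml version="1.0" encoding="utf-8"?>
<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/">
  <soap:Body>
    <GetPost xmlns="http://tempuri.org/">
      <id>1</id>
    </GetPost>
  </soap:Body>
</soap:Envelope>"""

request = urllib.request.Request(
    SOAP_URL,
    data=envelope.encode('utf-8'),
    headers={
        'Content-Type': 'text/xml; charset=utf-8',
        # The view strips the surrounding quotes and the tempuri.org prefix.
        'Soapaction': '"http://tempuri.org/GetPost"',
    },
)
with urllib.request.urlopen(request) as response:
    print(response.read().decode('utf-8'))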
zenieldanaku/DyDCreature_Editor
main.py
1
1129
from sys import exit from pygame import display as pantalla, event from pygame import init as py_init, quit as py_quit from azoe.engine import EventHandler from azoe.widgets import NamedValue from backend.entidad import Entidad py_init() fondo = pantalla.set_mode((400, 400)) event.set_blocked([12, 13]) entity = Entidad() initiative = NamedValue('Iniciativa') initiative.rect.top = entity.caracteristicas['DES'].name.rect.bottom + 2 entity.iniciativa.valor.rect.topleft = initiative.rect.right + 2, initiative.rect.top EventHandler.add_widget(initiative) EventHandler.add_widget(entity.iniciativa.valor) EventHandler.add_widget(entity.caracteristicas['DES'].name) EventHandler.add_widget(entity.caracteristicas['DES'].punt) EventHandler.add_widget(entity.caracteristicas['DES'].mod) EventHandler.currentFocus = entity.caracteristicas['DES'].punt hayCambios = True while hayCambios: fondo.fill((255, 255, 255)) entity.update() events = event.get() hayCambios = EventHandler.update(events, fondo) if hayCambios: pantalla.update(hayCambios) py_quit() exit()
mit
-6,875,978,619,516,324,000
28.513514
85
0.741364
false
UCSBarchlab/PyRTL
pyrtl/rtllib/matrix.py
1
54553
from functools import reduce from six.moves import builtins from pyrtl.rtllib import multipliers as mult from ..wire import Const, WireVector from ..corecircuits import as_wires, concat, select from ..pyrtlexceptions import PyrtlError from ..helperfuncs import formatted_str_to_val class Matrix(object): ''' Class for making a Matrix using PyRTL. Provides the ability to perform different matrix operations. ''' # Internally, this class uses a Python matrix of WireVectors. # So, a Matrix is represented as follows for a 2 x 2: # [[WireVector, WireVector], [WireVector, WireVector]] def __init__(self, rows, columns, bits, signed=False, value=None, max_bits=64): ''' Constructs a Matrix object. :param int rows: the number of rows in the matrix. Must be greater than 0 :param int columns: the number of columns in the matrix. Must be greater than 0 :param int bits: The amount of bits per wirevector. Must be greater than 0 :param bool signed: Currently not supported (will be added in the future) :param (WireVector/list) value: The value you want to initialize the Matrix with. If a WireVector, must be of size `rows * columns * bits`. If a list, must have `rows` rows and `columns` columns, and every element must fit in `bits` size. If not given, the matrix initializes to 0 :param int max_bits: The maximum number of bits each wirevector can have, even after operations like adding two matrices together results in larger resulting wirevectors :return: a constructed Matrix object ''' if not isinstance(rows, int): raise PyrtlError('Rows must be of type int, instead "%s" ' 'was passed of type %s' % (str(rows), type(rows))) if rows <= 0: raise PyrtlError('Rows cannot be less than or equal to zero. ' 'Rows value passed: %s' % str(rows)) if not isinstance(columns, int): raise PyrtlError('Columns must be of type int, instead "%s" ' 'was passed of type %s' % (str(columns), type(columns))) if columns <= 0: raise PyrtlError('Columns cannot be less than or equal to zero. ' 'Columns value passed: %s' % str(columns)) if not isinstance(bits, int): raise PyrtlError('Bits must be of type int, instead "%s" ' 'was passed of type %s' % (str(bits), type(bits))) if bits <= 0: raise PyrtlError( 'Bits cannot be negative or zero, ' 'instead "%s" was passed' % str(bits)) if max_bits is not None: if bits > max_bits: bits = max_bits self._matrix = [[0 for _ in range(columns)] for _ in range(rows)] if value is None: for i in range(rows): for j in range(columns): self._matrix[i][j] = Const(0) elif isinstance(value, WireVector): if value.bitwidth != bits * rows * columns: raise PyrtlError('Initialized bitwidth value does not match ' 'given value.bitwidth: %s, expected: %s' '' % (str(value.bitwidth), str(bits * rows * columns))) for i in range(rows): for j in range(columns): start_index = (j * bits) + (i * columns * bits) self._matrix[rows - i - 1][columns - j - 1] =\ as_wires(value[start_index:start_index + bits], bitwidth=bits) elif isinstance(value, list): if len(value) != rows or any(len(row) != columns for row in value): raise PyrtlError('Rows and columns mismatch\n' 'Rows: %s, expected: %s\n' 'Columns: %s, expected: %s' '' % (str(len(value)), str(rows), str(len(value[0])), str(columns))) for i in range(rows): for j in range(columns): self._matrix[i][j] = as_wires(value[i][j], bitwidth=bits) else: raise PyrtlError('Initialized value must be of type WireVector or ' 'list. 
Instead was passed value of type %s' % (type(value))) self.rows = rows self.columns = columns self._bits = bits self.bits = bits self.signed = False self.max_bits = max_bits @property def bits(self): ''' Gets the number of bits each value is allowed to hold. :return: an integer representing the number of bits ''' return self._bits @bits.setter def bits(self, bits): ''' Sets the number of bits. :param int bits: The number of bits. Must be greater than 0 Called automatically when bits is changed. NOTE: This function will truncate the most significant bits. ''' if not isinstance(bits, int): raise PyrtlError('Bits must be of type int, instead "%s" ' 'was passed of type %s' % (str(bits), type(bits))) if bits <= 0: raise PyrtlError( 'Bits cannot be negative or zero, ' 'instead "%s" was passed' % str(bits)) self._bits = bits for i in range(self.rows): for j in range(self.columns): self._matrix[i][j] = self._matrix[i][j][:bits] def __len__(self): ''' Gets the output WireVector length. :return: an integer representing the output WireVector bitwidth Used with default `len()` function ''' return self.bits * self.rows * self.columns def to_wirevector(self): ''' Outputs the PyRTL Matrix as a singular concatenated Wirevector. :return: a Wirevector representing the whole PyRTL matrix For instance, if we had a 2 x 1 matrix `[[wire_a, wire_b]]` it would return the concatenated wire: `wire = wire_a.wire_b` ''' result = [] for i in range(len(self._matrix)): for j in range(len(self._matrix[0])): result.append(as_wires(self[i, j], bitwidth=self.bits)) return as_wires(concat(*result), bitwidth=len(self)) def transpose(self): ''' Constructs the transpose of the matrix :return: a Matrix object representing the transpose ''' result = Matrix(self.columns, self.rows, self.bits, max_bits=self.max_bits) for i in range(result.rows): for j in range(result.columns): result[i, j] = self[j, i] return result def __reversed__(self): ''' Constructs the reverse of matrix :return: a Matrix object representing the reverse Used with the reversed() method ''' result = Matrix(self.rows, self.columns, self.bits, max_bits=self.max_bits) for i in range(self.rows): for j in range(self.columns): result[i, j] = self[self.rows - 1 - i, self.columns - 1 - j] return result def __getitem__(self, key): ''' Accessor for the matrix. :param (int/slice row, int/slice column) key: The key value to get :return: WireVector or Matrix containing the value of key Called when using square brackets ([]). Examples:: int_matrix = [[0, 1, 2], [3, 4, 5], [6, 7, 8]] matrix = Matrix.Matrix(3, 3, 4, value=int_matrix) matrix[1] == [3, 4, 5] matrix[2, 0] == 6 matrix[(2, 0)] = 6 matrix[slice(0, 2), slice(0, 3)] == [[0, 1, 2], [3, 4, 5]] matrix[0:2, 0:3] == [[0, 1, 2], [3, 4, 5]] matrix[:2] == [[0, 1, 2], [3, 4, 5]] matrix[-1] == [6, 7, 8] matrix[-2:] == [[3, 4, 5], [6, 7, 8]] ''' if isinstance(key, tuple): rows, columns = key # First set up proper slice if not isinstance(rows, slice): if not isinstance(rows, int): raise PyrtlError('Rows must be of type int or slice, ' 'instead "%s" was passed of type %s' % (str(rows), type(rows))) if rows < 0: rows = self.rows - abs(rows) if rows < 0: raise PyrtlError("Invalid bounds for rows. 
Max rows: %s, got: %s" % ( str(self.rows), str(rows))) rows = slice(rows, rows + 1, 1) if not isinstance(columns, slice): if not isinstance(columns, int): raise PyrtlError('Columns must be of type int or slice, ' 'instead "%s" was passed of type %s' % (str(columns), type(columns))) if columns < 0: columns = self.columns - abs(columns) if columns < 0: raise PyrtlError("Invalid bounds for columns. Max columns: %s, got: %s" % ( str(self.columns), str(columns))) columns = slice(columns, columns + 1, 1) if rows.start is None: rows = slice(0, rows.stop, rows.step) elif rows.start < 0: rows = slice(self.rows - abs(rows.start), rows.stop, rows.step) if rows.stop is None: rows = slice(rows.start, self.rows, rows.step) elif rows.stop < 0: rows = slice(rows.start, self.rows - abs(rows.stop), rows.step) rows = slice(rows.start, rows.stop, 1) if columns.start is None: columns = slice(0, columns.stop, columns.step) elif columns.start < 0: columns = slice(self.columns - abs(columns.start), columns.stop, columns.step) if columns.stop is None: columns = slice(columns.start, self.columns, columns.step) elif columns.stop < 0: columns = slice( columns.start, self.columns - abs(columns.stop), columns.step) columns = slice(columns.start, columns.stop, 1) # Check slice bounds if rows.start > self.rows or rows.stop > self.rows \ or rows.start < 0 or rows.stop < 0: raise PyrtlError("Invalid bounds for rows. Max rows: %s, got: %s" % ( str(self.rows), str(rows.start) + ":" + str(rows.stop))) if columns.start > self.columns or columns.stop > self.columns \ or columns.start < 0 or columns.stop < 0: raise PyrtlError("Invalid bounds for columns. Max columns: %s, got: %s" % ( str(self.columns), str(columns.start) + ":" + str(columns.stop))) # If it's a single value we want to return a wirevector if rows.stop - rows.start == 1 and \ columns.stop - columns.start == 1: return as_wires(self._matrix[rows][0][columns][0], bitwidth=self.bits) # Otherwise set up matrix and return that result = [[0 for _ in range(columns.stop - columns.start)] for _ in range(rows.stop - rows.start)] for i in range(len(result)): for j in range(len(result[0])): result[i][j] = self._matrix[i + rows.start][j + columns.start] return Matrix(len(result), len(result[0]), self._bits, signed=self.signed, value=result, max_bits=self.max_bits) # Second case when we just want to get full row if isinstance(key, int): if key < 0: start = self.rows - abs(key) if start < 0: raise PyrtlError('Index %d is out of bounds for ' 'matrix with %d rows' % (key, self.rows)) key = slice(start, start + 1, None) else: key = slice(key, key + 1, None) return self[key, :] # Third case when we want multiple rows if isinstance(key, slice): return self[key, :] # Otherwise improper value was passed raise PyrtlError('Rows must be of type int or slice, ' 'instead "%s" was passed of type %s' % (str(key), type(key))) def __setitem__(self, key, value): ''' Mutator for the matrix. :param (slice/int rows, slice/int columns) key: The key value to set :param Wirevector/int/Matrix value: The value in which to set the key Called when setting a value using square brackets (e.g. `matrix[a, b] = value`). The value given will be truncated to match the bitwidth of all the elements in the matrix. 
''' if isinstance(key, tuple): rows, columns = key # First ensure that slices are correct if not isinstance(rows, slice): if not isinstance(rows, int): raise PyrtlError('Rows must be of type int or slice, ' 'instead "%s" was passed of type %s' % (str(rows), type(rows))) rows = slice(rows, rows + 1, 1) if not isinstance(columns, slice): if not isinstance(columns, int): raise PyrtlError('Columns must be of type int or slice, ' 'instead "%s" was passed of type %s' % (str(columns), type(columns))) columns = slice(columns, columns + 1, 1) if rows.start is None: rows = slice(0, rows.stop, rows.step) elif rows.start < 0: rows = slice(self.rows - abs(rows.start), rows.stop, rows.step) if rows.stop is None: rows = slice(rows.start, self.rows, rows.step) elif rows.stop < 0: rows = slice(rows.start, self.rows - abs(rows.stop), rows.step) if columns.start is None: columns = slice(0, columns.stop, columns.step) elif columns.start < 0: columns = slice(self.columns - abs(columns.start), columns.stop, columns.step) if columns.stop is None: columns = slice(columns.start, self.columns, columns.step) elif columns.stop < 0: columns = slice( columns.start, self.columns - abs(columns.stop), columns.step) # Check Slice Bounds if rows.start > self.rows or rows.stop > self.rows \ or rows.start < 0 or rows.stop < 0: raise PyrtlError("Invalid bounds for rows. Max rows: %s, got: %s" % ( str(self.rows), str(rows.start) + ":" + str(rows.stop))) if columns.start > self.columns or columns.stop > self.columns \ or columns.start < 0 or columns.stop < 0: raise PyrtlError("Invalid bounds for columns. Max columns: %s, got: %s" % ( str(self.columns), str(columns.start) + ":" + str(columns.stop))) # First case when setting value to Matrix if isinstance(value, Matrix): if value.rows != (rows.stop - rows.start): raise PyrtlError( 'Value rows mismatch. Expected Matrix ' 'of rows "%s", instead recieved Matrix of rows "%s"' % (str(rows.stop - rows.start), str(value.rows))) if value.columns != (columns.stop - columns.start): raise PyrtlError( 'Value columns mismatch. Expected Matrix ' 'of columns "%s", instead recieved Matrix of columns "%s"' % (str(columns.stop - columns.start), str(value.columns))) for i in range(rows.stop - rows.start): for j in range(columns.stop - columns.start): self._matrix[rows.start + i][columns.start + j] =\ as_wires(value[i, j], bitwidth=self.bits) # Second case when setting value to wirevector elif isinstance(value, (int, WireVector)): if ((rows.stop - rows.start) != 1) or \ ((columns.stop - columns.start) != 1): raise PyrtlError( 'Value mismatch: expected Matrix, instead received WireVector') self._matrix[rows.start][columns.start] = as_wires(value, bitwidth=self.bits) # Otherwise Error else: raise PyrtlError('Invalid value of type %s' % type(value)) else: # Second case if we just want to set a full row if isinstance(key, int): if key < 0: start = self.rows - abs(key) if start < 0: raise PyrtlError('Index %d is out of bounds for ' 'matrix with %d rows' % (key, self.rows)) key = slice(start, start + 1, None) else: key = slice(key, key + 1, None) self[key, :] = value # Third case if we want to set full rows elif isinstance(key, slice): self[key, :] = value else: raise PyrtlError('Rows must be of type int or slice, ' 'instead "%s" was passed of type %s' % (str(key), type(key))) def copy(self): ''' Constructs a deep copy of the Matrix. 
:return: a Matrix copy ''' return Matrix(self.rows, self.columns, self.bits, value=self.to_wirevector(), max_bits=self.max_bits) def __iadd__(self, other): ''' Perform the in-place addition operation. :return: a Matrix object with the elementwise addition being preformed Is used with `a += b`. Performs an elementwise addition. ''' new_value = (self + other) self._matrix = new_value._matrix self.bits = new_value._bits return self.copy() def __add__(self, other): ''' Perform the addition operation. :return: a Matrix object with the element wise addition being performed Is used with `a + b`. Performs an elementwise addition. ''' if not isinstance(other, Matrix): raise PyrtlError('error: expecting a Matrix, ' 'got %s instead' % type(other)) if self.columns != other.columns: raise PyrtlError('error: columns mismatch. ' 'Matrix a: %s columns, Matrix b: %s rows' % (str(self.columns), str(other.columns))) elif self.rows != other.rows: raise PyrtlError('error: row mismatch. ' 'Matrix a: %s columns, Matrix b: %s column' % (str(self.rows), str(other.rows))) new_bits = self.bits if other.bits > new_bits: new_bits = other.bits result = Matrix(self.rows, self.columns, new_bits + 1, max_bits=self.max_bits) for i in range(result.rows): for j in range(result.columns): result[i, j] = self[i, j] + other[i, j] return result def __isub__(self, other): ''' Perform the inplace subtraction opperation. :Matrix other: the PyRTL Matrix to subtract :return: a Matrix object with the element wise subtraction being performed Is used with `a -= b`. Performs an elementwise subtraction. ''' new_value = self - other self._matrix = new_value._matrix self._bits = new_value._bits return self.copy() def __sub__(self, other): ''' Perform the subtraction operation. :Matrix other: the PyRTL Matrix to subtract :return: a Matrix object with the elementwise subtraction being performed Is used with `a - b`. Performs an elementwise subtraction. Note: If using unsigned numbers, the result will be floored at 0. ''' if not isinstance(other, Matrix): raise PyrtlError('error: expecting a Matrix, ' 'got %s instead' % type(other)) if self.columns != other.columns: raise PyrtlError('error: columns mismatch. ' 'Matrix a: %s columns, Matrix b: %s rows' % (str(self.columns), str(other.columns))) if self.rows != other.rows: raise PyrtlError('error: row mismatch. ' 'Matrix a: %s columns, Matrix b: %s column' % (str(self.rows), str(other.rows))) new_bits = self.bits if other.bits > new_bits: new_bits = other.bits result = Matrix(self.rows, self.columns, new_bits, max_bits=self.max_bits) for i in range(result.rows): for j in range(result.columns): if self.signed: result[i, j] = self[i, j] - other[i, j] else: result[i, j] = select(self[i, j] > other[i, j], self[i, j] - other[i, j], Const(0)) return result def __imul__(self, other): ''' Perform the in-place multiplication operation. :param Matrix/Wirevector other: the Matrix or scalar to multiply :return: a Matrix object with the resulting multiplication operation being preformed Is used with `a *= b`. Performs an elementwise or scalar multiplication. ''' new_value = self * other self._matrix = new_value._matrix self._bits = new_value._bits return self.copy() def __mul__(self, other): ''' Perform the elementwise or scalar multiplication operation. :param Matrix/Wirevector other: the Matrix to multiply :return: a Matrix object with the resulting multiplication operation being performed Is used with `a * b`. 
''' if isinstance(other, Matrix): if self.columns != other.columns: raise PyrtlError('error: columns mismatch. ' 'Martrix a: %s columns, Matrix b: %s rows' % (str(self.columns), str(other.columns))) if self.rows != other.rows: raise PyrtlError('error, row mismatch ' 'Martrix a: %s columns, Matrix b: %s column' % (str(self.rows), str(other.rows))) bits = self.bits + other.bits elif isinstance(other, WireVector): bits = self.bits + len(other) else: raise PyrtlError('Expecting a Matrix or WireVector ' 'got %s instead' % type(other)) result = Matrix(self.rows, self.columns, bits, max_bits=self.max_bits) for i in range(self.rows): for j in range(self.columns): if isinstance(other, Matrix): result[i, j] = self[i, j] * other[i, j] else: result[i, j] = self[i, j] * other return result def __imatmul__(self, other): ''' Performs the inplace matrix multiplication operation. :param Matrix other: the second matrix. :return: a PyRTL Matrix that contains the matrix multiplication product of this and other Is used with `a @= b`. Note: The matmul symbol (@) only works in python 3.5+. Otherwise you must call `__imatmul__(other)`. ''' new_value = self.__matmul__(other) self.columns = new_value.columns self.rows = new_value.rows self._matrix = new_value._matrix self._bits = new_value._bits return self.copy() def __matmul__(self, other): ''' Performs the matrix multiplication operation. :param Matrix other: the second matrix. :return: a PyRTL Matrix that contains the matrix multiplication product of this and other Is used with `a @ b`. Note: The matmul symbol (@) only works in python 3.5+. Otherwise you must call `__matmul__(other)`. ''' if not isinstance(other, Matrix): raise PyrtlError('error: expecting a Matrix, ' 'got %s instead' % type(other)) if self.columns != other.rows: raise PyrtlError('error: rows and columns mismatch. ' 'Matrix a: %s columns, Matrix b: %s rows' % (str(self.columns), str(other.rows))) result = Matrix(self.rows, other.columns, self.columns * other.rows * (self.bits + other.bits), max_bits=self.max_bits) for i in range(self.rows): for j in range(other.columns): for k in range(self.columns): result[i, j] = mult.fused_multiply_adder( self[i, k], other[k, j], result[i, j], signed=self.signed) return result def __ipow__(self, power): ''' Performs the matrix power operation. :param int power: the power to perform the matrix on :return: a PyRTL Matrix that contains the matrix power product Is used with `a **= b`. ''' new_value = self ** power self._matrix = new_value._matrix self._bits = new_value._bits return self.copy() def __pow__(self, power): ''' Performs the matrix power operation. :param int power: the power to perform the matrix on :return: a PyRTL Matrix that contains the matrix power product Is used with `a ** b`. ''' if not isinstance(power, int): raise PyrtlError('Unexpected power given. 
Type int expected, ' 'but recieved type %s' % type(power)) if self.rows != self.columns: raise PyrtlError("Matrix must be square") result = self.copy() # First case: return identity matrix if power == 0: for i in range(self.rows): for j in range(self.columns): if i != j: result[i, j] = Const(0) else: result[i, j] = Const(1) return result # Second case: do matrix multiplications if power >= 1: inputs = [result] * power def pow_2(first, second): return first.__matmul__(second) return reduce(pow_2, inputs) raise PyrtlError('Power must be greater than or equal to 0') def put(self, ind, v, mode='raise'): ''' Replace specified elements of the matrix with given values :param int/list[int]/tuple[int] ind: target indices :param int/list[int]/tuple[int]/Matrix row-vector v: values to place in matrix at target indices; if v is shorter than ind, it is repeated as necessary :param str mode: how out-of-bounds indices behave; 'raise' raises an error, 'wrap' wraps aoround, and 'clip' clips to the range Note that the index is on the flattened matrix. ''' count = self.rows * self.columns if isinstance(ind, int): ind = (ind,) elif not isinstance(ind, (tuple, list)): raise PyrtlError("Expected int or list-like indices, got %s" % type(ind)) if isinstance(v, int): v = (v,) if isinstance(v, (tuple, list)) and len(v) == 0: return elif isinstance(v, Matrix): if v.rows != 1: raise PyrtlError( "Expected a row-vector matrix, instead got matrix with %d rows" % v.rows ) if mode not in ['raise', 'wrap', 'clip']: raise PyrtlError( "Unexpected mode %s; allowable modes are 'raise', 'wrap', and 'clip'" % mode ) def get_ix(ix): if ix < 0: ix = count - abs(ix) if ix < 0 or ix >= count: if mode == 'raise': raise PyrtlError("index %d is out of bounds with size %d" % (ix, count)) elif mode == 'wrap': ix = ix % count elif mode == 'clip': ix = 0 if ix < 0 else count - 1 return ix def get_value(ix): if isinstance(v, (tuple, list)): if ix >= len(v): return v[-1] # if v is shorter than ind, repeat last as necessary return v[ix] elif isinstance(v, Matrix): if ix >= count: return v[0, -1] return v[0, ix] for v_ix, mat_ix in enumerate(ind): mat_ix = get_ix(mat_ix) row = mat_ix // self.columns col = mat_ix % self.columns self[row, col] = get_value(v_ix) def reshape(self, *newshape, order='C'): ''' Create a matrix of the given shape from the current matrix. :param int/ints/tuple[int] newshape: shape of the matrix to return; if a single int, will result in a 1-D row-vector of that length; if a tuple, will use values for number of rows and cols. Can also be a varargs. :param str order: 'C' means to read from self using row-major order (C-style), and 'F' means to read from self using column-major order (Fortran-style). :return: A copy of the matrix with same data, with a new number of rows/cols One shape dimension in newshape can be -1; in this case, the value for that dimension is inferred from the other given dimension (if any) and the number of elements in the matrix. 
Examples:: int_matrix = [[0, 1, 2, 3], [4, 5, 6, 7]] matrix = Matrix.Matrix(2, 4, 4, value=int_matrix) matrix.reshape(-1) == [[0, 1, 2, 3, 4, 5, 6, 7]] matrix.reshape(8) == [[0, 1, 2, 3, 4, 5, 6, 7]] matrix.reshape(1, 8) == [[0, 1, 2, 3, 4, 5, 6, 7]] matrix.reshape((1, 8)) == [[0, 1, 2, 3, 4, 5, 6, 7]] matrix.reshape((1, -1)) == [[0, 1, 2, 3, 4, 5, 6, 7]] matrix.reshape(4, 2) == [[0, 1], [2, 3], [4, 5], [6, 7]] matrix.reshape(-1, 2) == [[0, 1], [2, 3], [4, 5], [6, 7]] matrix.reshape(4, -1) == [[0, 1], [2, 3], [4, 5], [6, 7]] ''' count = self.rows * self.columns if isinstance(newshape, int): if newshape == -1: newshape = (1, count) else: newshape = (1, newshape) elif isinstance(newshape, tuple): if isinstance(newshape[0], tuple): newshape = newshape[0] if len(newshape) == 1: newshape = (1, newshape[0]) if len(newshape) > 2: raise PyrtlError("length of newshape tuple must be <= 2") rows, cols = newshape if not isinstance(rows, int) or not isinstance(cols, int): raise PyrtlError( "newshape dimensions must be integers, instead got %s" % type(newshape) ) if rows == -1 and cols == -1: raise PyrtlError("Both dimensions in newshape cannot be -1") if rows == -1: rows = count // cols newshape = (rows, cols) elif cols == -1: cols = count // rows newshape = (rows, cols) else: raise PyrtlError( "newshape can be an integer or tuple of integers, not %s" % type(newshape) ) rows, cols = newshape if rows * cols != count: raise PyrtlError( "Cannot reshape matrix of size %d into shape %s" % (count, str(newshape)) ) if order not in 'CF': raise PyrtlError( "Invalid order %s. Acceptable orders are 'C' (for row-major C-style order) " "and 'F' (for column-major Fortran-style order)." % order ) value = [[0] * cols for _ in range(rows)] ix = 0 if order == 'C': # Read and write in row-wise order for newr in range(rows): for newc in range(cols): r = ix // self.columns c = ix % self.columns value[newr][newc] = self[r, c] ix += 1 else: # Read and write in column-wise order for newc in range(cols): for newr in range(rows): r = ix % self.rows c = ix // self.rows value[newr][newc] = self[r, c] ix += 1 return Matrix(rows, cols, self.bits, self.signed, value, self.max_bits) def flatten(self, order='C'): ''' Flatten the matrix into a single row. :param str order: 'C' means row-major order (C-style), and 'F' means column-major order (Fortran-style) :return: A copy of the matrix flattened in to a row vector matrix ''' return self.reshape(self.rows * self.columns, order=order) def multiply(first, second): ''' Perform the elementwise or scalar multiplication operation. :param Matrix first: first matrix :param Matrix/Wirevector second: second matrix :return: a Matrix object with the element wise or scaler multiplication being performed ''' if not isinstance(first, Matrix): raise PyrtlError('error: expecting a Matrix, ' 'got %s instead' % type(second)) return first * second def sum(matrix, axis=None, bits=None): ''' Returns the sum of all the values in a matrix :param Matrix/Wirevector matrix: the matrix to perform sum operation on. If it is a WireVector, it will return itself :param None/int axis: The axis to perform the operation on None refers to sum of all item. 0 is sum of column. 1 is sum of rows. Defaults to None :param int bits: The bits per value of the sum. 
Defaults to bits of old matrix :return: A wirevector or Matrix representing sum ''' def sum_2(first, second): return first + second if isinstance(matrix, WireVector): return matrix if not isinstance(matrix, Matrix): raise PyrtlError('error: expecting a Matrix or Wirevector for matrix, ' 'got %s instead' % type(matrix)) if not isinstance(bits, int) and bits is not None: raise PyrtlError('error: expecting an int/None for bits, ' 'got %s instead' % type(bits)) if not isinstance(axis, int) and axis is not None: raise PyrtlError('error: expecting an int or None for axis, ' 'got %s instead' % type(axis)) if bits is None: bits = matrix.bits if bits <= 0: raise PyrtlError('error: bits cannot be negative or zero, ' 'got %s instead' % bits) if axis is None: inputs = [] for i in range(matrix.rows): for j in range(matrix.columns): inputs.append(matrix[i, j]) return reduce(sum_2, inputs) if axis == 0: result = Matrix(1, matrix.columns, signed=matrix.signed, bits=bits) for i in range(matrix.columns): inputs = [] for j in range(matrix.rows): inputs.append(matrix[j, i]) result[0, i] = reduce(sum_2, inputs) return result if axis == 1: result = Matrix(1, matrix.rows, signed=matrix.signed, bits=bits) for i in range(matrix.rows): inputs = [] for j in range(matrix.columns): inputs.append(matrix[i, j]) result[0, i] = reduce(sum_2, inputs) return result raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis) def min(matrix, axis=None, bits=None): ''' Returns the minimum value in a matrix. :param Matrix/Wirevector matrix: the matrix to perform min operation on. If it is a WireVector, it will return itself :param None/int axis: The axis to perform the operation on None refers to min of all item. 0 is min of column. 1 is min of rows. Defaults to None :param int bits: The bits per value of the min. Defaults to bits of old matrix :return: A WireVector or Matrix representing the min value ''' def min_2(first, second): return select(first < second, first, second) if isinstance(matrix, WireVector): return matrix if not isinstance(matrix, Matrix): raise PyrtlError('error: expecting a Matrix or Wirevector for matrix, ' 'got %s instead' % type(matrix)) if not isinstance(bits, int) and bits is not None: raise PyrtlError('error: expecting an int/None for bits, ' 'got %s instead' % type(bits)) if not isinstance(axis, int) and axis is not None: raise PyrtlError('error: expecting an int or None for axis, ' 'got %s instead' % type(axis)) if bits is None: bits = matrix.bits if bits <= 0: raise PyrtlError('error: bits cannot be negative or zero, ' 'got %s instead' % bits) if axis is None: inputs = [] for i in range(matrix.rows): for j in range(matrix.columns): inputs.append(matrix[i, j]) return reduce(min_2, inputs) if axis == 0: result = Matrix(1, matrix.columns, signed=matrix.signed, bits=bits) for i in range(matrix.columns): inputs = [] for j in range(matrix.rows): inputs.append(matrix[j, i]) result[0, i] = reduce(min_2, inputs) return result if axis == 1: result = Matrix(1, matrix.rows, signed=matrix.signed, bits=bits) for i in range(matrix.rows): inputs = [] for j in range(matrix.columns): inputs.append(matrix[i, j]) result[0, i] = reduce(min_2, inputs) return result raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis) def max(matrix, axis=None, bits=None): ''' Returns the max value in a matrix. :param Matrix/Wirevector matrix: the matrix to perform max operation on. 
If it is a wirevector, it will return itself :param None/int axis: The axis to perform the operation on None refers to max of all items. 0 is max of the columns. 1 is max of rows. Defaults to None :param int bits: The bits per value of the max. Defaults to bits of old matrix :return: A WireVector or Matrix representing the max value ''' def max_2(first, second): return select(first > second, first, second) if isinstance(matrix, WireVector): return matrix if not isinstance(matrix, Matrix): raise PyrtlError('error: expecting a Matrix or WireVector for matrix, ' 'got %s instead' % type(matrix)) if not isinstance(bits, int) and bits is not None: raise PyrtlError('error: expecting an int/None for bits, ' 'got %s instead' % type(bits)) if not isinstance(axis, int) and axis is not None: raise PyrtlError('error: expecting an int or None for axis, ' 'got %s instead' % type(axis)) if bits is None: bits = matrix.bits if bits <= 0: raise PyrtlError('error: bits cannot be negative or zero, ' 'got %s instead' % bits) if axis is None: inputs = [] for i in range(matrix.rows): for j in range(matrix.columns): inputs.append(matrix[i, j]) return reduce(max_2, inputs) if axis == 0: result = Matrix( 1, matrix.columns, signed=matrix.signed, bits=bits) for i in range(matrix.columns): inputs = [] for j in range(matrix.rows): inputs.append(matrix[j, i]) result[0, i] = reduce(max_2, inputs) return result if axis == 1: result = Matrix( 1, matrix.rows, signed=matrix.signed, bits=bits) for i in range(matrix.rows): inputs = [] for j in range(matrix.columns): inputs.append(matrix[i, j]) result[0, i] = reduce(max_2, inputs) return result raise PyrtlError('Axis invalid: expected (None, 0, or 1), got %s' % axis) def argmax(matrix, axis=None, bits=None): ''' Returns the index of the max value of the matrix. :param Matrix/Wirevector matrix: the matrix to perform argmax operation on. If it is a WireVector, it will return itself :param None/int axis: The axis to perform the operation on. None refers to argmax of all items. 0 is argmax of the columns. 1 is argmax of rows. Defaults to None :param int bits: The bits per value of the argmax. Defaults to bits of old matrix :return: A WireVector or Matrix representing the argmax value NOTE: If there are two indices with the same max value, this function picks the first instance. 
''' if isinstance(matrix, WireVector): return Const(0) if not isinstance(matrix, Matrix): raise PyrtlError('error: expecting a Matrix or Wirevector for matrix, ' 'got %s instead' % type(matrix)) if not isinstance(bits, int) and bits is not None: raise PyrtlError('error: expecting an int/None for bits, ' 'got %s instead' % type(bits)) if not isinstance(axis, int) and axis is not None: raise PyrtlError('error: expecting an int or None for axis, ' 'got %s instead' % type(axis)) if bits is None: bits = matrix.bits if bits <= 0: raise PyrtlError('error: bits cannot be negative or zero, ' 'got %s instead' % bits) max_number = max(matrix, axis=axis, bits=bits) if axis is None: index = Const(0) arg = matrix.rows * matrix.columns - 1 for i in reversed(range(matrix.rows)): for j in reversed(range(matrix.columns)): index = select( max_number == matrix[i, j], Const(arg), index) arg -= 1 return index if axis == 0: result = Matrix( 1, matrix.columns, signed=matrix.signed, bits=bits) for i in range(matrix.columns): local_max = max_number[0, i] index = Const(0) arg = matrix.rows - 1 for j in reversed(range(matrix.rows)): index = select( local_max == matrix[j, i], Const(arg), index) arg -= 1 result[0, i] = index return result if axis == 1: result = Matrix( 1, matrix.rows, signed=matrix.signed, bits=bits) for i in range(matrix.rows): local_max = max_number[0, i] index = Const(0) arg = matrix.columns - 1 for j in reversed(range(matrix.columns)): index = select( local_max == matrix[i, j], Const(arg), index) arg -= 1 result[0, i] = index return result def dot(first, second): ''' Performs the dot product on two matrices. :param Matrix first: the first matrix :param Matrix second: the second matrix :return: a PyRTL Matrix that contains the dot product of the two PyRTL Matrices Specifically, the dot product on two matrices is * If either first or second are WireVectors/have both rows and columns equal to 1, it is equivalent to multiply * If both first and second are both arrays (have rows or columns equal to 1), it is inner product of vectors. * Otherwise it is the matrix multiplaction between first and second NOTE: Row vectors and column vectors are both treated as arrays ''' if not isinstance(first, (WireVector, Matrix)): raise PyrtlError('error: expecting a Matrix, ' 'got %s instead' % type(first)) if not isinstance(second, (WireVector, Matrix)): raise PyrtlError('error: expecting a Matrix/WireVector, ' 'got %s instead' % type(second)) # First case when it is multiply if isinstance(first, WireVector): if isinstance(second, WireVector): return first * second return second[:, :] * first if isinstance(second, WireVector): return first[:, :] * second if (first.rows == 1 and first.columns == 1) \ or (second.rows == 1 and second.columns == 1): return first[:, :] * second[:, :] # Second case when it is Inner Product if first.rows == 1: if second.rows == 1: return sum(first * second) if second.columns == 1: return sum(first * second.transpose()) elif first.columns == 1: if second.rows == 1: return sum(first * second.transpose()) if second.columns == 1: return sum(first * second) # Third case when it is Matrix Multiply return first.__matmul__(second) def hstack(*matrices): """ Stack matrices in sequence horizontally (column-wise). 
:param list[Matrix] matrices: a list of matrices to concatenate one after another horizontally :return Matrix: a new Matrix, with the same number of rows as the input matrices, with a bitwidth equal to the max of the bitwidths of all the matrices All the matrices must have the same number of rows and same 'signed' value. For example:: m1 = Matrix(2, 3, bits=5, value=[[1,2,3], [4,5,6]]) m2 = Matrix(2, 1, bits=10, value=[[17], [23]]) m3 = hstack(m1, m2) m3 looks like:: [[1,2,3,17], [4,5,6,23]] """ if len(matrices) == 0: raise PyrtlError("Must supply at least one matrix to hstack()") if any([not isinstance(matrix, Matrix) for matrix in matrices]): raise PyrtlError("All arguments to hstack must be matrices.") if len(matrices) == 1: return matrices[0].copy() new_rows = matrices[0].rows if any([m.rows != new_rows for m in matrices]): raise PyrtlError("All matrices being hstacked together must have the same number of rows") new_signed = matrices[0].signed if any([m.signed != new_signed for m in matrices]): raise PyrtlError("All matrices being hstacked together must have the same signedness") new_cols = builtins.sum(m.columns for m in matrices) new_bits = builtins.max(m.bits for m in matrices) new_max_bits = builtins.max(m.max_bits for m in matrices) new = Matrix(new_rows, new_cols, new_bits, max_bits=new_max_bits) new_c = 0 for matrix in matrices: for c in range(matrix.columns): for r in range(matrix.rows): new[r, new_c] = matrix[r, c] new_c += 1 return new def vstack(*matrices): """ Stack matrices in sequence vertically (row-wise). :param list[Matrix] matrices: a list of matrices to concatenate one after another vertically :return Matrix: a new Matrix, with the same number of columns as the input matrices, with a bitwidth equal to the max of the bitwidths of all the matrices All the matrices must have the same number of columns and same 'signed' value. For example:: m1 = Matrix(2, 3, bits=5, value=[[1,2,3], [4,5,6]]) m2 = Matrix(1, 3, bits=10, value=[[7,8,9]]) m3 = vstack(m1, m2) m3 looks like:: [[1,2,3], [4,5,6], [7,8,9]] """ if len(matrices) == 0: raise PyrtlError("Must supply at least one matrix to vstack()") if any([not isinstance(matrix, Matrix) for matrix in matrices]): raise PyrtlError("All arguments to vstack must be matrices.") if len(matrices) == 1: return matrices[0].copy() new_cols = matrices[0].columns if any([m.columns != new_cols for m in matrices]): raise PyrtlError("All matrices being vstacked together must have the " "same number of columns") new_signed = matrices[0].signed if any([m.signed != new_signed for m in matrices]): raise PyrtlError("All matrices being vstacked together must have the same signedness") new_rows = builtins.sum(m.rows for m in matrices) new_bits = builtins.max(m.bits for m in matrices) new_max_bits = builtins.max(m.max_bits for m in matrices) new = Matrix(new_rows, new_cols, new_bits, max_bits=new_max_bits) new_r = 0 for matrix in matrices: for r in range(matrix.rows): for c in range(matrix.columns): new[new_r, c] = matrix[r, c] new_r += 1 return new def concatenate(matrices, axis=0): """ Join a sequence of matrices along an existing axis. :param list[Matrix] matrices: a list of matrices to concatenate one after another :param int axis: axis along which to join; 0 is horizontally, 1 is vertically (defaults to 0) :return: a new Matrix composed of the given matrices joined together This function essentially wraps hstack/vstack.
""" if axis == 0: return hstack(*matrices) elif axis == 1: return vstack(*matrices) else: raise PyrtlError("Only allowable axes are 0 or 1") def matrix_wv_to_list(matrix_wv, rows, columns, bits): ''' Convert a WireVector representing a matrix into a Python list of lists. :param WireVector matrix_wv: result of calling to_wirevector() on a Matrix object :param int rows: number of rows in the matrix matrix_wv represents :param int columns: number of columns in the matrix matrix_wv represents :param int bits: number of bits in each element of the matrix matrix_wv represents :return list[list[int]]: a Python list of lists This is useful when printing the value of a wire you've inspected during Simulation that you know represents a matrix. Example:: values = [[1, 2, 3], [4, 5, 6]] rows = 2 cols = 3 bits = 4 m = Matrix.Matrix(rows, cols, bits, value=values) output = Output(name='output') output <<= m.to_wirevector() sim = Simulation() sim.step({}) raw_matrix = Matrix.matrix_wv_to_list(sim.inspect('output'), rows, cols, bits) print(raw_matrix) # Produces: # [[1, 2, 3], [4, 5, 6]] ''' value = bin(matrix_wv)[2:].zfill(rows * columns * bits) result = [[0 for _ in range(columns)] for _ in range(rows)] bit_pointer = 0 for i in range(rows): for j in range(columns): int_value = int(value[bit_pointer: bit_pointer + bits], 2) result[i][j] = int_value bit_pointer += bits return result def list_to_int(matrix, n_bits): ''' Convert a Python matrix (a list of lists) into an integer. :param list[list[int]] matrix: a pure Python list of lists representing a matrix :param int n_bits: number of bits to be used to represent each element; if an element doesn't fit in n_bits, it truncates the most significant bits :return int: an N*n_bits wide integer containing the elements of `matrix`, where N is the number of elements in `matrix` Integers that are signed will automatically be converted to their two's complement form. This function is helpful for turning a pure Python list of lists into an integer suitable for creating a Constant wirevector that can be passed in as a Matrix initializer's `value` argument, or for passing into a Simulation's step function for a particular input wire. For example, calling Matrix.list_to_int([[3, 5], [7, 9]], 4) produces 13,689, which in binary looks like this:: 0011 0101 0111 1001 Note how the elements of the list of lists were added, 4 bits at a time, in row order, such that the element at row 0, column 0 is in the most significant 4 bits, and the element at row 1, column 1 is in the least significant 4 bits. Here's an example of using it in simulation:: a_vals = [[0, 1], [2, 3]] b_vals = [[2, 4, 6], [8, 10, 12]] a_in = pyrtl.Input(4 * 4, 'a_in') b_in = pyrtl.Input(6 * 4, 'b_in') a = Matrix.Matrix(2, 2, 4, value=a_in) b = Matrix.Matrix(2, 3, 4, value=b_in) ... sim = pyrtl.Simulation() sim.step({ 'a_in': Matrix.list_to_int(a_vals, 4), 'b_in': Matrix.list_to_int(b_vals, 4) }) ''' if n_bits <= 0: raise PyrtlError("Number of bits per element must be positive, instead got %d" % n_bits) result = 0 for i in range(len(matrix)): for j in range(len(matrix[0])): val = formatted_str_to_val(str(matrix[i][j]), 's' + str(n_bits)) result = (result << n_bits) | val return result
bsd-3-clause
5,829,635,579,326,780,000
37.966429
99
0.54151
false
aschleg/mathpy
mathpy/random/random.py
1
12138
# encoding=utf8 import random import sys import numpy as np from mathpy.numtheory import isrelativelyprime def lcg(n, seed=None): r""" Implementation of a linear congruential generator for generating n random samples in U(0, 1). Parameters ---------- n : int The number of random samples to generate seed : int, default None Seed value for the generator. If None, a starting value is chosen at random. Returns ------- array-like numpy array of length :math:`n` of randomly generated numbers in the range :math:`U(0, 1)`. Raises ------ ValueError number of randomly generated values to return must be at least one. Notes ----- Linear congruential generators (LCGs) are a class of pseudorandom number generator (PRNG) algorithms used for generating sequences of random-like numbers. The generation of random numbers plays a large role in many applications ranging from cryptography to Monte Carlo methods. Linear congruential generators are one of the oldest and most well-known methods for generating random numbers primarily due to their comparative ease of implementation and speed and their need for little memory. Other methods such as the Mersenne Twister are much more common in practical use today. Linear congruential generators are defined by a recurrence relation: .. math:: \large{X_{i+1} = (aX_i + c) \space \text{mod} \space m} There are many choices for the parameters :math:`m`, the modulus, :math:`a`, the multiplier, and :math:`c` the increment. Wikipedia has a seemingly comprehensive list of the parameters in common use here: https://en.wikipedia.org/wiki/Linear_congruential_generator#Parameters_in_common_use References ---------- Saucier, R. (2000). Computer Generation of Statistical Distributions (1st ed.). Aberdeen, MD. Army Research Lab. """ rn = np.empty(n, np.float64) m = 2 ** 32 a = 1103515245 c = 12345 if seed is None: d = random.randrange(sys.maxsize) else: d = seed d = (a * d + c) % m for i in np.arange(n): d = (a * d + c) % m rn[i] = d / m return rn def mcg(n, seed=None): r""" Implementation of a Lehmer random number generator, also known as a multiplicative congruential generator, for generating n random samples in U(0, 1). Parameters ---------- n : int The number of random samples to generate seed : int, default None Seed value for the generator. If None, a starting value is chosen at random; the seed is incremented as needed until it is relatively prime to the modulus. Returns ------- array-like numpy array of length :math:`n` of randomly generated numbers in the range :math:`U(0, 1)`. Notes ----- Multiplicative congruential generators, also known as Lehmer random number generators, are a type of linear congruential generator for generating pseudorandom numbers in :math:`U(0, 1)`. The multiplicative congruential generator, often abbreviated as MLCG or MCG, is defined as a recurrence relation similar to the LCG with :math:`c = 0`. .. math:: X_{i+1} = aX_i \space \text{mod} \space m Unlike the LCG, the parameters :math:`a` and :math:`m` for multiplicative congruential generators are more restricted and the initial seed :math:`X_0` must be relatively prime to the modulus :math:`m` (the greatest common divisor between :math:`X_0` and :math:`m` is :math:`1`). The current parameters in common use are :math:`m = 2^{31} - 1 = 2,147,483,647 \text{ and } a = 7^5 = 16,807`. However, in a correspondence from the Communications of the ACM, Park, Miller and Stockmeyer changed the value of the parameter :math:`a`, stating: "The minimal standard Lehmer generator we advocated had a modulus of m = 2^31 - 1 and a multiplier of a = 16807. Relative to this particular choice of multiplier, we wrote "... if this paper were to be written again in a few years it is quite possible that we would advocate a different multiplier ....
" We are now prepared to do so. That is, we now advocate a = 48271 and, indeed, have done so "officially" since July 1990. This new advocacy is consistent with the discussion on page 1198 of [10]. There is nothing wrong with 16807; we now believe, however, that 48271 is a little better (with q = 44488, r = 3399). When using a large prime modulus :math:`m` such as :math:`2^{31} - 1`, the multiplicative congruential generator can overflow. Schrage's method was invented to overcome the possibility of overflow and is based on the fact that :math:`a(m \space \text{mod} \space a) < m`. We can check the parameters in use satisfy this condition: Schrage's method restates the modulus :math:`m1` as a decomposition :math:`m = aq + r` where :math:`r = m \space \text{mod} \space a` and :math:`q = m / a`. .. math:: ax \space \text{mod} \space m = \begin{cases} a(x \space \text{mod} \space q) - r\frac{x}{q} & \text{if} \space x \space \text{is} \geq 0 \\ a(x \space \text{mod} \space q) - r\frac{x}{q} + m & \text{if} \space x \space \text{is} \leq 0 \end{cases} References ---------- Anne Gille-Genest (March 1, 2012). Implementation of the Pseudo-Random Number Generators and the Low Discrepancy Sequences. Saucier, R. (2000). Computer Generation of Statistical Distributions (1st ed.). Aberdeen, MD. Army Research Lab. Stephen K. Park; Keith W. Miller; Paul K. Stockmeyer (1988). "Technical Correspondence". Communications of the ACM. 36 (7): 105-110. """ rn = np.empty(n, dtype=np.float64) m = 2147483647 a = 48271 # 16807 q = 44488 # 127773 r = 3399 # 2836 if seed is None: s = random.randrange(sys.maxsize) else: s = seed while isrelativelyprime(s, m) is False: s += 1 for i in np.arange(n): h = s / q l = s % q t = a * l - r * h if t > 0: s = t else: s = t + m rn[i] = s / m return rn def clcg_32bit(n, seed=None): r""" Implementation of a combined linear congruential generator suited for 32-bit processors as proposed by L'Ecuyer. Parameters ---------- n : int The number of random samples to generate Returns ------- array-like numpy array of length :math`n` of randomly generated numbers in the range :math:`U(0, 1)`. Notes ----- Combined linear congruential generators are a type of PRNG (pseudorandom number generator) that combine two or more LCGs (linear congruential generators). The combination of two or more LCGs into one random number generator can result in a marked increase in the period length of the generator which makes them better suited for simulating more complex systems. The combined linear congruential generator algorithm is defined as: .. math:: X_i \equiv \Bigg(\sum^k_{j=1} (-1)^{j-1} Y_{i,j} \Bigg) \space (\text{mod} \space (m_1 - 1)) Where :math:`m_1` is the modulus of the LCG, :math:`Y_{i,j}` is the :math:`ith` input from the :math:`jth` LCG and :math:`X_i` is the :math:`ith` random generated value. L'Ecuyer describes a combined linear generator that utilizes two LCGs in *Efficient and Portable Combined Random Number Generators* for 32-bit processors. To be precise, the congruential generators used are actually multiplicative since :math:`c_1 = c_2 = 0`. The parameters used for the MCGs are: .. math:: a_1 = 40014 \qquad m_1 = 2147483563 \qquad a_2 = 40692 \qquad m_2 = 2147483399 The combined linear congruential generator algorithm proposed by L'Ecuyer can be described with the following steps: The two MCGs, :math:`Y_{0,1}, \space Y_{0,2}`, are seeded. The seed values are recommended to be in the range :math:`[1, m_1 - 1]` and :math:`[1, m_2 - 1]`, respectively. 
Next, the two MCGs are evaluated using the algorithm above: .. math:: Y_{i+1,1} = a_1 \times Y_{i,1} (\text{mod} \space m_1) \qquad Y_{i+1,2} = a_1 \times Y_{i,2} (\text{mod} \space m_2) With :math:`Y_{i+1,1} \text{and} Y_{i+1,2}` evaluated, find :math:`X_{i+1}` .. math:: X_{i+1} = (Y_{i+1,1} - Y_{i+1,2}) \space \text{mod} \space m_1 - 1 Finally, the random number to be output can be generated: .. math:: R_{i+1} = \begin{cases} \frac{X_{i+1}}{m_1} & \text{for} \space X_{i+1} > 0 \\ (\frac{X_{i+1}}{m_1}) + 1 & \text{for} \space X_{i+1} < 0 \\ \frac{(m_1 - 1)}{m_1} & \text{for} \space X_{i+1} = 0 \end{cases} References ---------- Combined Linear Congruential Generator. (2017, July 5). In Wikipedia, The Free Encyclopedia. From https://en.wikipedia.org/w/index.php?title=Combined_Linear_Congruential_Generator&oldid=789099445 Pierre L'Ecuyer (1988). Efficient and Portable Combined Random Number Generators. Communications of the ACM. 31: 742–749, 774. doi:10.1145/62959.62969 Pierre L'Ecuyer, (1999) Good Parameters and Implementations for Combined Multiple Recursive Random Number Generators. Operations Research 47(1):159-164. doi.org/10.1287/opre.47.1.159 """ rn = np.empty(n, dtype=np.float64) random.seed(seed) a1, a2 = 40014, 40692 m1, m2 = 2147483563, 2147483399 y1, y2 = random.randrange(1, m1 - 1), random.randrange(1, m2 - 1) for i in np.arange(n): y1, y2 = a1 * y1 % m1, a2 * y2 % m2 x = (y1 - y2) % (m1 - 1) if x > 0: r = x / m1 elif x < 0: r = (x / m1) + 1 else: # x == 0 r = (m1 - 1) / m1 rn[i] = r return rn def clcg_16bit(n, seed=None): r""" Implementation of a combined linear congruential generator suited for 16-bit processors as proposed by L'Ecuyer. Parameters ---------- n : int The number of random samples to generate Returns ------- list or float If n is greater than 1, a list of the generated random values is returned. If n is equal to 1, the generated value is returned as float. Notes ----- The 16-bit version of the combined linear congruential generator proceeds in the same way as the 32-bit version but uses three MCGs with the following parameters: .. math:: a_1 = 157 \qquad m_1 = 32363 \qquad a_2 = 146 \qquad m_2 = 31727 \qquad a_3 = 142 \qquad m_3 = 31657 See Also -------- clcg_32bit() : Function 32-bit implementation of a combined linear congruential generator as proposed by L'Ecuyer. References ---------- Combined Linear Congruential Generator. (2017, July 5). In Wikipedia, The Free Encyclopedia. From https://en.wikipedia.org/w/index.php?title=Combined_Linear_Congruential_Generator&oldid=789099445 Pierre L'Ecuyer (1988). Efficient and Portable Combined Random Number Generators. Communications of the ACM. 31: 742–749, 774. doi:10.1145/62959.62969 Pierre L'Ecuyer, (1999) Good Parameters and Implementations for Combined Multiple Recursive Random Number Generators. Operations Research 47(1):159-164. doi.org/10.1287/opre.47.1.159 """ rn = np.empty(n, dtype=np.float64) random.seed(seed) a1, a2, a3 = 157, 146, 142 m1, m2, m3 = 32363, 31727, 31657 y1, y2, y3 = random.randrange(1, m1 - 1), \ random.randrange(1, m2 - 1), \ random.randrange(1, m3 - 1) for i in np.arange(n): y1, y2, y3 = a1 * y1 % m1, \ a2 * y2 % m2, \ a3 * y3 % m3 x = (y1 - y2 - y3) % (m1 - 1) if x > 0: r = x / m1 elif x < 0: r = (x / m1) + 1 else: # x == 0 r = (m1 - 1) / m1 rn[i] = r return rn
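# A small standalone check (not part of mathpy) of the Schrage decomposition used in mcg() above.
# With m = a*q + r and r < q, the quantity a*(x % q) - r*(x // q) stays within a 32-bit signed
# range, and adding m whenever the intermediate result is not positive reproduces (a * x) % m
# exactly. Plain integer arithmetic is used; the constants mirror those in mcg().
if __name__ == '__main__':
    m, a = 2147483647, 48271
    q, r = m // a, m % a  # 44488 and 3399, matching the constants used in mcg()
    assert m == a * q + r and r < q

    def schrage_step(x):
        """One Lehmer step, a*x mod m, computed via Schrage's method."""
        t = a * (x % q) - r * (x // q)
        return t if t > 0 else t + m

    x = 123456789
    for _ in range(5):
        nxt = schrage_step(x)
        assert nxt == (a * x) % m  # agrees with the direct modular computation
        x = nxt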
mit
6,682,255,296,660,250,000
33.682353
121
0.607896
false
schlegelp/pymaid
pymaid/user_stats.py
1
48870
# This script is part of pymaid (http://www.github.com/schlegelp/pymaid). # Copyright (C) 2017 Philipp Schlegel # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along """This module contains functions to retrieve user statistics. Examples -------- >>> import pymaid >>> myInstance = pymaid.CatmaidInstance('https://www.your.catmaid-server.org', ... api_token='YOURTOKEN', ... http_user='HTTP_PASSWORD', # omit if not required ... http_password='TOKEN') >>> skeleton_ids = pymaid.get_skids_by_annotation('Hugin') >>> cont = pymaid.get_user_contributions(skeleton_ids) >>> cont user nodes presynapses postsynapses 0 Schlegel 47221 470 1408 1 Tran 1645 7 4 2 Lacin 1300 1 20 3 Li 1244 5 45 ... >>> # Get the time that each user has invested >>> time_inv = pymaid.get_time_invested(skeleton_ids, ... remote_instance = myInstance) >>> time_inv user total creation edition review 0 Schlegel 4649 3224 2151 1204 1 Tran 174 125 59 0 2 Li 150 114 65 0 3 Lacin 133 119 30 0 ... >>> # Plot contributions as pie chart >>> import plotly >>> fig = {"data": [{"values": time_inv.total.tolist(), ... "labels": time_inv.user.tolist(), ... "type": "pie"}]} >>> plotly.offline.plot(fig) """ # TODOs # - Github punch card-like figure import datetime import pandas as pd import numpy as np from . import core, fetch, utils, config # Set up logging logger = config.logger __all__ = ['get_user_contributions', 'get_time_invested', 'get_user_actions', 'get_team_contributions', 'get_user_stats'] def get_user_stats(start_date=None, end_date=None, remote_instance=None): """Get user stats similar to the pie chart statistics widget in CATMAID. Returns cable [nm], nodes created/reviewed and connector links created. Parameters ---------- start_date : tuple | datetime.date, optional end_date : tuple | datetime.date, optional Start and end date of time window to check. If ``None``, will use entire project history. remote_instance : CatmaidInstance, optional Either pass explicitly or define globally. Returns ------- pandas.DataFrame Dataframe in which each row represents a user:: cable nodes_created nodes_reviewed links_created username user1 ... user2 ... Examples -------- Create a pie chart similar to the stats widget in CATMAID: >>> import matplotlib.pyplot as plt >>> stats = pymaid.get_user_stats() >>> stats_to_plot = ['cable', 'nodes_created', 'nodes_reviewed', ... 'links_created'] >>> fig, axes = plt.subplots(1, len(stats_to_plot), figsize=(12, 4)) >>> for s, ax in zip(stats_to_plot, axes): ... # Get the top 10 contributors for this stat ... this_stats = stats[s].sort_values(ascending=False).iloc[:10] ... # Calculate "others" ... this_stats.loc['others'] = stats[s].sort_values(ascending=False).iloc[10:].sum() ... # Plot ... this_stats.plot.pie(ax=ax, textprops={'size': 6}, ... explode=[.05] * this_stats.shape[0], ... rotatelabels=True) ... # Make labels a bit smaller ... ax.set_ylabel(s.replace('_', ' '), fontsize=8) >>> plt.show() See Also -------- :func:`~pymaid.get_history` Returns day-by-day stats. 
""" remote_instance = utils._eval_remote_instance(remote_instance) if isinstance(start_date, type(None)): start_date = datetime.date(2010, 1, 1) elif not isinstance(start_date, datetime.date): start_date = datetime.date(*start_date) if isinstance(end_date, type(None)): end_date = datetime.date.today() elif not isinstance(end_date, datetime.date): end_date = datetime.date(*end_date) # Get and summarize other stats hist = fetch.get_history(remote_instance=remote_instance, start_date=start_date, end_date=end_date) stats = pd.concat([hist.cable.sum(axis=1), hist.treenodes.sum(axis=1), hist.reviewed.sum(axis=1), hist.connector_links.sum(axis=1)], axis=1, sort=True).fillna(0).astype(int) stats.index.name = 'username' stats.columns = ['cable', 'nodes_created', 'nodes_reviewed', 'links_created'] stats.sort_values('nodes_created', ascending=False, inplace=True) return stats def get_team_contributions(teams, neurons=None, remote_instance=None): """Get contributions by teams (nodes, reviews, connectors, time invested). Notes ----- 1. Time calculation uses defaults from :func:`pymaid.get_time_invested`. 2. ``total_reviews`` > ``total_nodes`` is possible if nodes have been reviewed multiple times by different users. Similarly, ``total_reviews`` = ``total_nodes`` does not imply that the neuron is fully reviewed! Parameters ---------- teams dict Teams to group contributions for. Users must be logins. Format can be either: 1. Simple user assignments. For example:: {'teamA': ['user1', 'user2'], 'team2': ['user3'], ...]} 2. Users with start and end dates. Start and end date must be either ``datetime.date`` or a single ``pandas.date_range`` object. For example:: {'team1': { 'user1': (datetime.date(2017, 1, 1), datetime.date(2018, 1, 1)), 'user2': (datetime.date(2016, 6, 1), datetime.date(2017, 1, 1) } 'team2': { 'user3': pandas.date_range('2017-1-1', '2018-1-1'), }} Mixing both styles is permissible. For second style, use e.g. ``'user1': None`` for no date restrictions on that user. neurons skeleton ID(s) | CatmaidNeuron/List, optional Restrict check to given set of neurons. If CatmaidNeuron/List, will use this neurons nodes/ connectors. Use to subset contributions e.g. to a given neuropil by pruning neurons before passing to this function. remote_instance : CatmaidInstance, optional Either pass explicitly or define globally. Returns ------- pandas.DataFrame DataFrame in which each row represents a neuron. Example for two teams, ``teamA`` and ``teamB``:: skeleton_id total_nodes teamA_nodes teamB_nodes ... 0 1 total_reviews teamA_reviews teamB_reviews ... 0 1 total_connectors teamA_connectors teamB_connectors ... 0 1 total_time teamA_time teamB_time 0 1 Examples -------- >>> from datetime import date >>> import pandas as pd >>> teams = {'teamA': ['user1', 'user2'], ... 'teamB': {'user3': None, ... 'user4': (date(2017, 1, 1), date(2018, 1, 1))}, ... 'teamC': {'user5': pd.date_range('2015-1-1', '2018-1-1')}} >>> stats = pymaid.get_team_contributions(teams) See Also -------- :func:`~pymaid.get_contributor_statistics` Gives you more basic info on neurons of interest such as total reconstruction/review time. :func:`~pymaid.get_time_invested` Time invested by individual users. Gives you more control over how time is calculated. 
""" remote_instance = utils._eval_remote_instance(remote_instance) # Prepare teams if not isinstance(teams, dict): raise TypeError('Expected teams of type dict, got ' '{}'.format(type(teams))) beginning_of_time = datetime.date(1900, 1, 1) today = datetime.date.today() all_time = pd.date_range(beginning_of_time, today) for t in teams: if isinstance(teams[t], list): teams[t] = {u: all_time for u in teams[t]} elif isinstance(teams[t], dict): for u in teams[t]: if isinstance(teams[t][u], type(None)): teams[t][u] = all_time elif isinstance(teams[t][u], (tuple, list)): try: teams[t][u] = pd.date_range(*teams[t][u]) except BaseException: raise Exception('Error converting "{}" to pandas.' 'date_range'.format(teams[t][u])) elif isinstance(teams[t][u], pd.core.indexes.datetimes.DatetimeIndex): pass else: TypeError('Expected user dates to be either None, tuple ' 'of datetimes or pandas.date_range, ' 'got {}'.format(type(teams[t][u]))) else: raise TypeError('Expected teams to be either lists or dicts of ' 'users, got {}'.format(type(teams[t]))) # Get all users all_users = [u for t in teams for u in teams[t]] # Prepare neurons - download if neccessary if not isinstance(neurons, type(None)): if isinstance(neurons, core.CatmaidNeuron): neurons = core.CatmaidNeuronList(neurons) elif isinstance(neurons, core.CatmaidNeuronList): pass else: neurons = fetch.get_neurons(neurons, remote_instance=remote_instance) else: all_dates = [d.date() for t in teams for u in teams[t] for d in teams[t][u]] neurons = fetch.find_neurons(users=all_users, from_date=min(all_dates), to_date=max(all_dates), remote_instance=remote_instance) neurons.get_skeletons() # Get user list user_list = fetch.get_user_list(remote_instance=remote_instance).set_index('login') for u in all_users: if u not in user_list.index: raise ValueError('User "{}" not found in user list'.format(u)) # Get all node details all_node_details = fetch.get_node_details(neurons, remote_instance=remote_instance) # Get connector links link_details = fetch.get_connector_links(neurons, remote_instance=remote_instance) # link_details contains all links. 
We have to subset this to existing # connectors in case the input neurons have been pruned link_details = link_details[link_details.connector_id.isin(neurons.connectors.connector_id.values)] interval = 3 bin_width = '%iMin' % interval minimum_actions = 10 * interval stats = [] for n in config.tqdm(neurons, desc='Processing', disable=config.pbar_hide, leave=config.pbar_leave): # Get node details tn_ids = n.nodes.node_id.values.astype(str) cn_ids = n.connectors.connector_id.values.astype(str) current_status = config.pbar_hide config.pbar_hide = True node_details = all_node_details[all_node_details.node_id.isin(np.append(tn_ids, cn_ids))] config.pbar_hide = current_status # Extract node creation node_creation = node_details.loc[node_details.node_id.isin(tn_ids), ['creator', 'creation_time']].values node_creation = np.c_[node_creation, ['node_creation'] * node_creation.shape[0]] # Extract connector creation cn_creation = node_details.loc[node_details.node_id.isin(cn_ids), ['creator', 'creation_time']].values cn_creation = np.c_[cn_creation, ['cn_creation'] * cn_creation.shape[0]] # Extract edition times (treenodes + connectors) node_edits = node_details.loc[:, ['editor', 'edition_time']].values node_edits = np.c_[node_edits, ['editor'] * node_edits.shape[0]] # Link creation link_creation = link_details.loc[link_details.connector_id.isin(cn_ids), ['creator', 'creation_time']].values link_creation = np.c_[link_creation, ['link_creation'] * link_creation.shape[0]] # Extract review times reviewers = [u for l in node_details.reviewers.values for u in l] timestamps = [ts for l in node_details.review_times.values for ts in l] node_review = np.c_[reviewers, timestamps, ['review'] * len(reviewers)] # Merge all timestamps (ignore edits for now) to get time_invested all_ts = pd.DataFrame(np.vstack([node_creation, node_review, cn_creation, link_creation, node_edits]), columns=['user', 'timestamp', 'type']) return all_ts # Add column with just the date and make it the index all_ts['date'] = all_ts.timestamp.values.astype('datetime64[D]') all_ts.index = pd.to_datetime(all_ts.date) # Fill in teams for each timestamp based on user + date all_ts['team'] = None for t in teams: for u in teams[t]: # Assign all timestamps by this user in the right time to # this team existing_dates = (teams[t][u] & all_ts.index).unique() ss = (all_ts.index.isin(existing_dates)) & (all_ts.user.values == user_list.loc[u, 'id']) all_ts.loc[ss, 'team'] = t # Get total total_time = sum(all_ts.timestamp.to_frame().set_index( 'timestamp', drop=False).groupby(pd.Grouper(freq=bin_width)).count().values >= minimum_actions)[0] * interval this_neuron = [n.skeleton_id, n.n_nodes, n.n_connectors, node_review.shape[0], total_time] # Go over the teams and collect values for t in teams: # Subset to team this_team = all_ts[all_ts.team == t] if this_team.shape[0] > 0: # Subset to user ID team_time = sum(this_team.timestamp.to_frame().set_index( 'timestamp', drop=False).groupby(pd.Grouper(freq=bin_width)).count().values >= minimum_actions)[0] * interval team_nodes = this_team[this_team['type'] == 'node_creation'].shape[0] team_cn = this_team[this_team['type'] == 'cn_creation'].shape[0] team_rev = this_team[this_team['type'] == 'review'].shape[0] else: team_nodes = team_cn = team_rev = team_time = 0 this_neuron += [team_nodes, team_cn, team_rev, team_time] stats.append(this_neuron) cols = ['skeleton_id', 'total_nodes', 'total_connectors', 'total_reviews', 'total_time'] for t in teams: for s in ['nodes', 'connectors', 'reviews', 'time']: cols += 
['{}_{}'.format(t, s)] stats = pd.DataFrame(stats, columns=cols) cols_ordered = ['skeleton_id'] + ['{}_{}'.format(t, v) for v in ['nodes', 'connectors', 'reviews', 'time'] for t in ['total'] + list(teams)] stats = stats[cols_ordered] return stats def get_user_contributions(x, teams=None, remote_instance=None): """Return number of nodes and synapses contributed by each user. This is essentially a wrapper for :func:`pymaid.get_contributor_statistics` - if you are also interested in e.g. construction time, review time, etc. you may want to consider using :func:`~pymaid.get_contributor_statistics` instead. Parameters ---------- x Which neurons to check. Can be either: 1. skeleton IDs (int or str) 2. neuron name (str, must be exact match) 3. annotation: e.g. 'annotation:PN right' 4. CatmaidNeuron or CatmaidNeuronList object teams dict, optional Teams to group contributions for. Users must be logins:: {'teamA': ['user1', 'user2'], 'team2': ['user3'], ...]} Users not part of any team, will be grouped as team ``'others'``. remote_instance : CatmaidInstance, optional Either pass explicitly or define globally. Returns ------- pandas.DataFrame DataFrame in which each row represents a user:: user nodes presynapses postsynapses nodes_reviewed 0 1 ... Examples -------- >>> import matplotlib.pyplot as plt >>> # Get contributors for a single neuron >>> cont = pymaid.get_user_contributions(2333007) >>> # Get top 10 (by node contribution) >>> top10 = cont.iloc[:10].set_index('user') >>> # Plot as bar chart >>> ax = top10.plot(kind='bar') >>> plt.show() >>> # Plot relative contributions >>> cont = pymaid.get_user_contributions(2333007) >>> cont = cont.set_index('user') >>> # Normalize >>> cont_rel = cont / cont.sum(axis=0).values >>> # Plot contributors with >5% node contributions >>> ax = cont_rel[cont_rel.nodes > .05].plot(kind='bar') >>> plt.show() See Also -------- :func:`~pymaid.get_contributor_statistics` Gives you more basic info on neurons of interest such as total reconstruction/review time. 
""" if not isinstance(teams, type(None)): # Prepare teams if not isinstance(teams, dict): raise TypeError('Expected teams of type dict, got ' '{}'.format(type(teams))) for t in teams: if not isinstance(teams[t], list): raise TypeError('Teams need to list of user logins, ' 'got {}'.format(type(teams[t]))) # Turn teams into a login -> team dict teams = {u: t for t in teams for u in teams[t]} remote_instance = utils._eval_remote_instance(remote_instance) skids = utils.eval_skids(x, remote_instance=remote_instance) cont = fetch.get_contributor_statistics(skids, remote_instance=remote_instance, separate=False) all_users = set(list(cont.node_contributors.keys()) + list(cont.pre_contributors.keys()) + list(cont.post_contributors.keys())) stats = { 'nodes': {u: 0 for u in all_users}, 'presynapses': {u: 0 for u in all_users}, 'postsynapses': {u: 0 for u in all_users}, 'nodes_reviewed': {u: 0 for u in all_users} } for u in cont.node_contributors: stats['nodes'][u] = cont.node_contributors[u] for u in cont.pre_contributors: stats['presynapses'][u] = cont.pre_contributors[u] for u in cont.post_contributors: stats['postsynapses'][u] = cont.post_contributors[u] for u in cont.review_contributors: stats['nodes_reviewed'][u] = cont.review_contributors[u] stats = pd.DataFrame([[u, stats['nodes'][u], stats['presynapses'][u], stats['postsynapses'][u], stats['nodes_reviewed'][u]] for u in all_users], columns=['user', 'nodes', 'presynapses', 'postsynapses', 'nodes_reviewed'] ).sort_values('nodes', ascending=False).reset_index(drop=True) if isinstance(teams, type(None)): return stats stats['team'] = [teams.get(u, 'others') for u in stats.user.values] return stats.groupby('team').sum() def get_time_invested(x, mode='SUM', by='USER', minimum_actions=10, max_inactive_time=3, treenodes=True, connectors=True, links=True, start_date=None, end_date=None, remote_instance=None): """Calculate the time spent working on a set of neurons. Use ``minimum_actions`` and ``max_inactive_time`` to fine tune how time invested is calculated: by default, time is binned over 3 minutes in which a user has to perform 3x10 actions for that interval to be counted towards the time spent tracing. Important --------- Creation/Edition/Review times can overlap! This is why total time spent is not just creation + edition + review. Please note that this does currently not take placement of pre-/postsynaptic nodes into account! Be aware of the ``minimum_actions`` parameter: at low values even a single action (e.g. connecting a node) will add considerably to time invested. To keep total reconstruction time comparable to what Catmaid calculates, you should consider about 10 actions/minute (= a click every 6 seconds) and ``max_inactive_time`` of 3 mins. CATMAID gives reconstruction time across all users. Here, we calculate the time spent tracing for individuals. This may lead to a discrepancy between sum of time invested over of all users from this function vs. CATMAID's reconstruction time. Parameters ---------- x Which neurons to check. Can be either: 1. skeleton IDs (int or str) 2. neuron name (str, must be exact match) 3. annotation: e.g. 'annotation:PN right' 4. CatmaidNeuron or CatmaidNeuronList object If you pass a CatmaidNeuron/List, its node/connectors are used to calculate time invested. You can exploit this to get time spent reconstructing in given compartment of a neurons, e.g. by pruning it to a volume before passing it to ``get_time_invested``. 
mode : 'SUM' | 'SUM2' | 'OVER_TIME' | 'ACTIONS', optional (1) 'SUM' will return total time invested (in minutes) broken down by creation, edition and review. (2) 'SUM2' will return total time invested (in minutes) broken down by `treenodes`, `connectors` and `links`. (3) 'OVER_TIME' will return minutes invested/day over time. (4) 'ACTIONS' will return actions (node/connectors placed/edited) per day. by : 'USER' | 'NEURON', optional Determines whether the stats are broken down by user or by neuron. minimum_actions : int, optional Minimum number of actions per minute to be counted as active. max_inactive_time : int, optional Interval in minutes over which time invested is binned. Essentially determines how much time can be between bouts of activity. treenodes : bool, optional If False, treenodes will not be taken into account. connectors : bool, optional If False, connectors will not be taken into account. links : bool, optional If False, connector links will not be taken into account. start_date : iterable | datetime.date | numpy.datetime64, optional Restricts time invested to window. Applies to creation but not edition time! If iterable, must be year, month day, e.g. ``[2018, 1, 1]``. end_date : iterable | datetime.date | numpy.datetime64, optional See ``start_date``. remote_instance : CatmaidInstance, optional Either pass explicitly or define globally. Returns ------- pandas.DataFrame If ``mode='SUM'``, values represent minutes invested:: total creation edition review user1 user2 .. . If ``mode='SUM2'``, values represent minutes invested:: total treenodes connectors links user1 user2 .. . If ``mode='OVER_TIME'`` or ``mode='ACTIONS'``:: date1 date2 date3 ... user1 user2 .. . For `OVER_TIME`, values respresent minutes invested on that day. For `ACTIONS`, values represent actions (creation, edition, review) on that day. Examples -------- Get time invested for a set of neurons: >>> da1 = pymaid.get_neurons('annotation:glomerulus DA1') >>> time = pymaid.get_time_invested(da1) Get time spent tracing in a specific compartment: >>> da1_lh = pymaid.prune_by_volume('LH_R', inplace=False) >>> time_lh = pymaid.get_time_invested(da1_lh) Get contributions within a given time window: >>> time_jan = pymaid.get_time_invested(da1, ... start_date=[2018, 1, 1], ... end_date=[2018, 1, 31]) Plot pie chart of contributions per user using Plotly: >>> import plotly >>> stats = pymaid.get_time_invested(skids, remote_instance) >>> # Use plotly to generate pie chart >>> fig = {"data": [{"values": stats.total.tolist(), ... 
"labels": stats.user.tolist(), "type" : "pie" }]} >>> plotly.offline.plot(fig) Plot reconstruction efforts over time: >>> stats = pymaid.get_time_invested(skids, mode='OVER_TIME') >>> # Plot time invested over time >>> stats.T.plot() >>> # Plot cumulative time invested over time >>> stats.T.cumsum(axis=0).plot() >>> # Filter for major contributors >>> stats[stats.sum(axis=1) > 20].T.cumsum(axis=0).plot() """ def _extract_timestamps(ts, restrict_groups, desc='Calc'): if ts.empty: return {} grouped = ts.set_index('timestamp', drop=False).groupby(['group', pd.Grouper(freq=bin_width)]).count() >= minimum_actions temp_stats = {} for g in config.tqdm(set(ts.group.unique()) & set(restrict_groups), desc=desc, disable=config.pbar_hide, leave=False): temp_stats[g] = sum(grouped.loc[g].values)[0] * interval return temp_stats if mode not in ['SUM', 'SUM2', 'OVER_TIME', 'ACTIONS']: raise ValueError('Unknown mode "{}"'.format(mode)) if by not in ['NEURON', 'USER']: raise ValueError('Unknown by "{}"'.format(by)) remote_instance = utils._eval_remote_instance(remote_instance) skids = utils.eval_skids(x, remote_instance=remote_instance) # Maximal inactive time is simply translated into binning # We need this later for pandas.TimeGrouper() anyway interval = max_inactive_time bin_width = '%iMin' % interval # Update minimum_actions to reflect actions/interval instead of # actions/minute minimum_actions *= interval user_list = fetch.get_user_list(remote_instance=remote_instance).set_index('id') user_dict = user_list.login.to_dict() if not isinstance(x, (core.CatmaidNeuron, core.CatmaidNeuronList)): x = fetch.get_neuron(skids, remote_instance=remote_instance) if isinstance(x, core.CatmaidNeuron): skdata = core.CatmaidNeuronList(x) elif isinstance(x, core.CatmaidNeuronList): skdata = x if not isinstance(end_date, (datetime.date, np.datetime64, type(None))): end_date = datetime.date(*end_date) if not isinstance(start_date, (datetime.date, np.datetime64, type(None))): start_date = datetime.date(*start_date) # Extract connector and node IDs node_ids = [] connector_ids = [] for n in skdata.itertuples(): if treenodes: node_ids += n.nodes.node_id.tolist() if connectors: connector_ids += n.connectors.connector_id.tolist() # Get node details node_details = fetch.get_node_details(node_ids + connector_ids, remote_instance=remote_instance) # Get details for links if links: link_details = fetch.get_connector_links(skdata, remote_instance=remote_instance) # link_details contains all links. 
We have to subset this to existing # connectors in case the input neurons have been pruned link_details = link_details[link_details.connector_id.isin(connector_ids)] else: link_details = pd.DataFrame([], columns=['creator', 'creation_time']) # Remove timestamps outside of date range (if provided) if start_date: node_details = node_details[node_details.creation_time >= np.datetime64(start_date)] link_details = link_details[link_details.creation_time >= np.datetime64(start_date)] if end_date: node_details = node_details[node_details.creation_time <= np.datetime64(end_date)] link_details = link_details[link_details.creation_time <= np.datetime64(end_date)] # If we want to group by neuron, we need to add a "skeleton ID" column and # make check if we need to duplicate rows with connectors if by == 'NEURON': # Need to add a column with the skeleton ID node_details['skeleton_id'] = None node_details['node_type'] = 'connector' col_name = 'skeleton_id' for n in skdata: cond = node_details.node_id.isin(n.nodes.node_id.values.astype(str)) node_details.loc[cond, 'skeleton_id'] = n.skeleton_id node_details.loc[cond, 'node_type'] = 'treenode' # Connectors can show up in more than one neuron -> we need to duplicate # those rows for each of the associated neurons cn_details = [] for n in skdata: cond1 = node_details.node_type == 'connector' cond2 = node_details.node_id.isin(n.connectors.connector_id.values.astype(str)) node_details.loc[cond1 & cond2, 'skeleton_id'] = n.skeleton_id this_cn = node_details.loc[cond1 & cond2] cn_details.append(this_cn) cn_details = pd.concat(cn_details, axis=0) # Merge the node details again cond1 = node_details.node_type == 'treenode' node_details = pd.concat([node_details.loc[cond1], cn_details], axis=0).reset_index(drop=True) # Note that link_details already has a "skeleton_id" column # but we need to make sure it's strings link_details['skeleton_id'] = link_details.skeleton_id.astype(str) create_group = edit_group = 'skeleton_id' else: create_group = 'creator' edit_group = 'editor' col_name = 'user' # Dataframe for creation (i.e. 
the actual generation of the nodes) creation_timestamps = np.append(node_details[[create_group, 'creation_time']].values, link_details[[create_group, 'creation_time']].values, axis=0) creation_timestamps = pd.DataFrame(creation_timestamps, columns=['group', 'timestamp']) # Dataframe for edition times - can't use links as there is no editor # Because creation of a node counts as an edit, we are removing # timestamps where creation and edition time are less than 100ms apart is_edit = (node_details.edition_time - node_details.creation_time) > np.timedelta64(200, 'ms') edition_timestamps = node_details.loc[is_edit, [edit_group, 'edition_time']] edition_timestamps.columns = ['group', 'timestamp'] # Generate dataframe for reviews -> here we have to unpack if by == 'USER': groups = [u for l in node_details.reviewers.values for u in l] else: groups = [s for l, s in zip(node_details.review_times.values, node_details.skeleton_id.values) for ts in l] timestamps = [ts for l in node_details.review_times.values for ts in l] review_timestamps = pd.DataFrame([groups, timestamps]).T review_timestamps.columns = ['group', 'timestamp'] # Change user ID to login if by == 'USER': if mode == 'SUM2': node_details['creator'] = node_details.creator.map(user_dict) node_details['editor'] = node_details.editor.map(user_dict) link_details['creator'] = link_details.creator.map(user_dict) creation_timestamps['group'] = creation_timestamps.group.map(user_dict) edition_timestamps['group'] = edition_timestamps.group.map(user_dict) review_timestamps['group'] = review_timestamps.group.map(user_dict) # Merge all timestamps all_timestamps = pd.concat([creation_timestamps, edition_timestamps, review_timestamps], axis=0) all_timestamps.sort_values('timestamp', inplace=True) if by == 'USER': # Extract the users that are relevant for us relevant_users = all_timestamps.groupby('group').count() groups = relevant_users[relevant_users.timestamp >= minimum_actions].index.values else: groups = skdata.skeleton_id if mode == 'SUM': # This breaks it down by time spent on creation, edition and review stats = {k: {g: 0 for g in groups} for k in ['total', 'creation', 'edition', 'review']} stats['total'].update(_extract_timestamps(all_timestamps, groups, desc='Calc total')) stats['creation'].update(_extract_timestamps(creation_timestamps, groups, desc='Calc creation')) stats['edition'].update(_extract_timestamps(edition_timestamps, groups, desc='Calc edition')) stats['review'].update(_extract_timestamps(review_timestamps, groups, desc='Calc review')) return pd.DataFrame([[g, stats['total'][g], stats['creation'][g], stats['edition'][g], stats['review'][g]] for g in groups], columns=[col_name, 'total', 'creation', 'edition', 'review'] ).sort_values('total', ascending=False ).reset_index(drop=True).set_index(col_name) elif mode == 'SUM2': # This breaks it down by time spent on nodes, connectors and links stats = {k: {g: 0 for g in groups} for k in ['total', 'treenodes', 'connectors', 'links']} stats['total'].update(_extract_timestamps(all_timestamps, groups, desc='Calc total')) # We need to construct separate DataFrames for nodes, connectors + links # Note that we are using only edits that do not stem from the creation is_tn = node_details.node_id.astype(int).isin(node_ids) conc = np.concatenate([node_details.loc[is_tn, [create_group, 'creation_time'] ].values, node_details.loc[is_edit & is_tn, [edit_group, 'edition_time'] ].values ], axis=0) treenode_timestamps = pd.DataFrame(conc, columns=['group', 'timestamp']) 
stats['treenodes'].update(_extract_timestamps(treenode_timestamps, groups, desc='Calc treenodes')) # Now connectors # Note that we are using only edits that do not stem from the creation is_cn = node_details.node_id.astype(int).isin(connector_ids) conc = np.concatenate([node_details.loc[is_cn, [create_group, 'creation_time'] ].values, node_details.loc[is_edit & is_cn, [edit_group, 'edition_time'] ].values ], axis=0) connector_timestamps = pd.DataFrame(conc, columns=['group', 'timestamp']) stats['connectors'].update(_extract_timestamps(connector_timestamps, groups, desc='Calc connectors')) # Now links link_timestamps = pd.DataFrame(link_details[[create_group, 'creation_time']].values, columns=['group', 'timestamp']) stats['links'].update(_extract_timestamps(link_timestamps, groups, desc='Calc links')) return pd.DataFrame([[g, stats['total'][g], stats['treenodes'][g], stats['connectors'][g], stats['links'][g]] for g in groups], columns=[col_name, 'total', 'treenodes', 'connectors', 'links'] ).sort_values('total', ascending=False ).reset_index(drop=True ).set_index(col_name) elif mode == 'ACTIONS': all_ts = all_timestamps.set_index('timestamp', drop=False ).timestamp.groupby(pd.Grouper(freq='1d') ).count().to_frame() all_ts.columns = ['all_groups'] all_ts = all_ts.T # Get total time spent for g in config.tqdm(all_timestamps.group.unique(), desc='Calc. total', disable=config.pbar_hide, leave=False): this_ts = all_timestamps[all_timestamps.group == g].set_index( 'timestamp', drop=False).timestamp.groupby(pd.Grouper(freq='1d')).count().to_frame() this_ts.columns = [g] all_ts = pd.concat([all_ts, this_ts.T]) return all_ts.fillna(0) elif mode == 'OVER_TIME': # Go over all users and collect time invested all_ts = [] for g in config.tqdm(all_timestamps.group.unique(), desc='Calc. total', disable=config.pbar_hide, leave=False): # First count all minutes with minimum number of actions minutes_counting = (all_timestamps[all_timestamps.group == g].set_index( 'timestamp', drop=False).timestamp.groupby(pd.Grouper(freq=bin_width)).count().to_frame() >= minimum_actions) # Then remove the minutes that have less than minimum actions minutes_counting = minutes_counting[minutes_counting.timestamp] # Now group timestamps by day this_ts = minutes_counting.groupby(pd.Grouper(freq='1d')).count() # Rename columns to user login this_ts.columns = [g] # Append if an and move on if not this_ts.empty: all_ts.append(this_ts.T) # Turn into DataFrame all_ts = pd.concat(all_ts).sort_index() # Replace NaNs with 0 all_ts.fillna(0, inplace=True) # Add all users column all_users = all_ts.sum(axis=0) all_users.name = 'all_groups' all_ts = pd.concat([all_users, all_ts.T], axis=1).T return all_ts def get_user_actions(users=None, neurons=None, start_date=None, end_date=None, remote_instance=None): """Get timestamps of user actions (creations, editions, reviews, linking). Important --------- This function returns most but not all user actions:: 1. The API endpoint used for finding neurons worked on by a given user (:func:`pymaid.find_neurons`) does not return single-node neurons. Hence, placing e.g. postsynaptic nodes is not taken into account. 2. Any creation is also an edit. However, only the last edit is kept track of. So each creation counts as an edit for the creator until a different user makes an edit. Parameters ---------- users : str | list, optional Users login(s) for which to return timestamps. neurons : list of skeleton IDs | CatmaidNeuron/List, optional Neurons for which to return timestamps. 
If None, will find neurons by user. start_date : tuple | datetime.date, optional end_date : tuple | datetime.date, optional Start and end date of time window to check. remote_instance : CatmaidInstance, optional Return ------ pandas.DataFrame DataFrame in which each row is a user action:: user timestamp action 0 1 ... Examples -------- In the first example we will have a look at how active a user is over the course of a day. >>> import pandas as pd >>> import matplotlib.pyplot as plt >>> # Get all actions for a single user >>> actions = pymaid.get_user_actions(users='schlegelp', .... start_date=(2017, 11, 1)) >>> # Group by hour and see what time of the day user is usually active >>> actions.set_index(pd.DatetimeIndex(actions.timestamp), inplace=True) >>> hours = actions.groupby(actions.index.hour).count() >>> ax = hours.action.plot() >>> plt.show() >>> # Plot day-by-day activity >>> ax = plt.subplot() >>> ax.scatter(actions.timestamp.date.values, ... actions.timestamp.time.values, ... marker='_') """ if not neurons and not users and not (start_date or end_date): raise ValueError('Query must be restricted by at least a single ' 'parameter!') if users and not isinstance(users, (list, np.ndarray)): users = [users] # Get user dictionary (needed later) user_list = fetch.get_user_list(remote_instance=remote_instance) user_dict = user_list.set_index('id').login.to_dict() if isinstance(neurons, type(None)): neurons = fetch.find_neurons(users=users, from_date=start_date, to_date=end_date, reviewed_by=users, remote_instance=remote_instance) # Get skeletons neurons.get_skeletons() elif not isinstance(neurons, (core.CatmaidNeuron, core.CatmaidNeuronList)): neurons = fetch.get_neuron(neurons, remote_instance=remote_instance) if not isinstance(end_date, (datetime.date, type(None))): end_date = datetime.date(*end_date) if not isinstance(start_date, (datetime.date, type(None))): start_date = datetime.date(*start_date) node_ids = neurons.nodes.node_id.tolist() connector_ids = neurons.connectors.connector_id.tolist() # Get node details node_details = fetch.get_node_details(node_ids + connector_ids, remote_instance=remote_instance) # Get details for links link_details = fetch.get_connector_links(neurons, remote_instance=remote_instance) # Dataframe for creation (i.e. 
the actual generation of the nodes) creation_timestamps = node_details[['creator', 'creation_time']].copy() creation_timestamps['action'] = 'creation' creation_timestamps.columns = ['user', 'timestamp', 'action'] # Dataframe for edition times edition_timestamps = node_details[['editor', 'edition_time']].copy() edition_timestamps['action'] = 'edition' edition_timestamps.columns = ['user', 'timestamp', 'action'] # DataFrame for linking linking_timestamps = link_details[['creator', 'creation_time']].copy() linking_timestamps['action'] = 'linking' linking_timestamps.columns = ['user', 'timestamp', 'action'] # Generate dataframe for reviews reviewers = [u for l in node_details.reviewers.tolist() for u in l] timestamps = [ts for l in node_details.review_times.tolist() for ts in l] review_timestamps = pd.DataFrame([[u, ts, 'review'] for u, ts in zip( reviewers, timestamps)], columns=['user', 'timestamp', 'action']) # Merge all timestamps all_timestamps = pd.concat([creation_timestamps, edition_timestamps, review_timestamps, linking_timestamps], axis=0).reset_index(drop=True) # Map login onto user ID all_timestamps.user = [user_dict[u] for u in all_timestamps.user.values] # Remove other users all_timestamps = all_timestamps[all_timestamps.user.isin(users)] # Remove timestamps outside of date range (if provided) if start_date: all_timestamps = all_timestamps[all_timestamps.timestamp.values >= np.datetime64(start_date)] if end_date: all_timestamps = all_timestamps[all_timestamps.timestamp.values <= np.datetime64(end_date)] return all_timestamps.sort_values('timestamp').reset_index(drop=True)
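# A compact, standalone illustration (not pymaid's implementation) of the binning idea that
# get_time_invested() describes: per-action timestamps are grouped into fixed-width bins, and a
# bin only counts towards time invested when it holds at least ``minimum_actions`` actions per
# minute. The timestamps below are made up for the example; only pandas is required.
import pandas as pd


def _active_minutes(timestamps, interval=3, minimum_actions=10):
    """Return the number of minutes counted as active for a list of action timestamps."""
    ts = pd.Series(1, index=pd.DatetimeIndex(timestamps))
    per_bin = ts.groupby(pd.Grouper(freq='%iMin' % interval)).count()
    # A bin of `interval` minutes counts only if it holds >= minimum_actions * interval actions
    return int((per_bin >= minimum_actions * interval).sum()) * interval


if __name__ == '__main__':
    example = pd.date_range('2018-01-01 10:00', periods=40, freq='5s')
    print(_active_minutes(example))  # the first 3-minute bin holds 36 actions -> prints 3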
gpl-3.0
476,608,047,681,674,560
41.385082
131
0.538347
false
mganeva/mantid
scripts/SANS/sans/state/convert_to_q.py
1
10526
# Mantid Repository : https://github.com/mantidproject/mantid # # Copyright &copy; 2018 ISIS Rutherford Appleton Laboratory UKRI, # NScD Oak Ridge National Laboratory, European Spallation Source # & Institut Laue - Langevin # SPDX - License - Identifier: GPL - 3.0 + # pylint: disable=too-few-public-methods """State describing the conversion to momentum transfer""" from __future__ import (absolute_import, division, print_function) import json import copy from sans.state.state_base import (StateBase, rename_descriptor_names, BoolParameter, PositiveFloatParameter, ClassTypeParameter, StringParameter) from sans.common.enums import (ReductionDimensionality, RangeStepType, SANSFacility) from sans.state.state_functions import (is_pure_none_or_not_none, is_not_none_and_first_larger_than_second, validation_message) from sans.state.automatic_setters import (automatic_setters) # ---------------------------------------------------------------------------------------------------------------------- # State # ---------------------------------------------------------------------------------------------------------------------- @rename_descriptor_names class StateConvertToQ(StateBase): reduction_dimensionality = ClassTypeParameter(ReductionDimensionality) use_gravity = BoolParameter() gravity_extra_length = PositiveFloatParameter() radius_cutoff = PositiveFloatParameter() wavelength_cutoff = PositiveFloatParameter() # 1D settings q_min = PositiveFloatParameter() q_max = PositiveFloatParameter() q_1d_rebin_string = StringParameter() # 2D settings q_xy_max = PositiveFloatParameter() q_xy_step = PositiveFloatParameter() q_xy_step_type = ClassTypeParameter(RangeStepType) # ----------------------- # Q Resolution specific # --------------------- use_q_resolution = BoolParameter() q_resolution_collimation_length = PositiveFloatParameter() q_resolution_delta_r = PositiveFloatParameter() moderator_file = StringParameter() # Circular aperture settings q_resolution_a1 = PositiveFloatParameter() q_resolution_a2 = PositiveFloatParameter() # Rectangular aperture settings q_resolution_h1 = PositiveFloatParameter() q_resolution_h2 = PositiveFloatParameter() q_resolution_w1 = PositiveFloatParameter() q_resolution_w2 = PositiveFloatParameter() def __init__(self): super(StateConvertToQ, self).__init__() self.reduction_dimensionality = ReductionDimensionality.OneDim self.use_gravity = False self.gravity_extra_length = 0.0 self.use_q_resolution = False self.radius_cutoff = 0.0 self.wavelength_cutoff = 0.0 def validate(self): is_invalid = {} # 1D Q settings if not is_pure_none_or_not_none([self.q_min, self.q_max]): entry = validation_message("The q boundaries for the 1D reduction are inconsistent.", "Make sure that both q boundaries are set (or none).", {"q_min": self.q_min, "q_max": self.q_max}) is_invalid.update(entry) if is_not_none_and_first_larger_than_second([self.q_min, self.q_max]): entry = validation_message("Incorrect q bounds for 1D reduction.", "Make sure that the lower q bound is smaller than the upper q bound.", {"q_min": self.q_min, "q_max": self.q_max}) is_invalid.update(entry) if self.reduction_dimensionality is ReductionDimensionality.OneDim: if self.q_min is None or self.q_max is None: entry = validation_message("Q bounds not set for 1D reduction.", "Make sure to set the q boundaries when using a 1D reduction.", {"q_min": self.q_min, "q_max": self.q_max}) is_invalid.update(entry) if self.q_1d_rebin_string is not None: if self.q_1d_rebin_string == "": entry = validation_message("Q rebin string does not seem to 
be valid.", "Make sure to provide a valid rebin string", {"q_1d_rebin_string": self.q_1d_rebin_string}) is_invalid.update(entry) elif not is_valid_rebin_string(self.q_1d_rebin_string): entry = validation_message("Q rebin string does not seem to be valid.", "Make sure to provide a valid rebin string", {"q_1d_rebin_string": self.q_1d_rebin_string}) is_invalid.update(entry) # QXY settings if self.reduction_dimensionality is ReductionDimensionality.TwoDim: if self.q_xy_max is None or self.q_xy_step is None: entry = validation_message("Q bounds not set for 2D reduction.", "Make sure that the q_max value bound and the step for the 2D reduction.", {"q_xy_max": self.q_xy_max, "q_xy_step": self.q_xy_step}) is_invalid.update(entry) # Q Resolution settings if self.use_q_resolution: if not is_pure_none_or_not_none([self.q_resolution_a1, self.q_resolution_a2]): entry = validation_message("Inconsistent circular geometry.", "Make sure that both diameters for the circular apertures are set.", {"q_resolution_a1": self.q_resolution_a1, "q_resolution_a2": self.q_resolution_a2}) is_invalid.update(entry) if not is_pure_none_or_not_none([self.q_resolution_h1, self.q_resolution_h2, self.q_resolution_w1, self.q_resolution_w2]): entry = validation_message("Inconsistent rectangular geometry.", "Make sure that both diameters for the circular apertures are set.", {"q_resolution_h1": self.q_resolution_h1, "q_resolution_h2": self.q_resolution_h2, "q_resolution_w1": self.q_resolution_w1, "q_resolution_w2": self.q_resolution_w2}) is_invalid.update(entry) if all(element is None for element in [self.q_resolution_a1, self.q_resolution_a2, self.q_resolution_w1, self.q_resolution_w2, self.q_resolution_h1, self.q_resolution_h2]): entry = validation_message("Aperture is undefined.", "Make sure that you set the geometry for a circular or a " "rectangular aperture.", {"q_resolution_a1": self.q_resolution_a1, "q_resolution_a2": self.q_resolution_a2, "q_resolution_h1": self.q_resolution_h1, "q_resolution_h2": self.q_resolution_h2, "q_resolution_w1": self.q_resolution_w1, "q_resolution_w2": self.q_resolution_w2}) is_invalid.update(entry) if self.moderator_file is None: entry = validation_message("Missing moderator file.", "Make sure to specify a moderator file when using q resolution.", {"moderator_file": self.moderator_file}) is_invalid.update(entry) is_invalid.update({"moderator_file": "A moderator file is required for the q resolution calculation."}) if is_invalid: raise ValueError("StateMoveDetectorISIS: The provided inputs are illegal. " "Please see: {0}".format(json.dumps(is_invalid))) # ---------------------------------------------------------------------------------------------------------------------- # Builder # ---------------------------------------------------------------------------------------------------------------------- class StateConvertToQBuilder(object): @automatic_setters(StateConvertToQ) def __init__(self): super(StateConvertToQBuilder, self).__init__() self.state = StateConvertToQ() def build(self): self.state.validate() return copy.copy(self.state) # ------------------------------------------ # Factory method for StateConvertToQBuilder # ------------------------------------------ def get_convert_to_q_builder(data_info): # The data state has most of the information that we require to define the q conversion. # For the factory method, only the facility/instrument is of relevance. 
facility = data_info.facility if facility is SANSFacility.ISIS: return StateConvertToQBuilder() else: raise NotImplementedError("StateConvertToQBuilder: Could not find any valid save builder for the " "specified StateData object {0}".format(str(data_info))) # ------------------------------------------- # Free functions # ------------------------------------------- def is_valid_rebin_string(rebin_string): is_valid = True try: values = [float(el) for el in rebin_string.split(",")] if len(values) < 2: is_valid = False elif len(values) == 2: if values[0] > values[1]: is_valid = False elif len(values) % 2 == 1: # odd number of entries step_points = values[::2] if not is_increasing(step_points): is_valid = False else: is_valid = False except: # noqa is_valid = False return is_valid def is_increasing(step_points): return all(el1 <= el2 for el1, el2 in zip(step_points, step_points[1:]))
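# --- Illustrative usage sketch (hypothetical, not part of the upstream file). ---
# Assuming the sans.* imports above resolve, this shows how the rebin-string
# helpers defined in this module behave: entries are comma-separated floats, and
# an odd number of entries (start, step, stop, ...) must have increasing
# boundary points to be accepted.
if __name__ == '__main__':
    assert is_valid_rebin_string("0.01, 0.02, 1.0")            # start, step, stop
    assert is_valid_rebin_string("0.01, 1.0")                  # plain start, stop
    assert not is_valid_rebin_string("1.0, 0.01")              # decreasing bounds
    assert not is_valid_rebin_string("0.01, 0.02, 0.5, 0.04")  # even entry count > 2
    assert is_increasing([0.01, 0.5, 1.0])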
gpl-3.0
-7,986,338,881,087,957,000
48.650943
120
0.519286
false
andrewcbennett/iris
lib/iris/tests/unit/concatenate/test__CubeSignature.py
1
4172
# (C) British Crown Copyright 2014 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Test class :class:`iris._concatenate._CubeSignature`."""

from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip)  # noqa

# import iris tests first so that some things can be initialised
# before importing anything else.
import iris.tests as tests

import numpy as np

import iris
from iris._concatenate import _CubeSignature as CubeSignature
from iris.coords import AuxCoord, DimCoord
from iris.cube import Cube
from cf_units import Unit
from iris.util import new_axis


class Test__coordinate_dim_metadata_equality(tests.IrisTest):
    def setUp(self):
        nt = 10
        data = np.arange(nt, dtype=np.float32)
        cube = Cube(data, standard_name='air_temperature', units='K')
        # Temporal coordinate.
        t_units = Unit('hours since 1970-01-01 00:00:00',
                       calendar='gregorian')
        t_coord = DimCoord(points=np.arange(nt),
                           standard_name='time',
                           units=t_units)
        cube.add_dim_coord(t_coord, 0)

        # Increasing 1D time-series cube.
        self.series_inc_cube = cube
        self.series_inc = CubeSignature(self.series_inc_cube)

        # Decreasing 1D time-series cube.
        self.series_dec_cube = self.series_inc_cube.copy()
        self.series_dec_cube.remove_coord('time')
        t_tmp = DimCoord(points=t_coord.points[::-1],
                         standard_name='time',
                         units=t_units)
        self.series_dec_cube.add_dim_coord(t_tmp, 0)
        self.series_dec = CubeSignature(self.series_dec_cube)

        # Scalar 0D time-series cube with scalar time coordinate.
        cube = Cube(0, standard_name='air_temperature', units='K')
        cube.add_aux_coord(DimCoord(points=nt,
                                    standard_name='time',
                                    units=t_units))
        self.scalar_cube = cube

    def test_scalar_non_common_axis(self):
        scalar = CubeSignature(self.scalar_cube)
        self.assertNotEqual(self.series_inc.dim_metadata, scalar.dim_metadata)
        self.assertNotEqual(self.series_dec.dim_metadata, scalar.dim_metadata)

    def test_1d_single_value_common_axis(self):
        # Manually promote scalar time cube to be a 1d cube.
        single = CubeSignature(new_axis(self.scalar_cube, 'time'))

        self.assertEqual(self.series_inc.dim_metadata, single.dim_metadata)
        self.assertEqual(self.series_dec.dim_metadata, single.dim_metadata)

    def test_increasing_common_axis(self):
        series_inc = self.series_inc
        series_dec = self.series_dec

        self.assertEqual(series_inc.dim_metadata, series_inc.dim_metadata)
        self.assertNotEqual(series_inc.dim_metadata, series_dec.dim_metadata)

    def test_decreasing_common_axis(self):
        series_inc = self.series_inc
        series_dec = self.series_dec

        self.assertNotEqual(series_dec.dim_metadata, series_inc.dim_metadata)
        self.assertEqual(series_dec.dim_metadata, series_dec.dim_metadata)

    def test_circular(self):
        series_inc = self.series_inc
        circular_cube = self.series_inc_cube.copy()
        circular_cube.coord('time').circular = True
        circular = CubeSignature(circular_cube)

        self.assertNotEqual(circular.dim_metadata, series_inc.dim_metadata)
        self.assertEqual(circular.dim_metadata, circular.dim_metadata)


if __name__ == '__main__':
    tests.main()
gpl-3.0
6,694,130,792,319,167,000
40.306931
79
0.668265
false
vmahuli/contrail-controller
src/config/device-manager/device_manager/db.py
1
36175
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of data model for physical router configuration manager """ from vnc_api.common.exceptions import NoIdError from physical_router_config import PhysicalRouterConfig from sandesh.dm_introspect import ttypes as sandesh from cfgm_common.vnc_db import DBBase from cfgm_common.uve.physical_router.ttypes import * from vnc_api.vnc_api import * import copy import socket import gevent from gevent import queue from cfgm_common.vnc_cassandra import VncCassandraClient class DBBaseDM(DBBase): obj_type = __name__ class BgpRouterDM(DBBaseDM): _dict = {} obj_type = 'bgp_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.bgp_routers = {} self.physical_router = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj['bgp_router_parameters'] if self.params is not None: if self.params.get('autonomous_system') is None: self.params['autonomous_system'] = GlobalSystemConfigDM.get_global_asn() self.update_single_ref('physical_router', obj) new_peers = {} for ref in obj.get('bgp_router_refs', []): new_peers[ref['uuid']] = ref['attr'] for peer_id in set(self.bgp_routers.keys()) - set(new_peers.keys()): peer = BgpRouterDM.get(peer_id) if self.uuid in peer.bgp_routers: del peer.bgp_routers[self.uuid] for peer_id, attrs in new_peers.items(): peer = BgpRouterDM.get(peer_id) if peer: peer.bgp_routers[self.uuid] = attrs self.bgp_routers = new_peers def sandesh_build(self): return sandesh.BgpRouter(name=self.name, uuid=self.uuid, peers=self.bgp_routers, physical_router=self.physical_router) @classmethod def sandesh_request(cls, req): # Return the list of BGP routers resp = sandesh.BgpRouterListResp(bgp_routers=[]) if req.name_or_uuid is None: for router in cls.values(): sandesh_router = router.sandesh_build() resp.bgp_routers.extend(sandesh_router) else: router = cls.find_by_name_or_uuid(req.name_or_uuid) if router: sandesh_router = router.sandesh_build() resp.bgp_routers.extend(sandesh_router) resp.response(req.context()) def get_all_bgp_router_ips(self): if self.params['address'] is not None: bgp_router_ips = set([self.params['address']]) else: bgp_router_ips = set() for peer_uuid in self.bgp_routers: peer = BgpRouterDM.get(peer_uuid) if peer is None or peer.params['address'] is None: continue bgp_router_ips.add(peer.params['address']) return bgp_router_ips #end get_all_bgp_router_ips # end class BgpRouterDM class PhysicalRouterDM(DBBaseDM): _dict = {} obj_type = 'physical_router' _sandesh = None def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_networks = set() self.bgp_router = None self.config_manager = None self.nc_q = queue.Queue(maxsize=1) self.nc_handler_gl = gevent.spawn(self.nc_handler) self.vn_ip_map = {} self.init_cs_state() self.update(obj_dict) self.config_manager = PhysicalRouterConfig( self.management_ip, self.user_credentials, self.vendor, self.product, self.vnc_managed, self._logger) self.uve_send() # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.management_ip = obj.get('physical_router_management_ip') self.dataplane_ip = obj.get('physical_router_dataplane_ip') self.vendor = obj.get('physical_router_vendor_name', '') self.product = obj.get('physical_router_product_name', '') self.vnc_managed = obj.get('physical_router_vnc_managed') self.user_credentials = 
obj.get('physical_router_user_credentials') self.junos_service_ports = obj.get('physical_router_junos_service_ports') self.update_single_ref('bgp_router', obj) self.update_multiple_refs('virtual_network', obj) self.physical_interfaces = set([pi['uuid'] for pi in obj.get('physical_interfaces', [])]) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) if self.config_manager is not None: self.config_manager.update( self.management_ip, self.user_credentials, self.vendor, self.product, self.vnc_managed) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj._cassandra.delete_pr(uuid) obj.config_manager.delete_bgp_config() obj.uve_send(True) obj.update_single_ref('bgp_router', {}) obj.update_multiple_refs('virtual_network', {}) del cls._dict[uuid] # end delete def is_junos_service_ports_enabled(self): if self.junos_service_ports is not None and self.junos_service_ports.get('service_port') is not None: return True return False #end is_junos_service_ports_enabled def set_config_state(self): try: self.nc_q.put_nowait(1) except queue.Full: pass #end def nc_handler(self): while self.nc_q.get() is not None: try: self.push_config() except Exception as e: self._logger.error("Exception: " + str(e)) #end def is_valid_ip(self, ip_str): try: socket.inet_aton(ip_str) return True except socket.error: return False #end def init_cs_state(self): vn_subnet_set = self._cassandra.get_pr_vn_set(self.uuid) for vn_subnet in vn_subnet_set: ip = self._cassandra.get(self._cassandra._PR_VN_IP_CF, self.uuid + ':' + vn_subnet) if ip is not None: self.vn_ip_map[vn_subnet] = ip['ip_address'] #end init_cs_state def reserve_ip(self, vn_uuid, subnet_prefix): try: vn = VirtualNetwork() vn.set_uuid(vn_uuid) ip_addr = self._manager._vnc_lib.virtual_network_ip_alloc( vn, subnet=subnet_prefix) if ip_addr: return ip_addr[0] #ip_alloc default ip count is 1 except Exception as e: self._logger.error("Exception: %s" %(str(e))) return None #end def free_ip(self, vn_uuid, subnet_prefix, ip_addr): try: vn = VirtualNetwork() vn.set_uuid(vn_uuid) self._manager._vnc_lib.virtual_network_ip_free( vn, [ip_addr], subnet=subnet_prefix) return True except Exception as e: self._logger.error("Exception: %s" %(str(e))) return False #end def get_vn_irb_ip_map(self): irb_ips = {} for vn_subnet, ip_addr in self.vn_ip_map.items(): (vn_uuid, subnet_prefix) = vn_subnet.split(':') vn = VirtualNetworkDM.get(vn_uuid) if vn_uuid not in irb_ips: irb_ips[vn_uuid] = set() irb_ips[vn_uuid].add((ip_addr, vn.gateways[subnet_prefix])) return irb_ips #end get_vn_irb_ip_map def evaluate_vn_irb_ip_map(self, vn_set): new_vn_ip_set = set() for vn_uuid in vn_set: vn = VirtualNetworkDM.get(vn_uuid) if vn.router_external == True: #dont need irb ip, gateway ip continue for subnet_prefix in vn.gateways.keys(): new_vn_ip_set.add(vn_uuid + ':' + subnet_prefix) old_set = set(self.vn_ip_map.keys()) delete_set = old_set.difference(new_vn_ip_set) create_set = new_vn_ip_set.difference(old_set) for vn_subnet in delete_set: (vn_uuid, subnet_prefix) = vn_subnet.split(':') ret = self.free_ip(vn_uuid, subnet_prefix, self.vn_ip_map[vn_subnet]) if ret == False: self._logger.error("Unable to free ip for vn/subnet/pr \ (%s/%s/%s)" %(vn_uuid, subnet_prefix, self.uuid)) ret = self._cassandra.delete(self._cassandra._PR_VN_IP_CF, self.uuid + ':' + vn_uuid + ':' + subnet_prefix) if ret == False: self._logger.error("Unable to free ip from db for vn/subnet/pr \ (%s/%s/%s)" %(vn_uuid, subnet_prefix, self.uuid)) 
continue self._cassandra.delete_from_pr_map(self.uuid, vn_subnet) del self.vn_ip_map[vn_subnet] for vn_subnet in create_set: (vn_uuid, subnet_prefix) = vn_subnet.split(':') (sub, length) = subnet_prefix.split('/') ip_addr = self.reserve_ip(vn_uuid, subnet_prefix) if ip_addr is None: self._logger.error("Unable to allocate ip for vn/subnet/pr \ (%s/%s/%s)" %(vn_uuid, subnet_prefix, self.uuid)) continue ret = self._cassandra.add(self._cassandra._PR_VN_IP_CF, self.uuid + ':' + vn_uuid + ':' + subnet_prefix, {'ip_address': ip_addr + '/' + length}) if ret == False: self._logger.error("Unable to store ip for vn/subnet/pr \ (%s/%s/%s)" %(self.uuid, subnet_prefix, self.uuid)) if self.free_ip(vn_uuid, subnet_prefix, ip_addr) == False: self._logger.error("Unable to free ip for vn/subnet/pr \ (%s/%s/%s)" %(self.uuid, subnet_prefix, self.uuid)) continue self._cassandra.add_to_pr_map(self.uuid, vn_subnet) self.vn_ip_map[vn_subnet] = ip_addr + '/' + length #end evaluate_vn_irb_ip_map def get_vn_li_map(self): vn_dict = {} for vn_id in self.virtual_networks: vn_dict[vn_id] = [] li_set = self.logical_interfaces for pi_uuid in self.physical_interfaces: pi = PhysicalInterfaceDM.get(pi_uuid) if pi is None: continue li_set |= pi.logical_interfaces for li_uuid in li_set: li = LogicalInterfaceDM.get(li_uuid) if li is None: continue vmi_id = li.virtual_machine_interface vmi = VirtualMachineInterfaceDM.get(vmi_id) if vmi is None: continue vn_id = vmi.virtual_network if vn_id in vn_dict: vn_dict[vn_id].append(li.name) else: vn_dict[vn_id] = [li.name] return vn_dict #end def push_config(self): self.config_manager.reset_bgp_config() bgp_router = BgpRouterDM.get(self.bgp_router) if bgp_router: for peer_uuid, attr in bgp_router.bgp_routers.items(): peer = BgpRouterDM.get(peer_uuid) if peer is None: continue external = (bgp_router.params['autonomous_system'] != peer.params['autonomous_system']) self.config_manager.add_bgp_peer(peer.params['address'], peer.params, attr, external) self.config_manager.set_bgp_config(bgp_router.params) self.config_manager.set_global_routing_options(bgp_router.params) bgp_router_ips = bgp_router.get_all_bgp_router_ips() if self.dataplane_ip is not None and self.is_valid_ip(self.dataplane_ip): self.config_manager.add_dynamic_tunnels(self.dataplane_ip, GlobalSystemConfigDM.ip_fabric_subnets, bgp_router_ips) vn_dict = self.get_vn_li_map() self.evaluate_vn_irb_ip_map(set(vn_dict.keys())) vn_irb_ip_map = self.get_vn_irb_ip_map() for vn_id, interfaces in vn_dict.items(): vn_obj = VirtualNetworkDM.get(vn_id) if vn_obj is None or vn_obj.vxlan_vni is None or vn_obj.vn_network_id is None: continue export_set = None import_set = None for ri_id in vn_obj.routing_instances: # Find the primary RI by matching the name ri_obj = RoutingInstanceDM.get(ri_id) if ri_obj is None: continue if ri_obj.fq_name[-1] == vn_obj.fq_name[-1]: vrf_name_l2 = vn_obj.get_vrf_name(vrf_type='l2') vrf_name_l3 = vn_obj.get_vrf_name(vrf_type='l3') export_set = copy.copy(ri_obj.export_targets) import_set = copy.copy(ri_obj.import_targets) for ri2_id in ri_obj.routing_instances: ri2 = RoutingInstanceDM.get(ri2_id) if ri2 is None: continue import_set |= ri2.export_targets if vn_obj.forwarding_mode in ['l2', 'l2_l3']: irb_ips = None if vn_obj.forwarding_mode == 'l2_l3': irb_ips = vn_irb_ip_map.get(vn_id, []) self.config_manager.add_routing_instance(vrf_name_l2, True, vn_obj.forwarding_mode == 'l2_l3', import_set, export_set, vn_obj.get_prefixes(), irb_ips, vn_obj.router_external, interfaces, vn_obj.vxlan_vni, None, 
vn_obj.vn_network_id) if vn_obj.forwarding_mode in ['l3', 'l2_l3']: self.config_manager.add_routing_instance(vrf_name_l3, False, vn_obj.forwarding_mode == 'l2_l3', import_set, export_set, vn_obj.get_prefixes(), None, vn_obj.router_external) break if export_set is not None and self.is_junos_service_ports_enabled() and len(vn_obj.instance_ip_map) > 0: service_port_id = 2*vn_obj.vn_network_id - 1 if self.is_service_port_id_valid(service_port_id) == False: self._logger.error("DM can't allocate service interfaces for \ (vn, vn-id)=(%s,%s)" % (vn_obj.fq_name, vn_obj.vn_network_id)) else: vrf_name = vrf_name_l3[:123] + '-nat' interfaces = [] service_ports = self.junos_service_ports.get('service_port') interfaces.append(service_ports[0] + "." + str(service_port_id)) interfaces.append(service_ports[0] + "." + str(service_port_id + 1)) self.config_manager.add_routing_instance(vrf_name, False, False, import_set, set(), None, None, False, interfaces, None, vn_obj.instance_ip_map, vn_obj.vn_network_id) self.config_manager.send_bgp_config() self.uve_send() # end push_config def is_service_port_id_valid(self, service_port_id): #mx allowed ifl unit number range is (1, 16385) for service ports if service_port_id < 1 or service_port_id > 16384: return False return True #end is_service_port_id_valid def uve_send(self, deleted=False): pr_trace = UvePhysicalRouterConfig(name=self.name, ip_address=self.management_ip, connected_bgp_router=self.bgp_router, auto_conf_enabled=self.vnc_managed, product_info=self.vendor + ':' + self.product) if deleted: pr_trace.deleted = True pr_msg = UvePhysicalRouterConfigTrace(data=pr_trace, sandesh=PhysicalRouterDM._sandesh) pr_msg.send(sandesh=PhysicalRouterDM._sandesh) return commit_stats = self.config_manager.get_commit_stats() if commit_stats['netconf_enabled'] is True: pr_trace.last_commit_time = commit_stats['last_commit_time'] pr_trace.last_commit_duration = commit_stats['last_commit_duration'] pr_trace.commit_status_message = commit_stats['commit_status_message'] pr_trace.total_commits_sent_since_up = commit_stats['total_commits_sent_since_up'] else: pr_trace.netconf_enabled_status = commit_stats['netconf_enabled_status'] pr_msg = UvePhysicalRouterConfigTrace(data=pr_trace, sandesh=PhysicalRouterDM._sandesh) pr_msg.send(sandesh=PhysicalRouterDM._sandesh) # end uve_send # end PhysicalRouterDM class GlobalVRouterConfigDM(DBBaseDM): _dict = {} obj_type = 'global_vrouter_config' global_vxlan_id_mode = None def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) new_global_vxlan_id_mode = obj.get('vxlan_network_identifier_mode') if GlobalVRouterConfigDM.global_vxlan_id_mode != new_global_vxlan_id_mode: GlobalVRouterConfigDM.global_vxlan_id_mode = new_global_vxlan_id_mode self.update_physical_routers() # end update def update_physical_routers(self): for vn in VirtualNetworkDM.values(): vn.set_vxlan_vni() for pr in PhysicalRouterDM.values(): pr.set_config_state() #end update_physical_routers @classmethod def is_global_vxlan_id_mode_auto(cls): if cls.global_vxlan_id_mode is not None and cls.global_vxlan_id_mode == 'automatic': return True return False @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] # end delete # end GlobalVRouterConfigDM class GlobalSystemConfigDM(DBBaseDM): _dict = {} obj_type = 'global_system_config' global_asn = None ip_fabric_subnets = None def __init__(self, uuid, obj_dict=None): self.uuid = uuid 
self.physical_routers = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) GlobalSystemConfigDM.global_asn = obj.get('autonomous_system') GlobalSystemConfigDM.ip_fabric_subnets = obj.get('ip_fabric_subnets') self.set_children('physical_router', obj) # end update @classmethod def get_global_asn(cls): return cls.global_asn @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] # end delete # end GlobalSystemConfigDM class PhysicalInterfaceDM(DBBaseDM): _dict = {} obj_type = 'physical_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) pr = PhysicalRouterDM.get(self.physical_router) if pr: pr.physical_interfaces.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.physical_router = self.get_parent_uuid(obj) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] pr = PhysicalRouterDM.get(obj.physical_router) if pr: pr.physical_interfaces.discard(obj.uuid) del cls._dict[uuid] # end delete # end PhysicalInterfaceDM class LogicalInterfaceDM(DBBaseDM): _dict = {} obj_type = 'logical_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interface = None self.update(obj_dict) if self.physical_interface: parent = PhysicalInterfaceDM.get(self.physical_interface) elif self.physical_router: parent = PhysicalRouterDM.get(self.physical_router) if parent: parent.logical_interfaces.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) if obj['parent_type'] == 'physical-router': self.physical_router = self.get_parent_uuid(obj) self.physical_interface = None else: self.physical_interface = self.get_parent_uuid(obj) self.physical_router = None self.update_single_ref('virtual_machine_interface', obj) self.name = obj['fq_name'][-1] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] if obj.physical_interface: parent = PhysicalInterfaceDM.get(obj.physical_interface) elif obj.physical_router: parent = PhysicalInterfaceDM.get(obj.physical_router) if parent: parent.logical_interfaces.discard(obj.uuid) obj.update_single_ref('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end LogicalInterfaceDM class FloatingIpDM(DBBaseDM): _dict = {} obj_type = 'floating_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interface = None self.floating_ip_address = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.floating_ip_address = obj.get("floating_ip_address") self.public_network = self.get_pool_public_network(self.get_parent_uuid(obj)) self.update_single_ref('virtual_machine_interface', obj) # end update def get_pool_public_network(self, pool_uuid): pool_obj = self.read_obj(pool_uuid, "floating_ip_pool") if pool_obj is None: return None return self.get_parent_uuid(pool_obj) # end get_pool_public_network @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('virtual_machine_interface', {}) del cls._dict[uuid] # end delete #end FloatingIpDM class InstanceIpDM(DBBaseDM): _dict = {} obj_type = 'instance_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid 
self.instance_ip_address = None self.virtual_machine_interface = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.instance_ip_address = obj.get("instance_ip_address") self.update_single_ref('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('virtual_machine_interface', {}) del cls._dict[uuid] # end delete #end InstanceIpDM class VirtualMachineInterfaceDM(DBBaseDM): _dict = {} obj_type = 'virtual_machine_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_network = None self.floating_ip = None self.instance_ip = None self.logical_interface = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.device_owner = obj.get("virtual_machine_interface_device_owner") self.update_single_ref('logical_interface', obj) self.update_single_ref('virtual_network', obj) self.update_single_ref('floating_ip', obj) self.update_single_ref('instance_ip', obj) # end update def is_device_owner_bms(self): if not self.device_owner or self.device_owner.lower() == 'physicalrouter': return True return False #end @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('logical_interface', {}) obj.update_single_ref('virtual_network', {}) obj.update_single_ref('floating_ip', {}) obj.update_single_ref('instance_ip', {}) del cls._dict[uuid] # end delete # end VirtualMachineInterfaceDM class VirtualNetworkDM(DBBaseDM): _dict = {} obj_type = 'virtual_network' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.physical_routers = set() self.router_external = False self.vxlan_vni = None self.forwarding_mode = None self.gateways = None self.instance_ip_map = {} self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.update_multiple_refs('physical_router', obj) self.fq_name = obj['fq_name'] try: self.router_external = obj['router_external'] except KeyError: self.router_external = False self.vn_network_id = obj.get('virtual_network_network_id') self.set_vxlan_vni(obj) self.forwarding_mode = self.get_forwarding_mode(obj) self.routing_instances = set([ri['uuid'] for ri in obj.get('routing_instances', [])]) self.virtual_machine_interfaces = set( [vmi['uuid'] for vmi in obj.get('virtual_machine_interface_back_refs', [])]) self.gateways = {} for ipam_ref in obj.get('network_ipam_refs', []): for subnet in ipam_ref['attr'].get('ipam_subnets', []): prefix = subnet['subnet']['ip_prefix'] prefix_len = subnet['subnet']['ip_prefix_len'] self.gateways[prefix + '/' + str(prefix_len)] = \ subnet.get('default_gateway', '') # end update def get_prefixes(self): return set(self.gateways.keys()) #end get_prefixes def get_vrf_name(self, vrf_type): #this function must be called only after vn gets its vn_id if self.vn_network_id is None: self._logger.error("network id is null for vn: %s" % (self.fq_name[-1])) return '_contrail_' + vrf_type + '_' + self.fq_name[-1] if vrf_type is None: self._logger.error("vrf type can't be null : %s" % (self.fq_name[-1])) vrf_name = '_contrail_' + str(self.vn_network_id) + '_' + self.fq_name[-1] else: vrf_name = '_contrail_' + vrf_type + '_' + str(self.vn_network_id) + '_' + self.fq_name[-1] #mx has limitation for vrf name, allowed max 127 chars return vrf_name[:127] #end def set_vxlan_vni(self, obj=None): self.vxlan_vni 
= None if obj is None: obj = self.read_obj(self.uuid) if GlobalVRouterConfigDM.is_global_vxlan_id_mode_auto(): self.vxlan_vni = obj.get('virtual_network_network_id') else: try: prop = obj['virtual_network_properties'] if prop['vxlan_network_identifier'] is not None: self.vxlan_vni = prop['vxlan_network_identifier'] except KeyError: pass #end set_vxlan_vni def get_forwarding_mode(self, obj): default_mode = 'l2_l3' prop = obj.get('virtual_network_properties') if prop: return prop.get('forwarding_mode', default_mode) return default_mode #end get_forwarding_mode def update_instance_ip_map(self): self.instance_ip_map = {} for vmi_uuid in self.virtual_machine_interfaces: vmi = VirtualMachineInterfaceDM.get(vmi_uuid) if vmi is None or vmi.is_device_owner_bms() == False: continue if vmi.floating_ip is not None and vmi.instance_ip is not None: fip = FloatingIpDM.get(vmi.floating_ip) inst_ip = InstanceIpDM.get(vmi.instance_ip) if fip is None or inst_ip is None: continue instance_ip = inst_ip.instance_ip_address floating_ip = fip.floating_ip_address public_vn = VirtualNetworkDM.get(fip.public_network) if public_vn is None or public_vn.vn_network_id is None: continue public_vrf_name = public_vn.get_vrf_name(vrf_type='l3') self.instance_ip_map[instance_ip] = {'floating_ip': floating_ip, 'vrf_name': public_vrf_name} # end update_instance_ip_map @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('physical_router', {}) del cls._dict[uuid] # end delete # end VirtualNetworkDM class RoutingInstanceDM(DBBaseDM): _dict = {} obj_type = 'routing_instance' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_network = None self.import_targets = set() self.export_targets = set() self.routing_instances = set() self.update(obj_dict) vn = VirtualNetworkDM.get(self.virtual_network) if vn: vn.routing_instances.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.fq_name = obj['fq_name'] self.virtual_network = self.get_parent_uuid(obj) self.import_targets = set() self.export_targets = set() for rt_ref in obj.get('route_target_refs', []): rt_name = rt_ref['to'][0] exim = rt_ref.get('attr').get('import_export') if exim == 'export': self.export_targets.add(rt_name) elif exim == 'import': self.import_targets.add(rt_name) else: self.import_targets.add(rt_name) self.export_targets.add(rt_name) self.update_multiple_refs('routing_instance', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] vn = VirtualNetworkDM.get(obj.virtual_network) if vn: vn.routing_instances.discard(obj.uuid) del cls._dict[uuid] # end delete # end RoutingInstanceDM class DMCassandraDB(VncCassandraClient): _KEYSPACE = 'dm_keyspace' _PR_VN_IP_CF = 'dm_pr_vn_ip_table' dm_cassandra_instance = None @classmethod def getInstance(cls, manager): if cls.dm_cassandra_instance == None: cls.dm_cassandra_instance = DMCassandraDB(manager) return cls.dm_cassandra_instance #end def __init__(self, manager): self._manager = manager self._args = manager._args if self._args.cluster_id: self._keyspace = '%s_%s' % (self._args.cluster_id, self._KEYSPACE) else: self._keyspace = self._KEYSPACE keyspaces = { self._keyspace: [(self._PR_VN_IP_CF, None)]} cass_server_list = self._args.cassandra_server_list cred = None if self._args.cassandra_user is not None and \ self._args.cassandra_password is not None: cred={'username':self._args.cassandra_user, 'password':self._args.cassandra_password} 
super(DMCassandraDB, self).__init__( cass_server_list, self._args.cluster_id, keyspaces, manager.config_log, credential=cred) self.pr_vn_ip_map = {} self.init_pr_map() #end def init_pr_map(self): cf = self.get_cf(self._PR_VN_IP_CF) keys = dict(cf.get_range(column_count=0,filter_empty=False)).keys() for key in keys: (pr_uuid, vn_subnet_uuid) = key.split(':', 1) self.add_to_pr_map(pr_uuid, vn_subnet_uuid) #end def add_to_pr_map(self, pr_uuid, vn_subnet): if pr_uuid in self.pr_vn_ip_map: self.pr_vn_ip_map[pr_uuid].add(vn_subnet) else: self.pr_vn_ip_map[pr_uuid] = set() self.pr_vn_ip_map[pr_uuid].add(vn_subnet) #end def delete_from_pr_map(self, pr_uuid, vn_subnet): if pr_uuid in self.pr_vn_ip_map: self.pr_vn_ip_map[pr_uuid].remove(vn_subnet) if not self.pr_vn_ip_map[pr_uuid]: del self.pr_vn_ip_map[pr_uuid] #end def delete_pr(self, pr_uuid): vn_subnet_set = self.pr_vn_ip_map.get(pr_uuid, set()) for vn_subnet in vn_subnet_set: ret = self.delete(self._PR_VN_IP_CF, pr_uuid + ':' + vn_subnet) if ret == False: self._logger.error("Unable to free ip from db for vn/pr/subnet \ (%s/%s)" %(pr_uuid, vn_subnet)) #end def handle_pr_deletes(self, current_pr_set): cs_pr_set = set(self.pr_vn_ip_map.keys()) delete_set = cs_pr_set.difference(current_pr_set) for pr_uuid in delete_set: self.delete_pr(vn_uuid) #end def get_pr_vn_set(self, pr_uuid): return self.pr_vn_ip_map.get(pr_uuid, set()) #end @classmethod def get_db_info(cls): db_info = [(cls._KEYSPACE, [cls._PR_VN_IP_CF])] return db_info # end get_db_info #end
apache-2.0
-9,041,703,776,330,975,000
36.959077
116
0.532467
false
qtproject/pyside-pyside
tests/QtWidgets/qtabwidget_test.py
1
1910
#############################################################################
##
## Copyright (C) 2016 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the test suite of PySide2.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################

import unittest

from PySide2.QtWidgets import *

from helper import TimedQApplication


def makeBug643(tab):
    button = QPushButton('Foo')
    tab.insertTab(0, button, 'Foo')


class RemoveTabMethod(TimedQApplication):
    def setUp(self):
        TimedQApplication.setUp(self)
        self.tab = QTabWidget()

    def tearDown(self):
        del self.tab
        TimedQApplication.tearDown(self)

    def testRemoveTabPresence(self):
        self.assertTrue(getattr(self.tab, 'removeTab'))

    def testInsertTab(self):
        makeBug643(self.tab)
        self.assertEqual(self.tab.count(), 1)


if __name__ == '__main__':
    unittest.main()
lgpl-2.1
-3,240,639,572,649,791,000
33.107143
77
0.662304
false
AMOboxTV/AMOBox.LegoBuild
plugin.video.salts/salts_lib/constants.py
1
7791
""" SALTS XBMC Addon Copyright (C) 2014 tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ def __enum(**enums): return type('Enum', (), enums) MODES = __enum( MAIN='main', BROWSE='browse', TRENDING='trending', RECOMMEND='recommend', CAL='calendar', MY_CAL='my_calendar', MY_LISTS='lists', SEARCH='search', SEASONS='seasons', EPISODES='episodes', GET_SOURCES='get_sources', MANAGE_SUBS='manage_subs', GET_LIST='get_list', SET_URL_MANUAL='set_url_manual', SET_URL_SEARCH='set_url_search', SHOW_FAVORITES='browse_favorites', SHOW_WATCHLIST='browse_watchlist', PREMIERES='premiere_calendar', SHOW_LIST='show_list', OTHER_LISTS='other_lists', ADD_OTHER_LIST='add_other_list', PICK_SUB_LIST='pick_sub_list', PICK_FAV_LIST='pick_fav_list', UPDATE_SUBS='update_subs', CLEAN_SUBS='clean_subs', SET_SUB_LIST='set_sub_list', SET_FAV_LIST='set_fav_list', REM_FROM_LIST='rem_from_list', ADD_TO_LIST='add_to_list', ADD_TO_LIBRARY='add_to_library', SCRAPERS='scrapers', TOGGLE_SCRAPER='toggle_scraper', RESET_DB='reset_db', FLUSH_CACHE='flush_cache', RESOLVE_SOURCE='resolve_source', SEARCH_RESULTS='search_results', MOVE_SCRAPER='scraper_move', EDIT_TVSHOW_ID='edit_id', SELECT_SOURCE='select_source', SHOW_COLLECTION='show_collection', SHOW_PROGRESS='show_progress', PLAY_TRAILER='play_trailer', RENAME_LIST='rename_list', EXPORT_DB='export_db', IMPORT_DB='import_db', COPY_LIST='copy_list', REMOVE_LIST='remove_list', ADD_TO_COLL='add_to_collection', TOGGLE_WATCHED='toggle_watched', RATE='rate', FORCE_REFRESH='force_refresh', TOGGLE_TITLE='toggle_title', RES_SETTINGS='resolver_settings', ADDON_SETTINGS='addon_settings', TOGGLE_ALL='toggle_all', MOVE_TO='move_to', REM_FROM_COLL='rem_from_collection', URL_EXISTS='url_exists', RECENT_SEARCH='recent_search', SAVED_SEARCHES='saved_searches', SAVE_SEARCH='save_search', DELETE_SEARCH='delete_search', SET_VIEW='set_view', SETTINGS='settings', SHOW_VIEWS='show_views', BROWSE_VIEW='browse_view', BROWSE_URLS='browse_urls', DELETE_URL='delete_url', DOWNLOAD_SOURCE='download_source', DIRECT_DOWNLOAD='direct_download', POPULAR='popular', RECENT='recent', DELETE_RECENT='delete_recent', CLEAR_RECENT='clear_recent', AUTH_TRAKT='auth_trakt', AUTO_CONF='auto_config', CLEAR_SAVED='clear_saved', RESET_BASE_URL='reset_base_url', TOGGLE_TO_MENU='toggle_to_menu', LIKED_LISTS='liked_lists', MOSTS='mosts', PLAYED='played', WATCHED='watched', COLLECTED='collected', SHOW_BOOKMARKS='show_bookmarks', DELETE_BOOKMARK='delete_bookmark', SHOW_HISTORY='show_history', RESET_FAILS='reset_failures', MANAGE_PROGRESS='toggle_progress', AUTOPLAY='autoplay', INSTALL_THEMES='install_themes', RESET_REL_URLS='reset_rel_urls', ANTICIPATED='anticipated') SECTIONS = __enum(TV='TV', MOVIES='Movies') VIDEO_TYPES = __enum(TVSHOW='TV Show', MOVIE='Movie', EPISODE='Episode', SEASON='Season') CONTENT_TYPES = __enum(TVSHOWS='tvshows', MOVIES='movies', SEASONS='seasons', EPISODES='episodes', SOURCES='sources') TRAKT_SECTIONS = {SECTIONS.TV: 'shows', SECTIONS.MOVIES: 
'movies'} TRAKT_SORT = __enum(TITLE='title', ACTIVITY='activity', MOST_COMPLETED='most-completed', LEAST_COMPLETED='least-completed', RECENTLY_AIRED='recently-aired', PREVIOUSLY_AIRED='previously-aired') TRAKT_LIST_SORT = __enum(RANK='rank', RECENTLY_ADDED='added', TITLE='title', RELEASE_DATE='released', RUNTIME='runtime', POPULARITY='popularity', PERCENTAGE='percentage', VOTES='votes') TRAKT_SORT_DIR = __enum(ASCENDING='asc', DESCENDING='desc') SORT_MAP = [TRAKT_SORT.ACTIVITY, TRAKT_SORT.TITLE, TRAKT_SORT.MOST_COMPLETED, TRAKT_SORT.LEAST_COMPLETED, TRAKT_SORT.RECENTLY_AIRED, TRAKT_SORT.PREVIOUSLY_AIRED] QUALITIES = __enum(LOW='Low', MEDIUM='Medium', HIGH='High', HD720='HD720', HD1080='HD1080') DIRS = __enum(UP='up', DOWN='down') WATCHLIST_SLUG = 'watchlist_slug' COLLECTION_SLUG = 'collection_slug' SEARCH_HISTORY = 10 DEFAULT_EXT = '.mpg' CHUNK_SIZE = 512 * 1024 PROGRESS = __enum(OFF=0, WINDOW=1, BACKGROUND=2) FORCE_NO_MATCH = '***FORCE_NO_MATCH***' SHORT_MONS = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] ACTIONS = __enum(ADD='add', REMOVE='remove') TRIG_DB_UPG = False # sort keys need to be defined such that "best" have highest values # unknown (i.e. None) is always worst SORT_KEYS = {} SORT_KEYS['quality'] = {None: 0, QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5} SORT_LIST = ['none', 'source', 'quality', 'views', 'rating', 'direct', 'debrid'] SORT_SIGNS = {'0': -1, '1': 1} # 0 = Best to Worst; 1 = Worst to Best HOURS_LIST = {} HOURS_LIST[MODES.UPDATE_SUBS] = [.5, 1] + range(2, 25) LONG_AGO = '1970-01-01 23:59:00.000000' TEMP_ERRORS = [500, 502, 503, 504, 520, 521, 522, 524] SRT_SOURCE = 'addic7ed' DISABLE_SETTINGS = __enum(OFF='0', PROMPT='1', ON='2') BLOG_Q_MAP = {} BLOG_Q_MAP[QUALITIES.LOW] = [' CAM ', ' TS ', ' R6 ', 'CAMRIP'] BLOG_Q_MAP[QUALITIES.MEDIUM] = ['-XVID', '-MP4', 'MEDIUM'] BLOG_Q_MAP[QUALITIES.HIGH] = ['HDRIP', 'DVDRIP', 'BRRIP', 'BDRIP', '480P', 'HDTV'] BLOG_Q_MAP[QUALITIES.HD720] = ['720', 'HDTS', ' HD '] BLOG_Q_MAP[QUALITIES.HD1080] = ['1080'] HOST_Q = {} HOST_Q[QUALITIES.LOW] = ['youwatch', 'allmyvideos', 'played.to', 'gorillavid'] HOST_Q[QUALITIES.MEDIUM] = ['primeshare', 'exashare', 'bestreams', 'flashx', 'vidto', 'vodlocker', 'vidzi', 'vidbull', 'realvid', 'nosvideo', 'daclips', 'sharerepo', 'zalaa', 'filehoot', 'vshare.io'] HOST_Q[QUALITIES.HIGH] = ['vidspot', 'mrfile', 'divxstage', 'streamcloud', 'mooshare', 'novamov', 'mail.ru', 'vid.ag', 'thevideo'] HOST_Q[QUALITIES.HD720] = ['thefile', 'sharesix', 'filenuke', 'vidxden', 'movshare', 'nowvideo', 'vidbux', 'streamin.to', 'allvid.ch'] HOST_Q[QUALITIES.HD1080] = ['hugefiles', '180upload', 'mightyupload', 'videomega', 'allmyvideos'] Q_ORDER = {QUALITIES.LOW: 1, QUALITIES.MEDIUM: 2, QUALITIES.HIGH: 3, QUALITIES.HD720: 4, QUALITIES.HD1080: 5} IMG_SIZES = ['full', 'medium', 'thumb'] USER_AGENT = "Mozilla/5.0 (compatible, MSIE 11, Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko" BR_VERS = [ ['%s.0' % i for i in xrange(18, 43)], ['37.0.2062.103', '37.0.2062.120', '37.0.2062.124', '38.0.2125.101', '38.0.2125.104', '38.0.2125.111', '39.0.2171.71', '39.0.2171.95', '39.0.2171.99', '40.0.2214.93', '40.0.2214.111', '40.0.2214.115', '42.0.2311.90', '42.0.2311.135', '42.0.2311.152', '43.0.2357.81', '43.0.2357.124', '44.0.2403.155', '44.0.2403.157', '45.0.2454.101', '45.0.2454.85', '46.0.2490.71', '46.0.2490.80', '46.0.2490.86', '47.0.2526.73', '47.0.2526.80'], ['11.0']] WIN_VERS = ['Windows NT 10.0', 'Windows NT 7.0', 'Windows NT 6.3', 
'Windows NT 6.2', 'Windows NT 6.1', 'Windows NT 6.0', 'Windows NT 5.1', 'Windows NT 5.0'] FEATURES = ['; WOW64', '; Win64; IA64', '; Win64; x64', ''] RAND_UAS = ['Mozilla/5.0 ({win_ver}{feature}; rv:{br_ver}) Gecko/20100101 Firefox/{br_ver}', 'Mozilla/5.0 ({win_ver}{feature}) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/{br_ver} Safari/537.36', 'Mozilla/5.0 ({win_ver}{feature}; Trident/7.0; rv:{br_ver}) like Gecko']
gpl-2.0
5,276,069,409,270,380,000
71.813084
187
0.671416
false
woodem/woo
py/remote.py
1
7933
# encoding: utf-8 # 2008-2009 © Václav Šmilauer <[email protected]> """ Remote connections to woo: authenticated python command-line over telnet and anonymous socket for getting some read-only information about current simulation. These classes are used internally in gui/py/PythonUI_rc.py and are not intended for direct use. """ import socketserver,xmlrpc.client,socket import sys,time,os,math useQThread=False "Set before using any of our classes to use QThread for background execution instead of the standard thread module. Mixing the two (in case the qt UI is running, for instance) does not work well." plotImgFormat,plotImgMimetype='png','image/png' #plotImgFormat,plotImgMimetype='svg','image/svg+xml' bgThreads=[] # needed to keep background threads alive class InfoProvider(object): def basicInfo(self): import woo S=woo.master.scene ret=dict(step=S.step,dt=S.dt,stopAtStep=S.stopAtStep,stopAtTime=S.stopAtTime,time=S.time,id=S.tags['id'] if 'id' in S.tags else None,title=S.tags['title'] if 'title' in S.tags else None,threads=woo.master.numThreads,numBodies=(len(S.dem.par) if S.hasDem else -1),numIntrs=(len(S.dem.con) if S.hasDem else -1),PID=os.getpid()) sys.stdout.flush(); sys.stderr.flush() return ret def plot(self): try: import woo S=woo.master.scene if len(S.plot.plots)==0: return None fig=S.plot.plot(subPlots=True,noShow=True)[0] img=woo.master.tmpFilename()+'.'+plotImgFormat sqrtFigs=math.sqrt(len(S.plot.plots)) fig.set_size_inches(5*sqrtFigs,7*sqrtFigs) fig.savefig(img) f=open(img,'rb'); data=f.read(); f.close(); os.remove(img) # print 'returning %s (%d bytes read)'%(plotImgFormat,len(data)) return xmlrpc.client.Binary(data) except: print('Error updating plots:') import traceback traceback.print_exc() return None class PythonConsoleSocketEmulator(socketserver.BaseRequestHandler): """Class emulating python command-line over a socket connection. The connection is authenticated by requiring a cookie. Only connections from localhost (127.0.0.*) are allowed. """ def setup(self): if not self.client_address[0].startswith('127.0.0'): print("TCP Connection from non-127.0.0.* address %s rejected"%self.client_address[0]) return print(self.client_address, 'connected!') self.request.send('Enter auth cookie: ') def tryLogin(self): if self.request.recv(1024).rstrip()==self.server.cookie: self.server.authenticated+=[self.client_address] self.request.send("Woo / TCP\n(connected from %s:%d)\n>>>"%(str(self.client_address[0]),self.client_address[1])) return True else: import time time.sleep(5) print("invalid cookie") return False def displayhook(self,s): import pprint self.request.send(pprint.pformat(s)) def handle(self): if self.client_address not in self.server.authenticated and not self.tryLogin(): return import code,io,traceback buf=[] while True: data = self.request.recv(1024).rstrip() if data=='\x04' or data=='exit' or data=='quit': # \x04 == ^D return buf.append(data) orig_displayhook,orig_stdout=sys.displayhook,sys.stdout sio=io.StringIO() continuation=False #print "buffer:",buf try: comp=code.compile_command('\n'.join(buf)) if comp: sys.displayhook=self.displayhook sys.stdout=sio exec(comp) self.request.send(sio.getvalue()) buf=[] else: self.request.send('... 
'); continuation=True except: self.request.send(traceback.format_exc()) buf=[] finally: sys.displayhook,sys.stdout=orig_displayhook,orig_stdout if not continuation: self.request.send('\n>>> ') def finish(self): print(self.client_address, 'disconnected!') self.request.send('\nBye ' + str(self.client_address) + '\n') def _runInBackground(func): if useQThread: import woo.config from PyQt5.QtCore import QThread class WorkerThread(QThread): def __init__(self,func_): QThread.__init__(self); self.func=func_ def run(self): self.func() wt=WorkerThread(func) wt.start() global bgThreads; bgThreads.append(wt) else: import _thread; _thread.start_new_thread(func,()) class GenericTCPServer(object): "Base class for socket server, handling port allocation, initial logging and thead backgrounding." def __init__(self,handler,title,cookie=True,minPort=9000,host='',maxPort=65536,background=True): import socket, random, sys self.port=-1 self.host=host tryPort=minPort if maxPort==None: maxPort=minPort while self.port==-1 and tryPort<=maxPort: try: self.server=socketserver.ThreadingTCPServer((host,tryPort),handler) self.port=tryPort if cookie: self.server.cookie=''.join([i for i in random.sample('woosucks',6)]) self.server.authenticated=[] sys.stderr.write(title+" on %s:%d, auth cookie `%s'\n"%(host if host else 'localhost',self.port,self.server.cookie)) else: sys.stderr.write(title+" on %s:%d\n"%(host if host else 'localhost',self.port)) if background: _runInBackground(self.server.serve_forever) else: self.server.serve_forever() except socket.error: tryPort+=1 if self.port==-1: raise RuntimeError("No free port to listen on in range %d-%d"%(minPort,maxPort)) def runServers(xmlrpc=False,tcpPy=False): """Run python telnet server and info socket. They will be run at localhost on ports 9000 (or higher if used) and 21000 (or higer if used) respectively. The python telnet server accepts only connection from localhost, after authentication by random cookie, which is printed on stdout at server startup. The info socket provides read-only access to several simulation parameters at runtime. Each connection receives pickled dictionary with those values. This socket is primarily used by woo-multi batch scheduler. """ if tcpPy: import woo.runtime srv=GenericTCPServer(handler=woo.remote.PythonConsoleSocketEmulator,title='TCP python prompt',cookie=True,minPort=9000) woo.runtime.cookie=srv.server.cookie if xmlrpc: from xmlrpc.server import SimpleXMLRPCServer port,maxPort=21000,65535 # minimum port number while port<maxPort: try: info=SimpleXMLRPCServer(('',port),logRequests=False,allow_none=True); break except socket.error: port+=1 if port==maxPort: raise RuntimeError("No free port to listen on in range 21000-%d"%maxPort) # register methods, as per http://docs.python.org/library/simplexmlrpcserver.html#simplexmlrpcserver-example info.register_instance(InfoProvider()) # gets all defined methods by introspection #prov=InfoProvider() #for m in prov.exposedMethods(): info.register_function(m) _runInBackground(info.serve_forever) print('XMLRPC info provider on http://localhost:%d'%port) sys.stdout.flush() #if __name__=='__main__': # p=GenericTCPServer(PythonConsoleSocketEmulator,'Python TCP server',background=False) # #while True: time.sleep(2)
gpl-2.0
5,875,989,687,096,059,000
42.565934
333
0.627696
false
GoogleCloudPlatform/PerfKitBenchmarker
perfkitbenchmarker/linux_packages/kernel_compile.py
1
1306
# Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from perfkitbenchmarker import linux_packages

URL = 'https://www.kernel.org/pub/linux/kernel/v4.x/linux-4.4.25.tar.gz'
TARBALL = 'linux-4.4.25.tar.gz'
UNTAR_DIR = 'linux-4.4.25'
KERNEL_TARBALL = os.path.join(linux_packages.INSTALL_DIR, TARBALL)


def _Install(vm):
  vm.Install('build_tools')
  vm.Install('wget')
  vm.InstallPackages('bc')
  vm.RemoteCommand('mkdir -p {0} && '
                   'cd {0} && wget {1}'.format(linux_packages.INSTALL_DIR,
                                               URL))


def AptInstall(vm):
  _Install(vm)


def YumInstall(vm):
  _Install(vm)


def Cleanup(vm):
  vm.RemoteCommand('cd {} && rm -f {}'.format(linux_packages.INSTALL_DIR,
                                              TARBALL))
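# --- Illustrative usage sketch (hypothetical, not part of the upstream file). ---
# PerfKitBenchmarker normally passes a virtual-machine object exposing
# Install/InstallPackages/RemoteCommand; the stand-in below simply records the
# calls so the install/cleanup flow above can be seen without a real VM. It
# assumes the perfkitbenchmarker package is importable for linux_packages.
class _FakeVm(object):

  def __init__(self):
    self.commands = []

  def Install(self, package):
    self.commands.append('install ' + package)

  def InstallPackages(self, packages):
    self.commands.append('install packages ' + packages)

  def RemoteCommand(self, cmd):
    self.commands.append(cmd)


if __name__ == '__main__':
  vm = _FakeVm()
  AptInstall(vm)  # stages the kernel tarball under linux_packages.INSTALL_DIR
  Cleanup(vm)     # removes the tarball again
  print('\n'.join(vm.commands))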
apache-2.0
-7,830,537,716,163,203,000
29.372093
80
0.691424
false
patrickwestphal/owlapy
owlapy/vocab/owlfacet.py
1
2515
from enum import Enum

from .namespaces import Namespaces
from owlapy.model import IRI
from owlapy.util.decorators import ClassProperty


class OWLFacet(Enum):
    LENGTH = (Namespaces.XSD, 'length', 'length')
    MIN_LENGTH = (Namespaces.XSD, 'minLength', 'minLength')
    MAX_LENGTH = (Namespaces.XSD, 'maxLength', 'maxLength')
    PATTERN = (Namespaces.XSD, 'pattern', 'pattern')
    MIN_INCLUSIVE = (Namespaces.XSD, 'minInclusive', '>=')
    MIN_EXCLUSIVE = (Namespaces.XSD, 'minExclusive', '>')
    MAX_INCLUSIVE = (Namespaces.XSD, 'maxInclusive', '<=')
    MAX_EXCLUSIVE = (Namespaces.XSD, 'maxExclusive', '<')
    TOTAL_DIGITS = (Namespaces.XSD, 'totalDigits', 'totalDigits')
    FRACTION_DIGITS = (Namespaces.XSD, 'fractionDigits', 'fractionDigits')
    LANG_RANGE = (Namespaces.RDF, 'langRange', 'langRange')

    def __init__(self, ns, short_form, symbolic_form):
        """
        :param ns: an owlapy.vocab.namespaces.Namespaces object
        :param short_form: a string containing the short form
        :param symbolic_form: a string containing the symbolic form
        :return:
        """
        self.iri = IRI(str(ns), short_form)
        self.short_form = short_form
        self.symbolic_form = symbolic_form
        self.prefixed_name = ns.prefix_name + ':' + short_form

    @ClassProperty
    @classmethod
    def FACET_IRIS(cls):
        if not hasattr(cls, '_FACET_IRIS'):
            cls._FACET_IRIS = set()
            for facet in cls:
                cls._FACET_IRIS.add(facet.iri)
        return cls._FACET_IRIS

    @classmethod
    def get_facet(cls, iri):
        """
        :param iri: an owlapy.model.IRI object
        """
        for vocabulary in cls:
            if vocabulary.iri == iri:
                return vocabulary

    @classmethod
    def get_facet_by_short_name(cls, short_form):
        """
        :param short_form: a string containing the short name
        """
        for vocabulary in cls:
            if vocabulary.short_form == short_form:
                return vocabulary

    @classmethod
    def get_facet_by_symbolic_name(cls, symbolic_form):
        for vocabulary in cls:
            if vocabulary.symbolic_form == symbolic_form:
                return vocabulary

    @classmethod
    def get_facets(cls):
        """
        :return: a set of strings containing the symbolic form of the
            defined facets
        """
        result = set()
        for facet in cls:
            result.add(facet.symbolic_form)
        return result
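# --- Illustrative usage sketch (hypothetical, not part of the upstream file). ---
# Assuming the owlapy package (Namespaces, IRI) is importable, this shows how
# the lookup helpers above resolve the same facet by symbolic form, short form
# and IRI, and how to list every known symbolic form.
if __name__ == '__main__':
    by_symbol = OWLFacet.get_facet_by_symbolic_name('>=')
    by_short = OWLFacet.get_facet_by_short_name('minInclusive')
    by_iri = OWLFacet.get_facet(by_symbol.iri)
    assert by_symbol is by_short is by_iri is OWLFacet.MIN_INCLUSIVE

    print(sorted(OWLFacet.get_facets()))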
gpl-3.0
-992,769,188,573,107,000
31.675325
77
0.603579
false
twotwo/tools-python
git-filter-branch/main.py
1
5551
# -*- coding: utf-8 -*-
############################################################
#
#   Read & Modify commits of a Git Repository
#
############################################################
import os
import sys
import argparse
import subprocess
import datetime
from git import Repo, RefLog
import json

from loguru_helper import message_logger, event_logger, emit_logger


class GitModifier(object):

    def __init__(self, repo_path):
        self.repo = Repo(repo_path)

    @staticmethod
    def format(obj):
        """
        refer to https://git-scm.com/docs/git-commit-tree#_commit_information
        """
        template = """
        if test "$GIT_COMMIT" = "{commit_id}"
        then
            GIT_AUTHOR_NAME="{user}"
            GIT_AUTHOR_EMAIL="{email}"
            GIT_AUTHOR_DATE="{date}"
            GIT_COMMITTER_NAME="{user}"
            GIT_COMMITTER_EMAIL="{email}"
            GIT_COMMITTER_DATE="{date}"
        fi
        """
        return template.format(commit_id=obj.get('id'),
                               date=obj.get('date'),
                               user=obj.get('author').get('name'),
                               email=obj.get('author').get('email'))

    @staticmethod
    def filter_branch(path: str, msg: str, verbose=False):
        """
        https://git-scm.com/docs/git-filter-branch
        """
        cmd = f"git -C {path} filter-branch -f --env-filter {msg}"
        if verbose:
            event_logger.info(f"executing...\n{cmd}")
        return GitModifier.execute(cmd)

    @staticmethod
    def execute(cmd: str) -> str:
        """Execute a shell command, get result
        """
        subprocess.run(cmd, shell=True, check=True)
        return ''
        # with subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) as proc:
        #     lines = proc.stdout.readlines()
        #     return '\n'.join([line.decode("utf-8").strip() for line in lines])


def chunks(l: list, n: int):
    """
    Yield successive n-sized chunks from l.
    :param l: list to divide
    :param n: size of each chunk
    """
    for i in range(0, len(l), n):
        yield l[i:i + n]


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Console2RabbitMQ')
    parser.add_argument('-p', dest='path', type=str, default="/var/lib/repo")
    parser.add_argument('--export', dest='export', action='store_true')
    parser.add_argument('--modify', dest='modify', action='store_true')
    parser.add_argument('--verbose', dest='verbose', action='store_true')
    parser.add_argument('-r', dest='range', type=int, default=20)
    parser.add_argument('-f', dest='file', type=str, default="commits.json")
    parser.add_argument('-m', dest='match', type=str, default=None,
                        help="matching email address to change")
    parser.add_argument('-e', dest='email', type=str, default=None,
                        help="change matching email to this email")
    parser.add_argument('-n', dest='name', type=str, default=None,
                        help="change matching email to this name")
    args = parser.parse_args()
    # event_logger.warning(f'args={args}')

    if(args.export):
        repo = Repo(args.path)
        event_logger.opt(ansi=True).info(f'[{args.path}] begin export ...')
        if os.path.exists(args.file):
            event_logger.error(f"file [{args.file}] exist, cancel export")
            sys.exit(0)
        f = open(args.file, 'w')
        for log in repo.iter_commits():
            committer = log.committer
            obj = {'id': log.hexsha,
                   'author': {'email': committer.email, 'name': committer.name},
                   'date': str(log.authored_datetime),
                   'message': str(log.message.strip())}
            # emit_logger.info(f'{obj}')
            f.write(json.dumps(obj)+'\n')
            if args.verbose:
                emit_logger.opt(ansi=True).debug(
                    f'<level>{log.hexsha}</level>\t<cyan>{log.authored_datetime}</cyan>\t<blue>{committer.email}</blue>\t<green>{log.message.strip()}</green>')
        f.close()
        event_logger.opt(ansi=True).info(f'write to {args.file}')

    if(args.modify):
        envs = []
        event_logger.opt(ansi=True).info(f"read config [{args.file}]...")
        with open(args.file) as f:
            for line in f:
                obj = json.loads(line)
                if args.verbose:
                    event_logger.opt(ansi=True).info(
                        f"<level>{obj.get('id')}</level>\t<cyan>{obj.get('date')}</cyan>\t<blue>{obj.get('author').get('email')}</blue>\t<green>{obj.get('message')}</green>")
                envs.append(GitModifier.format(obj))

        event_logger.opt(ansi=True).info("begin filter-branch ...")
        for chunk in chunks(envs, args.range):
            emit_logger.opt(ansi=True).debug(
                GitModifier.filter_branch(args.path,
                                          f"'{''.join(chunk)}' -- --all",
                                          args.verbose))

    if(args.match):
        template = """git -C {path} filter-branch -f --env-filter '
        if test "$GIT_COMMITTER_EMAIL" = "{match_email}"
        then
            GIT_AUTHOR_NAME="{name}"
            GIT_AUTHOR_EMAIL="{email}"
            GIT_COMMITTER_NAME="{name}"
            GIT_COMMITTER_EMAIL="{email}"
        fi
        ' -- --all
        """
        command = template.format(
            path=args.path, match_email=args.match, name=args.name, email=args.email)
        if args.verbose:
            event_logger.info(f"executing...\n{command}")
        GitModifier.execute(command)
mit
9,088,962,088,859,095,000
36.255034
174
0.542064
false
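The two building blocks of the script above are the chunking generator and the templated env-filter snippet that is handed to git filter-branch in batches. A minimal, self-contained sketch of how they compose, using a made-up commit dict ('abc123', 'Alice', 'alice@example.com' are placeholders) and no git repository:

def chunks(seq, n):
    """Yield successive n-sized chunks from seq."""
    for i in range(0, len(seq), n):
        yield seq[i:i + n]

ENV_FILTER = '''
if test "$GIT_COMMIT" = "{commit_id}"
then
    GIT_AUTHOR_NAME="{user}"
    GIT_AUTHOR_EMAIL="{email}"
fi
'''

commit = {'id': 'abc123', 'author': {'name': 'Alice', 'email': 'alice@example.com'}}
snippet = ENV_FILTER.format(commit_id=commit['id'],
                            user=commit['author']['name'],
                            email=commit['author']['email'])

# Each batch of snippets would be joined into a single filter-branch invocation.
for batch in chunks([snippet] * 5, 2):
    print(len(batch), 'env-filter snippet(s) in this batch')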
Trust-Code/trust-addons
trust_base_data/__openerp__.py
1
1924
# -*- encoding: utf-8 -*- ############################################################################### # # # Copyright (C) 2015 Trustcode - www.trustcode.com.br # # # # This program is free software: you can redistribute it and/or modify # # it under the terms of the GNU Affero General Public License as published by # # the Free Software Foundation, either version 3 of the License, or # # (at your option) any later version. # # # # This program is distributed in the hope that it will be useful, # # but WITHOUT ANY WARRANTY; without even the implied warranty of # # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # # GNU General Public License for more details. # # # # You should have received a copy of the GNU General Public License # # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################### { 'name': 'Base Data - Trustcode', 'summary': """Base Data for Trustcode""", 'version': '8.0', 'category': 'Tools', 'author': 'Trustcode', 'license': 'AGPL-3', 'website': 'http://www.trustcode.com.br', 'contributors': ['Danimar Ribeiro <[email protected]>', 'Mackilem Van der Laan Soares <[email protected]>' ], 'depends': [ 'base', ], 'data': [ 'data/base_data.xml', ], 'application': True, 'auto_install': False }
agpl-3.0
473,508,669,122,369,600
48.333333
79
0.424116
false
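An Odoo __openerp__.py manifest like the one above is a literal Python dictionary, which is why tooling can read it without importing the module. A small sketch of that parsing step, using a trimmed inline string rather than the real file:

import ast

manifest_text = "{'name': 'Base Data - Trustcode', 'version': '8.0', 'depends': ['base']}"
manifest = ast.literal_eval(manifest_text)  # parses literals only, no code execution
print(manifest['name'], manifest['depends'])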
bruth/restlib2
restlib2/resources.py
1
35324
import io import time import hashlib import collections from six import add_metaclass # http://mail.python.org/pipermail/python-list/2010-March/1239510.html from calendar import timegm from datetime import datetime, timedelta from django.conf import settings from django.http import HttpResponse, HttpRequest from django.utils.http import http_date, parse_http_date, parse_etags, \ quote_etag from django.utils.cache import patch_cache_control from .http import codes, methods from .serializers import serializers from .mixins import TemplateResponseMixin from . import mimeparse EPOCH_DATE = datetime(1970, 1, 1, 0, 0, 0) MAX_CACHE_AGE = 60 * 60 * 24 * 30 # Convenience function for checking for existent, callable methods usable = lambda x, y: isinstance(getattr(x, y, None), collections.Callable) try: str = unicode except NameError: pass def no_content_response(response): "Cautious assessment of the response body for no content." if not hasattr(response, '_container'): return True if response._container is None: return True if isinstance(response._container, (list, tuple)): if len(response._container) == 1 and not response._container[0]: return True return False def get_content_length(request): try: return int(request.META.get('CONTENT_LENGTH')) except (ValueError, TypeError): return 0 class UncacheableResponse(HttpResponse): "Response class that will never be cached." def __init__(self, *args, **kwargs): super(UncacheableResponse, self).__init__(*args, **kwargs) self['Expires'] = 0 patch_cache_control(self, no_cache=True, must_revalidate=True, max_age=0) # ## Resource Metaclass # Sets up a few helper components for the `Resource` class. class ResourceMetaclass(type): def __new__(cls, name, bases, attrs): # Create the new class as is to start. Subclass attributes can be # checked for in `attrs` and handled as necessary relative to the base # classes. new_cls = type.__new__(cls, name, bases, attrs) # If `allowed_methods` is not defined explicitly in attrs, this # could mean one of two things: that the user wants it to inherit # from the parent class (if exists) or for it to be set implicitly. # The more explicit (and flexible) behavior will be to not inherit # it from the parent class, therefore the user must explicitly # re-set the attribute. if 'allowed_methods' not in attrs or not new_cls.allowed_methods: allowed_methods = [] for method in methods: if usable(new_cls, method.lower()): allowed_methods.append(method) # If the attribute is defined in this subclass, ensure all methods that # are said to be allowed are actually defined and callable. else: allowed_methods = list(new_cls.allowed_methods) for method in allowed_methods: if not usable(new_cls, method.lower()): msg = 'The {0} method is not defined for the resource {1}' raise ValueError(msg.format(method, new_cls.__name__)) # If `GET` is not allowed, remove `HEAD` method. if 'GET' not in allowed_methods and 'HEAD' in allowed_methods: allowed_methods.remove('HEAD') new_cls.allowed_methods = tuple(allowed_methods) if not new_cls.supported_content_types: new_cls.supported_content_types = new_cls.supported_accept_types if not new_cls.supported_patch_types: new_cls.supported_patch_types = new_cls.supported_content_types return new_cls def __call__(cls, *args, **kwargs): """Tests to see if the first argument is an HttpRequest object, creates an instance, and calls it with the arguments. 
""" if args and isinstance(args[0], HttpRequest): instance = super(ResourceMetaclass, cls).__call__() return instance.__call__(*args, **kwargs) return super(ResourceMetaclass, cls).__call__(*args, **kwargs) # ## Resource # Comprehensive ``Resource`` class which implements sensible request # processing. The process flow is largely derived from Alan Dean's # [status code activity diagram][0]. # # ### Implementation Considerations # [Section 2][1] of the HTTP/1.1 specification states: # # > The methods GET and HEAD MUST be supported by all general-purpose servers. # > All other methods are OPTIONAL; # # The `HEAD` handler is already implemented on the `Resource` class, but # requires the `GET` handler to be implemented. Although not required, the # `OPTIONS` handler is also implemented. # # Response representations should follow the rules outlined in [Section # 5.1][2]. # # [Section 6.1][3] defines that `GET`, `HEAD`, `OPTIONS` and `TRACE` are # considered _safe_ methods, thus ensure the implementation of these methods do # not have any side effects. In addition to the safe methods, `PUT` and # `DELETE` are considered _idempotent_ which means subsequent identical # requests to the same resource does not result it different responses to # the client. # # Request bodies on `GET`, `HEAD`, `OPTIONS`, and `DELETE` requests are # ignored. The HTTP spec does not define any semantics surrounding this # situtation. # # Typical uses of `POST` requests are described in [Section 6.5][4], but in # most cases should be assumed by clients as _black box_, neither safe nor # idempotent. If updating an existing resource, it is more appropriate to use # `PUT`. # # [Section 7.2.1][5] defines that `GET`, `HEAD`, `POST`, and 'TRACE' should # have a payload for status code of 200 OK. If not supplied, a different 2xx # code may be more appropriate. # # [0]: http://code.google.com/p/http-headers-status/downloads/detail?name # =http-headers-status%20v3%20draft.png # [1]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section-2 # [2]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section # -5.1 # [3]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section # -6.1 # [4]: http://tools.ietf.org/html/draft-ietf-httpbis-p2-semantics-18#section # -6.5 @add_metaclass(ResourceMetaclass) class Resource(object): # ### Service Availability # Toggle this resource as unavailable. If `True`, the service # will be unavailable indefinitely. If an integer or datetime is # used, the `Retry-After` header will set. An integer can be used # to define a seconds delta from the current time (good for unexpected # downtimes). If a datetime is set, the number of seconds will be # calculated relative to the current time (good for planned downtime). unavailable = False # ### Allowed Methods # If `None`, the allowed methods will be determined based on the resource # methods define, e.g. `get`, `put`, `post`. A list of methods can be # defined explicitly to have not expose defined methods. allowed_methods = None # ### Request Rate Limiting # Enforce request rate limiting. Both `rate_limit_count` and # `rate_limit_seconds` must be defined and not zero to be active. # By default, the number of seconds defaults to 1 hour, but the count # is `None`, therefore rate limiting is not enforced. rate_limit_count = None rate_limit_seconds = 60 * 60 # ### Max Request Entity Length # If not `None`, checks if the request entity body is too large to # be processed. 
max_request_entity_length = None # ### Supported _Accept_ Mimetypes # Define a list of mimetypes supported for encoding response entity # bodies. Default to `('application/json',)` # _See also: `supported_content_types`_ supported_accept_types = ('application/json',) # ### Supported _Content-Type_ Mimetypes # Define a list of mimetypes supported for decoding request entity bodies. # This is independent of the mimetypes encoders for request bodies. # Defaults to mimetypes defined in `supported_accept_types`. supported_content_types = None # ### Supported PATCH Mimetypes # Define a list of mimetypes supported for decoding request entity bodies # for `PATCH` requests. Defaults to mimetypes defined in # `supported_content_types`. supported_patch_types = None # ### Validation Caching # #### Require Conditional Request # If `True`, `PUT` and `PATCH` requests are required to have a conditional # header for verifying the operation applies to the current state of the # resource on the server. This must be used in conjunction with either # the `use_etags` or `use_last_modified` option to take effect. require_conditional_request = False # #### Use ETags # If `True`, the `ETag` header will be set on responses and conditional # requests are supported. This applies to _GET_, _HEAD_, _PUT_, _PATCH_ # and _DELETE_ requests. Defaults to Django's `USE_ETAGS` setting. use_etags = settings.USE_ETAGS # #### Use Last Modified # If `True`, the `Last-Modified` header will be set on responses and # conditional requests are supported. This applies to _GET_, _HEAD_, _PUT_, # _PATCH_ and _DELETE_ requests. use_last_modified = False # ### Expiration Caching # Define a maximum cache age in seconds or as a date this resource is valid # for. If an integer in seconds is specified, the 'Cache-Control' 'max-age' # attribute will be used. If a timedelta is specified, the 'Expires' header # will be used with a calculated date. Both of these mechanisms are # non-conditional and are considered _strong_ cache headers. Clients (such # as browsers) will not send send a conditional request until the resource # has expired locally, this is sometimes referred to as a _cold cache_. # Most dynamic resources will want to set this to 0, unless it's a # read-only resource that is ok to be a bit stale. # - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.3 # - http://www.odino.org/301/rest-better-http-cache # - http://www.subbu.org/blog/2005/01/http-caching cache_max_age = None # Defines the cache_type of the response, public, private or no-cache # - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.1 cache_type = None # Applies to cache servers. No part of the response will be cached by # downstream cached. # - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.2 cache_no_store = False # Applies to cache servers. This ensures a cache always revalidates with # the origin server before responding to a client request. # - http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9.4 cache_must_revalidate = False def __init__(self, **kwargs): for key in kwargs: # Not methods nor methods if key in self.allowed_methods or isinstance( getattr(self, key, None), collections.Callable): raise TypeError( 'No HTTP handlers nor methods can be overriden.') if not hasattr(self, key): tpl = '{0} is not a valid keyword argument for this resource.' 
raise TypeError(tpl.format(key)) setattr(self, key, kwargs[key]) # ## Initialize Once, Process Many # Every `Resource` class can be initialized once since they are stateless # (and thus thread-safe). def __call__(self, request, *args, **kwargs): return self.dispatch(request, *args, **kwargs) def dispatch(self, request, *args, **kwargs): # Process the request. This includes all the necessary checks prior to # actually interfacing with the resource itself. response = self.process_request(request, *args, **kwargs) if not isinstance(response, HttpResponse): # Attempt to process the request given the corresponding # `request.method` handler. method_handler = getattr(self, request.method.lower()) response = method_handler(request, *args, **kwargs) if not isinstance(response, HttpResponse): # If the return value of the handler is not a response, pass # the return value into the render method. response = self.render(request, response, args=args, kwargs=kwargs) # Process the response, check if the response is overridden and # use that instead. return self.process_response(request, response) def render(self, request, content=None, status=codes.ok, content_type=None, args=None, kwargs=None): "Renders the response based on the content returned from the handler." response = HttpResponse(status=status, content_type=content_type) if content is not None: if not isinstance(content, (str, bytes, io.IOBase)): accept_type = self.get_accept_type(request) if serializers.supports_encoding(accept_type): content = serializers.encode(accept_type, content) response['Content-Type'] = accept_type response.content = content return response # ## Request Method Handlers # ### _HEAD_ Request Handler # Default handler for _HEAD_ requests. For this to be available, # a _GET_ handler must be defined. def head(self, request, *args, **kwargs): return self.get(request, *args, **kwargs) # ### _OPTIONS_ Request Handler # Default handler _OPTIONS_ requests. def options(self, request, *args, **kwargs): response = UncacheableResponse() # See [RFC 5789][0] # [0]: http://tools.ietf.org/html/rfc5789#section-3.1 if 'PATCH' in self.allowed_methods: response['Accept-Patch'] = ', '.join(self.supported_patch_types) response['Allow'] = ', '.join(sorted(self.allowed_methods)) response['Content-Length'] = 0 return response # ## Response Status Code Handlers # Each handler prefixed with `is_` corresponds to various client (4xx) # and server (5xx) error checking. For example, `is_not_found` will # return `True` if the resource does not exit. _Note: all handlers are # must return `True` to fail the check._ # ### Service Unavailable # Checks if the service is unavailable based on the `unavailable` flag. # Set the `Retry-After` header if possible to inform clients when # the resource is expected to be available. # See also: `unavailable` def is_service_unavailable(self, request, response, *args, **kwargs): if self.unavailable: if type(self.unavailable) is int and self.unavailable > 0: retry = self.unavailable elif type(self.unavailable) is datetime: retry = http_date(timegm(self.unavailable.utctimetuple())) else: retry = None if retry: response['Retry-After'] = retry return True return False # ### Unauthorized # Checks if the request is authorized to access this resource. # Default is a no-op. def is_unauthorized(self, request, response, *args, **kwargs): return False # ### Forbidden # Checks if the request is forbidden. Default is a no-op. 
def is_forbidden(self, request, response, *args, **kwargs): return False # ### Too Many Requests # Checks if this request is rate limited. Default is a no-op. def is_too_many_requests(self, request, response, *args, **kwargs): return False # ### Request Entity Too Large # Check if the request entity is too large to process. def is_request_entity_too_large(self, request, response, *args, **kwargs): content_length = get_content_length(request) if self.max_request_entity_length and \ content_length > self.max_request_entity_length: return True # ### Method Not Allowed # Check if the request method is not allowed. def is_method_not_allowed(self, request, response, *args, **kwargs): if request.method not in self.allowed_methods: response['Allow'] = ', '.join(sorted(self.allowed_methods)) return True return False # ### Unsupported Media Type # Check if this resource can process the request entity body. Note # `Content-Type` is set as the empty string, so ensure it is not falsy # when processing it. def is_unsupported_media_type(self, request, response, *args, **kwargs): # Ensure there actually is a request body to be decoded if not get_content_length(request): return if 'CONTENT_TYPE' in request.META: if not self.content_type_supported(request, response): return True if not self.content_encoding_supported(request, response): return True if not self.content_language_supported(request, response): return True return False # ### Not Acceptable # Check if this resource can return an acceptable response. def is_not_acceptable(self, request, response, *args, **kwargs): if not self.accept_type_supported(request, response): return True if 'HTTP_ACCEPT_LANGUAGE' in request.META: if not self.accept_language_supported(request, response): return True if 'HTTP_ACCEPT_CHARSET' in request.META: if not self.accept_charset_supported(request, response): return True if 'HTTP_ACCEPT_ENCODING' in request.META: if not self.accept_encoding_supported(request, response): return True return False # ### Precondition Required # Check if a conditional request is def is_precondition_required(self, request, response, *args, **kwargs): if not self.require_conditional_request: return False if self.use_etags and 'HTTP_IF_MATCH' not in request.META: return True if self.use_last_modified and 'HTTP_IF_UNMODIFIED_SINCE' not in \ request.META: return True return False def is_precondition_failed(self, request, response, *args, **kwargs): # ETags are enabled. Check for conditional request headers. The current # ETag value is used for the conditional requests. After the request # method handler has been processed, the new ETag will be calculated. if self.use_etags and 'HTTP_IF_MATCH' in request.META: request_etag = parse_etags(request.META['HTTP_IF_MATCH'])[0] etag = self.get_etag(request, request_etag) if request_etag != etag: return True # Last-Modified date enabled. check for conditional request headers. # The current modification datetime value is used for the conditional # requests. After the request method handler has been processed, the # new Last-Modified datetime will be returned. if self.use_last_modified and 'HTTP_IF_UNMODIFIED_SINCE' in \ request.META: last_modified = self.get_last_modified(request, *args, **kwargs) known_last_modified = EPOCH_DATE + timedelta( seconds=parse_http_date( request.META['HTTP_IF_UNMODIFIED_SINCE'])) if known_last_modified != last_modified: return True return False # ### Not Found # Checks if the requested resource exists. 
def is_not_found(self, request, response, *args, **kwargs): return False # ### Gone # Checks if the resource _no longer_ exists. def is_gone(self, request, response, *args, **kwargs): return False # ## Request Accept-* handlers # Checks if the requested `Accept` mimetype is supported. Defaults # to using the first specified mimetype in `supported_accept_types`. def accept_type_supported(self, request, response): if 'HTTP_ACCEPT' in request.META: accept_type = request.META['HTTP_ACCEPT'] mimetypes = list(self.supported_accept_types) mimetypes.reverse() match = mimeparse.best_match(mimetypes, accept_type) if match: request._accept_type = match return True # Only if `Accept` explicitly contains a `*/*;q=0.0` # does it preclude from returning a non-matching mimetype. # This may be desirable behavior (or not), so add this as an # option, e.g. `force_accept_type` if mimeparse.quality('*/*', accept_type) == 0: return False # If `supported_accept_types` is empty, it is assumed that the resource # will return whatever it wants. if self.supported_accept_types: request._accept_type = self.supported_accept_types[0] return True # Checks if the requested `Accept-Charset` is supported. def accept_charset_supported(self, request, response): return True # Checks if the requested `Accept-Encoding` is supported. def accept_encoding_supported(self, request, response): return True # Checks if the requested `Accept-Language` is supported. def accept_language_supported(self, request, response): return True def get_accept_type(self, request): if hasattr(request, '_accept_type'): return request._accept_type if self.supported_accept_types: return self.supported_accept_types[0] # ## Conditionl Request Handlers # ### Get/Calculate ETag # Calculates an etag for the requested entity. # Provides the client an entity tag for future conditional # requests. # For GET and HEAD requests the `If-None-Match` header may be # set to check if the entity has changed since the last request. # For PUT, PATCH, and DELETE requests, the `If-Match` header may be # set to ensure the entity is the same as the cllient's so the current # operation is valid (optimistic concurrency). def get_etag(self, request, response, etag=None): cache = self.get_cache(request, response) # Check cache first if etag is not None and etag in cache: return etag # If the Etag has been set already upstream use it, otherwise calculate def set_etag(self, request, response): if 'ETag' in response: etag = parse_etags(response['ETag'])[0] else: etag = hashlib.md5(response.content).hexdigest() response['ETag'] = quote_etag(etag) # Cache the etag for subsequent look ups. This can be cached # indefinitely since these are unique values cache = self.get_cache(request, response) cache.set(etag, 1, MAX_CACHE_AGE) # ### Get/Calculate Last Modified Datetime # Calculates the last modified time for the requested entity. # Provides the client the last modified of the entity for future # conditional requests. def get_last_modified(self, request): return datetime.now() # Set the last modified date on the response def set_last_modified(self, request, response): if 'Last-Modified' not in response: response['Last-Modified'] = self.get_last_modified(request) # ### Calculate Expiry Datetime # (not implemented) # Gets the expiry date and time for the requested entity. # Informs the client when the entity will be invalid. This is most # useful for clients to only refresh when they need to, otherwise the # client's local cache is used. 
def get_expiry(self, request, cache_timeout=None): if cache_timeout is None: cache_timeout = self.cache_max_age return time.time() + cache_timeout def set_expiry(self, request, response, cache_timeout=None): if 'Expires' not in response: response['Expires'] = http_date( self.get_expiry(request, cache_timeout)) # ## Entity Content-* handlers # Check if the request Content-Type is supported by this resource # for decoding. def content_type_supported(self, request, response, *args, **kwargs): content_type = request.META['CONTENT_TYPE'] mimetypes = list(self.supported_content_types) mimetypes.reverse() match = mimeparse.best_match(mimetypes, content_type) if match: request._content_type = match return True return False def content_encoding_supported(self, request, response, *args, **kwargs): return True def content_language_supported(self, request, response, *args, **kwargs): return True # Utility methods def get_cache(self, request, response): "Returns the cache to be used for various components." from django.core.cache import cache return cache def get_cache_timeout(self, request, response): if isinstance(self.cache_max_age, timedelta): return datetime.now() + self.cache_max_age return self.cache_max_age def response_cache_control(self, request, response): attrs = {} timeout = self.get_cache_timeout(request, response) # If explicit 0, do no apply max-age or expires if isinstance(timeout, datetime): response['Expires'] = http_date(timegm(timeout.utctimetuple())) elif isinstance(timeout, int): if timeout <= 0: timeout = 0 attrs['no_cache'] = True attrs['max_age'] = timeout if self.cache_must_revalidate: attrs['must_revalidate'] = True if self.cache_no_store: attrs['no_store'] = True if self.cache_type: attrs[self.cache_type] = True if attrs: patch_cache_control(response, **attrs) # Process methods def process_request(self, request, *args, **kwargs): # Initilize a new response for this request. Passing the response along # the request cycle allows for gradual modification of the headers. response = HttpResponse() # TODO keep track of a list of request headers used to # determine the resource representation for the 'Vary' # header. # ### 503 Service Unavailable # The server does not need to be unavailable for a resource to be # unavailable... if self.is_service_unavailable(request, response, *args, **kwargs): response.status_code = codes.service_unavailable return response # ### 414 Request URI Too Long _(not implemented)_ # This should be be handled upstream by the Web server # ### 400 Bad Request _(not implemented)_ # Note that many services respond with this code when entities are # unprocessable. This should really be a 422 Unprocessable Entity # Most actualy bad requests are handled upstream by the Web server # when parsing the HTTP message # ### 401 Unauthorized # Check if the request is authorized to access this resource. if self.is_unauthorized(request, response, *args, **kwargs): response.status_code = codes.unauthorized return response # ### 403 Forbidden # Check if this resource is forbidden for the request. if self.is_forbidden(request, response, *args, **kwargs): response.status_code = codes.forbidden return response # ### 501 Not Implemented _(not implemented)_ # This technically refers to a service-wide response for an # unimplemented request method, again this is upstream. # ### 429 Too Many Requests # Both `rate_limit_count` and `rate_limit_seconds` must be none # falsy values to be checked. 
if self.rate_limit_count and self.rate_limit_seconds: if self.is_too_many_requests(request, response, *args, **kwargs): response.status_code = codes.too_many_requests return response # ### 405 Method Not Allowed if self.is_method_not_allowed(request, response, *args, **kwargs): response.status_code = codes.method_not_allowed return response # ### 406 Not Acceptable # Checks Accept and Accept-* headers if self.is_not_acceptable(request, response, *args, **kwargs): response.status_code = codes.not_acceptable return response # ### Process an _OPTIONS_ request # Enough processing has been performed to allow an OPTIONS request. if request.method == methods.OPTIONS and 'OPTIONS' in \ self.allowed_methods: return self.options(request, response) # ## Request Entity Checks # ### 415 Unsupported Media Type # Check if the entity `Content-Type` supported for decoding. if self.is_unsupported_media_type(request, response, *args, **kwargs): response.status_code = codes.unsupported_media_type return response # ### 413 Request Entity Too Large # Check if the entity is too large for processing if self.is_request_entity_too_large(request, response, *args, **kwargs): response.status_code = codes.request_entity_too_large return response # ### 404 Not Found # Check if this resource exists. Note, if this requires a database # lookup or some other expensive lookup, the relevant object may # be _attached_ to the request or response object to be used # dowstream in the handler. This prevents multiple database # hits or filesystem lookups. if self.is_not_found(request, response, *args, **kwargs): response.status_code = codes.not_found return response # ### 410 Gone # Check if this resource used to exist, but does not anymore. A common # strategy for this when dealing with this in a database context is to # have an `archived` or `deleted` flag that can be used associated with # the given lookup key while the rest of the content in the row may be # deleted. if self.is_gone(request, response, *args, **kwargs): response.status_code = codes.gone return response # ### 428 Precondition Required # Prevents the "lost udpate" problem and requires client to confirm # the state of the resource has not changed since the last `GET` # request. This applies to `PUT` and `PATCH` requests. if request.method == methods.PUT or request.method == methods.PATCH: if self.is_precondition_required(request, response, *args, **kwargs): return UncacheableResponse(status=codes.precondition_required) # ### 412 Precondition Failed # Conditional requests applies to GET, HEAD, PUT, and PATCH. # For GET and HEAD, the request checks the either the entity changed # since the last time it requested it, `If-Modified-Since`, or if the # entity tag (ETag) has changed, `If-None-Match`. if request.method == methods.PUT or request.method == methods.PATCH: if self.is_precondition_failed(request, response, *args, **kwargs): return UncacheableResponse(status=codes.precondition_failed) # Check for conditional GET or HEAD request if request.method == methods.GET or request.method == methods.HEAD: # Check Etags before Last-Modified... if self.use_etags and 'HTTP_IF_NONE_MATCH' in request.META: # Parse request Etags (only one is currently supported) request_etag = parse_etags( request.META['HTTP_IF_NONE_MATCH'])[0] # Check if the request Etag is valid. The current Etag is # supplied to enable strategies where the etag does not need # to be used to regenerate the Etag. This may include # generating an MD5 of the resource and storing it as a key # in memcache. 
etag = self.get_etag(request, response, request_etag, *args, **kwargs) # Nothing has changed, simply return if request_etag == etag: response.status_code = codes.not_modified return response if self.use_last_modified and 'HTTP_IF_MODIFIED_SINCE' in \ request.META: # Get the last known modified date from the client, compare it # to the last modified date of the resource last_modified = self.get_last_modified(request, *args, **kwargs) known_last_modified = EPOCH_DATE + timedelta( seconds=parse_http_date( request.META['HTTP_IF_MODIFIED_SINCE'])) if known_last_modified >= last_modified: response.status_code = codes.not_modified return response if get_content_length(request): content_type = request._content_type if content_type in serializers: if isinstance(request.body, bytes): data = serializers.decode(content_type, request.body.decode('utf-8')) else: data = serializers.decode(content_type, request.body) request.data = data # ## Process the normal response returned by the handler def process_response(self, request, response): # Set default content-type for no content response if no_content_response(response): # Do not alter the content-type if an attachment is supplied if 'Content-Disposition' not in response: accept_type = self.get_accept_type(request) if accept_type: response['Content-Type'] = accept_type if request.method != methods.HEAD and response.status_code == \ codes.ok: response.status_code = codes.no_content # Set content to nothing after no content is handled since it must # retain the properties of the GET response if request.method == methods.HEAD: response.content = '' if request.method in (methods.GET, methods.HEAD): self.response_cache_control(request, response) if self.use_etags: self.set_etag(request, response) if self.use_last_modified: self.set_last_modified(request, response) return response class TemplateResource(TemplateResponseMixin, Resource): pass
bsd-2-clause
-3,642,157,530,269,288,400
40.557647
79
0.635602
false
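The validation-caching half of the resource class hinges on hashing the response body into an ETag and comparing it against the tag a client replays in If-None-Match. A framework-free sketch of that exchange (the body bytes are made up; this is the idea behind set_etag, not restlib2's API):

import hashlib

def make_etag(body: bytes) -> str:
    # md5 of the entity body, as in the resource's set_etag fallback
    return hashlib.md5(body).hexdigest()

body = b'{"status": "ok"}'
etag = make_etag(body)

# The client sends the tag back on its next request.
if_none_match = etag
status = 304 if if_none_match == etag else 200
print(status)  # 304: nothing changed, no body needs to be resent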
macarthur-lab/xbrowse
xbrowse_server/base/management/commands/transfer_dataset_from_other_project.py
1
6685
from django.core import serializers import os from datetime import datetime from django.core.management.base import BaseCommand from xbrowse_server.base.models import Project, ProjectCollaborator, Project, \ Family, FamilyImageSlide, Cohort, Individual, \ FamilySearchFlag, ProjectPhenotype, IndividualPhenotype, FamilyGroup, \ CausalVariant, ProjectTag, VariantTag, VariantNote, ReferencePopulation, \ UserProfile, VCFFile, ProjectGeneList from xbrowse_server.mall import get_project_datastore, get_datastore from pprint import pprint from xbrowse_server import sample_management def update(mongo_collection, match_json, set_json): print("-----") print("updating %s to %s" % (match_json, set_json)) #return update_result = mongo_collection.update_many(match_json, {'$set': set_json}) print("updated %s out of %s records" % (update_result.modified_count, update_result.matched_count)) return update_result def update_family_analysis_status(project_id): for family in Family.objects.filter(project__project_id=project_id): if family.analysis_status == "Q" and family.get_data_status() == "loaded": print("Setting family %s to Analysis in Progress" % family.family_id) family.analysis_status = "I" # switch status from Waiting for Data to Analysis in Progress family.save() def check_that_exists(mongo_collection, match_json, not_more_than_one=False): #return records = list(mongo_collection.find(match_json)) if len(records) == 0: print("%s query %s matched 0 records" % (mongo_collection, match_json)) return False if not_more_than_one and len(records) > 1: print("%s query %s matched more than one record: %s" % (mongo_collection, match_json, records)) return False print("-----") print("%s query %s returned %s record(s): \n%s" % (mongo_collection, match_json, len(records), "\n".join(map(str, records)))) return True class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('-d', '--destination-project', help="project id to which to transfer the datasets", required=True) parser.add_argument('-f', '--from-project', help="project id from which to take the datatsets", required=True) def transfer_project(self, from_project_id, destination_project_id): print("From: " + from_project_id) print("To: " + destination_project_id) from_project = Project.objects.get(project_id=from_project_id) destination_project = Project.objects.get(project_id=destination_project_id) # Make sure individuals are the same indivs_missing_from_dest_project = (set( [i.indiv_id for i in Individual.objects.filter(project=from_project)]) - set( [i.indiv_id for i in Individual.objects.filter(project=destination_project)])) if indivs_missing_from_dest_project: raise Exception("Individuals missing from dest project: " + str(indivs_missing_from_dest_project)) # update VCFs vcfs = from_project.families_by_vcf().keys() for vcf_file_path in vcfs: vcf_file = VCFFile.objects.get_or_create(file_path=os.path.abspath(vcf_file_path))[0] sample_management.add_vcf_file_to_project(destination_project, vcf_file) print("Added %s to project %s" % (vcf_file, destination_project.project_id)) families_db = get_datastore()._db projects_db = get_project_datastore()._db print("==========") print("Checking 'from' Projects and Families:") if not check_that_exists(projects_db.projects, {'project_id': from_project_id}, not_more_than_one=True): raise ValueError("There needs to be 1 project db in %(from_project_id)s" % locals()) if not check_that_exists(families_db.families, {'project_id': from_project_id}, not_more_than_one=False): raise ValueError("There 
needs to be atleast 1 family db in %(from_project_id)s" % locals()) print("==========") print("Make Updates:") datestamp = datetime.now().strftime("%Y-%m-%d") if check_that_exists(projects_db.projects, {'project_id': destination_project_id}, not_more_than_one=True): result = update(projects_db.projects, {'project_id': destination_project_id}, {'project_id': destination_project_id+'_previous', 'version': datestamp}) if check_that_exists(families_db.families, {'project_id': destination_project_id}, not_more_than_one=False): result = update(families_db.families, {'project_id': destination_project_id}, {'project_id': destination_project_id+'_previous', 'version': datestamp}) result = update(projects_db.projects, {'project_id': from_project_id}, {'project_id': destination_project_id, 'version': '2'}) result = update(families_db.families, {'project_id': from_project_id}, {'project_id': destination_project_id, 'version': '2'}) print("==========") print("Checking Projects:") if not check_that_exists(projects_db.projects, {'project_id': destination_project_id}, not_more_than_one=True): raise ValueError("After: There needs to be 1 project db in %(destination_project_id)s" % locals()) if not check_that_exists(families_db.families, {'project_id': destination_project_id}, not_more_than_one=False): raise ValueError("After: There needs to be atleast 1 family db in %(destination_project_id)s" % locals()) update_family_analysis_status(destination_project_id) print("Data transfer finished.") i = raw_input("Delete the 'from' project: %s? [Y/n] " % from_project_id) if i.strip() == 'Y': sample_management.delete_project(from_project_id) print("Project %s deleted" % from_project_id) else: print("Project not deleted") def handle(self, *args, **options): from_project_id = options["from_project"] destination_project_id = options["destination_project"] assert from_project_id assert destination_project_id print("Transfering data from project %s to %s" % (from_project_id, destination_project_id)) print("WARNING: this can only be done once") if raw_input("Continue? [Y/n] ").lower() != 'y': return else: print("") self.transfer_project(from_project_id, destination_project_id) #for project in Project.objects.all(): # print("Project: " + project.project_id) # update_family_analysis_status(project.project_id)
agpl-3.0
4,557,343,489,278,923,300
51.637795
163
0.653403
false
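The transfer command's core move is a two-step rename: park the destination project's Mongo documents under a datestamped name, then point the source documents at the destination id. A pure-Python sketch of that $set-style swap, with plain dicts standing in for MongoDB documents and hypothetical project ids:

from datetime import datetime

def update(docs, match, changes):
    """Apply a $set-style update to every dict matching all key/value pairs."""
    matched = 0
    for doc in docs:
        if all(doc.get(k) == v for k, v in match.items()):
            doc.update(changes)
            matched += 1
    return matched

records = [{'project_id': 'dest_project'}, {'project_id': 'from_project'}]
stamp = datetime.now().strftime("%Y-%m-%d")

# 1) keep the current destination data under a datestamped backup name ...
update(records, {'project_id': 'dest_project'},
       {'project_id': 'dest_project_previous', 'version': stamp})
# 2) ... then move the source data into its place.
update(records, {'project_id': 'from_project'},
       {'project_id': 'dest_project', 'version': '2'})
print(records)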
suutari/shoop
shuup/notify/models/script.py
1
2353
# -*- coding: utf-8 -*- # This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Commerce Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from __future__ import unicode_literals from django.db import models from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ from jsonfield.fields import JSONField from shuup.core.fields import InternalIdentifierField from shuup.notify.base import Event from shuup.notify.enums import StepNext @python_2_unicode_compatible class Script(models.Model): event_identifier = models.CharField(max_length=64, blank=False, db_index=True, verbose_name=_('event identifier')) identifier = InternalIdentifierField(unique=True) created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_('created on')) name = models.CharField(max_length=64, verbose_name=_('name')) enabled = models.BooleanField(default=False, db_index=True, verbose_name=_('enabled')) _step_data = JSONField(default=[], db_column="step_data") def get_steps(self): """ :rtype Iterable[Step] """ if getattr(self, "_steps", None) is None: from shuup.notify.script import Step self._steps = [Step.unserialize(data) for data in self._step_data] return self._steps def set_steps(self, steps): self._step_data = [step.serialize() for step in steps] self._steps = steps def get_serialized_steps(self): return [step.serialize() for step in self.get_steps()] def set_serialized_steps(self, serialized_data): self._steps = None self._step_data = serialized_data # Poor man's validation for step in self.get_steps(): pass @property def event_class(self): return Event.class_for_identifier(self.event_identifier) def __str__(self): return self.name def execute(self, context): """ Execute the script in the given context. :param context: Script context :type context: shuup.notify.script.Context """ for step in self.get_steps(): if step.execute(context) == StepNext.STOP: break
agpl-3.0
998,860,068,812,881,000
33.602941
118
0.666383
false
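The Script model stores steps as serialized JSON and only rebuilds Step objects lazily, caching them on the instance. A minimal sketch of that caching pattern outside Django, with a stand-in Step class and made-up step data:

import json

class Step:
    """Stand-in for shuup.notify.script.Step: just wraps its serialized form."""
    def __init__(self, data):
        self.data = data

    @classmethod
    def unserialize(cls, data):
        return cls(data)

    def serialize(self):
        return self.data

class Script:
    def __init__(self, step_data=None):
        self._step_data = step_data or []   # what the JSONField column would hold
        self._steps = None                  # lazily built cache

    def get_steps(self):
        if self._steps is None:
            self._steps = [Step.unserialize(d) for d in self._step_data]
        return self._steps

    def set_steps(self, steps):
        self._step_data = [s.serialize() for s in steps]
        self._steps = steps

script = Script(json.loads('[{"cond_op": "all", "next": "continue"}]'))
print([step.data for step in script.get_steps()])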
beiko-lab/gengis
bin/Lib/site-packages/scipy/ndimage/tests/test_filters.py
1
2260
''' Some tests for filters '''
from __future__ import division, print_function, absolute_import

import numpy as np

from numpy.testing import assert_equal, assert_raises

import scipy.ndimage as sndi


def test_ticket_701():
    # Test generic filter sizes
    arr = np.arange(4).reshape((2,2))
    func = lambda x: np.min(x)
    res = sndi.generic_filter(arr, func, size=(1,1))
    # The following raises an error unless ticket 701 is fixed
    res2 = sndi.generic_filter(arr, func, size=1)
    assert_equal(res, res2)


def test_orders_gauss():
    # Check order inputs to Gaussians
    arr = np.zeros((1,))
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter(arr, 1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter, arr, 1, 4
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=0)
    yield assert_equal, 0, sndi.gaussian_filter1d(arr, 1, axis=-1, order=3)
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, -1
    yield assert_raises, ValueError, sndi.gaussian_filter1d, arr, 1, -1, 4


def test_valid_origins():
    """Regression test for #1311."""
    func = lambda x: np.mean(x)
    data = np.array([1,2,3,4,5], dtype=np.float64)
    assert_raises(ValueError, sndi.generic_filter, data, func, size=3,
                  origin=2)
    func2 = lambda x, y: np.mean(x + y)
    assert_raises(ValueError, sndi.generic_filter1d, data, func,
                  filter_size=3, origin=2)
    assert_raises(ValueError, sndi.percentile_filter, data, 0.2, size=3,
                  origin=2)

    for filter in [sndi.uniform_filter, sndi.minimum_filter,
                   sndi.maximum_filter, sndi.maximum_filter1d,
                   sndi.median_filter, sndi.minimum_filter1d]:
        # This should work, since for size == 3, the valid range for origin is
        # -1 to 1.
        list(filter(data, 3, origin=-1))
        list(filter(data, 3, origin=1))
        # Just check this raises an error instead of silently accepting or
        # segfaulting.
        assert_raises(ValueError, filter, data, 3, origin=2)
gpl-3.0
3,568,039,800,449,397,000
39.090909
78
0.633628
false
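The origin test above relies on the rule that for a filter of size 3 the origin must lie in the range -1..1. A quick demonstration with one of the tested filters (requires numpy and scipy installed):

import numpy as np
import scipy.ndimage as sndi

data = np.array([1, 2, 3, 4, 5], dtype=np.float64)

# For size == 3 the valid origin range is -1..1, so this succeeds ...
print(sndi.uniform_filter(data, size=3, origin=1))

# ... while origin=2 is rejected with ValueError instead of mis-indexing.
try:
    sndi.uniform_filter(data, size=3, origin=2)
except ValueError as exc:
    print("rejected:", exc)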
jeremiahyan/odoo
addons/mail/models/mail_alias.py
1
13423
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import ast import re from markupsafe import Markup from odoo import _, api, fields, models from odoo.exceptions import ValidationError, UserError from odoo.tools import remove_accents, is_html_empty # see rfc5322 section 3.2.3 atext = r"[a-zA-Z0-9!#$%&'*+\-/=?^_`{|}~]" dot_atom_text = re.compile(r"^%s+(\.%s+)*$" % (atext, atext)) class Alias(models.Model): """A Mail Alias is a mapping of an email address with a given Odoo Document model. It is used by Odoo's mail gateway when processing incoming emails sent to the system. If the recipient address (To) of the message matches a Mail Alias, the message will be either processed following the rules of that alias. If the message is a reply it will be attached to the existing discussion on the corresponding record, otherwise a new record of the corresponding model will be created. This is meant to be used in combination with a catch-all email configuration on the company's mail server, so that as soon as a new mail.alias is created, it becomes immediately usable and Odoo will accept email for it. """ _name = 'mail.alias' _description = "Email Aliases" _rec_name = 'alias_name' _order = 'alias_model_id, alias_name' def _default_alias_domain(self): return self.env["ir.config_parameter"].sudo().get_param("mail.catchall.domain") alias_name = fields.Char('Alias Name', copy=False, help="The name of the email alias, e.g. 'jobs' if you want to catch emails for <[email protected]>") alias_model_id = fields.Many2one('ir.model', 'Aliased Model', required=True, ondelete="cascade", help="The model (Odoo Document Kind) to which this alias " "corresponds. Any incoming email that does not reply to an " "existing record will cause the creation of a new record " "of this model (e.g. a Project Task)", # hack to only allow selecting mail_thread models (we might # (have a few false positives, though) domain="[('field_id.name', '=', 'message_ids')]") alias_user_id = fields.Many2one('res.users', 'Owner', default=lambda self: self.env.user, help="The owner of records created upon receiving emails on this alias. " "If this field is not set the system will attempt to find the right owner " "based on the sender (From) address, or will use the Administrator account " "if no system user is found for that address.") alias_defaults = fields.Text('Default Values', required=True, default='{}', help="A Python dictionary that will be evaluated to provide " "default values when creating new records for this alias.") alias_force_thread_id = fields.Integer( 'Record Thread ID', help="Optional ID of a thread (record) to which all incoming messages will be attached, even " "if they did not reply to it. If set, this will disable the creation of new records completely.") alias_domain = fields.Char('Alias domain', compute='_compute_alias_domain', default=_default_alias_domain) alias_parent_model_id = fields.Many2one( 'ir.model', 'Parent Model', help="Parent model holding the alias. 
The model holding the alias reference " "is not necessarily the model given by alias_model_id " "(example: project (parent_model) and task (model))") alias_parent_thread_id = fields.Integer('Parent Record Thread ID', help="ID of the parent record holding the alias (example: project holding the task creation alias)") alias_contact = fields.Selection([ ('everyone', 'Everyone'), ('partners', 'Authenticated Partners'), ('followers', 'Followers only')], default='everyone', string='Alias Contact Security', required=True, help="Policy to post a message on the document using the mailgateway.\n" "- everyone: everyone can post\n" "- partners: only authenticated partners\n" "- followers: only followers of the related document or members of following channels\n") alias_bounced_content = fields.Html( "Custom Bounced Message", translate=True, help="If set, this content will automatically be sent out to unauthorized users instead of the default message.") _sql_constraints = [ ('alias_unique', 'UNIQUE(alias_name)', 'Unfortunately this email alias is already used, please choose a unique one') ] @api.constrains('alias_name') def _alias_is_ascii(self): """ The local-part ("display-name" <local-part@domain>) of an address only contains limited range of ascii characters. We DO NOT allow anything else than ASCII dot-atom formed local-part. Quoted-string and internationnal characters are to be rejected. See rfc5322 sections 3.4.1 and 3.2.3 """ for alias in self: if alias.alias_name and not dot_atom_text.match(alias.alias_name): raise ValidationError(_( "You cannot use anything else than unaccented latin characters in the alias address (%s).", alias.alias_name, )) def _compute_alias_domain(self): alias_domain = self._default_alias_domain() for record in self: record.alias_domain = alias_domain @api.constrains('alias_defaults') def _check_alias_defaults(self): for alias in self: try: dict(ast.literal_eval(alias.alias_defaults)) except Exception: raise ValidationError(_('Invalid expression, it must be a literal python dictionary definition e.g. "{\'field\': \'value\'}"')) @api.model_create_multi def create(self, vals_list): """ Creates email.alias records according to the values provided in ``vals`` with 1 alteration: * ``alias_name`` value may be cleaned by replacing certain unsafe characters; :raise UserError: if given alias_name is already assigned or there are duplicates in given vals_list; """ alias_names = [vals['alias_name'] for vals in vals_list if vals.get('alias_name')] if alias_names: sanitized_names = self._clean_and_check_unique(alias_names) for vals in vals_list: if vals.get('alias_name'): vals['alias_name'] = sanitized_names[alias_names.index(vals['alias_name'])] return super(Alias, self).create(vals_list) def write(self, vals): """"Raises UserError if given alias name is already assigned""" if vals.get('alias_name') and self.ids: if len(self) > 1: raise UserError(_( 'Email alias %(alias_name)s cannot be used on %(count)d records at the same time. Please update records one by one.', alias_name=vals['alias_name'], count=len(self) )) vals['alias_name'] = self._clean_and_check_unique([vals.get('alias_name')])[0] return super(Alias, self).write(vals) def name_get(self): """Return the mail alias display alias_name, including the implicit mail catchall domain if exists from config otherwise "New Alias". e.g. 
`[email protected]` or `jobs` or 'New Alias' """ res = [] for record in self: if record.alias_name and record.alias_domain: res.append((record['id'], "%s@%s" % (record.alias_name, record.alias_domain))) elif record.alias_name: res.append((record['id'], "%s" % (record.alias_name))) else: res.append((record['id'], _("Inactive Alias"))) return res def _clean_and_check_unique(self, names): """When an alias name appears to already be an email, we keep the local part only. A sanitizing / cleaning is also performed on the name. If name already exists an UserError is raised. """ def _sanitize_alias_name(name): """ Cleans and sanitizes the alias name """ sanitized_name = remove_accents(name).lower().split('@')[0] sanitized_name = re.sub(r'[^\w+.]+', '-', sanitized_name) sanitized_name = re.sub(r'^\.+|\.+$|\.+(?=\.)', '', sanitized_name) sanitized_name = sanitized_name.encode('ascii', errors='replace').decode() return sanitized_name sanitized_names = [_sanitize_alias_name(name) for name in names] catchall_alias = self.env['ir.config_parameter'].sudo().get_param('mail.catchall.alias') bounce_alias = self.env['ir.config_parameter'].sudo().get_param('mail.bounce.alias') alias_domain = self.env["ir.config_parameter"].sudo().get_param("mail.catchall.domain") # matches catchall or bounce alias for sanitized_name in sanitized_names: if sanitized_name in [catchall_alias, bounce_alias]: matching_alias_name = '%s@%s' % (sanitized_name, alias_domain) if alias_domain else sanitized_name raise UserError( _('The e-mail alias %(matching_alias_name)s is already used as %(alias_duplicate)s alias. Please choose another alias.', matching_alias_name=matching_alias_name, alias_duplicate=_('catchall') if sanitized_name == catchall_alias else _('bounce')) ) # matches existing alias domain = [('alias_name', 'in', sanitized_names)] if self: domain += [('id', 'not in', self.ids)] matching_alias = self.search(domain, limit=1) if not matching_alias: return sanitized_names sanitized_alias_name = _sanitize_alias_name(matching_alias.alias_name) matching_alias_name = '%s@%s' % (sanitized_alias_name, alias_domain) if alias_domain else sanitized_alias_name if matching_alias.alias_parent_model_id and matching_alias.alias_parent_thread_id: # If parent model and parent thread ID both are set, display document name also in the warning document_name = self.env[matching_alias.alias_parent_model_id.model].sudo().browse(matching_alias.alias_parent_thread_id).display_name raise UserError( _('The e-mail alias %(matching_alias_name)s is already used by the %(document_name)s %(model_name)s. Choose another alias or change it on the other document.', matching_alias_name=matching_alias_name, document_name=document_name, model_name=matching_alias.alias_parent_model_id.name) ) raise UserError( _('The e-mail alias %(matching_alias_name)s is already linked with %(alias_model_name)s. 
Choose another alias or change it on the linked model.', matching_alias_name=matching_alias_name, alias_model_name=matching_alias.alias_model_id.name) ) def open_document(self): if not self.alias_model_id or not self.alias_force_thread_id: return False return { 'view_mode': 'form', 'res_model': self.alias_model_id.model, 'res_id': self.alias_force_thread_id, 'type': 'ir.actions.act_window', } def open_parent_document(self): if not self.alias_parent_model_id or not self.alias_parent_thread_id: return False return { 'view_mode': 'form', 'res_model': self.alias_parent_model_id.model, 'res_id': self.alias_parent_thread_id, 'type': 'ir.actions.act_window', } def _get_alias_bounced_body_fallback(self, message_dict): return Markup( _("""<p>Hi,<br/> The following email sent to %s cannot be accepted because this is a private email address. Only allowed people can contact us at this address.</p>""") ) % self.display_name def _get_alias_bounced_body(self, message_dict): """Get the body of the email return in case of bounced email. :param message_dict: dictionary of mail values """ lang_author = False if message_dict.get('author_id'): try: lang_author = self.env['res.partner'].browse(message_dict['author_id']).lang except: pass if lang_author: self = self.with_context(lang=lang_author) if not is_html_empty(self.alias_bounced_content): body = self.alias_bounced_content else: body = self._get_alias_bounced_body_fallback(message_dict) template = self.env.ref('mail.mail_bounce_alias_security', raise_if_not_found=True) return template._render({ 'body': body, 'message': message_dict }, engine='ir.qweb', minimal_qcontext=True)
gpl-3.0
4,102,590,126,485,825,500
50.429119
175
0.603889
false
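Two pieces of the alias model are easy to exercise on their own: the dot-atom regex that rejects non-ASCII local parts, and the sanitizer that cleans a proposed alias name. A simplified sketch (strip_accents here is a stand-in for odoo.tools.remove_accents, and the example address is hypothetical):

import re
import unicodedata

atext = r"[a-zA-Z0-9!#$%&'*+\-/=?^_`{|}~]"
dot_atom_text = re.compile(r"^%s+(\.%s+)*$" % (atext, atext))

def strip_accents(value):
    # simplified stand-in for odoo.tools.remove_accents
    return ''.join(ch for ch in unicodedata.normalize('NFKD', value)
                   if not unicodedata.combining(ch))

def sanitize_alias_name(name):
    local = strip_accents(name).lower().split('@')[0]
    local = re.sub(r'[^\w+.]+', '-', local)             # unsafe chars become dashes
    local = re.sub(r'^\.+|\.+$|\.+(?=\.)', '', local)   # no leading/trailing/double dots
    return local.encode('ascii', errors='replace').decode()

candidate = sanitize_alias_name('Équipe.Jobs@example.com')
print(candidate, bool(dot_atom_text.match(candidate)))  # equipe.jobs True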
iogf/candocabot
module.py
1
1573
import sys

class Module:
    """
    This class is used to load/unload plugins.
    It calls functions which are defined inside the plugin files
    according to their names. These name functions correspond
    to irc event commands.
    """

    def __init__(self, *args1, **args2):
        self.modules = {}

        for i in args1:
            self.load(i)

        for i, j in args2.iteritems():
            self.load(i, **j)

    def load(self, module, **args):
        """ Load a plugin """
        __import__(module)
        self.modules[module] = sys.modules[module]

        if len(args):
            for i, j in args.iteritems():
                setattr(self.modules[module], i, j)

    def reload(self, module, **args):
        """
        This function was suggested by joo.
        It eases the work when debugging modules.
        """
        self.unload(module)
        self.load(module, **args)

    def unload(self, module):
        """ Unload a plugin """
        del self.modules[module]
        del sys.modules[module]

    """ It sends a signal to a specific module """
    def signal_module(self, module, sign, *args1, **args2):
        if vars(self.modules[module]).has_key(sign):
            act = getattr(self.modules[module], sign)
            """verify whether act receives argument or not """
            act(*args1, **args2)

    """ It sends a signal to all modules """
    def signal(self, sign, *args1, **args2):
        for i in self.modules.keys():
            self.signal_module(i, sign, *args1, **args2)
apache-2.0
3,497,972,722,302,391,300
26.12069
72
0.551176
false
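The plugin loader's signal dispatch boils down to "call the module attribute named after the event, but only if it exists and is callable". A Python 3 sketch of the same idea (ModuleBus and make_plugin are illustrative names; the original loads real plugin files with __import__, here an in-memory module stands in):

import types

def make_plugin(name, **handlers):
    """Build an in-memory stand-in for an imported plugin module."""
    mod = types.ModuleType(name)
    for attr, fn in handlers.items():
        setattr(mod, attr, fn)
    return mod

class ModuleBus:
    def __init__(self):
        self.modules = {}

    def register(self, mod):
        self.modules[mod.__name__] = mod

    def signal(self, sign, *args, **kwargs):
        # Only modules that define a handler with the event's name are called.
        for mod in self.modules.values():
            handler = getattr(mod, sign, None)
            if callable(handler):
                handler(*args, **kwargs)

bus = ModuleBus()
bus.register(make_plugin('greeter', on_join=lambda nick: print('hello', nick)))
bus.signal('on_join', 'alice')   # prints: hello alice
bus.signal('on_part', 'alice')   # ignored: no module defines on_part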
navcoindev/navcoin-core
qa/rpc-tests/mempool_resurrect_test.py
1
3169
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test resurrection of mined transactions when # the blockchain is re-organized. # from test_framework.test_framework import NavCoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(NavCoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = False def setup_network(self): # Just need one node for this test args = ["-checkmempool", "-debug=mempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.is_network_split = False def run_test(self): node0_address = self.nodes[0].getnewaddress() # Spend block 1/2/3's coinbase transactions # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ] coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ] spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ] spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back; all transactions should # end up unconfirmed and back in the mempool for node in self.nodes: node.invalidateblock(blocks[0]) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main()
mit
5,990,899,284,377,277,000
38.123457
105
0.637741
false
mkhuthir/catkin_ws
src/chessbot/src/r2_chess_pgn.py
1
2487
#!/usr/bin/python
import sys, rospy, tf, moveit_commander, random
from geometry_msgs.msg import Pose, Point, Quaternion
import pgn

class R2ChessboardPGN:
    def __init__(self):
        self.left_arm = moveit_commander.MoveGroupCommander("left_arm")
        self.left_hand = moveit_commander.MoveGroupCommander("left_hand")

    def setGrasp(self, state):
        if state == "pre-pinch":
            vec = [ 0.3, 0, 1.57, 0,   # index
                   -0.1, 0, 1.57, 0,   # middle
                    0, 0, 0,           # ring
                    0, 0, 0,           # pinkie
                    0, 1.1, 0, 0]      # thumb
        elif state == "pinch":
            vec = [ 0, 0, 1.57, 0,
                    0, 0, 1.57, 0,
                    0, 0, 0,
                    0, 0, 0,
                    0, 1.1, 0, 0]
        elif state == "open":
            vec = [0] * 18
        else:
            raise ValueError("unknown hand state: %s" % state)
        self.left_hand.set_joint_value_target(vec)
        self.left_hand.go(True)

    def setPose(self, x, y, z, phi, theta, psi):
        orient = \
            Quaternion(*tf.transformations.quaternion_from_euler(phi, theta, psi))
        pose = Pose(Point(x, y, z), orient)
        self.left_arm.set_pose_target(pose)
        self.left_arm.go(True)

    def setSquare(self, square, height_above_board):
        if len(square) != 2 or not square[1].isdigit():
            raise ValueError(
                "expected a chess rank and file like 'b3' but found %s instead" % square)
        print "going to %s" % square
        rank_y = -0.24 - 0.05 * int(square[1])
        file_x = 0.5 - 0.05 * (ord(square[0]) - ord('a'))
        z = float(height_above_board) + 1.0
        self.setPose(file_x, rank_y, z, 3.14, 0.3, -1.57)

    def playGame(self, pgn_filename):
        game = pgn.loads(open(pgn_filename).read())[0]
        self.setGrasp("pre-pinch")
        self.setSquare("a1", 0.15)
        for move in game.moves:
            self.setSquare(move[0:2], 0.10)
            self.setSquare(move[0:2], 0.015)
            self.setGrasp("pinch")
            self.setSquare(move[0:2], 0.10)
            self.setSquare(move[2:4], 0.10)
            self.setSquare(move[2:4], 0.015)
            self.setGrasp("pre-pinch")
            self.setSquare(move[2:4], 0.10)

if __name__ == '__main__':
    moveit_commander.roscpp_initialize(sys.argv)
    rospy.init_node('r2_chess_pgn',anonymous=True)
    argv = rospy.myargv(argv=sys.argv)  # filter out any arguments used by ROS
    if len(argv) != 2:
        print "usage: r2_chess_pgn.py PGNFILE"
        sys.exit(1)
    print "playing %s" % argv[1]
    r2pgn = R2ChessboardPGN()
    r2pgn.playGame(argv[1])
    moveit_commander.roscpp_shutdown()
gpl-3.0
6,829,077,378,582,487,000
33.068493
76
0.582228
false
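The square-to-workspace mapping inside setSquare above is easy to sanity-check on its own. The helper below merely repeats that arithmetic outside ROS/MoveIt (coordinates in metres); it is an illustration, not part of the original script.

def square_to_xy(square):
    # Same formulas as R2ChessboardPGN.setSquare: file letter -> x, rank digit -> y.
    rank_y = -0.24 - 0.05 * int(square[1])
    file_x = 0.5 - 0.05 * (ord(square[0]) - ord('a'))
    return file_x, rank_y

print(square_to_xy('a1'))  # roughly (0.5, -0.29)
print(square_to_xy('h8'))  # roughly (0.15, -0.64)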
cuoretech/dowork
dowork/Model/Task.py
1
8188
from database_config import * from datetime import datetime from py2neo import neo4j, node # Class : Task # Methods: # 1) db_init(self) - Private # 2) getNode(self) - Returns the Task Node # 3) getName(self) - Returns name of task # 4) setDescription(self, description) - Takes description as a string # 5) getDescription(self) - Gets the description as a string # 6) setEndTime(self, sTime) - Sets eTime in millis # 7) getEndTime(self) - Gets eTime in millis # 8) setInvestedTime(self, iTime) - Sets iTime in millis # 9) getInvestedTime(self, iTime) - Gets iTime in millis # 10) setDeadline(self, deadline) - Sets the deadline in millis # 11) getDeadline(self) - Gets the deadline in millis # 10) setPriority(self, priority) - Sets the priority of the task # 11) getPriority(self) - Gets the priority of the task # 12) assignToUser(self, owner) - owner is a node, Owner.getNode() # 13) getAssignedUsers(self) - Returns a list of 'Node' Objects containing the User Nodes # 14) setStatus(self, Status) - Status should be one of the STS Constants contained in Task # 15) getStatus(self) - Returns status of Task # 16) addSubTask(self, subtask) - Takes a (Task Node) subTask, returns a 'Path' object # containing nodes and relationships used # 17) getSubTasks(self) - a list of subtasks the current task has # 18) addFile(self, file) - adds a file to the task # 19) getFiles(self) - Returns a list of File Nodes # # Properties: # 1) name # 2) description # 3) eTime # 4) iTime # 5) deadline # 6) priority # 7) status STS_OPEN = "Open" STS_CLOSED = "Closed" STS_IN_PROG = "In_Progress" class Task: graph_db = None taskInstance = None def db_init(self): if self.graph_db is None: self.graph_db = neo4j.GraphDatabaseService(db_config['uri']) # # Function : getNode # Arguments : # Returns : TaskInstance Node # def getNode(self): return self.taskInstance # # Function : Constructor # Arguments : Uri of Existing Task Node OR Name of Task # def __init__(self, URI=None, Name=None, Status=None): global LBL_TASK self.db_init() tempTask = None if URI is not None: tempTask = neo4j.Node(URI) elif Name is not None and Status is not None: tempTask = self.graph_db.get_or_create_indexed_node(IND_TASK, "nametime", Name+str(datetime.now()), {"name": Name, "status": Status}) tempTask.add_labels(LBL_TASK) else: raise Exception("Name/Status or URI not specified") self.taskInstance = tempTask if self.getUpdateTime() is None: self.setUpdateTime() # Function : __str__ # Arguments : # Returns : name of task # def __str__(self): if self.taskInstance is not None: return self.taskInstance["name"] else: return None # # Function : getName # Arguments : # Returns : name of task # def getName(self): if self.taskInstance is not None: return self.taskInstance["name"] else: return None # # Function : setDescription # Arguments : (String) description # def setDescription(self, description): self.taskInstance["description"] = description # # Function : getDescription # Arguments : # Returns : (String) description # def getDescription(self): return self.taskInstance["description"]; # # Function : setEndTime # Arguments : eTime in millis # Returns : # def setEndTime(self, eTime): self.taskInstance["eTime"] = eTime # # Function : getEndTime # Arguments : # Returns : eTime in millis # def getEndTime(self): return self.taskInstance["eTime"] # # Function : setUpdateTime # Arguments : String uTime (in milliseconds) # Returns : # def setUpdateTime(self): self.taskInstance['uTime'] = datetime.now() # # Function : getUpdateTime # Arguments : # Returns : (String) uTime # 
def getUpdateTime(self): return self.taskInstance['uTime'] # # Function : setInvestedTime # Arguments : iTime # Returns : # def setInvestedTime(self, iTime): self.taskInstance["iTime"] = iTime # # Function : getInvestedTime # Arguments : # Returns : iTime in millis # def getInvestedTime(self): return self.taskInstance["iTime"] # # Function : setDeadline # Arguments : deadline # Returns : # def setDeadline(self, deadline): self.taskInstance["deadline"] = deadline # # Function : getDeadline # Arguments : # Returns : list of deadlines for the task # def getDeadline(self): return self.taskInstance["deadline"] # # Function : setPriority # Arguments : priority integer as string # Returns : # def setPriority(self, priority): self.taskInstance["priority"] = priority # # Function : getPriority # Arguments : # Returns : priority as string # def getPriority(self): return self.taskInstance["priority"] # # Function : assignToUser # Arguments : (User Node) owner # Returns : a 'Path' object containing nodes and relationships used # def assignToUser(self, user): global REL_ASSIGNEDTO, LBL_USER if LBL_USER in user.get_labels(): return self.taskInstance.get_or_create_path(REL_ASSIGNEDTO, user) else: raise Exception("The Node Provided is not a User") # # Function : getAssignedUsers # Arguments : # Returns : a list of 'Node' Objects containing the User Nodes # def getAssignedUsers(self): global REL_ASSIGNEDTO users = list() for relationship in list(self.taskInstance.match_outgoing(REL_ASSIGNEDTO)): users.append(relationship.end_node) return users # # Function : setStatus # Arguments : (String) Status # Returns : # def setStatus(self, Status): self.taskInstance["status"] = Status # # Function : getStatus # Arguments : # Returns : Status of Task # def getStatus(self): return self.taskInstance["status"] # # Function : addSubTask # Arguments : (Task Node) subTask # Returns : a 'Path' object containing nodes and relationships used # def addSubTask(self, subtask): global REL_HASSUBTASK, LBL_TASK if subtask is not None and LBL_TASK in subtask.get_labels(): return self.taskInstance.get_or_create_path(REL_HASSUBTASK, subtask) else: raise Exception("Please supply a proper Task Node(Task in Labels") # # Function : getSubTask # Arguments : # Returns : a list of subtasks the current task has # def getSubTasks(self): global REL_HASSUBTASK subTasks = list() for relationship in list(self.taskInstance.match_outgoing(REL_HASSUBTASK)): subTasks.append(relationship.end_node) return subTasks # # Function : addFile # Arguments : File Node # Returns : a 'Path' object # def addFile(self, File): global LBL_FILE, REL_HASFILE if File is not None and LBL_FILE in File.get_labels(): return self.taskInstance.get_or_create_path(REL_HASFILE, File) else: raise Exception("Please supply a proper File Node (Node in Label)") # # Function : getFiles # Arguments : # Returns : a list of File Nodes # def getFiles(self): global REL_HASFILE files = list() for relationship in list(self.taskInstance.match_outgoing(REL_HASFILE)): files.append(relationship.end_node) return files # Clears the entire DB for dev purposes def clear(self): self.graph_db.clear()
apache-2.0
1,286,026,942,883,633,700
27.237931
145
0.605154
false
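A hedged usage sketch for the Task model documented above; it assumes a reachable Neo4j instance configured via database_config, which is outside this record.

# Illustrative only: needs the Neo4j connection settings imported by database_config.
task = Task(Name="Write report", Status=STS_OPEN)
task.setDescription("Quarterly summary for the team")
task.setDeadline(1735689600000)        # milliseconds, as the header comment specifies
task.setPriority("2")

subtask = Task(Name="Collect figures", Status=STS_OPEN)
task.addSubTask(subtask.getNode())     # addSubTask expects the underlying Task node

print(task.getName(), task.getStatus())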
mfnch/pyrtist
pyrtist/lib2d/text_formatter.py
1
3338
# Copyright (C) 2017, 2020 Matteo Franchin
#
# This file is part of Pyrtist.
#   Pyrtist is free software: you can redistribute it and/or modify it
#   under the terms of the GNU Lesser General Public License as published
#   by the Free Software Foundation, either version 2.1 of the License, or
#   (at your option) any later version.
#
#   Pyrtist is distributed in the hope that it will be useful,
#   but WITHOUT ANY WARRANTY; without even the implied warranty of
#   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#   GNU Lesser General Public License for more details.
#
#   You should have received a copy of the GNU Lesser General Public License
#   along with Pyrtist.  If not, see <http://www.gnu.org/licenses/>.

try:
    from cStringIO import StringIO  # Python 2
except:
    from io import StringIO         # Python 3

__all__ = ('TextFormatter',)


class TextFormatter(object):
    def __init__(self):
        self.max_stack_level = 10
        self.out = StringIO()
        self.states = []
        self.level = 0
        self.string = None
        self.cursor = 0

    def push_state(self, old_state, new_state):
        self.states.append(old_state)
        return new_state

    def pop_state(self):
        return self.states.pop()

    def pop_text(self):
        ret = self.out.getvalue()
        self.out = StringIO()
        self.out.truncate()
        return ret

    def run(self, string):
        self.cursor = 0
        self.string = string
        fn = self._state_normal
        while self.cursor < len(self.string):
            c = self.string[self.cursor]
            fn = fn(c)
        self.cmd_draw()

    def _state_normal(self, c):
        self.cursor += 1
        if c == '_':
            return self._state_wait_sub
        if c == '^':
            return self._state_wait_sup
        if c == '\\':
            return self.push_state(self._state_normal, self._state_literal)
        if c == '}':
            if self.level > 0:
                self.cmd_draw()
                self.cmd_restore()
                self.level -= 1
                return self._state_normal
        elif c == '\n':
            self.cmd_draw()
            self.cmd_newline()
        else:
            self.out.write(c)
        return self._state_normal

    def _state_single(self, c):
        self.cursor += 1
        if c == '\n':
            # Ignore newlines.
            return self._state_single
        if c == '\\':
            return self.push_state(self._state_single, self._state_literal)
        self.out.write(c)
        self.cmd_draw()
        self.cmd_restore()
        return self._state_normal

    def _state_literal(self, c):
        self.cursor += 1
        self.out.write(c)
        return self.pop_state()

    def _state_wait_sup(self, c):
        return self._state_wait_sub(c, sup=True)

    def _state_wait_sub(self, c, sup=False):
        self.cursor += 1
        if c in '_^':
            if (c == '^') == sup:
                self.out.write(c)
                return self._state_normal
        self.cmd_draw()
        self.cmd_save()
        if sup:
            self.cmd_superscript()
        else:
            self.cmd_subscript()
        if c != '{':
            self.cursor -= 1
            return self._state_single
        self.level += 1
        return self._state_normal
lgpl-2.1
-1,941,142,295,996,412,400
27.775862
76
0.547933
false
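A small sketch of how the state machine above might be driven. The base class calls cmd_* hooks it never defines, so the TraceFormatter subclass below supplies print-based stand-ins; it is hypothetical and not how Pyrtist itself wires the rendering commands in.

class TraceFormatter(TextFormatter):
    # Hypothetical subclass: just log what the formatter would render.
    def cmd_draw(self):
        text = self.pop_text()
        if text:
            print('draw %r' % text)
    def cmd_save(self):        print('save state')
    def cmd_restore(self):     print('restore state')
    def cmd_superscript(self): print('enter superscript')
    def cmd_subscript(self):   print('enter subscript')
    def cmd_newline(self):     print('newline')

TraceFormatter().run('x^{2} + y_i')   # draws 'x', a superscript '2', ' + y', then a subscript 'i'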
ganeshcmohan/mongoengine.0.8.7_v1
mongoengine/connection.py
1
5806
import pymongo from pymongo import MongoClient, MongoReplicaSetClient, uri_parser __all__ = ['ConnectionError', 'connect', 'register_connection', 'DEFAULT_CONNECTION_NAME'] DEFAULT_CONNECTION_NAME = 'default' class ConnectionError(Exception): pass _connection_settings = {} _connections = {} _dbs = {} def register_connection(alias, name, host=None, port=None, is_slave=False, read_preference=False, slaves=None, username=None, password=None, **kwargs): """Add a connection. :param alias: the name that will be used to refer to this connection throughout MongoEngine :param name: the name of the specific database to use :param host: the host name of the :program:`mongod` instance to connect to :param port: the port that the :program:`mongod` instance is running on :param is_slave: whether the connection can act as a slave ** Depreciated pymongo 2.0.1+ :param read_preference: The read preference for the collection ** Added pymongo 2.1 :param slaves: a list of aliases of slave connections; each of these must be a registered connection that has :attr:`is_slave` set to ``True`` :param username: username to authenticate with :param password: password to authenticate with :param kwargs: allow ad-hoc parameters to be passed into the pymongo driver """ global _connection_settings conn_settings = { 'name': name, 'host': host or 'localhost', 'port': port or 27017, 'is_slave': is_slave, 'slaves': slaves or [], 'username': username, 'password': password, #'read_preference': read_preference } # Handle uri style connections if "://" in conn_settings['host']: uri_dict = uri_parser.parse_uri(conn_settings['host']) conn_settings.update({ 'name': uri_dict.get('database') or name, 'username': uri_dict.get('username'), 'password': uri_dict.get('password'), #'read_preference': read_preference, }) if "replicaSet" in conn_settings['host']: conn_settings['replicaSet'] = True conn_settings.update(kwargs) _connection_settings[alias] = conn_settings def disconnect(alias=DEFAULT_CONNECTION_NAME): global _connections global _dbs if alias in _connections: get_connection(alias=alias).disconnect() del _connections[alias] if alias in _dbs: del _dbs[alias] def get_connection(alias=DEFAULT_CONNECTION_NAME, reconnect=False): global _connections # Connect to the database if not already connected if reconnect: disconnect(alias) if alias not in _connections: if alias not in _connection_settings: msg = 'Connection with alias "%s" has not been defined' % alias if alias == DEFAULT_CONNECTION_NAME: msg = 'You have not defined a default connection' raise ConnectionError(msg) conn_settings = _connection_settings[alias].copy() if hasattr(pymongo, 'version_tuple'): # Support for 2.1+ conn_settings.pop('name', None) conn_settings.pop('slaves', None) conn_settings.pop('is_slave', None) conn_settings.pop('username', None) conn_settings.pop('password', None) else: # Get all the slave connections if 'slaves' in conn_settings: slaves = [] for slave_alias in conn_settings['slaves']: slaves.append(get_connection(slave_alias)) conn_settings['slaves'] = slaves conn_settings.pop('read_preference', None) connection_class = MongoClient if 'replicaSet' in conn_settings: conn_settings['hosts_or_uri'] = conn_settings.pop('host', None) # Discard port since it can't be used on MongoReplicaSetClient conn_settings.pop('port', None) # Discard replicaSet if not base string if not isinstance(conn_settings['replicaSet'], basestring): conn_settings.pop('replicaSet', None) connection_class = MongoReplicaSetClient try: _connections[alias] = 
connection_class(**conn_settings) except Exception, e: raise ConnectionError("Cannot connect to database %s :\n%s" % (alias, e)) return _connections[alias] def get_db(alias=DEFAULT_CONNECTION_NAME, reconnect=False): global _dbs if reconnect: disconnect(alias) if alias not in _dbs: conn = get_connection(alias) conn_settings = _connection_settings[alias] db = conn[conn_settings['name']] # Authenticate if necessary if conn_settings['username'] and conn_settings['password']: db.authenticate(conn_settings['username'], conn_settings['password']) _dbs[alias] = db return _dbs[alias] def connect(db, alias=DEFAULT_CONNECTION_NAME, **kwargs): """Connect to the database specified by the 'db' argument. Connection settings may be provided here as well if the database is not running on the default port on localhost. If authentication is needed, provide username and password arguments as well. Multiple databases are supported by using aliases. Provide a separate `alias` to connect to a different instance of :program:`mongod`. .. versionchanged:: 0.6 - added multiple database support. """ global _connections if alias not in _connections: register_connection(alias, db, **kwargs) return get_connection(alias) # Support old naming convention _get_connection = get_connection _get_db = get_db
mit
8,341,579,631,674,937,000
33.975904
85
0.631244
false
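A brief usage sketch for the connection helpers above; the alias, database name, host and credentials are made-up values, and a MongoDB server actually has to be reachable there for the calls to succeed.

# Illustrative values only.
register_connection('analytics', name='analytics_db',
                    host='mongodb://localhost:27017/analytics_db',
                    username='reader', password='secret')
db = get_db('analytics')               # lazily opens the client and authenticates

# Or, for the default alias, the one-call form:
connect('main_db', host='localhost', port=27017)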
tornado-utils/tornado-restless
tornado_restless/handler.py
1
28297
#!/usr/bin/python # -*- encoding: utf-8 -*- """ Tornado Restless BaseHandler Handles all registered blueprints, you may override this class and use the modification via create_api_blueprint(handler_class=...) """ import inspect from json import loads import logging from math import ceil from traceback import print_exception from urllib.parse import parse_qs import sys import itertools from sqlalchemy import inspect as sqinspect from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.orm.exc import NoResultFound, UnmappedInstanceError, MultipleResultsFound from sqlalchemy.util import memoized_instancemethod, memoized_property from tornado.web import RequestHandler, HTTPError from .convert import to_dict, to_filter from .errors import IllegalArgumentError, MethodNotAllowedError, ProcessingException from .wrapper import SessionedModelWrapper __author__ = 'Martin Martimeo <[email protected]>' __date__ = '26.04.13 - 22:09' class BaseHandler(RequestHandler): """ Basic Blueprint for a sqlalchemy model Subclass of :class:`tornado.web.RequestHandler` that handles web requests. Overwrite :func:`get() <get>` / :func:`post() <post>` / :func:`put() <put>` / :func:`patch() <patch>` / :func:`delete() <delete>` if you want complete customize handling of the methods. Note that the default implementation of this function check for the allowness and then call depending on the instance_id parameter the associated _single / _many method, so you probably want to call super() If you just want to customize the handling of the methods overwrite method_single or method_many. If you want completly disable a method overwrite the SUPPORTED_METHODS constant """ ID_SEPARATOR = "," SUPPORTED_METHODS = ['GET', 'POST', 'PUT', 'PATCH', 'DELETE'] # noinspection PyMethodOverriding def initialize(self, model, manager, methods: set, preprocessor: dict, postprocessor: dict, allow_patch_many: bool, allow_method_override: bool, validation_exceptions, exclude_queries: bool, exclude_hybrids: bool, include_columns: list, exclude_columns: list, results_per_page: int, max_results_per_page: int): """ Init of the handler, derives arguments from api create_api_blueprint :param model: The sqlalchemy model :param manager: The tornado_restless Api Manager :param methods: Allowed methods for this model :param preprocessor: A dictionary of preprocessor functions :param postprocessor: A dictionary of postprocessor functions :param allow_patch_many: Allow PATCH with multiple datasets :param allow_method_override: Support X-HTTP-Method-Override Header :param validation_exceptions: :param exclude_queries: Don't execude dynamic queries (like from associations or lazy relations) :param exclude_hybrids: When exclude_queries is True and exclude_hybrids is False, hybrids are still included. 
:param include_columns: Whitelist of columns to be included :param exclude_columns: Blacklist of columns to be excluded :param results_per_page: The default value of how many results are returned per request :param max_results_per_page: The hard upper limit of resutest per page :reqheader X-HTTP-Method-Override: If allow_method_override is True, this header overwrites the request method """ # Override Method if Header provided if allow_method_override and 'X-HTTP-Method-Override' in self.request.headers: self.request.method = self.request.headers['X-HTTP-Method-Override'] super(BaseHandler, self).initialize() self.model = SessionedModelWrapper(model, manager.session_maker()) self.pk_length = len(sqinspect(model).primary_key) self.methods = [method.lower() for method in methods] self.allow_patch_many = allow_patch_many self.validation_exceptions = validation_exceptions self.preprocessor = preprocessor self.postprocessor = postprocessor self.results_per_page = results_per_page self.max_results_per_page = max_results_per_page self.include = self.parse_columns(include_columns) self.exclude = self.parse_columns(exclude_columns) self.to_dict_options = {'execute_queries': not exclude_queries, 'execute_hybrids': not exclude_hybrids} def prepare(self): """ Prepare the request """ self._call_preprocessor() def on_finish(self): """ Finish the request """ self._call_postprocessor() def parse_columns(self, strings: list) -> dict: """ Parse a list of column names (name1, name2, relation.name1, ...) :param strings: List of Column Names :return: """ columns = {} # Strings if strings is None: return None # Parse for column in [column.split(".", 1) for column in strings]: if len(column) == 1: columns[column[0]] = True else: columns.setdefault(column[0], []).append(column[1]) # Now parse relations for (key, item) in columns.items(): if isinstance(item, list): columns[key] = itertools.chain.from_iterable(self.parse_columns(strings) for strings in item) # Return return columns def get_filters(self): """ Returns a list of filters made by the query argument :query filters: list of filters :query order_by: list of orderings """ # Get all provided filters argument_filters = self.get_query_argument("filters", []) # Get all provided orders argument_orders = self.get_query_argument("order_by", []) return to_filter(self.model.model, argument_filters, argument_orders) def write_error(self, status_code: int, **kwargs): """ Encodes any exceptions thrown to json SQLAlchemyError will be encoded as 400 / SQLAlchemy: Bad Request Errors from the restless api as 400 / Restless: Bad Arguments ProcessingException will be encoded with status code / ProcessingException: Stopped Processing Any other exceptions will occur as an 500 exception :param status_code: The Status Code in Response :param kwargs: Additional Parameters """ if 'exc_info' in kwargs: exc_type, exc_value = kwargs['exc_info'][:2] if status_code >= 300: print_exception(*kwargs['exc_info']) if issubclass(exc_type, UnmappedInstanceError): self.set_status(400, reason='SQLAlchemy: Unmapped Instance') self.finish(dict(type=exc_type.__module__ + "." 
+ exc_type.__name__, message="%s" % exc_value)) elif issubclass(exc_type, SQLAlchemyError): if issubclass(exc_type, NoResultFound): status = 404 reason = message = 'No result found' elif issubclass(exc_type, MultipleResultsFound): status = 400 reason = 'SQLAlchemy: Bad Request' message = 'Multiple results found' else: status = 400 reason = 'SQLAlchemy: Bad Request' message = "%s" % exc_value self.set_status(status, reason=reason) self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__, message=message)) elif issubclass(exc_type, IllegalArgumentError): self.set_status(400, reason='Restless: Bad Arguments') self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__, message="%s" % exc_value)) elif issubclass(exc_type, ProcessingException): self.set_status(status_code, reason='ProcessingException: %s' % (exc_value.reason or "Stopped Processing")) self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__, message="%s" % exc_value)) elif issubclass(exc_type, HTTPError) and exc_value.reason: self.set_status(status_code, reason=exc_value.reason) self.finish(dict(type=exc_type.__module__ + "." + exc_type.__name__, message="%s" % exc_value, **exc_value.__dict__)) else: super().write_error(status_code, **kwargs) else: super().write_error(status_code, **kwargs) def patch(self, instance_id: str=None): """ PATCH (update instance) request :param instance_id: query argument of request :type instance_id: comma seperated string list :statuscode 403: PATCH MANY disallowed :statuscode 405: PATCH disallowed """ if not 'patch' in self.methods: raise MethodNotAllowedError(self.request.method) self._call_preprocessor(search_params=self.search_params) if instance_id is None: if self.allow_patch_many: result = self.patch_many() else: raise MethodNotAllowedError(self.request.method, status_code=403) else: result = self.patch_single(self.parse_pk(instance_id)) self._call_postprocessor(result=result) self.finish(result) def patch_many(self) -> dict: """ Patch many instances :statuscode 201: instances successfull modified :query limit: limit the count of modified instances :query single: If true sqlalchemy will raise an error if zero or more than one instances would be modified """ # Flush self.model.session.flush() # Get values values = self.get_argument_values() # Filters filters = self.get_filters() # Limit limit = self.get_query_argument("limit", None) # Call Preprocessor self._call_preprocessor(filters=filters, data=values) # Modify Instances if self.get_query_argument("single", False): instances = [self.model.one(filters=filters)] for instance in instances: for (key, value) in values.items(): logging.debug("%s => %s" % (key, value)) setattr(instance, key, value) num = 1 else: num = self.model.update(values, limit=limit, filters=filters) # Commit self.model.session.commit() # Result self.set_status(201, "Patched") return {'num_modified': num} def patch_single(self, instance_id: list) -> dict: """ Patch one instance :param instance_id: query argument of request :type instance_id: list of primary keys :statuscode 201: instance successfull modified :statuscode 404: Error """ try: with self.model.session.begin_nested(): values = self.get_argument_values() # Call Preprocessor self._call_preprocessor(instance_id=instance_id, data=values) # Get Instance instance = self.model.get(*instance_id) # Set Values for (key, value) in values.items(): self.logger.debug("%r.%s => %s" % (instance, key, value)) setattr(instance, key, value) # Flush try: self.model.session.flush() except SQLAlchemyError 
as ex: logging.exception(ex) self.model.session.rollback() self.send_error(status_code=400, exc_info=sys.exc_info()) return # Refresh self.model.session.refresh(instance) # Set Status self.set_status(201, "Patched") # To Dict return self.to_dict(instance) except SQLAlchemyError as ex: logging.exception(ex) self.send_error(status_code=400, exc_info=sys.exc_info()) finally: # Commit self.model.session.commit() def delete(self, instance_id: str=None): """ DELETE (delete instance) request :param instance_id: query argument of request :type instance_id: comma seperated string list :statuscode 403: DELETE MANY disallowed :statuscode 405: DELETE disallowed """ if not 'delete' in self.methods: raise MethodNotAllowedError(self.request.method) # Call Preprocessor self._call_preprocessor(search_params=self.search_params) if instance_id is None: if self.allow_patch_many: result = self.delete_many() else: raise MethodNotAllowedError(self.request.method, status_code=403) else: result = self.delete_single(self.parse_pk(instance_id)) self._call_postprocessor(result=result) self.finish(result) def delete_many(self) -> dict: """ Remove many instances :statuscode 200: instances successfull removed :query limit: limit the count of deleted instances :query single: If true sqlalchemy will raise an error if zero or more than one instances would be deleted """ # Flush self.model.session.flush() # Filters filters = self.get_filters() # Limit limit = self.get_query_argument("limit", None) # Call Preprocessor self._call_preprocessor(filters=filters) # Modify Instances if self.get_query_argument("single", False): instance = self.model.one(filters=filters) self.model.session.delete(instance) self.model.session.commit() num = 1 else: num = self.model.delete(limit=limit, filters=filters) # Commit self.model.session.commit() # Result self.set_status(200, "Removed") return {'num_removed': num} def delete_single(self, instance_id: list) -> dict: """ Get one instance :param instance_id: query argument of request :type instance_id: list of primary keys :statuscode 204: instance successfull removed """ # Call Preprocessor self._call_preprocessor(instance_id=instance_id) # Get Instance instance = self.model.get(*instance_id) # Trigger deletion self.model.session.delete(instance) self.model.session.commit() # Status self.set_status(204, "Instance removed") return {} def put(self, instance_id: str=None): """ PUT (update instance) request :param instance_id: query argument of request :type instance_id: comma seperated string list :statuscode 403: PUT MANY disallowed :statuscode 404: Error :statuscode 405: PUT disallowed """ if not 'put' in self.methods: raise MethodNotAllowedError(self.request.method) # Call Preprocessor self._call_preprocessor(search_params=self.search_params) if instance_id is None: if self.allow_patch_many: result = self.put_many() else: raise MethodNotAllowedError(self.request.method, status_code=403) else: result = self.put_single(self.parse_pk(instance_id)) self._call_postprocessor(result=result) self.finish(result) put_many = patch_many put_single = patch_single def post(self, instance_id: str=None): """ POST (new input) request :param instance_id: (ignored) :statuscode 204: instance successfull created :statuscode 404: Error :statuscode 405: POST disallowed """ if not 'post' in self.methods: raise MethodNotAllowedError(self.request.method) # Call Preprocessor self._call_preprocessor(search_params=self.search_params) result = self.post_single() self._call_postprocessor(result=result) self.finish(result) 
def post_single(self): """ Post one instance """ try: values = self.get_argument_values() # Call Preprocessor self._call_preprocessor(data=values) # Create Instance instance = self.model(**values) # Flush self.model.session.commit() # Refresh self.model.session.refresh(instance) # Set Status self.set_status(201, "Created") # To Dict return self.to_dict(instance) except SQLAlchemyError: self.send_error(status_code=400, exc_info=sys.exc_info()) self.model.session.rollback() finally: # Commit self.model.session.commit() @memoized_instancemethod def get_content_encoding(self) -> str: """ Get the encoding the client sends us for encoding request.body correctly :reqheader Content-Type: Provide a charset in addition for decoding arguments. """ content_type_args = {k.strip(): v for k, v in parse_qs(self.request.headers['Content-Type']).items()} if 'charset' in content_type_args and content_type_args['charset']: return content_type_args['charset'][0] else: return 'latin1' @memoized_instancemethod def get_body_arguments(self) -> dict: """ Get arguments encode as json body :statuscode 415: Content-Type mismatch :reqheader Content-Type: application/x-www-form-urlencoded or application/json """ self.logger.debug(self.request.body) content_type = self.request.headers.get('Content-Type') if 'www-form-urlencoded' in content_type: payload = self.request.arguments for key, value in payload.items(): if len(value) == 0: payload[key] = None elif len(value) == 1: payload[key] = str(value[0], encoding=self.get_content_encoding()) else: payload[key] = [str(value, encoding=self.get_content_encoding()) for value in value] return payload elif 'application/json' in content_type: return loads(str(self.request.body, encoding=self.get_content_encoding())) else: raise HTTPError(415, content_type=content_type) def get_body_argument(self, name: str, default=RequestHandler._ARG_DEFAULT): """ Get an argument named key from json encoded body :param name: Name of argument :param default: Default value, if not provided HTTPError 404 is raised :return: :statuscode 404: Missing Argument """ arguments = self.get_body_arguments() if name in arguments: return arguments[name] elif default is RequestHandler._ARG_DEFAULT: raise HTTPError(400, "Missing argument %s" % name) else: return default @property def search_params(self) -> dict: """ The 'q' Dictionary """ try: return self._search_params except AttributeError: self._search_params = loads(self.get_argument("q", default="{}")) return self._search_params def get_query_argument(self, name: str, default=RequestHandler._ARG_DEFAULT): """ Get an argument named key from json encoded body :param name: :param default: :return: :raise: 400 Missing Argument :query q: The query argument """ if name in self.search_params: return self.search_params[name] elif default is RequestHandler._ARG_DEFAULT: raise HTTPError(400, "Missing argument %s" % name) else: return default def get_argument(self, name: str, *args, **kwargs): """ On PUT/PATCH many request parameter may be located in body instead of query :param name: Name of argument :param args: Additional position arguments @see tornado.web.RequestHandler.get_argument :param kwargs: Additional keyword arguments @see tornado.web.RequestHandler.get_argument """ try: return super().get_argument(name, *args, **kwargs) except HTTPError: if name == "q" and self.request.method in ['PUT', 'PATCH']: return self.get_body_argument(name, *args, **kwargs) else: raise def get_argument_values(self): """ Get all values provided via arguments :query q: (ignored) """ 
# Include Columns if self.include is not None: values = {k: self.get_body_argument(k) for k in self.include} else: values = {k: v for k, v in self.get_body_arguments().items()} # Exclude "q" if "q" in values: del values["q"] # Exclude Columns if self.exclude is not None: for column in list(self.exclude): if column in values: del values[column] # Silently Ignore proxies for proxy in self.model.proxies: if proxy.key in values: self.logger.debug("Skipping proxy: %s" % proxy.key) del values[proxy.key] # Silently Ignore hybrids for hybrid in self.model.hybrids: if hybrid.key in values: self.logger.debug("Skipping hybrid: %s" % hybrid.key) del values[hybrid.key] # Handle Relations extra values_relations = {} for relation_key, relation in self.model.relations.items(): if relation_key in values: values_relations[relation_key] = values[relation_key] del values[relation_key] # Check Columns #for column in values: # if not column in self.model.column_names: # raise IllegalArgumentError("Column '%s' not defined for model %s" % (column, self.model.model)) return values def get(self, instance_id: str=None): """ GET request :param instance_id: query argument of request :type instance_id: comma seperated string list :statuscode 405: GET disallowed """ if not 'get' in self.methods: raise MethodNotAllowedError(self.request.method) # Call Preprocessor self._call_preprocessor(search_params=self.search_params) if instance_id is None: result = self.get_many() else: result = self.get_single(self.parse_pk(instance_id)) self._call_postprocessor(result=result) self.finish(result) def get_single(self, instance_id: list) -> dict: """ Get one instance :param instance_id: query argument of request :type instance_id: list of primary keys """ # Call Preprocessor self._call_preprocessor(instance_id=instance_id) # Get Instance instance = self.model.get(*instance_id) # To Dict return self.to_dict(instance) def get_many(self) -> dict: """ Get all instances Note that it is possible to provide offset and page as argument then it will return instances of the nth page and skip offset items :statuscode 400: if results_per_page > max_results_per_page or offset < 0 :query results_per_page: Overwrite the returned results_per_page :query offset: Skip offset instances :query page: Return nth page :query limit: limit the count of modified instances :query single: If true sqlalchemy will raise an error if zero or more than one instances would be deleted """ # All search params search_params = {'single': self.get_query_argument("single", False), 'results_per_page': int(self.get_argument("results_per_page", self.results_per_page)), 'offset': int(self.get_query_argument("offset", 0))} # Results per Page Check if search_params['results_per_page'] > self.max_results_per_page: raise IllegalArgumentError("request.results_per_page > application.max_results_per_page") # Offset & Page page = int(self.get_argument("page", '1')) - 1 search_params['offset'] += page * search_params['results_per_page'] if search_params['offset'] < 0: raise IllegalArgumentError("request.offset < 0") # Limit search_params['limit'] = self.get_query_argument("limit", search_params['results_per_page'] or None) # Filters filters = self.get_filters() # Call Preprocessor self._call_preprocessor(filters=filters, search_params=search_params) # Num Results num_results = self.model.count(filters=filters) if search_params['results_per_page']: total_pages = ceil(num_results / search_params['results_per_page']) else: total_pages = 1 # Get Instances if search_params['single']: 
instance = self.model.one(offset=search_params['offset'], filters=filters) return self.to_dict(instance) else: instances = self.model.all(offset=search_params['offset'], limit=search_params['limit'], filters=filters) return {'num_results': num_results, "total_pages": total_pages, "page": page + 1, "objects": self.to_dict(instances)} def _call_preprocessor(self, *args, **kwargs): """ Calls a preprocessor with args and kwargs """ func_name = inspect.stack()[1][3] if func_name in self.preprocessor: for func in self.preprocessor[func_name]: func(*args, model=self.model, handler=self, **kwargs) def _call_postprocessor(self, *args, **kwargs): """ Calls a postprocessor with args and kwargs """ func_name = inspect.stack()[1][3] if func_name in self.postprocessor: for func in self.postprocessor[func_name]: func(*args, model=self.model, handler=self, **kwargs) @memoized_property def logger(self): """ Tornado Restless Logger """ return logging.getLogger('tornado.restless') def to_dict(self, instance): """ Wrapper to convert.to_dict with arguments from blueprint init :param instance: Instance to be translated """ return to_dict(instance, include=self.include, exclude=self.exclude, options=self.to_dict_options) def parse_pk(self, instance_id): return instance_id.split(self.ID_SEPARATOR, self.pk_length - 1)
bsd-3-clause
8,033,356,386,240,784,000
33.805658
118
0.57628
false
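To make the q search parameter consumed by get_query_argument/get_filters above concrete, a client request could look like the sketch below. The /api/user URL and field names are invented, and the exact filter grammar is whatever tornado_restless.convert.to_filter accepts, so treat the dictionaries as an assumption rather than a reference.

import json
import requests   # used only for this illustration

q = {
    'filters': [{'name': 'age', 'op': 'ge', 'val': 21}],    # hypothetical column/operator
    'order_by': [{'field': 'name', 'direction': 'asc'}],
}
r = requests.get('http://localhost:8888/api/user', params={'q': json.dumps(q)})
print(r.json()['num_results'])   # get_many() returns num_results, total_pages, page, objects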
colour-science/colour
colour/phenomena/tests/test_rayleigh.py
1
38091
# -*- coding: utf-8 -*- """ Defines the unit tests for the :mod:`colour.phenomena.rayleigh` module. """ import numpy as np import unittest from itertools import permutations from colour.phenomena.rayleigh import ( air_refraction_index_Penndorf1957, air_refraction_index_Edlen1966, air_refraction_index_Peck1972, air_refraction_index_Bodhaine1999, N2_depolarisation, O2_depolarisation, F_air_Penndorf1957, F_air_Young1981, F_air_Bates1984, F_air_Bodhaine1999, molecular_density, mean_molecular_weights, gravity_List1968) from colour.phenomena import (scattering_cross_section, rayleigh_optical_depth, sd_rayleigh_scattering) from colour.utilities import ignore_numpy_errors __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2021 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = '[email protected]' __status__ = 'Production' __all__ = [ 'DATA_SD_RAYLEIGH_SCATTERING', 'TestAirRefractionIndexPenndorf1957', 'TestAirRefractionIndexEdlen1966', 'TestAirRefractionIndexPeck1972', 'TestAirRefractionIndexBodhaine1999', 'TestN2Depolarisation', 'TestO2Depolarisation', 'TestF_airPenndorf1957', 'TestF_airYoung1981', 'TestF_airBates1984', 'TestF_airBodhaine1999', 'TestMolecularDensity', 'TestMeanMolecularWeights', 'TestGravityList1968', 'TestScatteringCrossSection', 'TestRayleighOpticalDepth', 'TestSdRayleighScattering' ] DATA_SD_RAYLEIGH_SCATTERING = ( 0.59910134, 0.59217069, 0.58534101, 0.57861051, 0.57197745, 0.56544013, 0.55899687, 0.55264605, 0.54638605, 0.54021531, 0.53413228, 0.52813547, 0.52222340, 0.51639461, 0.51064769, 0.50498125, 0.49939393, 0.49388440, 0.48845134, 0.48309347, 0.47780954, 0.47259832, 0.46745859, 0.46238917, 0.45738890, 0.45245664, 0.44759127, 0.44279170, 0.43805685, 0.43338567, 0.42877712, 0.42423020, 0.41974390, 0.41531726, 0.41094930, 0.40663910, 0.40238573, 0.39818829, 0.39404589, 0.38995765, 0.38592273, 0.38194029, 0.37800949, 0.37412954, 0.37029964, 0.36651900, 0.36278687, 0.35910250, 0.35546514, 0.35187408, 0.34832860, 0.34482800, 0.34137161, 0.33795874, 0.33458873, 0.33126094, 0.32797472, 0.32472946, 0.32152453, 0.31835934, 0.31523328, 0.31214577, 0.30909624, 0.30608413, 0.30310889, 0.30016996, 0.29726682, 0.29439893, 0.29156579, 0.28876688, 0.28600171, 0.28326979, 0.28057063, 0.27790377, 0.27526872, 0.27266505, 0.27009229, 0.26755001, 0.26503777, 0.26255513, 0.26010169, 0.25767703, 0.25528074, 0.25291242, 0.25057168, 0.24825813, 0.24597138, 0.24371107, 0.24147683, 0.23926830, 0.23708511, 0.23492692, 0.23279339, 0.23068417, 0.22859893, 0.22653734, 0.22449907, 0.22248382, 0.22049127, 0.21852111, 0.21657303, 0.21464674, 0.21274195, 0.21085836, 0.20899570, 0.20715367, 0.20533201, 0.20353045, 0.20174871, 0.19998654, 0.19824368, 0.19651987, 0.19481486, 0.19312840, 0.19146025, 0.18981018, 0.18817794, 0.18656331, 0.18496605, 0.18338593, 0.18182275, 0.18027628, 0.17874630, 0.17723261, 0.17573499, 0.17425324, 0.17278715, 0.17133653, 0.16990119, 0.16848091, 0.16707553, 0.16568484, 0.16430867, 0.16294683, 0.16159914, 0.16026543, 0.15894551, 0.15763923, 0.15634640, 0.15506686, 0.15380046, 0.15254701, 0.15130638, 0.15007839, 0.14886289, 0.14765974, 0.14646877, 0.14528984, 0.14412281, 0.14296753, 0.14182386, 0.14069165, 0.13957077, 0.13846109, 0.13736246, 0.13627476, 0.13519785, 0.13413161, 0.13307590, 0.13203061, 0.13099561, 0.12997078, 0.12895599, 0.12795114, 0.12695609, 0.12597074, 0.12499498, 0.12402869, 0.12307176, 0.12212407, 0.12118554, 0.12025604, 0.11933547, 0.11842374, 
0.11752073, 0.11662635, 0.11574049, 0.11486307, 0.11399398, 0.11313313, 0.11228043, 0.11143577, 0.11059908, 0.10977026, 0.10894922, 0.10813587, 0.10733013, 0.10653191, 0.10574113, 0.10495770, 0.10418154, 0.10341257, 0.10265072, 0.10189589, 0.10114802, 0.10040702, 0.09967282, 0.09894535, 0.09822452, 0.09751028, 0.09680254, 0.09610123, 0.09540629, 0.09471764, 0.09403522, 0.09335896, 0.09268878, 0.09202464, 0.09136645, 0.09071416, 0.09006770, 0.08942701, 0.08879203, 0.08816270, 0.08753895, 0.08692073, 0.08630797, 0.08570063, 0.08509864, 0.08450194, 0.08391049, 0.08332421, 0.08274307, 0.08216700, 0.08159596, 0.08102988, 0.08046872, 0.07991243, 0.07936096, 0.07881425, 0.07827225, 0.07773493, 0.07720222, 0.07667408, 0.07615047, 0.07563133, 0.07511663, 0.07460631, 0.07410033, 0.07359865, 0.07310122, 0.07260800, 0.07211894, 0.07163402, 0.07115317, 0.07067636, 0.07020356, 0.06973471, 0.06926979, 0.06880874, 0.06835154, 0.06789814, 0.06744851, 0.06700260, 0.06656038, 0.06612182, 0.06568688, 0.06525551, 0.06482770, 0.06440339, 0.06398256, 0.06356518, 0.06315120, 0.06274060, 0.06233333, 0.06192938, 0.06152871, 0.06113128, 0.06073706, 0.06034603, 0.05995814, 0.05957338, 0.05919171, 0.05881310, 0.05843752, 0.05806494, 0.05769534, 0.05732868, 0.05696494, 0.05660408, 0.05624609, 0.05589093, 0.05553858, 0.05518901, 0.05484219, 0.05449810, 0.05415671, 0.05381800, 0.05348194, 0.05314851, 0.05281768, 0.05248942, 0.05216372, 0.05184055, 0.05151988, 0.05120170, 0.05088598, 0.05057269, 0.05026182, 0.04995333, 0.04964722, 0.04934346, 0.04904202, 0.04874288, 0.04844603, 0.04815144, 0.04785910, 0.04756897, 0.04728105, 0.04699530, 0.04671172, 0.04643028, 0.04615096, 0.04587374, 0.04559861, 0.04532554, 0.04505452, 0.04478553, 0.04451855, 0.04425355, 0.04399054, 0.04372947, 0.04347035, 0.04321315, 0.04295785, 0.04270444, 0.04245290, 0.04220321, 0.04195537, 0.04170934, 0.04146512, 0.04122268, 0.04098202, 0.04074312, 0.04050596, 0.04027053, 0.04003681, 0.03980479, 0.03957445, 0.03934578, 0.03911876, 0.03889338, 0.03866963, 0.03844748, 0.03822694, 0.03800797, 0.03779058, 0.03757474, 0.03736044, 0.03714767, 0.03693642, 0.03672667, 0.03651841, 0.03631163, 0.03610632, 0.03590245, 0.03570003, 0.03549903, 0.03529945, 0.03510128, 0.03490449, 0.03470909, 0.03451505, 0.03432237, 0.03413104, 0.03394104, 0.03375237, 0.03356500, 0.03337894, 0.03319417, 0.03301068, 0.03282846, 0.03264749, 0.03246778, 0.03228930, 0.03211204, 0.03193601, 0.03176118, 0.03158755, 0.03141511, 0.03124385, 0.03107375, 0.03090481, 0.03073702, 0.03057037, 0.03040485, 0.03024045, 0.03007717, 0.02991498, 0.02975389, 0.02959389, 0.02943496, 0.02927710, 0.02912030, 0.02896455, 0.02880984, 0.02865616, 0.02850351, 0.02835188, 0.02820126, 0.02805164, 0.02790301, 0.02775536, 0.02760869, 0.02746300, 0.02731826, 0.02717448, 0.02703164, 0.02688975, 0.02674878, 0.02660874, 0.02646962, 0.02633141, 0.02619410, 0.02605768, 0.02592215, 0.02578751, 0.02565374, 0.02552084, 0.02538880, 0.02525761) # yapf: disable class TestAirRefractionIndexPenndorf1957(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.\ air_refraction_index_Penndorf1957` definition unit tests methods. """ def test_air_refraction_index_Penndorf1957(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Penndorf1957` definition. 
""" self.assertAlmostEqual( air_refraction_index_Penndorf1957(0.360), 1.000285316795146, places=10) self.assertAlmostEqual( air_refraction_index_Penndorf1957(0.555), 1.000277729533864, places=10) self.assertAlmostEqual( air_refraction_index_Penndorf1957(0.830), 1.000274856640486, places=10) def test_n_dimensional_air_refraction_index_Penndorf1957(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Penndorf1957` definition n-dimensional arrays support. """ wl = 0.360 n = air_refraction_index_Penndorf1957(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal( air_refraction_index_Penndorf1957(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal( air_refraction_index_Penndorf1957(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal( air_refraction_index_Penndorf1957(wl), n, decimal=7) @ignore_numpy_errors def test_nan_air_refraction_index_Penndorf1957(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Penndorf1957` definition nan support. """ air_refraction_index_Penndorf1957( np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestAirRefractionIndexEdlen1966(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.air_refraction_index_Edlen1966` definition unit tests methods. """ def test_air_refraction_index_Edlen1966(self): """ Tests :func:`colour.phenomena.\ rayleigh.air_refraction_index_Edlen1966` definition. """ self.assertAlmostEqual( air_refraction_index_Edlen1966(0.360), 1.000285308809879, places=10) self.assertAlmostEqual( air_refraction_index_Edlen1966(0.555), 1.000277727690364, places=10) self.assertAlmostEqual( air_refraction_index_Edlen1966(0.830), 1.000274862218835, places=10) def test_n_dimensional_air_refraction_index_Edlen1966(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Edlen1966` definition n-dimensional arrays support. """ wl = 0.360 n = air_refraction_index_Edlen1966(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal( air_refraction_index_Edlen1966(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal( air_refraction_index_Edlen1966(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal( air_refraction_index_Edlen1966(wl), n, decimal=7) @ignore_numpy_errors def test_nan_air_refraction_index_Edlen1966(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Edlen1966` definition nan support. """ air_refraction_index_Edlen1966( np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestAirRefractionIndexPeck1972(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.air_refraction_index_Peck1972` definition unit tests methods. """ def test_air_refraction_index_Peck1972(self): """ Tests :func:`colour.phenomena.rayleigh.air_refraction_index_Peck1972` definition. """ self.assertAlmostEqual( air_refraction_index_Peck1972(0.360), 1.000285310285056, places=10) self.assertAlmostEqual( air_refraction_index_Peck1972(0.555), 1.000277726541484, places=10) self.assertAlmostEqual( air_refraction_index_Peck1972(0.830), 1.000274859144804, places=10) def test_n_dimensional_air_refraction_index_Peck1972(self): """ Tests :func:`colour.phenomena.rayleigh.air_refraction_index_Peck1972` definition n-dimensional arrays support. 
""" wl = 0.360 n = air_refraction_index_Peck1972(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal( air_refraction_index_Peck1972(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal( air_refraction_index_Peck1972(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal( air_refraction_index_Peck1972(wl), n, decimal=7) @ignore_numpy_errors def test_nan_air_refraction_index_Peck1972(self): """ Tests :func:`colour.phenomena.rayleigh.air_refraction_index_Peck1972` definition nan support. """ air_refraction_index_Peck1972( np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestAirRefractionIndexBodhaine1999(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.\ air_refraction_index_Bodhaine1999` definition unit tests methods. """ def test_air_refraction_index_Bodhaine1999(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Bodhaine1999` definition. """ self.assertAlmostEqual( air_refraction_index_Bodhaine1999(0.360), 1.000285310285056, places=10) self.assertAlmostEqual( air_refraction_index_Bodhaine1999(0.555), 1.000277726541484, places=10) self.assertAlmostEqual( air_refraction_index_Bodhaine1999(0.830), 1.000274859144804, places=10) self.assertAlmostEqual( air_refraction_index_Bodhaine1999(0.360, 0), 1.000285264064789, places=10) self.assertAlmostEqual( air_refraction_index_Bodhaine1999(0.555, 360), 1.000277735539824, places=10) self.assertAlmostEqual( air_refraction_index_Bodhaine1999(0.830, 620), 1.000274906640464, places=10) def test_n_dimensional_air_refraction_index_Bodhaine1999(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Bodhaine1999` definition n-dimensional arrays support. """ wl = 0.360 n = air_refraction_index_Bodhaine1999(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal( air_refraction_index_Bodhaine1999(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal( air_refraction_index_Bodhaine1999(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal( air_refraction_index_Bodhaine1999(wl), n, decimal=7) @ignore_numpy_errors def test_nan_air_refraction_index_Bodhaine1999(self): """ Tests :func:`colour.phenomena.rayleigh.\ air_refraction_index_Bodhaine1999` definition nan support. """ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=1)) for case in cases: wavelength = case CO2_concentration = case air_refraction_index_Bodhaine1999(wavelength, CO2_concentration) class TestN2Depolarisation(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.N2_depolarisation` definition unit tests methods. """ def test_N2_depolarisation(self): """ Tests :func:`colour.phenomena.rayleigh.N2_depolarisation` definition. """ self.assertAlmostEqual( N2_depolarisation(0.360), 1.036445987654321, places=7) self.assertAlmostEqual( N2_depolarisation(0.555), 1.035029137245354, places=7) self.assertAlmostEqual( N2_depolarisation(0.830), 1.034460153868486, places=7) def test_n_dimensional_N2_depolarisation(self): """ Tests :func:`colour.phenomena.rayleigh.N2_depolarisation` definition n-dimensional arrays support. 
""" wl = 0.360 n = N2_depolarisation(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal(N2_depolarisation(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal(N2_depolarisation(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal(N2_depolarisation(wl), n, decimal=7) @ignore_numpy_errors def test_nan_N2_depolarisation(self): """ Tests :func:`colour.phenomena.rayleigh.N2_depolarisation` definition nan support. """ N2_depolarisation(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestO2Depolarisation(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.O2_depolarisation` definition unit tests methods. """ def test_O2_depolarisation(self): """ Tests :func:`colour.phenomena.rayleigh.O2_depolarisation` definition. """ self.assertAlmostEqual( O2_depolarisation(0.360), 1.115307746532541, places=7) self.assertAlmostEqual( O2_depolarisation(0.555), 1.102022536201071, places=7) self.assertAlmostEqual( O2_depolarisation(0.830), 1.098315561269013, places=7) def test_n_dimensional_O2_depolarisation(self): """ Tests :func:`colour.phenomena.rayleigh.O2_depolarisation` definition n-dimensional arrays support. """ wl = 0.360 n = O2_depolarisation(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal(O2_depolarisation(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal(O2_depolarisation(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal(O2_depolarisation(wl), n, decimal=7) @ignore_numpy_errors def test_nan_O2_depolarisation(self): """ Tests :func:`colour.phenomena.rayleigh.O2_depolarisation` definition nan support. """ O2_depolarisation(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestF_airPenndorf1957(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.F_air_Penndorf1957` definition unit tests methods. """ def test_F_air_Penndorf1957(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Penndorf1957` definition. """ self.assertEqual(F_air_Penndorf1957(0.360), 1.0608) def test_n_dimensional_F_air_Penndorf1957(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Penndorf1957` definition n-dimensional arrays support. """ wl = 0.360 n = F_air_Penndorf1957(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal(F_air_Penndorf1957(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal(F_air_Penndorf1957(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal(F_air_Penndorf1957(wl), n, decimal=7) @ignore_numpy_errors def test_nan_F_air_Penndorf1957(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Penndorf1957` definition nan support. """ F_air_Penndorf1957(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestF_airYoung1981(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.F_air_Young1981` definition unit tests methods. """ def test_F_air_Young1981(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Young1981` definition. """ self.assertEqual(F_air_Young1981(0.360), 1.0480) def test_n_dimensional_F_air_Young1981(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Young1981` definition n-dimensional arrays support. 
""" wl = 0.360 n = F_air_Young1981(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal(F_air_Young1981(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal(F_air_Young1981(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal(F_air_Young1981(wl), n, decimal=7) @ignore_numpy_errors def test_nan_F_air_Young1981(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Young1981` definition nan support. """ F_air_Young1981(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestF_airBates1984(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.F_air_Bates1984` definition unit tests methods. """ def test_F_air_Bates1984(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Bates1984` definition. """ self.assertAlmostEqual( F_air_Bates1984(0.360), 1.051997277711708, places=7) self.assertAlmostEqual( F_air_Bates1984(0.555), 1.048153579718658, places=7) self.assertAlmostEqual( F_air_Bates1984(0.830), 1.046947068600589, places=7) def test_n_dimensional_F_air_Bates1984(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Bates1984` definition n-dimensional arrays support. """ wl = 0.360 n = F_air_Bates1984(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal(F_air_Bates1984(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal(F_air_Bates1984(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal(F_air_Bates1984(wl), n, decimal=7) @ignore_numpy_errors def test_nan_F_air_Bates1984(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Bates1984` definition nan support. """ F_air_Bates1984(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestF_airBodhaine1999(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.F_air_Bodhaine1999` definition unit tests methods. """ def test_F_air_Bodhaine1999(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Bodhaine1999` definition. """ self.assertAlmostEqual( F_air_Bodhaine1999(0.360), 1.125664021159081, places=7) self.assertAlmostEqual( F_air_Bodhaine1999(0.555), 1.124691670240156, places=7) self.assertAlmostEqual( F_air_Bodhaine1999(0.830), 1.124386455783539, places=7) self.assertAlmostEqual( F_air_Bodhaine1999(0.360, 0), 1.052629792313939, places=7) self.assertAlmostEqual( F_air_Bodhaine1999(0.555, 360), 1.127993015096689, places=7) self.assertAlmostEqual( F_air_Bodhaine1999(0.830, 620), 1.13577082, places=7) def test_n_dimensional_F_air_Bodhaine1999(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Bodhaine1999` definition n-dimensional arrays support. """ wl = 0.360 n = F_air_Bodhaine1999(wl) wl = np.tile(wl, 6) n = np.tile(n, 6) np.testing.assert_almost_equal(F_air_Bodhaine1999(wl), n, decimal=7) wl = np.reshape(wl, (2, 3)) n = np.reshape(n, (2, 3)) np.testing.assert_almost_equal(F_air_Bodhaine1999(wl), n, decimal=7) wl = np.reshape(wl, (2, 3, 1)) n = np.reshape(n, (2, 3, 1)) np.testing.assert_almost_equal(F_air_Bodhaine1999(wl), n, decimal=7) @ignore_numpy_errors def test_nan_F_air_Bodhaine1999(self): """ Tests :func:`colour.phenomena.rayleigh.F_air_Bodhaine1999` definition nan support. 
""" cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=1)) for case in cases: wavelength = case CO2_concentration = case F_air_Bodhaine1999(wavelength, CO2_concentration) class TestMolecularDensity(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.molecular_density` definition unit tests methods. """ def test_molecular_density(self): """ Tests :func:`colour.phenomena.rayleigh.molecular_density` definition. """ self.assertAlmostEqual( molecular_density(200), 3.669449208173649e+19, delta=10000) self.assertAlmostEqual( molecular_density(300), 2.4462994721157665e+19, delta=10000) self.assertAlmostEqual( molecular_density(400), 1.834724604086825e+19, delta=10000) def test_n_dimensional_molecular_density(self): """ Tests :func:`colour.phenomena.rayleigh.molecular_density` definition n-dimensional arrays support. """ temperature = 200 N_s = molecular_density(temperature) temperature = np.tile(temperature, 6) N_s = np.tile(N_s, 6) np.testing.assert_almost_equal( molecular_density(temperature), N_s, decimal=7) temperature = np.reshape(temperature, (2, 3)) N_s = np.reshape(N_s, (2, 3)) np.testing.assert_almost_equal( molecular_density(temperature), N_s, decimal=7) temperature = np.reshape(temperature, (2, 3, 1)) N_s = np.reshape(N_s, (2, 3, 1)) np.testing.assert_almost_equal( molecular_density(temperature), N_s, decimal=7) @ignore_numpy_errors def test_nan_molecular_density(self): """ Tests :func:`colour.phenomena.rayleigh.molecular_density` definition nan support. """ molecular_density(np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestMeanMolecularWeights(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.mean_molecular_weights` definition unit tests methods. """ def test_mean_molecular_weights(self): """ Tests :func:`colour.phenomena.rayleigh.mean_molecular_weights` definition. """ self.assertAlmostEqual(mean_molecular_weights(0), 28.9595, places=7) self.assertAlmostEqual( mean_molecular_weights(360), 28.964920015999997, places=7) self.assertAlmostEqual( mean_molecular_weights(620), 28.968834471999998, places=7) def test_n_dimensional_mean_molecular_weights(self): """ Tests :func:`colour.phenomena.rayleigh.mean_molecular_weights` definition n-dimensional arrays support. """ CO2_c = 300 m_a = mean_molecular_weights(CO2_c) CO2_c = np.tile(CO2_c, 6) m_a = np.tile(m_a, 6) np.testing.assert_almost_equal( mean_molecular_weights(CO2_c), m_a, decimal=7) CO2_c = np.reshape(CO2_c, (2, 3)) m_a = np.reshape(m_a, (2, 3)) np.testing.assert_almost_equal( mean_molecular_weights(CO2_c), m_a, decimal=7) CO2_c = np.reshape(CO2_c, (2, 3, 1)) m_a = np.reshape(m_a, (2, 3, 1)) np.testing.assert_almost_equal( mean_molecular_weights(CO2_c), m_a, decimal=7) @ignore_numpy_errors def test_nan_mean_molecular_weights(self): """ Tests :func:`colour.phenomena.rayleigh.mean_molecular_weights` definition nan support. """ mean_molecular_weights( np.array([-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan])) class TestGravityList1968(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.gravity_List1968` definition unit tests methods. """ def test_gravity_List1968(self): """ Tests :func:`colour.phenomena.rayleigh.gravity_List1968` definition. 
""" self.assertAlmostEqual( gravity_List1968(0.0, 0.0), 978.03560706, places=7) self.assertAlmostEqual( gravity_List1968(45.0, 1500.0), 980.15334386, places=7) self.assertAlmostEqual( gravity_List1968(48.8567, 35.0), 980.95241784, places=7) def test_n_dimensional_gravity_List1968(self): """ Tests :func:`colour.phenomena.rayleigh.gravity_List1968` definition n-dimensional arrays support. """ g = 978.03560706 np.testing.assert_almost_equal(gravity_List1968(), g, decimal=7) g = np.tile(g, 6) np.testing.assert_almost_equal(gravity_List1968(), g, decimal=7) g = np.reshape(g, (2, 3)) np.testing.assert_almost_equal(gravity_List1968(), g, decimal=7) g = np.reshape(g, (2, 3, 1)) np.testing.assert_almost_equal(gravity_List1968(), g, decimal=7) @ignore_numpy_errors def test_nan_gravity_List1968(self): """ Tests :func:`colour.phenomena.rayleigh.gravity_List1968` definition nan support. """ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=1)) for case in cases: latitude = case altitude = case gravity_List1968(latitude, altitude) class TestScatteringCrossSection(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.scattering_cross_section` definition unit tests methods. """ def test_scattering_cross_section(self): """ Tests :func:`colour.phenomena.rayleigh.scattering_cross_section` definition. """ self.assertAlmostEqual( scattering_cross_section(360 * 10e-8), 2.781289234802031e-26, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8), 4.661330902337604e-27, places=32) self.assertAlmostEqual( scattering_cross_section(830 * 10e-8), 9.125100352218880e-28, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8, 0), 4.346543336839102e-27, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8, 360), 4.675013461928133e-27, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8, 620), 4.707951639592975e-27, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8, temperature=200), 2.245601437154005e-27, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8, temperature=300), 5.052603233596510e-27, places=32) self.assertAlmostEqual( scattering_cross_section(555 * 10e-8, temperature=400), 8.982405748616020e-27, places=32) def test_n_dimensional_scattering_cross_section(self): """ Tests :func:`colour.phenomena.rayleigh.scattering_cross_section` definition n-dimensional arrays support. """ wl = 360 * 10e-8 sigma = scattering_cross_section(wl) sigma = np.tile(sigma, 6) np.testing.assert_almost_equal( scattering_cross_section(wl), sigma, decimal=32) sigma = np.reshape(sigma, (2, 3)) np.testing.assert_almost_equal( scattering_cross_section(wl), sigma, decimal=32) sigma = np.reshape(sigma, (2, 3, 1)) np.testing.assert_almost_equal( scattering_cross_section(wl), sigma, decimal=32) @ignore_numpy_errors def test_nan_scattering_cross_section(self): """ Tests :func:`colour.phenomena.rayleigh.scattering_cross_section` definition nan support. """ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=1)) for case in cases: wavelength = case CO2_concentration = case temperature = case scattering_cross_section(wavelength, CO2_concentration, temperature) class TestRayleighOpticalDepth(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.rayleigh_optical_depth` definition unit tests methods. """ def test_rayleigh_optical_depth(self): """ Tests :func:`colour.phenomena.rayleigh.rayleigh_optical_depth` definition. 
""" self.assertAlmostEqual( rayleigh_optical_depth(360 * 10e-8), 0.599101336848028, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8), 0.100407017728965, places=7) self.assertAlmostEqual( rayleigh_optical_depth(830 * 10e-8), 0.019655847912114, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, 0), 0.093640964348049, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, 360), 0.100698605176897, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, 620), 0.101394382260863, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, temperature=200), 0.048371194415621, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, temperature=300), 0.108835187435146, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, temperature=400), 0.193484777662482, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, pressure=101325), 0.100407017728965, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, pressure=100325), 0.099416077509583, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, pressure=99325), 0.098425137290200, places=7) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, latitude=0, altitude=0), 0.100407017728965, places=10) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, latitude=45, altitude=1500), 0.100190076534634, places=10) self.assertAlmostEqual( rayleigh_optical_depth(555 * 10e-8, latitude=48.8567, altitude=35), 0.100108462705423, places=10) def test_n_dimensional_rayleigh_optical_depth(self): """ Tests :func:`colour.phenomena.rayleigh.rayleigh_optical_depth` definition n-dimensional arrays support. """ wl = 360 * 10e-8 T_R = rayleigh_optical_depth(wl) T_R = np.tile(T_R, 6) np.testing.assert_almost_equal( rayleigh_optical_depth(wl), T_R, decimal=7) T_R = np.reshape(T_R, (2, 3)) np.testing.assert_almost_equal( rayleigh_optical_depth(wl), T_R, decimal=7) T_R = np.reshape(T_R, (2, 3, 1)) np.testing.assert_almost_equal( rayleigh_optical_depth(wl), T_R, decimal=7) @ignore_numpy_errors def test_nan_rayleigh_optical_depth(self): """ Tests :func:`colour.phenomena.rayleigh.rayleigh_optical_depth` definition nan support. """ cases = [-1.0, 0.0, 1.0, -np.inf, np.inf, np.nan] cases = set(permutations(cases * 3, r=1)) for case in cases: wavelength = case CO2_concentration = case temperature = case latitude = case altitude = case rayleigh_optical_depth(wavelength, CO2_concentration, temperature, latitude, altitude) class TestSdRayleighScattering(unittest.TestCase): """ Defines :func:`colour.phenomena.rayleigh.sd_rayleigh_scattering` definition unit tests methods. """ def test_sd_rayleigh_scattering(self): """ Tests :func:`colour.phenomena.rayleigh.sd_rayleigh_scattering` definition. """ np.testing.assert_almost_equal( sd_rayleigh_scattering().values, DATA_SD_RAYLEIGH_SCATTERING, decimal=7) if __name__ == '__main__': unittest.main()
bsd-3-clause
2,041,704,542,507,327,500
25.67437
79
0.593894
false
the-zebulan/CodeWars
tests/beta_tests/test_string_cleaning.py
1
1709
import unittest from katas.beta.string_cleaning import string_clean class StringCleaningTestCase(unittest.TestCase): def test_equal_1(self): self.assertEqual(string_clean( 'My "me3ssy" d8ata issues2! Will1 th4ey ever, e3ver be3 so0lved?' ), 'My "messy" data issues! Will they ever, ever be solved?') def test_equal_2(self): self.assertEqual(string_clean( "Wh7y can't we3 bu1y the goo0d software3? #cheapskates3" ), "Why can't we buy the good software? #cheapskates") def test_equal_3(self): self.assertEqual(string_clean('Dsa32 cdsc34232 csa!!! 1I 4Am cool!'), 'Dsa cdsc csa!!! I Am cool!') def test_equal_4(self): self.assertEqual(string_clean('A1 A1! AAA 3J4K5L@!!!'), 'A A! AAA JKL@!!!') def test_equal_5(self): self.assertEqual(string_clean( 'Adgre2321 A1sad! A2A3A4 fv3fdv3J544K5L@' ), 'Adgre Asad! AAA fvfdvJKL@') def test_equal_6(self): self.assertEqual(string_clean( 'Ad2dsad3ds21 A 1$$s122ad! A2A3Ae24 f44K5L@222222 ' ), 'Addsadds A $$sad! AAAe fKL@ ') def test_equal_7(self): self.assertEqual(string_clean( '33333Ad2dsad3ds21 A3333 1$$s122a!d! A2!A!3Ae$24 f2##222 ' ), 'Addsadds A $$sa!d! A!A!Ae$ f## ') def test_equal_8(self): self.assertEqual(string_clean(''), '') def test_equal_9(self): self.assertEqual(string_clean('! !'), '! !') def test_equal_10(self): self.assertEqual(string_clean('123456789'), '') def test_equal_11(self): self.assertEqual(string_clean('(E3at m2e2!!)'), '(Eat me!!)')
mit
1,643,443,821,819,264,800
33.18
77
0.588648
false
pkimber/block
example_block/views.py
1
1619
# -*- encoding: utf-8 -*- from django.views.generic import TemplateView from braces.views import ( LoginRequiredMixin, StaffuserRequiredMixin, ) from base.view_utils import BaseMixin from block.forms import ContentEmptyForm from block.views import ( ContentCreateView, ContentPublishView, ContentRemoveView, ContentUpdateView, PageTemplateView, ) from .forms import TitleForm from .models import ( Title, TitleBlock, ) class ExampleView(PageTemplateView): def get_context_data(self, **kwargs): context = super(ExampleView, self).get_context_data(**kwargs) context.update(dict( calendar=('Jan', 'Feb', 'Mar'), )) return context class SettingsView(BaseMixin, TemplateView): template_name = 'example/settings.html' class TitleCreateView( LoginRequiredMixin, StaffuserRequiredMixin, ContentCreateView): block_class = TitleBlock form_class = TitleForm model = Title template_name = 'example/title_update.html' class TitleUpdateView( LoginRequiredMixin, StaffuserRequiredMixin, ContentUpdateView): form_class = TitleForm model = Title template_name = 'example/title_update.html' class TitlePublishView( LoginRequiredMixin, StaffuserRequiredMixin, ContentPublishView): form_class = ContentEmptyForm model = Title template_name = 'example/title_publish.html' class TitleRemoveView( LoginRequiredMixin, StaffuserRequiredMixin, ContentRemoveView): form_class = ContentEmptyForm model = Title template_name = 'example/title_remove.html'
apache-2.0
4,447,388,726,142,022,700
21.802817
72
0.714021
false
T2DREAM/t2dream-portal
src/encoded/commands/generate_annotations.py
1
9507
import requests import json import re import time import multiprocessing as mp EPILOG = __doc__ _HGNC_FILE = 'https://www.encodeproject.org/files/ENCFF277WZC/@@download/ENCFF277WZC.tsv' _MOUSE_FILE = 'https://www.encodeproject.org/files/ENCFF097CIT/@@download/ENCFF097CIT.tsv' _DM_FILE = 'https://www.encodeproject.org/files/ENCFF311QAL/@@download/ENCFF311QAL.tsv' _CE_FILE = 'https://www.encodeproject.org/files/ENCFF324UJT/@@download/ENCFF324UJT.tsv' _ENSEMBL_URL = 'http://rest.ensembl.org/' _GENEINFO_URL = 'http://mygene.info/v2/gene/' def get_annotation(): return { 'assembly_name': '', 'chromosome': '', 'start': '', 'end': '' } def rate_limited_request(url): response = requests.get(url) if int(response.headers.get('X-RateLimit-Remaining')) < 2: print('spleeping for about {} seconds'.format(response.headers.get('X-RateLimit-Reset'))) time.sleep(int(float(response.headers.get('X-RateLimit-Reset'))) + 1) return response.json() def assembly_mapper(location, species, input_assembly, output_assembly): # All others new_url = _ENSEMBL_URL + 'map/' + species + '/' \ + input_assembly + '/' + location + '/' + output_assembly \ + '/?content-type=application/json' try: new_response = rate_limited_request(new_url) except: return('', '', '') else: if not len(new_response['mappings']): return('', '', '') data = new_response['mappings'][0]['mapped'] chromosome = data['seq_region_name'] start = data['start'] end = data['end'] return(chromosome, start, end) def human_single_annotation(r): annotations = [] species = ' (homo sapiens)' species_for_payload = re.split('[(|)]', species)[1] # Ensembl ID is used to grab annotations for different references if 'Ensembl Gene ID' not in r: return if not r['Ensembl Gene ID']: return # Annotations are keyed by Gene ID in ES if 'Entrez Gene ID' not in r: return if not r['Entrez Gene ID']: return # Assumption: payload.id and id should always be same doc = {'annotations': []} doc['suggest'] = { 'input': [r['Approved Name'] + species, r['Approved Symbol'] + species, r['HGNC ID'], r['Entrez Gene ID'] + ' (Gene ID)'] } doc['payload'] = {'id': r['HGNC ID'], 'species': species_for_payload} doc['id'] = r['HGNC ID'] if r['Entrez Gene ID'].isdigit(): r['Entrez Gene ID'] = int(r['Entrez Gene ID']) # Adding gene synonyms to autocomplete if r['Synonyms'] is not None and r['Synonyms'] != '': synonyms = [x.strip(' ') + species for x in r['Synonyms'].split(',')] doc['suggest']['input'] = doc['suggest']['input'] + synonyms url = '{ensembl}lookup/id/{id}?content-type=application/json'.format( ensembl=_ENSEMBL_URL, id=r['Ensembl Gene ID']) try: response = rate_limited_request(url) except: return else: annotation = get_annotation() if 'assembly_name' not in response: return annotation['assembly_name'] = response['assembly_name'] annotation['chromosome'] = response['seq_region_name'] annotation['start'] = response['start'] annotation['end'] = response['end'] doc['annotations'].append(annotation) # Get GRcH37 annotation location = response['seq_region_name'] \ + ':' + str(response['start']) \ + '-' + str(response['end']) ann = get_annotation() ann['assembly_name'] = 'GRCh37' ann['chromosome'], ann['start'], ann['end'] = \ assembly_mapper(location, response['species'], 'GRCh38', 'GRCh37') doc['annotations'].append(ann) annotations.append({ "index": { "_index": "annotations", "_type": "default", "_id": doc['id'] } }) annotations.append(doc) print('human {}'.format(time.time())) return annotations def mouse_single_annotation(r): annotations = [] if 'Chromosome Name' not in r: return doc = 
{'annotations': []} species = ' (mus musculus)' species_for_payload = re.split('[(|)]', species)[1] doc['suggest'] = { 'input': [] } doc['payload'] = {'id': r['Ensembl Gene ID'], 'species': species_for_payload} doc['id'] = r['Ensembl Gene ID'] if 'MGI symbol' in r and r['MGI symbol'] is not None: doc['suggest']['input'].append(r['MGI symbol'] + species) if 'MGI ID' in r and r['MGI ID'] is not None: doc['suggest']['input'].append(r['MGI ID'] + species) doc['annotations'].append({ 'assembly_name': 'GRCm38', 'chromosome': r['Chromosome Name'], 'start': r['Gene Start (bp)'], 'end': r['Gene End (bp)'] }) mm9_url = '{geneinfo}{ensembl}?fields=genomic_pos_mm9'.format( geneinfo=_GENEINFO_URL, ensembl=r['Ensembl Gene ID'] ) try: response = requests.get(mm9_url).json() except: return else: if 'genomic_pos_mm9' in response and isinstance(response['genomic_pos_mm9'], dict): ann = get_annotation() ann['assembly_name'] = 'GRCm37' ann['chromosome'] = response['genomic_pos_mm9']['chr'] ann['start'] = response['genomic_pos_mm9']['start'] ann['end'] = response['genomic_pos_mm9']['end'] doc['annotations'].append(ann) annotations.append({ "index": { "_index": "annotations", "_type": "default", "_id": doc['id'] } }) annotations.append(doc) print('mouse {}'.format(time.time())) return annotations def get_rows_from_file(file_name, row_delimiter): response = requests.get(file_name) rows = response.content.decode('utf-8').split(row_delimiter) header = rows[0].split('\t') zipped_rows = [dict(zip(header, row.split('\t'))) for row in rows[1:]] return zipped_rows def prepare_for_bulk_indexing(annotations): flattened_annotations = [] for annotation in annotations: if annotation: for item in annotation: flattened_annotations.append(item) return flattened_annotations def human_annotations(human_file): """ Generates JSON from TSV files """ zipped_rows = get_rows_from_file(human_file, '\r') # Too many processes causes the http requests causes the remote to respond with error pool = mp.Pool(processes=1) annotations = pool.map(human_single_annotation, zipped_rows) return prepare_for_bulk_indexing(annotations) def mouse_annotations(mouse_file): """ Updates and get JSON file for mouse annotations """ zipped_rows = get_rows_from_file(mouse_file, '\n') # Too many processes causes the http requests causes the remote to respond with error pool = mp.Pool(processes=1) annotations = pool.map(mouse_single_annotation, zipped_rows) return prepare_for_bulk_indexing(annotations) def other_annotations(file, species, assembly): """ Generates C. 
elegans and drosophila annotaions """ annotations = [] response = requests.get(file) header = [] species_for_payload = re.split('[(|)]', species)[1] for row in response.content.decode('utf-8').split('\n'): # skipping header row if len(header) == 0: header = row.split('\t') continue r = dict(zip(header, row.split('\t'))) if 'Chromosome Name' not in r or 'Ensembl Gene ID' not in r: continue doc = {'annotations': []} annotation = get_annotation() doc['suggest'] = {'input': [r['Associated Gene Name'] + species]} doc['payload'] = {'id': r['Ensembl Gene ID'], 'species': species_for_payload} doc['id'] = r['Ensembl Gene ID'] annotation['assembly_name'] = assembly annotation['chromosome'] = r['Chromosome Name'] annotation['start'] = r['Gene Start (bp)'] annotation['end'] = r['Gene End (bp)'] doc['annotations'].append(annotation) annotations.append({ "index": { "_index": "annotations", "_type": "default", "_id": doc['id'] } }) annotations.append(doc) return annotations def main(): ''' Get annotations from multiple sources This helps to implement autocomplete for region search ''' import argparse parser = argparse.ArgumentParser( description="Generate annotations JSON file for multiple species", epilog=EPILOG, formatter_class=argparse.RawDescriptionHelpFormatter, ) human = human_annotations(_HGNC_FILE) mouse = mouse_annotations(_MOUSE_FILE) annotations = human + mouse # Create annotations JSON file with open('annotations.json', 'w') as outfile: json.dump(annotations, outfile) if __name__ == '__main__': main()
mit
-2,805,111,701,800,776,700
31.896194
97
0.560429
false
childsplay-mobi/cp-pygame
SPDataManager.py
1
27828
# -*- coding: utf-8 -*- # Copyright (c) 2007-2010 Stas Zykiewicz <[email protected]> # # SPDataManager.py # This program is free software; you can redistribute it and/or # modify it under the terms of version 3 of the GNU General Public License # as published by the Free Software Foundation. A copy of this license should # be included in the file GPL-3. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Library General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. # TODO: what do we do when a error in dbase stuff occurs? #create logger, logger was configured in SPLogging import logging module_logger = logging.getLogger("childsplay.SPDataManager") import atexit, os, sys, datetime # Don't do from 'sqlalchemy import *' as SQA has also 'logging' and 'types' # modules. This is very bad coding practice but they claim to have good reasons # for it. Those reasons suck of course but I don't have the time to discuss it # with them. So I will just use practices *I* think are right and which I should # have used to begin with and that's '*never* do from foo import *'. # The braindead part of it all is that SQA use 'from sqlalchemy import *' in their # docs and tutorials :-( # None the less, SQA is a very good lib. from SPConstants import ACTIVITYDATADIR import SPHelpText from utils import MyError, StopmeException try: import sqlalchemy as sqla import sqlalchemy.orm as sqlorm except ImportError: module_logger.exception("No sqlalchemy package found") raise MyError try: import sqlalchemy.exceptions as sqlae except ImportError: from sqlalchemy import exc as sqlae # attempt to prevent sqlalchemy trhowing recursion limit error sys.setrecursionlimit(2000) # 1000 is the default from utils import set_locale #import SPgdm from SPDataManagerCreateDbase import DbaseMaker DEBUG = False DEMO_DT = [{'fortune': 0, 'target': 'demo', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_picture', 'order': 0}, \ {'fortune': 0, 'target': 'demo', 'level': 2, 'group': 'Puzzels', 'cycles': 1,'act_name': 'electro_sp', 'order': 1}, \ {'fortune': 0, 'target': 'demo', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_melody', 'order': 2},\ ] DEFAULT_DT = [{'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_picture', 'order': 0},\ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Puzzels', 'cycles': 2,'act_name': 'electro_sp', 'order': 1}, \ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_math', 'order': 2},\ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Puzzels', 'cycles': 2,'act_name': 'numbers_sp', 'order': 3},\ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_sayings', 'order': 4},\ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Korte termijn', 'cycles': 2,'act_name': 'memory_sp', 'order': 5},\ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_picture', 'order': 6}, \ {'fortune': 0, 'target': 'default', 'level': 2, 'group': 'Puzzels', 'cycles': 2,'act_name': 'findit_sp', 'order': 7}, \ {'fortune': 0, 'target': 
'default', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_melody', 'order': 8} ] EASY_DT = [{'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_picture', 'order': 0},\ {'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'electro_sp', 'order': 1},\ {'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_sayings', 'order': 2},\ {'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'puzzle', 'order': 3},\ {'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_math', 'order': 4},\ {'fortune': 0, 'target': 'Easy', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_melody', 'order': 5},\ ] HARD_DT = [{'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_picture', 'order': 0},\ {'fortune': 0, 'target': 'Hard', 'level': 3, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'electro_sp', 'order': 1},\ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_sayings', 'order': 2},\ {'fortune': 0, 'target': 'Hard', 'level': 3, 'group': 'Korte termijn', 'cycles': 3,'act_name': 'memory_sp', 'order': 3}, \ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1,'act_name': 'quiz_history', 'order': 4}, \ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Korte termijn', 'cycles': 3, 'act_name': 'soundmemory', 'order': 5},\ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_math', 'order': 6},\ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'numbers_sp', 'order': 7},\ {'fortune': 0, 'target': 'Hard', 'level': 3, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_math', 'order': 8},\ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Puzzels', 'cycles': 3, 'act_name': 'fourrow', 'order': 9},\ {'fortune': 0, 'target': 'Hard', 'level': 2, 'group': 'Lange termijn', 'cycles': 1, 'act_name': 'quiz_melody', 'order': 10}\ ] class DataManager: """Class that handles all users data related stuff except the collecting that should be done by the activity.""" def __init__(self, spgoodies, dbm): self.logger = logging.getLogger("childsplay.SPDataManager.DataManager") self.logger.debug("Starting") self.SPG = spgoodies self.cmd_options = self.SPG._cmd_options self.current_user = self.cmd_options.user self.current_user_id = None self.COPxml = None# controlpanel stuff atexit.register(self._cleanup) self.content_engine, self.user_engine = dbm.get_engines() self.metadata_contentdb, self.metadata_usersdb = dbm.get_metadatas() self.all_orms = dbm.get_all_orms() self.orms_content_db, self.orms_userdb = dbm.get_orms() self.UserSession = sqlorm.sessionmaker(bind=self.user_engine) self.ContentSession = sqlorm.sessionmaker(bind=self.content_engine) # query which language we should use. 
orm, session = self.get_orm('spconf', 'user') row = session.query(orm).filter_by(activity_name = 'language_select')\ .filter_by(key = 'locale').first() if not row: language = self.cmd_options.lang if not language: language = self.cmd_options.default_language row = orm(activity_name='language_select', key='locale', value=language, comment='locale used by the core') session.add(row) row = orm(activity_name='language_select', key='lang', value=language[:2], comment='language code used by the core') session.add(row) session.commit() session.close() language = set_locale(language) elif not self.cmd_options.lang: language = set_locale(row.value) else: language = self.cmd_options.lang if not language: language = self.cmd_options.default_language language = set_locale(language) self.language = language self.SPG.localesetting = language self._check_tables_uptodate() # query to get all availabe cids, used to check served_content orm, session = self.get_orm('game_available_content', 'content') query = session.query(orm) self.all_ids = [result.CID for result in query.all()] session.close() if self.cmd_options.no_login: self.current_user = 'SPUser' self._start_gdm_greeter() elif self.cmd_options.user: self.current_user = self.cmd_options.user self._start_gdm_greeter() elif self.SPG.get_theme() == 'braintrainer': self.WeAreBTP = True self._start_btp_screen() else: self.WeAreBTP = False # we don't have a working login screen yet self.current_user='SPUser' self._start_gdm_greeter() def reset(self): self.UserSession.close_all() self.ContentSession.close_all() try: self.user_engine.dispose() self.content_engine.dispose() except: pass def _get_language(self): return self.language def _check_tables_uptodate(self): self.logger.debug("_check_tables_uptodate") reload(SPHelpText) modules = [x for x in os.listdir(ACTIVITYDATADIR) if '.py' in x and not '.pyc' in x] # check that all the activities are present in the activity_options table orm, session = self.get_orm('activity_options', 'user') if orm == None: self.logger.error("No activity_options ORM found, dbase corrupt") raise MyError, "No activity_options ORM found, dbase corrupt" for m in modules: m = m[:-3] query = session.query(orm) query = query.filter_by(activity = m) result = query.first() if not result: # Not found activity name, set activity name with default values session.add(orm(m)) session.commit() session.close() orm, session = self.get_orm('group_names', 'user') # make user demo orm, session = self.get_orm('users', 'user') result = session.query(orm).filter_by(login_name = 'Demo').first() if not result: session.query(orm).filter_by(user_id = 1).delete() neworm = orm() neworm.user_id = 1 neworm.first_name = 'Demo' neworm.last_name = '' neworm.login_name = 'Demo' neworm.audio = 50 neworm.usersgroup = 0 neworm.dt_target = 'demo' session.add(neworm) session.commit() session.close() # check for mandatory DT sequences orm, session = self.get_orm('dt_sequence', 'user') query = session.query(orm).filter_by(target = 'demo').all() if len(query) != len(DEMO_DT): self.logger.info("demo dt target differs from hardcoded sequence, replacing it") session.query(orm).filter(orm.target == 'demo').delete() session.commit() for row in DEMO_DT: session.add(orm(**row)) query = session.query(orm).filter_by(target = 'default').all() if not query: self.logger.info("default dt target missing, adding a hardcoded sequence.") session.query(orm).filter(orm.target == 'default').delete() session.commit() for row in DEFAULT_DT: session.add(orm(**row)) session.commit() 
session.close() val = self._get_rcrow('SPDatamanager', 'set_extra_dt_sequences') if not val or val != 'yes': # we also set two DT sequences once, user can remove them orm, session = self.get_orm('dt_sequence', 'user') query = session.query(orm).filter_by(target = 'Easy').all() if not query: self.logger.info("First time Easy dt target missing, adding a hardcoded sequence.") session.query(orm).filter(orm.target == 'Easy').delete() session.commit() for row in EASY_DT: session.add(orm(**row)) query = session.query(orm).filter_by(target = 'Hard').all() if not query: self.logger.info("First time Hard dt target missing, adding a hardcoded sequence.") session.query(orm).filter(orm.target == 'Hard').delete() session.commit() for row in HARD_DT: session.add(orm(**row)) session.commit() session.close() self._set_rcrow('SPDatamanager', 'set_extra_dt_sequences', 'yes', 'flag to check if we already have set the extra dt sequences') def _cleanup(self): """atexit function""" # Nothing to see here, please move on. self.reset() def _start_btp_screen(self): """Starts a login screen for the braintrainer plus. Beaware that this only works on a BTP system as the login and control panel is a proprietary piece of code and it's not included in the free versions.""" sys.path.insert(0, './controlpanel_lgpl') import Start_screen as Ss #@UnresolvedImport self.SPG.dm = self ss = Ss.Controller(self.SPG, fullscr=self.cmd_options.fullscreen) result = ss.get_result() if result[0] == 'user': self.current_user = result[1] self._start_gdm_greeter() elif result[0] == 'quit': raise StopmeException, 0 elif result[0] == 'controlpanel': self.COPxml = result[1] def are_we_cop(self): return self.COPxml def _start_gdm_greeter(self): """Will start login screen and stores the login name in the db""" self.current_user = 'Demo' if not self.current_user: g = SPgdm.SPGreeter(self.cmd_options, \ theme=self.cmd_options.theme, \ vtkb=self.SPG.get_virtual_keyboard(), \ fullscr=self.cmd_options.fullscreen)# returns when user hits login button username = g.get_loginname() else: self.logger.debug("Username %s passed as cmdline option, no login screen" % self.current_user) username = self.current_user self.logger.debug("Got login: %s" % username) if not username: # we always must run under a user name so we use default username = self.cmd_options.user self.logger.debug("No login, setting username to default: %s" % username) # Now that we have a name we first check if it already exists # get the users table orm, session = self.get_orm('users', 'user') query = session.query(orm) query = query.filter_by(login_name = username) result = query.first() if result: self.logger.debug("found existing username: %s" % result.login_name) else: # insert just user_name, NULL for others, the user_id will be generated session.add(orm(login_name=username, first_name=username, usersgroup='SPusers')) self.logger.debug("inserted %s" % username) session.commit() query = session.query(orm) query = query.filter_by(login_name = username) result = query.first() session.close() # we must also check if the SPusers group exists. 
orm, session = self.get_orm('group_names', 'user') rows = [row for row in session.query(orm).order_by(orm.group_name).all()] if not rows: # we set a first group neworm = orm() neworm.group_name = 'SP Group' session.add(neworm) session.commit() session.close() self.logger.debug("%s has user id %s" % (username, result.user_id)) self.current_user_id = result.user_id self.current_user = username def get_username(self): """Returns the current user or None if in anonymousmode""" self.logger.debug("get_username returns:%s" % self.current_user) if not self.current_user: return '' return self.current_user def get_user_id(self): return self.current_user_id def get_user_id_by_loginname(self, username): """Returns the user_id. @username must be the users login name""" orm, session = self.get_orm('users', 'user') query = session.query(orm) query = query.filter_by(login_name = username) result = query.first() if not result: self.logger.warning("No user %s found, expect more trouble :-(" % username) else: return result.user_id def get_user_dbrow_by_loginname(self, username): """Returns the user_id. @username must be the users login name""" orm, session = self.get_orm('users', 'user') query = session.query(orm) query = query.filter_by(login_name = username) result = query.first() if not result: self.logger.warning("No user %s found, expect more trouble :-(" % username) return else: return result def get_table_names(self): """Returns a list with the names (strings) of the SQL tables currently in use.""" tl = self.metadata_usersdb.tables.keys() return tl def get_orm(self, tablename, dbase): try: t = self.all_orms[tablename] except KeyError: self.logger.warning("get_orm No such table: %s" % tablename) return None,None else: if dbase == 'user': self.user_engine.dispose() return (t, self.UserSession()) elif dbase == 'content': self.content_engine.dispose() return (t, self.ContentSession()) else: self.logger.warning("no such dbase: %s" % t) return None, None def get_served_content_orm(self): return self.get_orm('served_content', 'user') def get_table_data_userdb(self, table): orm, session = self.get_orm(table, 'user') query = session.query(orm) return query.all() def get_mu_sigma(self, name): orm, session = self.get_orm('activity_options', 'user') query = session.query(orm) query = query.filter_by(activity = name) result = query.first() if not result: self.logger.warning("Not found mu and sigma for %s, expect more trouble :-(" % name) return return (result.mu, result.sigma) def get_served_content_mapper(self): orm, session = self.get_orm('served_content', 'user') mclass = ServedMapper(orm, session, self.current_user_id, self.current_user) return mclass def get_mapper(self, activity, dbase='user'): self.logger.debug("get_mapper called with activity:%s" % activity) #self.metadata_usersdb.bind.echo = True if not activity: self.logger.debug("anonymous or no activity, returning bogus") return BogusMapper() try: orm, session = self.get_orm(activity, dbase) mclass = RowMapper(orm, session, self.current_user_id, self.current_user) except (KeyError, TypeError): self.logger.warning("Failed to get mapper or activity doesn't have a dbase table : %s, returning bogus mapper" % activity) return BogusMapper() else: return mclass # Used by multiple acts through spgoodies def _check_already_served(self, rows, game_theme, minimum=10, all_ids=None): """Returns the rows with the ones that are served removed. When not enough 'free' rows are left it resets all the count_served fields and return the complete rows list. 
all_ids is a list with with possible ids to check against served ids.""" self.logger.debug("_check_already_served called: %s rows offered" % len(rows)) if not all_ids: all_ids = self.all_ids orm, session = self.get_served_content_orm() query = session.query(orm) query = query.filter_by(user_id = self.current_user_id) query = query.filter(orm.game_theme_id.in_(game_theme)) query = query.filter(orm.count_served > 0) allrows = [] served_ids = [] for row in query.all(): allrows.append(row) served_ids.append(row.CID) self.logger.debug("already served rows: %s" % len(served_ids)) notserved = set(all_ids).difference(served_ids) self.logger.debug("found %s not served cids" % len(notserved)) if len(notserved) < minimum: # Not enough unserved rows # first we set all the count_served back to 0 query = session.query(orm).filter_by(user_id = self.current_user_id) query = query.filter(orm.game_theme_id.in_(game_theme)) query.update({orm.count_served: 0}, synchronize_session=False) session.commit() session.close() # we now return all rows as there are now considered not yet served. self.logger.debug("Resetting served count and returning %s original rows" % len(rows)) return rows else: # We must filter the rows by removing nonfree ones session.close() rows = [row for row in rows if row.CID in notserved] self.logger.debug("returning %s rows" % len(rows)) return rows def _set_rcrow(self, actname, key, value, comment): orm, session = self.get_orm('spconf', 'user') query = session.query(orm).filter_by(activity_name = actname).filter_by(key = key).all() for row in query: session.delete(row) row = orm(activity_name=actname, key=key, value=value, comment=comment) session.add(row) session.commit() session.close() def _get_rcrow(self, actname, key): val = None orm, session = self.get_orm('spconf', 'user') query = session.query(orm).filter_by(activity_name = actname).filter_by(key = key).first() if query: val = query.value session.commit() session.close() return val def _update_rcrow(self, actname, key, val): orm, session = self.get_orm('spconf', 'user') query = session.query(orm).filter_by(activity_name = actname).filter_by(key = key).first() if query: comm = query.comment session.commit() session.close() self._set_rcrow(actname, key, val, comm) class RowMapper: """DB object used by the core and activity to store data in the dbase table and row beloging to the current activity. Don't use this class directly, use the DataManagers get_mapper method.""" def __init__(self, orm, session, user_id=None, current_user=''): self.logger = logging.getLogger("childsplay.SPDataManager.RowMapper") self.currentuser = current_user self.user_id = user_id self.orm = orm self.session = session self.coldata = {} def insert(self, col, data): """collects all the data which should go into a row. You must call 'commit' to actually store it into the dbase.""" self.logger.debug("insert in %s: %s" % (col, data)) self.coldata[col] = data def update(self, rowdata): """insert a row in to the current table. @rowdata must be a dictionary with column keys and data values. You must call 'commit' to actually store it into the dbase.""" self.coldata.update(rowdata) def commit(self): """Flush dbase data to disk. 
Returns None on success and True on faillure.""" self.logger.debug("orm %s commit data to dbase" % self.orm._name) if hasattr(self.orm, 'user_id'): self.insert('user_id', self.user_id) self.logger.debug("raw row data:%s" % self.coldata) self.session.add(self.orm(**self.coldata)) if not self.session: return self.session.commit() self.session.close() def _get_level_data(self, levelnum=1): """Used by maincore""" query = self.session.query(self.orm) query.filter_by(level = levelnum) query.filter_by(user_id = self.user_id) return query.all() def _get_start_time(self): """Used by the maincore""" if self.coldata.has_key('start_time'): return self.coldata['start_time'] def _get_end_time(self): """Used by the maincore""" if self.coldata.has_key('end_time'): return self.coldata['end_time'] def get_orm(self): return self.orm def get_session(self): return self.session def close(self): if not self.session: return self.session.close() class ServedMapper: """DB object for the served_content table in the users db. Used by the core and activity to store data in the dbase table and row beloging to the current activity. Don't use this class directly, use the DataManagers get_mapper method.""" def __init__(self, orm, session, user_id=None, current_user=''): self.logger = logging.getLogger("childsplay.SPDataManager.ServedMapper") self.currentuser = current_user self.user_id = user_id self.orm = orm self.session = session self.coldata = {} def insert(self, cid, gtheme): """collects all the data which should go into a row. You must call 'commit' to actually store it into the dbase.""" self.logger.debug("insert cid:%s game_theme_id:%s" % (cid, gtheme)) svc = self.orm(user_id=self.user_id, CID=cid,\ game_theme_id=gtheme, \ module='', start_time=datetime.datetime.now(), \ count_served=1) self.session.add(svc) def commit(self): if not self.session: return self.logger.debug("commiting session") self.session.commit() self.session.close() def close(self): if not self.session: return self.session.close() class BogusMapper: """Bogus mapper class used when we are in anonymousmode""" def __init__(self): pass def __str__(self): return "BogusMapper" def __repr__(self): return "BogusMapper" def insert(self, col, data): pass def insert_row(self, rowdata): pass def update(self, rowdata): pass def commit(self): pass def close(self): pass def get_table_column_names(self): pass def get_table_data(self): pass def delete_row(self, row_id): pass def get_table_selection(self, args): pass def _get_level_data(self, levelnum=1): pass def _get_start_time(self): return "2000-01-01_00:00:00" def _get_end_time(self): return "2000-01-01_00:00:00" def _get_level_data(self, level=1): return None
gpl-3.0
-4,219,586,625,147,050,500
43.883871
141
0.582974
false
liavkoren/djangoDev
tests/test_discovery_sample/doctests.py
1
1192
""" Doctest example from the official Python documentation. https://docs.python.org/3/library/doctest.html """ def factorial(n): """Return the factorial of n, an exact integer >= 0. >>> [factorial(n) for n in range(6)] [1, 1, 2, 6, 24, 120] >>> factorial(30) 265252859812191058636308480000000 >>> factorial(-1) Traceback (most recent call last): ... ValueError: n must be >= 0 Factorials of floats are OK, but the float must be an exact integer: >>> factorial(30.1) Traceback (most recent call last): ... ValueError: n must be exact integer >>> factorial(30.0) 265252859812191058636308480000000 It must also not be ridiculously large: >>> factorial(1e100) Traceback (most recent call last): ... OverflowError: n too large """ import math if not n >= 0: raise ValueError("n must be >= 0") if math.floor(n) != n: raise ValueError("n must be exact integer") if n+1 == n: # catch a value like 1e300 raise OverflowError("n too large") result = 1 factor = 2 while factor <= n: result *= factor factor += 1 return result
bsd-3-clause
-1,257,162,544,663,341,300
25.488889
72
0.605705
false
ilstreltsov/django-db-mailer
dbmail/providers/twilio/sms.py
1
1317
# -*- coding: utf-8 -*- from httplib import HTTPSConnection from urllib import urlencode from base64 import b64encode from json import loads from django.conf import settings from dbmail.providers.prowl.push import from_unicode from dbmail import get_version class TwilioSmsError(Exception): pass def send(sms_to, sms_body, **kwargs): """ Site: https://www.twilio.com/ API: https://www.twilio.com/docs/api/rest/sending-messages """ headers = { "Content-type": "application/x-www-form-urlencoded", "User-Agent": "DBMail/%s" % get_version(), 'Authorization': 'Basic %s' % b64encode( "%s:%s" % ( settings.TWILIO_ACCOUNT_SID, settings.TWILIO_AUTH_TOKEN )).decode("ascii") } kwargs.update({ 'From': kwargs.pop('sms_from', settings.TWILIO_FROM), 'To': sms_to, 'Body': from_unicode(sms_body) }) http = HTTPSConnection(kwargs.pop("api_url", "api.twilio.com")) http.request( "POST", "/2010-04-01/Accounts/%s/Messages.json" % settings.TWILIO_ACCOUNT_SID, headers=headers, body=urlencode(kwargs)) response = http.getresponse() if response.status != 201: raise TwilioSmsError(response.reason) return loads(response.read()).get('sid')
gpl-2.0
-5,037,271,726,113,817,000
25.34
78
0.626424
false
JensTimmerman/easybuild-easyblocks
easybuild/easyblocks/s/samtools.py
1
3213
# This file is an EasyBuild recipy as per https://github.com/hpcugent/easybuild # # Copyright:: Copyright (c) 2012 University of Luxembourg / LCSB # Author:: Cedric Laczny <[email protected]>, Fotis Georgatos <[email protected]> # License:: MIT/GPL # File:: $File$ # Date:: $Date$ """ Easybuild support for building SAMtools (SAM - Sequence Alignment/Map) """ import os import shutil from easybuild.easyblocks.generic.configuremake import ConfigureMake class EB_SAMtools(ConfigureMake): """ Support for building SAMtools; SAM (Sequence Alignment/Map) format is a generic format for storing large nucleotide sequence alignments. """ def __init__(self, *args, **kwargs): """Define lists of files to install.""" super(EB_SAMtools, self).__init__(*args, **kwargs) self.bin_files = ["bcftools/vcfutils.pl", "bcftools/bcftools", "misc/blast2sam.pl", "misc/bowtie2sam.pl", "misc/export2sam.pl", "misc/interpolate_sam.pl", "misc/novo2sam.pl", "misc/psl2sam.pl", "misc/sam2vcf.pl", "misc/samtools.pl", "misc/soap2sam.pl", "misc/varfilter.py", "misc/wgsim_eval.pl", "misc/zoom2sam.pl", "misc/md5sum-lite", "misc/md5fa", "misc/maq2sam-short", "misc/maq2sam-long", "misc/wgsim", "misc/seqtk", "samtools"] self.lib_files = ["libbam.a"] self.include_files = ["bam.h", "bam2bcf.h", "bam_endian.h", "bgzf.h", "errmod.h", "faidx.h", "kaln.h", "khash.h", "klist.h", "knetfile.h", "kprobaln.h", "kseq.h", "ksort.h", "kstring.h", "razf.h", "sam.h", "sam_header.h", "sample.h"] def configure_step(self): """ No configure """ pass def install_step(self): """ Install by copying files to install dir """ for (srcdir, dest, files) in [ (self.cfg['start_dir'], 'bin', self.bin_files), (self.cfg['start_dir'], 'lib', self.lib_files), (self.cfg['start_dir'], 'include/bam', self.include_files) ]: destdir = os.path.join(self.installdir, dest) srcfile = None try: os.makedirs(destdir) for filename in files: srcfile = os.path.join(srcdir, filename) shutil.copy2(srcfile, destdir) except OSError, err: self.log.error("Copying %s to installation dir %s failed: %s" % (srcfile, destdir, err)) def sanity_check_step(self): """Custom sanity check for SAMtools.""" custom_paths = { 'files': ['bin/%s' % x for x in [f.split('/')[-1] for f in self.bin_files]] + ['lib/%s' % x for x in self.lib_files] + ['include/bam/%s' % x for x in self.include_files], 'dirs': [] } super(EB_SAMtools, self).sanity_check_step(custom_paths=custom_paths)
gpl-2.0
7,332,690,638,706,083,000
41.276316
113
0.521631
false
chryss/pygaarst
tests/test_landsat.py
1
1783
#!/usr/bin/env python # encoding: utf-8 """ test_landsat.py Created by Chris Waigl on 2015-04-22. """ from __future__ import division, print_function, absolute_import, unicode_literals import os import pytest from pygaarst import raster from pygaarst import landsat as ls def setup_module(module): global datadir datadir = 'tests/data' global scname scname = 'LC8_test' @pytest.fixture(scope='module') def landsatscene(): scpath = os.path.join(datadir, scname) sc = raster.Landsatscene(scpath) sc.infix = '_clip' return sc @pytest.fixture(scope='module') def landsatscene_direct(): scpath = os.path.join(datadir, scname) sc = ls.Landsatscene(scpath) sc.infix = '_clip' return sc @pytest.fixture(scope='module') def tirband(landsatscene): return landsatscene.TIRband def test_open_valid_landsatscene(landsatscene): assert landsatscene assert landsatscene.spacecraft == 'L8' def test_open_valid_landsatscene_directly(landsatscene_direct): assert landsatscene_direct def test_landsatscene_basic_properties(landsatscene): assert int(landsatscene.NDVI[6][6] * 100) == 35 assert int(landsatscene.NBR[6][10] * 100) == 52 def test_usgsband_basic_properties(landsatscene): assert landsatscene.band2.sensor == 'OLI_TIRS' assert landsatscene.band2.spacecraft == 'L8' def test_tir(tirband): assert tirband.data[5][5] == 28786 def test_LTK(landsatscene): assert landsatscene.ltkcloud[3][3] == 5.0 def test_naivecloud(landsatscene): assert landsatscene.naivecloud[3][3] == 0.0 def test_radiance(landsatscene): assert landsatscene.band7.radiance[2][12] == 1.368852 def test_reflectance(landsatscene): assert landsatscene.band7.reflectance[2][12] == 0.074893323237735315
mit
4,926,654,142,590,418,000
21.2875
82
0.716769
false
xaxa89/mitmproxy
mitmproxy/proxy/config.py
1
3861
import os import re from typing import Any from OpenSSL import SSL, crypto from mitmproxy import exceptions from mitmproxy import options as moptions from mitmproxy import certs from mitmproxy.net import tcp from mitmproxy.net import server_spec CONF_BASENAME = "mitmproxy" class HostMatcher: def __init__(self, patterns=tuple()): self.patterns = list(patterns) self.regexes = [re.compile(p, re.IGNORECASE) for p in self.patterns] def __call__(self, address): if not address: return False host = "%s:%s" % address if any(rex.search(host) for rex in self.regexes): return True else: return False def __bool__(self): return bool(self.patterns) class ProxyConfig: def __init__(self, options: moptions.Options) -> None: self.options = options self.check_ignore = None # type: HostMatcher self.check_tcp = None # type: HostMatcher self.certstore = None # type: certs.CertStore self.client_certs = None # type: str self.openssl_verification_mode_server = None # type: int self.configure(options, set(options.keys())) options.changed.connect(self.configure) def configure(self, options: moptions.Options, updated: Any) -> None: if options.add_upstream_certs_to_client_chain and not options.ssl_insecure: raise exceptions.OptionsError( "The verify-upstream-cert requires certificate verification to be disabled. " "If upstream certificates are verified then extra upstream certificates are " "not available for inclusion to the client chain." ) if options.ssl_insecure: self.openssl_verification_mode_server = SSL.VERIFY_NONE else: self.openssl_verification_mode_server = SSL.VERIFY_PEER self.check_ignore = HostMatcher(options.ignore_hosts) self.check_tcp = HostMatcher(options.tcp_hosts) self.openssl_method_client, self.openssl_options_client = \ tcp.sslversion_choices[options.ssl_version_client] self.openssl_method_server, self.openssl_options_server = \ tcp.sslversion_choices[options.ssl_version_server] certstore_path = os.path.expanduser(options.cadir) if not os.path.exists(os.path.dirname(certstore_path)): raise exceptions.OptionsError( "Certificate Authority parent directory does not exist: %s" % os.path.dirname(options.cadir) ) self.certstore = certs.CertStore.from_store( certstore_path, CONF_BASENAME ) if options.client_certs: client_certs = os.path.expanduser(options.client_certs) if not os.path.exists(client_certs): raise exceptions.OptionsError( "Client certificate path does not exist: %s" % options.client_certs ) self.client_certs = client_certs for c in options.certs: parts = c.split("=", 1) if len(parts) == 1: parts = ["*", parts[0]] cert = os.path.expanduser(parts[1]) if not os.path.exists(cert): raise exceptions.OptionsError( "Certificate file does not exist: %s" % cert ) try: self.certstore.add_cert_file(parts[0], cert) except crypto.Error: raise exceptions.OptionsError( "Invalid certificate format: %s" % cert ) m = options.mode if m.startswith("upstream:") or m.startswith("reverse:"): _, spec = server_spec.parse_with_mode(options.mode) self.upstream_server = spec
mit
2,367,711,487,147,280,000
34.75
93
0.599068
false
Kromey/pynano
tests/test_userhist.py
1
2196
import pytest import responses from pynano import User # noqa def test_userhist_wordcount(kromey_hist_response): with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') # Access history first to query the history API kromey.history assert kromey.wordcount == 64133 def test_userhist_name(kromey_hist_response): with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') # Access history first to query the history API kromey.history assert kromey.name == 'Kromey' def test_userhist_id(kromey_hist_response): with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') # Access history first to query the history API kromey.history assert kromey.id == '217507' def test_userhist_winner(kromey_hist_response): with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') # Access history first to query the history API kromey.history assert kromey.winner def test_userhist_daily_date(kromey_hist_response): with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') assert kromey.history[14].date == '2015-11-15' assert kromey.history[10].date == '2015-11-11' def test_userhist_daily_wordcount(kromey_hist_response): with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') assert kromey.history[14].wordcount == 10499 assert kromey.history[10].wordcount == 804 def test_userhist_sequence(kromey_hist_response): word_sum = 0 with responses.RequestsMock() as rsps: kromey_hist_response(rsps) kromey = User('kromey') assert len(kromey.history) == 30 with pytest.raises(TypeError): kromey.history['foo'] for day in kromey.history: word_sum += day.wordcount # Each day's wordcount should add up to the month's wordcount assert word_sum == kromey.wordcount
mit
440,602,202,943,399,000
27.894737
69
0.651184
false
aldebaran/qibuild
python/qitoolchain/conan.py
1
3506
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2012-2021 SoftBank Robotics. All rights reserved. # Use of this source code is governed by a BSD-style license (see the COPYING file). """ Create a Conan Package with QiBuild tools """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import print_function import os import tempfile import qisys.sh import qisys.command import qisys.interact from qisys import ui class Conan(object): """ This class create a conan package directory ready to be converted by qitoolchain """ def __init__(self, name, version, channels=None, is_shared=None): """ Conan class allows us to create a conanfile and compile the library with conan.""" self.name = name self.version = version self.channels = channels self.is_shared = is_shared self.temp_dir = None self.conanfile = None self.package_path = None def __del__(self): if self.package_path is not None: self.clean() def create(self): """ Ask conan channel and parameters to create a conanfile and build it Tested with: "boost/1.68.0@conan/stable" shared """ if not self.channels: question = "Which conan library do you want to add?" channel = qisys.interact.ask_string(question, default=True) self.channels = [channel] if self.is_shared is None: question = "Do you want it to be shared (highly recommended)?" self.is_shared = qisys.interact.ask_yes_no(question, default=True) self.prepare() self.write_conanfile() self.build() return self.package_path def prepare(self): """ Create a temporary directory where to build the library. """ self.temp_dir = tempfile.mkdtemp("-qiconan-{}-{}".format(self.name, self.version)) self.package_path = os.path.join(self.temp_dir, "package") def write_conanfile(self): """ Write a default conanfile.txt with standard informations """ assert self.temp_dir, "This build is not ready, please call prepare()" self.conanfile = os.path.join(self.temp_dir, "conanfile.txt") ui.info(" * Write conanfile in", self.conanfile) with open(self.conanfile, "w") as fp: fp.write("[requires]" + os.linesep) for c in self.channels: fp.write(c + os.linesep) fp.write(os.linesep) fp.write("[options]" + os.linesep) for c in self.channels: fp.write("{}:shared={}{}".format(c.split('/')[0], self.is_shared, os.linesep)) fp.write(os.linesep) contents = """\ [generators] json [imports] bin, *.dll -> ./bin lib, *.lib* -> ./lib lib, *.dylib* -> ./lib lib, *.so* -> ./lib lib, *.a* -> ./lib include, * -> ./include """ fp.write(contents) def build(self): """ Call conan command to build the package with the conanfile """ ui.info(" * Building library with conan in", self.package_path) qisys.command.check_is_in_path("conan") conan_path = qisys.command.find_program("conan") cmd = [conan_path, "install", self.conanfile, "--build", "--install-folder", self.package_path] qisys.command.call(cmd) def clean(self): """ Remove the temporary directory """ ui.info(" * Removing temporary directory") qisys.sh.rm(self.temp_dir)
bsd-3-clause
4,813,513,214,411,253,000
35.520833
103
0.611238
false
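The qitoolchain Conan helper in the record above follows a fixed flow: prepare() creates a temporary build directory, write_conanfile() emits a conanfile.txt for the requested channels, and build() runs `conan install` into the package directory that create() finally returns. The short driver below is an illustrative sketch only, not part of the qibuild sources; it assumes the `conan` command-line client is installed and on the PATH, that qitoolchain is importable, and it reuses the boost channel string mentioned in the class's own docstring.

# Illustrative sketch, not part of the qibuild sources above.
# Assumes the "conan" CLI is on the PATH and qitoolchain is importable.
from qitoolchain.conan import Conan

builder = Conan(
    "boost",
    "1.68.0",
    channels=["boost/1.68.0@conan/stable"],
    is_shared=True,
)
# create() prepares a temp dir, writes conanfile.txt and runs `conan install`;
# it returns the package directory that qitoolchain can then convert.
package_path = builder.create()
print(package_path)

Because channels and is_shared are passed explicitly here, create() skips its interactive prompts and goes straight to the build.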
acapet/GHER-POSTPROC
Examples/O2bottomClim.py
1
1744
# We only import libraries needed for plotting
# Other libraries are imported in the class definition file, G3D_class.py,
# which contains all process and variables function definition.
import matplotlib
matplotlib.use('pdf')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime as dt
import numpy.ma as ma
import N3D_class
import G3D_class

# We instantiate an object of the class G3D, just by giving the path to the netcdf file to work with
# Up to now I'm working with 4D netcdf files containing several variables.
# Outputs from different files can be merged easily, as can be seen in other examples

for mm in range(1, 13):
    Ni = N3D_class.N3D('BS_1d_20100101_20171231_ptrc_T_2010' + format(mm, '02') +
                       '-2010' + format(mm, '02') + '.nc', 'local_NEMO_004.yml')
    Ni.testvar('O2bottom')
    NmaskDS = (Ni.bat < 120) & ~(Ni.bat.mask)  # Mask should be True where masked
    Ni.apO2 = Ni.avgprofileSIGMA(varname='DOX', maskin=NmaskDS)
    if mm == 1:
        N = Ni
    else:
        N.dates = ma.append(N.dates, Ni.dates, 0)
        N.time = ma.append(N.time, Ni.time, 0)
        N.O2bottom = ma.append(N.O2bottom, Ni.O2bottom, 0)
        N.apO2 = ma.append(N.apO2, Ni.apO2, 0)
    del Ni

N.makeclim('O2bottom')
N.mapMonthlyClim('O2bottom', figsuffix='SHELF', cmapname='oxy', subdomain="NWS", Clim=[0, 300])
N.mapMonthlyClim('O2bottom', figsuffix='WHOLE', cmapname='oxy', Clim=[0, 30])
N.mapMonthlyClim('O2bottom', figsuffix='WHOLEb', cmapname='oxy', Clim=[0, 3])
N.plotprofile('apO2', z=-N.z[0, :, 0, 0], cmapname='oxy', Clim=[0, 300])
N.plotprofile('apO2', z=-N.z[0, :, 0, 0], cmapname='oxy', zlim=[-200, 0])
N.plotprofile('apO2', z=-N.z[0, :, 0, 0], cmapname='oxy', Clim=[0, 3], zlim=[-2200, -1000], figout='apO2b')
gpl-3.0
-861,104,597,107,073,700
38.636364
129
0.682913
false
bloomreach/briefly
src/briefly/defaults.py
1
5344
# # Copyright 2013-2015 BloomReach, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import getpass from properties import * # Bare minimum system settings for any pipeline PIPELINE_DEFAULT_PROPERTIES = Properties( # System-wide default values. build_dir = "build", num_retry = 3, retry_delay = 10, username = getpass.getuser(), log = "${build_dir}/execute.log", run_threads = 2, # Execution threads debug = False, # Extra debug information test_run = False, # Dry-run for check execution flow # Default values for shell process. shell = Properties( max_process = 4, runner = "/bin/sh", ), # Default values for java process. java = Properties( max_process = 2, classpath = ["."], # full list of classpath runner = "java", # full path to java executable. ), # Default values for hadoop process (local or remote). hadoop = Properties( runner = "emr", jar = None, # s3://<BUCKET>/path/hadoop_jobs.jar root = "${build_dir}", bin = "hadoop", # Full path to hadoop binary to execute (local mode only) ), # Default values for EMR cluster. emr = Properties( max_cluster = 2, cluster_name = "${username}-cluster", step_name = "${node_hash}", project_name = None, # Team name or project name to track costs tags = None, # EC2 tags for the EMR cluster keyname = None, instance_groups = [[1, "MASTER", "m1.small"]], # List of instance groups [num, MASTER/CORE, type] bootstrap_actions = [], # List of bootstrap actions: [[name1, action1, args...], [name2, action2, args...], ...] # Regular EC2 instance prices. See http://www.ec2instances.info/. prices = {"t2.micro": 0.01, "t1.micro": 0.02, "t2.small": 0.02, "m1.small": 0.04, "t2.medium": 0.05, "m3.medium": 0.07, "m1.medium": 0.08, "c3.large": 0.10, "c1.medium": 0.13, "m3.large": 0.14, "m1.large": 0.17, "r3.large": 0.17, "c3.xlarge": 0.21, "m2.xlarge": 0.24, "m3.xlarge": 0.28, "m1.xlarge": 0.35, "r3.xlarge": 0.35, "c3.2xlarge": 0.42, "m2.2xlarge": 0.49, "c1.xlarge": 0.52, "m3.2xlarge": 0.56, "g2.2xlarge": 0.65, "r3.2xlarge": 0.70, "c3.4xlarge": 0.84, "i2.xlarge": 0.85, "m2.4xlarge": 0.98, "r3.4xlarge": 1.40, "c3.8xlarge": 1.68, "i2.2xlarge": 1.70, "cc2.8xlarge": 2.00, "cg1.4xlarge": 2.10, "r3.8xlarge": 2.80, "hi1.4xlarge": 3.10, "i2.4xlarge": 3.41, "cr1.8xlarge": 3.50, "hs1.8xlarge": 4.60, "i2.8xlarge": 6.82,}, # Price multiplier for each level. 0 means on-demand instances. price_upgrade_rate = [0.8, 1.5, 0], log_uri = None, # S3 location for mapreduce logs e.g. "s3://<BUCKET>/${username}/mr-logs" ami_version = "2.4.2", step_timeout = 12 * 60 * 60, # 12 HR (in sec) ), # Default values for Qubole cluster. qubole = Properties( api_url = "https://api2.qubole.com/api", api_version = "latest", api_token = None, aws_region = "us-east-1", aws_availability_zone = None, persistent_security_groups = "ElasticMapReduce-slave", max_cluster = 1, max_job_per_cluster = 1, termination_timeout = 5 * 60, # Wait 5 min for cluster termination (in sec). project_name = None, hadoop_custom_config = {}, # Custom hadoop configs. 
Example: {"mapred.output.compress": "true", "mapred.output.compression.type": "BLOCK"} hadoop_settings = {"master_instance_type": "m1.small", "slave_instance_type": "m1.small", "initial_nodes": 1, "max_nodes": 1}, # Num/type config for the cluster bootstrap_actions = [], # List of bootstrap actions: [[name1, action1, args...], [name2, action2, args...], ...] price_upgrade_rate = [0.8, 1.5, 0], # Price multiplier for each level. 0 means on-demand instances. timeout_for_request = 15, # Timeout for spot instance requests (in min). log_uri = None, # S3 location for mapreduce logs e.g. "s3://<BUCKET>/${username}/mr-logs" step_timeout = 43200, # 43200 = 12 * 60 * 60 = 12 HR (in sec). cluster_id = None, # Default None. If a value is passed, the job is executed on that cluster, and the cluster is not terminated step_name = "${node_hash}", # If a value is passed, it will be displayed on the qubole analyzer and will help in debugging ), # Default values for EC2/boto commands. ec2 = Properties( key = "S3_ACCESS_KEY", secret = "S3_SECRET_KEY", ), )
apache-2.0
-2,506,245,100,201,422,000
36.900709
164
0.580838
false
pescobar/easybuild-easyblocks
easybuild/easyblocks/s/stata.py
1
3192
## # Copyright 2009-2019 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for building and installing Stata, implemented as an easyblock author: Kenneth Hoste (HPC-UGent) """ import os import re from easybuild.easyblocks.generic.packedbinary import PackedBinary from easybuild.tools.build_log import EasyBuildError, print_msg from easybuild.tools.filetools import change_dir from easybuild.tools.run import run_cmd, run_cmd_qa class EB_Stata(PackedBinary): """Support for building/installing Stata.""" def install_step(self): """Custom install procedure for Stata.""" change_dir(self.installdir) cmd = os.path.join(self.cfg['start_dir'], 'install') std_qa = { "Do you wish to continue\?\s*\(y/n or q to quit\)": 'y', "Are you sure you want to install into .*\?\s*\(y/n or q\)": 'y', "Okay to proceed\s*\(y/n or q to quit\)": 'y', } no_qa = [ "About to proceed with installation:", "uncompressing files", "extracting files", "setting permissions", ] run_cmd_qa(cmd, {}, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True) print_msg("Note: you need to manually run ./stinit in %s to initialise the license for Stata!" % self.installdir) def sanity_check_step(self): """Custom sanity check for Stata.""" custom_paths = { 'files': ['stata', 'xstata'], 'dirs': [], } super(EB_Stata, self).sanity_check_step(custom_paths=custom_paths) # make sure required libpng library is there for Stata # Stata depends on a very old version of libpng, so we need to provide it out, _ = run_cmd("ldd %s" % os.path.join(self.installdir, 'stata'), simple=False) regex = re.compile('libpng.*not found', re.M) if regex.search(out): raise EasyBuildError("Required libpng library for 'stata' is not available") def make_module_req_guess(self): """Add top install directory to $PATH for Stata""" guesses = super(EB_Stata, self).make_module_req_guess() guesses['PATH'] = [''] return guesses
gpl-2.0
55,361,995,667,855,550
37.926829
121
0.656955
false
DayGitH/Python-Challenges
DailyProgrammer/DP20170428C.py
1
4387
""" [2017-04-28] Challenge #312 [Hard] Text Summarizer https://www.reddit.com/r/dailyprogrammer/comments/683w4s/20170428_challenge_312_hard_text_summarizer/ # Description Automatic summarization is the process of reducing a text document with a computer program in order to create a summary that retains the most important points of the original document. A number of algorithms have been developed, with the simplest being one that parses the text, finds the most unique (or important) words, and then finds a sentence or two that contains the most number of the most important words discovered. This is sometimes called "extraction-based summarization" because you are extracting a sentence that conveys the summary of the text. For your challenge, you should write an implementation of a text summarizer that can take a block of text (e.g. a paragraph) and emit a one or two sentence summarization of it. You can use a stop word list (words that appear in English that don't add any value) from [here](http://snowball.tartarus.org/algorithms/english/stop.txt). You may want to review this brief overview of the algorithms and approaches in text summarization from [Fast Forward labs](http://blog.fastforwardlabs.com/post/141666523533/hp-luhn-and-the-heuristic-value-of-simplicity). This is essentially what [the autotldr bot does](https://www.reddit.com/r/autotldr/comments/31b9fm/faq_autotldr_bot/). # Example Input Here's a paragraph that we want to summarize: The purpose of this paper is to extend existing research on entrepreneurial team formation under a competence-based perspective by empirically testing the influence of the sectoral context on that dynamics. We use inductive, theory-building design to understand how different sectoral characteristics moderate the influence of entrepreneurial opportunity recognition on subsequent entrepreneurial team formation. A sample of 195 founders who teamed up in the nascent phase of Interned-based and Cleantech sectors is analysed. The results suggest a twofold moderating effect of the sectoral context. First, a technologically more challenging sector (i.e. Cleantech) demands technically more skilled entrepreneurs, but at the same time, it requires still fairly commercially experienced and economically competent individuals. Furthermore, the business context also appears to exert an important influence on team formation dynamics: data reveals that individuals are more prone to team up with co-founders possessing complementary know-how when they are starting a new business venture in Cleantech rather than in the Internet-based sector. Overall, these results stress how the business context cannot be ignored when analysing entrepreneurial team formation dynamics by offering interesting insights on the matter to prospective entrepreneurs and interested policymakers. # Example Output Here's a simple extraction-based summary of that paragraph, one of a few possible outputs: Furthermore, the business context also appears to exert an important influence on team formation dynamics: data reveals that individuals are more prone to team up with co-founders possessing complementary know-how when they are starting a new business venture in Cleantech rather than in the Internet-based sector. # Challenge Input This case describes the establishment of a new Cisco Systems R&D facility in Shanghai, China, and the great concern that arises when a collaborating R&D site in the United States is closed down. 
What will that closure do to relationships between the Shanghai and San Jose business units? Will they be blamed and accused of replacing the U.S. engineers? How will it affect other projects? The case also covers aspects of the site's establishment, such as securing an appropriate building, assembling a workforce, seeking appropriate projects, developing managers, building teams, evaluating performance, protecting intellectual property, and managing growth. Suitable for use in organizational behavior, human resource management, and strategy classes at the MBA and executive education levels, the material dramatizes the challenges of changing a U.S.-based company into a global competitor. """ def main(): pass if __name__ == "__main__": main()
mit
5,493,960,812,901,667,000
72.116667
119
0.788238
false
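The challenge description in the record above spells out the simplest extraction-based approach: drop stop words, rank the remaining words by frequency, then return the sentence or two that accumulate the highest word scores. The sketch below only illustrates that idea; it is not the autotldr implementation, and it uses a small inline stop-word set rather than the full snowball list linked in the challenge.

# Illustrative sketch of extraction-based summarization, added for clarity.
import re
from collections import Counter

# Tiny stand-in stop-word set; the challenge links to the full snowball list.
STOP_WORDS = {
    "the", "a", "an", "and", "or", "of", "to", "in", "on", "for", "by",
    "is", "are", "was", "were", "be", "been", "that", "this", "these",
    "those", "with", "as", "at", "it", "its", "we", "our", "their",
}


def summarize(text, num_sentences=1):
    """Return the num_sentences highest-scoring sentences from text."""
    sentences = re.split(r"(?<=[.!?])\s+", text.strip())
    words = re.findall(r"[a-z']+", text.lower())
    freq = Counter(w for w in words if w not in STOP_WORDS)

    def score(sentence):
        tokens = re.findall(r"[a-z']+", sentence.lower())
        return sum(freq[t] for t in tokens if t not in STOP_WORDS)

    top = sorted(sentences, key=score, reverse=True)[:num_sentences]
    # Emit the chosen sentences in their original order.
    return " ".join(s for s in sentences if s in top)

Sentence splitting here is deliberately naive (it breaks on any sentence-final punctuation), which is enough to demonstrate the scoring idea on the example paragraphs given in the challenge, though ties between equally dense sentences can make the exact pick vary.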
emguy/Movie-Trailer
media.py
1
3386
# -*- coding: utf-8 -*-
#
# NOTE: This program is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 3, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# Bugs can be reported to Yu Zhang <[email protected]>.
#
# File Name : media.py
#
# Last Modified : Mon, Feb 01, 2016 3:29:14 PM

import json
import urllib


class Movie():
    """ A Movie object which stores the meta-information about a movie.

    All movie data is retrieved from the Open Movie Database (OMDB).

    Attributes:
        title (str): title of the movie
        year (str): the year of production
        genre (str): the genre of the movie
        plot (str): the plot of the movie
        director (str): the name of the movie director
        actors (str): the names of the leading actors
        poster_image_url (str): the URL to the movie thumbnail
        trailer_youtube_url (str): the URL to the movie trailer (on youtube)
    """

    # we retrieve movie data from the Open Movie Database (OMDB)
    OMDB_API = "http://www.omdbapi.com/?y=&plot=short&r=json&t="

    # constructor
    def __init__(self, title, trailer_url):
        # the requesting url (the title must be URL-encoded)
        url = Movie.OMDB_API + urllib.quote_plus(title)
        # the json response
        response = urllib.urlopen(url)
        # parse json obj
        obj = json.load(response)
        # load the movie data
        self.title = obj["Title"]
        self.year = obj["Year"]
        self.genre = obj["Genre"]
        self.plot = obj["Plot"]
        self.director = obj["Director"]
        self.actors = obj["Actors"]
        self.poster_image_url = obj["Poster"]
        self.trailer_youtube_url = trailer_url


# This list stores the created Movie objects
movie_list = list()

# add movie #1
title = "Star Trek Beyond"
trailer_url = "https://www.youtube.com/watch?v=XRVD32rnzOw"
movie_list.append(Movie(title, trailer_url))

# add movie #2
title = "10 Cloverfield Lane"
trailer_url = "https://www.youtube.com/watch?v=yQy-ANhnUpE"
movie_list.append(Movie(title, trailer_url))

# add movie #3
title = "The Big Short"
trailer_url = "https://www.youtube.com/watch?v=dxAcIWDi8ps"
movie_list.append(Movie(title, trailer_url))

# add movie #4
title = "Zoolander 2"
trailer_url = "https://www.youtube.com/watch?v=4CL4LNWHegk"
movie_list.append(Movie(title, trailer_url))

# add movie #5
title = "ANOMALISA"
trailer_url = "https://www.youtube.com/watch?v=WQkHA3fHk_0"
movie_list.append(Movie(title, trailer_url))

# add movie #6
title = "Daddy's Home"
trailer_url = "https://www.youtube.com/watch?v=Ngptwcz3-JA"
movie_list.append(Movie(title, trailer_url))

# add movie #7
title = "The Little Prince"
trailer_url = "https://www.youtube.com/watch?v=ihi491RQo5A"
movie_list.append(Movie(title, trailer_url))

# add movie #8
title = "13 Hours: The Secret Soldiers of Benghazi"
trailer_url = "https://www.youtube.com/watch?v=4CJBuUwd0Os"
movie_list.append(Movie(title, trailer_url))

# add movie #9
title = "Barnyard"
trailer_url = "https://www.youtube.com/watch?v=s5soJDEbzIc"
movie_list.append(Movie(title, trailer_url))
gpl-3.0
-1,718,229,491,148,597,800
31.247619
73
0.680154
false
Karajlug/karajlug
viewhelper/models.py
1
2334
# coding: utf-8
# -----------------------------------------------------------------------------
# Karajlug.org
# Copyright (C) 2010-2013 Karajlug community
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# -----------------------------------------------------------------------------
from calverter import Calverter
from django.conf import settings
import urllib

DAYS_NAMES = ("شنبه", "یکشنبه", "دوشنبه", "سه شنبه",
              "چهارشنبه", "پنج شنبه", "جمعه")

PERSIAN_DIGITS = {"1": "۱", "2": "۲", "3": "۳", "4": "۴", "5": "۵",
                  "6": "۶", "7": "۷", "8": "۸", "9": "۹", "0": "۰"}

MONTHS_NAMES = ("فروردین", "اردیبهشت", "خرداد", "تیر", "مرداد", "شهریور",
                "مهر", "آبان", "آذر", "دی", "بهمن", "اسفند")


def format_date(date, lang):
    if lang == "fa":
        cal = Calverter()
        jd = cal.gregorian_to_jd(date.year, date.month, date.day)
        wday = cal.jwday(jd)
        jalali = cal.jd_to_jalali(jd)
        result = "%s، %d %s %d" % (DAYS_NAMES[wday], jalali[2],
                                   MONTHS_NAMES[jalali[1] - 1], jalali[0])
        return to_persian_digits(result)
    return date


def to_persian_digits(datestr):
    for i in PERSIAN_DIGITS:
        datestr = datestr.replace(i, PERSIAN_DIGITS[i])
    return datestr


def quote(url):
    return urllib.quote_plus("%s" % url)


def full_path(absolute_url):
    site = getattr(settings, "URL", "www.karajlug.org")
    return "http://%s%s" % (site, absolute_url)
gpl-2.0
1,601,960,923,630,854,100
32.757576
79
0.557252
false
kaguna/Yummy-Recipes
classes/categories.py
1
5660
# This file handles the class for the categories and the # CRUD methods associated to the categories import re from classes.recipes import Recipes class Categories(object): """This class will handle all the functions related to the categories and recipes""" categories = [] def __init__(self, category_name=None, recipe_name=None): """constructor to initialize the global variables""" self.category_name = category_name self.recipe_name = recipe_name self.newRecipe = Recipes() def create_category(self, category_name, category_owner): """This will create new and unique category""" personal_categories = [owner_list for owner_list in self.categories if category_owner in owner_list] # The personal_categories variable hold several categories associated with the user in session. # In the case above i am using the list comprehension to retrieve the categories. similar_category_names = [searched_cat_name for searched_cat_name in personal_categories if searched_cat_name[0] == category_name] # The similar_category_names checks whether there exists a similar category name to the one # provided by the user. # In the case above i am using the list comprehension. regexcategory_name = "[a-zA-Z0-9- .]" if re.match(regexcategory_name, category_name): if category_name != '' and category_name != ' ' and category_name.strip(): if self.categories != []: if similar_category_names == []: # If no such name registration takes place. self.categories.append([category_name, category_owner, ]) return "check_category_creation_success" return "check_category_name_existence" self.categories.append([category_name, category_owner, ]) return "check_category_creation_success" return "check_null_empty_field" return "check_invalid_category_name" def view_category(self, category_owner): """ This will display the categories for the user in session """ personal_categories = [owner_list for owner_list in self.categories if category_owner in owner_list] # personal_categories holds several categories belonging to the owner who has logged in using # using list comprehensions. return personal_categories def edit_category(self, current_name, new_name, category_owner): """This method will aid in updating the category name""" personal_categories = [owner_list_of_categories for owner_list_of_categories in self.categories if category_owner in owner_list_of_categories] similar_category_name = [searched_cat_name for searched_cat_name in personal_categories if searched_cat_name[0] == new_name] regexcategory_name = "[a-zA-Z0-9- .]" if re.match(regexcategory_name, new_name): if new_name != '' and new_name.strip(): for categoryList in personal_categories: if current_name in categoryList: if similar_category_name == []: category_name_index = personal_categories.index(categoryList) personal_categories[category_name_index][0] = new_name # Update the category name in the recipes list for recipeList in self.newRecipe.recipes: if current_name in recipeList: for index in range(0, len(self.newRecipe.recipes)): # loop all the indexes with the current self.newRecipe.recipes[index][1] = new_name return "success_on_edit" return "success_on_edit" return "check_category_name_existence" return "check_null_empty_field" return "check_invalid_category_name" def delete_category(self, category_name, category_owner): """ This will help in deleting the categories from user in session by providing the category name and the and the owner of the category. 
""" personal_categories = [owner_list for owner_list in self.categories if category_owner in owner_list] specific_category_recipes = [specific_recipe for specific_recipe in self.newRecipe.recipes if category_owner == specific_recipe[2] and category_name == specific_recipe[1]] # Using list comprehensions retrieve all the recipes for a specific category for recipeList in self.newRecipe.recipes: if category_name in recipeList: for position_of_recipe in range(0, len(specific_category_recipes)): # loop all the indexes with the recipes of the specific category del specific_category_recipes[position_of_recipe] for categoryList in personal_categories: if category_name in categoryList: category_list_position = personal_categories.index(categoryList) del self.categories[category_list_position] del personal_categories[category_list_position] return personal_categories
mit
8,747,304,260,757,726,000
51.906542
103
0.59682
false
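The Categories class in the record above reports outcomes through short status strings ("check_category_creation_success", "check_category_name_existence", "success_on_edit", and so on) and stores every category as a [name, owner] pair in a shared class-level list. The driver below is an illustrative sketch only; it assumes the accompanying classes/recipes.py module from the same repository is importable, since the constructor instantiates Recipes().

# Illustrative sketch, added for clarity; assumes classes/recipes.py is importable.
from classes.categories import Categories

store = Categories()

print(store.create_category("Desserts", "jane"))          # "check_category_creation_success"
print(store.create_category("Desserts", "jane"))          # duplicate -> "check_category_name_existence"
print(store.view_category("jane"))                        # [['Desserts', 'jane']]
print(store.edit_category("Desserts", "Sweets", "jane"))  # "success_on_edit"
print(store.delete_category("Sweets", "jane"))            # remaining categories for "jane"

Because `categories` is a class attribute rather than an instance attribute, every Categories() instance in the same process shares the same underlying list, which is why the example only ever needs one instance.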
petrushev/mkopen
mkopen/crawlers/dksk.py
1
4021
# -*- coding: utf-8 -*- #---------- Државна комисија за спречување корупција ---------- import requests as rq from StringIO import StringIO import csv from datetime import datetime from time import sleep from random import random import locale from lxml.html import fromstring from mkopen.db.models import Data, Version, catalog2uuid, data2uuid from mkopen.utils import setlocale TODAY = datetime.utcnow().date() CATALOG_PREFIX = u"Државна комисија за спречување корупција" BASE = 'http://www.dksk.org.mk/imoti_2' def main(session): cur_page = 1 final_page = False collected_catalogs = [] while not final_page: start = BASE + '/index.php?search=%d' % cur_page print 'page:', cur_page sleep(random() * 0.5 + 0.5) content = rq.get(start).content doc = fromstring(content) # get links to detail pages detail_a = doc.cssselect("a[href^=detail\.php]") for link in detail_a: url = BASE + '/' + link.attrib['href'] catalog, content = crawl_details(url) if catalog is not None and content is not None: collected_catalogs.append(','.join(reversed(catalog))) catalog = (CATALOG_PREFIX, ) + catalog metadata = {'url': url, 'page_url': start, 'file_type': 'csv'} save(session, catalog, content, metadata) # check if final page next_ = doc.cssselect("img[src='img/forward.png']") final_page = (len(next_) == 0) cur_page = cur_page + 1 with setlocale(): collected_catalogs.sort(cmp=locale.strcoll) # save active pages catalog = (CATALOG_PREFIX, u'Анкетни листови', u'Активни') content = ('\n'.join(collected_catalogs)).encode('utf-8') metadata = {'file_type': 'csv'} save(session, catalog, content, metadata) def crawl_details(url): sleep(random() * 0.5 + 0.5) content = rq.get(url).content doc = fromstring(content) tables = doc.cssselect('table.class') if len(tables) < 2: # no details return None, None definer_table, details_table = tables[0], tables[1] tr = definer_table.cssselect('tr')[1] definer = [td.text_content().strip() for td in tr.cssselect('td')] definer = (definer[2], definer[3], definer[0] + ' ' + definer[1]) csv_handle = StringIO() writer = csv.writer(csv_handle) for tr in details_table.cssselect('tr'): line = [td.text_content().strip().encode('utf-8') for td in tr.cssselect('td')] writer.writerow(line) csv_content = csv_handle.getvalue() csv_handle.close() # resort data csv_content = csv_content.split('\n') csv_header = csv_content.pop(0) csv_content.sort() csv_content.insert(0, csv_header) csv_content = '\n'.join(csv_content) return definer, csv_content def save(session, catalog_id, data, metadata): # locate entry data_id = catalog2uuid(catalog_id) entry = Data.load(session, id=data_id) if entry is None: entry = Data(id=data_id, catalog_id=catalog_id, last_checked=TODAY) session.add(entry) elif entry.last_checked == TODAY: # data is crawled and recently checked print 'skip:' , entry return # check for changes data_hash = data2uuid(data) entry_version = Version.load(session, id=data_hash) if entry_version is None: # data is changed metadata = dict(metadata) metadata['file_type'] = 'csv' entry_version = Version(id=data_hash, data=data, updated=TODAY, metadata=metadata) entry_version.ref = entry elif entry_version.ref.id != entry.id: print 'data mistmatch:', entry_version.ref.id, entry.id # update entry for last check entry.last_checked = TODAY session.commit() return entry_version
gpl-3.0
-767,376,997,350,376,600
27.258993
90
0.60947
false
msfrank/Higgins
higgins/http/dav/davxml.py
1
2476
##
# Copyright (c) 2005 Apple Computer, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: Wilfredo Sanchez, [email protected]
##

"""
WebDAV XML Support.

This module provides XML utilities for use with WebDAV.

This API is considered private to static.py and is therefore subject to change.

See RFC 2518: http://www.ietf.org/rfc/rfc2518.txt (WebDAV)
See RFC 3253: http://www.ietf.org/rfc/rfc3253.txt (WebDAV + Versioning)
See RFC 3744: http://www.ietf.org/rfc/rfc3744.txt (WebDAV ACLs)
"""

from higgins.http.dav.element.parser import registerElements, WebDAVDocument, lookupElement
from higgins.http.dav.element.util import encodeXMLName, decodeXMLName

#
# Import all XML element definitions
#
from higgins.http.dav.element.base import *
from higgins.http.dav.element.rfc2518 import *
from higgins.http.dav.element.rfc3253 import *
from higgins.http.dav.element.rfc3744 import *

#
# Register all XML elements with the parser
#
import higgins.http.dav.element.base
import higgins.http.dav.element.rfc2518
import higgins.http.dav.element.rfc3253
import higgins.http.dav.element.rfc3744

__all__ = (
    registerElements(higgins.http.dav.element.base) +
    registerElements(higgins.http.dav.element.rfc2518) +
    registerElements(higgins.http.dav.element.rfc3253) +
    registerElements(higgins.http.dav.element.rfc3744) +
    ["registerElements", "WebDAVDocument", "lookupElement",
     "encodeXMLName", "decodeXMLName"]
)
lgpl-2.1
4,310,674,279,985,792,500
37.092308
93
0.771405
false
WielderOfMjoelnir/pypeira
main.py
1
1118
import pypeira.pypeira as pype

if __name__ == "__main__":
    # Create instance of IRA (not necessary but much more convenient for now)
    ira = pype.IRA()
    path = "./data"

    # Read files. The read() function will walk from the given dir and find all files satisfying
    # the given criteria. Set 'walk' to False if this is not wanted.
    data = ira.read(path, dtype='bcd', walk=True)

    # Uncomment plot_brightest(data) below, and comment out EVERYTHING after this line for the easiest way.
    # ira.plot_brightest(data)

    # get_brightest() returns an (index, maximum_value)-pair
    idx, max_val = ira.get_brightest(data)

    # pixel_data() then collects all the values of that specific pixel, for all the HDUs in the "data" list.
    xs, ys = ira.pixel_data(idx, data)

    # Finally one simply plots using Matplotlib
    # NOTE: Hot pixels have not been removed at this stage, so some use of plt.ylim() is highly recommended.
    import matplotlib.pyplot as plt

    plt.plot(xs, ys)
    plt.ylabel('Flux (MJy/sr)')
    plt.title('Flux vs. Time')
    plt.xlabel('Time (BJD)')
    plt.show()
mit
-8,322,726,822,375,709,000
35.064516
108
0.668157
false
xLegoz/fabric
integration/test_operations.py
1
6882
from six import StringIO as StringIO import os import posixpath import shutil from fabric.api import ( run, path, put, sudo, abort, warn_only, env, cd, local, settings, get ) from fabric.contrib.files import exists from utils import Integration def assert_mode(path, mode): remote_mode = run("stat -c \"%%a\" \"%s\"" % path).stdout assert remote_mode == mode, "remote %r != expected %r" % (remote_mode, mode) class TestOperations(Integration): filepath = "/tmp/whocares" dirpath = "/tmp/whatever/bin" not_owned = "/tmp/notmine" def setup(self): super(TestOperations, self).setup() run("mkdir -p %s" % " ".join([self.dirpath, self.not_owned])) def teardown(self): super(TestOperations, self).teardown() # Revert any chown crap from put sudo tests sudo("chown %s ." % env.user) # Nuke to prevent bleed sudo("rm -rf %s" % " ".join([self.dirpath, self.filepath])) sudo("rm -rf %s" % self.not_owned) def test_no_trailing_space_in_shell_path_in_run(self): put(StringIO("#!/bin/bash\necho hi"), "%s/myapp" % self.dirpath, mode="0755") with path(self.dirpath): assert run('myapp').stdout == 'hi' def test_string_put_mode_arg_doesnt_error(self): put(StringIO("#!/bin/bash\necho hi"), self.filepath, mode="0755") assert_mode(self.filepath, "755") def test_int_put_mode_works_ok_too(self): put(StringIO("#!/bin/bash\necho hi"), self.filepath, mode=0o755) assert_mode(self.filepath, "755") def _chown(self, target): sudo("chown root %s" % target) def _put_via_sudo(self, source=None, target_suffix='myfile', **kwargs): # Ensure target dir prefix is not owned by our user (so we fail unless # the sudo part of things is working) self._chown(self.not_owned) source = source if source else StringIO("whatever") # Drop temp file into that dir, via use_sudo, + any kwargs return put( source, self.not_owned + '/' + target_suffix, use_sudo=True, **kwargs ) def test_put_with_use_sudo(self): self._put_via_sudo() def test_put_with_dir_and_use_sudo(self): # Test cwd should be root of fabric source tree. Use our own folder as # the source, meh. self._put_via_sudo(source='integration', target_suffix='') def test_put_with_use_sudo_and_custom_temp_dir(self): # TODO: allow dependency injection in sftp.put or w/e, test it in # isolation instead. # For now, just half-ass it by ensuring $HOME isn't writable # temporarily. 
self._chown('.') self._put_via_sudo(temp_dir='/tmp') def test_put_with_use_sudo_dir_and_custom_temp_dir(self): self._chown('.') self._put_via_sudo(source='integration', target_suffix='', temp_dir='/tmp') def test_put_use_sudo_and_explicit_mode(self): # Setup target_dir = posixpath.join(self.filepath, 'blah') subdir = "inner" subdir_abs = posixpath.join(target_dir, subdir) filename = "whatever.txt" target_file = posixpath.join(subdir_abs, filename) run("mkdir -p %s" % subdir_abs) self._chown(subdir_abs) local_path = os.path.join('/tmp', filename) with open(local_path, 'w+') as fd: fd.write('stuff\n') # Upload + assert with cd(target_dir): put(local_path, subdir, use_sudo=True, mode='777') assert_mode(target_file, '777') def test_put_file_to_dir_with_use_sudo_and_mirror_mode(self): # Ensure mode of local file, umask varies on eg travis vs various # localhosts source = 'whatever.txt' try: local("touch %s" % source) local("chmod 644 %s" % source) # Target for _put_via_sudo is a directory by default uploaded = self._put_via_sudo( source=source, mirror_local_mode=True ) assert_mode(uploaded[0], '644') finally: local("rm -f %s" % source) def test_put_directory_use_sudo_and_spaces(self): localdir = 'I have spaces' localfile = os.path.join(localdir, 'file.txt') os.mkdir(localdir) with open(localfile, 'w') as fd: fd.write('stuff\n') try: uploaded = self._put_via_sudo(localdir, target_suffix='') # Kinda dumb, put() would've died if it couldn't do it, but. assert exists(uploaded[0]) assert exists(posixpath.dirname(uploaded[0])) finally: shutil.rmtree(localdir) def test_agent_forwarding_functions(self): # When paramiko #399 is present this will hang indefinitely with settings(forward_agent=True): run('ssh-add -L') def test_get_with_use_sudo_unowned_file(self): # Ensure target is not normally readable by us target = self.filepath sudo("echo 'nope' > %s" % target) sudo("chown root:root %s" % target) sudo("chmod 0440 %s" % target) # Pull down with use_sudo, confirm contents local_ = StringIO() result = get( local_path=local_, remote_path=target, use_sudo=True, ) assert local_.getvalue() == "nope\n" def test_get_with_use_sudo_groupowned_file(self): # Issue #1226: file gotten w/ use_sudo, file normally readable via # group perms (yes - so use_sudo not required - full use case involves # full-directory get() where use_sudo *is* required). Prior to fix, # temp file is chmod 404 which seems to cause perm denied due to group # membership (despite 'other' readability). target = self.filepath sudo("echo 'nope' > %s" % target) # Same group as connected user gid = run("id -g") sudo("chown root:%s %s" % (gid, target)) # Same perms as bug use case (only really need group read) sudo("chmod 0640 %s" % target) # Do eet local_ = StringIO() result = get( local_path=local_, remote_path=target, use_sudo=True, ) assert local_.getvalue() == "nope\n" def test_get_from_unreadable_dir(self): # Put file in dir as normal user remotepath = "%s/myfile.txt" % self.dirpath run("echo 'foo' > %s" % remotepath) # Make dir unreadable (but still executable - impossible to obtain # file if dir is both unreadable and unexecutable) sudo("chown root:root %s" % self.dirpath) sudo("chmod 711 %s" % self.dirpath) # Try gettin' it local_ = StringIO() get(local_path=local_, remote_path=remotepath) assert local_.getvalue() == 'foo\n'
bsd-2-clause
-6,177,240,608,647,160,000
36
85
0.586022
false
nektor211/imgaug
tests/check_background_augmentation.py
1
3270
from __future__ import print_function, division import imgaug as ia from imgaug import augmenters as iaa from scipy import misc, ndimage import numpy as np from skimage import data def main(): augseq = iaa.Sequential([ iaa.Fliplr(0.5), iaa.CoarseDropout(p=0.1, size_percent=0.1) ]) print("------------------") print("augseq.augment_batches(batches, background=True)") print("------------------") batches = list(load_images()) batches_aug = augseq.augment_batches(batches, background=True) images_aug = [] keypoints_aug = [] for batch_aug in batches_aug: images_aug.append(batch_aug.images_aug) keypoints_aug.append(batch_aug.keypoints_aug) misc.imshow(draw_grid(images_aug, keypoints_aug)) print("------------------") print("augseq.augment_batches(batches, background=True) -> only images") print("------------------") batches = list(load_images()) batches = [batch.images for batch in batches] batches_aug = augseq.augment_batches(batches, background=True) images_aug = [] keypoints_aug = None for batch_aug in batches_aug: images_aug.append(batch_aug) misc.imshow(draw_grid(images_aug, keypoints_aug)) print("------------------") print("BackgroundAugmenter") print("------------------") batch_loader = ia.BatchLoader(load_images) bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq) images_aug = [] keypoints_aug = [] while True: print("Next batch...") batch = bg_augmenter.get_batch() if batch is None: print("Finished.") break images_aug.append(batch.images_aug) keypoints_aug.append(batch.keypoints_aug) misc.imshow(draw_grid(images_aug, keypoints_aug)) def load_images(): batch_size = 4 astronaut = data.astronaut() astronaut = ia.imresize_single_image(astronaut, (64, 64)) kps = ia.KeypointsOnImage([ia.Keypoint(x=15, y=25)], shape=astronaut.shape) counter = 0 for i in range(10): batch_images = [] batch_kps = [] for b in range(batch_size): astronaut_text = ia.draw_text(astronaut, x=0, y=0, text="%d" % (counter,), color=[0, 255, 0], size=16) batch_images.append(astronaut_text) batch_kps.append(kps) counter += 1 batch = ia.Batch( images=np.array(batch_images, dtype=np.uint8), keypoints=batch_kps ) yield batch def draw_grid(images_aug, keypoints_aug): if keypoints_aug is None: keypoints_aug = [] for bidx in range(len(images_aug)): keypoints_aug.append([None for image in images_aug[bidx]]) images_kps_batches = [] for bidx in range(len(images_aug)): images_kps_batch = [] for image, kps in zip(images_aug[bidx], keypoints_aug[bidx]): if kps is None: image_kps = image else: image_kps = kps.draw_on_image(image, size=5, color=[255, 0, 0]) images_kps_batch.append(image_kps) images_kps_batches.extend(images_kps_batch) grid = ia.draw_grid(images_kps_batches, cols=len(images_aug[0])) return grid if __name__ == "__main__": main()
mit
4,145,880,567,530,394,600
33.0625
114
0.592355
false
jeromekelleher/msprime
verification.py
1
207568
""" Script to automate verification of msprime against known statistical results and benchmark programs such as ms and Seq-Gen. Tests are structured in a similar way to Python unittests. Tests are organised into classes of similar tests. Ideally, each test in the class is a simple call to a general method with different parameters (this is called ``_run``, by convention). Tests must be *independent* and not depend on any shared state within the test class, other than the ``self.output_dir`` variable which is guaranteed to be set when the method is called. The output directory is <output-dir>/<class name>/<test name>. Each test should output one or more diagnostic plots, which have a clear interpretation as "correct" or "incorrect". QQ-plots are preferred, where possible. Numerical results can also be output by using ``logging.debug()``, where appropriate; to view these, append ``--debug`` to the comand line running your tests. Test classes must be a subclass of the ``Test`` class defined in this module. To run the tests, first get some help from the CLI: python3 verification.py --help This will output some basic help on the tests. Use python3 verification.py --list to show all the available tests. If you run without any arguments, this will run all the tests sequentially. The progress bar and output behaviour can be controlled using command line parameters, and running over multiple processes is possible. If you wish to run a specific tests, you can provide the test names as positional arguments, i.e., python3 verification.py test_msdoc_outgroup_sequence test_msdoc_recomb_ex will just run these two specific tests. Using the ``-c`` option allows you to run all tests in a given class. Gotchas: - Any test superclasses must be abstract. That is, you cannot inherit from a test class that contains any tests. - Test method names must be unique across *all* classes. """ import argparse import ast import collections import concurrent.futures import functools import inspect import itertools import json import logging import math import pathlib import pickle import random import subprocess import sys import tempfile import warnings import allel import attr import daiquiri import dendropy import matplotlib import numpy as np import pandas as pd import pyslim import pyvolve import scipy.special import scipy.stats import seaborn as sns import tqdm import tskit from matplotlib import pyplot import msprime import msprime.cli as cli from msprime.demography import _matrix_exponential # Force matplotlib to not use any Xwindows backend. # Note this must be done before importing statsmodels. matplotlib.use("Agg") import statsmodels.api as sm # noqa: E402 _mspms_executable = [sys.executable, "mspms_dev.py"] _slim_executable = ["./data/slim"] _ms_executable = ["./data/ms"] _discoal_executable = ["./data/discoal"] _scrm_executable = ["./data/scrm"] _msms_executable = ["java", "-Xmx1G", "-jar", "data/msms.jar"] def flatten(li): return [x for sublist in li for x in sublist] def harmonic_number(n): return np.sum(1 / np.arange(1, n + 1)) def hk_f(n, z): """ Returns Hudson and Kaplan's f_n(z) function. This is based on the exact value for n=2 and the approximations given in the 1985 Genetics paper. """ ret = 0 if n == 2: ret = (18 + z) / (z ** 2 + 13 * z + 18) else: ret = sum(1 / j ** 2 for j in range(1, n)) * hk_f(2, z) return ret def get_predicted_variance(n, R): # We import this here as it's _very_ slow to import and we # only use it in this case. 
import scipy.integrate def g(z): return (R - z) * hk_f(n, z) res, err = scipy.integrate.quad(g, 0, R) return R * harmonic_number(n - 1) + 2 * res def write_slim_script(outfile, format_dict): slim_str = """ // set up a simple neutral simulation initialize() {{ initializeTreeSeq(checkCoalescence=T); initializeMutationRate(0); initializeMutationType('m1', 0.5, 'f', 0.0); // g1 genomic element type: uses m1 for all mutations initializeGenomicElementType('g1', m1, 1.0); // uniform chromosome initializeGenomicElement(g1, 0, {NUM_LOCI}); // uniform recombination along the chromosome initializeRecombinationRate({RHO}); }} // create a population 1 {{ {POP_STRS}; sim.tag = 0; }} // run for set number of generations 1: late() {{ if (sim.tag == 0) {{ if (sim.treeSeqCoalesced()) {{ sim.tag = sim.generation; catn(sim.tag + ': COALESCED'); }} }} if (sim.generation == sim.tag * 10) {{ sim.simulationFinished(); catn('Ran a further ' + sim.tag * 10 + ' generations'); sim.treeSeqOutput('{OUTFILE}'); }} }} 100000 late() {{ catn('No coalescence after 100000 generations!'); }} """ with open(outfile, "w") as f: f.write(slim_str.format(**format_dict)) def write_sweep_slim_script(outfile, format_dict): slim_str = """ initialize() {{ initializeTreeSeq(); initializeMutationRate(0); initializeMutationType('m1', 0.5, 'f', 0.0); initializeMutationType('m2', 0.5, 'f', {s}); initializeGenomicElementType('g1', m1, 1.0); initializeGenomicElement(g1, 0, {NUMLOCI}); initializeRecombinationRate({r}); }} s1 200000 late() {{ sim.treeSeqOutput('{OUTFILE}'); sim.simulationFinished(); }} 1 {{ // save this run's identifier, used to save and restore defineConstant("simID", getSeed()); sim.addSubpop("p1", {POPSIZE}); sim.setValue("flag",0); }} 2 late() {{ // save the state of the simulation sim.treeSeqOutput("/tmp/slim_" + simID + ".trees"); target = sample(p1.genomes, 1); target.addNewDrawnMutation(m2, {SWEEPPOS}); }} 2:2000 late() {{ if (sim.countOfMutationsOfType(m2) == 0) {{ fixed = (sum(sim.substitutions.mutationType == m2) == 1); if (fixed){{ sim.setValue("flag", sim.getValue("flag") + 1); }} if (fixed) {{ if (sim.getValue("flag") == 1){{ sim.rescheduleScriptBlock(s1, start=sim.generation+{TAU}, end=sim.generation+{TAU}); }} }} else {{ sim.readFromPopulationFile("/tmp/slim_" + simID + ".trees"); setSeed(rdunif(1, 0, asInteger(2^62) - 1)); target = sample(p1.genomes, 1); target.addNewDrawnMutation(m2, {SWEEPPOS}); }} }} }} """ with open(outfile, "w") as f: f.write(slim_str.format(**format_dict)) def subsample_simplify_slim_treesequence(ts, sample_sizes): tables = ts.dump_tables() samples = set(ts.samples()) num_populations = len(set(tables.nodes.population)) assert len(sample_sizes) == num_populations subsample = [] for i, size in enumerate(sample_sizes): # Stride 2 to only sample one chrom per diploid SLiM individual ss = np.where(tables.nodes.population == i)[0][::2] ss = list(samples.intersection(ss)) ss = np.random.choice(ss, replace=False, size=size) subsample.extend(ss) tables.nodes.individual = None tables.individuals.clear() tables.simplify(subsample) ts = tables.tree_sequence() return ts def plot_qq(v1, v2): sm.graphics.qqplot(v1) sm.qqplot_2samples(v1, v2, line="45") def plot_stat_hist(v1, v2, v1_name, v2_name): with warnings.catch_warnings(): warnings.simplefilter("ignore") sns.kdeplot(v1, color="b", shade=True, label=v1_name, legend=False) sns.kdeplot(v2, color="r", shade=True, label=v2_name, legend=False) pyplot.legend(loc="upper right") def plot_breakpoints_hist(v1, v2, v1_name, v2_name): with 
warnings.catch_warnings(): warnings.simplefilter("ignore") sns.kdeplot(v1, color="b", label=v1_name, shade=True, legend=False) sns.kdeplot(v2, color="r", label=v2_name, shade=True, legend=False) pyplot.legend(loc="upper right") def all_breakpoints_in_replicates(replicates): return [right for intervals in replicates for left, right in intervals] @attr.s class Test: """ The superclass of all tests. The only attribute defined is the output directory for the test, which is guaranteed to exist when the test method is called. """ output_dir = attr.ib(type=str, default=None) def _run_sample_stats(self, args): logging.debug(f"{' '.join(args)}") p1 = subprocess.Popen(args, stdout=subprocess.PIPE) p2 = subprocess.Popen( ["./data/sample_stats"], stdin=p1.stdout, stdout=subprocess.PIPE ) p1.stdout.close() output = p2.communicate()[0] p1.wait() if p1.returncode != 0: raise ValueError("Error occured in subprocess: ", p1.returncode) with tempfile.TemporaryFile() as f: f.write(output) f.seek(0) df = pd.read_csv(f, sep="\t") return df def _build_filename(self, *args): return self.output_dir / ("_".join(args[1:]) + ".png") def _plot_stats(self, stats_type, df1, df2, df1_name, df2_name): assert set(df1.columns.values) == set(df2.columns.values) for stat in df1.columns.values: v1 = df1[stat] v2 = df2[stat] if stat == "breakpoints": plot_breakpoints_hist(flatten(v1), flatten(v2), df1_name, df2_name) pyplot.xlabel("genome") f = self._build_filename(stats_type, stat) pyplot.savefig(f, dpi=72) else: plot_qq(v1, v2) pyplot.xlabel(df1_name) pyplot.ylabel(df2_name) f = self._build_filename(stats_type, stat) pyplot.savefig(f, dpi=72) pyplot.close("all") # Put the histograms in their own directory to avoid # cluttering up the qqplots. plot_stat_hist(v1, v2, df1_name, df2_name) histdir = self.output_dir / "histograms" histdir.mkdir(exist_ok=True) f = histdir / f.name pyplot.savefig(f, dpi=72) pyplot.close("all") def get_ms_seeds(self): max_seed = 2 ** 16 seeds = [random.randint(1, max_seed) for j in range(3)] return ["-seed"] + list(map(str, seeds)) def _run_msprime_mutation_stats(self, args): return self._run_sample_stats( _mspms_executable + args.split() + self.get_ms_seeds() ) class MsTest(Test): """ Superclass of tests that perform comparisons with ms. Provides some infrastructure for common operations. 
""" def _deserialize_breakpoints(self, df): breakpoints_strs = df["breakpoints"] breakpoints = [ast.literal_eval(literal) for literal in breakpoints_strs] df["breakpoints"] = breakpoints return df def _exec_coalescent_stats(self, executable, args, seeds=None): with tempfile.TemporaryFile() as f: argList = [executable] + args.split() + self.get_ms_seeds() logging.debug(f"{' '.join(argList)}") subprocess.call(argList, stdout=f) f.seek(0) df = pd.read_table(f) self._deserialize_breakpoints(df) return df def _run_ms_coalescent_stats(self, args): return self._exec_coalescent_stats("./data/ms_summary_stats", args) def _run_ms_mutation_stats(self, args): return self._run_sample_stats( _ms_executable + args.split() + self.get_ms_seeds() ) def _run_mutation_stats(self, args): df_ms = self._run_ms_mutation_stats(args) df_msp = self._run_msprime_mutation_stats(args) self._plot_stats("mutation", df_ms, df_msp, "ms", "msp") def _run_mspms_coalescent_stats(self, args): logging.debug(f"mspms: {args}") runner = cli.get_mspms_runner(args.split()) sim = runner.simulator num_populations = sim.num_populations replicates = runner.num_replicates num_trees = [0 for j in range(replicates)] time = [0 for j in range(replicates)] ca_events = [0 for j in range(replicates)] re_events = [0 for j in range(replicates)] gc_events = [0 for j in range(replicates)] mig_events = [None for j in range(replicates)] breakpoints = [[] for j in range(replicates)] for j in range(replicates): sim.reset() sim.run() num_trees[j] = sim.num_breakpoints + 1 breakpoints[j] = sim.breakpoints time[j] = sim.time ca_events[j] = sim.num_common_ancestor_events re_events[j] = sim.num_recombination_events gc_events[j] = sim.num_gene_conversion_events mig_events[j] = [r for row in sim.num_migration_events for r in row] d = { "t": time, "num_trees": num_trees, "ca_events": ca_events, "re_events": re_events, "gc_events": gc_events, } for j in range(num_populations ** 2): events = [mig_events[k][j] for k in range(replicates)] d[f"mig_events_{j}"] = events d["breakpoints"] = breakpoints df = pd.DataFrame(d) return df def _run_coalescent_stats(self, args): df_msp = self._run_mspms_coalescent_stats(args) df_ms = self._run_ms_coalescent_stats(args) self._plot_stats("coalescent", df_msp, df_ms, "msp", "ms") # end of tests common to MS and random def _run_variable_recombination_coalescent_stats(self, args): df_msp = self._run_mspms_coalescent_stats(args) df_mshot = self._run_mshot_coalescent_stats(args) self._plot_stats("recomb map coalescent", df_msp, df_mshot, "msp", "msHOT") def _run_mshot_coalescent_stats(self, args): return self._exec_coalescent_stats("./data/msHOT_summary_stats", args) def _run(self, cmd): self._run_coalescent_stats(cmd) self._run_mutation_stats(cmd) class MsDemography(MsTest): def test_size_change_1(self): self._run("10 10000 -t 2.0 -eN 0.1 2.0") def test_growth_rate_change_1(self): self._run("10 10000 -t 2.0 -eG 0.1 5.0") def test_growth_rate_change1(self): self._run("10 10000 -t 2.0 -eG 0.1 5.0") def test_growth_rate_2_pops1(self): self._run("10 10000 -t 2.0 -I 2 5 5 2.5 -G 5.0") def test_growth_rate_2_pops2(self): self._run("10 10000 -t 2.0 -I 2 5 5 2.5 -G 5.0 -g 1 0.1") def test_growth_rate_2_pops3(self): self._run("10 10000 -t 2.0 -I 2 5 5 2.5 -g 1 0.1") def test_growth_rate_2_pops4(self): self._run("10 10000 -t 2.0 -I 2 5 5 2.5 -eg 1.0 1 5.0") def test_pop_size_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 2.5 -n 1 0.1") def test_pop_size_2_pops2(self): self._run("100 10000 -t 2.0 -I 2 50 50 2.5 -g 1 2 -n 1 0.1") def 
test_pop_size_2_pops3(self): self._run("100 10000 -t 2.0 -I 2 50 50 2.5 -eN 0.5 3.5") def test_pop_size_2_pops4(self): self._run("100 10000 -t 2.0 -I 2 50 50 2.5 -en 0.5 1 3.5") def test_migration_rate_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 0 -eM 3 5") def test_migration_matrix_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 -ma x 10 0 x") def test_migration_matrix_2_pops2(self): self._run("100 10000 -t 2.0 -I 2 50 50 -m 1 2 10 -m 2 1 50") def test_migration_rate_change_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 -eM 5 10") def test_migration_matrix_entry_change_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 -em 0.5 2 1 10") def test_migration_matrix_change_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 -ema 10.0 2 x 10 0 x") def migration_matrix_change_2_pops2(self): cmd = """100 10000 -t 2.0 -I 2 50 50 -ema 1.0 2 x 0.1 0 x -eN 1.1 0.001 -ema 10 2 x 0 10 x""" self._run(cmd) def test_population_split_2_pops1(self): self._run("100 10000 -t 2.0 -I 2 50 50 5.0 -ej 2.0 1 2") def test_population_split_4_pops1(self): self._run("100 10000 -t 2.0 -I 4 50 50 0 0 2.0 -ej 0.5 2 1") def test_population_split_4_pops2(self): self._run("100 10000 -t 2.0 -I 4 25 25 25 25 -ej 1 2 1 -ej 2 3 1 -ej 3 4 1") def test_population_split_4_pops3(self): cmd = ( "100 10000 -t 2.0 -I 4 25 25 25 25 -ej 1 2 1 " "-em 1.5 4 1 2 -ej 2 3 1 -ej 3 4 1" ) self._run(cmd) def test_admixture_1_pop1(self): self._run("1000 1000 -t 2.0 -es 0.1 1 0.5 -em 0.1 1 2 1") def test_admixture_1_pop2(self): self._run("1000 1000 -t 2.0 -es 0.1 1 0.1 -em 0.1 1 2 1") def test_admixture_1_pop3(self): self._run("1000 1000 -t 2.0 -es 0.01 1 0.1 -em 0.1 2 1 1") def test_admixture_1_pop4(self): self._run("1000 1000 -t 2.0 -es 0.01 1 0.1 -es 0.1 2 0 -em 0.1 3 1 1") def test_admixture_1_pop5(self): self._run("1000 1000 -t 2.0 -es 0.01 1 0.1 -ej 1 2 1") def test_admixture_1_pop6(self): self._run("1000 1000 -t 2.0 -es 0.01 1 0.0 -eg 0.02 2 5.0 ") def test_admixture_1_pop7(self): self._run("1000 1000 -t 2.0 -es 0.01 1 0.0 -en 0.02 2 5.0 ") def test_admixture_2_pop1(self): self._run("1000 1000 -t 2.0 -I 2 500 500 1 -es 0.01 1 0.1 -ej 1 3 1") def test_admixture_2_pop2(self): self._run("1000 1000 -t 2.0 -I 2 500 500 2 -es 0.01 1 0.75 -em 2.0 3 1 1") def test_admixture_2_pop3(self): self._run( "1000 1000 -t 2.0 -I 2 500 500 2 -es 0.01 1 0.75 -G 5.0 " "-em 2.0 3 1 1" ) def test_admixture_2_pop4(self): cmd = ( "1000 1000 -t 2.0 -I 2 500 500 2 -es 0.01 1 0.75 " "-eg 0.02 1 5.0 -em 0.02 3 1 1" ) self._run(cmd) class MsGeneConversion(MsTest): def _run(self, cmd): # The mutation stats are a waste of time for GC, they tell us basically # nothing. 
self._run_coalescent_stats(cmd) def test_gene_conversion_c10_r0(self): self._run("100 10000 -t 5.0 -r 0 2501 -c 10 1") def test_gene_conversion_c100_tl1000_r0(self): self._run("100 10000 -t 5.0 -r 0 2501 -c 100 1000") def test_gene_conversion_c1000_tl_1(self): self._run("100 10000 -t 5.0 -r 0.01 2501 -c 1000 1") def test_gene_conversion_c1000_tl_1000(self): self._run("100 10000 -t 5.0 -r 0.01 2501 -c 1000 1000") def test_gene_conversion_c2_r10(self): self._run("100 10000 -t 5.0 -r 10 2501 -c 2 1") def test_gene_conversion_c2_tl_10_r10(self): self._run("100 10000 -t 5.0 -r 10 2501 -c 2 10") def test_gene_conversion_c2_tl_100(self): self._run("100 10000 -t 5.0 -r 10 2501 -c 2 100") def test_gene_conversion_c2_tl_100_r0(self): self._run("100 10000 -t 5.0 -r 0 2501 -c 2 100") def test_gene_conversion_c20_tl_1000_r0(self): self._run("100 10000 -t 5.0 -r 0 2501 -c 20 1000") class MsDocExamples(MsTest): def test_msdoc_simple_ex(self): self._run("4 20000 -t 5.0") def test_msdoc_recomb_ex(self): self._run("15 1000 -t 10.04 -r 100.0 2501") def test_msdoc_structure_ex1(self): self._run("15 1000 -t 2.0 -I 3 10 4 1 5.0") def test_msdoc_structure_ex2(self): self._run("15 1000 -t 2.0 -I 3 10 4 1 5.0 -m 1 2 10.0 -m 2 1 9.0") def test_msdoc_structure_ex3(self): self._run("15 1000 -t 10.0 -I 3 10 4 1 -ma x 1.0 2.0 3.0 x 4.0 5.0 6.0 x") def test_msdoc_outgroup_sequence(self): self._run("11 1000 -t 2.0 -I 2 1 10 -ej 6.0 1 2") def test_msdoc_two_species(self): cmd = ( "15 10000 -t 11.2 -I 2 3 12 -g 1 44.36 -n 2 " "0.125 -eg 0.03125 1 0.0 -en 0.0625 2 0.05 -ej 0.09375 2 1" ) self._run(cmd) def test_msdoc_stepping_stone(self): cmd = ( "15 10000 -t 3.0 -I 6 0 7 0 0 8 0 -m 1 2 2.5 -m 2 1 2.5 -m 2 3 2.5 " "-m 3 2 2.5 -m 4 5 2.5 -m 5 4 2.5 -m 5 6 2.5 -m 6 5 2.5 " "-em 2.0 3 4 2.5 -em 2.0 4 3 2.5" ) self._run(cmd) class MsMiscExamples(MsTest): """ Miscellaneous examples that have been good for finding bugs. """ def test_simultaneous_ex1(self): self._run("10 10000 -t 2.0 -eN 0.3 0.5 -eG .3 7.0") def test_zero_growth_rate(self): self._run("10 10000 -t 2.0 -G 6.93 -eG 0.2 0.0 -eN 0.3 0.5") def test_konrad_1(self): cmd = ( "4 1000 -t 2508 -I 2 2 2 0 -n 2 2.59 " "-ma x 0 1.502 x -ej 0.9485 1 2 -r 23.76 3000" ) self._run(cmd) def test_konrad_2(self): cmd = ( "3 10000 -t 0.423 -I 3 1 1 1 -es 0.0786 1 0.946635 " "-ej 0.0786 4 3 -ej 0.189256 1 2 -ej 0.483492 2 3" ) self._run(cmd) def test_konrad_3(self): self._run("100 100 -t 2 -I 10 10 10 10 10 10 10 10 10 10 10 0.001 ") class MsRandom(MsTest): """ Some tests made by generating random parameters. """ def _run(self, num_populations=1, num_replicates=1000, num_demographic_events=0): m = random.randint(1, 1000) r = random.uniform(0.01, 0.1) * m theta = random.uniform(1, 100) N = num_populations sample_sizes = [random.randint(2, 10) for _ in range(N)] migration_matrix = [random.random() * (j % (N + 1) != 0) for j in range(N ** 2)] structure = "" if num_populations > 1: structure = "-I {} {} -ma {}".format( num_populations, " ".join(str(s) for s in sample_sizes), " ".join(str(r) for r in migration_matrix), ) cmd = "{} {} -t {} -r {} {} {}".format( sum(sample_sizes), num_replicates, theta, r, m, structure ) # Set some initial growth rates, etc. 
if N == 1: if random.random() < 0.5: cmd += f" -G {random.random()}" else: cmd += f" -eN 0 {random.random()}" # Add some demographic events t = 0 for _ in range(num_demographic_events): t += 0.125 if random.random() < 0.5: cmd += f" -eG {t} {random.random()}" else: cmd += f" -eN {t} {random.random()}" super()._run(cmd) def test_ms_random_1(self): self._run() def test_ms_random_2(self): self._run(num_replicates=10 ** 4, num_demographic_events=10) def test_ms_random_2_pops1(self): self._run(num_populations=3) class MsHotTest(MsTest): def _run(self, cmd): self._run_variable_recombination_coalescent_stats(cmd) def test_mshotdoc_hotspot_ex(self): self._run("10 1000 -t 10.4 -r 10.0 25000 -v 2 100 200 10 7000 8000 20") def test_mshot_zero_recomb_interval(self): self._run("10 1000 -t 10.4 -r 10.0 25000 -v 1 5000 13000 0") def test_mshot_zero_recomb(self): self._run("10 1000 -t 10.4 -r 10.0 25000 -v 1 100 25000 0") def test_mshot_high_recomb_variants(self): hotspots = "4 1000 2000 0 7000 8000 20 12000 15000 10 20000 22000 0" cmd = f"10 1000 -t 10.4 -r 10.0 25000 -v {hotspots}" self._run(cmd) class DiscoalTest(Test): def get_discoal_seeds(self): max_seed = 2 ** 16 seeds = [random.randint(1, max_seed) for j in range(3)] return ["-d"] + list(map(str, seeds)) def _discoal_str_to_ms(self, args): # convert discoal string to msprime string tokens = args.split(" ") # cut out sites param del tokens[2] # adjust popIDs for i in range(len(tokens)): # pop size change case if tokens[i] == "-en": tokens[i + 2] = str(int(tokens[i + 2]) + 1) # migration rate case if tokens[i] == "-m": tokens[i + 1] = str(int(tokens[i + 1]) + 1) tokens[i + 2] = str(int(tokens[i + 2]) + 1) msp_str = " ".join(tokens) return msp_str def _run_discoal_mutation_stats(self, args): return self._run_sample_stats( _discoal_executable + args.split() + self.get_discoal_seeds() ) def _run_mutation_discoal_stats(self, args): msp_str = self._discoal_str_to_ms(args) df_msp = self._run_msprime_mutation_stats(msp_str) df_d = self._run_sample_stats( _discoal_executable + args.split() + self.get_discoal_seeds() ) self._plot_stats("mutation", df_d, df_msp, "discoal", "msp") def _discoal_str_to_simulation(self, args): # takes discoal command line as input # and returns msprime run treeseqs tokens = args.split(" ") # positional args sample_size = int(tokens[0]) nreps = int(tokens[1]) seq_length = int(tokens[2]) # parse discoal command line for params # init ones we definitely need for comparison theta = rho = alpha = sweep_site = sweep_mod_time = None refsize = 1e6 for i in range(3, len(tokens)): # pop size change case if tokens[i] == "-en": raise ValueError( "sweeps with population size changes remain unimplemented" ) # migration rate case if (tokens[i] == "-m") or (tokens[i] == "-p"): raise ValueError( "sweeps with multiple populations remain unimplemented" ) # split or admixture case if (tokens[i] == "-ea") or (tokens[i] == "-ed"): raise ValueError("sweeps with splits or admixture not supported") # sweep params if tokens[i] == "-x": sweep_site = float(tokens[i + 1]) if (tokens[i] == "-ws") or (tokens[i] == "-wd") or (tokens[i] == "-wn"): sweep_mod_time = float(tokens[i + 1]) if tokens[i] == "-a": alpha = float(tokens[i + 1]) if tokens[i] == "-N": refsize = float(tokens[i + 1]) # coalescent params if tokens[i] == "-t": theta = float(tokens[i + 1]) if tokens[i] == "-r": rho = float(tokens[i + 1]) mod_list = [] if alpha is not None: # sweep model s = alpha / (2 * refsize) mod = msprime.SweepGenicSelection( position=np.floor(sweep_site * seq_length), 
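                # discoal's -x gives the selected site as a fraction of the locus,
                # so it is converted to a sequence coordinate here.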
start_frequency=1.0 / (2 * refsize), end_frequency=1.0 - (1.0 / (2 * refsize)), s=s * 2, # discoal fitness model is 1, 1+s, 1+2s dt=1e-6, ) mod_list.append(msprime.StandardCoalescent(duration=sweep_mod_time)) mod_list.append(mod) # if an event is defined from discoal line # best thing to do is rescale to Ne=0.25 # so that time scale are consistent # see note at msprime/cli.py line 626 # and following for alternate solution if sweep_mod_time > 0: refsize = 0.25 mod.s = alpha / refsize # append final model mod_list.append("hudson") # scale theta and rho recomb_rate = rho / (4 * refsize * (seq_length - 1)) mu = theta / (4 * refsize * seq_length) replicates = msprime.sim_ancestry( [msprime.SampleSet(sample_size, ploidy=1)], population_size=refsize, model=mod_list, recombination_rate=recomb_rate, sequence_length=seq_length, discrete_genome=False, num_replicates=nreps, ) mutate = functools.partial( msprime.sim_mutations, discrete_genome=False, rate=mu ) return map(mutate, replicates) class DiscoalCompatibility(DiscoalTest): """ Basic tests to make sure that we have correctly set up the discoal interface. """ def _run(self, cmd): self._run_mutation_discoal_stats(cmd) def test_discoal_simple_ex(self): self._run("15 1000 100 -t 5.0") def test_discoal_size_change1(self): self._run("10 10000 100 -t 10.0 -en 0.1 0 2.0") def test_discoal_size_change2(self): self._run("10 10000 100 -t 10.0 -en 0.1 0 0.1") def test_discoal_size_change3(self): self._run("10 10000 100 -t 10.0 -en 0.01 0 0.01") def test_discoal_size_change4(self): self._run("10 10000 100 -t 10.0 -en 0.01 0 0.5 -en 0.05 0 1.0") # TODO we need to fix this test and to add a good number of examples. class DiscoalSweeps(DiscoalTest): """ Compare the result of sweeps in msprime and discoal. """ def _run(self, args): df = pd.DataFrame() data = collections.defaultdict(list) replicates = self._discoal_str_to_simulation(args) for ts in replicates: data["pi"].append(ts.diversity(span_normalise=False)) data["D"].append(ts.Tajimas_D()) data["ss"].append(ts.segregating_sites(span_normalise=False)) data["pi"] = np.array(data["pi"]).flatten() data["D"] = np.array(data["D"]).flatten() data["ss"] = np.array(data["ss"]).flatten() df = pd.DataFrame.from_dict(data) df = df.fillna(0) df_d = self._run_discoal_mutation_stats(args) df_df = df_d[["pi", "D", "ss"]] logging.debug(f"msp pi mean: {df['pi'].mean()}") logging.debug(f"discoal pi mean: {df_df['pi'].mean()}") logging.debug(f"msp ss mean: {df['ss'].mean()}") logging.debug(f"discoal ss mean: {df_df['ss'].mean()}") logging.debug(f"msp D mean: {df['D'].mean()}") logging.debug(f"discoal D mean: {df_df['D'].mean()}") logging.debug(f"sample sizes msp: {len(df['pi'])} discoal: {len(df_df['pi'])}") self._plot_stats("mutation", df, df_df, "msp", "discoal") def test_sweep_ex0(self): cmd = "10 1000 10000 -t 10.0 -r 10.0" self._run(cmd) def test_sweep_no_rec_ex1(self): cmd = "10 1000 10000 -t 10.0 -r 0.0 -ws 0 -a 100 -x 0.5 -N 10000" self._run(cmd) def test_sweep_no_rec_ex2(self): cmd = "100 1000 10000 -t 10.0 -r 0.0 -ws 0 -a 200 -x 0.5 -N 10000" self._run(cmd) def test_sweep_rec_ex1(self): cmd = "10 1000 10000 -t 10.0 -r 10.0 -ws 0 -a 1000 -x 0.5 -N 10000" self._run(cmd) def test_sweep_rec_ex2(self): cmd = "10 1000 10000 -t 10.0 -r 20.0 -ws 0 -a 1000 -x 0.5 -N 10000" self._run(cmd) def test_sweep_rec_ex3(self): cmd = "10 1000 10000 -t 10.0 -r 100.0 -ws 0 -a 1000 -x 0.5 -N 10000" self._run(cmd) def test_sweep_rec_ex4(self): cmd = "10 1000 10000 -t 10.0 -r 400.0 -ws 0 -a 2000 -x 0.5 -N 10000" self._run(cmd) def 
test_sweep_rec_ex5(self): cmd = "10 1000 10000 -t 100.0 -r 100.0 -ws 0 -a 250 -x 0.5 -N 10000" self._run(cmd) def test_sweep_tau_ex1(self): cmd = "10 1000 10000 -t 10.0 -r 20.0 -ws 0.001 -a 250 -x 0.5 -N 10000" self._run(cmd) def test_sweep_tau_ex2(self): cmd = "10 1000 10000 -t 10.0 -r 20.0 -ws 0.01 -a 250 -x 0.5 -N 10000" self._run(cmd) def test_sweep_tau_ex3(self): cmd = "10 1000 10000 -t 10.0 -r 20.0 -ws 1.0 -a 250 -x 0.5 -N 10000" self._run(cmd) def sample_recap_simplify(slim_ts, sample_size, Ne, r, mu): """ takes a ts from slim and samples, recaps, simplifies """ demography = msprime.Demography.from_tree_sequence(slim_ts) demography[1].initial_size = Ne with warnings.catch_warnings(): warnings.simplefilter( "ignore", category=msprime.IncompletePopulationMetadataWarning ) recap = msprime.sim_ancestry( initial_state=slim_ts, demography=demography, recombination_rate=r, # TODO is this needed now? Shouldn't be, right? start_time=slim_ts.metadata["SLiM"]["generation"], ) rts = pyslim.SlimTreeSequence(recap) logging.debug(f"pyslim: slim generation:{slim_ts.metadata['SLiM']['generation']}") alive_inds = rts.individuals_alive_at(0) keep_indivs = np.random.choice(alive_inds, sample_size, replace=False) keep_nodes = [] for i in keep_indivs: keep_nodes.extend(rts.individual(i).nodes) logging.debug(f"before simplify {rts.num_nodes} nodes") sts = rts.simplify(keep_nodes) logging.debug(f"after simplify {sts.num_nodes} nodes") logging.debug(f"after simplify {sts.num_trees} trees") return pyslim.SlimTreeSequence(msprime.mutate(sts, rate=mu)) class SweepVsSlim(Test): """ Tests where we compare the msprime sweeps with SLiM simulations. """ def run_sweep_slim_comparison(self, slim_args, **kwargs): df = pd.DataFrame() kwargs["model"] = "msp" logging.debug(f"Running: {kwargs}") seq_length = kwargs.get("seq_length") pop_size = kwargs.get("pop_size") s = kwargs.get("s") tau = kwargs.get("tau") sample_size = kwargs.get("sample_size") recombination_rate = kwargs.get("recombination_rate") num_replicates = kwargs.get("num_replicates") sweep = msprime.SweepGenicSelection( position=seq_length / 2, start_frequency=1.0 / (2 * pop_size), end_frequency=1.0 - (1.0 / (2 * pop_size)), s=s, dt=1e-6, ) replicates = msprime.sim_ancestry( sample_size, population_size=pop_size, model=[msprime.StandardCoalescent(duration=tau), sweep, "hudson"], recombination_rate=recombination_rate, sequence_length=seq_length, num_replicates=num_replicates, ) wins = range(0, int(seq_length + 1), int(seq_length / 20)) mids = np.zeros(len(wins) - 1) for i in range(len(wins) - 1): mids[i] = (wins[i + 1] + wins[i]) / 2 msp_win_pis = [] slim_win_pis = [] data = collections.defaultdict(list) for ts in replicates: t_mrca = np.zeros(ts.num_trees) for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) mutated_ts = msprime.sim_mutations(ts, rate=1e-8) data["pi"].append(mutated_ts.diversity().reshape((1,))[0]) data["model"].append("msp") msp_num_samples = ts.num_samples msp_win_pis.append(mutated_ts.diversity(windows=wins)) slim_script = self.output_dir / "slim_script.txt" outfile = self.output_dir / "slim.trees" slim_args["OUTFILE"] = str(outfile) write_sweep_slim_script(slim_script, slim_args) cmd = _slim_executable + [slim_script] for _ in range(kwargs["num_replicates"]): subprocess.check_output(cmd) ts = pyslim.load(outfile) rts = sample_recap_simplify( ts, sample_size, pop_size, recombination_rate, 1e-8 ) assert rts.num_samples == msp_num_samples t_mrca = 
np.zeros(rts.num_trees) for tree in rts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(rts.num_trees) slim_win_pis.append(rts.diversity(windows=wins)) data["pi"].append(rts.diversity().reshape((1,))[0]) data["model"].append("slim") df = df.append(pd.DataFrame(data)) df_slim = df[df.model == "slim"] df_msp = df[df.model == "msp"] for stat in ["tmrca_mean", "num_trees", "pi"]: v1 = df_slim[stat] v2 = df_msp[stat] sm.graphics.qqplot(v1) sm.qqplot_2samples(v1, v2, line="45") pyplot.xlabel("msp") pyplot.ylabel("SLiM") f = self.output_dir / f"{stat}.png" pyplot.savefig(f, dpi=72) pyplot.close("all") plot_stat_hist(v1, v2, "slim", "msp") f = self.output_dir / f"{stat}.hist.png" pyplot.savefig(f, dpi=72) pyplot.close("all") pyplot.plot(mids, np.array(msp_win_pis).mean(axis=0), label="msp") pyplot.plot(mids, np.array(slim_win_pis).mean(axis=0), label="slim") pyplot.title(f"tau: {tau}") pyplot.xlabel("location (bp)") pyplot.ylabel("pairwise diversity") pyplot.legend() f = self.output_dir / "pi_wins.png" pyplot.savefig(f, dpi=72) pyplot.close("all") def _run( self, sample_size, seq_length, pop_size, recombination_rate, s, tau, num_replicates=None, ): """ basic tests for sweeps vs slim """ slim_args = {} if num_replicates is None: num_replicates = 20 # These are *diploid* samples in msprime slim_args["sample_size"] = 2 * sample_size slim_args["r"] = recombination_rate slim_args["NUMLOCI"] = int(seq_length - 1) slim_args["POPSIZE"] = int(pop_size) slim_args["TAU"] = tau slim_args["s"] = s slim_args["SWEEPPOS"] = int(seq_length / 2) self.run_sweep_slim_comparison( slim_args, pop_size=pop_size, sample_size=sample_size, num_replicates=num_replicates, seq_length=seq_length, tau=tau, s=s, recombination_rate=recombination_rate, ) def test_sweep_vs_slim_ex1(self): self._run(10, 1e6, 1e3, 1e-7, 0.25, 1, num_replicates=10) def test_sweep_vs_slim_ex2(self): self._run(10, 1e6, 1e3, 1e-7, 0.25, 200, num_replicates=10) def test_sweep_vs_slim_ex3(self): self._run(10, 1e6, 1e3, 1e-7, 0.25, 1000, num_replicates=10) def test_sweep_vs_slim_ex4(self): self._run(10, 1e6, 1e3, 1e-7, 0.25, 2000, num_replicates=10) def test_sweep_vs_slim_ex5(self): self._run(10, 1e6, 1e3, 1e-7, 0.25, 5000, num_replicates=10) class MsmsSweeps(Test): """ Compare msms with msprime/discoal for selective sweeps. NOTE: 1. Msms allows user to specify selection starting time/frequency (-SI), or, alternatively, specify selection ending time/frequency (-SF); msprime is able to simulate selection similar to the '-SF' option in msms 2. Msms allows user to specify different selection coefficients for AA and Aa, but in msprime/disocal only the selection coefficient for aA can be specified, and use h=0.5 to calculate that for AA. """ def _msms_str_to_parameters(self, msms_cmd): """ Parse msms cmdline arguments into a dictionary. This method is called by `_run_msp_sample_stats` msms cmdline pattern: nsam nrep -t theta -r rho num_sites -SF end_time end_frequency \ -SAA sAA -SaA saA -Sp sel_pos -N refsize -seed rand_seed eg. 
"5 1 -t 200 -r 200 500000 -SF 0.002 0.9 -Sp 0.5"\ " -SaA 5000 -SAA 10000 -N 10000 -seed 1" """ # initialize local variables end_time_lst = [] # use list for multiple sweeps end_frequency_lst = [] num_sweeps = 0 sAA = saA = sel_pos = -1.0 saA = -0.5 refsize = 1 rand_seed = (random.randint(1, 2 ** 16),) # parse arguments tokens = msms_cmd.split(" ") for ind in range(len(tokens)): if ind == 0: nsam = int(tokens[ind]) nrep = int(tokens[ind + 1]) elif tokens[ind] == "-t": theta = float(tokens[ind + 1]) elif tokens[ind] == "-r": rho = float(tokens[ind + 1]) num_sites = int(tokens[ind + 2]) elif tokens[ind] == "-SF": num_sweeps += 1 end_time_lst.append(float(tokens[ind + 1])) end_frequency_lst.append(float(tokens[ind + 2])) elif tokens[ind] == "-Sp": sel_pos = float(tokens[ind + 1]) elif tokens[ind] == "-SAA": sAA = float(tokens[ind + 1]) elif tokens[ind] == "-SaA": saA = float(tokens[ind + 1]) elif tokens[ind] == "-N": refsize = int(tokens[ind + 1]) elif tokens[ind] == "-seed": rand_seed = float(tokens[ind + 1]) else: pass # check if h = 0.5 if abs(saA * 2 - sAA) > 1e-5: logging.warning( "If 2 * saA is not equal to sAA, saA is set to sAA / 2," "that is, h can only be 0.5 in msprime" ) saA = sAA / 2.0 return { "nsam": nsam, "nrep": nrep, "num_sweeps": num_sweeps, "end_time_lst": end_time_lst, "end_frequency_lst": end_frequency_lst, "refsize": refsize, "alpha": saA, "theta": theta, "rho": rho, "num_sites": num_sites, "sel_pos": sel_pos, "rand_seed": rand_seed, } def _update_msms_cmd_to_match_discoal(self, msms_cmd): """ NOTE: discoal does not have options to specify allele frequencies and instead it calculates the frequency internally according to refsize. When msp or msms is compared with discoal, the "end_frequency_lst" from msms command arguments will be replaced by the following calculations. """ msms_params = self._msms_str_to_parameters(msms_cmd) if msms_params["num_sweeps"] == 0: return msms_cmd logging.warning( "When compared with discoal, selected allele frequency options are" " recalculated following discoal's way" ) # recalculate frequencies refsize = msms_params["refsize"] end_frequency_lst = msms_params["end_frequency_lst"] end_frequency_lst = [1 - 0.5 / refsize for _ in end_frequency_lst] # construct new msms cmd new_cmd = [ str(msms_params["nsam"]), str(msms_params["nrep"]), "-t", str(msms_params["theta"]), "-r", str(msms_params["rho"]), str(msms_params["num_sites"]), ] for i in range(len(end_frequency_lst)): new_cmd += [ "-SF", str(msms_params["end_time_lst"][i]), str(end_frequency_lst[i]), "-Sp", str(msms_params["sel_pos"]), ] new_cmd += [ "-SaA", str(msms_params["alpha"]), "-SAA", str(msms_params["alpha"] * 2), "-N", str(msms_params["refsize"]), ] new_msms_cmd = " ".join(new_cmd) return new_msms_cmd def _msms_params_to_run_msp(self, params): """ Run simulation for a single sample and return a tree sequence. This method is called by `_run_msp_sample_stats` in a loop to generate nrep samples. 
""" if params["num_sweeps"] > 0: model = [] t_start = params["end_time_lst"][0] * 4 * params["refsize"] model.append(msprime.StandardCoalescent(duration=(t_start - 0))) for i in range(params["num_sweeps"]): temp_model = msprime.SweepGenicSelection( position=params["sel_pos"] * params["num_sites"], end_frequency=params["end_frequency_lst"][i], start_frequency=0.5 / params["refsize"], s=params["alpha"] / params["refsize"], # alpha=saA, s=sAA/(2N) dt=1.0 / (40 * params["refsize"]), ) model.append(temp_model) # Before the Sweep model is made interruptable and support multiple # sweeps, we just use a single sweep for now. break model.append("hudson") else: model = "hudson" scale_factor = 4.0 * params["refsize"] * (params["num_sites"] - 1) recombination_rate = params["rho"] / scale_factor scale_factor = 4.0 * params["refsize"] * params["num_sites"] mutation_rate = params["theta"] / scale_factor repeats = msprime.sim_ancestry( samples=params["nsam"] / 2, # use sample size of diploids population_size=params["refsize"], model=model, recombination_rate=recombination_rate, discrete_genome=False, sequence_length=params["num_sites"], num_replicates=params["nrep"], ) # Critical to use BinaryMutationModel and get ancestral and derived alleles mutated_repeats = [ msprime.sim_mutations( ts, rate=mutation_rate, model=msprime.BinaryMutationModel(), discrete_genome=False, ) for ts in repeats ] return mutated_repeats def _run_msp_sample_stats(self, msms_cmd): """ Call methods to parse cmdline options and run simulation, and then output in ms format, pipe thru sample_stats and finally return stats dataframe. """ temp_file = tempfile.gettempdir() + "/tmp_msp_out" output = open(temp_file, "w") # run simulation and print ms format data into a file msms_params = self._msms_str_to_parameters(msms_cmd) print("ms " + msms_cmd, file=output) # needed by sample_stat tools self._ms_random_seeds = msms_params["rand_seed"] = self.get_ms_seeds() mutated_ts_repeats = self._msms_params_to_run_msp(msms_params) for tree_sequence in mutated_ts_repeats: print(file=output) print("//", file=output) if msms_params["theta"] > 0: s = tree_sequence.get_num_mutations() print("segsites:", s, file=output) if s != 0: print("positions: ", end="", file=output) positions = [ mutation.position / msms_params["num_sites"] for mutation in tree_sequence.mutations() ] positions.sort() for position in positions: print("{0:.{1}f}".format(position, 8), end=" ", file=output) print(file=output) for h in tree_sequence.haplotypes(): print(h, file=output) else: print(file=output) output.close() # pipe ms format output to sample_stats p1 = subprocess.Popen(["cat", temp_file], stdout=subprocess.PIPE) p2 = subprocess.Popen( ["./data/sample_stats"], stdin=p1.stdout, stdout=subprocess.PIPE ) p1.stdout.close() output = p2.communicate()[0] p1.wait() # read into pandas frame and return it with tempfile.TemporaryFile() as f: f.write(output) f.seek(0) df = pd.read_csv(f, sep="\t") return df def _run_msms_sample_stats(self, cmd): return self._run_sample_stats(_msms_executable + cmd.split(" ")) def _convert_to_discoal_cmd(self, msms_cmd): """ called by _run_discoal_sample_stats to convert msms cmdline args to discoal cmdline args NOTE: if -N option is not specified, discoal internally use N=1,000,000 """ params = self._msms_str_to_parameters(msms_cmd) return "%d %d %d -t %f -r %f -ws %f -a %f -x %f -N %f" % ( params["nsam"], params["nrep"], params["num_sites"], params["theta"], params["rho"], params["end_time_lst"][0], params["alpha"], params["sel_pos"], 
params["refsize"], ) def _run_discoal_sample_stats(self, msms_cmd): discoal_cmd = self._convert_to_discoal_cmd(msms_cmd) return self._run_sample_stats(_discoal_executable + discoal_cmd.split(" ")) def _cmp_msms_vs_msp(self, cmd): try: df_msp = self._run_msp_sample_stats(cmd) except pd.error.ParserError: logging.warning("msm_vs_msp FAILED") return df_msms = self._run_msms_sample_stats(cmd) self._plot_stats("msp_msms", df_msp, df_msms, "msp", "msms") def _cmp_discoal_vs_msp_via_msms_cmd(self, cmd): cmd = self._update_msms_cmd_to_match_discoal(cmd) df_discoal = self._run_discoal_sample_stats(cmd) df_msp = self._run_msp_sample_stats(cmd) self._plot_stats("msp_discoal", df_msp, df_discoal, "msp", "discoal") def _cmp_msms_vs_discoal(self, cmd): cmd = self._update_msms_cmd_to_match_discoal(cmd) df_discoal = self._run_discoal_sample_stats(cmd) df_msms = self._run_msms_sample_stats(cmd) self._plot_stats("discoal_msms", df_discoal, df_msms, "discoal", "msms") def test_neutral_msms_vs_msp(self): self._cmp_msms_vs_msp("100 300 -t 200 -r 200 500000 -N 10000") def _test_selective_discoal_vs_msp(self): self._cmp_discoal_vs_msp_via_msms_cmd( "100 300 -t 20 -r 20 50000" " -SF 0 0.99995 -Sp 0.5 -SaA 5000 -SAA 10000 -N 10000" ) def test_selective_msms_vs_msp(self): self._cmp_msms_vs_msp( "100 300 -t 200 -r 200 500000" " -SF 0 0.9 -Sp 0.5 -SaA 5000 -SAA 10000 -N 10000" ) def test_selective_msms_vs_msp_small_s(self): self._cmp_msms_vs_msp( "100 300 -t 200 -r 200 500000 -SF 0 0.9 -Sp 0.5 -SaA 1 -SAA 2 -N 10000" ) """ Not implemented def _test_selective_msms_vs_msp_multiple_sweeps(self): self._cmp_msms_vs_msp( "100 300 -t 200 -r 200 500000" " -SF 0 0.9 -Sp 0.5" " -SF 0.1 0.9 -Sp 0.5 -SaA 5000 -SAA 10000 -N 10000" ) """ def _test_selective_msp_50Mb(self): """ Test runtime of msprime for long chromosomes """ self._cmp_msp_sample_stats( "1000 1 -t 20000 -r 20000 50000000" " -SF 0 0.9 -Sp 0.5 -SaA 5000 -SAA 10000 -N 10000" ) def test_selective_msms_vs_discoal(self): self._cmp_msms_vs_discoal( # "100 300 -t 20 -r 20 50000" "100 300 -t 20 -r 20 5000" " -SF 0 0.9 -Sp 0.5 -SaA 5000 -SAA 10000 -N 10000" ) def test_selective_msms_vs_msp_use_discoal_paper_param(self): self._cmp_msms_vs_msp( "100 300 -t 100 -r 100 250000" " -SF 0 0.99995 -Sp 0.5 -SaA 2000 -SAA 4000 -N 10000" ) def test_selective_msms_vs_discoal_use_discoal_paper_param(self): """ NOTE: tests calling discoal will take a much longer time to finish especially when large num_sites are used. 
Use the lines commented out instead if we want to reproduce the results posted in issue # 1173 """ self._cmp_msms_vs_discoal( # "100 300 -t 100 -r 100 250000" "100 300 -t 100 -r 100 2500" " -SF 0 0.99995 -Sp 0.5 -SaA 2000 -SAA 4000 -N 10000" ) def test_selective_msms_vs_discoal_random_param(self): self._cmp_msms_vs_discoal( # "100 300 -t 40 -r 40 50000" "100 300 -t 40 -r 40 5000" " -SF 0 0.99995 -Sp 0.5 -SaA 1000 -SAA 2000 -N 10000" ) def test_selective_discoal_vs_msp_use_discoal_paper_param(self): self._cmp_discoal_vs_msp_via_msms_cmd( # "100 300 -t 100 -r 100 250000" "100 300 -t 100 -r 100 2500" " -SF 0 0.99995 -Sp 0.5 -SaA 2000 -SAA 4000 -N 10000" ) def test_selective_discoal_vs_msp_random_param(self): self._cmp_discoal_vs_msp_via_msms_cmd( # "100 300 -t 40 -r 40 50000" "100 300 -t 40 -r 40 5000" " -SF 0 0.99995 -Sp 0.5 -SaA 1000 -SAA 2000 -N 10000" ) class SweepAnalytical(Test): """ Analytical comparisons wrt to sweeps """ def hermissonPennings_exp_sojourn(self, alpha): """ analytic expectation of sojourn time equation A.17 from Hermisson and Pennings """ inner = np.log(alpha) + np.euler_gamma - (1.0 / alpha) return 4.0 / alpha * inner def charlesworth_exp_sojourn(self, alpha, s): """ same as above but scaled in number of gens """ inner = np.log(alpha) + np.euler_gamma - (1.0 / alpha) return 4.0 / s * inner def test_sojourn_time(self): """ testing against expected sojourn time of a beneficial mutation over a range of selection coefficients """ alphas = np.linspace(100, 5000, 20) refsize = 1e4 nreps = 50 seqlen = 1e4 rho = 0 p0 = 1.0 / (2 * refsize) p1 = 1 - p0 dt = 1.0 / (400 * refsize) pos = np.floor(seqlen / 2) df = pd.DataFrame() data = collections.defaultdict(list) for a in alphas: s = a / 2 / refsize mod = msprime.SweepGenicSelection( start_frequency=p0, end_frequency=p1, s=s, dt=dt, position=pos ) replicates = msprime.sim_ancestry( 5, population_size=refsize, model=mod, sequence_length=seqlen, num_labels=2, recombination_rate=rho, num_replicates=nreps, ) reptimes = np.zeros(nreps) i = 0 for x in replicates: tree_times = np.zeros(x.num_trees) j = 0 for tree in x.trees(): tree_times[j] = np.max([tree.time(root) for root in tree.roots]) j += 1 reptimes[i] = np.max(tree_times) i += 1 data["alpha_means"].append(np.mean(reptimes)) logging.debug( f"mean time for alpha={a} / s={s} -- \ {np.mean(reptimes)}" ) data["exp_means"].append(self.charlesworth_exp_sojourn(a, s)) df = pd.DataFrame.from_dict(data) df = df.fillna(0) sm.qqplot_2samples(df["exp_means"], df["alpha_means"], line="45") pyplot.xlabel("expected sojourn time") pyplot.ylabel("simulated sojourn time") f = self.output_dir / "sojourn.png" pyplot.savefig(f, dpi=72) pyplot.close("all") def test_sojourn_time2(self): """ testing against expected sojourn time of a beneficial mutation over a range of population sizes but keeping 2Ns constant """ alpha = 1000 refsizes = np.linspace(1e2, 1e4, 10) nreps = 50 seqlen = 1e4 dt = 1e-6 pos = np.floor(seqlen / 2) df = pd.DataFrame() data = collections.defaultdict(list) for n in refsizes: s = alpha / (2 * n) p0 = 1.0 / (2 * n) p1 = 1 - p0 mod = msprime.SweepGenicSelection( start_frequency=p0, end_frequency=p1, s=s, dt=dt, position=pos ) replicates = msprime.sim_ancestry( 5, population_size=n, model=mod, sequence_length=seqlen, num_labels=2, num_replicates=nreps, ) reptimes = np.zeros(nreps) i = 0 for x in replicates: tree_times = np.zeros(x.num_trees) j = 0 for tree in x.trees(): tree_times[j] = np.max([tree.time(root) for root in tree.roots]) j += 1 reptimes[i] = np.max(tree_times) i += 1 
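            # hermissonPennings_exp_sojourn() returns the expected sojourn time in
            # units of 2N generations, so it is rescaled by 2 * n below; this is
            # equivalent to (4 / s) * (log(alpha) + euler_gamma - 1 / alpha) in
            # generations, with s = alpha / (2 * n).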
data["alpha_means"].append(np.mean(reptimes)) data["exp_means"].append(self.hermissonPennings_exp_sojourn(alpha) * 2 * n) logging.debug( f"mean time for N={n} -- \ {np.mean(reptimes)}" ) df = pd.DataFrame.from_dict(data) df = df.fillna(0) sm.qqplot_2samples(df["exp_means"], df["alpha_means"], line="45") pyplot.xlabel("expected sojourn time") pyplot.ylabel("simulated sojourn time") f = self.output_dir / "sojourn.png" pyplot.savefig(f, dpi=72) pyplot.close("all") # FIXME disabling these for now because the pedigree file that # they depend on doesn't exist. (Tests won't be picked up unless # they subclass Test.) class DtwfPedigreeVsCoalescent: def run_dtwf_pedigree_comparison(self, **kwargs): df = pd.DataFrame() pedigree = kwargs["pedigree"] assert kwargs["sample_size"] % 2 == 0 sample_size = kwargs["sample_size"] sample_size_diploid = sample_size // 2 for model in ["wf_ped", "dtwf"]: kwargs["model"] = model kwargs["pedigree"] = None kwargs["sample_size"] = sample_size if model == "wf_ped": kwargs["sample_size"] = sample_size_diploid kwargs["pedigree"] = pedigree des = [] if "demographic_events" in kwargs: des = kwargs["demographic_events"] max_ped_time = max(pedigree.times) des.append(msprime.SimulationModelChange(max_ped_time, "dtwf")) des = sorted(des, key=lambda x: x.time) kwargs["demographic_events"] = des logging.debug(f"Running: {kwargs}") data = collections.defaultdict(list) replicates = msprime.simulate(**kwargs) for ts in replicates: t_mrca = np.zeros(ts.num_trees) for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) data["model"].append(model) df = df.append(pd.DataFrame(data)) df_wf_ped = df[df.model == "wf_ped"] df_dtwf = df[df.model == "dtwf"] for stat in ["tmrca_mean", "num_trees"]: v1 = df_wf_ped[stat] v2 = df_dtwf[stat] sm.graphics.qqplot(v1) sm.qqplot_2samples(v1, v2, line="45") f = self.output_dir / f"{stat}.png" pyplot.savefig(f, dpi=72) pyplot.close("all") def test_dtwf_vs_pedigree_single_locus(self): pedigree_file = "tests/data/pedigrees/wf_100Ne_10000gens.txt" pedigree = msprime.Pedigree.read_txt(pedigree_file, time_col=3) self.run_dtwf_pedigree_comparison( "dtwf_vs_pedigree_single_locus", sample_size=10, Ne=100, num_replicates=400, length=1, pedigree=pedigree, recombination_rate=0, mutation_rate=1e-8, ) def test_dtwf_vs_pedigree_short_region(self): pedigree_file = "tests/data/pedigrees/wf_100Ne_10000gens.txt" pedigree = msprime.Pedigree.read_txt(pedigree_file, time_col=3) self.run_dtwf_pedigree_comparison( "dtwf_vs_pedigree_short_region", sample_size=10, Ne=100, num_replicates=400, length=1e6, pedigree=pedigree, recombination_rate=1e-8, mutation_rate=1e-8, ) def test_dtwf_vs_pedigree_long_region(self): pedigree_file = "tests/data/pedigrees/wf_100Ne_10000gens.txt" pedigree = msprime.Pedigree.read_txt(pedigree_file, time_col=3) self.run_dtwf_pedigree_comparison( "dtwf_vs_pedigree_long_region", sample_size=10, Ne=100, num_replicates=200, length=1e8, pedigree=pedigree, recombination_rate=1e-8, mutation_rate=1e-8, ) class DtwfVsCoalescent(Test): """ Tests where we compare the DTWF with coalescent simulations. 
""" def run_dtwf_coalescent_stats(self, **kwargs): df = pd.DataFrame() for model in ["hudson", "dtwf"]: kwargs["model"] = model logging.debug(f"Running: {kwargs}") data = collections.defaultdict(list) replicates = msprime.sim_ancestry(**kwargs) for ts in replicates: t_mrca = np.zeros(ts.num_trees) t_intervals = [] for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) t_intervals.append(tree.interval) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) data["intervals"].append(t_intervals) data["model"].append(model) df = df.append(pd.DataFrame(data)) return df def plot_dtwf_coalescent_stats(self, df): df_hudson = df[df.model == "hudson"] df_dtwf = df[df.model == "dtwf"] for stat in ["tmrca_mean", "num_trees"]: plot_qq(df_hudson[stat], df_dtwf[stat]) f = self.output_dir / f"{stat}.png" pyplot.savefig(f, dpi=72) pyplot.close("all") hudson_breakpoints = all_breakpoints_in_replicates(df_hudson["intervals"]) dtwf_breakpoints = all_breakpoints_in_replicates(df_dtwf["intervals"]) if len(hudson_breakpoints) > 0 or len(dtwf_breakpoints) > 0: plot_breakpoints_hist( hudson_breakpoints, dtwf_breakpoints, "hudson", "dtwf" ) pyplot.savefig(self.output_dir / "breakpoints.png", dpi=72) pyplot.close("all") def plot_tree_intervals(self, df): fig, ax_arr = pyplot.subplots(2, 1) for subplot_idx, model in enumerate(["hudson", "dtwf"]): intervals = df[df.model == model]["intervals"][0] for i, interval in enumerate(intervals): left, right = interval ax_arr[subplot_idx].set_title(model) ax_arr[subplot_idx].set_ylabel("tree index") ax_arr[subplot_idx].plot([left, right], [i, i], c="grey") ax_arr[1].set_xlabel("tree interval") pyplot.tight_layout() pyplot.savefig(self.output_dir / "intervals.png", dpi=72) pyplot.close("all") def _run(self, **kwargs): df = self.run_dtwf_coalescent_stats(**kwargs) self.plot_dtwf_coalescent_stats(df) self.plot_tree_intervals(df) class DtwfVsCoalescentSimple(DtwfVsCoalescent): """ Straightforward tests where we pass through simulate args directly. """ def test_dtwf_vs_coalescent_single_locus(self): self._run(samples=10, population_size=1000, num_replicates=300) def test_dtwf_vs_coalescent_recomb_discrete_hotspots(self): """ Checks the DTWF against the standard coalescent with a discrete recombination map with variable rates. """ recombination_map = msprime.RateMap( position=[0, 100, 500, 900, 1200, 1500, 2000], rate=[0.00001, 0, 0.0002, 0.00005, 0, 0.001], ) self._run( samples=10, population_size=1000, recombination_rate=recombination_map, num_replicates=300, discrete_genome=True, ) def test_dtwf_vs_coalescent_recomb_continuous_hotspots(self): """ Checks the DTWF against the standard coalescent with a continuous recombination map with variable rates. 
""" recombination_map = msprime.RateMap( position=[0, 0.1, 0.5, 0.9, 1.2, 1.5, 2.0], rate=[0.00001, 0, 0.0002, 0.00005, 0, 0.001], ) self._run( samples=10, population_size=1000, recombination_rate=recombination_map, num_replicates=300, discrete_genome=False, ) def test_dtwf_vs_coalescent_single_forced_recombination(self): recombination_map = msprime.RateMap(position=[0, 100, 101, 201], rate=[0, 1, 0]) self._run( samples=10, population_size=10, num_replicates=1, discrete_genome=True, recombination_rate=recombination_map, ) def test_dtwf_vs_coalescent_low_recombination(self): self._run( samples=10, population_size=1000, num_replicates=400, recombination_rate=0.01, sequence_length=5, ) def test_dtwf_vs_coalescent_2_pops_massmigration(self): demography = msprime.Demography.isolated_model([1000, 1000]) demography.add_mass_migration(time=10, source=1, dest=0, proportion=1.0) self._run( samples={0: 10, 1: 10}, demography=demography, sequence_length=10 ** 6, num_replicates=300, recombination_rate=1e-8, ) def test_dtwf_vs_coalescent_1_pop_growth(self): self._run( samples=10, demography=msprime.Demography.isolated_model([1000], growth_rate=[0.01]), recombination_rate=1e-8, sequence_length=5e7, num_replicates=300, discrete_genome=True, ) def test_dtwf_vs_coalescent_1_pop_shrink(self): initial_size = 1000 demography = msprime.Demography.isolated_model( [initial_size], growth_rate=[-0.01] ) demography.events.append( msprime.PopulationParametersChange( time=200, initial_size=initial_size, growth_rate=0.01, population=0 ) ) self._run( samples=10, demography=demography, recombination_rate=1e-8, sequence_length=5e7, num_replicates=300, discrete_genome=True, ) def test_dtwf_vs_coalescent_multiple_bottleneck(self): demography = msprime.Demography.isolated_model([1000, 1000]) demography.events = [ msprime.PopulationParametersChange( time=100, initial_size=100, growth_rate=-0.01, population=0 ), msprime.PopulationParametersChange( time=200, initial_size=100, growth_rate=-0.01, population=1 ), msprime.PopulationParametersChange( time=300, initial_size=1000, growth_rate=0.01, population=0 ), msprime.PopulationParametersChange( time=400, initial_size=1000, growth_rate=0.01, population=1 ), msprime.PopulationParametersChange( time=500, initial_size=100, growth_rate=0, population=0 ), msprime.PopulationParametersChange( time=600, initial_size=100, growth_rate=0, population=1 ), msprime.MigrationRateChange(time=700, rate=0.1, matrix_index=(0, 1)), ] self._run( samples={0: 5, 1: 5}, demography=demography, num_replicates=400, recombination_rate=1e-8, sequence_length=5e7, ) class DtwfVsCoalescentHighLevel(DtwfVsCoalescent): """ Tests for the DTWF and coalescent when we use a slightly more high-level intervace. """ def _run( self, initial_sizes, sample_sizes, num_loci, recombination_rate, migration_matrix=None, growth_rates=None, num_replicates=None, ): """ Generic test of DTWF vs hudson coalescent. Populations are not allowed to shrink to fewer than 100 individuals, and if starting with fewer than 100 have growth rate set to zero. 
""" assert len(sample_sizes) == len(initial_sizes) num_pops = len(sample_sizes) if num_replicates is None: num_replicates = 200 if growth_rates is None: default_growth_rate = 0.01 growth_rates = [default_growth_rate] * num_pops demography = msprime.Demography.isolated_model( initial_sizes, growth_rate=growth_rates ) for i in range(num_pops): if initial_sizes[i] > 100: # Growth rate set to zero at pop size 100 t_100 = (np.log(initial_sizes[i]) - np.log(100)) / growth_rates[i] de = msprime.PopulationParametersChange( t_100, growth_rate=0, population=i ) demography.events.append(de) else: # Enforce zero growth rate for small populations logging.warning( f"Warning - setting growth rate to zero for small \ population of size {initial_sizes[i]}", ) demography.populations[i].growth_rate = 0 if migration_matrix is None: default_mig_rate = 0.05 migration_matrix = [] for i in range(num_pops): row = [default_mig_rate] * num_pops row[i] = 0 migration_matrix.append(row) demography.migration_matrix[:] = migration_matrix super()._run( samples={j: sample_size for j, sample_size in enumerate(sample_sizes)}, demography=demography, num_replicates=num_replicates, sequence_length=num_loci, recombination_rate=recombination_rate, discrete_genome=True, ) def test_dtwf_vs_coalescent_long_region(self): self._run([1000], [10], int(1e8), 1e-8) def test_dtwf_vs_coalescent_short_region(self): self._run([1000], [10], int(1e6), 1e-8) def test_dtwf_vs_coalescent_2_pops(self): self._run( [500, 500], [5, 5], int(1e6), 1e-8, num_replicates=500, ) def test_dtwf_vs_coalescent_3_pops(self): self._run( [500, 500, 500], [5, 2, 0], int(1e7), 1e-8, ) def test_dtwf_vs_coalescent_4_pops(self): self._run( [1000, 1000, 1000, 1000], [0, 20, 0, 0], int(1e6), 1e-8, num_replicates=500, ) def test_dtwf_vs_coalescent_3_pops_asymm_mig(self): migration_matrix = [[0, 0.2, 0.1], [0.1, 0, 0.2], [0.2, 0.1, 0]] self._run( [500, 500, 500], [20, 0, 0], int(1e6), 1e-8, migration_matrix=migration_matrix, num_replicates=500, ) def test_dtwf_vs_coalescent_2_pops_high_asymm_mig(self): migration_matrix = [[0, 0.5], [0.7, 0]] self._run( [1000, 1000], [10, 10], int(1e6), 1e-8, migration_matrix=migration_matrix, num_replicates=200, growth_rates=[0.005, 0.005], ) class DtwfVsSlim(Test): """ Tests where we compare the DTWF with SLiM simulations. 
""" def run_dtwf_slim_comparison(self, slim_args, **kwargs): df = pd.DataFrame() kwargs["model"] = "dtwf" logging.debug(f"Running: {kwargs}") replicates = msprime.sim_ancestry(**kwargs) data = collections.defaultdict(list) for ts in replicates: t_mrca = np.zeros(ts.num_trees) for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) data["model"].append("dtwf") msp_num_samples = ts.num_samples slim_script = self.output_dir / "slim_script.txt" outfile = self.output_dir / "slim.trees" slim_args["OUTFILE"] = str(outfile) write_slim_script(slim_script, slim_args) cmd = _slim_executable + [slim_script] for _ in range(kwargs["num_replicates"]): subprocess.check_output(cmd) ts = tskit.load(outfile) ts = subsample_simplify_slim_treesequence(ts, slim_args["sample_sizes"]) assert ts.num_samples == msp_num_samples t_mrca = np.zeros(ts.num_trees) for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) data["model"].append("slim") df = df.append(pd.DataFrame(data)) df_slim = df[df.model == "slim"] df_dtwf = df[df.model == "dtwf"] for stat in ["tmrca_mean", "num_trees"]: v1 = df_slim[stat] v2 = df_dtwf[stat] sm.graphics.qqplot(v1) sm.qqplot_2samples(v1, v2, line="45") pyplot.xlabel("DTWF") pyplot.ylabel("SLiM") f = self.output_dir / f"{stat}.png" pyplot.savefig(f, dpi=72) pyplot.close("all") def check_slim_version(self): # This may not be robust but it's a start min_version = 3.1 raw_str = subprocess.check_output(_slim_executable + ["-version"]) version_list = str.split(str(raw_str)) for i in range(len(version_list)): if version_list[i].lower() == "version": version_str = version_list[i + 1] break version = float(version_str.strip(" ,")[0:3]) assert version >= min_version, "Require SLiM >= 3.1!" 
def _run( self, initial_sizes, sample_sizes, num_loci, recombination_rate, migration_matrix=None, num_replicates=None, ): """ Generic test of DTWF vs SLiM WF simulator, without growth rates """ assert len(sample_sizes) == len(initial_sizes) sample_sizes = np.array(sample_sizes) num_pops = len(sample_sizes) slim_args = {} if num_replicates is None: num_replicates = 200 # These are *diploid* samples in msprime slim_args["sample_sizes"] = 2 * sample_sizes demography = msprime.Demography.isolated_model(initial_sizes) slim_args["POP_STRS"] = "" for i in range(num_pops): slim_args["POP_STRS"] += "sim.addSubpop('p{i}', {N});\n".format( i=i, N=initial_sizes[i] ) if migration_matrix is None: default_mig_rate = 0.01 migration_matrix = [] for i in range(num_pops): row = [default_mig_rate] * num_pops row[i] = 0 migration_matrix.append(row) demography.migration_matrix[:] = migration_matrix # SLiM rates are 'immigration' forwards in time, which matches # DTWF backwards-time 'emmigration' assert len(migration_matrix) == num_pops if num_pops > 1: for i in range(num_pops): row = migration_matrix[i] indices = [j for j in range(num_pops) if j != i] pop_names = ["p" + str(j) for j in indices] rates = [str(row[j]) for j in indices] to_pop_str = ",".join(pop_names) rate_str = ",".join(rates) mig_str = "p{}.setMigrationRates(c({}), c({}));\n".format( i, to_pop_str, rate_str ) slim_args["POP_STRS"] += mig_str slim_args["RHO"] = recombination_rate slim_args["NUM_LOCI"] = int(num_loci) self.run_dtwf_slim_comparison( slim_args, samples={j: sample_size for j, sample_size in enumerate(sample_sizes)}, demography=demography, num_replicates=num_replicates, sequence_length=num_loci, recombination_rate=recombination_rate, discrete_genome=True, ) def test_dtwf_vs_slim_single_locus(self): self._run([100], [10], 1, 0) def test_dtwf_vs_slim_single_locus_2_pops(self): self._run([20, 20], [5, 5], 1, 0) def test_dtwf_vs_slim_short_region(self): self._run([100], [10], 1e7, 1e-8, num_replicates=200) def test_dtwf_vs_slim_long_region(self): self._run([50], [10], 1e8, 1e-8, num_replicates=200) class DtwfVsCoalescentRandom(DtwfVsCoalescent): """ Runs randomly generated test parameters. """ def _run(self, num_populations=1, num_replicates=200, num_demographic_events=0): # Make this deterministic np.random.seed(42) random.seed(42) N = num_populations num_loci = np.random.randint(1e5, 1e7) num_samples = np.random.randint(2, 10, size=num_populations) demography = msprime.Demography.isolated_model([1000 / N] * num_populations) migration_matrix = [] for i in range(N): migration_matrix.append( [random.uniform(0.05, 0.25) * (j != i) for j in range(N)] ) demography.migration_matrix[:] = migration_matrix # Add demographic events and some migration rate changes t_max = 1000 times = sorted(np.random.randint(300, t_max, size=num_demographic_events)) for t in times: initial_size = np.random.randint(500, 1000) # Setting growth_rate to 0 because it's too tricky to get # growth_rates in the DTWF which don't result in N going to 0. 
growth_rate = 0 pop_id = np.random.randint(N) demography.events.append( msprime.PopulationParametersChange( time=t, initial_size=initial_size, growth_rate=growth_rate, population_id=pop_id, ) ) if random.random() < 0.5 and N >= 2: rate = random.uniform(0.05, 0.25) index = tuple( np.random.choice(range(num_populations), size=2, replace=False) ) demography.events.append( msprime.MigrationRateChange(time=t, rate=rate, matrix_index=index) ) # Collect all pops together to control coalescence times for DTWF for i in range(1, N): demography.events.append( msprime.MassMigration( time=t_max, source=i, destination=0, proportion=1.0 ) ) demography.events.append( msprime.PopulationParametersChange( time=t_max, initial_size=100, growth_rate=0, population_id=0 ) ) super()._run( samples={j: sample_size for j, sample_size in enumerate(num_samples)}, demography=demography, num_replicates=num_replicates, sequence_length=num_loci, recombination_rate=1e-8, discrete_genome=True, ) def test_dtwf_vs_coalescent_random_1(self): self._run(num_populations=2, num_replicates=200, num_demographic_events=3) def test_dtwf_vs_coalescent_random_2(self): self._run(num_populations=3, num_replicates=200, num_demographic_events=3) def test_dtwf_vs_coalescent_random_3(self): self._run(num_populations=2, num_replicates=200, num_demographic_events=6) def test_dtwf_vs_coalescent_random_4(self): self._run(num_populations=1, num_replicates=200, num_demographic_events=8) class RecombinationBreakpointTest(Test): """ Verifies that the number of recombination breakpoints is proportional to the total branch length across all trees. """ def verify_breakpoint_distribution( self, name, sample_size, Ne, r, L, ploidy, model, growth_rate=0 ): ts = msprime.sim_ancestry( samples=sample_size, demography=msprime.Demography.isolated_model( [Ne], growth_rate=[growth_rate] ), ploidy=ploidy, sequence_length=L, recombination_rate=r, model=model, ) area = [tree.total_branch_length * tree.span for tree in ts.trees()] scipy.stats.probplot(area, dist=scipy.stats.expon(Ne * r), plot=pyplot) path = self.output_dir / f"{name}_growth={growth_rate}_ploidy={ploidy}.png" logging.debug(f"Writing {path}") pyplot.savefig(path) pyplot.close("all") def test_xi_beta_breakpoints(self): Ne = 10 ** 4 for alpha in [1.1, 1.3, 1.6, 1.9]: for p in [1, 2]: self.verify_breakpoint_distribution( f"n=100_alpha={alpha}", sample_size=100, Ne=Ne, r=1e-7, L=10 ** 6, ploidy=p, model=msprime.BetaCoalescent(alpha=alpha), ) # Add a growth rate with a higher recombination rate so # we still get decent numbers of trees self.verify_breakpoint_distribution( f"growth_n=100_alpha={alpha}", sample_size=100, Ne=Ne, r=1e-7, L=10 ** 6, ploidy=p, model=msprime.BetaCoalescent(alpha=alpha), growth_rate=0.05, ) def test_xi_dirac_breakpoints(self): Ne = 10 ** 2 for psi in [0.1, 0.3, 0.6, 0.9]: for c in [1, 10]: for p in [1, 2]: self.verify_breakpoint_distribution( f"n=100_psi={psi}_c={c}", sample_size=100, Ne=Ne, r=1e-8, L=10 ** 6, ploidy=p, model=msprime.DiracCoalescent(psi=psi, c=c), ) # Add a growth rate with a higher recombination rate so # we still get decent numbers of trees self.verify_breakpoint_distribution( f"growth_n=100_psi={psi}_c={c}", sample_size=100, Ne=Ne, r=1e-7, L=10 ** 6, ploidy=p, model=msprime.DiracCoalescent(psi=psi, c=c), growth_rate=0.05, ) def test_hudson_breakpoints(self): for p in [1, 2]: self.verify_breakpoint_distribution( "single_pop_n_50", sample_size=50, Ne=10 ** 4, r=1e-8, L=10 ** 6, ploidy=p, model="hudson", ) self.verify_breakpoint_distribution( "single_pop_n_100", 
sample_size=100, Ne=10 ** 4, r=1e-8, L=10 ** 6, ploidy=p, model="hudson", ) self.verify_breakpoint_distribution( "single_pop_n_100_growth", sample_size=100, Ne=10 ** 4, r=1e-7, L=10 ** 6, ploidy=p, model="hudson", growth_rate=0.05, ) class RecombinationMutationTest(Test): """ Verifies that the number of recombinations equals the number of mutations since both should be proportional to the total branch lenght of the trees. """ def verify_recombination( self, name, sample_size, Ne, r, m, L, ploidy, model, growth_rate=0 ): num_replicates = 500 empirical_theta = [] empirical_rho = [] for _ in range(num_replicates): sim = msprime.ancestry._parse_sim_ancestry( samples=[msprime.SampleSet(sample_size, ploidy=1)], recombination_rate=r, sequence_length=L, ploidy=ploidy, demography=msprime.Demography.isolated_model( [Ne], growth_rate=[growth_rate] ), model=model, ) ts = next(sim.run_replicates(1)) empirical_rho.append(sim.num_breakpoints) ts = msprime.sim_mutations(ts, rate=m) empirical_theta.append(ts.get_num_sites()) empirical_rho.sort() empirical_theta.sort() empirical_rho = np.array(empirical_rho) empirical_theta = np.array(empirical_theta) plot_qq(empirical_theta, empirical_rho) path = ( self.output_dir / f"{name}_growth={growth_rate}_ploidy={ploidy}_rec_check.png" ) logging.debug(f"Writing {path}") pyplot.savefig(path) pyplot.close("all") def test_xi_beta_recombinations(self): Ne = 10000 for alpha in [1.1, 1.3, 1.5, 1.9]: for p in [1, 2]: self.verify_recombination( f"n=100_alpha={alpha}", sample_size=100, Ne=Ne, r=1e-8, m=1e-8, L=10 ** 6, ploidy=p, model=msprime.BetaCoalescent(alpha=alpha), ) def test_xi_dirac_recombinations(self): Ne = 100 for psi in [0.1, 0.5, 0.9]: for c in [1, 10]: for p in [1, 2]: self.verify_recombination( f"n=100_psi={psi}_c={c}", sample_size=100, Ne=Ne, r=1e-8, m=1e-8, L=10 ** 6, ploidy=p, model=msprime.DiracCoalescent(psi=psi, c=c), ) def test_hudson_recombinations(self): for p in [1, 2]: self.verify_recombination( "n=100_hudson", sample_size=100, Ne=10000, r=1e-8, m=1e-8, L=10 ** 6, ploidy=p, model="hudson", ) class XiVsHudsonTest(Test): """ Test that Xi dirac coalescent is equivalent to the Hudson model in the appropriate regime. """ def _run(self, xi_model, num_replicates, num_samples, **kwargs): df = pd.DataFrame() for model in ["hudson", xi_model]: simulate_args = dict(kwargs) simulate_args["model"] = model model_str = "hudson" if model != "hudson": model_str = "Xi" # The Xi Dirac coalescent scales differently than the Hudson model. # (Ne² for Dirac and 2Ne for Hudson). # We need NeDirac= square_root(2NeHudson). 
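                # For example, with ploidy=2 and population_size=10000 the Dirac
                # run uses sqrt(2 * 10000), roughly 141.4; with ploidy=1 it uses
                # sqrt(10000) = 100.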
simulate_args["population_size"] = math.sqrt( int(simulate_args["ploidy"]) * int(simulate_args["population_size"]) ) logging.debug(f"Running: {simulate_args}") sim = msprime.ancestry._parse_sim_ancestry( samples=[msprime.SampleSet(num_samples, ploidy=1)], sequence_length=1, discrete_genome=False, **simulate_args, ) replicates = sim.run_replicates(num_replicates) data = collections.defaultdict(list) for ts in replicates: t_mrca = np.zeros(ts.num_trees) for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) data["num_nodes"].append(ts.num_nodes) data["num_edges"].append(ts.num_edges) data["model"].append(model_str) df = df.append(pd.DataFrame(data)) df_hudson = df[df.model == "hudson"] df_xi = df[df.model == "Xi"] p = int(simulate_args["ploidy"]) for stat in ["tmrca_mean", "num_trees", "num_nodes", "num_edges"]: v1 = df_hudson[stat] v2 = df_xi[stat] sm.graphics.qqplot(v1) sm.qqplot_2samples(v1, v2, line="45") f = self.output_dir / f"{stat}_ploidy={p}.png" pyplot.savefig(f, dpi=72) pyplot.close("all") def test_xi_dirac_vs_hudson_recombination(self): self._run( msprime.DiracCoalescent(psi=0.99, c=0), num_replicates=1000, num_samples=50, population_size=10000, recombination_rate=0.001, ploidy=1, ) self._run( msprime.DiracCoalescent(psi=0.99, c=0), num_replicates=1000, num_samples=50, population_size=10000, recombination_rate=0.001, ploidy=2, ) def test_xi_dirac_vs_hudson_single_locus(self): self._run( msprime.DiracCoalescent(psi=0.99, c=0), num_replicates=5000, num_samples=10, population_size=10000, ploidy=1, ) self._run( msprime.DiracCoalescent(psi=0.99, c=0), num_replicates=5000, num_samples=10, population_size=10000, ploidy=2, ) class KnownSFS(Test): """ Compare the simulated SFS to precomputed known values. """ def compare_sfs(self, sample_size, ploidy, model, num_replicates, sfs, name): data = collections.defaultdict(list) tbl_sum = [0] * (sample_size - 1) tot_bl_sum = [0] replicates = msprime.sim_ancestry( [msprime.SampleSet(sample_size, ploidy=1, population=0)], ploidy=ploidy, model=model, num_replicates=num_replicates, ) for ts in replicates: for tree in ts.trees(): tot_bl = 0.0 tbl = [0] * (sample_size - 1) for node in tree.nodes(): if tree.parent(node) != tskit.NULL: tbl[tree.num_samples(node) - 1] = tbl[ tree.num_samples(node) - 1 ] + tree.branch_length(node) tot_bl = tot_bl + tree.branch_length(node) for xi in range(sample_size - 1): rescaled_x = tbl[xi] data["total_branch_length"].append(rescaled_x / tot_bl) tbl_sum[xi] = tbl_sum[xi] + rescaled_x tot_bl_sum[0] = tot_bl_sum[0] + tot_bl data["num_leaves"].extend(range(1, sample_size)) f = self.output_dir / f"{name}.png" ax = sns.violinplot( data=data, x="num_leaves", y="total_branch_length", color="grey" ) ax.set_xlabel("num leaves") l1 = ax.plot(np.arange(sample_size - 1), sfs[::], ":", linewidth=3, marker="^") l2 = ax.plot( np.arange(sample_size - 1), [(x / num_replicates) / (tot_bl_sum[0] / num_replicates) for x in tbl_sum], "--", marker="o", linewidth=2, ) ax.legend((l1[0], l2[0]), ("Expected", "Observed")) pyplot.savefig(f, dpi=72) pyplot.close("all") class DiracSFS(KnownSFS): def _run( self, sample_size=10, ploidy=2, psi=None, c=None, sfs=None, num_replicates=10000 ): """ Runs simulations of the xi dirac model and calculates E[Bi]/E[B] (Bi branch length having i leaves and B total branch length) and compares to the expected SFS. 
""" logging.debug(f"running SFS for {sample_size} {psi} {c}") model = msprime.DiracCoalescent(psi=psi, c=c) name = f"n={sample_size}_psi={psi}_c={c}_ploidy={ploidy}" self.compare_sfs(sample_size, ploidy, model, num_replicates, sfs, name) def test_xi_dirac_expected_sfs_psi_0_1_c_1(self): self._run( psi=0.1, c=1, ploidy=2, sfs=[ 0.35352303, 0.17672997, 0.11781921, 0.08836481, 0.07069227, 0.05891075, 0.05049574, 0.04418514, 0.03927908, ], ) def test_xi_dirac_expected_sfs_psi_0_3_c_1(self): self._run( psi=0.3, c=1, ploidy=2, sfs=[ 0.35430737, 0.17650201, 0.11762438, 0.08822363, 0.07058696, 0.05883259, 0.05044232, 0.04416277, 0.03931799, ], ) def test_xi_dirac_expected_sfs_psi_0_5_c_1(self): self._run( psi=0.5, c=1, ploidy=2, sfs=[ 0.35655911, 0.17596878, 0.11711820, 0.08785514, 0.07030139, 0.05860142, 0.05025410, 0.04402755, 0.03931431, ], ) def test_xi_dirac_expected_sfs_psi_0_9_c_1(self): self._run( psi=0.9, c=1, ploidy=2, sfs=[ 0.36443828, 0.17490683, 0.11614708, 0.08717119, 0.06965759, 0.05790491, 0.04939935, 0.04279132, 0.03758346, ], ) def test_xi_dirac_expected_sfs_n3(self): self._run(sample_size=3, ploidy=2, psi=0.1, c=10, sfs=[0.6667343, 0.3332657]) self._run(sample_size=3, ploidy=2, psi=0.3, c=10, sfs=[0.6682113, 0.3317887]) self._run(sample_size=3, ploidy=2, psi=0.5, c=10, sfs=[0.6721853, 0.3278147]) self._run(sample_size=3, ploidy=2, psi=0.9, c=10, sfs=[0.6852703, 0.3147297]) self._run(sample_size=3, ploidy=1, psi=0.1, c=10000, sfs=[0.678571, 0.321429]) self._run(sample_size=3, ploidy=1, psi=0.3, c=10000, sfs=[0.708333, 0.291667]) self._run(sample_size=3, ploidy=1, psi=0.5, c=10000, sfs=[0.750000, 0.250000]) self._run(sample_size=3, ploidy=1, psi=0.9, c=10000, sfs=[0.916667, 0.083333]) def test_xi_dirac_expected_sfs_psi_0_1_c_10(self): self._run( psi=0.1, c=10, ploidy=2, sfs=[ 0.35385062, 0.17661522, 0.11773706, 0.08830646, 0.07064941, 0.05887993, 0.05047626, 0.04418035, 0.03930470, ], ) def test_xi_dirac_expected_sfs_psi_0_3_c_10(self): self._run( psi=0.3, c=10, ploidy=2, sfs=[ 0.36053858, 0.17456975, 0.11610005, 0.08713599, 0.06977685, 0.05822906, 0.05002797, 0.04398723, 0.03963453, ], ) self._run( num_replicates=10000, sample_size=10, psi=0.5, c=10, ploidy=2, sfs=[ 0.37556917, 0.17015781, 0.11285655, 0.08495119, 0.06808802, 0.05683977, 0.04886055, 0.04309158, 0.03958537, ], ) self._run( num_replicates=10000, sample_size=10, psi=0.9, c=10, ploidy=2, sfs=[ 0.41154361, 0.15908770, 0.10852899, 0.08341563, 0.06647774, 0.05471783, 0.04592602, 0.03818041, 0.03212207, ], ) # Compare SFS when c=10000 to the expected SFS where c tends to infinity def test_xi_dirac_expected_sfs_psi_0_1_c_10000(self): self._run( psi=0.1, c=10000, ploidy=2, sfs=[ 0.36939374, 0.17057448, 0.11408360, 0.08571572, 0.06874076, 0.05749423, 0.04958115, 0.04390987, 0.04050644, ], ) def test_xi_dirac_expected_sfs_psi_0_3_c_10000(self): self._run( psi=0.3, c=10000, ploidy=2, sfs=[ 0.39876239, 0.15840021, 0.10834860, 0.08165271, 0.06562863, 0.05508280, 0.04777344, 0.04280604, 0.04154517, ], ) def test_xi_dirac_expected_sfs_psi_0_5_c_10000(self): self._run( psi=0.5, c=10000, ploidy=2, sfs=[ 0.42603419, 0.14512841, 0.10505636, 0.07956441, 0.06368639, 0.05328134, 0.04595869, 0.04078814, 0.04050205, ], ) def test_xi_dirac_expected_sfs_psi_0_9_c_10000(self): self._run( psi=0.9, c=10000, ploidy=2, sfs=[ 0.47543921, 0.11338801, 0.10691661, 0.08342993, 0.06358921, 0.05162311, 0.04334855, 0.03416865, 0.02809671, ], ) def test_dirac_expected_sfs_psi_0_1_c_10000(self): self._run( psi=0.1, c=10000, ploidy=1, sfs=[ 0.422312, 
0.148277, 0.101947, 0.077241, 0.062498, 0.052964, 0.046659, 0.043069, 0.045033, ], ) def test_dirac_expected_sfs_psi_0_3_c_10000(self): self._run( psi=0.3, c=10000, ploidy=1, sfs=[ 0.570300, 0.083920, 0.067942, 0.056251, 0.047302, 0.041406, 0.038521, 0.039844, 0.054512, ], ) def test_dirac_expected_sfs_psi_0_5_c_10000(self): self._run( psi=0.5, c=10000, ploidy=1, sfs=[ 0.710037, 0.036594, 0.031667, 0.031557, 0.032135, 0.031557, 0.031667, 0.036594, 0.058192, ], ) def test_dirac_expected_sfs_psi_0_9_c_10000(self): self._run( psi=0.9, c=10000, ploidy=1, sfs=[ 0.927920, 0.001810, 0.000476, 0.000096, 0.000148, 0.001040, 0.005356, 0.018413, 0.044742, ], ) class BetaSFS(KnownSFS): def _run(self, sample_size, ploidy, alpha, sfs, num_replicates=1000): """ Runs simulations of the xi beta model and compares to the expected SFS. """ logging.debug(f"running Beta SFS for {sample_size} {alpha}") model = msprime.BetaCoalescent(alpha=alpha) name = f"n={sample_size}_alpha={alpha}_ploidy={ploidy}" self.compare_sfs(sample_size, ploidy, model, num_replicates, sfs, name) def test_xi_beta_expected_sfs_alpha1_1(self): self._run( num_replicates=100000, sample_size=10, alpha=1.1, ploidy=2, sfs=[ 0.40838865, 0.15645421, 0.10765060, 0.08178884, 0.06548874, 0.05455910, 0.04672861, 0.04082172, 0.03811953, ], ) def test_xi_beta_expected_sfs_alpha1_3(self): self._run( num_replicates=100000, sample_size=10, alpha=1.3, ploidy=2, sfs=[ 0.39612917, 0.16173072, 0.10932728, 0.08270507, 0.06630221, 0.05534012, 0.04754038, 0.04182775, 0.03909731, ], ) def test_xi_beta_expected_sfs_alpha1_5(self): self._run( num_replicates=100000, sample_size=10, alpha=1.5, ploidy=2, sfs=[ 0.38395732, 0.16650213, 0.11136301, 0.08395003, 0.06731437, 0.05622960, 0.04837457, 0.04268961, 0.03961935, ], ) def test_xi_beta_expected_sfs_alpha1_9(self): self._run( num_replicates=100000, sample_size=10, alpha=1.9, ploidy=2, sfs=[ 0.35961114, 0.17486018, 0.11638771, 0.08734266, 0.06992360, 0.05832611, 0.05007349, 0.04396363, 0.03951149, ], ) def test_beta_expected_sfs_alpha1_1(self): self._run( num_replicates=100000, sample_size=10, alpha=1.1, ploidy=1, sfs=[ 0.580175, 0.119103, 0.066440, 0.047197, 0.038166, 0.033879, 0.032796, 0.035382, 0.046863, ], ) def test_beta_expected_sfs_alpha1_3(self): self._run( num_replicates=100000, sample_size=10, alpha=1.3, ploidy=1, sfs=[ 0.521296, 0.137166, 0.078487, 0.056070, 0.045115, 0.039481, 0.037258, 0.038479, 0.046649, ], ) def test_beta_expected_sfs_alpha1_5(self): self._run( num_replicates=100000, sample_size=10, alpha=1.5, ploidy=1, sfs=[ 0.467491, 0.152216, 0.090245, 0.065103, 0.052216, 0.045067, 0.041436, 0.040898, 0.045330, ], ) def test_beta_expected_sfs_alpha1_9(self): self._run( num_replicates=100000, sample_size=10, alpha=1.9, ploidy=1, sfs=[ 0.374086, 0.173264, 0.112565, 0.083644, 0.066914, 0.056165, 0.048856, 0.043826, 0.040681, ], ) class XiGrowth(Test): def compare_tmrca( self, pop_size, growth_rate, model, num_replicates, a, b, ploidy, name ): demography = msprime.Demography.isolated_model( initial_size=[pop_size], growth_rate=[growth_rate] ) replicates = msprime.ancestry.sim_ancestry( 2, demography=demography, model=model, ploidy=ploidy, num_replicates=num_replicates, ) T1 = np.array([ts.first().tmrca(0, 1) for ts in replicates]) sm.graphics.qqplot( T1, dist=scipy.stats.gompertz, distargs=(a / b,), scale=1 / b, line="45" ) filename = self.output_dir / f"{name}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") class BetaGrowth(XiGrowth): def _run(self, pop_size, alpha, growth_rate, 
num_replicates=10000): logging.debug(f"running Beta growth for {pop_size} {alpha} {growth_rate}") b = growth_rate * (alpha - 1) model = (msprime.BetaCoalescent(alpha=alpha),) ploidy = 2 a = 1 / (2 * ploidy * self.compute_beta_timescale(pop_size, alpha, ploidy)) name = f"N={pop_size}_alpha={alpha}_growth_rate={growth_rate}_ploidy={ploidy}" self.compare_tmrca( pop_size, growth_rate, model, num_replicates, a, b, ploidy, name ) ploidy = 1 a = 1 / self.compute_beta_timescale(pop_size, alpha, ploidy) name = f"N={pop_size}_alpha={alpha}_growth_rate={growth_rate}_ploidy={ploidy}" self.compare_tmrca( pop_size, growth_rate, model, num_replicates, a, b, ploidy, name ) def compute_beta_timescale(self, pop_size, alpha, ploidy): if ploidy > 1: N = pop_size / 2 m = 2 + np.exp( alpha * np.log(2) + (1 - alpha) * np.log(3) - np.log(alpha - 1) ) else: N = pop_size m = 1 + np.exp((1 - alpha) * np.log(2) - np.log(alpha - 1)) ret = np.exp( alpha * np.log(m) + (alpha - 1) * np.log(N) - np.log(alpha) - scipy.special.betaln(2 - alpha, alpha) ) return ret def test_10_15_01(self): self._run(pop_size=10, alpha=1.5, growth_rate=0.1) def test_1000_19_0001(self): self._run(pop_size=1000, alpha=1.9, growth_rate=0.001) def test_100000_11_001(self): self._run(pop_size=100000, alpha=1.1, growth_rate=0.01) class DiracGrowth(XiGrowth): def _run(self, pop_size, c, psi, growth_rate, num_replicates=10000): logging.debug(f"running Dirac growth for {pop_size} {c} {psi} {growth_rate}") b = growth_rate model = (msprime.DiracCoalescent(psi=psi, c=c),) p = 2 a = (1 + c * psi * psi / (2 * p)) / (pop_size * pop_size) name = f"N={pop_size}_c={c}_psi={psi}_growth_rate={growth_rate}_ploidy={p}" self.compare_tmrca(pop_size, growth_rate, model, num_replicates, a, b, p, name) p = 1 a = (1 + c * psi * psi) / (pop_size * pop_size) name = f"N={pop_size}_c={c}_psi={psi}_growth_rate={growth_rate}_ploidy={p}" self.compare_tmrca(pop_size, growth_rate, model, num_replicates, a, b, p, name) def test_1_01_05_01(self): self._run(pop_size=1, c=0.1, psi=0.5, growth_rate=0.1) def test_10_05_07_0001(self): self._run(pop_size=10, c=0.5, psi=0.7, growth_rate=0.001) def test_100_1_09_001(self): self._run(pop_size=100, c=1, psi=0.9, growth_rate=0.01) def test_10_5_03_01(self): self._run(pop_size=10, c=5, psi=0.3, growth_rate=0.1) class ContinuousVsDiscreteRecombination(Test): def _run_msprime_coalescent_stats(self, **kwargs): logging.debug(f"\t msprime: {kwargs}") if "num_replicates" in kwargs: replicates = kwargs["num_replicates"] num_trees = [0 for i in range(replicates)] breakpoints = [0 for i in range(replicates)] for i, ts in enumerate(msprime.sim_ancestry(**kwargs)): num_trees[i] = ts.num_trees breakpoints[i] = list(ts.breakpoints()) else: ts = msprime.sim_ancestry(**kwargs) num_trees = [ts.num_trees] breakpoints = [list(ts.breakpoints)] d = {"num_trees": num_trees, "breakpoints": breakpoints} df = pd.DataFrame(d) return df def run_cont_discrete_comparison(self, model, recomb_map): sample_size = 10 num_replicates = 400 N = 100 df_discrete = self._run_msprime_coalescent_stats( num_replicates=num_replicates, samples=sample_size, population_size=N, model=model, recombination_rate=recomb_map, discrete_genome=True, ) df_cont = self._run_msprime_coalescent_stats( num_replicates=num_replicates, samples=sample_size, model=model, population_size=N, recombination_rate=recomb_map, discrete_genome=False, ) self._plot_stats( "compare continuous and discrete coordinates", df_discrete, df_cont, "discrete", "continuous", ) class 
UniformRecombination(ContinuousVsDiscreteRecombination): def _run(self, model): recomb_map = msprime.RateMap.uniform(2000000, 1e-6) self.run_cont_discrete_comparison(model, recomb_map) def test_hudson_cont_discrete_uniform(self): self._run("hudson") def test_dtwf_cont_discrete_uniform(self): self._run("dtwf") class VariableRecombination(ContinuousVsDiscreteRecombination): def _run(self, model): r = 1e-6 positions = [0, 10000, 50000, 150000, 200000] rates = [0.0, r, 5 * r, r / 2] recomb_map = msprime.RateMap(position=positions, rate=rates) self.run_cont_discrete_comparison(model, recomb_map) def test_hudson_cont_discrete_variable(self): self._run("hudson") def test_dtwf_cont_discrete_variable(self): self._run("dtwf") class ArgRecordTest(Test): """ Check that we get the same distributions of nodes and edges when we simplify an ARG as we get in a direct simulation. """ def _run(self, num_replicates=1000, **kwargs): ts_node_counts = np.array([]) arg_node_counts = np.array([]) ts_tree_counts = np.array([]) arg_tree_counts = np.array([]) ts_edge_counts = np.array([]) arg_edge_counts = np.array([]) for ts in msprime.simulate(num_replicates=num_replicates, **kwargs): ts_node_counts = np.append(ts_node_counts, ts.num_nodes) ts_tree_counts = np.append(ts_tree_counts, ts.num_trees) ts_edge_counts = np.append(ts_edge_counts, ts.num_edges) reps = msprime.simulate( num_replicates=num_replicates, record_full_arg=True, **kwargs ) for arg in reps: arg = arg.simplify() arg_node_counts = np.append(arg_node_counts, arg.num_nodes) arg_tree_counts = np.append(arg_tree_counts, arg.num_trees) arg_edge_counts = np.append(arg_edge_counts, arg.num_edges) pp_ts = sm.ProbPlot(ts_node_counts) pp_arg = sm.ProbPlot(arg_node_counts) sm.qqplot_2samples(pp_ts, pp_arg, line="45") pyplot.savefig(self.output_dir / "nodes.png", dpi=72) pp_ts = sm.ProbPlot(ts_tree_counts) pp_arg = sm.ProbPlot(arg_tree_counts) sm.qqplot_2samples(pp_ts, pp_arg, line="45") pyplot.savefig(self.output_dir / "num_trees.png", dpi=72) pp_ts = sm.ProbPlot(ts_edge_counts) pp_arg = sm.ProbPlot(arg_edge_counts) sm.qqplot_2samples(pp_ts, pp_arg, line="45") pyplot.savefig(self.output_dir / "edges.png", dpi=72) pyplot.close("all") def test_arg_hudson_n10_rho_20(self): self._run(sample_size=10, recombination_rate=20) def test_arg_hudson_n1000_rho_0_2(self): self._run(sample_size=1000, recombination_rate=0.2) def test_arg_beta_n100_rho_2(self): model = msprime.BetaCoalescent(alpha=1.1) self._run(sample_size=100, recombination_rate=2, model=model) def test_arg_dirac_n100_rho_2(self): model = msprime.DiracCoalescent(psi=0.9, c=1) self._run(sample_size=100, recombination_rate=2, model=model) class HudsonAnalytical(Test): """ Miscellaneous tests for the hudson model where we verify against analytical results. """ def get_segregating_sites_histogram(self, cmd): logging.debug(f"\t {' '.join(cmd)}") output = subprocess.check_output(cmd) max_s = 200 hist = np.zeros(max_s) for line in output.splitlines(): if line.startswith(b"segsites"): s = int(line.split()[1]) if s <= max_s: hist[s] += 1 return hist / np.sum(hist) def get_S_distribution(self, k, n, theta): """ Returns the probability of having k segregating sites in a sample of size n. Wakely pg 94. """ s = 0.0 for i in range(2, n + 1): t1 = (-1) ** i t2 = scipy.special.binom(n - 1, i - 1) t3 = (i - 1) / (theta + i - 1) t4 = (theta / (theta + i - 1)) ** k s += t1 * t2 * t3 * t4 return s def test_analytical_segsites(self): """ Runs the check for the number of segregating sites against the analytical prediction. 
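        The analytical distribution is the one implemented in get_S_distribution()
        (Wakeley, p. 94):
        P(S = k) = sum_{i=2}^{n} (-1)^i * C(n-1, i-1) * ((i-1) / (theta+i-1)) * (theta / (theta+i-1))^k.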
We also compare against ms. """ R = 100000 theta = 2 for n in range(2, 15): logging.debug(f"Running n = {n}") cmd = f"{n} {R} -t {theta}" S_ms = self.get_segregating_sites_histogram( _ms_executable + cmd.split() + self.get_ms_seeds() ) S_msp = self.get_segregating_sites_histogram( _mspms_executable + cmd.split() + self.get_ms_seeds() ) fig, ax = pyplot.subplots() index = np.arange(10) S_analytical = [self.get_S_distribution(j, n, theta) for j in index] bar_width = 0.35 pyplot.bar(index, S_ms[index], bar_width, color="b", label="ms") pyplot.bar( index + bar_width, S_msp[index], bar_width, color="r", label="msp" ) pyplot.plot(index + bar_width, S_analytical, "o", color="k") pyplot.legend() pyplot.xticks(index + bar_width, [str(j) for j in index]) pyplot.tight_layout() pyplot.savefig(self.output_dir / f"{n}.png") def test_analytical_pi(self): """ Runs the check for pi against analytical predictions. """ R = 100000 theta = 4.5 sample_size = np.arange(2, 15) mean = np.zeros_like(sample_size, dtype=float) var = np.zeros_like(sample_size, dtype=float) predicted_mean = np.zeros_like(sample_size, dtype=float) predicted_var = np.zeros_like(sample_size, dtype=float) for k, n in enumerate(sample_size): pi = np.zeros(R) replicates = msprime.simulate( sample_size=n, mutation_rate=theta / 4, num_replicates=R ) for j, ts in enumerate(replicates): pi[j] = ts.get_pairwise_diversity() # Predicted mean is theta. predicted_mean[k] = theta # From Wakely, eqn (4.14), pg. 101 predicted_var[k] = (n + 1) * theta / (3 * (n - 1)) + 2 * ( n ** 2 + n + 3 ) * theta ** 2 / (9 * n * (n - 1)) mean[k] = np.mean(pi) var[k] = np.var(pi) logging.debug( f"{n}\t{theta}\t{np.mean(pi)}\t{predicted_var[k]}\t{np.var(pi)}" ) filename = self.output_dir / "mean.png" pyplot.plot(sample_size, predicted_mean, "-") pyplot.plot(sample_size, mean, "-") pyplot.savefig(filename) pyplot.close("all") filename = self.output_dir / "var.png" pyplot.plot(sample_size, predicted_var, "-") pyplot.plot(sample_size, var, "-") pyplot.savefig(filename) pyplot.close("all") def test_gc_correlation_between_trees(self): """ Runs the check for the probability of same tree at two sites against analytical predictions. 
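        The prediction follows Wiuf & Hein (2000), eqn (15): for two sites at
        gene-conversion distance rG, P(same tree) = (18 + rG) / (18 + 13*rG + rG^2),
        as implemented further down in this method.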
""" R = 1000 sample_size = 1 # 2 diploids gc_length_rate_ratio = np.array([0.05, 0.5, 5.0]) gc_length = np.array([100, 50, 20]) gc_rate = 0.25 / (gc_length_rate_ratio * gc_length) seq_length = 500 predicted_prob = np.zeros([gc_length_rate_ratio.size, seq_length], dtype=float) empirical_prob_first = np.zeros( [gc_length_rate_ratio.size, seq_length], dtype=float ) empirical_prob_mid = np.zeros( [gc_length_rate_ratio.size, seq_length], dtype=float ) empirical_prob_last = np.zeros( [gc_length_rate_ratio.size, seq_length], dtype=float ) for k, l in enumerate(gc_length): same_root_count_first = np.zeros(seq_length) same_root_count_mid = np.zeros(seq_length) same_root_count_last = np.zeros(seq_length) replicates = msprime.sim_ancestry( samples=sample_size, sequence_length=seq_length, gene_conversion_rate=gc_rate[k], gene_conversion_tract_length=gc_length[k], num_replicates=R, ) for ts in replicates: firstroot = ts.first().root lastroot = ts.last().root for tree in ts.trees(): left, right = tree.interval if left <= seq_length / 2 < right: midroot = tree.root for tree in ts.trees(): left, right = map(int, tree.interval) if firstroot == tree.root: same_root_count_first[left:right] += 1 if lastroot == tree.root: same_root_count_last[left:right] += 1 if midroot == tree.root: same_root_count_mid[left:right] += 1 empirical_prob_first[k, :] = same_root_count_first / R empirical_prob_last[k, :] = same_root_count_last / R empirical_prob_mid[k, :] = same_root_count_mid / R # Predicted prob # From Wiuf, Hein, 2000, eqn (15), pg. 457 rG = ( 2 / gc_length_rate_ratio[k] * (1.0 - np.exp(-np.arange(seq_length) / l)) ) predicted_prob[k, :] = (18.0 + rG) / (18.0 + 13.0 * rG + rG * rG) x = np.arange(500) + 1 pyplot.plot(x, predicted_prob[0], "--", label="prediction") pyplot.plot(x, empirical_prob_first[0], "-", label="simulation") pyplot.plot(x, predicted_prob[1], "--") pyplot.plot(x, empirical_prob_first[1], "-") pyplot.plot(x, predicted_prob[2], "--") pyplot.plot(x, empirical_prob_first[2], "-") pyplot.xlabel("chromosome positon") pyplot.ylabel("fraction of trees identical to first position tree") pyplot.legend(loc="upper right") pyplot.savefig(self.output_dir / "prob_first.png") pyplot.close("all") pyplot.plot(x, predicted_prob[0, ::-1], "--", label="prediction") pyplot.plot(x, empirical_prob_last[0], "-", label="simulation") pyplot.plot(x, predicted_prob[1, ::-1], "--") pyplot.plot(x, empirical_prob_last[1], "-") pyplot.plot(x, predicted_prob[2, ::-1], "--") pyplot.plot(x, empirical_prob_last[2], "-") pyplot.xlabel("chromosome positon") pyplot.ylabel("fraction of trees identical to last position tree") pyplot.legend(loc="upper left") pyplot.savefig(self.output_dir / "prob_last.png") pyplot.close("all") pyplot.plot( x, np.concatenate((predicted_prob[0, 249::-1], predicted_prob[0, :250])), "--", label="prediction", ) pyplot.plot(x, empirical_prob_mid[0], "-", label="simulation") pyplot.plot( x, np.concatenate((predicted_prob[1, 249::-1], predicted_prob[1, :250])), "--", ) pyplot.plot(x, empirical_prob_mid[1], "-") pyplot.plot( x, np.concatenate((predicted_prob[2, 249::-1], predicted_prob[2, :250])), "--", ) pyplot.plot(x, empirical_prob_mid[2], "-") pyplot.xlabel("chromosome positon") pyplot.ylabel("fraction of trees identical to middle position tree") pyplot.legend(loc="upper right") pyplot.savefig(self.output_dir / "prob_mid.png") pyplot.close("all") x = np.arange(10) + 1 pyplot.plot(x, predicted_prob[0, range(10)], "--", label="prediction") pyplot.plot(x, empirical_prob_first[0, range(10)], "-", 
label="simulation") pyplot.plot(x, predicted_prob[1, range(10)], "--") pyplot.plot(x, empirical_prob_first[1, range(10)], "-") pyplot.plot(x, predicted_prob[2, range(10)], "--") pyplot.plot(x, empirical_prob_first[2, range(10)], "-") pyplot.xlabel("chromosome positon") pyplot.ylabel("fraction of trees identical to first position tree") pyplot.legend(loc="upper right") pyplot.savefig(self.output_dir / "prob_first_zoom.png") pyplot.close("all") def test_gc_tract_length_expectation(self): """ Runs the check for the mean length of gene conversion tracts. """ num_replicates = 100 n = 10 gene_conversion_rate = 5 gc_tract_lengths = np.append(np.arange(1, 5.25, 0.25), [10, 50]) for discrete_genome in [True, False]: data_to_plot = [] for k, l in enumerate(gc_tract_lengths): num_gc_events = np.zeros(num_replicates) num_internal_gc_events = np.zeros(num_replicates) sum_internal_gc_tract_lengths = np.zeros(num_replicates) sim = msprime.ancestry._parse_sim_ancestry( samples=n, sequence_length=100, gene_conversion_rate=gene_conversion_rate, gene_conversion_tract_length=gc_tract_lengths[k], discrete_genome=discrete_genome, ploidy=1, ) for j, _ts in enumerate(sim.run_replicates(num_replicates)): num_gc_events[j] = sim.num_gene_conversion_events num_internal_gc_events[j] = sim.num_internal_gene_conversion_events sum_internal_gc_tract_lengths[j] = sim.sum_internal_gc_tract_lengths sim.reset() data_to_plot.append( sum_internal_gc_tract_lengths / num_internal_gc_events / l ) pyplot.boxplot(data_to_plot, labels=gc_tract_lengths) pyplot.xlabel("tl: mean tract length specified") pyplot.ylabel("average internal tract length / tl") filename = f"mean_gc_tract_lengths_discrete={int(discrete_genome)}.png" pyplot.savefig(self.output_dir / filename) pyplot.close("all") def get_tbl_distribution(self, n, R, executable): """ Returns an array of the R total branch length values from the specified ms-like executable. """ cmd = executable + f"{n} {R} -T -p 10".split() cmd += self.get_ms_seeds() logging.debug(f"\t {' '.join(cmd)}") output = subprocess.check_output(cmd) tbl = np.zeros(R) j = 0 for line in output.splitlines(): if line.startswith(b"("): t = dendropy.Tree.get_from_string(line.decode(), schema="newick") tbl[j] = t.length() j += 1 return tbl def get_analytical_tbl(self, n, t): """ Returns the probabily density of the total branch length t with a sample of n lineages. Wakeley Page 78. """ t1 = (n - 1) / 2 t2 = math.exp(-t / 2) t3 = pow(1 - math.exp(-t / 2), n - 2) return t1 * t2 * t3 def test_analytical_tbl(self): """ Runs the check for the total branch length. """ R = 10000 for n in range(2, 15): logging.debug(f"Running for n = {n}") tbl_ms = self.get_tbl_distribution(n, R, _ms_executable) tbl_msp = self.get_tbl_distribution(n, R, _mspms_executable) sm.graphics.qqplot(tbl_ms) sm.qqplot_2samples(tbl_ms, tbl_msp, line="45") pyplot.savefig(self.output_dir / f"qqplot_{n}.png", dpi=72) pyplot.close("all") hist_ms, bin_edges = np.histogram(tbl_ms, 20, density=True) hist_msp, _ = np.histogram(tbl_msp, bin_edges, density=True) index = bin_edges[:-1] # NOTE We don't to have the analytical value quite right here, # but since the value is so very close to ms's, there doesn't # seem to be much point in trying to fix it. 
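            # The density evaluated below is the one implemented in
            # get_analytical_tbl() (Wakeley, p. 78):
            #   f(t) = ((n - 1) / 2) * exp(-t / 2) * (1 - exp(-t / 2)) ** (n - 2)
            # The factor of 2 in the call below appears to rescale ms's 4N-scaled
            # branch lengths to the 2N-scaled coalescent time assumed by that formula.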
analytical = [self.get_analytical_tbl(n, x * 2) for x in index] fig, ax = pyplot.subplots() bar_width = 0.15 pyplot.bar(index, hist_ms, bar_width, color="b", label="ms") pyplot.bar(index + bar_width, hist_msp, bar_width, color="r", label="msp") pyplot.plot(index + bar_width, analytical, "o", color="k") pyplot.legend() # pyplot.xticks(index + bar_width, [str(j) for j in index]) pyplot.tight_layout() pyplot.savefig(self.output_dir / f"hist_{n}.png") def get_num_trees(self, cmd, R): logging.debug(f"\t {' '.join(cmd)}") output = subprocess.check_output(cmd) T = np.zeros(R) j = -1 for line in output.splitlines(): if line.startswith(b"//"): j += 1 if line.startswith(b"["): T[j] += 1 return T def test_analytical_num_trees(self): """ Runs the check for number of trees using the CLI. """ r = 1e-8 # Per generation recombination rate. num_loci = np.linspace(100, 10 ** 5, 10).astype(int) Ne = 10 ** 4 n = 100 rho = r * 4 * Ne * (num_loci - 1) num_replicates = 100 ms_mean = np.zeros_like(rho) msp_mean = np.zeros_like(rho) for j in range(len(num_loci)): cmd = "{} {} -T -r {} {}".format(n, num_replicates, rho[j], num_loci[j]) T = self.get_num_trees( _ms_executable + cmd.split() + self.get_ms_seeds(), num_replicates ) ms_mean[j] = np.mean(T) T = self.get_num_trees( _mspms_executable + cmd.split() + self.get_ms_seeds(), num_replicates ) msp_mean[j] = np.mean(T) pyplot.plot(rho, ms_mean, "o") pyplot.plot(rho, msp_mean, "^") pyplot.plot(rho, rho * harmonic_number(n - 1), "-") pyplot.savefig(self.output_dir / "mean.png") pyplot.close("all") def get_pairwise_coalescence_time(self, cmd, R): # logging.debug(f"\t {' '.join(cmd)}") output = subprocess.check_output(cmd) T = np.zeros(R) j = 0 for line in output.splitlines(): if line.startswith(b"("): t = dendropy.Tree.get_from_string(line.decode(), schema="newick") a = t.calc_node_ages() T[j] = a[-1] j += 1 return T def test_analytical_pairwise_island_model(self): """ Runs the check for the pairwise coalscence times for within and between populations. """ R = 10000 M = 0.2 for d in range(2, 6): cmd = "2 {} -T -I {} 2 {} {}".format(R, d, "0 " * (d - 1), M) T_w_ms = self.get_pairwise_coalescence_time( _ms_executable + cmd.split() + self.get_ms_seeds(), R ) T_w_msp = self.get_pairwise_coalescence_time( _mspms_executable + cmd.split() + self.get_ms_seeds(), R ) cmd = "2 {} -T -I {} 1 1 {} {}".format(R, d, "0 " * (d - 2), M) T_b_ms = self.get_pairwise_coalescence_time( _ms_executable + cmd.split() + self.get_ms_seeds(), R ) T_b_msp = self.get_pairwise_coalescence_time( _mspms_executable + cmd.split() + self.get_ms_seeds(), R ) t_within = d / 2 t_between = (d + (d - 1) / M) / 2 logging.debug( f"d={d} within=({np.mean(T_w_msp):.2f},{t_within}) " f"between=({np.mean(T_b_msp):.2f}, {t_between})" ) sm.graphics.qqplot(T_w_ms) sm.qqplot_2samples(T_w_ms, T_w_msp, line="45") pyplot.savefig(self.output_dir / f"within_{d}.png", dpi=72) pyplot.close("all") sm.graphics.qqplot(T_b_ms) sm.qqplot_2samples(T_b_ms, T_b_msp, line="45") pyplot.savefig(self.output_dir / f"between_{d}.png", dpi=72) pyplot.close("all") class DemographyDebugger(Test): """ Tests for the demography debugger methods. """ def verify_ddb_mean_coaltime(self, model_factory, name): """ Checks the mean coalescence time calculation against pi. 
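        Pairwise nucleotide diversity from the simulated replicates, divided by
        twice the mutation rate, is used as the empirical estimate of the mean
        pairwise coalescence time.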
""" num_reps = 20 T = [] U = [] logging.debug("coaltime: theory mean sd z") for k, model in enumerate(model_factory()): ddb = msprime.DemographyDebugger( population_configurations=model["population_configurations"], demographic_events=model["demographic_events"], migration_matrix=model["migration_matrix"], ) u = ddb.mean_coalescence_time(num_samples=model["sample_size"], max_iter=18) U.append(u) mut_rate = 1e-7 replicates = msprime.simulate( length=1e7, recombination_rate=1e-8, mutation_rate=mut_rate, population_configurations=model["population_configurations"], demographic_events=model["demographic_events"], migration_matrix=model["migration_matrix"], random_seed=5 + k, num_replicates=num_reps, ) TT = np.zeros(num_reps) for j, ts in enumerate(replicates): TT[j] = ts.diversity(ts.samples()) TT[j] /= 2 * mut_rate T.append(TT) mT = np.mean(TT) sT = np.std(TT) logging.debug( " {:.2f} {:.2f} {:.2f} {:.2f}".format( u, mT, sT, (u - mT) / (sT / np.sqrt(num_reps)) ) ) U = np.array(U) T = np.array(T) fig, ax = pyplot.subplots() ax.scatter(np.column_stack([U] * T.shape[1]), T) ax.scatter(U, np.mean(T, axis=1)) # where oh where is abline(0,1) x_vals = np.array(ax.get_xlim()) ax.plot(x_vals, x_vals, "--") ax.set_xlabel("calculated mean coaltime") ax.set_ylabel("pairwise diversity, scaled") pyplot.savefig(self.output_dir / f"{name}_mean_coaltimes.png") pyplot.close("all") def random_model_factory(self): """ Checks the mean coalescence time calculation against pi. """ random.seed(5) num_models = 20 for _ in range(num_models): Ne = 100 npops = 4 pop_sizes = [random.uniform(0.1, 1) * Ne for _ in range(npops)] growth_rates = [random.uniform(-0.001, 0.001) for _ in range(npops)] migration_matrix = [ [random.random() * (i != j) for j in range(npops)] for i in range(npops) ] sample_size = [random.randint(2, 10) for _ in range(npops)] population_configurations = [ msprime.PopulationConfiguration( initial_size=j, sample_size=n, growth_rate=r ) for j, n, r in zip(pop_sizes, sample_size, growth_rates) ] demographic_events = [] for i in [0, 1]: n = random.uniform(0.1, 10) * Ne r = 0 demographic_events.append( msprime.PopulationParametersChange( time=100, initial_size=n, growth_rate=r, population_id=i ) ) for ij in [(0, 1), (2, 3), (0, 3)]: demographic_events.append( msprime.MigrationRateChange(180, random.random(), matrix_index=ij) ) demographic_events.append( msprime.MassMigration(time=200, source=3, dest=0, proportion=0.3) ) for i in [1, 3]: n = random.uniform(0.1, 10) * Ne r = random.uniform(-0.01, 0.01) demographic_events.append( msprime.PopulationParametersChange( time=210, initial_size=n, growth_rate=r, population_id=i ) ) for i in [1, 2, 3]: n = random.uniform(0.1, 10) * Ne r = random.uniform(0.0, 0.01) demographic_events.append( msprime.PopulationParametersChange( time=250, initial_size=n, growth_rate=r, population_id=i ) ) yield { "population_configurations": population_configurations, "demographic_events": demographic_events, "migration_matrix": migration_matrix, "sample_size": sample_size, } def migration_model_factory(self): random.seed(5) Ne = 100 npops = 3 num_models = 10 for k in range(num_models): pop_sizes = [Ne] * (npops - 1) + [Ne * (2 ** k)] migration_matrix = [ [2 ** (k - 4) * ((i - j) % npops == 1) / Ne for j in range(npops)] for i in range(npops) ] sample_size = [1 + j for j in range(npops)] population_configurations = [ msprime.PopulationConfiguration(initial_size=j, sample_size=n) for j, n in zip(pop_sizes, sample_size) ] demographic_events = [] yield { "population_configurations": 
population_configurations, "demographic_events": demographic_events, "migration_matrix": migration_matrix, "sample_size": sample_size, } def popsize_change_model_factory(self): random.seed(5) Ne = 100 npops = 3 num_models = 16 change_times = [j * Ne / 4 for j in range(8)] for k in range(num_models): pop_sizes = [Ne] * (npops - 1) + [Ne * (2 ** k)] migration_matrix = [ [10 * ((i - j) % npops == 1) / Ne for j in range(npops)] for i in range(npops) ] sample_size = [1 + j for j in range(npops)] population_configurations = [ msprime.PopulationConfiguration(initial_size=j, sample_size=n) for j, n in zip(pop_sizes, sample_size) ] demographic_events = [] for t in change_times: pop_sizes = pop_sizes[1:] + pop_sizes[:1] r = 0 for i, n in enumerate(pop_sizes): demographic_events.append( msprime.PopulationParametersChange( time=t, initial_size=n, growth_rate=r, population_id=i ) ) yield { "population_configurations": population_configurations, "demographic_events": demographic_events, "migration_matrix": migration_matrix, "sample_size": sample_size, } def test_random_mean_coaltime(self): """ Checks the mean coalescence time calculation against pi. """ self.verify_ddb_mean_coaltime(self.random_model_factory, "random") def test_popsize_change_mean_coaltime(self): """ Checks the mean coalescence time calculation against pi for some models with population size changes. """ self.verify_ddb_mean_coaltime( self.popsize_change_model_factory, "popsize_change" ) def test_migration_mean_coaltime(self): """ Checks the mean coalescence time calculation against pi for some models with migration. """ self.verify_ddb_mean_coaltime(self.migration_model_factory, "migration") class SmcTest(Test): """ Tests for the SMC model against scrm. """ def get_scrm_num_trees(self, cmd, R): logging.debug(f"\t {' '.join(cmd)}") output = subprocess.check_output(cmd) T = np.zeros(R) j = -1 for line in output.splitlines(): if line.startswith(b"//"): j += 1 if line.startswith(b"time"): T[j] += 1 return T def get_scrm_oldest_time(self, cmd, R): logging.debug(f"\t {' '.join(cmd)}") output = subprocess.check_output(cmd) T = np.zeros(R) j = -1 for line in output.splitlines(): if line.startswith(b"//"): j += 1 if line.startswith(b"time:"): T[j] = max(T[j], float(line.split()[1])) return T def test_smc_oldest_time(self): """ Runs the check for number of trees using the CLI. """ r = 1e-8 # Per generation recombination rate. num_loci = np.linspace(100, 10 ** 5, 10).astype(int) Ne = 10 ** 4 n = 100 rho = r * 4 * Ne * (num_loci - 1) num_replicates = 1000 scrm_mean = np.zeros_like(rho) scrm_smc_mean = np.zeros_like(rho) msp_mean = np.zeros_like(rho) msp_smc_mean = np.zeros_like(rho) for j in range(len(num_loci)): cmd = "{} {} -L -r {} {} -p 14".format( n, num_replicates, rho[j], num_loci[j] ) T = self.get_scrm_oldest_time( _scrm_executable + cmd.split() + self.get_ms_seeds(), num_replicates ) scrm_mean[j] = np.mean(T) cmd += " -l 0" T = self.get_scrm_oldest_time( _scrm_executable + cmd.split() + self.get_ms_seeds(), num_replicates ) scrm_smc_mean[j] = np.mean(T) for dest, model in [(msp_mean, "hudson"), (msp_smc_mean, "smc_prime")]: replicates = msprime.simulate( sample_size=n, length=num_loci[j], recombination_rate=r, Ne=Ne, num_replicates=num_replicates, model=model, ) T = np.zeros(num_replicates) for k, ts in enumerate(replicates): for record in ts.records(): T[k] = max(T[k], record.time) # Normalise back to coalescent time. 
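                # msprime node times are in generations; dividing by 4 * Ne puts
                # them on the ms/scrm coalescent time scale (units of 4N generations).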
T /= 4 * Ne dest[j] = np.mean(T) pyplot.plot(rho, scrm_mean, "-", color="blue", label="scrm") pyplot.plot(rho, scrm_smc_mean, "-", color="red", label="scrm_smc") pyplot.plot(rho, msp_smc_mean, "--", color="red", label="msprime_smc") pyplot.plot(rho, msp_mean, "--", color="blue", label="msprime") pyplot.xlabel("rho") pyplot.ylabel("Mean oldest coalescence time") pyplot.legend(loc="lower right") pyplot.savefig(self.output_dir / "mean.png") pyplot.close("all") def test_smc_num_trees(self): """ Runs the check for number of trees in the SMC and full coalescent using the API. We compare this with scrm using the SMC as a check. """ r = 1e-8 # Per generation recombination rate. L = np.linspace(100, 10 ** 5, 10).astype(int) Ne = 10 ** 4 n = 100 rho = r * 4 * Ne * (L - 1) num_replicates = 10000 num_trees = np.zeros(num_replicates) mean_exact = np.zeros_like(rho) var_exact = np.zeros_like(rho) mean_smc = np.zeros_like(rho) var_smc = np.zeros_like(rho) mean_smc_prime = np.zeros_like(rho) var_smc_prime = np.zeros_like(rho) mean_scrm = np.zeros_like(rho) var_scrm = np.zeros_like(rho) for j in range(len(L)): # Run SCRM under the SMC to see if we get the correct variance. cmd = "{} {} -L -r {} {} -l 0".format(n, num_replicates, rho[j], L[j]) T = self.get_scrm_num_trees( _scrm_executable + cmd.split() + self.get_ms_seeds(), num_replicates ) mean_scrm[j] = np.mean(T) var_scrm[j] = np.var(T) # IMPORTANT!! We have to use the get_num_breakpoints method # on the simulator as there is a significant drop in the number # of trees if we use the tree sequence. There is a significant # number of common ancestor events that result in a recombination # being undone. exact_sim = msprime.ancestry._parse_simulate( sample_size=n, recombination_rate=r, Ne=Ne, length=L[j] ) for k in range(num_replicates): exact_sim.run() num_trees[k] = exact_sim.num_breakpoints exact_sim.reset() mean_exact[j] = np.mean(num_trees) var_exact[j] = np.var(num_trees) smc_sim = msprime.ancestry._parse_simulate( sample_size=n, recombination_rate=r, Ne=Ne, length=L[j], model="smc" ) for k in range(num_replicates): smc_sim.run() num_trees[k] = smc_sim.num_breakpoints smc_sim.reset() mean_smc[j] = np.mean(num_trees) var_smc[j] = np.var(num_trees) smc_prime_sim = msprime.ancestry._parse_simulate( sample_size=n, recombination_rate=r, Ne=Ne, length=L[j], model="smc_prime", ) for k in range(num_replicates): smc_prime_sim.run() num_trees[k] = smc_prime_sim.num_breakpoints smc_prime_sim.reset() mean_smc_prime[j] = np.mean(num_trees) var_smc_prime[j] = np.var(num_trees) pyplot.plot(rho, mean_exact, "o", label="msprime (hudson)") pyplot.plot(rho, mean_smc, "^", label="msprime (smc)") pyplot.plot(rho, mean_smc_prime, "*", label="msprime (smc_prime)") pyplot.plot(rho, mean_scrm, "x", label="scrm") pyplot.plot(rho, rho * harmonic_number(n - 1), "-") pyplot.legend(loc="upper left") pyplot.xlabel("scaled recombination rate rho") pyplot.ylabel("Mean number of breakpoints") pyplot.savefig(self.output_dir / "mean.png") pyplot.close("all") v = np.zeros(len(rho)) for j in range(len(rho)): v[j] = get_predicted_variance(n, rho[j]) pyplot.plot(rho, var_exact, "o", label="msprime (hudson)") pyplot.plot(rho, var_smc, "^", label="msprime (smc)") pyplot.plot(rho, var_smc_prime, "*", label="msprime (smc_prime)") pyplot.plot(rho, var_scrm, "x", label="scrm") pyplot.plot(rho, v, "-") pyplot.xlabel("scaled recombination rate rho") pyplot.ylabel("variance in number of breakpoints") pyplot.legend(loc="upper left") pyplot.savefig(self.output_dir / "var.png") pyplot.close("all") 
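# A minimal usage sketch (not exercised by the test suite) of the two-stage
# "simulate from" workflow verified by the SimulateFrom class below: a simulation
# stopped at end_time is completed by a second call that starts from the resulting
# tree sequence. The function name and default parameter values are illustrative only.
def _example_simulate_from(n=10, end_time=1.0, seed=1):
    # Stop the first simulation early, potentially leaving multiple roots.
    partial_ts = msprime.simulate(n, end_time=end_time, random_seed=seed)
    # Complete the ancestry, starting from the oldest node in the partial result.
    completed_ts = msprime.simulate(
        from_ts=partial_ts,
        start_time=np.max(partial_ts.tables.nodes.time),
        random_seed=seed + 1,
    )
    # Simplify, as the verification tests below do, before comparing statistics.
    return completed_ts.simplify()

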
class SimulateFrom(Test): def test_simulate_from_single_locus(self): num_replicates = 1000 for n in [10, 50, 100, 200]: logging.debug(f"running for n = {n}") T1 = np.zeros(num_replicates) reps = msprime.simulate(n, num_replicates=num_replicates) for j, ts in enumerate(reps): T1[j] = np.max(ts.tables.nodes.time) for t in [0.5, 1, 1.5, 5]: T2 = np.zeros(num_replicates) reps = msprime.simulate(n, num_replicates=num_replicates, end_time=t) for j, ts in enumerate(reps): final_ts = msprime.simulate( from_ts=ts, start_time=np.max(ts.tables.nodes.time) ) final_ts = final_ts.simplify() T2[j] = np.max(final_ts.tables.nodes.time) sm.graphics.qqplot(T1) sm.qqplot_2samples(T1, T2, line="45") filename = self.output_dir / f"T_mrca_n={n}_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") def test_simulate_from_multi_locus(self): num_replicates = 1000 n = 50 for m in [10, 50, 100, 1000]: logging.debug(f"running for m = {m}") T1 = np.zeros(num_replicates) num_trees1 = np.zeros(num_replicates) recomb_rate = 1 / m reps = msprime.sim_ancestry( n, recombination_rate=recomb_rate, population_size=1, sequence_length=m, num_replicates=num_replicates, ) for j, ts in enumerate(reps): T1[j] = np.max(ts.tables.nodes.time) num_trees1[j] = ts.num_trees for t in [0.5, 1, 1.5, 5]: T2 = np.zeros(num_replicates) num_trees2 = np.zeros(num_replicates) reps = msprime.sim_ancestry( n, num_replicates=num_replicates, recombination_rate=recomb_rate, sequence_length=m, end_time=t, ) for j, ts in enumerate(reps): final_ts = msprime.sim_ancestry( initial_state=ts, population_size=1, recombination_rate=recomb_rate, start_time=np.max(ts.tables.nodes.time), ) final_ts = final_ts.simplify() T2[j] = np.max(final_ts.tables.nodes.time) num_trees2[j] = final_ts.num_trees sm.graphics.qqplot(T1) sm.qqplot_2samples(T1, T2, line="45") filename = self.output_dir / f"T_mrca_m={m}_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_trees1) sm.qqplot_2samples(num_trees1, num_trees2, line="45") filename = self.output_dir / f"num_trees_m={m}_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") def test_simulate_from_recombination(self): num_replicates = 1000 n = 100 recombination_rate = 10 T1 = np.zeros(num_replicates) num_trees1 = np.zeros(num_replicates) num_edges1 = np.zeros(num_replicates) num_nodes1 = np.zeros(num_replicates) reps = msprime.simulate( n, recombination_rate=recombination_rate, num_replicates=num_replicates ) for j, ts in enumerate(reps): T1[j] = np.max(ts.tables.nodes.time) num_trees1[j] = ts.num_trees num_nodes1[j] = ts.num_nodes num_edges1[j] = ts.num_edges logging.debug( "original mean: trees={:.2f} nodes={:.2f} edges={:.2f}".format( np.mean(num_trees1), np.mean(num_nodes1), np.mean(num_edges1) ) ) for t in [0.5, 1.0, 1.5, 5.0]: T2 = np.zeros(num_replicates) num_trees2 = np.zeros(num_replicates) num_nodes2 = np.zeros(num_replicates) num_edges2 = np.zeros(num_replicates) reps = msprime.simulate( n, num_replicates=num_replicates, recombination_rate=recombination_rate, end_time=t, ) for j, ts in enumerate(reps): final_ts = msprime.simulate( from_ts=ts, recombination_rate=recombination_rate, start_time=np.max(ts.tables.nodes.time), ) assert max(t.num_roots for t in final_ts.trees()) == 1 final_ts = final_ts.simplify() T2[j] = np.max(final_ts.tables.nodes.time) num_trees2[j] = final_ts.num_trees num_nodes2[j] = final_ts.num_nodes num_edges2[j] = final_ts.num_edges logging.debug( "t = {} mean: trees={:.2f} nodes={:.2f} edges={:.2f}".format( t, np.mean(num_trees2), 
np.mean(num_nodes2), np.mean(num_edges2) ) ) sm.graphics.qqplot(T1) sm.qqplot_2samples(T1, T2, line="45") filename = self.output_dir / f"T_mrca_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_trees1) sm.qqplot_2samples(num_trees1, num_trees2, line="45") filename = self.output_dir / f"num_trees_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_edges1) sm.qqplot_2samples(num_edges1, num_edges2, line="45") filename = self.output_dir / f"num_edges_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_nodes1) sm.qqplot_2samples(num_nodes1, num_nodes2, line="45") filename = self.output_dir / f"num_nodes_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") def test_simulate_from_demography(self): # TODO this test is considerably complicated by the fact that we # can't compare migrations without having support in simplify. # When simplify with migrations support is added, also add a test # here to check that the number of migrations is equivalent. # It's still a good check to have the underlying numbers of # events reported though, so keep these now that it's implemented. num_replicates = 1000 n = 50 recombination_rate = 10 samples = [msprime.Sample(time=0, population=j % 2) for j in range(n)] population_configurations = [ msprime.PopulationConfiguration(), msprime.PopulationConfiguration(), ] migration_matrix = [[0, 1], [1, 0]] demographic_events = [ msprime.SimpleBottleneck(time=5.1, population=0, proportion=0.4), msprime.SimpleBottleneck(time=10.1, population=1, proportion=0.4), msprime.SimpleBottleneck(time=15.1, population=1, proportion=0.4), msprime.SimpleBottleneck(time=25.1, population=0, proportion=0.4), ] T1 = np.zeros(num_replicates) num_ca_events1 = np.zeros(num_replicates) num_re_events1 = np.zeros(num_replicates) num_mig_events1 = np.zeros(num_replicates) num_trees1 = np.zeros(num_replicates) num_edges1 = np.zeros(num_replicates) num_nodes1 = np.zeros(num_replicates) sim = msprime.ancestry._parse_simulate( samples=samples, population_configurations=population_configurations, migration_matrix=migration_matrix, demographic_events=demographic_events, recombination_rate=recombination_rate, ) logging.debug("t\ttrees\tnodes\tedges\tca\tre\tmig") for j, ts in enumerate(sim.run_replicates(num_replicates)): num_ca_events1[j] = sim.num_common_ancestor_events num_re_events1[j] = sim.num_recombination_events num_mig_events1[j] = sum( [r for row in sim.num_migration_events for r in row] ) T1[j] = np.max(ts.tables.nodes.time) num_trees1[j] = ts.num_trees num_nodes1[j] = ts.num_nodes num_edges1[j] = ts.num_edges sim.reset() logging.debug( "{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}".format( -1, np.mean(num_trees1), np.mean(num_nodes1), np.mean(num_edges1), np.mean(num_ca_events1), np.mean(num_re_events1), np.mean(num_mig_events1), ) ) for t in [5.0, 10.0, 15.0, 25.0]: T2 = np.zeros(num_replicates) num_trees2 = np.zeros(num_replicates) num_nodes2 = np.zeros(num_replicates) num_edges2 = np.zeros(num_replicates) num_ca_events2 = np.zeros(num_replicates) num_re_events2 = np.zeros(num_replicates) num_mig_events2 = np.zeros(num_replicates) sim = msprime.ancestry._parse_simulate( samples=samples, population_configurations=population_configurations, migration_matrix=migration_matrix, demographic_events=demographic_events, recombination_rate=recombination_rate, end_time=t, ) for j, ts in enumerate(sim.run_replicates(num_replicates)): num_ca_events2[j] = 
sim.num_common_ancestor_events num_re_events2[j] = sim.num_recombination_events num_mig_events2[j] = sum( [r for row in sim.num_migration_events for r in row] ) max_time = max(node.time for node in ts.nodes()) sim2 = msprime.ancestry._parse_simulate( from_ts=ts, population_configurations=population_configurations, migration_matrix=migration_matrix, demographic_events=[ e for e in demographic_events if e.time > max_time ], recombination_rate=recombination_rate, ) final_ts = next(sim2.run_replicates(1)).simplify() num_ca_events2[j] += sim2.num_common_ancestor_events num_re_events2[j] += sim2.num_recombination_events num_mig_events2[j] += sum( [r for row in sim2.num_migration_events for r in row] ) T2[j] = np.max(final_ts.tables.nodes.time) num_trees2[j] = final_ts.num_trees num_nodes2[j] = final_ts.num_nodes num_edges2[j] = final_ts.num_edges sim.reset() logging.debug( "{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}\t{:.2f}".format( t, np.mean(num_trees2), np.mean(num_nodes2), np.mean(num_edges2), np.mean(num_ca_events2), np.mean(num_re_events2), np.mean(num_mig_events2), ) ) sm.graphics.qqplot(T1) sm.qqplot_2samples(T1, T2, line="45") filename = self.output_dir / f"T_mrca_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_trees1) sm.qqplot_2samples(num_trees1, num_trees2, line="45") filename = self.output_dir / f"num_trees_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_edges1) sm.qqplot_2samples(num_edges1, num_edges2, line="45") filename = self.output_dir / f"num_edges_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_nodes1) sm.qqplot_2samples(num_nodes1, num_nodes2, line="45") filename = self.output_dir / f"num_nodes_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_ca_events1) sm.qqplot_2samples(num_ca_events1, num_ca_events2, line="45") filename = self.output_dir / f"num_ca_events_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_re_events1) sm.qqplot_2samples(num_re_events1, num_re_events2, line="45") filename = self.output_dir / f"num_re_events_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") sm.graphics.qqplot(num_mig_events1) sm.qqplot_2samples(num_mig_events1, num_mig_events2, line="45") filename = self.output_dir / f"num_mig_events_t={t}.png" pyplot.savefig(filename, dpi=72) pyplot.close("all") class MutationStatsTest(Test): def plot_relative_error(self, x_values, observed, expected, name): x = np.array([np.full(o.shape, xv) for xv, o in zip(x_values, observed)]) observed = np.array(observed) expected = np.array(expected) outfile = self._build_filename(None, name) if not np.all(observed[expected == 0] == 0): raise ValueError("Impossible mutations occurred!") nonzero = expected > 0 rel_err = (observed[nonzero] - expected[nonzero]) / expected[nonzero] unique_x = np.unique(x) x_index = np.searchsorted(unique_x, x[nonzero]) mean_rel_err = np.bincount(x_index, weights=np.abs(rel_err)) mean_rel_err /= np.bincount(x_index) n_expected = np.repeat(-1, len(unique_x)) for j, exp in enumerate(expected): n_expected[j] = 1 / np.mean(1 / exp[exp > 0]) fig, (ax1, ax2) = pyplot.subplots(1, 2, figsize=(12, 6)) ax1.scatter(x[nonzero], rel_err) ax1.plot([0, max(unique_x)], [0, 0], linestyle=":") ax1.set_xlabel("sample size") ax1.set_ylabel("relative error") ax2.plot(unique_x, mean_rel_err, label="mean relative error") ax2.plot( unique_x, 1 / np.sqrt(1 + n_expected), linestyle=":", label="rough 
expected behaviour", ) ax2.plot([0, max(unique_x)], [0, 0], linestyle=":") ax2.set_ylim(0, max(0.001, max(mean_rel_err))) ax2.set_xlabel("sample size") ax2.set_ylabel("mean relative error") ax2.legend() pyplot.savefig(outfile, dpi=72) pyplot.close(fig) def plot_uniform(self, x, name): outfile = self._build_filename(None, name) x = np.array(sorted(x)) fig, ax = pyplot.subplots(1, 1, figsize=(8, 8)) ax.scatter(np.linspace(0, 1, len(x)), x) ax.plot([-0.1, 1.1], [-0.1, 1.1], "r-", linewidth=2) ax.set_xlabel("expected relative mutation spacings") ax.set_ylabel("observed relative mutation spacings") pyplot.savefig(outfile, dpi=72) pyplot.close(fig) def plot_y_equals_x( self, xlist, ylist, titles, name, xlabel="expected", ylabel="observed" ): assert len(xlist) == len(ylist) assert len(xlist) == len(titles) outfile = self._build_filename(None, name) fig, (axes,) = pyplot.subplots( 1, len(xlist), figsize=(8 * len(xlist), 8), squeeze=False ) for ax, x, y, title in zip(axes, xlist, ylist, titles): x = np.array(x).flatten() y = np.array(y).flatten() xx = np.linspace(0.9 * min(x), 1.1 * max(x), 51) ax.scatter(x, y) ax.plot( [0.9 * min(x), 1.1 * np.max(x)], [0.9 * min(x), 1.1 * np.max(x)], "r-", linewidth=2, label="y = x", ) ax.plot( xx, xx + 4 * np.sqrt(xx), "r:", linewidth=2, label="rough expected bounds", ) ax.plot(xx, xx - 4 * np.sqrt(xx), "r:", linewidth=2) ax.legend() ax.set_title(title) ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) pyplot.savefig(outfile, dpi=72) pyplot.close(fig) def verify_model(self, model, name): L = 100000 ots = msprime.sim_ancestry( 8, random_seed=7, recombination_rate=3 / L, sequence_length=L, discrete_genome=True, ) for discrete_genome in (True, False): x = [] observed = [] expected = [] observed_roots = [] expected_roots = [] observed_rates = [] expected_rates = [] observed_times = [] for nmuts in (100, 500, 1000, 1500, 2500, 3500, 5000, 7500, 10000, 20000): rate = nmuts / L ts = msprime.sim_mutations( ots, random_seed=6, rate=rate, model=model, discrete_genome=discrete_genome, ) x.append(nmuts) # transitions obs, exp = self.verify_transitions(ts, model, discrete_genome, rate) observed.append(obs) expected.append(exp) # root distributions obs_roots, exp_roots = self.verify_roots( ts, model, discrete_genome, rate ) observed_roots.append(obs_roots) expected_roots.append(exp_roots) # mutation rates obs_rates, exp_rates = self.verify_mutation_rates( ts, model, rate, discrete_genome ) observed_rates.append(obs_rates) expected_rates.append(exp_rates) obs_times = self.verify_mutation_times(ts) observed_times.extend(obs_times) if discrete_genome: pname = f"{name}_discrete" else: pname = f"{name}_continuous" if name != "binary": self.plot_relative_error( x, observed=observed, expected=expected, name=pname + "_transitions" ) self.plot_relative_error( x, observed=observed_roots, expected=expected_roots, name=pname + "_roots", ) # check mutation times self.plot_uniform(observed_times, name=pname + "_times") # and overall mutation rate self.plot_y_equals_x( [observed_rates], [expected_rates], name=pname + "_rates", titles=["number of mutations"], ) def verify_stacking(self, model, name): # model should be parent-independent for j in range(len(model.alleles)): assert np.allclose(model.transition_matrix[0], model.transition_matrix[j]) L = 100000 ots = msprime.sim_ancestry( 8, random_seed=88, recombination_rate=3 / L, sequence_length=L, discrete_genome=True, ) rate = 10000 / L # mutate once ts1 = msprime.sim_mutations( ots, random_seed=99, rate=rate, model=model, ) ts2 = ots nsub = 10 for 
j in range(nsub): ts2 = msprime.sim_mutations( ts2, random_seed=99 + j, rate=rate / nsub, model=model, ) roots1 = self.count_roots(ts1, model) transitions1 = self.count_transitions(ts1, model) roots2 = self.count_roots(ts2, model) transitions2 = self.count_transitions(ts2, model) self.plot_y_equals_x( [roots1, transitions1], [roots2, transitions2], titles=["roots", "transitions"], name=name + "_stacking", xlabel="mutate once", ylabel="mutate many times", ) def count_roots(self, ts, model): alleles = model.alleles num_alleles = len(alleles) observed = np.zeros((num_alleles,)) for site in ts.sites(): aa = site.ancestral_state observed[alleles.index(aa)] += 1 return observed def count_transitions(self, ts, model): alleles = model.alleles num_alleles = len(alleles) observed = np.zeros((num_alleles, num_alleles)) for mut in ts.mutations(): if mut.parent == tskit.NULL: pa = ts.site(mut.site).ancestral_state else: pa = ts.mutation(mut.parent).derived_state da = mut.derived_state observed[alleles.index(pa), alleles.index(da)] += 1 return observed def verify_transitions(self, ts, model, discrete_genome, mutation_rate): observed = self.count_transitions(ts, model) expected = np.zeros(observed.shape) for j, (row, p) in enumerate(zip(observed, model.transition_matrix)): expected[j, :] = sum(row) * p return observed, expected def verify_roots(self, ts, model, discrete_genome, mutation_rate): observed = self.count_roots(ts, model) expected = np.zeros(observed.shape) for t in ts.trees(): if discrete_genome: t_span = np.ceil(t.interval[1] - np.ceil(t.interval[0])) expected += ( model.root_distribution * t_span * (1 - np.exp(-mutation_rate * t.total_branch_length)) ) else: t_span = t.span expected += ( model.root_distribution * mutation_rate * t.total_branch_length * t_span ) return observed, expected def verify_mutation_rates(self, ts, model, rate, discrete_genome): observed = np.zeros(ts.num_trees) expected = np.zeros(ts.num_trees) for j, t in enumerate(ts.trees()): if discrete_genome: span = np.ceil(t.interval[1]) - np.ceil(t.interval[0]) else: span = t.span mean = rate * span * t.total_branch_length observed[j] = t.num_mutations # if we draw an indepenent Poisson with the same mean # it should be greater than observed half the time it is different expected[j] = scipy.stats.poisson.rvs(mean, 1) return observed, expected def verify_mutation_times(self, ts): start_time = np.full(ts.num_mutations, -1, dtype=np.float32) end_time = np.full(ts.num_mutations, -1, dtype=np.float32) mut_time = np.full(ts.num_mutations, -1, dtype=np.float32) for t in ts.trees(): for mut in t.mutations(): mut_time[mut.id] = mut.time end_time[mut.id] = t.time(t.parent(mut.node)) start_time[mut.id] = t.time(mut.node) if mut.parent != tskit.NULL: end_time[mut.id] = min( end_time[mut.id], ts.mutation(mut.parent).time ) start_time[mut.parent] = max(start_time[mut.parent], mut.time) return (mut_time - start_time) / (end_time - start_time) def test_binary_model_stats(self): model = msprime.BinaryMutationModel() self.verify_model( model, name="binary", ) model = msprime.BinaryMutationModel(state_independent=True) self.verify_stacking(model, name="binary") def test_jukes_cantor_stats(self): model = msprime.JC69() self.verify_model( model, name="jukes_cantor", ) model = msprime.JC69(state_independent=True) self.verify_stacking(model, name="jukes_cantor") def test_HKY_stats(self): equilibrium_frequencies = [0.3, 0.2, 0.3, 0.2] model = msprime.HKY(kappa=0.75, equilibrium_frequencies=equilibrium_frequencies) self.verify_model(model, 
name="HKY") # now the state-independent version model = msprime.HKY( kappa=1.0, equilibrium_frequencies=equilibrium_frequencies, state_independent=True, ) self.verify_stacking(model, name="HKY") def test_F84_stats(self): equilibrium_frequencies = [0.4, 0.1, 0.1, 0.4] model = msprime.F84(kappa=0.75, equilibrium_frequencies=equilibrium_frequencies) self.verify_model(model, name="F84") # now the parent-independent version model = msprime.F84( kappa=1.0, equilibrium_frequencies=equilibrium_frequencies, state_independent=True, ) self.verify_stacking(model, name="F84") def test_GTR_stats(self): relative_rates = [0.2, 0.1, 0.7, 0.5, 0.3, 0.4] equilibrium_frequencies = [0.3, 0.4, 0.2, 0.1] model = msprime.GTR( relative_rates=relative_rates, equilibrium_frequencies=equilibrium_frequencies, ) self.verify_model(model, name="GTR") model = msprime.GTR( relative_rates=[1] * 6, equilibrium_frequencies=equilibrium_frequencies, state_independent=True, ) self.verify_stacking(model, name="GTR") def test_PAM_stats(self): model = msprime.PAM() self.verify_model(model, name="PAM") def test_BLOSUM62_stats(self): model = msprime.BLOSUM62() self.verify_model(model, name="BLOSUM62") def test_arbitrary_model_stats(self): model = msprime.MatrixMutationModel( alleles=["abc", "", "x"], root_distribution=[0.8, 0.0, 0.2], transition_matrix=[[0.2, 0.4, 0.4], [0.1, 0.2, 0.7], [0.5, 0.3, 0.2]], ) self.verify_model( model, name="arbitrary", ) class MutationRateMapTest(Test): def verify_subdivided(self, ts, rate_map, discrete=False): outfile = self._build_filename(None, "mutation_counts") sub_pos = np.unique( np.sort( np.concatenate( [ rate_map.position, np.linspace(0, rate_map.sequence_length, 101), ] ) ) ) sub_rate = [rate_map.get_rate(p) for p in sub_pos[:-1]] sub_map = msprime.RateMap(position=sub_pos, rate=sub_rate) t0 = msprime.sim_mutations(ts, rate=rate_map, discrete_genome=discrete).tables t1 = msprime.sim_mutations(ts, rate=sub_map, discrete_genome=discrete).tables if discrete: # make an equivalent map with breaks on integers # to use in calculating expected values int_pos = np.unique(np.ceil(rate_map.position)) int_rate = [rate_map.get_rate(p) for p in int_pos[:-1]] rate_map = msprime.RateMap(position=int_pos, rate=int_rate) bins = np.linspace(0, ts.sequence_length, min(101, int(ts.sequence_length + 1))) breaks = np.unique(np.sort(np.concatenate([bins, rate_map.position]))) segsites = ts.segregating_sites(windows=breaks, mode="branch") expected = np.bincount( np.searchsorted(bins, breaks[:-1], "right") - 1, weights=segsites, minlength=len(bins) - 1, ) for j in range(len(expected)): left = bins[j] right = bins[j + 1] mass = rate_map.get_cumulative_mass(right) - rate_map.get_cumulative_mass( left ) expected[j] *= mass lower = scipy.stats.poisson.ppf(0.025, expected) upper = scipy.stats.poisson.ppf(1 - 0.025, expected) counts0 = np.bincount( np.digitize(t0.sites.position[t0.mutations.site], bins) - 1, minlength=len(bins) - 1, ) counts1 = np.bincount( np.digitize(t1.sites.position[t1.mutations.site], bins) - 1, minlength=len(bins) - 1, ) fig, ax = pyplot.subplots(1, 1, figsize=(8, 6)) ax.scatter(bins[:-1], counts0, label="coarse map") ax.scatter(bins[:-1], counts1, label="fine map") ax.plot(bins[:-1], expected, label="expected number") ax.plot(bins[:-1], lower, "r:", linewidth=2, label="rough expected bounds") ax.plot(bins[:-1], upper, "r:", linewidth=2) ax.set_xlabel("Position") ax.set_ylabel("Num mutations in bin") ax.legend() pyplot.savefig(outfile, dpi=72) pyplot.close(fig) def test_subdivide(self): ts = 
msprime.sim_ancestry( 1000, sequence_length=1e6, recombination_rate=1e-8, population_size=10000, random_seed=1, ) rate_map = msprime.RateMap(position=[0, 1e6], rate=[1e-8]) self.verify_subdivided(ts, rate_map) def test_varying_rate(self): ts = msprime.sim_ancestry( 1000, sequence_length=1e6, recombination_rate=1e-8, population_size=10000, random_seed=1, ) rate_map = msprime.RateMap(position=[0, 3e5, 6e5, 1e6], rate=[2e-8, 1e-9, 1e-8]) self.verify_subdivided(ts, rate_map) def test_shorter_chromosome(self): ts = msprime.sim_ancestry( 1000, sequence_length=20, recombination_rate=0.05, population_size=100, random_seed=12, ) rate_map = msprime.RateMap( position=[0, 1.1, 10, 11.5, 13.8, 15.2, 15.9, 20], rate=[0.1, 0.2, 0.0, 0.2, 0.0, 100, 0.0], ) self.verify_subdivided(ts, rate_map, discrete=True) class MutationTest(Test): def _transition_matrix_chi_sq(self, transitions, transition_matrix): tm_chisq = [] for row, p in zip(transitions, transition_matrix): not_zeros = p > 0 if sum(not_zeros) > 1: chisq = scipy.stats.chisquare(row[not_zeros], p[not_zeros]) tm_chisq.append(chisq.statistic) else: tm_chisq.append(None) return tm_chisq def _transitions(self, sequences, ts, alleles, mutation_rate, Q): num_alleles = len(alleles) transitions = np.zeros((num_alleles, num_alleles), dtype=int) expected = np.zeros((num_alleles, num_alleles)) for edge in ts.edges(): for idx in range(int(ts.sequence_length)): p = sequences[edge.parent][idx] c = sequences[edge.child][idx] transitions[alleles.index(p), alleles.index(c)] += 1 j = alleles.index(p) expected[j, :] += _matrix_exponential( ts.first().branch_length(edge.child) * mutation_rate * Q )[j, :] return (transitions, expected) def get_allele_counts(self, ts): if ts.num_sites == 0: df_ts = allel.HaplotypeArray(np.zeros((2, ts.num_samples), dtype=int)) else: df_ts = allel.HaplotypeArray(ts.genotype_matrix()) return df_ts.count_alleles() def get_transition_stats(self, ts, alleles, mutation_rate, Q): num_alleles = len(alleles) observed_transitions_ts = np.zeros((num_alleles, num_alleles)) expected_ts = np.zeros((num_alleles, num_alleles)) corr = ts.sequence_length / ts.num_sites # -> for this method to perform optimally, corr==1 # at least one mutation on each site assert ts.num_trees == 1 tree = ts.first() for v in ts.variants(samples=range(ts.num_nodes), isolated_as_missing=False): for n in tree.nodes(): pn = tree.parent(n) if pn != tskit.NULL: pa = v.alleles[v.genotypes[pn]] else: pa = v.site.ancestral_state da = v.alleles[v.genotypes[n]] observed_transitions_ts[alleles.index(pa), alleles.index(da)] += 1 j = alleles.index(pa) expected_ts[j, :] += _matrix_exponential( tree.branch_length(n) * mutation_rate * corr * Q )[j, :] return observed_transitions_ts, expected_ts def plot_stats(self, df_test, df_msprime, alleles, test_prog, model): test_key = f"{test_prog}-{model}" # plot results for name in ["pi", "root_distribution"]: sg_results = sm.ProbPlot(df_test[name].dropna()) ts_results = sm.ProbPlot(df_msprime[name].dropna()) sm.qqplot_2samples( sg_results, ts_results, ylabel=f"quantiles {test_prog}", xlabel="quantiles msprime", line="45", ) outfile = self._build_filename(test_key, name) pyplot.savefig(outfile) pyplot.clf() if len(alleles) == 4: rows, columns = 2, 2 else: rows, columns = 5, 4 fig, axs = pyplot.subplots(rows, columns, figsize=(12, 12)) for i, co in enumerate(itertools.product(range(rows), range(columns))): a = alleles[i] size = min(df_test[a].dropna().size, df_msprime[a].dropna().size) temp_test = sm.ProbPlot(df_test[a].dropna()[:size]) temp_msprime = 
sm.ProbPlot(df_msprime[a].dropna()[:size]) sm.qqplot_2samples( temp_test, temp_msprime, ylabel=f"quantiles {test_prog}", xlabel="quantiles msprime", line="45", ax=axs[co], ) axs[co].set_title(a) outfile = self._build_filename(test_key, "alleles") pyplot.savefig(outfile) class SeqGenTest(MutationTest): _seq_gen_executable = ["./data/seq-gen"] def _run_seq_gen(self, tree, args, model, alleles, num_sites, mutation_rate, Q): ts = tree.tree_sequence newick = tree.newick() cmd = self._seq_gen_executable + args num_sequences = 2 * ts.num_samples - 1 with tempfile.TemporaryFile("w+") as in_file, tempfile.TemporaryFile( "w+" ) as out_file: in_file.write(newick) in_file.seek(0) subprocess.call(cmd, stdin=in_file, stdout=out_file) out_file.seek(0) sequences = {} # Skip the first line out_file.readline() for line, node in zip(out_file, ts.first().nodes()): sample_id, sequence = line.split() sequences[node] = sequence assert len(sequence) == ts.sequence_length assert len(sequences) == num_sequences num_alleles = len(alleles) ancestral_sequence = sequences[len(sequences) - 1] observed_ancestral_sg = np.zeros((num_alleles,)) for idx in np.random.choice(int(ts.sequence_length), num_sites, replace=False): b = ancestral_sequence[idx] observed_ancestral_sg[alleles.index(b)] += 1 def replace_variants(variants): u = np.unique(variants) repl = [i for i in range(len(u))] return np.array([dict(zip(u, repl))[i] for i in variants]) ord_sequences = { key: [ord(element) % 32 for element in value] for key, value in sequences.items() } transitions_sg, expected = self._transitions( sequences, ts, alleles, mutation_rate, Q ) sg_sequences = np.transpose( np.array([ord_sequences[key] for key in range(ts.num_samples)]) ) sg_reduced = np.apply_along_axis(replace_variants, 1, sg_sequences) sg_genotypes = allel.HaplotypeArray(sg_reduced) sg_counts = sg_genotypes.count_alleles() return (sg_counts, transitions_sg, observed_ancestral_sg, expected) def _run_seq_gen_msprime_stats(self, model, length=20, num_samples=10): """ Runs a comparison between mutations generated by msprime and seq_gen for the specified model and returns a tuple of data frames ready for plotting. 
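        For each replicate, nucleotide diversity (pi), the ancestral-state (root)
        distribution, and per-allele transition counts are compared between the
        two programs via chi-square statistics.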
""" model_dict = { "JC69": {"model_id": msprime.JC69(), "par": ["-m", "HKY"]}, "HKY": { "model_id": msprime.HKY( kappa=1.5, equilibrium_frequencies=[0.2, 0.3, 0.1, 0.4] ), "par": ["-m", "HKY", "-t", "0.75", "-f", "0.2,0.3,0.1,0.4"], }, "F84": { "model_id": msprime.F84( kappa=1.0, equilibrium_frequencies=[0.3, 0.25, 0.2, 0.25] ), "par": ["-m", "F84", "-t", "0.5", "-f", "0.3,0.25,0.2,0.25"], }, "GTR": { "model_id": msprime.GTR( relative_rates=[0.4, 0.1, 0.4, 0.2, 0.4, 0.4], equilibrium_frequencies=[0.3, 0.2, 0.3, 0.2], ), "par": [ "-m", "GTR", "-r", "0.4,0.1,0.4,0.2,0.4,0.4", "-f", "0.3,0.2,0.3,0.2", ], }, "PAM": {"model_id": msprime.PAM(), "par": ["-m", "PAM"]}, "BLOSUM62": { "model_id": msprime.BLOSUM62(), "par": ["-m", "BLOSUM"], }, } num_replicates = 250 sg_results = collections.defaultdict(list) ts_results = collections.defaultdict(list) pos = [i for i in range(1, length + 1)] transition_matrix = model_dict[model]["model_id"].transition_matrix root_distribution = model_dict[model]["model_id"].root_distribution alleles = model_dict[model]["model_id"].alleles num_alleles = len(alleles) mutation_rate = 1e-4 if num_alleles == 4 else 1.5e-3 Q = transition_matrix.copy() Q -= np.eye(num_alleles) mut_rate_seq_gen = np.sum(-Q.diagonal() * root_distribution) * mutation_rate args = ["-q", "-s", str(mut_rate_seq_gen), "-l", str(length), "-wa"] args += model_dict[model]["par"] Ne = 1e4 for _ in range(num_replicates): ts = msprime.simulate(num_samples, Ne=Ne, length=length) ts_mutated = msprime.sim_mutations( ts, rate=mutation_rate, model=model_dict[model]["model_id"], discrete_genome=True, ) num_sites = ts_mutated.num_sites t = ts_mutated.first() t_span = np.ceil(t.interval[1] - np.ceil(t.interval[0])) # expected number of ancestral alleles for sites expected_ancestral_states_ts = np.zeros(num_alleles) change_probs = transition_matrix.sum(axis=1) - np.diag(transition_matrix) expected_ancestral_states_ts += ( root_distribution * t_span * (1 - np.exp(-mutation_rate * t.total_branch_length * change_probs)) ) # observed number of ancestral alleles obs_ancestral_states_ts = np.zeros((num_alleles,)) for site in ts_mutated.sites(): aa = site.ancestral_state obs_ancestral_states_ts[alleles.index(aa)] += 1 # expected and observed number of transitions ts # root distribution == equilibrium freqs for these tests, # as is the case in seq-gen observed_transitions_ts, expected_ts = self.get_transition_stats( ts_mutated, alleles, mutation_rate, Q ) # run Seq-gen and calculate statistics ( c_sg, observed_transitions_sg, observed_ancestral_sg, expected_sg, ) = self._run_seq_gen( t, args, model_dict[model]["model_id"], alleles, num_sites, mutation_rate, Q, ) c_ts = self.get_allele_counts(ts_mutated) # Compute pi pi_sg = allel.sequence_diversity(pos, c_sg) sg_results["pi"].append(pi_sg) pi_ts = allel.sequence_diversity(pos, c_ts) ts_results["pi"].append(pi_ts) # Compute chisquare stats. 
tm_chisq_sg = self._transition_matrix_chi_sq( observed_transitions_sg, expected_sg ) # in Seq-Gen the ancestral sequence is determined first expected_num_ancestral_states_sg = root_distribution * num_sites root_chisq_sg = scipy.stats.chisquare( observed_ancestral_sg, expected_num_ancestral_states_sg ).statistic tm_chisq_ts = self._transition_matrix_chi_sq( observed_transitions_ts, expected_ts ) root_chisq_ts = scipy.stats.chisquare( obs_ancestral_states_ts, expected_ancestral_states_ts ).statistic ts_results["root_distribution"].append(root_chisq_ts) sg_results["root_distribution"].append(root_chisq_sg) for idx, a in enumerate(alleles): sg_results[a].append(tm_chisq_sg[idx]) ts_results[a].append(tm_chisq_ts[idx]) df_sg = pd.DataFrame.from_dict(sg_results) df_ts = pd.DataFrame.from_dict(ts_results) return df_sg, df_ts, alleles def _run_seq_gen_msprime_comparison(self, model, length=20, num_samples=10): df_sg, df_ts, alleles = self._run_seq_gen_msprime_stats( model, length, num_samples ) self.plot_stats(df_sg, df_ts, alleles, "seqgen", model) # Test methods def test_JC69(self): self._run_seq_gen_msprime_comparison("JC69") def test_HKY(self): self._run_seq_gen_msprime_comparison("HKY") def test_F84(self): self._run_seq_gen_msprime_comparison("F84") def test_GTR(self): self._run_seq_gen_msprime_comparison("GTR") def test_PAM(self): self._run_seq_gen_msprime_comparison("PAM") def test_BLOSUM62(self): self._run_seq_gen_msprime_comparison("BLOSUM62") @attr.s class PyvolveTest(MutationTest): def _run_pyvolve( self, tree, py_model, model, alleles, num_sites, mutation_rate, ts_mutrate, Q ): ts = tree.tree_sequence seq_length = int(ts.sequence_length) node_labels = {u: str(u) for u in ts.samples()} newick = tree.newick(node_labels=node_labels) pyvolve_tree = pyvolve.read_tree(tree=newick, scale_tree=mutation_rate) pyvolve_model = pyvolve.Partition(models=py_model, size=seq_length) sim = pyvolve.Evolver(tree=pyvolve_tree, partitions=pyvolve_model) sim(ratefile=None, infofile=None, seqfile=None) seqs = sim.get_sequences(anc=True) # seq-dict is sorted in pre-order sequences = {} for key, node in zip(seqs.keys(), ts.first().nodes()): sequences[node] = seqs[key] assert len(seqs[key]) == ts.sequence_length assert len(sequences) == 2 * ts.num_samples - 1 num_alleles = len(alleles) ancestral_sequence = sequences[len(sequences) - 1] roots_d_py = np.zeros((num_alleles,)) for idx in np.random.choice(int(ts.sequence_length), num_sites, replace=False): b = ancestral_sequence[idx] roots_d_py[alleles.index(b)] += 1 def replace_variants(variants): u = np.unique(variants) repl = [i for i in range(len(u))] return np.array([dict(zip(u, repl))[i] for i in variants]) ord_sequences = { key: [ord(element) % 32 for element in value] for key, value in sequences.items() } transitions_py, expected = self._transitions( sequences, ts, alleles, ts_mutrate, Q ) py_sequences = np.transpose( np.array([ord_sequences[key] for key in range(ts.num_samples)]) ) py_reduced = np.apply_along_axis(replace_variants, 1, py_sequences) py_genotypes = allel.HaplotypeArray(py_reduced) py_counts = py_genotypes.count_alleles() return (py_counts, transitions_py, roots_d_py, expected) def _run_pyvolve_stats(self, model, length=20, num_samples=10): model_dict = { "JC69": { "model_id": msprime.JC69(), "pyvolve_model": pyvolve.Model("nucleotide"), }, "HKY": { "model_id": msprime.HKY( kappa=1.5, equilibrium_frequencies=[0.2, 0.3, 0.1, 0.4] ), "pyvolve_model": pyvolve.Model( "nucleotide", {"kappa": 1.5, "state_freqs": [0.2, 0.3, 0.1, 0.4]} ), }, "PAM": { 
"model_id": msprime.PAM(), "pyvolve_model": pyvolve.Model("DAYHOFFDCMUT"), }, "BLOSUM62": { "model_id": msprime.BLOSUM62(), "pyvolve_model": pyvolve.Model("BLOSUM62"), }, } num_replicates = 250 py_results = collections.defaultdict(list) ts_results = collections.defaultdict(list) pos = [i for i in range(1, length + 1)] alleles = model_dict[model]["model_id"].alleles num_alleles = len(alleles) mutation_rate = 1e-4 if num_alleles == 4 else 1.5e-3 transition_matrix = model_dict[model]["model_id"].transition_matrix root_distribution = model_dict[model]["model_id"].root_distribution Q = transition_matrix.copy() Q -= np.eye(num_alleles) mut_rate_pyvolve = np.sum(-Q.diagonal() * root_distribution) * mutation_rate for _ in range(num_replicates): ts = msprime.simulate(num_samples, Ne=1e4, length=length) ts_mutated = msprime.sim_mutations( ts, rate=mutation_rate, model=model_dict[model]["model_id"], discrete_genome=True, ) num_sites = ts_mutated.num_sites t = ts_mutated.first() t_span = np.ceil(t.interval[1] - np.ceil(t.interval[0])) # expected number of ancestral alleles for sites expected_ancestral_states_ts = np.zeros(num_alleles) change_probs = transition_matrix.sum(axis=1) - np.diag(transition_matrix) expected_ancestral_states_ts += ( root_distribution * t_span * (1 - np.exp(-mutation_rate * t.total_branch_length * change_probs)) ) # observed number of ancestral alleles obs_ancestral_states_ts = np.zeros((num_alleles,)) for site in ts_mutated.sites(): aa = site.ancestral_state obs_ancestral_states_ts[alleles.index(aa)] += 1 observed_transitions_ts, expected = self.get_transition_stats( ts_mutated, alleles, mutation_rate, Q ) # run pyvolve and calculate statistics ( c_py, observed_transitions_py, observed_ancestral_py, expected_py, ) = self._run_pyvolve( t, model_dict[model]["pyvolve_model"], model_dict[model]["model_id"], alleles, num_sites, mut_rate_pyvolve, mutation_rate, Q, ) pi_py = allel.sequence_diversity(pos, c_py) tm_chisq_py = self._transition_matrix_chi_sq( observed_transitions_py, expected_py ) expected_num_ancestral_states_py = root_distribution * num_sites root_chisq_py = scipy.stats.chisquare( observed_ancestral_py, expected_num_ancestral_states_py ).statistic tm_chisq_ts = self._transition_matrix_chi_sq( observed_transitions_ts, expected ) root_chisq_ts = scipy.stats.chisquare( obs_ancestral_states_ts, expected_ancestral_states_ts ).statistic c_ts = self.get_allele_counts(ts_mutated) pi_ts = allel.sequence_diversity(pos, c_ts) ts_results["pi"].append(pi_ts) ts_results["root_distribution"].append(root_chisq_ts) py_results["pi"].append(pi_py) py_results["root_distribution"].append(root_chisq_py) for idx, a in enumerate(alleles): ts_results[a].append(tm_chisq_ts[idx]) py_results[a].append(tm_chisq_py[idx]) df_py = pd.DataFrame.from_dict(py_results) df_ts = pd.DataFrame.from_dict(ts_results) return df_py, df_ts, alleles def _run_pyvolve_comparison(self, model, length=20, num_samples=10): df_py, df_ts, alleles = self._run_pyvolve_stats(model, length, num_samples) self.plot_stats(df_py, df_ts, alleles, "pyvolve", model) def test_pyv_JC69(self): self._run_pyvolve_comparison("JC69") def test_pyv_HKY(self): self._run_pyvolve_comparison("HKY") def test_pyv_PAM(self): self._run_pyvolve_comparison("PAM") def test_pyv_BLOSUM62(self): self._run_pyvolve_comparison("BLOSUM62") class SequentialMutations(MutationTest): """ Verify that repeated rounds to running sim_mutations gives the same results as running it once with a high rate. 
""" def _run(self, model): total_rate = 10 num_repeats = 10 num_replicates = 100 num_mutations_single = np.zeros(num_replicates) num_sites_single = np.zeros(num_replicates) num_mutations_repeat = np.zeros(num_replicates) num_sites_repeat = np.zeros(num_replicates) for j in range(num_replicates): base_ts = msprime.sim_ancestry(10, sequence_length=1000) single_ts = msprime.sim_mutations(base_ts, rate=total_rate, model=model) num_mutations_single[j] = single_ts.num_mutations num_sites_single[j] = single_ts.num_sites repeat_ts = base_ts for _ in range(num_repeats): repeat_ts = msprime.sim_mutations( repeat_ts, rate=total_rate / num_repeats, model=model, ) num_mutations_repeat[j] = repeat_ts.num_mutations num_sites_repeat[j] = repeat_ts.num_sites df_single = pd.DataFrame( {"num_sites": num_sites_single, "num_mutations": num_mutations_single} ) df_repeat = pd.DataFrame( {"num_sites": num_sites_repeat, "num_mutations": num_mutations_repeat} ) self._plot_stats("", df_single, df_repeat, "single", "repeat") def test_sequential_mutate_binary(self): self._run(msprime.BinaryMutationModel()) def test_sequential_mutate_JC69(self): self._run("JC69") def test_sequential_mutate_HKY(self): model = msprime.HKY(kappa=1.5, equilibrium_frequencies=[0.2, 0.3, 0.1, 0.4]) self._run(model) def test_sequential_mutate_PAM(self): self._run("PAM") class OlderMsprimeTest(Test): """ Run tests against older versions of msprime. """ def _run_in_venv(self, num_replicates, **kwargs): """ Runs the specified simulation in the older version of msprime using a venv. """ with tempfile.TemporaryDirectory(dir=self.output_dir.resolve()) as tempdir: tempdir = pathlib.Path(tempdir) params_file = tempdir / "params.pkl" output_prefix = tempdir / "output" with open(params_file, "wb") as f: pickle.dump(kwargs, f) cmd = ( "cd data && ./msprime-0.7.4/bin/python run_old_msprime.py " f"{num_replicates} {params_file} {output_prefix}" ) subprocess.run(cmd, shell=True, check=True) count = 0 for trees_file in tempdir.glob("*.trees"): ts = tskit.load(trees_file) prov = json.loads(ts.provenance(0).record) assert prov["software"] == {"name": "msprime", "version": "0.7.4"} yield ts count += 1 assert count == num_replicates def _run(self, num_replicates, **kwargs): logging.debug(f"Running: {num_replicates} replicates of {kwargs}") data = collections.defaultdict(list) old_version = "0.7.4" new_version = msprime.__version__ iter1 = self._run_in_venv(num_replicates, **kwargs) iter2 = msprime.simulate(num_replicates=num_replicates, **kwargs) for ts1, ts2 in zip(iter1, iter2): assert ts1.sequence_length == ts2.sequence_length assert ts1.num_samples == ts2.num_samples for ts, version in [(ts1, old_version), (ts2, new_version)]: t_mrca = np.zeros(ts.num_trees) for tree in ts.trees(): t_mrca[tree.index] = tree.time(tree.root) data["tmrca_mean"].append(np.mean(t_mrca)) data["num_trees"].append(ts.num_trees) data["num_nodes"].append(ts.num_nodes) data["num_edges"].append(ts.num_edges) data["version"].append(version) df = pd.DataFrame(data) df_new = df[df.version == new_version] df_old = df[df.version == old_version] for stat in ["tmrca_mean", "num_trees", "num_nodes", "num_edges"]: v1 = df_new[stat] v2 = df_old[stat] sm.graphics.qqplot(v1) sm.qqplot_2samples(v1, v2, line="45") pyplot.xlabel(new_version) pyplot.ylabel(old_version) f = self.output_dir / f"{stat}.png" pyplot.savefig(f, dpi=72) pyplot.close("all") def test_msprime_n1e2_no_recomb(self): self._run(10000, sample_size=100) def test_msprime_n1e4_no_recomb(self): self._run(1000, sample_size=10 ** 4) def 
test_msprime_n1e3_long_genome(self): self._run( 1000, sample_size=10 ** 2, Ne=10 ** 4, recombination_rate=1e-8, length=1e6 ) def test_msprime_n1e2_long_genome(self): self._run( 2000, sample_size=10 ** 2, Ne=10 ** 4, recombination_rate=1e-8, length=1e6 ) def test_msprime_n10_long_genome(self): self._run(1000, sample_size=10, Ne=10 ** 4, recombination_rate=1e-8, length=1e6) def test_msprime_n2_long_genome(self): self._run(1000, sample_size=2, Ne=10 ** 4, recombination_rate=1e-8, length=1e7) ############################################### # Infrastructure for running the tests and CLI ############################################### @attr.s class TestInstance: """ A single test instance, that consists of the test class and the test method name. """ test_class = attr.ib() method_name = attr.ib() def run(self, basedir): logging.info(f"Running {self}") output_dir = pathlib.Path(basedir) / self.test_class / self.method_name output_dir.mkdir(parents=True, exist_ok=True) instance = getattr(sys.modules[__name__], self.test_class)(output_dir) method = getattr(instance, self.method_name) method() @attr.s class TestSuite: """ Class responsible for registering all known tests. """ tests = attr.ib(init=False, factory=dict) classes = attr.ib(init=False, factory=set) def register(self, test_class, method_name): test_instance = TestInstance(test_class, method_name) if method_name in self.tests: raise ValueError(f"Test name {method_name} already used.") self.tests[method_name] = test_instance self.classes.add(test_class) def get_tests(self, names=None, test_class=None): if names is not None: tests = [self.tests[name] for name in names] elif test_class is not None: tests = [ test for test in self.tests.values() if test.test_class == test_class ] else: tests = list(self.tests.values()) return tests @attr.s class TestRunner: """ Class responsible for running test instances. """ def __run_sequential(self, tests, basedir, progress): for test in tests: test.run(basedir) progress.update() def __run_parallel(self, tests, basedir, num_threads, progress): with concurrent.futures.ProcessPoolExecutor( max_workers=num_threads ) as executor: futures = [executor.submit(test.run, basedir) for test in tests] exception = None for future in concurrent.futures.as_completed(futures): exception = future.exception() if exception is not None: # At least tell the user that we've had an exception. # Other stuff will still keep going, though. 
logging.error("EXCEPTION:%s", exception) break progress.update() if exception is not None: # Try to clear out as much work as we can, but it'll still run a # lot of stuff before we finish for future in futures: future.cancel() raise exception def run(self, tests, basedir, num_threads, show_progress): progress = tqdm.tqdm(total=len(tests), disable=not show_progress) logging.info(f"running {len(tests)} tests using {num_threads} processes") if num_threads <= 1: self.__run_sequential(tests, basedir, progress) else: self.__run_parallel(tests, basedir, num_threads, progress) progress.close() def setup_logging(args): log_level = "INFO" if args.quiet: log_level = "WARN" if args.debug: log_level = "DEBUG" daiquiri.setup(level=log_level) msprime_logger = daiquiri.getLogger("msprime") msprime_logger.setLevel("WARN") mpl_logger = daiquiri.getLogger("matplotlib") mpl_logger.setLevel("WARN") def run_tests(suite, args): setup_logging(args) runner = TestRunner() if len(args.tests) > 0: tests = suite.get_tests(names=args.tests) elif args.test_class is not None: tests = suite.get_tests(test_class=args.test_class) else: tests = suite.get_tests() runner.run(tests, args.output_dir, args.num_threads, not args.no_progress) def make_suite(): suite = TestSuite() for cls_name, cls in inspect.getmembers(sys.modules[__name__]): if inspect.isclass(cls) and issubclass(cls, Test): test_class_instance = cls() for name, thing in inspect.getmembers(test_class_instance): if inspect.ismethod(thing): if name.startswith("test_"): suite.register(cls_name, name) return suite def main(): suite = make_suite() parser = argparse.ArgumentParser() parser.add_argument( "--test-class", "-c", default=None, choices=sorted(suite.classes), help="Run all tests for specified test class", ) parser.add_argument( "tests", nargs="*", help="Run specific tests. Use the --list option to see those available", ) parser.add_argument( "--output-dir", "-d", type=str, default="tmp__NOBACKUP__", help="specify the base output directory", ) parser.add_argument( "--num-threads", "-t", type=int, default=1, help="Specify number of threads" ) group = parser.add_mutually_exclusive_group() group.add_argument( "--quiet", "-q", action="store_true", help="Do not write any output" ) group.add_argument( "--debug", "-D", action="store_true", help="Write out debug output" ) parser.add_argument( "--no-progress", "-n", action="store_true", help="Do not show progress bar" ) parser.add_argument( "--list", "-l", action="store_true", help="List available checks and exit" ) args = parser.parse_args() if args.list: print("All available tests") for test in suite.tests.values(): print(test.test_class, test.method_name, sep="\t") else: run_tests(suite, args) if __name__ == "__main__": main()
gpl-3.0
8,229,186,648,927,964,000
34.95479
88
0.524534
false
tankywoo/simiki
simiki/server.py
1
4414
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function, absolute_import, unicode_literals import os import os.path import sys import logging import traceback from simiki.compat import is_py2, unicode try: import SimpleHTTPServer as http_server except ImportError: # py3 import http.server as http_server try: import SocketServer as socket_server except ImportError: # py3 import socketserver as socket_server try: import urllib2 as urllib_request except ImportError: # py3 import urllib.request as urllib_request try: from os import getcwdu except ImportError: # py3 from os import getcwd as getcwdu URL_ROOT = None PUBLIC_DIRECTORY = None class Reuse_TCPServer(socket_server.TCPServer): allow_reuse_address = True class YARequestHandler(http_server.SimpleHTTPRequestHandler): def translate_path(self, path): """map url path to local file system. path and return path are str type in py3, builtin translate_path input is str(but it's unicode) and return str. so there is no need to do with codecs, system can locate file with unicode path. in py2, buildin translate_path input is str and return str. we need to decode to unicode and then encode path with filesystemencoding(), as mentioned above, unicode path can be located, but will have problem with py2's translate_path, for uniformity, we also return the corresponding type of translate_path in manual part. TODO: - fspath with os.sep from url always slash - URL_ROOT codecs simplify? - in the end of if body use super translate_path directly? """ path = urllib_request.unquote(path) if not isinstance(path, unicode): path = path.decode('utf-8') fsenc = sys.getfilesystemencoding() if is_py2: path = path.encode(fsenc) if URL_ROOT and self.path.startswith(URL_ROOT): if self.path == URL_ROOT or self.path == URL_ROOT + '/': fspath = os.path.join(PUBLIC_DIRECTORY, 'index.html') if is_py2: fspath = fspath.encode(fsenc) else: _url_root = urllib_request.unquote(URL_ROOT) if not isinstance(_url_root, unicode): _url_root = _url_root.decode('utf-8') if is_py2: _url_root = _url_root.encode(fsenc) fspath = os.path.join( PUBLIC_DIRECTORY.encode(fsenc), path[len(_url_root) + 1:]) # noqa: E501 else: fspath = os.path.join( PUBLIC_DIRECTORY, path[len(_url_root) + 1:]) return fspath else: return http_server.SimpleHTTPRequestHandler \ .translate_path(self, path) def do_GET(self): # redirect url if URL_ROOT and not self.path.startswith(URL_ROOT): self.send_response(301) self.send_header('Location', URL_ROOT + self.path) self.end_headers() http_server.SimpleHTTPRequestHandler.do_GET(self) def preview(path, url_root, host='127.0.0.1', port=8000): """ :param path: directory path relative to current path :param url_root: `root` setted in _config.yml """ global URL_ROOT, PUBLIC_DIRECTORY if not host: host = '127.0.0.1' if not port: port = 8000 if url_root.endswith('/'): url_root = url_root[:-1] URL_ROOT = urllib_request.quote(url_root.encode('utf-8')) PUBLIC_DIRECTORY = os.path.join(getcwdu(), path) if os.path.exists(path): os.chdir(path) else: logging.error("Path {} not exists".format(path)) try: Handler = YARequestHandler httpd = Reuse_TCPServer((host, port), Handler) except (OSError, IOError) as e: logging.error("Could not listen on port {0}\n{1}" .format(port, traceback.format_exc())) sys.exit(getattr(e, 'exitcode', 1)) logging.info("Serving at: http://{0}:{1}{2}/".format(host, port, url_root)) logging.info("Serving running... 
(Press CTRL-C to quit)") try: httpd.serve_forever() except (KeyboardInterrupt, SystemExit): logging.info("Shutting down server") httpd.socket.close()
mit
1,257,562,507,566,963,700
31.218978
96
0.607612
false
haystack/eyebrowse-server
notifications/models.py
1
7781
from __future__ import unicode_literals from __future__ import print_function import base64 import datetime from django.db import models from django.db.models.query import QuerySet from django.core.exceptions import ImproperlyConfigured from django.contrib.contenttypes.models import ContentType from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.utils.translation import get_language, activate from django.utils.encoding import python_2_unicode_compatible from django.utils.six.moves import cPickle as pickle # pylint: disable-msg=F from notifications.compat import AUTH_USER_MODEL, GenericForeignKey from notifications.conf import settings from notifications.utils import load_media_defaults, notice_setting_for_user, my_import from notifications.backends.email import EmailBackend NOTICE_MEDIA, NOTICE_MEDIA_DEFAULTS = load_media_defaults() class LanguageStoreNotAvailable(Exception): pass @python_2_unicode_compatible class NoticeType(models.Model): label = models.CharField(_("label"), max_length=40, unique=True) display = models.CharField(_("display"), max_length=50) description = models.CharField(_("description"), max_length=100) # by default only on for media with sensitivity less than or equal to this # number default = models.IntegerField(_("default")) def __str__(self): return self.label class Meta: verbose_name = _("notice type") verbose_name_plural = _("notice types") @classmethod def create(cls, label, display, description, default=2, verbosity=1): """ Creates a new NoticeType. This is intended to be used by other apps as a post_syncdb manangement step. """ try: notice_type = cls._default_manager.get(label=label) updated = False if display != notice_type.display: notice_type.display = display updated = True if description != notice_type.description: notice_type.description = description updated = True if default != notice_type.default: notice_type.default = default updated = True if updated: notice_type.save() if verbosity > 1: print("Updated %s NoticeType" % label) except cls.DoesNotExist: cls(label=label, display=display, description=description, default=default).save() if verbosity > 1: print("Created %s NoticeType" % label) class Notification(models.Model): recipient = models.ForeignKey(User, related_name="notification_recipient") sender = models.ForeignKey(User, related_name="notification_sender") date_created = models.DateTimeField(default=datetime.datetime.utcnow()) notice_type = models.ForeignKey(NoticeType) seen = models.BooleanField(default=False) url = models.URLField(max_length=300, blank=False, null=True) message = models.CharField(max_length=2000, blank=False, null=True) class NoticeSetting(models.Model): """ Indicates, for a given user, whether to send notifications of a given type to a given medium. 
""" user = models.ForeignKey(AUTH_USER_MODEL, verbose_name=_("user")) notice_type = models.ForeignKey(NoticeType, verbose_name=_("notice type")) medium = models.CharField(_("medium"), max_length=1, choices=NOTICE_MEDIA) send = models.BooleanField(_("send"), default=False) scoping_content_type = models.ForeignKey( ContentType, null=True, blank=True) scoping_object_id = models.PositiveIntegerField(null=True, blank=True) scoping = GenericForeignKey("scoping_content_type", "scoping_object_id") @classmethod def for_user(cls, user, notice_type, medium, scoping=None): """ Kept for backwards compatibilty but isn't used anywhere within this app @@@ consider deprecating """ return notice_setting_for_user(user, notice_type, medium, scoping) class Meta: verbose_name = _("notice setting") verbose_name_plural = _("notice settings") unique_together = ( "user", "notice_type", "medium", "scoping_content_type", "scoping_object_id") class NoticeQueueBatch(models.Model): """ A queued notice. Denormalized data for a notice. """ pickled_data = models.TextField() def get_notification_language(user): """ Returns site-specific notification language for this user. Raises LanguageStoreNotAvailable if this site does not use translated notifications. """ if settings.PINAX_NOTIFICATIONS_LANGUAGE_MODEL: model = settings.PINAX_NOTIFICATIONS_GET_LANGUAGE_MODEL() try: language = model._default_manager.get(user__id__exact=user.id) if hasattr(language, "language"): return language.language except (ImportError, ImproperlyConfigured, model.DoesNotExist): raise LanguageStoreNotAvailable raise LanguageStoreNotAvailable def send_now(users, label, extra=None, sender=None, scoping=None): """ Creates a new notice. This is intended to be how other apps create new notices. notification.send(user, "friends_invite_sent", { "spam": "eggs", "foo": "bar", ) """ sent = False if extra is None: extra = {} notice_type = NoticeType.objects.get(label=label) current_language = get_language() for user in users: # get user language for user from language store defined in # NOTIFICATION_LANGUAGE_MODULE setting try: language = get_notification_language(user) except LanguageStoreNotAvailable: language = None if language is not None: # activate the user's language activate(language) # Since we only have 1 medium, just hardcode it in (was getting some weird # 'module' object is not callable error) backend = EmailBackend(0) if backend.can_send(user, notice_type, scoping=scoping): backend.deliver(user, sender, notice_type, extra) sent = True # reset environment to original language activate(current_language) return sent def send(*args, **kwargs): """ A basic interface around both queue and send_now. This honors a global flag NOTIFICATION_QUEUE_ALL that helps determine whether all calls should be queued or not. A per call ``queue`` or ``now`` keyword argument can be used to always override the default global behavior. """ queue_flag = kwargs.pop("queue", False) now_flag = kwargs.pop("now", False) assert not ( queue_flag and now_flag), "'queue' and 'now' cannot both be True." if queue_flag: return queue(*args, **kwargs) elif now_flag: return send_now(*args, **kwargs) else: if settings.PINAX_NOTIFICATIONS_QUEUE_ALL: return queue(*args, **kwargs) else: return send_now(*args, **kwargs) def queue(users, label, extra=None, sender=None): """ Queue the notification in NoticeQueueBatch. This allows for large amounts of user notifications to be deferred to a seperate process running outside the webserver. 
""" if extra is None: extra = {} if isinstance(users, QuerySet): users = [row["pk"] for row in users.values("pk")] else: users = [user.pk for user in users] notices = [] for user in users: notices.append((user, label, extra, sender)) NoticeQueueBatch( pickled_data=base64.b64encode(pickle.dumps(notices))).save()
mit
4,718,777,562,124,167,000
33.127193
89
0.658399
false
eayunstack/neutron
neutron/plugins/ml2/drivers/helpers.py
1
7033
# Copyright (c) 2014 Thales Services SAS # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import random from neutron_lib import context as neutron_ctx from neutron_lib.plugins.ml2 import api from neutron_lib.utils import helpers from oslo_config import cfg from oslo_db import exception as db_exc from oslo_log import log from neutron.common import exceptions as exc from neutron.db import api as db_api from neutron.objects import base as base_obj from neutron.plugins.common import utils as p_utils LOG = log.getLogger(__name__) IDPOOL_SELECT_SIZE = 100 class BaseTypeDriver(api.ML2TypeDriver): """BaseTypeDriver for functions common to Segment and flat.""" def __init__(self): try: self.physnet_mtus = helpers.parse_mappings( cfg.CONF.ml2.physical_network_mtus, unique_values=False ) except Exception as e: LOG.error("Failed to parse physical_network_mtus: %s", e) self.physnet_mtus = [] def get_mtu(self, physical_network=None): return p_utils.get_deployment_physnet_mtu() class SegmentTypeDriver(BaseTypeDriver): """SegmentTypeDriver for segment allocation. Provide methods helping to perform segment allocation fully or partially specified. """ def __init__(self, model): super(SegmentTypeDriver, self).__init__() if issubclass(model, base_obj.NeutronDbObject): self.model = model.db_model else: self.model = model self.primary_keys = set(dict(self.model.__table__.columns)) self.primary_keys.remove("allocated") # TODO(ataraday): get rid of this method when old TypeDriver won't be used def _get_session(self, arg): if isinstance(arg, neutron_ctx.Context): return arg.session, db_api.context_manager.writer.using(arg) return arg, arg.session.begin(subtransactions=True) def allocate_fully_specified_segment(self, context, **raw_segment): """Allocate segment fully specified by raw_segment. If segment exists, then try to allocate it and return db object If segment does not exists, then try to create it and return db object If allocation/creation failed, then return None """ network_type = self.get_type() session, ctx_manager = self._get_session(context) try: with ctx_manager: alloc = ( session.query(self.model).filter_by(**raw_segment). first()) if alloc: if alloc.allocated: # Segment already allocated return else: # Segment not allocated LOG.debug("%(type)s segment %(segment)s allocate " "started ", {"type": network_type, "segment": raw_segment}) count = (session.query(self.model). filter_by(allocated=False, **raw_segment). 
update({"allocated": True})) if count: LOG.debug("%(type)s segment %(segment)s allocate " "done ", {"type": network_type, "segment": raw_segment}) return alloc # Segment allocated or deleted since select LOG.debug("%(type)s segment %(segment)s allocate " "failed: segment has been allocated or " "deleted", {"type": network_type, "segment": raw_segment}) # Segment to create or already allocated LOG.debug("%(type)s segment %(segment)s create started", {"type": network_type, "segment": raw_segment}) alloc = self.model(allocated=True, **raw_segment) alloc.save(session) LOG.debug("%(type)s segment %(segment)s create done", {"type": network_type, "segment": raw_segment}) except db_exc.DBDuplicateEntry: # Segment already allocated (insert failure) alloc = None LOG.debug("%(type)s segment %(segment)s create failed", {"type": network_type, "segment": raw_segment}) return alloc def allocate_partially_specified_segment(self, context, **filters): """Allocate model segment from pool partially specified by filters. Return allocated db object or None. """ network_type = self.get_type() session, ctx_manager = self._get_session(context) with ctx_manager: select = (session.query(self.model). filter_by(allocated=False, **filters)) # Selected segment can be allocated before update by someone else, allocs = select.limit(IDPOOL_SELECT_SIZE).all() if not allocs: # No resource available return alloc = random.choice(allocs) raw_segment = dict((k, alloc[k]) for k in self.primary_keys) LOG.debug("%(type)s segment allocate from pool " "started with %(segment)s ", {"type": network_type, "segment": raw_segment}) count = (session.query(self.model). filter_by(allocated=False, **raw_segment). update({"allocated": True})) if count: LOG.debug("%(type)s segment allocate from pool " "success with %(segment)s ", {"type": network_type, "segment": raw_segment}) return alloc # Segment allocated since select LOG.debug("Allocate %(type)s segment from pool " "failed with segment %(segment)s", {"type": network_type, "segment": raw_segment}) # saving real exception in case we exceeded amount of attempts raise db_exc.RetryRequest( exc.NoNetworkFoundInMaximumAllowedAttempts())
apache-2.0
3,381,965,087,420,021,000
39.188571
78
0.550974
false
aleksandar-mitrevski/fault_and_anomaly_detection
generative_model_fd/tests/multi-memory/test_td_dbn.py
1
3902
import numpy as np from test_generic import * def train1(y, f, model_manager, number_of_model_parameters, sequence_len): number_of_sequences = len(y) - sequence_len x = np.linspace(0, sequence_len, sequence_len) models = model_manager.optimise_td(x, y, f, sequence_len) models, arr_min, arr_max = rescale(models) network = DBN(number_of_model_parameters,[number_of_model_parameters*2]) network.train(models, epochs=20, learning_rate=0.1) return network, arr_min, arr_max def train2(y, f, model_manager, number_of_model_parameters, sequence_len): number_of_sequences = len(y) - sequence_len x = np.linspace(0, sequence_len, sequence_len) models = model_manager.optimise_td(x, y, f, sequence_len) models, arr_min, arr_max = rescale(models) network = DBN(number_of_model_parameters,[number_of_model_parameters*3]) network.train(models, epochs=20, learning_rate=0.1) return network, arr_min, arr_max def test(y, arr_min, arr_max, f, network, model_manager, number_of_model_parameters, sequence_len, initial_guess=None): number_of_sequences = len(y) - sequence_len x = np.linspace(0, sequence_len, sequence_len) test_models = model_manager.optimise_td(x, y, f, sequence_len, initial_guess=initial_guess) test_models,_,_ = rescale(test_models, arr_min, arr_max) distances = list() for i in xrange(0,number_of_sequences): sample = network.sample_network(test_models[i]) dist = distance(sample, test_models[i]) distances.append(dist) return distances def retrain(y, arr_min, arr_max, network, f, model_manager, sequence_len): number_of_sequences = len(y) - sequence_len x = np.linspace(0, sequence_len, sequence_len) models = model_manager.optimise_td(x, y, f, sequence_len) models,_,_ = rescale(models, arr_min, arr_max) network.train(models, epochs=20, learning_rate=0.1) return network sequence_len = 10 number_of_model_parameters = [3,4,5] model_manager = ModelFitLibrary() number_of_test_sequences = 90 curve_functions = [lambda x, a, b, c: a * np.square(x) + b * x + c, \ lambda x, a, b, c, d: a * np.power(x,3) + b * np.square(x) + c * x + d,\ lambda x, a, b, c, d, e: a * np.power(x,4) + b * np.power(x,3) + c * np.square(x) + d * x + e] number_of_model_parameters = [3] curve_functions = [lambda x, a, b, c: a * np.square(x) + b * x + c] # print 'Testing with hidden neurons = 2 * visible neurons' # generic_test(train1, retrain, test, curve_functions, model_manager, number_of_model_parameters, sequence_len, number_of_test_sequences) # print 'Testing with hidden neurons = 3 * visible neurons' # generic_test(train2, retrain, test, curve_functions, model_manager, number_of_model_parameters, sequence_len, number_of_test_sequences) train_data_front = np.genfromtxt('../../logs/laser_front.log') test_data_front = np.genfromtxt('../../logs/laser_front_test.log') anomalous_data_front = np.genfromtxt('../../logs/laser_front_faulty.log') retrain_data_front = np.genfromtxt('../../logs/laser_front_retrain.log') retrain_data_env2_front = np.genfromtxt('../../logs/laser_front_map2.log') anomalous_data_env2_front = np.genfromtxt('../../logs/laser_front_map2_faulty.log') train_data = train_data_front.mean(axis=1) test_data = test_data_front.mean(axis=1) anomalous_data = anomalous_data_front.mean(axis=1) anomalous_data = np.hstack((anomalous_data, anomalous_data_env2_front.mean(axis=1))) retrain_data = retrain_data_env2_front.mean(axis=1) #retrain_data = np.hstack((retrain_data, retrain_data_env2_front.mean(axis=1)))#retrain_data_front.mean(axis=1) number_of_test_sequences = max(test_data_front.shape[0], anomalous_data.shape[0]) - sequence_len 
generic_sensor_test(train_data, retrain_data, test_data, anomalous_data, train1, retrain, test, curve_functions, model_manager, number_of_model_parameters, sequence_len, number_of_test_sequences)
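# Hedged usage sketch (not part of the original script): train one network on
# the quadratic model family and score the faulty data; the anomaly threshold
# of 0.5 is an illustrative value only.
#
#   quadratic = curve_functions[0]
#   network, arr_min, arr_max = train1(train_data, quadratic, model_manager,
#                                      3, sequence_len)
#   distances = test(anomalous_data, arr_min, arr_max, quadratic, network,
#                    model_manager, 3, sequence_len)
#   anomalies = [i for i, d in enumerate(distances) if d > 0.5]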
bsd-2-clause
5,779,025,792,876,472,000
44.917647
195
0.698616
false
bstroebl/DigitizingTools
tools/dttools.py
1
49803
# -*- coding: utf-8 -*- """ dttools ````````````` """ """ Part of DigitizingTools, a QGIS plugin that subsumes different tools neded during digitizing sessions * begin : 2013-02-25 * copyright : (C) 2013 by Bernhard Ströbl * email : [email protected] This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ from builtins import range from builtins import object from qgis.PyQt import QtGui, QtCore, QtWidgets from qgis.core import * from qgis.gui import * import dtutils class DtTool(object): '''Abstract class; parent for any Dt tool or button''' def __init__(self, iface, geometryTypes, **kw): self.iface = iface self.canvas = self.iface.mapCanvas() #custom cursor self.cursor = QtGui.QCursor(QtGui.QPixmap(["16 16 3 1", " c None", ". c #FF0000", "+ c #FFFFFF", " ", " +.+ ", " ++.++ ", " +.....+ ", " +. .+ ", " +. . .+ ", " +. . .+ ", " ++. . .++", " ... ...+... ...", " ++. . .++", " +. . .+ ", " +. . .+ ", " ++. .+ ", " ++.....+ ", " ++.++ ", " +.+ "])) self.geometryTypes = [] self.shapeFileGeometryTypes = [] # ESRI shapefile does not distinguish between single and multi geometries # source of wkbType numbers: http://gdal.org/java/constant-values.html for aGeomType in geometryTypes: if aGeomType == 1: # wkbPoint self.geometryTypes.append(1) self.shapeFileGeometryTypes.append(4) self.geometryTypes.append(-2147483647) #wkbPoint25D self.shapeFileGeometryTypes.append(-2147483647) elif aGeomType == 2: # wkbLineString self.geometryTypes.append(2) self.shapeFileGeometryTypes.append(5) self.geometryTypes.append(-2147483646) #wkbLineString25D self.shapeFileGeometryTypes.append(-2147483646) elif aGeomType == 3: # wkbPolygon self.geometryTypes.append(3) self.shapeFileGeometryTypes.append(6) self.geometryTypes.append(-2147483645) #wkbPolygon25D self.shapeFileGeometryTypes.append(-2147483645) elif aGeomType == 4: # wkbMultiPoint self.geometryTypes.append(4) self.shapeFileGeometryTypes.append(1) # wkbPoint self.geometryTypes.append(-2147483644) #wkbMultiPoint25D self.shapeFileGeometryTypes.append(-2147483647) #wkbPoint25D elif aGeomType == 5: # wkbMultiLineString self.geometryTypes.append(5) self.shapeFileGeometryTypes.append(2) # wkbLineString self.geometryTypes.append(-2147483643) #wkbMultiLineString25D self.shapeFileGeometryTypes.append(-2147483646) #wkbLineString25D elif aGeomType == 6: # wkbMultiPolygon self.geometryTypes.append(6) self.shapeFileGeometryTypes.append(6) # wkbPolygon self.geometryTypes.append(-2147483642) #wkbMultiPolygon25D self.shapeFileGeometryTypes.append(-2147483645) #wkbPolygon25D def allowedGeometry(self, layer): '''check if this layer's geometry type is within the list of allowed types''' if layer.dataProvider().storageType() == u'ESRI Shapefile': # does not distinguish between single and multi result = self.shapeFileGeometryTypes.count(layer.wkbType()) >= 1 else: result = self.geometryTypes.count(layer.wkbType()) == 1 return result def geometryTypeMatchesLayer(self, layer, geom): '''check if the passed geom's geometry type matches the layer's type''' match = layer.wkbType() == geom.wkbType() if not match: if layer.dataProvider().storageType() == u'ESRI Shapefile': # does not distinguish between single and multi match = (layer.wkbType() == 1 and geom.wkbType() == 4) or \ (layer.wkbType() == 2 and geom.wkbType() == 5) or \ (layer.wkbType() == 3 and geom.wkbType() == 6) or \ (layer.wkbType() == 4 and 
geom.wkbType() == 1) or \ (layer.wkbType() == 5 and geom.wkbType() == 2) or \ (layer.wkbType() == 6 and geom.wkbType() == 3) else: # are we trying a single into a multi layer? match = (layer.wkbType() == 4 and geom.wkbType() == 1) or \ (layer.wkbType() == 5 and geom.wkbType() == 2) or \ (layer.wkbType() == 6 and geom.wkbType() == 3) return match def isPolygonLayer(self, layer): ''' check if this layer is a polygon layer''' polygonTypes = [3, 6, -2147483645, -2147483642] result = layer.wkbType() in polygonTypes return result def debug(self, str): title = "DigitizingTools Debugger" QgsMessageLog.logMessage(title + "\n" + str) class DtSingleButton(DtTool): '''Abstract class for a single button icon [QtGui.QIcon] tooltip [str] geometryTypes [array:integer] 0=point, 1=line, 2=polygon''' def __init__(self, iface, toolBar, icon, tooltip, geometryTypes = [1, 2, 3], dtName = None): super().__init__(iface, geometryTypes) self.act = QtWidgets.QAction(icon, tooltip, self.iface.mainWindow()) self.act.triggered.connect(self.process) if dtName != None: self.act.setObjectName(dtName) self.iface.currentLayerChanged.connect(self.enable) toolBar.addAction(self.act) self.geometryTypes = geometryTypes def process(self): raise NotImplementedError("Should have implemented process") def enable(self): '''Enables/disables the corresponding button.''' # Disable the Button by default self.act.setEnabled(False) layer = self.iface.activeLayer() if layer != None: #Only for vector layers. if layer.type() == QgsMapLayer.VectorLayer: if self.allowedGeometry(layer): self.act.setEnabled(layer.isEditable()) try: layer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected except: pass try: layer.editingStopped.disconnect(self.enable) # when it becomes active layer again except: pass layer.editingStarted.connect(self.enable) layer.editingStopped.connect(self.enable) class DtSingleTool(DtSingleButton): '''Abstract class for a tool''' def __init__(self, iface, toolBar, icon, tooltip, geometryTypes = [0, 1, 2], crsWarning = True, dtName = None): super().__init__(iface, toolBar, icon, tooltip, geometryTypes, dtName) self.tool = None self.act.setCheckable(True) self.canvas.mapToolSet.connect(self.toolChanged) def toolChanged(self, thisTool): if thisTool != self.tool: self.deactivate() def deactivate(self): if self.tool != None: self.tool.reset() self.reset() self.act.setChecked(False) def reset(self): pass class DtSingleEditTool(DtSingleTool): '''Abstract class for a tool for interactive editing''' def __init__(self, iface, toolBar, icon, tooltip, geometryTypes = [0, 1, 2], crsWarning = True, dtName = None): super().__init__(iface, toolBar, icon, tooltip, geometryTypes, dtName) self.crsWarning = crsWarning self.editLayer = None def reset(self): self.editLayer = None def enable(self): '''Enables/disables the corresponding button.''' # Disable the Button by default doEnable = False layer = self.iface.activeLayer() if layer != None: if layer.type() == 0: #Only for vector layers. 
if self.allowedGeometry(layer): doEnable = layer.isEditable() try: layer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected except: pass try: layer.editingStopped.disconnect(self.enable) # when it becomes active layer again except: pass layer.editingStarted.connect(self.enable) layer.editingStopped.connect(self.enable) if self.editLayer != None: # we have a current edit session, activeLayer may have changed or editing status of self.editLayer if self.editLayer != layer: try: self.editLayer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected except: pass try: self.editLayer.editingStopped.disconnect(self.enable) # when it becomes active layer again except: pass self.tool.reset() self.reset() if not doEnable: self.deactivate() if doEnable and self.crsWarning: layerCRSSrsid = layer.crs().srsid() mapSet = self.canvas.mapSettings() projectCRSSrsid = mapSet.destinationCrs().srsid() if layerCRSSrsid != projectCRSSrsid: self.iface.messageBar().pushWarning("DigitizingTools", self.act.toolTip() + " " + QtWidgets.QApplication.translate("DigitizingTools", "is disabled because layer CRS and project CRS do not match!")) doEnable = False self.act.setEnabled(doEnable) class DtDualTool(DtTool): '''Abstract class for a tool with interactive and batch mode icon [QtGui.QIcon] for interactive mode tooltip [str] for interactive mode iconBatch [QtGui.QIcon] for batch mode tooltipBatch [str] for batch mode geometryTypes [array:integer] 0=point, 1=line, 2=polygon''' def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], dtName = None): super().__init__(iface, geometryTypes) self.iface.currentLayerChanged.connect(self.enable) self.canvas.mapToolSet.connect(self.toolChanged) #create button self.button = QtWidgets.QToolButton(toolBar) self.button.clicked.connect(self.runSlot) self.button.toggled.connect(self.hasBeenToggled) #create menu self.menu = QtWidgets.QMenu(toolBar) if dtName != None: self.menu.setObjectName(dtName) self.menu.triggered.connect(self.menuTriggered) self.button.setMenu(self.menu) self.button.setPopupMode(QtWidgets.QToolButton.MenuButtonPopup) # create actions self.act = QtWidgets.QAction(icon, tooltip, self.iface.mainWindow()) if dtName != None: self.act.setObjectName(dtName + "Action") self.act.setToolTip(tooltip) self.act_batch = QtWidgets.QAction(iconBatch, tooltipBatch, self.iface.mainWindow()) if dtName != None: self.act_batch.setObjectName(dtName + "BatchAction") self.act_batch.setToolTip(tooltipBatch) self.menu.addAction(self.act) self.menu.addAction(self.act_batch) # set the interactive action as default action, user needs to click the button to activate it self.button.setIcon(self.act.icon()) self.button.setToolTip(self.act.toolTip()) self.button.setCheckable(True) self.batchMode = False # add button to toolBar toolBar.addWidget(self.button) self.geometryTypes = geometryTypes # run the enable slot self.enable() def menuTriggered(self, thisAction): if thisAction == self.act: self.batchMode = False self.button.setCheckable(True) if not self.button.isChecked(): self.button.toggle() else: self.batchMode = True if self.button.isCheckable(): if self.button.isChecked(): self.button.toggle() self.button.setCheckable(False) self.runSlot(False) self.button.setIcon(thisAction.icon()) self.button.setToolTip(thisAction.toolTip()) def toolChanged(self, thisTool): if thisTool != self.tool: self.deactivate() def hasBeenToggled(self, isChecked): raise NotImplementedError("Should have implemented hasBeenToggled") def 
deactivate(self): if self.button != None: if self.button.isChecked(): self.button.toggle() def runSlot(self, isChecked): if self.batchMode: layer = self.iface.activeLayer() if layer.selectedFeatureCount() > 0: self.process() else: if not isChecked: self.button.toggle() def process(self): raise NotImplementedError("Should have implemented process") def enable(self): # Disable the Button by default self.button.setEnabled(False) layer = self.iface.activeLayer() if layer != None: #Only for vector layers. if layer.type() == QgsMapLayer.VectorLayer: # only for certain layers if self.allowedGeometry(layer): if not layer.isEditable(): self.deactivate() self.button.setEnabled(layer.isEditable()) try: layer.editingStarted.disconnect(self.enable) # disconnect, will be reconnected except: pass try: layer.editingStopped.disconnect(self.enable) # when it becomes active layer again except: pass layer.editingStarted.connect(self.enable) layer.editingStopped.connect(self.enable) else: self.deactivate() class DtDualToolSelectFeature(DtDualTool): '''Abstract class for a DtDualToo which uses the DtSelectFeatureTool for interactive mode''' def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], dtName = None): super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName) self.tool = DtSelectFeatureTool(iface) def featureSelectedSlot(self, fids): if len(fids) >0: self.process() def hasBeenToggled(self, isChecked): try: self.tool.featureSelected.disconnect(self.featureSelectedSlot) # disconnect if it was already connected, so slot gets called only once! except: pass if isChecked: self.canvas.setMapTool(self.tool) self.tool.featureSelected.connect(self.featureSelectedSlot) else: self.canvas.unsetMapTool(self.tool) class DtDualToolSelectPolygon(DtDualToolSelectFeature): '''Abstract class for a DtDualToo which uses the DtSelectFeatureTool for interactive mode''' def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [3, 6], dtName = None): super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName) self.tool = DtSelectPolygonTool(iface) class DtDualToolSelectVertex(DtDualTool): '''Abstract class for a DtDualTool which uses the DtSelectVertexTool for interactive mode numVertices [integer] nnumber of vertices to be snapped until vertexFound signal is emitted''' def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], numVertices = 1, dtName = None): super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName) self.tool = DtSelectVertexTool(self.iface, numVertices) def hasBeenToggled(self, isChecked): try: self.tool.vertexFound.disconnect(self.vertexSnapped) # disconnect if it was already connected, so slot gets called only once! 
except: pass if isChecked: self.canvas.setMapTool(self.tool) self.tool.vertexFound.connect(self.vertexSnapped) else: self.canvas.unsetMapTool(self.tool) def vertexSnapped(self, snapResult): raise NotImplementedError("Should have implemented vertexSnapped") class DtDualToolSelectRing(DtDualTool): ''' Abstract class for a DtDualTool which uses the DtSelectRingTool for interactive mode ''' def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], dtName = None): super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName) self.tool = DtSelectRingTool(self.iface) def hasBeenToggled(self, isChecked): try: self.tool.ringSelected.disconnect(self.ringFound) # disconnect if it was already connected, so slot gets called only once! except: pass if isChecked: self.canvas.setMapTool(self.tool) self.tool.ringSelected.connect(self.ringFound) else: self.canvas.unsetMapTool(self.tool) def ringFound(self, selectRingResult): raise NotImplementedError("Should have implemented ringFound") class DtDualToolSelectGap(DtDualTool): ''' Abstract class for a DtDualTool which uses the DtSelectGapTool for interactive mode ''' def __init__(self, iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes = [1, 2, 3], dtName = None, allLayers = False): super().__init__(iface, toolBar, icon, tooltip, iconBatch, tooltipBatch, geometryTypes, dtName) self.tool = DtSelectGapTool(self.iface, allLayers) def hasBeenToggled(self, isChecked): try: self.tool.gapSelected.disconnect(self.gapFound) # disconnect if it was already connected, so slot gets called only once! except: pass if isChecked: self.canvas.setMapTool(self.tool) self.tool.gapSelected.connect(self.gapFound) else: self.canvas.unsetMapTool(self.tool) def gapFound(self, selectGapResult): raise NotImplementedError("Should have implemented gapFound") class DtMapToolEdit(QgsMapToolEdit, DtTool): '''abstract subclass of QgsMapToolEdit''' def __init__(self, iface, **kw): super().__init__(canvas = iface.mapCanvas(), iface = iface, geometryTypes = []) def activate(self): self.canvas.setCursor(self.cursor) def deactivate(self): self.reset() def reset(self, emitSignal = False): pass def transformed(self, thisLayer, thisQgsPoint): layerCRSSrsid = thisLayer.crs().srsid() projectCRSSrsid = QgsProject.instance().crs().srsid() if layerCRSSrsid != projectCRSSrsid: transQgsPoint = QgsGeometry.fromPointXY(thisQgsPoint) transQgsPoint.transform(QgsCoordinateTransform( QgsProject.instance().crs(), thisLayer.crs(), QgsProject.instance())) return transQgsPoint.asPoint() else: return thisQgsPoint class DtSelectFeatureTool(DtMapToolEdit): featureSelected = QtCore.pyqtSignal(list) def __init__(self, iface): super().__init__(iface) self.currentHighlight = [None, None] # feature, highlightGraphic self.ignoreFids = [] # featureids that schould be ignored when looking for a feature def highlightFeature(self, layer, feature): '''highlight the feature if it has a geometry''' geomType = layer.geometryType() returnGeom = None if geomType <= 2: if geomType == 0: marker = QgsVertexMarker(self.iface.mapCanvas()) marker.setIconType(3) # ICON_BOX marker.setColor(self.rubberBandColor) marker.setIconSize(12) marker.setPenWidth (3) marker.setCenter(feature.geometry().centroid().asPoint()) returnGeom = marker else: settings = QtCore.QSettings() settings.beginGroup("Qgis/digitizing") a = settings.value("line_color_alpha",200,type=int) b = settings.value("line_color_blue",0,type=int) g = 
settings.value("line_color_green",0,type=int) r = settings.value("line_color_red",255,type=int) lw = settings.value("line_width",1,type=int) settings.endGroup() rubberBandColor = QtGui.QColor(r, g, b, a) rubberBandWidth = lw rubberBand = QgsRubberBand(self.iface.mapCanvas()) rubberBand.setColor(rubberBandColor) rubberBand.setWidth(rubberBandWidth) rubberBand.setToGeometry(feature.geometry(), layer) returnGeom = rubberBand self.currentHighlight = [feature, returnGeom] return returnGeom else: return None def removeHighlight(self): highlightGeom = self.currentHighlight[1] if highlightGeom != None: self.iface.mapCanvas().scene().removeItem(highlightGeom) self.currentHighlight = [None, None] def highlightNext(self, layer, startingPoint): if self.currentHighlight != [None, None]: self.ignoreFids.append(self.currentHighlight[0].id()) # will return the first feature, if there is only one will return this feature found = self.getFeatureForPoint(layer, startingPoint) if len(found) == 0: self.removeHighlight() return 0 else: aFeat = found[0] numFeatures = found[1] if self.currentHighlight != [None, None]: if aFeat.id() != self.currentHighlight[0].id(): self.removeHighlight() self.highlightFeature(layer, found[0]) else: self.highlightFeature(layer, found[0]) return numFeatures def getFeatureForPoint(self, layer, startingPoint, inRing = False): ''' return the feature this QPoint is in (polygon layer) or this QPoint snaps to (point or line layer) ''' result = [] if self.isPolygonLayer(layer): mapToPixel = self.canvas.getCoordinateTransform() #thisQgsPoint = mapToPixel.toMapCoordinates(startingPoint) thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint)) spatialIndex = dtutils.dtSpatialindex(layer) featureIds = spatialIndex.nearestNeighbor(thisQgsPoint, 0) # if we use 0 as neighborCount then only features that contain the point # are included for fid in featureIds: feat = dtutils.dtGetFeatureForId(layer, fid) if feat != None: geom = QgsGeometry(feat.geometry()) if geom.contains(thisQgsPoint): result.append(feat) result.append([]) return result break else: if inRing: rings = dtutils.dtExtractRings(geom) if len(rings) > 0: for aRing in rings: if aRing.contains(thisQgsPoint): result.append(feat) result.append([]) result.append(aRing) return result break else: #we need a snapper, so we use the MapCanvas snapper snapper = self.canvas.snappingUtils() snapper.setCurrentLayer(layer) # snapType = 0: no snap, 1 = vertex, 2 vertex & segment, 3 = segment snapMatch = snapper.snapToCurrentLayer(startingPoint, QgsPointLocator.All) if not snapMatch.isValid(): dtutils.showSnapSettingsWarning(self.iface) else: feat = dtutils.dtGetFeatureForId(layer, snapMatch.featureId()) if feat != None: result.append(feat) if snapMatch.hasVertex(): result.append([snapMatch.point(), None]) if snapMatch.hasEdge(): result.append(snapMatch.edgePoints()) return result return result def canvasReleaseEvent(self,event): #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() if layer != None: #the clicked point is our starting point startingPoint = QtCore.QPoint(x,y) found = self.getFeatureForPoint(layer, startingPoint) if len(found) > 0: feat = found[0] layer.removeSelection() layer.select(feat.id()) self.featureSelected.emit([feat.id()]) class DtSelectPolygonTool(DtSelectFeatureTool): def __init__(self, iface): super().__init__(iface) def getFeatureForPoint(self, layer, startingPoint): ''' return the feature this QPoint is in and the total amount of features ''' result = [] 
mapToPixel = self.canvas.getCoordinateTransform() #thisQgsPoint = mapToPixel.toMapCoordinates(startingPoint) thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint)) spatialIndex = dtutils.dtSpatialindex(layer) featureIds = spatialIndex.nearestNeighbor(thisQgsPoint, 0) # if we use 0 as neighborCount then only features that contain the point # are included foundFeatures = [] while True: for fid in featureIds: if self.ignoreFids.count(fid) == 0: feat = dtutils.dtGetFeatureForId(layer, fid) if feat != None: geom = QgsGeometry(feat.geometry()) if geom.contains(thisQgsPoint): foundFeatures.append(feat) if len(foundFeatures) == 0: if len(self.ignoreFids) == 0: #there is no feaure at this point break #while else: self.ignoreFids.pop(0) # remove first and try again elif len(foundFeatures) > 0: # return first feature feat = foundFeatures[0] result.append(feat) result.append(len(featureIds)) break #while return result def canvasReleaseEvent(self,event): ''' - if user clicks left and no feature is highlighted, highlight first feature - if user clicks left and there is a highlighted feature use this feature as selected - if user clicks right, highlight another feature ''' #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() if layer != None: startingPoint = QtCore.QPoint(x,y) #the clicked point is our starting point if event.button() == QtCore.Qt.RightButton: # choose another feature self.highlightNext(layer, startingPoint) elif event.button() == QtCore.Qt.LeftButton: if self.currentHighlight == [None, None]: # first click numFeatures = self.highlightNext(layer, startingPoint) else: # user accepts highlighted geometry mapToPixel = self.canvas.getCoordinateTransform() thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint)) feat = self.currentHighlight[0] if feat.geometry().contains(thisQgsPoint): # is point in highlighted feature? 
numFeatures = 1 else: # mabe user clicked somewhere else numFeatures = self.highlightNext(layer, startingPoint) if numFeatures == 1: feat = self.currentHighlight[0] self.removeHighlight() layer.removeSelection() layer.select(feat.id()) self.featureSelected.emit([feat.id()]) def reset(self): self.removeHighlight() class DtSelectRingTool(DtSelectFeatureTool): ''' a map tool to select a ring in a polygon ''' ringSelected = QtCore.pyqtSignal(list) def __init__(self, iface): super().__init__(iface) def canvasReleaseEvent(self,event): #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() if layer != None: #the clicked point is our starting point startingPoint = QtCore.QPoint(x,y) found = self.getFeatureForPoint(layer, startingPoint, inRing = True) if len(found) == 3: aRing = found[2] self.ringSelected.emit([aRing]) def reset(self, emitSignal = False): pass class DtSelectGapTool(DtMapToolEdit): ''' a map tool to select a gap between polygons, if allLayers is True then the gap is searched between polygons of all currently visible polygon layers ''' gapSelected = QtCore.pyqtSignal(list) def __init__(self, iface, allLayers): super().__init__(iface) self.allLayers = allLayers def canvasReleaseEvent(self,event): #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() visibleLayers = [] if self.allLayers: for aLayer in self.iface.layerTreeCanvasBridge().rootGroup().checkedLayers(): if 0 == aLayer.type(): if self.isPolygonLayer(aLayer): visibleLayers.append(aLayer) else: if layer != None: visibleLayers.append(layer) if len(visibleLayers) > 0: #the clicked point is our starting point startingPoint = QtCore.QPoint(x,y) mapToPixel = self.canvas.getCoordinateTransform() thisQgsPoint = self.transformed(layer, mapToPixel.toMapCoordinates(startingPoint)) multiGeom = None for aLayer in visibleLayers: if not self.allLayers and aLayer.selectedFeatureCount() > 0: #we assume, that the gap is between the selected polyons hadSelection = True else: hadSelection = False spatialIndex = dtutils.dtSpatialindex(aLayer) # get the 100 closest Features featureIds = spatialIndex.nearestNeighbor(thisQgsPoint, 100) aLayer.select(featureIds) multiGeom = dtutils.dtCombineSelectedPolygons(aLayer, self.iface, multiGeom) if self.allLayers or not hadSelection: aLayer.removeSelection() if multiGeom == None: return None if multiGeom != None: rings = dtutils.dtExtractRings(multiGeom) if len(rings) > 0: for aRing in rings: if aRing.contains(thisQgsPoint): self.gapSelected.emit([aRing]) break def reset(self, emitSignal = False): pass class DtSelectPartTool(DtSelectFeatureTool): '''signal sends featureId of clickedd feature, number of part selected and geometry of part''' partSelected = QtCore.pyqtSignal(list) def __init__(self, iface): super().__init__(iface) def canvasReleaseEvent(self,event): #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() if layer != None: #the clicked point is our starting point startingPoint = QtCore.QPoint(x,y) found = self.getFeatureForPoint(layer, startingPoint) if len(found) > 0: feat = found[0] snappedPoints = found[1] if len(snappedPoints) > 0: snappedVertex = snappedPoints[0] else: snappedVertex = None geom = QgsGeometry(feat.geometry()) # if feature geometry is multipart start split processing if geom.isMultipart(): # Get parts from original feature parts = geom.asGeometryCollection() mapToPixel = self.canvas.getCoordinateTransform() thisQgsPoint = mapToPixel.toMapCoordinates(startingPoint) for i in 
range(len(parts)): # find the part that was snapped aPart = parts[i] if self.isPolygonLayer(layer): if aPart.contains(thisQgsPoint): self.partSelected.emit([feat.id(), i, aPart]) break else: points = dtutils.dtExtractPoints(aPart) for j in range(len(points)): aPoint = points[j] if snappedVertex != None: if aPoint.x() == snappedVertex.x() and \ aPoint.y() == snappedVertex.y(): self.partSelected.emit([feat.id(), i, aPart]) break else: try: nextPoint = points[j + 1] except: break if aPoint.x() == snappedPoints[0].x() and \ aPoint.y() == snappedPoints[0].y() and \ nextPoint.x() == snappedPoints[1].x() and \ nextPoint.y() == snappedPoints[1].y(): self.partSelected.emit([feat.id(), i, aPart]) break class DtSelectVertexTool(DtMapToolEdit): '''select and mark numVertices vertices in the active layer''' vertexFound = QtCore.pyqtSignal(list) def __init__(self, iface, numVertices = 1): super().__init__(iface) # desired number of marked vertex until signal self.numVertices = numVertices # number of marked vertex self.count = 0 # arrays to hold markers and vertex points self.markers = [] self.points = [] self.fids = [] def canvasReleaseEvent(self,event): if self.count < self.numVertices: #not yet enough #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() if layer != None: #the clicked point is our starting point startingPoint = QtCore.QPoint(x,y) #we need a snapper, so we use the MapCanvas snapper snapper = self.canvas.snappingUtils() snapper.setCurrentLayer(layer) # snapType = 0: no snap, 1 = vertex, 2 = segment, 3 = vertex & segment snapMatch = snapper.snapToCurrentLayer(startingPoint, QgsPointLocator.Vertex) if not snapMatch.isValid(): #warn about missing snapping tolerance if appropriate dtutils.showSnapSettingsWarning(self.iface) else: #mark the vertex p = snapMatch.point() m = QgsVertexMarker(self.canvas) m.setIconType(1) if self.count == 0: m.setColor(QtGui.QColor(255,0,0)) else: m.setColor(QtGui.QColor(0, 0, 255)) m.setIconSize(12) m.setPenWidth (3) m.setCenter(p) self.points.append(p) self.markers.append(m) fid = snapMatch.featureId() # QgsFeatureId of the snapped geometry self.fids.append(fid) self.count += 1 if self.count == self.numVertices: self.vertexFound.emit([self.points, self.markers, self.fids]) #self.emit(SIGNAL("vertexFound(PyQt_PyObject)"), [self.points, self.markers]) def reset(self, emitSignal = False): for m in self.markers: self.canvas.scene().removeItem(m) self.markers = [] self.points = [] self.fids = [] self.count = 0 class DtSelectSegmentTool(DtMapToolEdit): segmentFound = QtCore.pyqtSignal(list) def __init__(self, iface): super().__init__(iface) self.rb1 = QgsRubberBand(self.canvas, False) def canvasReleaseEvent(self,event): #Get the click x = event.pos().x() y = event.pos().y() layer = self.canvas.currentLayer() if layer != None: #the clicked point is our starting point startingPoint = QtCore.QPoint(x,y) #we need a snapper, so we use the MapCanvas snapper snapper = self.canvas.snappingUtils() snapper.setCurrentLayer(layer) # snapType = 0: no snap, 1 = vertex, 2 = segment, 3 = vertex & segment snapType = 2 snapMatch = snapper.snapToCurrentLayer(startingPoint, QgsPointLocator.Edge) if not snapMatch.isValid(): #warn about missing snapping tolerance if appropriate dtutils.showSnapSettingsWarning(self.iface) else: #if we have found a linesegment edge = snapMatch.edgePoints() p1 = edge[0] p2 = edge[1] # we like to mark the segment that is choosen, so we need a rubberband self.rb1.reset() color = QtGui.QColor(255,0,0) 
self.rb1.setColor(color) self.rb1.setWidth(2) self.rb1.addPoint(p1) self.rb1.addPoint(p2) self.rb1.show() self.segmentFound.emit([self.rb1.getPoint(0, 0), self.rb1.getPoint(0, 1), self.rb1]) def reset(self, emitSignal = False): self.rb1.reset() class DtSplitFeatureTool(QgsMapToolAdvancedDigitizing, DtTool): finishedDigitizing = QtCore.pyqtSignal(QgsGeometry) def __init__(self, iface): super().__init__(canvas = iface.mapCanvas(), cadDockWidget = iface.cadDockWidget(), iface = iface, geometryTypes = []) self.marker = None self.rubberBand = None self.sketchRubberBand = self.createRubberBand() self.sketchRubberBand.setLineStyle(QtCore.Qt.DotLine) self.rbPoints = [] # array to store points in rubber band because # api to access points does not work properly or I did not figure it out :) self.currentMousePosition = None self.snapPoint = None self.reset() def activate(self): super().activate() self.canvas.setCursor(self.cursor) self.canvas.installEventFilter(self) self.snapPoint = None self.rbPoints = [] def eventFilter(self, source, event): ''' we need an eventFilter here to filter out Backspace key presses as otherwise the selected objects in the edit layer get deleted if user hits Backspace The eventFilter() function must return true if the event should be filtered, (i.e. stopped); otherwise it must return false, see http://doc.qt.io/qt-5/qobject.html#installEventFilter ''' if event.type() == QtCore.QEvent.KeyPress: if event.key() == QtCore.Qt.Key_Backspace: if self.rubberBand != None: if self.rubberBand.numberOfVertices() >= 2: # QgsRubberBand has always 2 vertices if self.currentMousePosition != None: self.removeLastPoint() self.redrawSketchRubberBand([self.toMapCoordinates(self.currentMousePosition)]) return True else: return False else: return False def eventToQPoint(self, event): x = event.pos().x() y = event.pos().y() thisPoint = QtCore.QPoint(x, y) return thisPoint def initRubberBand(self, firstPoint): if self.rubberBand == None: # create a QgsRubberBand self.rubberBand = self.createRubberBand() self.rubberBand.addPoint(firstPoint) self.rbPoints.append(firstPoint) def removeLastPoint(self): ''' remove the last point in self.rubberBand''' if len (self.rbPoints) > 1: #first point will not be removed self.rbPoints.pop() #we recreate rubberBand because it contains doubles self.rubberBand.reset() for aPoint in self.rbPoints: self.rubberBand.addPoint(QgsPointXY(aPoint)) def trySnap(self, event): self.removeSnapMarker() self.snapPoint = None # try to snap thisPoint = self.eventToQPoint(event) snapper = self.canvas.snappingUtils() # snap to any layer within snap tolerance snapMatch = snapper.snapToMap(thisPoint) if not snapMatch.isValid(): return False else: self.snapPoint = snapMatch.point() self.markSnap(self.snapPoint) return True def markSnap(self, thisPoint): self.marker = QgsVertexMarker(self.canvas) self.marker.setIconType(1) self.marker.setColor(QtGui.QColor(255,0,0)) self.marker.setIconSize(12) self.marker.setPenWidth (3) self.marker.setCenter(thisPoint) def removeSnapMarker(self): if self.marker != None: self.canvas.scene().removeItem(self.marker) self.marker = None def clear(self): if self.rubberBand != None: self.rubberBand.reset() self.canvas.scene().removeItem(self.rubberBand) self.rubberBand = None if self.snapPoint != None: self.removeSnapMarker() self.snapPoint = None self.sketchRubberBand.reset() self.rbPoints = [] def reset(self): self.clear() self.canvas.removeEventFilter(self) def redrawSketchRubberBand(self, points): if self.rubberBand != None and len(self.rbPoints) > 0: 
self.sketchRubberBand.reset() sketchStartPoint = self.rbPoints[len(self.rbPoints) -1] self.sketchRubberBand.addPoint(QgsPointXY(sketchStartPoint)) if len(points) == 1: self.sketchRubberBand.addPoint(QgsPointXY(sketchStartPoint)) self.sketchRubberBand.movePoint( self.sketchRubberBand.numberOfVertices() -1, points[0]) #for p in range(self.rubberBand.size()): # self.debug("Part " + str(p)) # for v in range(self.rubberBand.partSize(p)): # vertex = self.rubberBand.getPoint(0,j=v) # self.debug("Vertex " + str(v) + " = "+ str(vertex.x()) + ", " + str(vertex.y())) #startPoint = self.rubberBand.getPoint(0, self.rubberBand.partSize(0) -1) #self.debug("StartPoint " + str(startPoint)) #self.sketchRubberBand.addPoint(startPoint) #self.sketchRubberBand.addPoint(points[len(points) - 1]) else: for aPoint in points: self.sketchRubberBand.addPoint(aPoint) def cadCanvasMoveEvent(self, event): pass #self.debug("cadCanvasMoveEvent") def cadCanvasPressEvent(self, event): pass #self.debug("cadCanvasPressEvent") def cadCanvasReleaseEvent(self, event): pass #self.debug("cadCanvasReleaseEvent") def canvasMoveEvent(self, event): self.snapPoint = None thisPoint = self.eventToQPoint(event) hasSnap = self.trySnap(event) if self.rubberBand != None: if hasSnap: #if self.canvas.snappingUtils().config().enabled(): # is snapping active? tracer = QgsMapCanvasTracer.tracerForCanvas(self.canvas) if tracer.actionEnableTracing().isChecked(): # tracing is pressed in tracer.configure() #startPoint = self.rubberBand.getPoint(0, self.rubberBand.numberOfVertices() -1) startPoint = self.rbPoints[len(self.rbPoints) -1] pathPoints, pathError = tracer.findShortestPath(QgsPointXY(startPoint), self.snapPoint) if pathError == 0: #ErrNone pathPoints.pop(0) # remove first point as it is identical with starPoint self.redrawSketchRubberBand(pathPoints) else: self.redrawSketchRubberBand([self.snapPoint]) else: self.redrawSketchRubberBand([self.snapPoint]) else: self.redrawSketchRubberBand([self.toMapCoordinates(thisPoint)]) self.currentMousePosition = thisPoint def canvasReleaseEvent(self, event): layer = self.canvas.currentLayer() if layer != None: thisPoint = self.eventToQPoint(event) #QgsMapToPixel instance if event.button() == QtCore.Qt.LeftButton: if self.rubberBand == None: if self.snapPoint == None: self.initRubberBand(self.toMapCoordinates(thisPoint)) else: # last mouse move created a snap self.initRubberBand(self.snapPoint) self.snapPoint = None self.removeSnapMarker() else: # merge sketchRubberBand into rubberBand sketchGeom = self.sketchRubberBand.asGeometry() verticesSketchGeom = sketchGeom.vertices() self.rubberBand.addGeometry(sketchGeom) # rubberBand now contains a double point because it's former end point # and sketchRubberBand's start point are identical # so we remove the last point before adding new ones self.rbPoints.pop() while verticesSketchGeom.hasNext(): # add the new points self.rbPoints.append(verticesSketchGeom.next()) self.redrawSketchRubberBand([self.toMapCoordinates(thisPoint)]) if self.snapPoint != None: self.snapPoint = None self.removeSnapMarker() else: # right click if self.rubberBand.numberOfVertices() > 1: rbGeom = self.rubberBand.asGeometry() self.finishedDigitizing.emit(rbGeom) self.clear() self.canvas.refresh() def keyPressEvent(self, event): if event.key() == QtCore.Qt.Key_Escape: self.clear() def deactivate(self): self.reset()
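
# A minimal usage sketch, assuming this module is loaded inside a QGIS plugin.
# 'iface' stands for the QgisInterface object every plugin receives; the slot
# name 'onVerticesFound' is illustrative and not part of this module.
def _exampleActivateVertexTool(iface):
    def onVerticesFound(result):
        # result carries the snapped points, their canvas markers and the
        # feature ids they were snapped from, as emitted by vertexFound
        points, markers, fids = result
        print(len(points), "vertices picked from features", fids)

    vertexTool = DtSelectVertexTool(iface, numVertices=2)
    vertexTool.vertexFound.connect(onVerticesFound)
    iface.mapCanvas().setMapTool(vertexTool)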
gpl-2.0
-5,751,604,370,468,592,000
38.091052
146
0.555741
false
HazyResearch/metal
metal/contrib/baselines/sparse_logreg.py
1
1109
from metal.contrib.modules.sparse_linear_module import SparseLinearModule from metal.end_model import EndModel from metal.utils import recursive_merge_dicts class SparseLogisticRegression(EndModel): """A _sparse_ logistic regression classifier for a single-task problem Args: input_dim: The maximum length of each input (a tensor of integer indices corresponding to one-hot features) output_dim: The cardinality of the classifier padding_idx: If not None, the embedding initialized to 0 so no gradient will pass through it. """ def __init__(self, input_dim, output_dim=2, padding_idx=0, **kwargs): layer_out_dims = [input_dim, output_dim] sparse_linear = SparseLinearModule( vocab_size=input_dim, embed_size=output_dim, padding_idx=padding_idx ) overrides = {"input_batchnorm": False, "input_dropout": 0.0} kwargs = recursive_merge_dicts( kwargs, overrides, misses="insert", verbose=False ) super().__init__(layer_out_dims, head_module=sparse_linear, **kwargs)
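
# A minimal instantiation sketch; the vocabulary size below is an illustrative
# value, and any further training configuration is left to EndModel's defaults.
if __name__ == "__main__":
    # 1000 one-hot feature indices in, binary classification out
    model = SparseLogisticRegression(input_dim=1000, output_dim=2, padding_idx=0)
    print(model)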
apache-2.0
-7,200,890,425,346,443,000
41.653846
80
0.672678
false
katerina7479/kadre
view/pages/tableboxpage.py
1
3165
from PySide import QtGui from pages import Page from view.widgets.buttonvbox import ButtonVBox from view.widgets.tablebox import TableBox class TableBoxPage(Page): def __init__(self, parent, name): super(TableBoxPage, self).__init__(parent, name) def _setup(self): self.headerlabeltext = "This is my TableBoxPage" self.ctext = "Subheader 1" self.ptext = "Subheader 2" self.buttonlist = [ {"type": "button", "text": "Add", "callback": self.on_add}, {"type": "button", "text": "Edit", "callback": self.on_edit}, {"type": "button", "text": "Delete", "callback": self.on_del}] # Usually get datalist from the database self.datalist = [{"id": 1, "name": "TestName", "desc": "TestDesc", "date": "02MAR13"}] self.collist = [{"column": "name", "title": "Name"}, {"column": "desc", "title": "Description"}, {"column": "date", "title": "Date"} ] def _header(self): self.hlabel = QtGui.QLabel( "<font size=16 align='center'>%s</font>" % self.headerlabeltext) hbox = QtGui.QHBoxLayout() hbox.addStretch(1) hbox.addWidget(self.hlabel) hbox.addStretch(1) self.dashcheck = QtGui.QCheckBox() self.dashcheck.setChecked(True) self.dashcheck.stateChanged.connect(self.on_dash) hbox.addWidget(self.dashcheck) hbox.addWidget(QtGui.QLabel("My Check Box")) self.layout.addLayout(hbox) self.clabel = QtGui.QLabel(self.ctext) self.plabel = QtGui.QLabel(self.ptext) hbox2 = QtGui.QHBoxLayout() hbox2.addStretch(1) hbox2.addWidget(self.clabel) hbox2.addStretch(1) hbox2.addWidget(self.plabel) hbox2.addStretch(1) self.layout.addLayout(hbox2) self.layout.addStretch(1) def _center(self): self.layout.addWidget(QtGui.QLabel("TableBox: ")) hbox = QtGui.QHBoxLayout() vbox = ButtonVBox(self.buttonlist) hbox.addLayout(vbox) self.tablebox = TableBox(self.datalist, self.collist, self.on_edit) hbox.addWidget(self.tablebox) self.layout.addLayout(hbox) self.layout.addStretch(1) def _refreshbox(self): #self.datalist = from the database #self.tablebox.Update(self.datalist) pass def _footer(self): self.layout.addStretch(1) def refresh(self): self._setup() self._refreshbox() self.show() def on_add(self): # Do Stuff to add to database, and refresh (like make a dialog popup) self._refreshbox() def on_edit(self): myid = self.tablebox.Get() print myid # Dialog for editing self._refreshbox() def on_del(self): myid = self.tablebox.Get() print "Deleting %s" % myid # Delete from database # self.tablebox.DeleteCurrent() self._refreshbox() def on_dash(self): self.dashboard = self.dashcheck.isChecked() print self.dashboard
mit
9,157,603,631,712,562,000
29.432692
94
0.578831
false
Chyroc/WechatSogou
test/test_tools.py
1
1735
# -*- coding: utf-8 -*- import unittest from nose.tools import assert_raises, assert_equal from lxml import etree from wechatsogou.tools import list_or_empty, get_elem_text, replace_html, str_to_dict, replace_space, get_url_param class TestTools(unittest.TestCase): def test_list_or_empty(self): with assert_raises(AssertionError): list_or_empty('test for fun') assert_equal(list_or_empty(['1', '2'], int), 1) assert_equal(list_or_empty(['1', '2']), '1') assert_equal(list_or_empty([], int), 0) assert_equal(list_or_empty([], str), '') assert_equal(list_or_empty([], list), []) def test_get_elem_text(self): html = ''' <div> <div>111</div> <div>222</div> </div> ''' elem = etree.HTML(html) assert_equal(get_elem_text(elem), '111222') def test_replace_html(self): html = '''&#39;&quot;&amp;&yen;amp;&lt;&gt;&nbsp;\\''' assert_equal(replace_html(html), '\'"&¥<> ') html = ['&#39;', '&quot;', '&amp;', '&yen;', 'amp;', '&lt;', '&gt;', '&nbsp;', '\\'] assert_equal(replace_html(html), ['\'', '"', '&', '¥', '', '<', '>', ' ', '']) html = {'&#39;': '&quot;'} assert_equal(replace_html(html), {'\'': '"'}) def test_str_to_dict(self): string = "{'a':'a'}" assert_equal(str_to_dict(string), {'a': 'a'}) def test_replace_space(self): string = 'ss ss' assert_equal(replace_space(string), 'ssss') def test_get_url_param(self): url = 'http://example.com?a=1&b=2&a=3' assert_equal(get_url_param(url), {'a': ['1', '3'], 'b': ['2']}) if __name__ == '__main__': unittest.main()
apache-2.0
5,039,872,663,245,743,000
29.946429
115
0.514137
false
googleads/googleads-python-lib
examples/ad_manager/v202105/report_service/run_ad_exchange_report.py
1
2145
#!/usr/bin/env python # # Copyright 2018 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Runs a report on Ad Exchange data via the Ad Manager API.""" import tempfile # Import appropriate modules from the client library. from googleads import ad_manager from googleads import errors def main(client): # Initialize a DataDownloader. report_downloader = client.GetDataDownloader(version='v202105') # Create report job. report_job = { 'reportQuery': { 'dimensions': ['AD_EXCHANGE_DATE', 'AD_EXCHANGE_COUNTRY_NAME'], 'columns': ['AD_EXCHANGE_AD_REQUESTS', 'AD_EXCHANGE_IMPRESSIONS', 'AD_EXCHANGE_ESTIMATED_REVENUE'], 'dateRangeType': 'LAST_WEEK', 'timeZoneType': 'AD_EXCHANGE', # Run in pacific time 'adxReportCurrency': 'EUR' } } try: # Run the report and wait for it to finish. report_job_id = report_downloader.WaitForReport(report_job) except errors.AdManagerReportError as e: print('Failed to generate report. Error was: %s' % e) # Change to your preferred export format. export_format = 'CSV_DUMP' report_file = tempfile.NamedTemporaryFile(suffix='.csv.gz', delete=False) # Download report data. report_downloader.DownloadReportToFile( report_job_id, export_format, report_file) report_file.close() # Display results. print('Report job with id "%s" downloaded to:\n%s' % ( report_job_id, report_file.name)) if __name__ == '__main__': # Initialize client object. ad_manager_client = ad_manager.AdManagerClient.LoadFromStorage() main(ad_manager_client)
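
# A hedged variant of the reportQuery above: instead of 'LAST_WEEK', an explicit
# window can be requested with a custom date range inside main(). The field names
# below follow common ReportService usage but should be verified against the API
# reference for the version in use; the eight-day window is illustrative.
#
#   end_date = datetime.date.today()
#   start_date = end_date - datetime.timedelta(days=8)
#   report_job['reportQuery']['dateRangeType'] = 'CUSTOM_DATE'
#   report_job['reportQuery']['startDate'] = start_date
#   report_job['reportQuery']['endDate'] = end_date
#
# (this variant also requires 'import datetime' at the top of the module)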
apache-2.0
3,438,026,921,193,039,000
31.5
75
0.697902
false
javiercantero/streamlink
tests/resources/__init__.py
1
1277
import codecs import os.path import six from io import BytesIO try: import xml.etree.cElementTree as ET except ImportError: # pragma: no cover import xml.etree.ElementTree as ET from contextlib import contextmanager __here__ = os.path.abspath(os.path.dirname(__file__)) def _parse_xml(data, strip_ns=False): if six.PY2 and isinstance(data, six.text_type): data = data.encode("utf8") elif six.PY3: data = bytearray(data, "utf8") try: it = ET.iterparse(BytesIO(data)) for _, el in it: if '}' in el.tag and strip_ns: # strip all namespaces el.tag = el.tag.split('}', 1)[1] return it.root except Exception as err: snippet = repr(data) if len(snippet) > 35: snippet = snippet[:35] + " ..." raise ValueError("Unable to parse XML: {0} ({1})".format(err, snippet)) @contextmanager def text(path, encoding="utf8"): with codecs.open(os.path.join(__here__, path), 'r', encoding=encoding) as resource_fh: yield resource_fh @contextmanager def xml(path, encoding="utf8"): with codecs.open(os.path.join(__here__, path), 'r', encoding=encoding) as resource_fh: yield _parse_xml(resource_fh.read(), strip_ns=True)
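
# A minimal usage sketch for the helpers above; the resource paths are purely
# illustrative and do not refer to files known to exist in this test package.
def _example_read_resources():
    # read a text fixture relative to this package
    with text("example/playlist.m3u8") as fh:
        data = fh.read()
    # parse an XML fixture into an ElementTree root with namespaces stripped
    with xml("example/manifest.mpd") as root:
        return data, root.tag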
bsd-2-clause
8,025,877,989,472,806,000
27.377778
90
0.617071
false
adrianliaw/PyCuber
setup.py
1
2055
from setuptools import setup import pycuber as pc long_desc = """ PyCuber ======= PyCuber is a Rubik's Cube package in Python 2/3. -------------------------------------------------- The cube can be revealed as expanded view in the terminal, so it's easy to visualise the cube, just inside the terminal. (Not tested on Windows) .. code-block:: python >>> import pycuber as pc >>> # Create a Cube object >>> mycube = pc.Cube() >>> # Do something at the cube. >>> mycube("R U R' U'") >>> print(mycube) .. image:: http://i.imgur.com/OI4kbn7.png We also provided some useful tools to deal with Rubik's Cube formulae. .. code-block:: python >>> import pycuber as pc >>> # Create a Formula object >>> my_formula = pc.Formula("R U R' U' R' F R2 U' R' U' R U R' F'") >>> # Reversing a Formula >>> my_formula.reverse() >>> print(my_formula) >>> # Mirroring a Formula >>> myalg.mirror("LR") >>> print(my_formula) F R U' R' U R U R2 F' R U R U' R' F' L' U L U' L' U' L2 F L' U' L' U L I'll add some documentations later.""" setup( name = "pycuber", version = pc.__version__, description = "Rubik's Cube in Python", long_description = long_desc, url = "http://github.com/adrianliaw/PyCuber", license = "MIT", author = "Adrian Liaw", author_email = "[email protected]", keywords = ["Rubik's Cube", "rubik", "cube", "solver"], packages = ["pycuber", "pycuber.solver", "pycuber.solver.cfop"], package_dir = {"pycuber":"pycuber"}, classifiers = [ "Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 3", "Topic :: Scientific/Engineering :: Mathematics", ], package_data = { "pycuber.solver.cfop": ["*.csv"], }, )
mit
7,480,977,375,062,680,000
25.346154
72
0.56545
false
Wyn10/Cnchi
cnchi/hardware/hardware.py
1
14779
#!/usr/bin/env python # -*- coding: utf-8 -*- # # hardware.py # # Copyright © 2013-2016 Antergos # # This file is part of Cnchi. # # Cnchi is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # Cnchi is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # The following additional terms are in effect as per Section 7 of the license: # # The preservation of all legal notices and author attributions in # the material or in the Appropriate Legal Notices displayed # by works containing it is required. # # You should have received a copy of the GNU General Public License # along with Cnchi; If not, see <http://www.gnu.org/licenses/>. """ Hardware related packages installation """ import logging import os import subprocess _HARDWARE_PATH = '/usr/share/cnchi/cnchi/hardware' class Hardware(object): """ This is an abstract class. You need to use this as base """ def __init__(self, class_name=None, class_id=None, vendor_id=None, devices=None, priority=-1, enabled=True): self.class_name = class_name self.class_id = class_id self.vendor_id = vendor_id self.devices = devices self.priority = priority self.enabled = enabled self.product_id = "" def get_packages(self): """ Returns all necessary packages to install """ raise NotImplementedError("get_packages is not implemented") @staticmethod def get_conflicts(): """ Returns a list with all conflicting packages """ return [] def post_install(self, dest_dir): """ This method runs commands that need to be run AFTER installing the driver """ pass def pre_install(self, dest_dir): """ This method runs commands that need to run BEFORE installing the driver """ pass def check_device(self, class_id, vendor_id, product_id): """ Checks if the driver supports this device """ if not self.enabled: return False if self.class_id and class_id != self.class_id: return False if self.vendor_id and vendor_id != self.vendor_id: return False if self.devices and product_id not in self.devices: return False return True def detect(self): """ Tries to guess if a device suitable for this driver is present, used in features screen """ if not self.enabled: return False # Get PCI devices try: cmd = ["lspci", "-n"] lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT) lines = lines.decode().split("\n") except subprocess.CalledProcessError as err: logging.warning("Cannot detect hardware components : %s", err.output.decode()) return False for line in lines: if len(line) > 0: class_id = "0x{0}".format(line.split()[1].rstrip(":")[0:2]) if class_id == self.class_id: dev = line.split()[2].split(":") vendor_id = "0x{0}".format(dev[0]) product_id = "0x{0}".format(dev[1]) if vendor_id == self.vendor_id and product_id in self.devices: return True return False @staticmethod def is_proprietary(): """ Proprietary drivers are drivers for your hardware devices that are not freely-available or open source, and must be obtained from the hardware manufacturer. 
""" return False def is_graphic_driver(self): """ Tells us if this is a graphic driver or not """ if self.class_id == "0x03": return True else: return False def get_name(self): """ Returns class name """ return self.class_name def get_priority(self): """ Get module (driver) priority """ return self.priority @staticmethod def chroot(cmd, dest_dir, stdin=None, stdout=None): """ Runs command inside the chroot """ run = ['chroot', dest_dir] for element in cmd: run.append(element) try: proc = subprocess.Popen(run, stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) out = proc.communicate()[0] logging.debug(out.decode()) except OSError as err: logging.error("Error running command: %s", err.strerror) def __str__(self): return "class name: {0}, class id: {1}, vendor id: {2}, product id: {3}".format( self.class_name, self.class_id, self.vendor_id, self.product_id) def call_script(self, script_path, dest_dir): """ Helper function that will run a script """ if os.path.exists(script_path): cmd = [ "/usr/bin/bash", script_path, dest_dir, self.class_name] try: subprocess.check_output(cmd, timeout=300) logging.debug("Script '%s' completed successfully.", script_path) except subprocess.CalledProcessError as err: # Even though Post-install script call has failed we # will try to continue with the installation. logging.error( "Error running %s script, command %s failed: %s", script_path, err.cmd, err.output) except subprocess.TimeoutExpired as timeout_error: logging.error(timeout_error) class HardwareInstall(object): """ This class checks user's hardware If 'use_proprietary_graphic_drivers' is True, this module will try to install the proprietary variants of the graphic drivers available (only if the hardware is detected). For non graphical drivers, the open one is always choosen as default. """ def __init__(self, use_proprietary_graphic_drivers=False): self.use_proprietary_graphic_drivers = use_proprietary_graphic_drivers # All available objects self.all_objects = [] # All objects that support devices found # (can have more than one object for each device) self.objects_found = {} # All objects that are really used self.objects_used = [] dirs = os.listdir(_HARDWARE_PATH) # We scan the folder for py files. # This is unsafe, but we don't care if # somebody wants Cnchi to run code arbitrarily. for filename in dirs: non_valid = ["__init__.py", "hardware.py"] if filename.endswith(".py") and filename not in non_valid: filename = filename[:-len(".py")] name = "" try: if __name__ == "__main__": package = filename else: package = "hardware." + filename name = filename.capitalize() # This instruction is the same as "from package import name" class_name = getattr(__import__(package, fromlist=[name]), "CLASS_NAME") obj = getattr(__import__(package, fromlist=[class_name]), class_name)() self.all_objects.append(obj) except ImportError as err: logging.error("Error importing %s from %s : %s", name, package, err) except Exception as ex: logging.error("Unexpected error importing %s", package) template = "An exception of type {0} occured. Arguments:\n{1!r}" message = template.format(type(ex).__name__, ex.args) logging.error(message) try: # Detect devices devices = self.get_devices() except subprocess.CalledProcessError as err: txt = "Unable to scan devices, command {0} failed: {1}" txt = txt.format(err.cmd, err.output.decode()) logging.error(txt) return logging.debug( "Cnchi will test %d drivers for %d hardware devices", len(self.all_objects), len(devices)) # Find objects that support the devices we've found. 
self.objects_found = {} for obj in self.all_objects: for device in devices: (class_id, vendor_id, product_id) = device check = obj.check_device( class_id=class_id, vendor_id=vendor_id, product_id=product_id) if check: logging.debug( "Driver %s is needed by (%s, %s, %s)", obj.class_name, class_id, vendor_id, product_id) # print("Driver", obj.class_name, "is needed by", class_id, vendor_id, product_id) if device not in self.objects_found: self.objects_found[device] = [obj] else: self.objects_found[device].append(obj) self.objects_used = [] for device in self.objects_found: drivers_available = self.objects_found[device] objects_selected = [] if len(drivers_available) > 1: # We have more than one driver for this device! # We'll need to choose one # Check if there is a proprietary driver is_one_closed = False for driver in drivers_available: if driver.is_proprietary(): is_one_closed = True break for driver in drivers_available: if not driver.is_graphic_driver(): # For non graphical drivers, we choose the open one as default if not driver.is_proprietary(): objects_selected.append(driver) else: # It's a graphic driver # We choose the open one if the user does not want to # use proprietary (or if all the ones available are open) if not self.use_proprietary_graphic_drivers or not is_one_closed: # OK, we choose the open one if not driver.is_proprietary(): objects_selected.append(driver) else: # One of them is proprietary and user wants to use it if driver.is_proprietary(): objects_selected.append(driver) if len(objects_selected) > 1: # We still have two or more options, # let's check their priority priorities = [] for driver in objects_selected: priorities.append(driver.get_priority()) for driver in objects_selected: if driver.get_priority() == max(priorities): self.objects_used.append(driver) break else: self.objects_used.extend(objects_selected) else: # Only one option, add it (it doesn't matter if it's open or not) self.objects_used.append(drivers_available[0]) @staticmethod def get_devices(): """ Gets a list of all pci/usb devices """ devices = [] # Get PCI devices cmd = ["/usr/bin/lspci", "-n"] lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT) lines = lines.decode().split("\n") for line in lines: if len(line) > 0: class_id = line.split()[1].rstrip(":")[0:2] dev = line.split()[2].split(":") devices.append(("0x" + class_id, "0x" + dev[0], "0x" + dev[1])) # Get USB devices cmd = ["/usr/bin/lsusb"] lines = subprocess.check_output(cmd, stderr=subprocess.STDOUT) lines = lines.decode().split("\n") for line in lines: if len(line) > 0: dev = line.split()[5].split(":") devices.append(("0", "0x" + dev[0], "0x" + dev[1])) return devices def get_packages(self): """ Get pacman package list for all detected devices """ packages = [] for obj in self.objects_used: packages.extend(obj.get_packages()) # Remove duplicates (not necessary but it's cleaner) packages = list(set(packages)) return packages def get_conflicts(self): """ Get all conflicting packages for all detected devices """ packages = [] for obj in self.objects_used: packages.extend(obj.get_conflicts()) # Remove duplicates (not necessary but it's cleaner) packages = list(set(packages)) return packages def get_found_driver_names(self): """ Returns a list of found driver names """ driver_names = [] for obj in self.objects_used: driver_names.append(obj.get_name()) return driver_names def pre_install(self, dest_dir): """ Run pre install commands for all detected devices """ for obj in self.objects_used: obj.pre_install(dest_dir) def 
post_install(self, dest_dir): """ Run post install commands for all detected devices """ for obj in self.objects_used: obj.post_install(dest_dir) def test(): """ Test module function """ def _(text): """ Helper function """ return text hardware_install = HardwareInstall(use_proprietary_graphic_drivers=False) # hardware_install = HardwareInstall(use_proprietary_graphic_drivers=True) hardware_pkgs = hardware_install.get_packages() print(hardware_install.get_found_driver_names()) if len(hardware_pkgs) > 0: txt = " ".join(hardware_pkgs) print("Hardware module added these packages :") print(txt) if __name__ == "__main__": test()
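
# A minimal sketch of how a driver module under hardware/ is expected to look,
# based on the abstract Hardware class above. The class id, vendor id, device
# ids and package name are placeholders rather than a real driver definition;
# a real module also exposes a CLASS_NAME constant so HardwareInstall can load it.
class ExampleDriver(Hardware):
    def __init__(self):
        Hardware.__init__(
            self,
            class_name="example-driver",
            class_id="0x03",      # placeholder: graphics device class
            vendor_id="0x0000",   # placeholder vendor id
            devices=["0x0000"],   # placeholder product ids
            priority=1,
            enabled=False)        # disabled so this sketch is never selected

    def get_packages(self):
        return ["example-driver-package"]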
gpl-3.0
-156,210,725,189,507,170
36.507614
102
0.546353
false
jasonzio/azure-linux-extensions
VMBackup/main/Utils/HandlerUtil.py
1
22762
# # Handler library for Linux IaaS # # Copyright 2014 Microsoft Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ JSON def: HandlerEnvironment.json [{ "name": "ExampleHandlerLinux", "seqNo": "seqNo", "version": "1.0", "handlerEnvironment": { "logFolder": "<your log folder location>", "configFolder": "<your config folder location>", "statusFolder": "<your status folder location>", "heartbeatFile": "<your heartbeat file location>", } }] Example ./config/1.settings "{"runtimeSettings":[{"handlerSettings":{"protectedSettingsCertThumbprint":"1BE9A13AA1321C7C515EF109746998BAB6D86FD1","protectedSettings": "MIIByAYJKoZIhvcNAQcDoIIBuTCCAbUCAQAxggFxMIIBbQIBADBVMEExPzA9BgoJkiaJk/IsZAEZFi9XaW5kb3dzIEF6dXJlIFNlcnZpY2UgTWFuYWdlbWVudCBmb3IgR+nhc6VHQTQpCiiV2zANBgkqhkiG9w0BAQEFAASCAQCKr09QKMGhwYe+O4/a8td+vpB4eTR+BQso84cV5KCAnD6iUIMcSYTrn9aveY6v6ykRLEw8GRKfri2d6tvVDggUrBqDwIgzejGTlCstcMJItWa8Je8gHZVSDfoN80AEOTws9Fp+wNXAbSuMJNb8EnpkpvigAWU2v6pGLEFvSKC0MCjDTkjpjqciGMcbe/r85RG3Zo21HLl0xNOpjDs/qqikc/ri43Y76E/Xv1vBSHEGMFprPy/Hwo3PqZCnulcbVzNnaXN3qi/kxV897xGMPPC3IrO7Nc++AT9qRLFI0841JLcLTlnoVG1okPzK9w6ttksDQmKBSHt3mfYV+skqs+EOMDsGCSqGSIb3DQEHATAUBggqhkiG9w0DBwQITgu0Nu3iFPuAGD6/QzKdtrnCI5425fIUy7LtpXJGmpWDUA==","publicSettings":{"port":"3000"}}}]}" Example HeartBeat { "version": 1.0, "heartbeat" : { "status": "ready", "code": 0, "Message": "Sample Handler running. Waiting for a new configuration from user." 
} } Example Status Report: [{"version":"1.0","timestampUTC":"2014-05-29T04:20:13Z","status":{"name":"Chef Extension Handler","operation":"chef-client-run","status":"success","code":0,"formattedMessage":{"lang":"en-US","message":"Chef-client run success"}}}] """ import os import os.path import sys import imp import base64 import json import tempfile import time from os.path import join from Utils.WAAgentUtil import waagent from waagent import LoggerInit import logging import logging.handlers from common import CommonVariables import platform import subprocess import datetime import Status from MachineIdentity import MachineIdentity import ExtensionErrorCodeHelper DateTimeFormat = "%Y-%m-%dT%H:%M:%SZ" class HandlerContext: def __init__(self,name): self._name = name self._version = '0.0' return class HandlerUtility: telemetry_data = [] ExtErrorCode = ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success def __init__(self, log, error, short_name): self._log = log self._error = error self._short_name = short_name self.patching = None self.storageDetailsObj = None self.partitioncount = 0 def _get_log_prefix(self): return '[%s-%s]' % (self._context._name, self._context._version) def _get_current_seq_no(self, config_folder): seq_no = -1 cur_seq_no = -1 freshest_time = None for subdir, dirs, files in os.walk(config_folder): for file in files: try: if(file.endswith('.settings')): cur_seq_no = int(os.path.basename(file).split('.')[0]) if(freshest_time == None): freshest_time = os.path.getmtime(join(config_folder,file)) seq_no = cur_seq_no else: current_file_m_time = os.path.getmtime(join(config_folder,file)) if(current_file_m_time > freshest_time): freshest_time = current_file_m_time seq_no = cur_seq_no except ValueError: continue return seq_no def get_last_seq(self): if(os.path.isfile('mrseq')): seq = waagent.GetFileContents('mrseq') if(seq): return int(seq) return -1 def exit_if_same_seq(self): current_seq = int(self._context._seq_no) last_seq = self.get_last_seq() if(current_seq == last_seq): self.log("the sequence number are same, so skip, current:" + str(current_seq) + "== last:" + str(last_seq)) sys.exit(0) def log(self, message): self._log(self._get_log_prefix() + message) def error(self, message): self._error(self._get_log_prefix() + message) def _parse_config(self, ctxt): config = None try: config = json.loads(ctxt) except: self.error('JSON exception decoding ' + ctxt) if config == None: self.error("JSON error processing settings file:" + ctxt) else: handlerSettings = config['runtimeSettings'][0]['handlerSettings'] if handlerSettings.has_key('protectedSettings') and \ handlerSettings.has_key("protectedSettingsCertThumbprint") and \ handlerSettings['protectedSettings'] is not None and \ handlerSettings["protectedSettingsCertThumbprint"] is not None: protectedSettings = handlerSettings['protectedSettings'] thumb = handlerSettings['protectedSettingsCertThumbprint'] cert = waagent.LibDir + '/' + thumb + '.crt' pkey = waagent.LibDir + '/' + thumb + '.prv' f = tempfile.NamedTemporaryFile(delete=False) f.close() waagent.SetFileContents(f.name,config['runtimeSettings'][0]['handlerSettings']['protectedSettings']) cleartxt = None cleartxt = waagent.RunGetOutput(self.patching.base64_path + " -d " + f.name + " | " + self.patching.openssl_path + " smime -inform DER -decrypt -recip " + cert + " -inkey " + pkey)[1] if cleartxt == None: self.error("OpenSSh decode error using thumbprint " + thumb) do_exit(1, self.operation,'error','1', self.operation + ' Failed') jctxt = '' try: jctxt = 
json.loads(cleartxt) except: self.error('JSON exception decoding ' + cleartxt) handlerSettings['protectedSettings'] = jctxt self.log('Config decoded correctly.') return config def do_parse_context(self, operation): self.operation = operation _context = self.try_parse_context() if not _context: self.log("maybe no new settings file found") sys.exit(0) return _context def try_parse_context(self): self._context = HandlerContext(self._short_name) handler_env = None config = None ctxt = None code = 0 # get the HandlerEnvironment.json. According to the extension handler # spec, it is always in the ./ directory self.log('cwd is ' + os.path.realpath(os.path.curdir)) handler_env_file = './HandlerEnvironment.json' if not os.path.isfile(handler_env_file): self.error("Unable to locate " + handler_env_file) return None ctxt = waagent.GetFileContents(handler_env_file) if ctxt == None : self.error("Unable to read " + handler_env_file) try: handler_env = json.loads(ctxt) except: pass if handler_env == None : self.log("JSON error processing " + handler_env_file) return None if type(handler_env) == list: handler_env = handler_env[0] self._context._name = handler_env['name'] self._context._version = str(handler_env['version']) self._context._config_dir = handler_env['handlerEnvironment']['configFolder'] self._context._log_dir = handler_env['handlerEnvironment']['logFolder'] self._context._log_file = os.path.join(handler_env['handlerEnvironment']['logFolder'],'extension.log') self._change_log_file() self._context._status_dir = handler_env['handlerEnvironment']['statusFolder'] self._context._heartbeat_file = handler_env['handlerEnvironment']['heartbeatFile'] self._context._seq_no = self._get_current_seq_no(self._context._config_dir) if self._context._seq_no < 0: self.error("Unable to locate a .settings file!") return None self._context._seq_no = str(self._context._seq_no) self.log('sequence number is ' + self._context._seq_no) self._context._status_file = os.path.join(self._context._status_dir, self._context._seq_no + '.status') self._context._settings_file = os.path.join(self._context._config_dir, self._context._seq_no + '.settings') self.log("setting file path is" + self._context._settings_file) ctxt = None ctxt = waagent.GetFileContents(self._context._settings_file) if ctxt == None : error_msg = 'Unable to read ' + self._context._settings_file + '. 
' self.error(error_msg) return None else: if(self.operation is not None and self.operation.lower() == "enable"): # we should keep the current status file self.backup_settings_status_file(self._context._seq_no) self._context._config = self._parse_config(ctxt) return self._context def _change_log_file(self): self.log("Change log file to " + self._context._log_file) LoggerInit(self._context._log_file,'/dev/stdout') self._log = waagent.Log self._error = waagent.Error def save_seq(self): self.set_last_seq(self._context._seq_no) self.log("set most recent sequence number to " + self._context._seq_no) def set_last_seq(self,seq): waagent.SetFileContents('mrseq', str(seq)) def get_machine_id(self): machine_id_file = "/etc/azure/machine_identity_FD76C85E-406F-4CFA-8EB0-CF18B123358B" machine_id = "" try: if not os.path.exists(os.path.dirname(machine_id_file)): os.makedirs(os.path.dirname(machine_id_file)) if os.path.exists(machine_id_file): file_pointer = open(machine_id_file, "r") machine_id = file_pointer.readline() file_pointer.close() else: mi = MachineIdentity() machine_id = mi.stored_identity()[1:-1] file_pointer = open(machine_id_file, "w") file_pointer.write(machine_id) file_pointer.close() except: errMsg = 'Failed to retrieve the unique machine id with error: %s, stack trace: %s' % (str(e), traceback.format_exc()) self.log(errMsg, False, 'Error') self.log("Unique Machine Id : {0}".format(machine_id)) return machine_id def get_total_used_size(self): try: df = subprocess.Popen(["df" , "-k"], stdout=subprocess.PIPE) ''' Sample output of the df command Filesystem 1K-blocks Used Available Use% Mounted on udev 1756684 12 1756672 1% /dev tmpfs 352312 420 351892 1% /run /dev/sda1 30202916 2598292 26338592 9% / none 4 0 4 0% /sys/fs/cgroup none 5120 0 5120 0% /run/lock none 1761552 0 1761552 0% /run/shm none 102400 0 102400 0% /run/user none 64 0 64 0% /etc/network/interfaces.dynamic.d tmpfs 4 4 0 100% /etc/ruxitagentproc /dev/sdb1 7092664 16120 6693216 1% /mnt ''' process_wait_time = 30 while(process_wait_time >0 and df.poll() is None): time.sleep(1) process_wait_time -= 1 output = df.stdout.read() output = output.split("\n") total_used = 0 for i in range(1,len(output)-1): device, size, used, available, percent, mountpoint = output[i].split() self.log("Device name : {0} used space in KB : {1}".format(device,used)) total_used = total_used + int(used) #return in KB self.log("Total used space in Bytes : {0}".format(total_used * 1024)) return total_used * 1024,False #Converting into Bytes except: self.log("Unable to fetch total used space") return 0,True def get_storage_details(self): if(self.storageDetailsObj == None): total_size,failure_flag = self.get_total_used_size() self.storageDetailsObj = Status.StorageDetails(self.partitioncount, total_size, False, failure_flag) self.log("partition count : {0}, total used size : {1}, is storage space present : {2}, is size computation failed : {3}".format(self.storageDetailsObj.partitionCount, self.storageDetailsObj.totalUsedSizeInBytes, self.storageDetailsObj.isStoragespacePresent, self.storageDetailsObj.isSizeComputationFailed)) return self.storageDetailsObj def SetExtErrorCode(self, extErrorCode): if self.ExtErrorCode == ExtensionErrorCodeHelper.ExtensionErrorCodeEnum.success : self.ExtErrorCode = extErrorCode def do_status_json(self, operation, status, sub_status, status_code, message, telemetrydata, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj): tstamp = time.strftime(DateTimeFormat, time.gmtime()) formattedMessage = 
Status.FormattedMessage("en-US",message) stat_obj = Status.StatusObj(self._context._name, operation, status, sub_status, status_code, formattedMessage, telemetrydata, self.get_storage_details(), self.get_machine_id(), taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj) top_stat_obj = Status.TopLevelStatus(self._context._version, tstamp, stat_obj) return top_stat_obj def get_extension_version(self): try: cur_dir = os.getcwd() cur_extension = cur_dir.split("/")[-1] extension_version = cur_extension.split("-")[-1] return extension_version except Exception as e: errMsg = 'Failed to retrieve the Extension version with error: %s, stack trace: %s' % (str(e), traceback.format_exc()) self.log(errMsg) extension_version="Unknown" return extension_version def get_wala_version(self): try: file_pointer = open('/var/log/waagent.log','r') waagent_version = '' for line in file_pointer: if 'Azure Linux Agent Version' in line: waagent_version = line.split(':')[-1] if waagent_version[:-1]=="": #for removing the trailing '\n' character waagent_version = self.get_wala_version_from_command() return waagent_version else: waagent_version = waagent_version[:-1].split("-")[-1] #getting only version number return waagent_version except Exception as e: errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc()) self.log(errMsg) waagent_version="Unknown" return waagent_version def get_wala_version_from_command(self): try: cur_dir = os.getcwd() os.chdir("..") p = subprocess.Popen(['/usr/sbin/waagent', '-version'], stdout=subprocess.PIPE) process_wait_time = 30 while(process_wait_time > 0 and p.poll() is None): time.sleep(1) process_wait_time -= 1 out = p.stdout.read() out = out.split(" ") waagent = out[0] waagent_version = waagent.split("-")[-1] #getting only version number os.chdir(cur_dir) return waagent_version except Exception as e: errMsg = 'Failed to retrieve the wala version with error: %s, stack trace: %s' % (str(e), traceback.format_exc()) self.log(errMsg) waagent_version="Unknown" return waagent_version def get_dist_info(self): try: if 'FreeBSD' in platform.system(): release = re.sub('\-.*\Z', '', str(platform.release())) return "FreeBSD",release if 'linux_distribution' in dir(platform): distinfo = list(platform.linux_distribution(full_distribution_name=0)) # remove trailing whitespace in distro name distinfo[0] = distinfo[0].strip() return distinfo[0]+"-"+distinfo[1],platform.release() else: distinfo = platform.dist() return distinfo[0]+"-"+distinfo[1],platform.release() except Exception as e: errMsg = 'Failed to retrieve the distinfo with error: %s, stack trace: %s' % (str(e), traceback.format_exc()) self.log(errMsg) return "Unkonwn","Unkonwn" def substat_new_entry(self,sub_status,code,name,status,formattedmessage): sub_status_obj = Status.SubstatusObj(code,name,status,formattedmessage) sub_status.append(sub_status_obj) return sub_status def timedelta_total_seconds(self, delta): if not hasattr(datetime.timedelta, 'total_seconds'): return delta.days * 86400 + delta.seconds else: return delta.total_seconds() @staticmethod def add_to_telemetery_data(key,value): temp_dict = {} temp_dict["Value"] = value temp_dict["Key"] = key if(temp_dict not in HandlerUtility.telemetry_data): HandlerUtility.telemetry_data.append(temp_dict) def add_telemetry_data(self): os_version,kernel_version = self.get_dist_info() HandlerUtility.add_to_telemetery_data("guestAgentVersion",self.get_wala_version()) 
HandlerUtility.add_to_telemetery_data("extensionVersion",self.get_extension_version()) HandlerUtility.add_to_telemetery_data("osVersion",os_version) HandlerUtility.add_to_telemetery_data("kernelVersion",kernel_version) def do_status_report(self, operation, status, status_code, message, taskId = None, commandStartTimeUTCTicks = None, snapshot_info = None): self.log("{0},{1},{2},{3}".format(operation, status, status_code, message)) sub_stat = [] stat_rept = [] self.add_telemetry_data() vm_health_obj = Status.VmHealthInfoObj((ExtensionErrorCodeHelper.ExtensionErrorCodeHelper.ExtensionErrorCodeDict[self.ExtErrorCode]).value, status_code) stat_rept = self.do_status_json(operation, status, sub_stat, status_code, message, HandlerUtility.telemetry_data, taskId, commandStartTimeUTCTicks, snapshot_info, vm_health_obj) time_delta = datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1) time_span = self.timedelta_total_seconds(time_delta) * 1000 date_place_holder = 'e2794170-c93d-4178-a8da-9bc7fd91ecc0' stat_rept.timestampUTC = date_place_holder date_string = r'\/Date(' + str((int)(time_span)) + r')\/' stat_rept = "[" + json.dumps(stat_rept, cls = Status.ComplexEncoder) + "]" stat_rept = stat_rept.replace(date_place_holder,date_string) # Add Status as sub-status for Status to be written on Status-File sub_stat = self.substat_new_entry(sub_stat,'0',stat_rept,'success',None) if self.get_public_settings()[CommonVariables.vmType] == CommonVariables.VmTypeV2 and CommonVariables.isTerminalStatus(status) : status = CommonVariables.status_success stat_rept_file = self.do_status_json(operation, status, sub_stat, status_code, message, None, taskId, commandStartTimeUTCTicks, None, None) stat_rept_file = "[" + json.dumps(stat_rept_file, cls = Status.ComplexEncoder) + "]" # rename all other status files, or the WALA would report the wrong # status file. # because the wala choose the status file with the highest sequence # number to report. if self._context._status_file: with open(self._context._status_file,'w+') as f: f.write(stat_rept_file) return stat_rept def backup_settings_status_file(self, _seq_no): self.log("current seq no is " + _seq_no) for subdir, dirs, files in os.walk(self._context._config_dir): for file in files: try: if(file.endswith('.settings') and file != (_seq_no + ".settings")): new_file_name = file.replace(".","_") os.rename(join(self._context._config_dir,file), join(self._context._config_dir,new_file_name)) except Exception as e: self.log("failed to rename the status file.") for subdir, dirs, files in os.walk(self._context._status_dir): for file in files: try: if(file.endswith('.status') and file != (_seq_no + ".status")): new_file_name = file.replace(".","_") os.rename(join(self._context._status_dir,file), join(self._context._status_dir, new_file_name)) except Exception as e: self.log("failed to rename the status file.") def do_exit(self, exit_code, operation,status,code,message): try: HandlerUtility.add_to_telemetery_data("extErrorCode",self.ExtErrorCode) self.do_status_report(operation, status,code,message) except Exception as e: self.log("Can't update status: " + str(e)) sys.exit(exit_code) def get_handler_settings(self): return self._context._config['runtimeSettings'][0]['handlerSettings'] def get_protected_settings(self): return self.get_handler_settings().get('protectedSettings') def get_public_settings(self): return self.get_handler_settings().get('publicSettings')
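
# A minimal usage sketch, assuming the surrounding Azure guest-agent environment
# (waagent, HandlerEnvironment.json and a .settings file) is in place; the
# extension short name and status message are illustrative only.
def _example_parse_and_report():
    hutil = HandlerUtility(waagent.Log, waagent.Error, "VMBackupExample")
    hutil.do_parse_context("Enable")
    hutil.do_status_report("Enable", "transitioning", "0", "starting example run")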
apache-2.0
1,905,357,636,479,435,300
44.706827
636
0.603682
false
ollitapa/MMP-MieApi
mmp_mie_api/mieServer.py
1
6744
# # Copyright 2015 VTT Technical Research Center of Finland # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import importlib import argparse import sys import os import Pyro4 import signal from time import sleep from .sshTunnel import SshTunnel from mupif import PyroUtil, JobManager as jb import logging logger = logging.getLogger() from .MMPMie import MMPMie # required firewall settings (on ubuntu): # for computer running daemon (this script) # sudo iptables -A INPUT -p tcp -d 0/0 -s 0/0 --dport 44361 -j ACCEPT # for computer running a nameserver # sudo iptables -A INPUT -p tcp -d 0/0 -s 0/0 --dport 9090 -j ACCEPT parser = argparse.ArgumentParser(description='Start MMP-Mie server. ') parser.add_argument("configFile", help='Configuration filename (py format)', type=str, default='config.json') def main(): # Parse arguments args = parser.parse_args() sys.path.append(os.getcwd()) # Load config conf = args.configFile if conf[-3:] == '.py': conf = conf[:-3] print(conf) tConf = importlib.import_module(conf) # locate nameserver ns = PyroUtil.connectNameServer(nshost=tConf.nshost, nsport=tConf.nsport, hkey=tConf.hkey) # Run a daemon for jobMamager on this machine daemon = PyroUtil.runDaemon(host=tConf.daemonHost, port=tConf.jobManPort, nathost=tConf.nathost, natport=tConf.jobManNatport) # Run job manager on a serverdaemon, ns, appAPIClass, appName, portRange, # jobManWorkDir, serverConfigPath, serverConfigFile, jobMan2CmdPath, # maxJobs=1, jobMancmdCommPort=10000 jobMan = jb.SimpleJobManager2(daemon, ns, appAPIClass=tConf.applicationClass, appName=tConf.jobManName, portRange=tConf.jobManPortsForJobs, jobManWorkDir=tConf.jobManWorkDir, serverConfigPath=tConf.serverConfigPath, serverConfigFile=conf, jobMan2CmdPath=tConf.jobMan2CmdPath, maxJobs=tConf.jobManMaxJobs, jobMancmdCommPort=tConf.jobManSocket) # set up daemon with JobManager uri = daemon.register(jobMan) # register JobManager to nameServer ns.register(tConf.jobManName, uri) logger.debug("Daemon for JobManager runs at " + str(uri)) print(80 * '-') print("Started " + tConf.jobManName) # waits for requests daemon.requestLoop() def runSingleServerInstance(): ''' Run a single instance of the Mie server. 
The configuration file given in args must include the following: server, serverPort, serverNathost, serverNatport, nshost, nsport, appName, hkey ''' # Parse arguments args = parser.parse_args() sys.path.append(os.getcwd()) # Load config conf = args.configFile if conf[-3:] == '.py': conf = conf[:-3] print(conf) cfg = importlib.import_module(conf) app = MMPMie('localhost') PyroUtil.runAppServer(cfg.server, cfg.serverPort, cfg.serverNathost, cfg.serverNatport, cfg.nshost, cfg.nsport, cfg.appName, cfg.hkey, app=app) def runSingleServerInstanceNoNat(): # Parse arguments args = parser.parse_args() sys.path.append(os.getcwd()) # Load config conf = args.configFile if conf[-3:] == '.py': conf = conf[:-3] print(conf) cfg = importlib.import_module(conf) app = MMPMie('localhost') # Creates deamon, register the app in it daemon = Pyro4.Daemon(host=cfg.server, port=cfg.serverPort) uri = daemon.register(app) # Get nameserver ns = Pyro4.locateNS(host=cfg.nshost, port=cfg.nsport, hmac_key=cfg.hkey) # Register app ns.register(cfg.appName, uri) print(uri) # Deamon loops at the end daemon.requestLoop() def runSingleServerInstanceSSHtunnel(): # Parse arguments args = parser.parse_args() sys.path.append(os.getcwd()) # Load config conf = args.configFile if conf[-3:] == '.py': conf = conf[:-3] print(conf) cfg = importlib.import_module(conf) # Load the App app = MMPMie('localhost') # Prepare ssh tunnels pyroTunnel = SshTunnel(localport=cfg.serverPort, remoteport=cfg.serverPort, remoteuser=cfg.hostUserName, remotehost=cfg.server, reverse=True) nsTunnel = SshTunnel(localport=cfg.nsport, remoteport=cfg.nsport, remoteuser=cfg.hostUserName, remotehost=cfg.nshost, reverse=False) try: # Open tunnels pyroTunnel.run() nsTunnel.run() sleep(1) # Creates deamon, register the app in it daemon = Pyro4.Daemon(host='localhost', port=cfg.serverPort) uri = daemon.register(app) print(uri) # Get nameserver ns = Pyro4.locateNS(host='localhost', port=cfg.nsport, hmac_key=cfg.hkey) # Register app ns.register(cfg.appName, uri) print(uri) # Shutdown handler. Remember to close ssh tunnels def signal_handler(signal, frame): print('Shutting down!') pyroTunnel.terminate() nsTunnel.terminate() sys.exit(0) signal.signal(signal.SIGINT, signal_handler) # Deamon loops at the end daemon.requestLoop() except: pyroTunnel.terminate() nsTunnel.terminate() print('terminated') raise if __name__ == '__main__': main()
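
# A hedged sketch of the configuration module that runSingleServerInstance expects
# to be passed as the configFile argument; every value below (hosts, ports, key)
# is a placeholder, and main() additionally needs the jobManager fields
# (daemonHost, jobManPort, jobManName, ...) that are not shown here.
#
#   # mieConfigExample.py
#   server = 'localhost'
#   serverPort = 44361
#   serverNathost = 'localhost'
#   serverNatport = 44361
#   nshost = 'localhost'
#   nsport = 9090
#   appName = 'MMPMie-example'
#   hkey = 'change-me'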
apache-2.0
-9,164,860,421,836,045,000
28.709251
77
0.57755
false
ioram7/keystone-federado-pgid2013
build/sqlalchemy/test/orm/test_lazy_relations.py
1
25372
"""basic tests of lazy loaded attributes""" from test.lib.testing import assert_raises, assert_raises_message import datetime from sqlalchemy import exc as sa_exc from sqlalchemy.orm import attributes, exc as orm_exc import sqlalchemy as sa from test.lib import testing from sqlalchemy import Integer, String, ForeignKey, SmallInteger from sqlalchemy.types import TypeDecorator from test.lib.schema import Table from test.lib.schema import Column from sqlalchemy.orm import mapper, relationship, create_session from test.lib.testing import eq_ from test.lib import fixtures from test.orm import _fixtures class LazyTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_basic(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select') }) sess = create_session() q = sess.query(User) assert [User(id=7, addresses=[Address(id=1, email_address='[email protected]')])] == q.filter(users.c.id == 7).all() def test_needs_parent(self): """test the error raised when parent object is not bound.""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select') }) sess = create_session() q = sess.query(User) u = q.filter(users.c.id == 7).first() sess.expunge(u) assert_raises(orm_exc.DetachedInstanceError, getattr, u, 'addresses') def test_orderby(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties = { 'addresses':relationship(mapper(Address, addresses), lazy='select', order_by=addresses.c.email_address), }) q = create_session().query(User) assert [ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=3, email_address='[email protected]'), Address(id=4, email_address='[email protected]'), Address(id=2, email_address='[email protected]') ]), User(id=9, addresses=[ Address(id=5) ]), User(id=10, addresses=[]) ] == q.all() def test_orderby_secondary(self): """tests that a regular mapper select on a single table can order by a relationship to a second table""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='select'), )) q = create_session().query(User) l = q.filter(users.c.id==addresses.c.user_id).order_by(addresses.c.email_address).all() assert [ User(id=8, addresses=[ Address(id=2, email_address='[email protected]'), Address(id=3, email_address='[email protected]'), Address(id=4, email_address='[email protected]'), ]), User(id=9, addresses=[ Address(id=5) ]), User(id=7, addresses=[ Address(id=1) ]), ] == l def test_orderby_desc(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses) mapper(User, users, properties = dict( addresses = relationship(Address, lazy='select', order_by=[sa.desc(addresses.c.email_address)]), )) sess = create_session() assert [ User(id=7, addresses=[ Address(id=1) ]), User(id=8, addresses=[ Address(id=2, email_address='[email protected]'), Address(id=4, email_address='[email protected]'), Address(id=3, email_address='[email protected]'), ]), User(id=9, addresses=[ 
Address(id=5) ]), User(id=10, addresses=[]) ] == sess.query(User).all() def test_no_orphan(self): """test that a lazily loaded child object is not marked as an orphan""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, cascade="all,delete-orphan", lazy='select') }) mapper(Address, addresses) sess = create_session() user = sess.query(User).get(7) assert getattr(User, 'addresses').hasparent(attributes.instance_state(user.addresses[0]), optimistic=True) assert not sa.orm.class_mapper(Address)._is_orphan(attributes.instance_state(user.addresses[0])) def test_limit(self): """test limit operations combined with lazy-load relationships.""" users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Item, items) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='select') }) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select'), 'orders':relationship(Order, lazy='select') }) sess = create_session() q = sess.query(User) if testing.against('maxdb', 'mssql'): l = q.limit(2).all() assert self.static.user_all_result[:2] == l else: l = q.limit(2).offset(1).all() assert self.static.user_all_result[1:3] == l def test_distinct(self): users, items, order_items, orders, Item, User, Address, Order, addresses = (self.tables.users, self.tables.items, self.tables.order_items, self.tables.orders, self.classes.Item, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) mapper(Item, items) mapper(Order, orders, properties={ 'items':relationship(Item, secondary=order_items, lazy='select') }) mapper(User, users, properties={ 'addresses':relationship(mapper(Address, addresses), lazy='select'), 'orders':relationship(Order, lazy='select') }) sess = create_session() q = sess.query(User) # use a union all to get a lot of rows to join against u2 = users.alias('u2') s = sa.union_all(u2.select(use_labels=True), u2.select(use_labels=True), u2.select(use_labels=True)).alias('u') l = q.filter(s.c.u2_id==User.id).order_by(User.id).distinct().all() eq_(self.static.user_all_result, l) def test_uselist_false_warning(self): """test that multiple rows received by a uselist=False raises a warning.""" User, users, orders, Order = (self.classes.User, self.tables.users, self.tables.orders, self.classes.Order) mapper(User, users, properties={ 'order':relationship(Order, uselist=False) }) mapper(Order, orders) s = create_session() u1 = s.query(User).filter(User.id==7).one() assert_raises(sa.exc.SAWarning, getattr, u1, 'order') def test_one_to_many_scalar(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(User, users, properties = dict( address = relationship(mapper(Address, addresses), lazy='select', uselist=False) )) q = create_session().query(User) l = q.filter(users.c.id == 7).all() assert [User(id=7, address=Address(id=1))] == l def test_many_to_one_binds(self): Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) mapper(Address, addresses, primary_key=[addresses.c.user_id, addresses.c.email_address]) mapper(User, users, properties = dict( 
address = relationship(Address, uselist=False, primaryjoin=sa.and_(users.c.id==addresses.c.user_id, addresses.c.email_address=='[email protected]') ) )) q = create_session().query(User) eq_( [ User(id=7, address=None), User(id=8, address=Address(id=3)), User(id=9, address=None), User(id=10, address=None), ], list(q) ) def test_double(self): """tests lazy loading with two relationships simulatneously, from the same table, using aliases. """ users, orders, User, Address, Order, addresses = (self.tables.users, self.tables.orders, self.classes.User, self.classes.Address, self.classes.Order, self.tables.addresses) openorders = sa.alias(orders, 'openorders') closedorders = sa.alias(orders, 'closedorders') mapper(Address, addresses) mapper(Order, orders) open_mapper = mapper(Order, openorders, non_primary=True) closed_mapper = mapper(Order, closedorders, non_primary=True) mapper(User, users, properties = dict( addresses = relationship(Address, lazy = True), open_orders = relationship(open_mapper, primaryjoin = sa.and_(openorders.c.isopen == 1, users.c.id==openorders.c.user_id), lazy='select'), closed_orders = relationship(closed_mapper, primaryjoin = sa.and_(closedorders.c.isopen == 0, users.c.id==closedorders.c.user_id), lazy='select') )) q = create_session().query(User) assert [ User( id=7, addresses=[Address(id=1)], open_orders = [Order(id=3)], closed_orders = [Order(id=1), Order(id=5)] ), User( id=8, addresses=[Address(id=2), Address(id=3), Address(id=4)], open_orders = [], closed_orders = [] ), User( id=9, addresses=[Address(id=5)], open_orders = [Order(id=4)], closed_orders = [Order(id=2)] ), User(id=10) ] == q.all() sess = create_session() user = sess.query(User).get(7) assert [Order(id=1), Order(id=5)] == create_session().query(closed_mapper).with_parent(user, property='closed_orders').all() assert [Order(id=3)] == create_session().query(open_mapper).with_parent(user, property='open_orders').all() def test_many_to_many(self): keywords, items, item_keywords, Keyword, Item = (self.tables.keywords, self.tables.items, self.tables.item_keywords, self.classes.Keyword, self.classes.Item) mapper(Keyword, keywords) mapper(Item, items, properties = dict( keywords = relationship(Keyword, secondary=item_keywords, lazy='select'), )) q = create_session().query(Item) assert self.static.item_keyword_result == q.all() assert self.static.item_keyword_result[0:2] == q.join('keywords').filter(keywords.c.name == 'red').all() def test_uses_get(self): """test that a simple many-to-one lazyload optimizes to use query.get().""" Address, addresses, users, User = (self.classes.Address, self.tables.addresses, self.tables.users, self.classes.User) for pj in ( None, users.c.id==addresses.c.user_id, addresses.c.user_id==users.c.id ): mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users), lazy='select', primaryjoin=pj) )) sess = create_session() # load address a1 = sess.query(Address).filter_by(email_address="[email protected]").one() # load user that is attached to the address u1 = sess.query(User).get(8) def go(): # lazy load of a1.user should get it from the session assert a1.user is u1 self.assert_sql_count(testing.db, go, 0) sa.orm.clear_mappers() def test_uses_get_compatible_types(self): """test the use_get optimization with compatible but non-identical types""" User, Address = self.classes.User, self.classes.Address class IntDecorator(TypeDecorator): impl = Integer class SmallintDecorator(TypeDecorator): impl = SmallInteger class SomeDBInteger(sa.Integer): pass for tt in [ Integer, 
SmallInteger, IntDecorator, SmallintDecorator, SomeDBInteger, ]: m = sa.MetaData() users = Table('users', m, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('name', String(30), nullable=False), ) addresses = Table('addresses', m, Column('id', Integer, primary_key=True, test_needs_autoincrement=True), Column('user_id', tt, ForeignKey('users.id')), Column('email_address', String(50), nullable=False), ) mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users)) )) sess = create_session(bind=testing.db) # load address a1 = sess.query(Address).filter_by(email_address="[email protected]").one() # load user that is attached to the address u1 = sess.query(User).get(8) def go(): # lazy load of a1.user should get it from the session assert a1.user is u1 self.assert_sql_count(testing.db, go, 0) sa.orm.clear_mappers() def test_many_to_one(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(Address, addresses, properties = dict( user = relationship(mapper(User, users), lazy='select') )) sess = create_session() q = sess.query(Address) a = q.filter(addresses.c.id==1).one() assert a.user is not None u1 = sess.query(User).get(7) assert a.user is u1 def test_backrefs_dont_lazyload(self): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user') }) mapper(Address, addresses) sess = create_session() ad = sess.query(Address).filter_by(id=1).one() assert ad.user.id == 7 def go(): ad.user = None assert ad.user is None self.assert_sql_count(testing.db, go, 0) u1 = sess.query(User).filter_by(id=7).one() def go(): assert ad not in u1.addresses self.assert_sql_count(testing.db, go, 1) sess.expire(u1, ['addresses']) def go(): assert ad in u1.addresses self.assert_sql_count(testing.db, go, 1) sess.expire(u1, ['addresses']) ad2 = Address() def go(): ad2.user = u1 assert ad2.user is u1 self.assert_sql_count(testing.db, go, 0) def go(): assert ad2 in u1.addresses self.assert_sql_count(testing.db, go, 1) class GetterStateTest(_fixtures.FixtureTest): """test lazyloader on non-existent attribute returns expected attribute symbols, maintain expected state""" run_inserts = None def _u_ad_fixture(self, populate_user): users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users, properties={ 'addresses':relationship(Address, backref='user') }) mapper(Address, addresses) sess = create_session() a1 = Address(email_address='a1') sess.add(a1) if populate_user: a1.user = User(name='ed') sess.flush() if populate_user: sess.expire_all() return User, Address, sess, a1 def test_get_empty_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), attributes.NEVER_SET ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_history_empty_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), ((), (), ()) ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_get_empty_passive_no_initialize(self): User, Address, 
sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.PASSIVE_NO_RESULT ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_history_empty_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(False) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.HISTORY_BLANK ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_get_populated_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.PASSIVE_NO_RESULT ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_history_populated_passive_no_initialize(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_NO_INITIALIZE), attributes.HISTORY_BLANK ) assert 'user_id' not in a1.__dict__ assert 'user' not in a1.__dict__ def test_get_populated_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), User(name='ed') ) def test_history_populated_passive_return_never_set(self): User, Address, sess, a1 = self._u_ad_fixture(True) eq_( Address.user.impl.get_history( attributes.instance_state(a1), attributes.instance_dict(a1), passive=attributes.PASSIVE_RETURN_NEVER_SET), ((), [User(name='ed'), ], ()) ) class M2OGetTest(_fixtures.FixtureTest): run_inserts = 'once' run_deletes = None def test_m2o_noload(self): """test that a NULL foreign key doesn't trigger a lazy load""" users, Address, addresses, User = (self.tables.users, self.classes.Address, self.tables.addresses, self.classes.User) mapper(User, users) mapper(Address, addresses, properties={ 'user':relationship(User) }) sess = create_session() ad1 = Address(email_address='somenewaddress', id=12) sess.add(ad1) sess.flush() sess.expunge_all() ad2 = sess.query(Address).get(1) ad3 = sess.query(Address).get(ad1.id) def go(): # one lazy load assert ad2.user.name == 'jack' # no lazy load assert ad3.user is None self.assert_sql_count(testing.db, go, 1) class CorrelatedTest(fixtures.MappedTest): @classmethod def define_tables(self, meta): Table('user_t', meta, Column('id', Integer, primary_key=True), Column('name', String(50))) Table('stuff', meta, Column('id', Integer, primary_key=True), Column('date', sa.Date), Column('user_id', Integer, ForeignKey('user_t.id'))) @classmethod def insert_data(cls): stuff, user_t = cls.tables.stuff, cls.tables.user_t user_t.insert().execute( {'id':1, 'name':'user1'}, {'id':2, 'name':'user2'}, {'id':3, 'name':'user3'}) stuff.insert().execute( {'id':1, 'user_id':1, 'date':datetime.date(2007, 10, 15)}, {'id':2, 'user_id':1, 'date':datetime.date(2007, 12, 15)}, {'id':3, 'user_id':1, 'date':datetime.date(2007, 11, 15)}, {'id':4, 'user_id':2, 'date':datetime.date(2008, 1, 15)}, {'id':5, 'user_id':3, 'date':datetime.date(2007, 6, 15)}) def test_correlated_lazyload(self): stuff, user_t = self.tables.stuff, self.tables.user_t class User(fixtures.ComparableEntity): pass class Stuff(fixtures.ComparableEntity): pass 
mapper(Stuff, stuff) stuff_view = sa.select([stuff.c.id]).where(stuff.c.user_id==user_t.c.id).correlate(user_t).order_by(sa.desc(stuff.c.date)).limit(1) mapper(User, user_t, properties={ 'stuff':relationship(Stuff, primaryjoin=sa.and_(user_t.c.id==stuff.c.user_id, stuff.c.id==(stuff_view.as_scalar()))) }) sess = create_session() eq_(sess.query(User).all(), [ User(name='user1', stuff=[Stuff(date=datetime.date(2007, 12, 15), id=2)]), User(name='user2', stuff=[Stuff(id=4, date=datetime.date(2008, 1 , 15))]), User(name='user3', stuff=[Stuff(id=5, date=datetime.date(2007, 6, 15))]) ])
apache-2.0
5,355,492,742,495,545,000
35.559078
157
0.528102
false
edcast-inc/edx-platform-edcast
lms/envs/test.py
1
16667
# -*- coding: utf-8 -*- """ This config file runs the simplest dev environment using sqlite, and db-based sessions. Assumes structure: /envroot/ /db # This is where it'll write the database file /edx-platform # The location of this repo /log # Where we're going to write log files """ # We intentionally define lots of variables that aren't used, and # want to import all variables from base settings files # pylint: disable=wildcard-import, unused-wildcard-import # Pylint gets confused by path.py instances, which report themselves as class # objects. As a result, pylint applies the wrong regex in validating names, # and throws spurious errors. Therefore, we disable invalid-name checking. # pylint: disable=invalid-name from .common import * import os from path import path from uuid import uuid4 from warnings import filterwarnings, simplefilter from openedx.core.lib.tempdir import mkdtemp_clean # Silence noisy logs to make troubleshooting easier when tests fail. import logging LOG_OVERRIDES = [ ('factory.generate', logging.ERROR), ('factory.containers', logging.ERROR), ] for log_name, log_level in LOG_OVERRIDES: logging.getLogger(log_name).setLevel(log_level) # mongo connection settings MONGO_PORT_NUM = int(os.environ.get('EDXAPP_TEST_MONGO_PORT', '27017')) MONGO_HOST = os.environ.get('EDXAPP_TEST_MONGO_HOST', 'localhost') os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = 'localhost:8000-9000' THIS_UUID = uuid4().hex[:5] # can't test start dates with this True, but on the other hand, # can test everything else :) FEATURES['DISABLE_START_DATES'] = True # Most tests don't use the discussion service, so we turn it off to speed them up. # Tests that do can enable this flag, but must use the UrlResetMixin class to force urls.py # to reload. For consistency in user-experience, keep the value of this setting in sync with # the one in cms/envs/test.py FEATURES['ENABLE_DISCUSSION_SERVICE'] = False FEATURES['ENABLE_SERVICE_STATUS'] = True FEATURES['ENABLE_HINTER_INSTRUCTOR_VIEW'] = True FEATURES['ENABLE_INSTRUCTOR_LEGACY_DASHBOARD'] = True FEATURES['ENABLE_SHOPPING_CART'] = False FEATURES['ENABLE_VERIFIED_CERTIFICATES'] = True # Enable this feature for course staff grade downloads, to enable acceptance tests FEATURES['ENABLE_S3_GRADE_DOWNLOADS'] = True FEATURES['ALLOW_COURSE_STAFF_GRADE_DOWNLOADS'] = True # Toggles embargo on for testing FEATURES['EMBARGO'] = True FEATURES['ENABLE_COMBINED_LOGIN_REGISTRATION'] = True # Need wiki for courseware views to work. TODO (vshnayder): shouldn't need it. WIKI_ENABLED = True # Enable a parental consent age limit for testing PARENTAL_CONSENT_AGE_LIMIT = 13 # Makes the tests run much faster... SOUTH_TESTS_MIGRATE = False # To disable migrations and use syncdb instead # Nose Test Runner TEST_RUNNER = 'django_nose.NoseTestSuiteRunner' _SYSTEM = 'lms' _REPORT_DIR = REPO_ROOT / 'reports' / _SYSTEM _REPORT_DIR.makedirs_p() _NOSEID_DIR = REPO_ROOT / '.testids' / _SYSTEM _NOSEID_DIR.makedirs_p() NOSE_ARGS = [ '--id-file', _NOSEID_DIR / 'noseids', '--xunit-file', _REPORT_DIR / 'nosetests.xml', ] # Local Directories TEST_ROOT = path("test_root") # Want static files in the same dir for running on jenkins. STATIC_ROOT = TEST_ROOT / "staticfiles" STATUS_MESSAGE_PATH = TEST_ROOT / "status_message.json" COURSES_ROOT = TEST_ROOT / "data" DATA_DIR = COURSES_ROOT COMMON_TEST_DATA_ROOT = COMMON_ROOT / "test" / "data" # Where the content data is checked out. This may not exist on jenkins. 
GITHUB_REPO_ROOT = ENV_ROOT / "data" USE_I18N = True LANGUAGE_CODE = 'en' # tests assume they will get English. XQUEUE_INTERFACE = { "url": "http://sandbox-xqueue.edx.org", "django_auth": { "username": "lms", "password": "***REMOVED***" }, "basic_auth": ('anant', 'agarwal'), } XQUEUE_WAITTIME_BETWEEN_REQUESTS = 5 # seconds # Don't rely on a real staff grading backend MOCK_STAFF_GRADING = True MOCK_PEER_GRADING = True # TODO (cpennington): We need to figure out how envs/test.py can inject things # into common.py so that we don't have to repeat this sort of thing STATICFILES_DIRS = [ COMMON_ROOT / "static", PROJECT_ROOT / "static", ] STATICFILES_DIRS += [ (course_dir, COMMON_TEST_DATA_ROOT / course_dir) for course_dir in os.listdir(COMMON_TEST_DATA_ROOT) if os.path.isdir(COMMON_TEST_DATA_ROOT / course_dir) ] # Avoid having to run collectstatic before the unit test suite # If we don't add these settings, then Django templates that can't # find pipelined assets will raise a ValueError. # http://stackoverflow.com/questions/12816941/unit-testing-with-django-pipeline STATICFILES_STORAGE = 'pipeline.storage.NonPackagingPipelineStorage' PIPELINE_ENABLED = False update_module_store_settings( MODULESTORE, module_store_options={ 'fs_root': TEST_ROOT / "data", }, xml_store_options={ 'data_dir': mkdtemp_clean(dir=TEST_ROOT), # never inadvertently load all the XML courses }, doc_store_settings={ 'host': MONGO_HOST, 'port': MONGO_PORT_NUM, 'db': 'test_xmodule', 'collection': 'test_modulestore{0}'.format(THIS_UUID), }, ) CONTENTSTORE = { 'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore', 'DOC_STORE_CONFIG': { 'host': MONGO_HOST, 'db': 'xcontent', 'port': MONGO_PORT_NUM, } } DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': TEST_ROOT / 'db' / 'edx.db' }, } CACHES = { # This is the cache used for most things. # In staging/prod envs, the sessions also live here. 'default': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'edx_loc_mem_cache', 'KEY_FUNCTION': 'util.memcache.safe_key', }, # The general cache is what you get if you use our util.cache. It's used for # things like caching the course.xml file for different A/B test groups. # We set it to be a DummyCache to force reloading of course.xml in dev. # In staging environments, we would grab VERSION from data uploaded by the # push process. 
'general': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', 'KEY_PREFIX': 'general', 'VERSION': 4, 'KEY_FUNCTION': 'util.memcache.safe_key', }, 'mongo_metadata_inheritance': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': os.path.join(tempfile.gettempdir(), 'mongo_metadata_inheritance'), 'TIMEOUT': 300, 'KEY_FUNCTION': 'util.memcache.safe_key', }, 'loc_cache': { 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'edx_location_mem_cache', }, 'course_structure_cache': { 'BACKEND': 'django.core.cache.backends.dummy.DummyCache', }, } # Dummy secret key for dev SECRET_KEY = '85920908f28904ed733fe576320db18cabd7b6cd' # hide ratelimit warnings while running tests filterwarnings('ignore', message='No request passed to the backend, unable to rate-limit') # Ignore deprecation warnings (so we don't clutter Jenkins builds/production) # https://docs.python.org/2/library/warnings.html#the-warnings-filter # Change to "default" to see the first instance of each hit # or "error" to convert all into errors simplefilter('ignore') ############################# SECURITY SETTINGS ################################ # Default to advanced security in common.py, so tests can reset here to use # a simpler security model FEATURES['ENFORCE_PASSWORD_POLICY'] = False FEATURES['ENABLE_MAX_FAILED_LOGIN_ATTEMPTS'] = False FEATURES['SQUELCH_PII_IN_LOGS'] = False FEATURES['PREVENT_CONCURRENT_LOGINS'] = False FEATURES['ADVANCED_SECURITY'] = False PASSWORD_MIN_LENGTH = None PASSWORD_COMPLEXITY = {} ######### Third-party auth ########## FEATURES['ENABLE_THIRD_PARTY_AUTH'] = True AUTHENTICATION_BACKENDS = ( 'social.backends.google.GoogleOAuth2', 'social.backends.linkedin.LinkedinOAuth2', 'social.backends.facebook.FacebookOAuth2', 'third_party_auth.dummy.DummyBackend', 'third_party_auth.saml.SAMLAuthBackend', ) + AUTHENTICATION_BACKENDS ################################## OPENID ##################################### FEATURES['AUTH_USE_OPENID'] = True FEATURES['AUTH_USE_OPENID_PROVIDER'] = True ################################## SHIB ####################################### FEATURES['AUTH_USE_SHIB'] = True FEATURES['SHIB_DISABLE_TOS'] = True FEATURES['RESTRICT_ENROLL_BY_REG_METHOD'] = True OPENID_CREATE_USERS = False OPENID_UPDATE_DETAILS_FROM_SREG = True OPENID_USE_AS_ADMIN_LOGIN = False OPENID_PROVIDER_TRUSTED_ROOTS = ['*'] ############################## OAUTH2 Provider ################################ FEATURES['ENABLE_OAUTH2_PROVIDER'] = True ########################### External REST APIs ################################# FEATURES['ENABLE_MOBILE_REST_API'] = True FEATURES['ENABLE_MOBILE_SOCIAL_FACEBOOK_FEATURES'] = True FEATURES['ENABLE_VIDEO_ABSTRACTION_LAYER_API'] = True FEATURES['ENABLE_COURSE_BLOCKS_NAVIGATION_API'] = True FEATURES['ENABLE_RENDER_XBLOCK_API'] = True ###################### Payment ##############################3 # Enable fake payment processing page FEATURES['ENABLE_PAYMENT_FAKE'] = True # Configure the payment processor to use the fake processing page # Since both the fake payment page and the shoppingcart app are using # the same settings, we can generate this randomly and guarantee # that they are using the same secret. 
from random import choice from string import letters, digits, punctuation # pylint: disable=deprecated-module RANDOM_SHARED_SECRET = ''.join( choice(letters + digits + punctuation) for x in range(250) ) CC_PROCESSOR_NAME = 'CyberSource2' CC_PROCESSOR['CyberSource2']['SECRET_KEY'] = RANDOM_SHARED_SECRET CC_PROCESSOR['CyberSource2']['ACCESS_KEY'] = "0123456789012345678901" CC_PROCESSOR['CyberSource2']['PROFILE_ID'] = "edx" CC_PROCESSOR['CyberSource2']['PURCHASE_ENDPOINT'] = "/shoppingcart/payment_fake" FEATURES['STORE_BILLING_INFO'] = True ########################### SYSADMIN DASHBOARD ################################ FEATURES['ENABLE_SYSADMIN_DASHBOARD'] = True GIT_REPO_DIR = TEST_ROOT / "course_repos" ################################# CELERY ###################################### CELERY_ALWAYS_EAGER = True CELERY_RESULT_BACKEND = 'djcelery.backends.cache:CacheBackend' ######################### MARKETING SITE ############################### MKTG_URL_LINK_MAP = { 'ABOUT': 'about', 'CONTACT': 'contact', 'FAQ': 'help', 'COURSES': 'courses', 'ROOT': 'root', 'TOS': 'tos', 'HONOR': 'honor', 'PRIVACY': 'privacy', 'JOBS': 'jobs', 'NEWS': 'news', 'PRESS': 'press', 'BLOG': 'blog', 'DONATE': 'donate', # Verified Certificates 'WHAT_IS_VERIFIED_CERT': 'verified-certificate', } ############################ STATIC FILES ############################# DEFAULT_FILE_STORAGE = 'django.core.files.storage.FileSystemStorage' MEDIA_ROOT = TEST_ROOT / "uploads" MEDIA_URL = "/static/uploads/" STATICFILES_DIRS.append(("uploads", MEDIA_ROOT)) _NEW_STATICFILES_DIRS = [] # Strip out any static files that aren't in the repository root # so that the tests can run with only the edx-platform directory checked out for static_dir in STATICFILES_DIRS: # Handle both tuples and non-tuple directory definitions try: _, data_dir = static_dir except ValueError: data_dir = static_dir if data_dir.startswith(REPO_ROOT): _NEW_STATICFILES_DIRS.append(static_dir) STATICFILES_DIRS = _NEW_STATICFILES_DIRS FILE_UPLOAD_TEMP_DIR = TEST_ROOT / "uploads" FILE_UPLOAD_HANDLERS = ( 'django.core.files.uploadhandler.MemoryFileUploadHandler', 'django.core.files.uploadhandler.TemporaryFileUploadHandler', ) ########################### Server Ports ################################### # These ports are carefully chosen so that if the browser needs to # access them, they will be available through the SauceLabs SSH tunnel LETTUCE_SERVER_PORT = 8003 XQUEUE_PORT = 8040 YOUTUBE_PORT = 8031 LTI_PORT = 8765 VIDEO_SOURCE_PORT = 8777 ################### Make tests faster #http://slacy.com/blog/2012/04/make-your-tests-faster-in-django-1-4/ PASSWORD_HASHERS = ( # 'django.contrib.auth.hashers.PBKDF2PasswordHasher', # 'django.contrib.auth.hashers.PBKDF2SHA1PasswordHasher', # 'django.contrib.auth.hashers.BCryptPasswordHasher', 'django.contrib.auth.hashers.SHA1PasswordHasher', 'django.contrib.auth.hashers.MD5PasswordHasher', # 'django.contrib.auth.hashers.CryptPasswordHasher', ) ### This enables the Metrics tab for the Instructor dashboard ########### FEATURES['CLASS_DASHBOARD'] = True ################### Make tests quieter # OpenID spews messages like this to stderr, we don't need to see them: # Generated checkid_setup request to http://testserver/openid/provider/login/ with assocication {HMAC-SHA1}{51d49995}{s/kRmA==} import openid.oidutil openid.oidutil.log = lambda message, level=0: None PLATFORM_NAME = "edX" SITE_NAME = "edx.org" # set up some testing for microsites MICROSITE_CONFIGURATION = { "test_microsite": { "domain_prefix": "testmicrosite", "university": 
"test_microsite", "platform_name": "Test Microsite", "logo_image_url": "test_microsite/images/header-logo.png", "email_from_address": "[email protected]", "payment_support_email": "[email protected]", "ENABLE_MKTG_SITE": False, "SITE_NAME": "test_microsite.localhost", "course_org_filter": "TestMicrositeX", "course_about_show_social_links": False, "css_overrides_file": "test_microsite/css/test_microsite.css", "show_partners": False, "show_homepage_promo_video": False, "course_index_overlay_text": "This is a Test Microsite Overlay Text.", "course_index_overlay_logo_file": "test_microsite/images/header-logo.png", "homepage_overlay_html": "<h1>This is a Test Microsite Overlay HTML</h1>", "ALWAYS_REDIRECT_HOMEPAGE_TO_DASHBOARD_FOR_AUTHENTICATED_USER": False, "COURSE_CATALOG_VISIBILITY_PERMISSION": "see_in_catalog", "COURSE_ABOUT_VISIBILITY_PERMISSION": "see_about_page", "ENABLE_SHOPPING_CART": True, "ENABLE_PAID_COURSE_REGISTRATION": True, "SESSION_COOKIE_DOMAIN": "test_microsite.localhost", }, "default": { "university": "default_university", "domain_prefix": "www", } } MICROSITE_ROOT_DIR = COMMON_ROOT / 'test' / 'test_microsites' MICROSITE_TEST_HOSTNAME = 'testmicrosite.testserver' FEATURES['USE_MICROSITES'] = True # add extra template directory for test-only templates MAKO_TEMPLATES['main'].extend([ COMMON_ROOT / 'test' / 'templates' ]) # Setting for the testing of Software Secure Result Callback VERIFY_STUDENT["SOFTWARE_SECURE"] = { "API_ACCESS_KEY": "BBBBBBBBBBBBBBBBBBBB", "API_SECRET_KEY": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC", } VIDEO_CDN_URL = { 'CN': 'http://api.xuetangx.com/edx/video?s3_url=' } ######### dashboard git log settings ######### MONGODB_LOG = { 'host': MONGO_HOST, 'port': MONGO_PORT_NUM, 'user': '', 'password': '', 'db': 'xlog', } # Enable EdxNotes for tests. FEATURES['ENABLE_EDXNOTES'] = True # Enable teams feature for tests. FEATURES['ENABLE_TEAMS'] = True # Add milestones to Installed apps for testing INSTALLED_APPS += ('milestones', 'openedx.core.djangoapps.call_stack_manager') # Enable courseware search for tests FEATURES['ENABLE_COURSEWARE_SEARCH'] = True # Enable dashboard search for tests FEATURES['ENABLE_DASHBOARD_SEARCH'] = True # Use MockSearchEngine as the search engine for test scenario SEARCH_ENGINE = "search.tests.mock_search_engine.MockSearchEngine" FACEBOOK_APP_SECRET = "Test" FACEBOOK_APP_ID = "Test" FACEBOOK_API_VERSION = "v2.2" ######### custom courses ######### INSTALLED_APPS += ('ccx',) FEATURES['CUSTOM_COURSES_EDX'] = True # Set dummy values for profile image settings. PROFILE_IMAGE_BACKEND = { 'class': 'storages.backends.overwrite.OverwriteStorage', 'options': { 'location': MEDIA_ROOT, 'base_url': 'http://example-storage.com/profile-images/', }, } PROFILE_IMAGE_DEFAULT_FILENAME = 'default' PROFILE_IMAGE_DEFAULT_FILE_EXTENSION = 'png' PROFILE_IMAGE_SECRET_KEY = 'secret' PROFILE_IMAGE_MAX_BYTES = 1024 * 1024 PROFILE_IMAGE_MIN_BYTES = 100 # Enable the LTI provider feature for testing FEATURES['ENABLE_LTI_PROVIDER'] = True INSTALLED_APPS += ('lti_provider',) AUTHENTICATION_BACKENDS += ('lti_provider.users.LtiBackend',)
agpl-3.0
8,945,403,839,853,512,000
32.602823
129
0.670907
false