repo_name (stringlengths 5–92) | path (stringlengths 4–232) | copies (stringclasses, 19 values) | size (stringlengths 4–7) | content (stringlengths 721–1.04M) | license (stringclasses, 15 values) | hash (int64, -9,223,277,421,539,062,000 to 9,223,102,107B) | line_mean (float64, 6.51–99.9) | line_max (int64, 15–997) | alpha_frac (float64, 0.25–0.97) | autogenerated (bool, 1 class) |
---|---|---|---|---|---|---|---|---|---|---|
ratschlab/ASP | examples/undocumented/python_modular/kernel_combined_modular.py | 1 | 1900 | from tools.load import LoadMatrix
from numpy import double

lm = LoadMatrix()
traindat = double(lm.load_numbers('../data/fm_train_real.dat'))
testdat = double(lm.load_numbers('../data/fm_test_real.dat'))
traindna = lm.load_dna('../data/fm_train_dna.dat')
testdna = lm.load_dna('../data/fm_test_dna.dat')

parameter_list = [[traindat, testdat, traindna, testdna], [traindat, testdat, traindna, testdna]]


def kernel_combined_modular(fm_train_real=traindat, fm_test_real=testdat, fm_train_dna=traindna, fm_test_dna=testdna):
    from shogun.Kernel import CombinedKernel, GaussianKernel, FixedDegreeStringKernel, LocalAlignmentStringKernel
    from shogun.Features import RealFeatures, StringCharFeatures, CombinedFeatures, DNA

    kernel = CombinedKernel()
    feats_train = CombinedFeatures()
    feats_test = CombinedFeatures()

    # First subkernel: Gaussian kernel on the real-valued features.
    subkfeats_train = RealFeatures(fm_train_real)
    subkfeats_test = RealFeatures(fm_test_real)
    subkernel = GaussianKernel(10, 1.1)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)

    # Second subkernel: fixed-degree string kernel on the DNA features.
    subkfeats_train = StringCharFeatures(fm_train_dna, DNA)
    subkfeats_test = StringCharFeatures(fm_test_dna, DNA)
    degree = 3
    subkernel = FixedDegreeStringKernel(10, degree)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)

    # Third subkernel: local alignment string kernel on the DNA features.
    subkfeats_train = StringCharFeatures(fm_train_dna, DNA)
    subkfeats_test = StringCharFeatures(fm_test_dna, DNA)
    subkernel = LocalAlignmentStringKernel(10)
    feats_train.append_feature_obj(subkfeats_train)
    feats_test.append_feature_obj(subkfeats_test)
    kernel.append_kernel(subkernel)

    # Compute the combined kernel matrices on train/train and train/test.
    kernel.init(feats_train, feats_train)
    km_train = kernel.get_kernel_matrix()
    kernel.init(feats_train, feats_test)
    km_test = kernel.get_kernel_matrix()

    return km_train, km_test, kernel


if __name__ == '__main__':
    print('Combined')
    kernel_combined_modular(*parameter_list[0])
| gpl-2.0 | -7,027,474,954,381,840,000 | 36.254902 | 116 | 0.787895 | false |
sistason/kinksorter | tests/test_movie.py | 1 | 2483 | #!/usr/bin/env python3
import tempfile
import unittest

from apis.kink_api import KinkAPI
from hamcrest import *
from movie import Movie
from utils import Settings, FileProperties, SceneProperties


class MovieShould(unittest.TestCase):
    def setUp(self):
        Movie.settings = Settings({})
        self.temp_movie_file = tempfile.NamedTemporaryFile()
        file_properties = FileProperties(self.temp_movie_file.name)
        scene_properties = {'shootid': 12345}
        self.movie = Movie(file_properties, KinkAPI(), scene_properties=scene_properties)

    def test_recognize_kinkid(self):
        assert_that(self.movie.get_shootids_from_filename('12345'), contains(12345))
        assert_that(self.movie.get_shootids_from_filename('1999-12345-2007'), contains(12345))
        assert_that(self.movie.get_shootids_from_filename('2007'), equal_to([]))
        assert_that(self.movie.get_shootids_from_filename('1080p'), equal_to([]))
        assert_that(self.movie.get_shootids_from_filename('1080'), equal_to([]))
        assert_that(self.movie.get_shootids_from_filename('(1080)'), contains(1080))
        assert_that(self.movie.get_shootids_from_filename('1080 720'), equal_to([]))
        assert_that(self.movie.get_shootids_from_filename('1080 (720)'), contains(720))
        assert_that(self.movie.get_shootids_from_filename('12345 1234'), contains(12345, 1234))
        assert_that(self.movie.get_shootids_from_filename('2016-01-12'), equal_to([]))
        assert_that(self.movie.get_shootids_from_filename('123456789'), equal_to([]))
        assert_that(self.movie.get_shootids_from_filename('091224'), equal_to([]))

    def test_logic_empty(self):
        assert_that(bool(self.movie), equal_to(False))
        assert_that(self.movie.scene_properties.is_empty(), equal_to(False))
        self.movie.scene_properties.set_shootid(0)
        assert_that(self.movie.scene_properties.is_empty(), equal_to(True))

    def test_logic_full(self):
        self.movie.scene_properties.set_title("test")
        self.movie.scene_properties.set_site("Test Site")
        self.movie.scene_properties.set_date(13371337)
        self.movie.scene_properties.set_performers(['Tester'])
        assert_that(bool(self.movie), equal_to(True))

    def test_name(self):
        assert_that(self.movie.get_shootids_from_filename("Waterbondage - 2006-04-21 3546 - Ava.wmv"), contains(3546))

    def tearDown(self):
        del self.movie


if __name__ == "__main__":
    unittest.main()
| gpl-3.0 | 8,507,124,457,550,545,000 | 44.145455 | 118 | 0.681434 | false |
1flow/1flow | oneflow/core/migrations/0129_auto__add_notificationpreferences.py | 1 | 79648 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'NotificationPreferences'
        db.create_table(u'core_notificationpreferences', (
            ('preferences', self.gf('django.db.models.fields.related.OneToOneField')(related_name='notifications', unique=True, primary_key=True, to=orm['core.Preferences'])),
            ('received_pokes_email', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal('core', ['NotificationPreferences'])

    def backwards(self, orm):
        # Deleting model 'NotificationPreferences'
        db.delete_table(u'core_notificationpreferences')

    models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': 'd92bc8cc02bc498ca8bb388177cd417f'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pages_urls': ('json_field.fields.JSONField', [], {'default': "u'null'", 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'identities_rel_+'", 'null': 'True', 'to': "orm['core.Author']"}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'null': 'True', 'blank': 'True'}),
'origin_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'origin_id_str': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'website_data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'})
},
'core.baseaccount': {
'Meta': {'object_name': 'BaseAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseaccount_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['base.User']"})
},
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'processing_chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'processing_parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.chaineditem': {
'Meta': {'object_name': 'ChainedItem'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chained_items'", 'to': "orm['core.ProcessingChain']"}),
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'item_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.chaineditemparameter': {
'Meta': {'object_name': 'ChainedItemParameter'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ChainedItem']"}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.email': {
'Meta': {'object_name': 'Email', '_ormbases': ['core.BaseItem']},
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'emails'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'attachments_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '256', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'core.historicalarticle': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalArticle'},
u'baseitem_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pages_urls': ('json_field.fields.JSONField', [], {'default': "u'null'", 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'db_index': 'True'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'core.historicalemail': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalEmail'},
'attachments_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'baseitem_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_2': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_3': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'object_name': 'MailAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'header_field': ('django.db.models.fields.IntegerField', [], {'default': '4', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'default': "'886ae65681ad4ba9b657a060abcbb3f2'", 'max_length': '32', 'blank': 'True'})
},
'core.notificationpreferences': {
'Meta': {'object_name': 'NotificationPreferences'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'notifications'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'received_pokes_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feedparser': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'matching_rule': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'matching_rule_processed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'twitter': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.processingchain': {
'Meta': {'object_name': 'ProcessingChain'},
'applies_on': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.ProcessorCategory']"}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ProcessingChain']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processingerror': {
'Meta': {'object_name': 'ProcessingError'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'errors'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'is_temporary': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issue_ref': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'processor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'errors'", 'null': 'True', 'to': "orm['core.ChainedItem']"})
},
'core.processor': {
'Meta': {'object_name': 'Processor'},
'accept_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.ProcessorCategory']"}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Processor']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_processors'", 'null': 'True', 'to': u"orm['base.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Processor']"}),
'process_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'source_uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processorcategory': {
'Meta': {'object_name': 'ProcessorCategory'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_categories'", 'null': 'True', 'to': u"orm['base.User']"}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessorCategory']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'source_address': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_categories'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mark_auto_read_hide_delay': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.WebSite']"})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'local_token': ('django.db.models.fields.CharField', [], {'default': "'4c81945df7bf461a85a9be23e1b77958'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'blank': 'True'}),
'strategy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sync_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'core.tweet': {
'Meta': {'object_name': 'Tweet', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tweets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'entities_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mentions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mentions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'})
},
'core.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'fetch_owned_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fetch_subscribed_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'social_auth': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'twitter_account'", 'unique': 'True', 'to': u"orm['default.UserSocialAuth']"}),
'timeline': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'twitter_account'", 'unique': 'True', 'null': 'True', 'to': "orm['core.TwitterFeed']"})
},
'core.twitterfeed': {
'Meta': {'object_name': 'TwitterFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'twitter_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.TwitterAccount']"}),
'backfill_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'is_backfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'track_locations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'track_terms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'core.twitterfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'TwitterFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.TwitterFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'twitterfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.TwitterFeed']"})
},
'core.usercounters': {
'Meta': {'object_name': 'UserCounters'},
'placeholder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_counters'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
'processing_chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'websites'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'processing_parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
u'default.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth', 'db_table': "'social_auth_usersocialauth'"},
'extra_data': ('social.apps.django_app.default.fields.JSONField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['base.User']"})
}
}
complete_apps = ['core'] | agpl-3.0 | 161,011,278,336,962,400 | 99.949303 | 226 | 0.556649 | false |
plockaby/clone-push | pushlib/tasks.py | 1 | 8924 | from invoke import Task, run
from .tools import copy, warn, abort, confirm
from . import colors
from . import env
import os
# these are the classes that we will let modules override
__all__ = [
"CleanTask",
"MostlyCleanTask",
"BuildTask",
"TestTask",
"ArchiveTask",
"CloneTask",
"LiveTask",
"DeployTask",
]
# this makes it so that the "run" method on the class will be called when the
# task is run. do not change this implementation without knowing what it will
# do to the functionality of this program.
class TaskWrapper(Task):
def __init__(self, *args, **kwargs):
def run(c):
return self.run(c)
# the task's documentation will come from the method in our child class
run.__doc__ = self.run.__doc__
super().__init__(run, *args, **kwargs)
def run(self, c):
        raise NotImplementedError("{}.run() must be implemented in subclass".format(self.__class__.__name__))
def before(self, c):
pass
def after(self, c):
pass
class CleanTask(TaskWrapper):
name = "clean"
def run(self, c):
"""remove all build artifacts"""
c.run("rm -rf {}".format(env.containment_dir), hide=True)
print(colors.green("Finished cleaning project."))
class MostlyCleanTask(TaskWrapper):
name = "mostlyclean"
def run(self, c):
"""remove most build artifacts"""
c.run("rm -rf {}".format(env.build_dir), hide=True)
c.run("rm -rf {}".format(env.archive_dir), hide=True)
c.run("rm -rf {}".format(env.release_dir), hide=True)
c.run("rm -rf {}".format(env.test_dir), hide=True)
print(colors.green("Finished mostly cleaning project."))
class BuildTask(TaskWrapper):
name = "build"
def run(self, c):
"""build the project"""
# create release directories, build directory gets created by rsync
os.makedirs(env.release_dir, exist_ok=True)
# call before hooks
self.before(c)
# copy the root directory into the .push/build directory. need to
# append the trailing slash to make rsync copy the contents of the
# current directory rather than the current directory itself.
copy("{}/".format(env.current_dir), env.build_dir)
# call after hooks
self.after(c)
print(colors.green("Finished building project."))
class TestTask(TaskWrapper):
name = "test"
def run(self, c):
"""run project tests"""
# run perl tests
if (str(env.get("skip_tests", os.environ.get("SKIP_TESTS", False))) not in ["True", "1"]):
# create release directories, build directory gets created by rsync
os.makedirs(env.test_dir, exist_ok=True)
# call before hooks
self.before(c)
# call after hooks
self.after(c)
# only print success if we actually ran the tests
print(colors.green("Finished testing project."))
else:
print(colors.yellow("Not running tests because 'skip_tests' is set."))
class ArchiveTask(TaskWrapper):
name = "archive"
def run(self, c):
"""create deployment archive"""
# create the archive directory
os.makedirs(env.archive_dir, exist_ok=True)
# call before hooks
self.before(c)
# can't do anything if there is no release directory
if (not os.path.isdir(env.release_dir)):
abort("No release directory found. Cannot create archive.")
# clean up empty directories but make sure the release directory exists
# sometimes we have projects that don't actually have any files
c.run("find {} -type d -empty -delete".format(env.release_dir))
os.makedirs(env.release_dir, exist_ok=True)
# create the archive
c.run("tar -czf {}/{} -C {} -p .".format(env.archive_dir, env.archive_name, env.release_dir))
# call after hooks
self.after(c)
print(colors.green("Finished creating archive."))
class CloneTask(TaskWrapper):
name = "clone"
def run(self, c):
"""deploy the project to clone"""
# don't even bother if there is no tag
if (str(env.get("no_tag", os.environ.get("NO_TAG", False))) not in ["True", "1"]):
if (env.repo_is_dirty and not confirm("Repository is dirty and therefore not properly tagged. Deploy anyway?")):
abort("Aborting at user request.")
if (len(env.repo_tag_names) == 0 and not confirm("This revision is not tagged. Deploy anyway?")):
warn("This revision is not tagged.")
abort("Aborting at user request.")
else:
print(colors.yellow("Not checking to see if the project is tagged because 'no_tag' is set."))
# call before hooks
self.before(c)
# actually send it to clone
env.deploy(
archive="{}/{}".format(env.archive_dir, env.archive_name),
remote_user=env.host_user,
remote_host=env.clone_host,
remote_path="{}/{}{}".format(env.clone_base_dir, env.clone_path, env.host_path),
)
# call after hooks
self.after(c)
print(colors.green("Finished sending project to clone."))
# this task isn't like the others and requires a positional argument
class LiveTask(Task):
name = "live"
positional = ["name"]
def __init__(self, *args, **kwargs):
def run(c, name):
"""deploy the project using "live nickname" to deploy to a particular host"""
return self.run(c, name)
super().__init__(run, *args, **kwargs)
def before(self, c, hosts):
pass
def after(self, c, hosts):
pass
def run(self, c, name):
# this has all of the host information in it
from .hosts import hosts as _hosts
# these are the hosts that we might deploy to
hosts = []
# is the name a tag name? if it is then get all of the hosts
# that the tag maps to and add them to the list
if (name in _hosts["tags"]):
hosts += _hosts["tags"].get(name, [])
# is the name a host name?
if (name in _hosts["servers"]):
hosts.append(name)
# if the given name wasn't found then maybe there's a reason for that
if (len(hosts) == 0):
if (confirm("No server or tag named \"{}\" found in host list. Should we deploy directly to \"{}\"?".format(name, name))):
hosts.append(name)
else:
warn("Ignoring \"{}\" because it is not a valid server or tag name.".format(name))
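        # Illustrative example (hypothetical host data): if hosts["tags"] == {"web": ["web01", "web02"]}
        # and "web01" is listed in hosts["servers"], then running the live task with name="web" deploys
        # to web01 and web02, while name="web01" deploys only to that host. A name matching neither is
        # used only after the user explicitly confirms it.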
# call before hooks
self.before(c, hosts)
# don't do it in parallel, sometimes the plugin modules have prompts.
for host in sorted(hosts):
env.deploy(
archive="{}/{}".format(env.archive_dir, env.archive_name),
remote_user=env.host_user,
remote_host=host,
remote_path=env.host_path,
)
# call after hooks
self.after(c, hosts)
print(colors.green("Finished deploying project."))
# not a real task
class DeployTask(object):
def __init__(self, archive, remote_user, remote_host, remote_path):
# make sure the thing we are deploying exists
if (not os.path.isfile("{}/{}".format(env.archive_dir, env.archive_name))):
abort("No archive file found. Cannot distribute project.")
# keep track of these for hooks
self.archive = archive
self.remote_user = remote_user
self.remote_host = remote_host
self.remote_path = remote_path
# call before hook
self.before()
# NOW we tell people about it. this makes the output print in the correct order
print(colors.cyan("Deploying {} to {}:{} as {}.".format(archive, remote_host, remote_path, remote_user)))
# unpack the tar file over the ssh link. we are assuming that the path
# to tar on the remote host is the same as it is on the local host.
run("cat {} | ssh -o ConnectTimeout=10 {} sudo -u {} \"tar zxf - -C {} -p --no-same-owner --overwrite-dir\"".format(archive, remote_host, remote_user, remote_path))
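        # Illustrative expansion (hypothetical values): with archive=".push/archive/app.tar.gz",
        # remote_host="web01", remote_user="deploy" and remote_path="/srv/app", the line above runs
        # roughly:
        #   cat .push/archive/app.tar.gz | ssh -o ConnectTimeout=10 web01 \
        #       sudo -u deploy "tar zxf - -C /srv/app -p --no-same-owner --overwrite-dir"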
# call after hook
self.after()
def clean(self, path):
remote_path = os.path.join(self.remote_path, path)
print(colors.cyan("Removing {}:{} as {}.".format(self.remote_host, remote_path, self.remote_user)))
# log in to the remote host and remove the path. we are assuming
# that the path to "rm" on the remote host is the same as it is on
# the local host.
run("ssh -o ConnectTimeout=30 {} sudo -u {} \"rm -rf {}\"".format(self.remote_host, self.remote_user, remote_path))
def before(self, **kwargs):
pass
def after(self, **kwargs):
pass
| artistic-2.0 | 5,845,773,534,220,446,000 | 31.808824 | 172 | 0.595249 | false |
jiweix/open-everything | tests/test_server.py | 1 | 22033 | import unittest
from datetime import datetime, timedelta
import app
from app import models, server
from app.models import db, User, Reservation, Resource, Tag
from flask import url_for
class TestModels(unittest.TestCase):
def setUp(self):
self.app = app.get_app("TEST")
self.app.config.update(SERVER_NAME='localhost')
self.app_context = self.app.app_context()
self.app_context.push()
self.setup_dummy_data()
self.client = self.app.test_client(use_cookies=True)
self.user_data = { 'email': "[email protected]",
'password': "hard_to_guess_pw"}
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
# ---------------------- User tests ----------------------------------------
def test_can_access_login_page(self):
response = self.client.get('/login')
self.assertEqual(response.status_code, 200)
self.assertTrue("Please Login" in response.data)
def test_can_access_register_page(self):
response = self.client.get('/register')
self.assertEqual(response.status_code, 200)
self.assertTrue("Please Register" in response.data)
def test_user_can_login(self):
response = self.client.post('/login',
data=self.user_data)
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('list'))
def test_user_can_logout(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/logout')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('login'))
response = self.client.get('/home')
self.assertEqual(response.status_code, 200)
self.assertTrue("You need to log in" in response.data)
def test_invalid_user_can_not_login(self):
response = self.client.post('/login',
data={ 'email': "[email protected]",
'password': "wrong_password"})
self.assertEqual(response.status_code, 200)
self.assertTrue("User name or password invalid, Please try again" in response.data)
def test_register_and_login_a_user(self):
response = self.client.post('/register',
data={ 'email': "[email protected]",
'password': "hard_to_guess_pw"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('login'))
response = self.client.post('/login',
data={ 'email': "[email protected]",
'password': "hard_to_guess_pw"})
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('list'))
def test_register_with_duplicate_email(self):
response = self.client.post('/register',
data=self.user_data)
self.assertEqual(response.status_code, 200)
self.assertTrue("Email already taken" in response.data)
def test_retrieve_user_by_user_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/users/'+str(self.test_user_id))
self.assertEqual(response.status_code, 200)
self.assertTrue("test_res" in response.data)
def test_retrieve_user_by_invalid_user_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/users/'+str(99999))
self.assertEqual(response.status_code, 404)
# ----------------------End User tests -------------------------------------
# ----------------------Home page tests ------------------------------------
def test_login_user_can_access_home_page(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/home')
self.assertEqual(response.status_code, 200)
self.assertTrue("test_res" in response.data)
def test_anonymous_user_can_not_access_home_page(self):
response = self.client.get('/home')
self.assertEqual(response.status_code, 200)
self.assertTrue("You need to log in" in response.data)
# ----------------------End Home page tests --------------------------------
# ----------------------Index page tests -----------------------------------
def test_login_user_access_index_page(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('list'))
def test_anonymous_user_access_index_page(self):
response = self.client.get('/home')
self.assertEqual(response.status_code, 200)
# ----------------------End Index page tests -------------------------------
# ----------------------Resource tests -------------------------------------
def test_access_add_resource_page(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/add')
self.assertEqual(response.status_code, 200)
self.assertTrue("Add resource" in response.data)
def test_add_new_resource(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/add',
data={ 'name': 'resource_2',
'owner_id': self.test_user_id,
'available_start': '01:00',
'available_end': '23:00',
'tag': 'test a b'},
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue("resource_2" in response.data)
def test_add_new_resource_end_before_start(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/add',
data={ 'name': 'resource_2',
'owner_id': self.test_user_id,
'available_start': '10:00',
'available_end': '9:00',
'tag': 'test a b'})
self.assertEqual(response.status_code, 200)
self.assertTrue("Add resource" in response.data)
self.assertTrue("resource_2" not in response.data)
def test_add_new_resource_without_name(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/add',
data={ 'name': '',
'owner_id': self.test_user_id,
'available_start': '10:00',
'available_end': '19:00',
'tag': 'test a b'})
self.assertEqual(response.status_code, 200)
self.assertTrue("Add resource" in response.data)
def test_add_invalid_resource(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/add',
data={ 'name': 'resource_2',
'owner_id': self.test_user_id,
'available_start': 'wrong_time',
'available_end': '23:00',
'tag': ''})
self.assertEqual(response.status_code, 200)
self.assertTrue("Input Invalid" in response.data)
def test_retrieve_resource_by_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(self.test_resource_id))
self.assertEqual(response.status_code, 200)
self.assertTrue("test_res" in response.data)
def test_access_edit_resource_page(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(self.test_resource_id)+'/edit')
self.assertEqual(response.status_code, 200)
self.assertTrue("Edit resource" in response.data)
def test_edit_resource(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/'+str(self.test_resource_id)+'/edit',
data={ 'name': 'resource_2',
'available_start': '01:00',
'available_end': '23:00',
'tag': 'test_tag' },
follow_redirects=True)
self.assertEqual(response.status_code, 200)
self.assertTrue("test_tag" in response.data)
def test_edit_resource_invalid_user(self):
self.client.post('/register',
data={ 'email': "[email protected]",
'password': "hard_to_guess_pw"})
self.client.post('/login',
data={ 'email': "[email protected]",
'password': "hard_to_guess_pw"})
response = self.client.post('/resources/'+str(self.test_resource_id)+'/edit',
data={ 'name': 'resource_2',
'available_start': '01:00',
'available_end': '23:00',
'tag': 'test_tag' })
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('list'))
def test_delete_resource(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(self.test_resource_id)+'/delete')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('list'))
response = self.client.get('/resources/'+str(self.test_resource_id))
self.assertEqual(response.status_code, 404)
# ----------------------End Resource tests ---------------------------------
# ----------------------Reservation tests ----------------------------------
def test_retrieve_reservation_by_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/reservations/'+str(self.test_reservation_id))
self.assertEqual(response.status_code, 200)
self.assertTrue("test_res" in response.data)
def test_retrieve_reservation_by_invalid_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/reservations/'+str(9999999))
self.assertEqual(response.status_code, 404)
def test_access_add_reservation_page(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(self.test_resource_id)+'/add_reservation')
self.assertEqual(response.status_code, 200)
self.assertTrue("Add Reservation" in response.data)
def test_add_new_reservation(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/'+str(self.test_resource_id)+'/add_reservation',
data={ 'date': (datetime.now()+timedelta(days=1)).strftime('%Y-%m-%d'),
'start': '10:00',
'duration': '01:00'})
self.assertEqual(response.status_code, 302)
def test_add_new_reservation_invalid_time(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/'+str(self.test_resource_id)+'/add_reservation',
data={ 'date': (datetime.now()+timedelta(days=1)).strftime('%Y-%m-%d'),
'start': 'not_time',
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue("Time Input Invalid" in response.data)
def test_add_new_reservation_out_of_range_time(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/'+str(self.test_resource_id)+'/add_reservation',
data={ 'date': (datetime.now()+timedelta(days=1)).strftime('%Y-%m-%d'),
'start': '0:00',
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue("Start time is before the resource available start" in response.data)
def test_add_new_reservation_out_of_range_time_2(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/'+str(self.test_resource_id)+'/add_reservation',
data={ 'date': (datetime.now()+timedelta(days=1)).strftime('%Y-%m-%d'),
'start': '18:00',
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue("End time is after the resource available end" in response.data)
def test_add_new_reservation_past_time(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/resources/'+str(self.test_resource_id)+'/add_reservation',
data={ 'date': (datetime.now()-timedelta(days=1)).strftime('%Y-%m-%d'),
'start': '10:00',
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue("Add Reservation" in response.data)
def test_get_reservations_with_resource_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(self.test_resource_id)+'/get_reservations')
self.assertEqual(response.status_code, 200)
self.assertTrue(("Reservation Id: " +str(self.test_reservation_id)) in response.data)
def test_get_reservations_with_invalid_resource_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(99999)+'/get_reservations')
self.assertEqual(response.status_code, 404)
def test_delete_reservation(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/reservations/'+str(self.test_reservation_id)+'/delete')
self.assertEqual(response.status_code, 302)
self.assertEqual(response.location, url_for('list'))
response = self.client.get('/reservations/'+str(self.test_reservation_id))
self.assertEqual(response.status_code, 404)
# ----------------------End Reservation tests ------------------------------
# ----------------------Tag tests ------------------------------------------
def test_retrieve_tag_by_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/tags/'+str(self.tag_id_1))
self.assertEqual(response.status_code, 200)
self.assertTrue("test_res" in response.data)
def test_retrieve_tag_by_invalid_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/tags/'+str(99999))
self.assertEqual(response.status_code, 404)
# ----------------------End Tag tests --------------------------------------
# ----------------------RSS tests ------------------------------------------
def test_get_RSS_by_resource_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(self.test_resource_id)+'/rss')
self.assertEqual(response.status_code, 200)
self.assertTrue(("All reservations for test_res") in response.data)
def test_get_RSS_by_invalid_resource_id(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/resources/'+str(99999)+'/rss')
self.assertEqual(response.status_code, 404)
# ----------------------End RSS tests --------------------------------------
# ----------------------Search tests ---------------------------------------
def test_get_search_page(self):
self.client.post('/login',
data=self.user_data)
response = self.client.get('/search')
self.assertEqual(response.status_code, 200)
self.assertTrue(("Search Resource") in response.data)
def test_search_resource_should_have_result(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/search',
data={ 'date': (datetime.now()+timedelta(days=2)).strftime('%Y-%m-%d'),
'start': '6:00',
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue(("test_res") in response.data)
def test_search_resource_should_not_have_result(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/search',
data={ 'date': (datetime.now()+timedelta(days=2)).strftime('%Y-%m-%d'),
'start': '4:30',
'duration': '0:30'})
self.assertEqual(response.status_code, 200)
self.assertTrue(("test_res") not in response.data)
def test_search_resource_invalid_input(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/search',
data={ 'date': (datetime.now()+timedelta(days=2)).strftime('%Y-%m-%d'),
'start': 'not_time',
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue(("Time Input Invalid") in response.data)
def test_search_resource_user_has_reservation_during_that_time(self):
self.client.post('/login',
data=self.user_data)
response = self.client.post('/search',
data={ 'date': datetime.now().strftime('%Y-%m-%d'),
'start': (datetime.now()+timedelta(minutes=30)).strftime('%H:%M'),
'duration': '01:00'})
self.assertEqual(response.status_code, 200)
self.assertTrue(("You already have a reservation during that time") in response.data)
# ----------------------End Search tests -----------------------------------
# ---------------------- SET UP --------------------------------------------
def setup_dummy_data(self):
self.test_user_id = self.add_one_user()
self.test_resource = self.add_one_resource()
self.test_resource_id, self.test_resource_name = self.test_resource.id, self.test_resource.name
self.test_reservation_id = self.add_one_reservation(
self.test_resource_id,
self.test_resource_name,
self.test_user_id)
self.tag = Tag("tag_1")
self.tag_2 = Tag("tag_2")
self.test_resource.tags.append(self.tag)
self.test_resource.tags.append(self.tag_2)
db.session.add(self.tag)
db.session.add(self.tag_2)
db.session.add(self.test_resource)
db.session.commit()
self.tag_id_1 = self.tag.id
def add_one_user(self):
user = User("[email protected]", "hard_to_guess_pw")
db.session.add(user)
db.session.commit()
return user.id
    # must call add_one_user before using this method; it should not be called twice in one test
def add_one_resource(self):
user = User.query.filter_by(email="[email protected]").first()
resource = Resource()
resource.deserialize({
'name' : "test_res",
'owner_id' : user.id,
'available_start': "5:00",
'available_end' : "17:00"
})
db.session.add(resource)
db.session.commit()
return resource
def add_one_reservation(self, resource_id, resource_name, user_id):
reservation = Reservation()
reservation.deserialize({
'resource_id' : resource_id,
'resource_name' : resource_name,
'user_id' : user_id,
'start_time' : datetime.now(),
'end_time': datetime.now() + timedelta(minutes=90),
'duration': '01:30'
})
db.session.add(reservation)
db.session.commit()
return reservation.id
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -2,906,952,499,882,171,000 | 47.637969 | 109 | 0.504652 | false |
akelge/utils | mailIdManager/account.py | 1 | 2481 | #
# account.py
# mailVirtual
#
# Created by Andrea Mistrali on 25/09/09.
# Copyright [email protected] 2009. All rights reserved.
#
# $Id$
from Foundation import *
class Accounts(object):
pl=None
binary=False
modified=False
filename=''
def __new__(cls, filename):
try:
cls.pl=NSMutableDictionary.dictionaryWithContentsOfFile_(filename)
except IOError:
return None
cls.filename=filename
return object.__new__(cls)
def __init__(self, filename="com.apple.mail.plist"):
self.accountList=[]
if self.pl:
accountList=[a for a in self.pl['MailAccounts'] if (a['AccountType'] in ['IMAPAccount', 'POPAccount'])]
for account in accountList:
self.accountList.append(Account(account, self))
def save(self, filename=None):
if not filename:
filename=self.filename
if self.pl:
self.pl.writeToFile_atomically_(filename, False)
class Account(object):
def __init__(self, accountDict, parent):
self.account = accountDict
self.name = self.account['AccountName']
self.parent = parent
self.mainAddress = "%s <%s>" % (self.account['FullUserName'], self.account['EmailAddresses'][0])
# Setup Aliases
if not self.account.has_key('EmailAliases'):
self.account['EmailAliases']=[]
self.aliases=self.account['EmailAliases']
def __repr__(self):
return r"<Account '%s'>" % (self.name)
def addAlias(self, name, alias, index=None):
newAlias={'name': name, 'alias': alias}
if index != None:
self.aliases.insert(index, newAlias)
else:
self.aliases.append(newAlias)
self.parent.modified=True
def delAlias(self, index):
if index in range(0,len(self.aliases)):
self.aliases.pop(index)
self.parent.modified=True
def modAlias(self, index, name, alias):
if index in range(0,len(self.aliases)):
self.delAlias(index)
self.addAlias(name, alias, index)
self.parent.modified=True
def moveAliasUpDown(self, index, step):
"""
        Move the alias at 'index' up by 'step' positions (down for a negative step), ignoring moves that would fall outside the list
"""
if (index-step) in range(0,len(self.aliases)):
item=self.aliases.pop(index)
self.aliases.insert((index-step), item)
self.parent.modified=True
| gpl-2.0 | -7,802,215,055,761,493,000 | 30.0125 | 115 | 0.597743 | false |
spacedogXYZ/sms_checkin | events/models.py | 1 | 3806 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db.models import Avg, Count
from django.db import models
from django.utils import timezone
from timezone_field import TimeZoneField
from phonenumber_field.modelfields import PhoneNumberField
from reminders.models import Prompt
class Event(models.Model):
name = models.CharField(max_length=150)
location = models.CharField(max_length=150, null=True)
host_name = models.CharField(max_length=150, null=True)
time_zone = TimeZoneField(default='US/Pacific')
starts_at = models.DateTimeField(verbose_name="Starts at (local)")
ends_at = models.DateTimeField(verbose_name="Ends at (local)")
created = models.DateTimeField(auto_now_add=True)
prompt_before = models.ForeignKey(Prompt, related_name='+', null=True)
prompt_after = models.ForeignKey(Prompt, related_name='+', null=True)
def __str__(self):
return 'Event #{0} - {1}'.format(self.pk, self.name)
def get_absolute_url(self):
return reverse('view_event', args=[str(self.id)])
@property
def participants(self):
return [a.participant for a in self.attendance_set.select_related('participant')]
@property
def confirmed(self):
return self.attendance_set.filter(confirmed=True)
@property
def ratings(self):
return self.attendance_set.filter(rating__isnull=False).annotate(Count('id')).aggregate(Avg('rating'))
def get_starts_at(self):
"""Returns event.starts_at in specified event.time_zone"""
# NOTE: don't just force timezone into datetime
# DST will mess it up, http://bugs.python.org/issue22994
# use time_zone.localize and normalize instead
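        # Illustrative sketch of the pitfall (assumed values): with a pytz zone such as US/Eastern,
        #   naive.replace(tzinfo=tz) pins the zone's historical base offset and ignores DST,
        #   while tz.localize(naive) followed by tz.normalize(...) yields the correct EST/EDT offset.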
# clear existing tzinfo (which was UTC from server), making a naive datetime
starts_at_naive = self.starts_at.replace(tzinfo=None)
# use timezone.localize to add the user's correct tzinfo
starts_at_local = self.time_zone.localize(starts_at_naive)
# normalize to apply DST rules
starts_at_normal = self.time_zone.normalize(starts_at_local)
return starts_at_normal
get_starts_at.short_description = "Starts at (%s)" % timezone.get_current_timezone_name()
# this displays in django admin, which converts to server time before display
def get_ends_at(self):
"""Returns event.ends_at in specified event.time_zone"""
ends_at_naive = self.ends_at.replace(tzinfo=None)
ends_at_local = self.time_zone.localize(ends_at_naive)
ends_at_normal = self.time_zone.normalize(ends_at_local)
return ends_at_normal
get_ends_at.short_description = "Ends at (%s)" % timezone.get_current_timezone_name()
class Participant(models.Model):
created = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=150)
phone = PhoneNumberField()
email = models.EmailField()
event = models.ManyToManyField(Event, through='Attendance')
def __str__(self):
return 'Participant #{0} - {1}'.format(self.pk, self.name)
@property
def attending(self):
""" all attendances for a participant, ordered by event end times descending """
future_attendances = self.attendance_set.select_related('event')
# TODO filter out attendances in the past #.filter(event__ends_at__gte=timezone.now())
return future_attendances.order_by('-event__ends_at')
class Attendance(models.Model):
participant = models.ForeignKey(Participant)
event = models.ForeignKey(Event)
confirmed = models.NullBooleanField(default=None, blank=True, null=True)
rating = models.IntegerField(default=None, blank=True, null=True)
class Meta:
verbose_name_plural = "attending" | agpl-3.0 | 2,791,979,001,343,510,500 | 38.247423 | 110 | 0.691277 | false |
0xPoly/ooni-probe | ooni/nettests/third_party/lantern.py | 1 | 3552 | from twisted.internet import defer, reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from twisted.python import usage
from twisted.web.client import ProxyAgent, readBody
from ooni.templates.process import ProcessTest, ProcessDirector
from ooni.utils import log
from ooni.errors import handleAllFailures
import os.path
from os import getenv
class UsageOptions(usage.Options):
optParameters = [
['url', 'u', None, 'Specify a single URL to test.'],]
class LanternProcessDirector(ProcessDirector):
"""
This Process Director monitors Lantern during its
bootstrap and fires a callback if bootstrap is
successful or an errback if it fails to bootstrap
before timing out.
"""
def __init__(self, d, timeout=None):
self.d = d
self.stderr = ""
self.stdout = ""
self.finished = None
self.timeout = timeout
self.stdin = None
self.timer = None
self.exit_reason = None
self.bootstrapped = defer.Deferred()
def outReceived(self, data):
self.stdout += data
# output received, see if we have bootstrapped
if not self.bootstrapped.called and "Connected to proxy on localhost" in self.stdout:
log.debug("Bootstrap Detected")
self.cancelTimer()
self.bootstrapped.callback("bootstrapped")
class LanternTest(ProcessTest):
"""
This class tests Lantern (https://getlantern.org).
test_lantern_circumvent
Starts Lantern on Linux in --headless mode and
determine if it bootstraps successfully or not.
Then, make a HTTP request for http://google.com
and records the response body or failure string.
"""
name = "Lantern Circumvention Tool Test"
description = "Bootstraps Lantern and does a HTTP GET for the specified URL"
author = "Aaron Gibson"
version = "0.0.1"
timeout = 20
usageOptions = UsageOptions
requiredOptions = ['url']
def setUp(self):
self.command = ["lantern", "--headless"]
self.d = defer.Deferred()
self.processDirector = LanternProcessDirector(self.d, timeout=self.timeout)
self.d.addCallback(self.processEnded, self.command)
if self.localOptions['url']:
self.url = self.localOptions['url']
def runLantern(self):
paths = filter(os.path.exists,[os.path.join(os.path.expanduser(x), self.command[0]) for x in getenv('PATH').split(':')])
log.debug("Spawning Lantern")
reactor.spawnProcess(self.processDirector, paths[0], self.command)
def test_lantern_circumvent(self):
proxyEndpoint=TCP4ClientEndpoint(reactor, '127.0.0.1', 8787)
agent = ProxyAgent(proxyEndpoint, reactor)
def addResultToReport(result):
self.report['body'] = result
self.report['success'] = True
def addFailureToReport(failure):
self.report['failure'] = handleAllFailures(failure)
self.report['success'] = False
def doRequest(noreason):
log.debug("Doing HTTP request via Lantern (127.0.0.1:8787) for %s" % self.url)
request = agent.request("GET", self.url)
request.addCallback(readBody)
request.addCallback(addResultToReport)
request.addCallback(self.processDirector.close)
return request
self.processDirector.bootstrapped.addCallback(doRequest)
self.processDirector.bootstrapped.addErrback(addFailureToReport)
self.runLantern()
return self.d
| bsd-2-clause | -3,549,907,999,979,772,400 | 34.52 | 128 | 0.663288 | false |
TomAugspurger/pandas | pandas/tests/series/test_arithmetic.py | 1 | 24034 | from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
import pandas as pd
from pandas import Categorical, Index, Series, bdate_range, date_range, isna
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
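    # Note: the op=op default argument in the lambda above binds the current loop value at
    # definition time; without it every requiv would close over the loop variable and end up
    # calling the last operator in the list ("floordiv").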
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
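            # reference implementation: align both Series on the union of their indexes and apply
            # `op` elementwise, substituting fill_value on whichever side is missing (NaN only when
            # both are missing), then compare against meth(a, b, fill_value=...)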
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: don't leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, "index"]:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = "No axis named 1 for object type"
for op in ["eq", "ne", "le", "le", "gt", "ge"]:
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
exp = pd.Series([False, False, True, False], index=list("abcd"))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list("abcd"))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list("abcd"))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list("abcd"))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list("abcd"))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
exp = pd.Series([False, False, True, True], index=list("abcd"))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list("abcd"))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list("abcd"))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list("abcd"))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Cannot compare a Categorical for op.+with a scalar"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
# test that comparisons work
ops = ["lt", "le", "gt", "ge", "eq", "ne"]
for op in ops:
val = ser[5]
f = getattr(operator, op)
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == "ne":
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
tm.assert_series_equal(result, expected)
# FIXME: don't leave commented-out
# the reversed-operand case below is still unresolved
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
assert tm.equalContents(ts.index != 5, expected)
assert tm.equalContents(~(ts.index == 5), expected)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
s2 = pd.Series([2, 2, 2], index=list("ABD"), name="x")
s3 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
s4 = pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x")
for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
left == right
with pytest.raises(ValueError, match=msg):
left != right
with pytest.raises(ValueError, match=msg):
left < right
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
left.to_frame() == right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() != right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
def test_compare_series_interval_keyword(self):
# GH#25338
s = Series(["IntervalA", "IntervalB", "IntervalC"])
result = s == "IntervalA"
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestTimeSeriesArithmetic:
# TODO: De-duplicate with test below
def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
ser = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ser.tz_convert("Europe/Moscow")
result = ser + ts_moscow
assert result.index.tz is pytz.utc
result = ts_moscow + ser
assert result.index.tz is pytz.utc
def test_series_add_tz_mismatch_converts_to_utc(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
perm = np.random.permutation(100)[:90]
ser1 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern")
)
perm = np.random.permutation(100)[:90]
ser2 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin")
)
result = ser1 + ser2
uts1 = ser1.tz_convert("utc")
uts2 = ser2.tz_convert("utc")
expected = uts1 + uts2
assert result.index.tz == pytz.UTC
tm.assert_series_equal(result, expected)
def test_series_add_aware_naive_raises(self):
rng = date_range("1/1/2011", periods=10, freq="H")
ser = Series(np.random.randn(len(rng)), index=rng)
ser_utc = ser.tz_localize("utc")
msg = "Cannot join tz-naive with tz-aware DatetimeIndex"
with pytest.raises(Exception, match=msg):
ser + ser_utc
with pytest.raises(Exception, match=msg):
ser_utc + ser
def test_datetime_understood(self):
# Ensures it doesn't fail to create the right series
# reported in issue#16726
series = pd.Series(pd.date_range("2012-01-01", periods=3))
offset = pd.offsets.DateOffset(days=6)
result = series - offset
expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
| bsd-3-clause | 4,321,271,863,813,454,300 | 33.882438 | 88 | 0.544562 | false |
simkuring/simkuring_telegram_bot | bot.py | 1 | 2433 | import requests
from time import sleep
import json
import ConfigParser
import modules
# config
config = ConfigParser.ConfigParser()
config.read("config.ini")
key = config.get("setting","key")
limit = config.getint("setting","limit")
sleepTime = config.getint("setting","sleep")
queryLimit = config.getint("setting","query_limit")
timeout = config.getint("setting","timeout")
# set url
headers = {"Content-type": "application/x-www-form-urlencoded"}
url = "https://api.telegram.org/bot"
sendMsgUrl = url + key + "/sendMessage"
getMsgUrl = url + key + "/getUpdates"
# help and about
def help(args):
return """
/jam
/adzan [bandung, bogor, jakarta, aceh, samarinda, balikpapan, makassar]
/ddg [keyword]
/about
"""
def about(args):
about = """
Bot Simkuring v .1 alpha by Simkuring Laboratory
"""
return about
# bot command list + function
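# dispatch table: maps a command keyword to a handler that receives the split message (e.g. ['/ddg', 'keyword']) and returns the reply text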
commandLists = {
"/jam":modules.jam,
"/adzan":modules.adzan,
"/ddg":modules.ddg,
"/about":about,
"/help":help
}
def sendMessage(chatId, msgId, text):
try:
data = {"chat_id":chatId,"text":text,"reply_to_message_id":msgId}
r = requests.post(sendMsgUrl,data=data)
if r.status_code != 200:
print r.status_code
except:
print "weee"
def parseCommand(msg):
panjang = len(msg['result'])
for i in range(panjang):
try:
perintah = msg['result'][i]['message']['text'].replace("@SimkuringBot","")
command = perintah.split()
if command[0] in commandLists.keys():
data = commandLists[command[0]](command)
sendMessage(msg['result'][i]['message']['chat']['id'], msg['result'][i]['message']['message_id'], data)
except:
pass
def main():
lastMessageId = 0;
while (True):
data = {
"offset":lastMessageId,
"timeout":timeout,
"limit":queryLimit
}
bot = requests.post(getMsgUrl,data=data)
if bot.status_code == 200:
msg = bot.json()
panjang = len(msg['result'])
if panjang > 0 :
if panjang < limit :
parseCommand(msg)
lastMessageId = msg['result'][panjang-1]['update_id'] + 1
else:
print bot.status_code
sleep(sleepTime)
if __name__ == "__main__":
main() | gpl-3.0 | 7,329,419,161,624,676,000 | 26.659091 | 119 | 0.572544 | false |
pdasigi/onto-lstm | model_pp_attachment.py | 1 | 18634 | '''
This module defines various PP Attachment models and comes with a CLI to train and test them.
'''
import sys
import codecs
import argparse
import random
import json
import numpy
from overrides import overrides
from keras.layers import Input, Bidirectional
from keras.models import Model
from encoders import LSTMEncoder, OntoLSTMEncoder
from onto_attention import OntoAttentionLSTM
from index_data import DataProcessor
from preposition_model import PrepositionModel
from preposition_predictors import AttachmentPredictor
class PPAttachmentModel(PrepositionModel):
'''
Base class for PP Attachment models. Encoder for input phrases is not defined here. Subclasses
need to do that.
'''
def __init__(self, tune_embedding, bidirectional, **kwargs):
super(PPAttachmentModel, self).__init__(**kwargs)
self.tune_embedding = tune_embedding
self.bidirectional = bidirectional
self.validation_split = 0.05
self.model_name = "PP Attachment"
self.custom_objects = {"AttachmentPredictor": AttachmentPredictor}
def _get_input_layers(self, train_inputs):
phrase_input_layer = Input(name="phrase", shape=train_inputs.shape[1:], dtype='int32')
return phrase_input_layer
def _get_output_layers(self, inputs, dropout, embedding_file, num_mlp_layers):
encoded_phrase = self.encoder.get_encoded_phrase(inputs, dropout, embedding_file)
predictor = AttachmentPredictor(name='attachment_predictor', proj_dim=20, composition_type='HPCT',
num_hidden_layers=num_mlp_layers)
outputs = predictor(encoded_phrase)
return outputs
@overrides
def process_data(self, input_file, onto_aware, for_test=False):
'''
Reads an input file and makes input for training or testing.
'''
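# Each input line is expected to be '<label index>\t<POS-tagged phrase>' (tab-separated).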
dataset_type = "test" if for_test else "training"
print >>sys.stderr, "Reading %s data" % dataset_type
label_ind = []
tagged_sentences = []
max_sentence_length = 0
all_sentence_lengths = []
for line in open(input_file):
lnstrp = line.strip()
label, tagged_sentence = lnstrp.split("\t")
sentence_length = len(tagged_sentence.split())
all_sentence_lengths.append(sentence_length)
if sentence_length > max_sentence_length:
max_sentence_length = sentence_length
label_ind.append(int(label))
tagged_sentences.append(tagged_sentence)
if for_test:
if not self.model:
raise RuntimeError("Model not trained yet!")
input_shape = self.model.get_input_shape_at(0) # (num_sentences, num_words, ...)
sentlenlimit = input_shape[1]
else:
sentlenlimit = max_sentence_length
# We need to readjust the labels because padding would affect the sentence indices.
for i in range(len(label_ind)):
length = all_sentence_lengths[i]
label_ind[i] += sentlenlimit - length
if not for_test:
# Shuffling so that when Keras does validation split, it is not always at the end.
sentences_and_labels = zip(tagged_sentences, label_ind)
random.shuffle(sentences_and_labels)
tagged_sentences, label_ind = zip(*sentences_and_labels)
print >>sys.stderr, "Indexing %s data" % dataset_type
inputs = self.data_processor.prepare_input(tagged_sentences, onto_aware=onto_aware,
sentlenlimit=sentlenlimit, for_test=for_test,
remove_singletons=False)
labels = self.data_processor.make_one_hot(label_ind)
return inputs, labels
@overrides
def write_predictions(self, inputs):
'''
Outputs predictions in a file named <model_name_prefix>.predictions.
'''
predictions = numpy.argmax(self.model.predict(inputs), axis=1)
test_output_file = open("%s.predictions" % self.model_name_prefix, "w")
for input_indices, prediction in zip(inputs, predictions):
# The predictions are indices of words in padded sentences. We need to readjust them.
padding_length = 0
for index in input_indices:
if numpy.all(index == 0):
padding_length += 1
else:
break
prediction = prediction - padding_length + 1 # +1 because the indices start at 1.
print >>test_output_file, prediction
class LSTMAttachmentModel(PPAttachmentModel):
'''
A PP Attachment prediction model that uses an LSTM as the encoder.
'''
def __init__(self, **kwargs):
super(LSTMAttachmentModel, self).__init__(**kwargs)
self.model_name_prefix = "lstm_models/lstm_ppa_tune-embedding=%s_bi=%s" % (self.tune_embedding,
self.bidirectional)
self.encoder = LSTMEncoder(self.data_processor, self.embed_dim, self.bidirectional, self.tune_embedding)
self.custom_objects.update(self.encoder.get_custom_objects())
class OntoLSTMAttachmentModel(PPAttachmentModel):
'''
A PP Attachment prediction model that uses an OnotoLSTM as the encoder.
'''
def __init__(self, num_senses, num_hyps, use_attention, set_sense_priors, prep_senses_dir, **kwargs):
super(OntoLSTMAttachmentModel, self).__init__(**kwargs)
# Set self.data_processor again, now with the right arguments.
process_preps = False if prep_senses_dir is None else True
self.data_processor = DataProcessor(word_syn_cutoff=num_senses, syn_path_cutoff=num_hyps,
process_preps=process_preps, prep_senses_dir=prep_senses_dir)
self.num_senses = num_senses
self.num_hyps = num_hyps
self.attention_model = None # Keras model with just embedding and encoder to output attention.
self.set_sense_priors = set_sense_priors
self.use_attention = use_attention
use_prep_senses = False if prep_senses_dir is None else True
self.encoder = OntoLSTMEncoder(self.num_senses, self.num_hyps, self.use_attention, self.set_sense_priors,
data_processor=self.data_processor, embed_dim=self.embed_dim,
bidirectional=self.bidirectional, tune_embedding=self.tune_embedding)
self.model_name_prefix = ("ontolstm_models/ontolstm_ppa_att=%s_senses=%d_hyps=%d"
"_sense-priors=%s_prep-senses=%s_tune-embedding=%s_bi=%s") % (
str(self.use_attention), self.num_senses, self.num_hyps,
str(set_sense_priors), str(use_prep_senses), str(self.tune_embedding),
str(self.bidirectional))
self.custom_objects.update(self.encoder.get_custom_objects())
def get_attention(self, inputs):
'''
Takes inputs and returns pairs of synsets and corresponding attention values.
'''
if not self.attention_model:
self.define_attention_model()
attention_outputs = self.attention_model.predict(inputs)
sent_attention_values = []
for sentence_input, sentence_attention in zip(inputs, attention_outputs):
word_attention_values = []
for word_input, word_attention in zip(sentence_input, sentence_attention):
# Size of word input is (senses, hyps+1)
# Ignoring the last hyp index because that is just the word index pt there by
# OntoAwareEmbedding for sense priors.
if word_input.sum() == 0:
# This is just padding
continue
word_input = word_input[:, :-1] # removing last hyp index.
sense_hyp_prod = self.num_senses * self.num_hyps
assert len(word_attention) == sense_hyp_prod or len(word_attention) == 2 * sense_hyp_prod
attention_per_sense = []
if len(word_attention) == 2 * sense_hyp_prod:
# The encoder is Bidirectional. We have attentions from both directions.
forward_sense_attention = word_attention[:len(word_attention) // 2]
backward_sense_attention = word_attention[len(word_attention) // 2:]
processed_attention = zip(forward_sense_attention, backward_sense_attention)
else:
# Encoder is not bidirectional
processed_attention = word_attention
hyp_ind = 0
while hyp_ind < len(processed_attention):
attention_per_sense.append(processed_attention[hyp_ind:hyp_ind+self.num_hyps])
hyp_ind += self.num_hyps
sense_attention_values = []
for sense_input, attention_per_hyp in zip(word_input, attention_per_sense):
hyp_attention_values = []
for hyp_input, hyp_attention in zip(sense_input, attention_per_hyp):
if hyp_input == 0:
continue
hyp_attention_values.append((self.data_processor.get_token_from_index(hyp_input,
onto_aware=True),
hyp_attention))
sense_attention_values.append(hyp_attention_values)
word_attention_values.append(sense_attention_values)
sent_attention_values.append(word_attention_values)
return sent_attention_values
def define_attention_model(self):
'''
Take necessary parts out of the model to get OntoLSTM attention.
'''
if not self.model:
raise RuntimeError("Model not trained yet!")
input_shape = self.model.get_input_shape_at(0)
input_layer = Input(input_shape[1:], dtype='int32') # removing batch size
embedding_layer = None
encoder_layer = None
for layer in self.model.layers:
if layer.name == "embedding":
embedding_layer = layer
elif layer.name == "onto_lstm":
# We need to redefine the OntoLSTM layer with the learned weights and set return attention to True.
# Assuming we'll want attention values for all words (return_sequences = True)
if isinstance(layer, Bidirectional):
onto_lstm = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
num_senses=self.num_senses, num_hyps=self.num_hyps,
use_attention=True, return_attention=True, return_sequences=True,
consume_less='gpu')
encoder_layer = Bidirectional(onto_lstm, weights=layer.get_weights())
else:
encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim,
output_dim=self.embed_dim, num_senses=self.num_senses,
num_hyps=self.num_hyps, use_attention=True,
return_attention=True, return_sequences=True,
consume_less='gpu', weights=layer.get_weights())
break
if not embedding_layer or not encoder_layer:
raise RuntimeError("Required layers not found!")
attention_output = encoder_layer(embedding_layer(input_layer))
self.attention_model = Model(inputs=input_layer, outputs=attention_output)
print >>sys.stderr, "Attention model summary:"
self.attention_model.summary()
self.attention_model.compile(loss="mse", optimizer="sgd") # Loss and optimizer do not matter!
def print_attention_values(self, input_file, test_inputs, output_file):
sent_attention_outputs = self.get_attention(test_inputs)
tagged_sentences = [x.strip().split("\t")[1] for x in codecs.open(input_file).readlines()]
outfile = codecs.open(output_file, "w", "utf-8")
full_json_struct = []
for sent_attention, tagged_sentence in zip(sent_attention_outputs, tagged_sentences):
sent_json = {}
sent_json["input"] = tagged_sentence
sent_json["tokens"] = []
tagged_words = tagged_sentence.split()
for tagged_word, word_attention in zip(tagged_words, sent_attention):
token_json = {}
token_json["surface_form"] = tagged_word
token_json["senses"] = []
for sense_num, sense_attention in enumerate(word_attention):
if len(sense_attention) == 0:
continue
sense_json = {}
sense_json["id"] = sense_num
sense_json["hypernyms"] = []
for hyp_name, hyp_att in sense_attention:
if isinstance(hyp_att, tuple):
# Averaging forward and backward attention
sense_json["hypernyms"].append({hyp_name: {"forward": float(hyp_att[0]),
"backward": float(hyp_att[1])}})
else:
sense_json["hypernyms"].append({hyp_name: float(hyp_att)})
token_json["senses"].append(sense_json)
sent_json["tokens"].append(token_json)
full_json_struct.append(sent_json)
print >>outfile, json.dumps(full_json_struct, indent=2)
outfile.close()
def main():
argparser = argparse.ArgumentParser(description="Train preposition phrase attachment model")
argparser.add_argument('--train_file', type=str, help="TSV file with label and pos tagged phrase")
argparser.add_argument('--embedding_file', type=str, help="Gzipped embedding file")
argparser.add_argument('--embed_dim', type=int, help="Word/Synset vector size", default=50)
argparser.add_argument('--bidirectional', help="Encode bidirectionally followed by pooling",
action='store_true')
argparser.add_argument('--onto_aware', help="Use ontology aware encoder. "
"If this flag is not set, will use traditional encoder", action='store_true')
argparser.add_argument('--num_senses', type=int, help="Number of senses per word if using OntoLSTM (default "
"2)", default=2)
argparser.add_argument('--num_hyps', type=int, help="Number of hypernyms per sense if using OntoLSTM (default "
"5)", default=5)
argparser.add_argument('--prep_senses_dir', type=str, help="Directory containing preposition senses "
"(from Semeval07 Task 6)")
argparser.add_argument('--set_sense_priors', help="Set an exponential prior on sense probabilities",
action='store_true')
argparser.add_argument('--use_attention', help="Use attention in ontoLSTM. "
"If this is not set, will use average concept representations",
action='store_true')
argparser.add_argument('--test_file', type=str, help="Optionally provide test file for which accuracy will be computed")
argparser.add_argument('--load_model_from_epoch', type=int, help="Load model from a specific epoch. Will load best model by default.")
argparser.add_argument('--attention_output', type=str, help="Print attention values of the validation data in the given file")
argparser.add_argument('--tune_embedding', help="Fine tune pretrained embedding (if provided)", action='store_true')
argparser.add_argument('--num_epochs', type=int, help="Number of epochs (default 20)", default=20)
argparser.add_argument('--num_mlp_layers', type=int, help="Number of mlp layers (default 0)", default=0)
argparser.add_argument('--embedding_dropout', type=float, help="Dropout after embedding", default=0.0)
argparser.add_argument('--encoder_dropout', type=float, help="Dropout after encoder", default=0.0)
args = argparser.parse_args()
if args.onto_aware:
attachment_model = OntoLSTMAttachmentModel(num_senses=args.num_senses, num_hyps=args.num_hyps,
use_attention=args.use_attention,
set_sense_priors=args.set_sense_priors,
prep_senses_dir=args.prep_senses_dir,
embed_dim=args.embed_dim,
bidirectional=args.bidirectional,
tune_embedding=args.tune_embedding)
else:
attachment_model = LSTMAttachmentModel(embed_dim=args.embed_dim, bidirectional=args.bidirectional,
tune_embedding=args.tune_embedding)
## Train model or load trained model
if args.train_file is None:
attachment_model.load_model(args.load_model_from_epoch)
else:
train_inputs, train_labels = attachment_model.process_data(args.train_file, onto_aware=args.onto_aware,
for_test=False)
dropout = {"embedding": args.embedding_dropout,
"encoder": args.encoder_dropout}
attachment_model.train(train_inputs, train_labels, num_epochs=args.num_epochs,
dropout=dropout, num_mlp_layers=args.num_mlp_layers,
embedding_file=args.embedding_file)
## Test model
if args.test_file is not None:
test_inputs, test_labels = attachment_model.process_data(args.test_file, onto_aware=args.onto_aware,
for_test=True)
#attachment_model.test(test_inputs, test_labels)
if args.attention_output is not None:
attachment_model.print_attention_values(args.test_file, test_inputs, args.attention_output)
if __name__ == "__main__":
main()
| apache-2.0 | 8,712,580,144,099,428,000 | 55.810976 | 138 | 0.583611 | false |
IDEALLab/design_embeddings_jmd_2016 | ml_ae.py | 1 | 3594 | """
Builds a manifold learning autoencoder.
Author(s): Wei Chen ([email protected])
"""
import numpy as np
from keras.models import Sequential
from keras.optimizers import Adagrad, SGD, Adadelta, Adam
from keras.regularizers import l2
from keras.layers import Input, Dense
from keras.models import Model
#from early_stopping import MyEarlyStopping
from stacked_ae import save_decoder
from sklearn.manifold import LocallyLinearEmbedding, Isomap
from util import pick_k
def train_decoder(inputs, outputs, model, lr, epsilon, weights=None, nb_epoch=1000, loss='mse', verbose=False):
if weights is not None:
model.set_weights(weights)
# training
# optimizer = SGD(lr=lr, momentum=momentum, decay=lr_decay, nesterov=True)
optimizer = Adagrad(lr=lr, epsilon=epsilon)
# optimizer = Adadelta(lr=lr, rho=rho, epsilon=epsilon)
model.compile(loss=loss, optimizer=optimizer)
# early_stopping = MyEarlyStopping(monitor='loss', patience=10, verbose=verbose, tol=1e-6)
model.fit(inputs, outputs, batch_size=inputs.shape[0], nb_epoch=nb_epoch, verbose=verbose)#, callbacks=[early_stopping])
return model
def mlae(data, feature_dim, train, test, hidden_size_l1=0, hidden_size_l2=0, hidden_size_l3=0, hidden_size_l4=0,
l=0, lr=0.01, epsilon=1e-08, evaluation=False, overwrite=True):
''' Select number of layers for autoencoder based on arguments
hidden_size_l1, hidden_size_l2, hidden_size_l3 and hidden_size_l4 '''
np.random.seed(0)
# Encoder
k_opt = pick_k(data[train], feature_dim)
# encoder = LocallyLinearEmbedding(n_neighbors=k_opt, n_components=feature_dim, method='hessian').fit(data[train])
encoder = Isomap(n_neighbors=k_opt, n_components=feature_dim).fit(data[train])
features = np.zeros((data.shape[0],feature_dim))
features[train+test] = encoder.transform(data[train+test])
# Decoder
verbose = 0
activation = 'tanh'
loss = 'mse'
nb_epoch = 5000 # maximum number of epochs
if hidden_size_l1 == 0:
hidden_sizes = []
elif hidden_size_l2 == 0:
hidden_sizes = [hidden_size_l1]
elif hidden_size_l3 == 0:
hidden_sizes = [hidden_size_l1, hidden_size_l2]
elif hidden_size_l4 == 0:
hidden_sizes = [hidden_size_l1, hidden_size_l2, hidden_size_l3]
else:
hidden_sizes = [hidden_size_l1, hidden_size_l2, hidden_size_l3, hidden_size_l4]
data_dim = data.shape[1]
sizes = [data_dim] + hidden_sizes + [feature_dim]
n_layers = len(sizes) - 1
inputs = Input(shape=(feature_dim,))
x = inputs
for i in range(n_layers):
x = Dense(sizes[-i-2], activation=activation, W_regularizer=l2(l))(x)
decoded = x
model = Model(input=inputs, output=decoded)
model = train_decoder(features[train], data[train], model, lr, epsilon, nb_epoch=nb_epoch, loss=loss, verbose=verbose)
if evaluation:
# Used for hyperparameter optimization
cost = model.evaluate(features[test], data[test], batch_size=len(test), verbose=verbose)
return cost
# Reconstruct using the decoder
decoder = Sequential()
for i in range(n_layers):
decoder.add(Dense(sizes[-i-2], input_dim=sizes[-i-1], activation=activation,
weights=model.layers[-n_layers+i].get_weights()))
decoder.compile(loss='mse', optimizer='sgd')
name = 'MLAE-'+str(n_layers)
if overwrite:
# Save the decoder
save_decoder(decoder, len(train), name)
return features, name, decoder.predict
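# Minimal usage sketch (assumption: synthetic data and argument values are illustrative only):
# data = np.random.rand(60, 20) # one flattened design per row
# train, test = list(range(50)), list(range(50, 60))
# features, name, predict = mlae(data, feature_dim=2, train=train, test=test, hidden_size_l1=10, overwrite=False)
# `features` holds the manifold embedding of the train+test rows; `predict` maps features back to the design space.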
| mit | 2,650,204,477,880,609,000 | 35.673469 | 124 | 0.663606 | false |
StratoSource/StratoSource | stratosource/user/admin_views.py | 1 | 21236 | # Copyright 2010, 2011 Red Hat Inc.
#
# This file is part of StratoSource.
#
# StratoSource is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# StratoSource is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with StratoSource. If not, see <http://www.gnu.org/licenses/>.
#
from django import forms
from django.shortcuts import redirect, render
from stratosource.management import Utils
from stratosource.models import Branch, BranchLog, Repo, DeployableObject, Delta, EmailTemplateFolder
from ss2 import settings
from django.core.exceptions import ObjectDoesNotExist
from crontab import CronTab, CronItem
from django.db import transaction
import subprocess
import os
import re
import logging
logger = logging.getLogger('console')
CRON_COMMENT = 'StratoSource ID'
class RepoForm(forms.ModelForm):
class Meta:
model = Repo
fields = '__all__'
def clean(self):
cleaned_data = self.cleaned_data
# path = cleaned_data.get("location")
name = cleaned_data.get("name")
path = os.path.join('/var/sfrepo', name, 'code')
# cleaned_data['location'] = path
if not os.path.isdir(path):
try:
os.makedirs(path)
except Exception as ex:
self.errors['__all__'] = self.error_class([str(ex)])
# self._errors["location"] = self.error_class(['Path does not exist and unable to create'])
return cleaned_data
if os.path.isdir(path):
curdir = os.getcwd()
os.chdir(path)
try:
#
# initialize the git repo
#
output = subprocess.check_output(['/usr/bin/git','init'])
subprocess.check_output(['touch','.gitignore'])
subprocess.check_output(['/usr/bin/git', 'config', 'user.email', '"[email protected]"'])
subprocess.check_output(['/usr/bin/git', 'config', 'user.name', '"me"'])
subprocess.check_output(['/usr/bin/git', 'add','.gitignore'])
subprocess.check_output(['/usr/bin/git','commit','-m','"initial commit"'])
except Exception as ex:
# self._errors["name"] = self.error_class(['Unable to create git repository in ' + path])
self._errors["name"] = self.error_class(['Unable to create git repository in ' + path, str(ex)])
finally:
os.chdir(curdir)
return cleaned_data
class BranchForm(forms.ModelForm):
SFENVCHOICES = (
('test', 'Test/Sandbox'),
('login', 'Production'),
)
SFAPIASSETS = (
('CustomPageWebLink', 'Custom page web links'),
('CustomLabels', 'Custom labels'),
('CustomApplication', 'Custom applications'),
('CustomObject', 'Custom objects and fields'),
('CustomObjectTranslation', 'Custom object translations'),
('Translations', 'Translations'),
('CustomSite', 'Sites'),
('CustomTab', 'Tabs'),
('DataCategoryGroup', 'Category groups'),
('EmailTemplate', 'Email templates'),
('HomePageLayout', 'Home page layout'),
('GlobalPicklist', 'Global Picklist'),
('Layout', 'Layouts'),
('Portal', 'Portal'),
('Profile', 'Profiles'),
('RecordType', 'Record types'),
('RemoteSiteSetting', 'Remote site settings'),
('HomePageComponent', 'Home page components'),
('ArticleType', 'Article types'),
#('ApexPage', 'Pages'),
#('ApexClass', 'Classes'),
#('ApexTrigger', 'Triggers'),
#('ApexComponent', 'Apex Components'),
('ReportType', 'Report types'),
('Scontrol', 'S-Controls'),
('ConnectedApp', 'Connected Apps'),
('CustomPageWebLink', 'Custom Page Web Links'),
('PermissionSet', 'Permission Sets'),
('ExternalDataSource', 'External Data Sources'),
# ('StaticResource', 'Static resources'),
('Workflow', 'Workflows'),
('ApprovalProcess', 'Approval processes'),
('EntitlementTemplate', 'Entitlement templates'),
)
api_env = forms.ChoiceField(choices=SFENVCHOICES)
api_assets = forms.MultipleChoiceField(choices=SFAPIASSETS, required=False)
# api_pass = forms.CharField(max_length=100, widget=forms.PasswordInput)
api_pass2 = forms.CharField(max_length=100, widget=forms.PasswordInput, required=False)
class Meta:
model = Branch
fields = '__all__'
widgets = {
'api_pass': forms.widgets.PasswordInput(),
}
def clean(self):
cleaned_data = self.cleaned_data
# api_ver = cleaned_data.get("api_ver")
# if api_ver and not re.match('^\d\d\.\d', api_ver):
# self._errors["api_ver"] = self.error_class(['Invalid API Version - use xx.x format']);
name = cleaned_data.get("name")
if name and not re.match('^\w+$', name):
self._errors["name"] = self.error_class(['Invalid branch name - use only alphanumeric'])
repo = cleaned_data.get("repo")
if not repo:
self._errors["repo"] = self.error_class(['Choose a repository'])
pass1 = cleaned_data.get("api_pass")
pass2 = cleaned_data.get("api_pass2")
if pass1 and pass2 and pass1 != pass2:
self._errors["api_pass"] = self.error_class(['Passwords do not match'])
self._errors["api_pass2"] = self.error_class(['Passwords do not match'])
cron_type = cleaned_data.get('cron_type')
cron_interval = int(cleaned_data.get('cron_interval'))
cron_start = cleaned_data.get('cron_start')
order = cleaned_data.get('order')
if cron_type == 'h':
if cron_interval < 1 or cron_interval > 23:
self._errors["cron_interval"] = self.error_class(['Interval must be between 1 and 23'])
offset = int(cron_start)
if offset < 0 or offset > 59:
self._errors["cron_start"] = self.error_class(['Start must be between 0 and 59'])
return cleaned_data
def newbranch(request):
if request.method == 'POST':
form = BranchForm(request.POST)
if form.is_valid():
# Process the data in form.cleaned_data
row = Branch()
cleaned_data = form.cleaned_data
row.repo = cleaned_data.get('repo')
row.name = cleaned_data.get('name')
row.api_env = cleaned_data.get('api_env')
row.api_user = cleaned_data.get('api_user')
row.api_pass = cleaned_data.get('api_pass')
row.api_auth = cleaned_data.get('api_auth')
row.api_store = cleaned_data.get('api_store')
# row.api_ver = cleaned_data.get('api_ver')
row.api_assets = ','.join(cleaned_data.get('api_assets'))
row.save()
createCrontab(row)
createCGitEntry(row)
repo = Repo.objects.get(id=row.repo_id)
curdir = os.getcwd()
os.chdir(repo.location)
try:
#
# initialize the git repo
#
subprocess.check_output(['git', 'checkout', '-b', row.name, 'master'])
except Exception as ex:
removeCGitEntry(row)
removeCrontab(row)
row.delete()
logger.exception(ex)
raise
finally:
os.chdir(curdir)
if 'EmailTemplate' in row.api_assets:
return edit_branch_details(request, row.id, True)
return adminMenu(request)
else:
form = BranchForm()
return render(request, 'editbranch.html', {'form': form, 'type': 'New', 'action': 'newbranch/'})
def editbranch(request, branch_id):
if request.method == 'POST':
if 'cancelButton' in request.POST:
return adminMenu(request)
form = BranchForm(request.POST)
if form.is_valid():
# Process the data in form.cleaned_data
row = Branch.objects.get(id=branch_id)
cleaned_data = form.cleaned_data
row.repo = cleaned_data.get('repo')
row.name = cleaned_data.get('name')
row.api_env = cleaned_data.get('api_env')
row.api_user = cleaned_data.get('api_user')
api_pass = cleaned_data.get('api_pass')
if api_pass and len(api_pass) > 0:
row.api_pass = api_pass
row.api_auth = cleaned_data.get('api_auth')
row.api_store = cleaned_data.get('api_store')
# row.api_ver = cleaned_data.get('api_ver')
row.api_assets = ','.join(cleaned_data.get('api_assets'))
row.enabled = cleaned_data.get('enabled')
row.cron_enabled = cleaned_data.get('cron_enabled')
row.cron_type = cleaned_data.get('cron_type')
row.cron_interval = cleaned_data.get('cron_interval')
row.cron_start = cleaned_data.get('cron_start')
row.code_cron_enabled = cleaned_data.get('code_cron_enabled')
row.code_cron_type = cleaned_data.get('code_cron_type')
row.code_cron_interval = cleaned_data.get('code_cron_interval')
row.code_cron_start = cleaned_data.get('code_cron_start')
row.order = cleaned_data.get('order')
row.save()
updateCrontab(row)
createCGitEntry(row)
if 'EmailTemplate' in row.api_assets:
return edit_branch_details(request, branch_id, True)
return adminMenu(request)
else:
logger.debug(form.errors)
else:
row = Branch.objects.get(id=branch_id)
row.api_assets = row.api_assets.split(',')
form = BranchForm(instance=row)
return render(request, 'editbranch.html', {'form': form, 'type': 'Edit', 'action': 'editbranch/' + branch_id})
@transaction.atomic
def edit_branch_details(request, branch_id, from_edit = False):
branch = Branch.objects.get(id=branch_id)
selected = EmailTemplateFolder.objects.filter(branch=branch).order_by('name')
if not from_edit and request.method == 'POST' and request.POST.__contains__('saveFoldersButton'):
folderlist = request.POST.getlist('email_template_folder')
for select in selected: select.delete()
for folder in folderlist:
ef = EmailTemplateFolder()
ef.branch = branch
ef.name = folder
ef.save()
return adminMenu(request)
try:
agent = Utils.getAgentForBranch(branch)
folders = agent.getSalesforceEmailTemplateFolders()
except Exception as ex:
return render(request, 'error.html', {'error_message': str(ex) })
folders.sort()
for select in selected:
if select.name in folders:
folders.remove(select.name)
return render(request, 'edit_asset_details.html', {'branch': branch, 'selected': selected, 'folders': folders})
def last_log(request, branch_id, logtype):
branch = Branch.objects.get(id=branch_id)
log = 'No Log Found'
try:
branchlog = BranchLog.objects.get(branch=branch, logtype=logtype)
log = branchlog.lastlog
except ObjectDoesNotExist:
pass
data = {'branch': branch, 'log': log}
return render(request, 'last_log.html', data)
#
# just-in-time setup of cgit config, to support Docker
#
def verifyCgit():
if not os.path.isdir(settings.CONFIG_DIR):
os.mkdir(settings.CONFIG_DIR)
if not os.path.isfile(os.path.join(settings.CONFIG_DIR, 'cgitrepo')):
subprocess.check_output(['cp', os.path.join(settings.BASE_DIR, 'resources', 'cgitrepo'), settings.CONFIG_DIR])
def createCGitEntry(branch):
verifyCgit()
removeCGitEntry(branch)
f = open(os.path.join(settings.CONFIG_DIR, 'cgitrepo'), 'a')
f.write('#ID=%d\n' % branch.id)
f.write('repo.url=%s\n' % branch.name)
f.write('repo.path=%s/.git\n' % branch.repo.location)
f.write('repo.desc=%s\n' % branch.name)
f.close()
def removeCGitEntry(branch):
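# Remove this branch's section from the cgit config: locate the line starting with '#ID=<branch id>' and drop every following line up to the next '#'-prefixed line (or end of file).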
verifyCgit()
p = os.path.join(settings.CONFIG_DIR, 'cgitrepo')
if not os.path.exists(p):
return
f = open(p, 'r')
lines = f.readlines()
f.close()
linecount = 0
found = False
prefix = '#ID=%d' % branch.id
for line in lines:
if line.startswith(prefix):
found = True
break
linecount += 1
if found:
start = linecount
linecount += 1
while linecount < len(lines) and len(lines[linecount]) > 0 and lines[linecount][0:1] != '#': linecount += 1
# del lines[start:linecount]
f = open(os.path.join(settings.CONFIG_DIR, 'cgitrepo'), 'w')
f.writelines(lines[0:start])
f.writelines(lines[linecount:])
f.close()
def createCrontab(branch):
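# Build the branch's scheduled snapshots: one crontab line per enabled job (config and/or code), of the form '<start> <h1,h2,...> * * * <script> <repo> <branch>', tagged with the 'StratoSource ID <branch id>' comment so updateCrontab/removeCrontab can find it later.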
ctab = CronTab()
if branch.cron_enabled and branch.cron_type == 'h':
if branch.cron_interval > 1:
interval_list = [str(x) for x in range(0, 23, branch.cron_interval)]
interval_str = ','.join(interval_list)
else:
interval_str = '*'
cronline = "%s %s * * * %s %s %s >/var/sftmp/config_cronjob.out 2>&1" % (
branch.cron_start, interval_str, os.path.join(settings.BASE_DIR, 'config_cronjob.sh'), branch.repo.name,
branch.name)
logger.debug('Creating cron tab with line ' + cronline)
item = CronItem(line=cronline + ' #' + (CRON_COMMENT + ' %d' % branch.id))
ctab.add(item)
ctab.write()
if branch.code_cron_enabled and branch.code_cron_type == 'h':
if branch.code_cron_interval > 1:
interval_list = [str(x) for x in range(0, 23, branch.code_cron_interval)]
interval_str = ','.join(interval_list)
else:
interval_str = '*'
cronline = "%s %s * * * %s %s %s >/var/sftmp/code_cronjob.out 2>&1" % (
branch.code_cron_start, interval_str, os.path.join(settings.BASE_DIR, 'code_cronjob.sh'), branch.repo.name,
branch.name)
logger.debug('Creating cron tab with line ' + cronline)
item = CronItem(line=cronline + ' #' + (CRON_COMMENT + ' %d' % branch.id))
ctab.add(item)
ctab.write()
def updateCrontab(branch):
removeCrontab(branch)
if branch.cron_enabled or branch.code_cron_enabled:
return createCrontab(branch)
def removeCrontab(branch):
ctab = CronTab()
comment = CRON_COMMENT + ' %d' % branch.id
theItems = []
for item in ctab:
if item.raw_line.find(comment) > -1:
theItems.append(item)
for theItem in theItems:
ctab.remove(theItem)
ctab.write()
def adminMenu(request):
if request.method == u'GET' and request.GET.__contains__('reset') and request.GET['reset'] == 'true':
snaptype = request.GET['type']
branch_id = request.GET['branch_id']
branch = Branch.objects.get(id=branch_id)
if snaptype == 'config':
branch.run_status = 'd'
else:
branch.code_run_status = 'd'
branch.save()
return redirect("/admin/?success=true")
if request.method == u'GET' and request.GET.__contains__('snapshot') and request.GET['snapshot'] == 'true':
branch_id = request.GET['branch_id']
snaptype = request.GET['type']
branch = Branch.objects.get(id=branch_id)
if snaptype == 'config' and branch.run_status != 'r':
repo_name = branch.repo.name
branch_name = branch.name
pr = subprocess.Popen(os.path.join(settings.BASE_DIR,
'config_cronjob.sh') + ' ' + repo_name + ' ' + branch_name + ' >/var/sftmp/ssRun.out 2>&1 &',
shell=True)
logger.debug('Started With pid ' + str(pr.pid))
pr.wait()
if pr.returncode == 0:
brlog = BranchLog()
try:
brlog = BranchLog.objects.get(branch=branch, logtype=snaptype)
except ObjectDoesNotExist:
brlog.branch = branch
brlog.logtype = snaptype
brlog.last_log = 'Started'
brlog.save()
branch.run_status = 'r'
branch.save()
return redirect("/admin/?success=true")
return redirect("/admin/?failed=true")
if snaptype == 'code' and branch.code_run_status != 'r':
repo_name = branch.repo.name
branch_name = branch.name
pr = subprocess.Popen(os.path.join(settings.BASE_DIR,
'code_cronjob.sh') + ' ' + repo_name + ' ' + branch_name + ' >/var/sftmp/ssRun.out 2>&1 &',
shell=True)
logger.debug('Started With pid ' + str(pr.pid))
pr.wait()
if pr.returncode == 0:
brlog = BranchLog()
try:
brlog = BranchLog.objects.get(branch=branch, logtype=snaptype)
except ObjectDoesNotExist:
brlog.branch = branch
brlog.logtype = snaptype
brlog.last_log = 'Started'
brlog.save()
branch.code_run_status = 'r'
branch.save()
return redirect("/admin/?success=true")
return redirect("/admin/?failed=true")
repos = Repo.objects.all()
branches = Branch.objects.all()
ctab = CronTab()
cronlist = []
for item in [entry.render() for entry in ctab]:
if item.find(CRON_COMMENT) != -1:
cronlist.append(item)
return render(request, 'admin_menu.html', {'repos': repos, 'branches': branches, 'crontab': cronlist})
def repo_form_action(request):
if request.method == 'POST' and request.POST.__contains__('delRepoButton'):
repolist = request.POST.getlist('repocb')
if repolist:
for repoid in repolist:
branches = Branch.objects.filter(repo__id=repoid)
for branch in branches:
removeCrontab(branch)
removeCGitEntry(branch)
r = Repo.objects.get(id=repoid)
brlist = Branch.objects.filter(repo=r)
for br in brlist:
branchCascadeDelete(br)
brlist.delete()
r.delete()
if request.method == 'POST' and request.POST.__contains__('addRepoButton'):
return redirect('/newrepo')
return adminMenu(request)
def branch_form_action(request):
if request.method == 'POST' and request.POST.__contains__('delBranchButton'):
branchlist = request.POST.getlist('branchcb')
if branchlist:
for branchid in branchlist:
br = Branch.objects.get(id=branchid)
removeCrontab(br)
removeCGitEntry(br)
branchCascadeDelete(br)
if request.method == 'POST' and request.POST.__contains__('addBranchButton'):
return redirect('/newbranch')
return adminMenu(request)
def branchCascadeDelete(br):
deplist = DeployableObject.objects.filter(branch=br)
for dep in deplist:
deltas = Delta.objects.filter(object=dep)
deltas.delete()
deplist.delete()
BranchLog.objects.filter(branch=br).delete()
br.delete()
def newrepo(request):
if request.method == 'POST':
form = RepoForm(request.POST)
if form.is_valid():
row = Repo()
cleaned_data = form.cleaned_data
row.name = cleaned_data.get('name')
row.location = os.path.join('/var/sfrepo', row.name, 'code')
row.save()
# form.save()
return adminMenu(request)
else:
form = RepoForm()
return render(request, 'editrepo.html', {'form': form, 'type': 'New', 'action': 'newrepo/'})
def editrepo(request, repo_id):
if request.method == 'POST':
form = RepoForm(request.POST)
if form.is_valid():
row = Repo.objects.get(id=repo_id)
cleaned_data = form.cleaned_data
row.name = cleaned_data.get('name')
#row.location = cleaned_data.get('location')
row.location = os.path.join('/var/sfrepo', row.name, 'code')
row.save()
return adminMenu(request)
else:
form = RepoForm(instance=Repo.objects.get(id=repo_id))
return render(request, 'editrepo.html', {'form': form, 'type': 'Edit', 'action': 'editrepo/' + repo_id})
| gpl-3.0 | -2,010,322,946,773,850,000 | 38.108656 | 127 | 0.572189 | false |
ElricleNecro/CalculServer | setup.py | 1 | 1267 | #! /usr/bin/env python3
# -*- coding:Utf8 -*-
#--------------------------------------------------------------------------------------------------------------
# All necessary import:
#--------------------------------------------------------------------------------------------------------------
import os, sys, glob
#from setuptools import find_packages
import setuptools as st
from distutils.core import setup
from distutils.command.install_data import install_data
packages = st.find_packages()
#--------------------------------------------------------------------------------------------------------------
# Call the setup function:
#--------------------------------------------------------------------------------------------------------------
setup(
name = 'CalculServer',
version = '0.1',
description = 'Python module for analyzing gadget simulations on two different computers.',
author = 'Guillaume Plum',
packages = packages,
cmdclass = {'install_data': install_data},
# data_files = [
# ('share/LibThese/animation-plugins', ["share/LibThese/animation-plugins/__init__.py"]), #glob.glob("share/LibThese/animation-plugins/*.py")),
# ],
scripts = [
'scripts/cs_notifier.py',
'scripts/cs_runner.py',
],
)
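# Typical usage (standard distutils workflow, not specific to this project): `python3 setup.py build` then `python3 setup.py install`.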
#vim:spelllang=
| lgpl-3.0 | 3,314,974,385,399,036,000 | 35.2 | 145 | 0.444357 | false |
lukasmonk/lucaschess | Code/GestorOpeningLines.py | 1 | 37899 | import time
import random
from Code import Gestor
from Code import Jugada
from Code import Books
from Code import ControlPosicion
from Code import TrListas
from Code.QT import QTUtil2
from Code.QT import Iconos
from Code.QT import QTVarios
from Code import Util
from Code import OpeningLines
from Code import XMotorRespuesta
from Code import Partida
from Code.Constantes import *
class GestorOpeningEngines(Gestor.Gestor):
def inicio(self, pathFichero):
self.tablero.saveVisual()
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.tablero.dbVisual_setFichero(dbop.nomFichero)
self.reinicio(dbop)
def reinicio(self, dbop):
self.dbop = dbop
self.dbop.open_cache_engines()
self.tipoJuego = kJugOpeningLines
self.level = self.dbop.getconfig("ENG_LEVEL", 0)
self.numengine = self.dbop.getconfig("ENG_ENGINE", 0)
self.trainingEngines = self.dbop.trainingEngines()
self.auto_analysis = self.trainingEngines.get("AUTO_ANALYSIS", True)
self.ask_movesdifferent = self.trainingEngines.get("ASK_MOVESDIFFERENT", False)
liTimes = self.trainingEngines.get("TIMES")
if not liTimes:
liTimes = [500, 1000, 2000, 4000, 8000]
liBooks = self.trainingEngines.get("BOOKS")
if not liBooks:
liBooks = ["", "", "", "", ""]
liEngines = self.trainingEngines["ENGINES"]
num_engines_base = len(liEngines)
liEnginesExt = self.trainingEngines.get("EXT_ENGINES", [])
num_engines = num_engines_base+len(liEnginesExt)
if self.numengine >= num_engines:
self.level += 1
self.numengine = 0
self.dbop.setconfig("ENG_LEVEL", self.level)
self.dbop.setconfig("ENG_ENGINE", 0)
num_levels = len(liTimes)
if self.level >= num_levels:
if QTUtil2.pregunta(self.pantalla, "%s.\n%s" % (_("Training finished"), _("Do you want to reinit?"))):
self.dbop.setconfig("ENG_LEVEL", 0)
self.dbop.setconfig("ENG_ENGINE", 0)
self.reinicio(dbop)
return
self.time = liTimes[self.level]
nombook = liBooks[self.level]
if nombook:
listaLibros = Books.ListaLibros()
listaLibros.recuperaVar(self.configuracion.ficheroBooks)
self.book = listaLibros.buscaLibro(nombook)
if self.book:
self.book.polyglot()
else:
self.book = None
if self.numengine < num_engines_base:
self.keyengine = liEngines[self.numengine]
else:
self.keyengine = "*" + liEnginesExt[self.numengine-num_engines_base-1]
self.plies_mandatory = self.trainingEngines["MANDATORY"]
self.plies_control = self.trainingEngines["CONTROL"]
self.plies_pendientes = self.plies_control
self.lost_points = self.trainingEngines["LOST_POINTS"]
self.siJugamosConBlancas = self.trainingEngines["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.siAprobado = False
rival = self.configuracion.buscaRivalExt(self.keyengine)
self.xrival = self.procesador.creaGestorMotor(rival, self.time, None)
self.xrival.siBlancas = self.siRivalConBlancas
juez = self.configuracion.buscaRival(self.trainingEngines["ENGINE_CONTROL"])
self.xjuez = self.procesador.creaGestorMotor(juez, int(self.trainingEngines["ENGINE_TIME"] * 1000), None)
self.xjuez.anulaMultiPV()
self.li_info = [
"<b>%s</b>: %d/%d - %s" % (_("Engine"), self.numengine+1, num_engines, self.xrival.nombre),
"<b>%s</b>: %d/%d - %0.1f\"" % (_("Level"), self.level + 1, num_levels, self.time / 1000.0),
]
self.dicFENm2 = self.trainingEngines["DICFENM2"]
self.siAyuda = False
self.tablero.dbVisual_setShowAllways(False)
self.ayudas = 9999 # so analysis can run without hint limits
self.partida = Partida.Partida()
self.pantalla.ponToolBar((k_mainmenu, k_abandonar, k_reiniciar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(self.partida.ultPosicion)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
if not self.runcontrol():
if siRival:
self.desactivaTodas()
if self.mueveRival():
self.siguienteJugada()
else:
self.activaColor(siBlancas)
self.siJuegaHumano = True
def mueveRival(self):
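# Engine reply: inside the mandatory zone the move is taken from the opening lines (cached choice, then a book move that matches a line, then a random line move); outside it the opening book, the per-engine cache and finally the engine itself are consulted, and the result is cached.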
si_obligatorio = self.partida.numJugadas() <= self.plies_mandatory
si_pensar = True
fenM2 = self.partida.ultPosicion.fenM2()
moves = self.dicFENm2.get(fenM2, set())
if si_obligatorio:
nmoves = len(moves)
if nmoves == 0:
si_obligatorio = False
else:
move = self.dbop.get_cache_engines(self.keyengine, self.time, fenM2)
if move is None:
if self.book:
move_book = self.book.eligeJugadaTipo(self.partida.ultPosicion.fen(), "au")
if move_book in list(moves):
move = move_book
if move is None:
move = random.choice(list(moves))
self.dbop.set_cache_engines(self.keyengine, self.time, fenM2, move)
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
si_pensar = False
if si_pensar:
move = None
if self.book:
move = self.book.eligeJugadaTipo(self.partida.ultPosicion.fen(), "mp")
if move is None:
move = self.dbop.get_cache_engines(self.keyengine, self.time, fenM2)
if move is None:
rmRival = self.xrival.juegaPartida(self.partida)
move = rmRival.movimiento()
self.dbop.set_cache_engines(self.keyengine, self.time, fenM2, move)
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
if si_obligatorio:
if move not in moves:
move = list(moves)[0]
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
if siBien:
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
return True
else:
self.error = mens
return False
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
fenM2 = self.partida.ultPosicion.fenM2()
moves = self.dicFENm2.get(fenM2, [])
nmoves = len(moves)
if nmoves > 0:
if jg.movimiento() not in moves:
for move in moves:
self.tablero.creaFlechaMulti(move, False)
self.tablero.creaFlechaMulti(jg.movimiento(), True)
if self.ask_movesdifferent:
mensaje = "%s\n%s" % (_("This is not the move in the opening lines"),
_("Do you want to go on with this move?"))
if not QTUtil2.pregunta(self.pantalla, mensaje):
self.ponFinJuego()
return True
else:
self.mensajeEnPGN(_("This is not the move in the opening lines, you must repeat the game"))
self.ponFinJuego()
return True
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.siguienteJugada()
return True
def masJugada(self, jg, siNuestra):
fenM2 = jg.posicionBase.fenM2()
jg.es_linea = False
if fenM2 in self.dicFENm2:
if jg.movimiento() in self.dicFENm2[fenM2]:
jg.criticaDirecta = "!"
jg.es_linea = True
self.partida.append_jg(jg)
if self.partida.pendienteApertura:
self.partida.asignaApertura()
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def muestraInformacion(self):
li = []
li.extend(self.li_info)
si_obligatorio = self.partida.numJugadas() < self.plies_mandatory
if si_obligatorio and self.estado != kFinJuego:
fenM2 = self.partida.ultPosicion.fenM2()
moves = self.dicFENm2.get(fenM2, [])
if len(moves) > 0:
li.append( "<b>%s</b>: %d/%d" % (_("Mandatory move"), self.partida.numJugadas()+1, self.plies_mandatory))
else:
si_obligatorio = False
if not si_obligatorio and self.estado != kFinJuego:
tm = self.plies_pendientes
if tm > 1 and self.partida.numJugadas() and not self.partida.jugada(-1).es_linea:
li.append("%s: %d" % (_("Moves until the control"), tm-1))
self.ponRotulo1("<br>".join(li))
def run_auto_analysis(self):
lista = []
for njg in range(self.partida.numJugadas()):
jg = self.partida.jugada(njg)
if jg.siBlancas() == self.siJugamosConBlancas:
fenM2 = jg.posicionBase.fenM2()
if fenM2 not in self.dicFENm2:
jg.njg = njg
lista.append(jg)
jg.fenM2 = fenM2
total = len(lista)
for pos, jg in enumerate(lista, 1):
if self.siCancelado():
break
self.ponteEnJugada(jg.njg)
self.mensEspera(siCancelar=True, masTitulo="%d/%d" % (pos, total))
nombre = self.xanalyzer.nombre
tiempo = self.xanalyzer.motorTiempoJugada
depth = self.xanalyzer.motorProfundidad
mrm = self.dbop.get_cache_engines(nombre, tiempo, jg.fenM2, depth)
ok = False
if mrm:
rm, pos = mrm.buscaRM(jg.movimiento())
if rm:
ok = True
if not ok:
mrm, pos = self.xanalyzer.analizaJugada(jg, self.xanalyzer.motorTiempoJugada, self.xanalyzer.motorProfundidad)
self.dbop.set_cache_engines(nombre, tiempo, jg.fenM2, mrm, depth)
jg.analisis = mrm, pos
self.pantalla.base.pgnRefresh()
def mensEspera(self, siFinal=False, siCancelar=False, masTitulo=None):
if siFinal:
if self.um:
self.um.final()
else:
if self.um is None:
self.um = QTUtil2.mensajeTemporal(self.pantalla, _("Analyzing"), 0, posicion="ad", siCancelar=True,
titCancelar=_("Cancel"))
if masTitulo:
self.um.rotulo( _("Analyzing") + " " + masTitulo )
self.um.me.activarCancelar(siCancelar)
def siCancelado(self):
si = self.um.cancelado()
if si:
self.um.final()
return si
def runcontrol(self):
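        # Judge the attempt: the evaluation at the first move that leaves the
        # prepared lines is compared with the evaluation at the control point
        # (or at the end of the game); the attempt passes when fewer than
        # `lost_points` centipawns were lost. A checkmate decides the result
        # directly, and mate scores override the point comparison.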
puntosInicio, mateInicio = 0, 0
puntosFinal, mateFinal = 0, 0
numJugadas = self.partida.numJugadas()
if numJugadas == 0:
return False
        self.um = None  # tracks the "un momento" (please-wait) dialog
def aprobado():
mens = "<b><span style=\"color:green\">%s</span></b>" % _("Congratulations, goal achieved")
self.li_info.append("")
self.li_info.append(mens)
self.muestraInformacion()
self.dbop.setconfig("ENG_ENGINE", self.numengine + 1)
self.mensajeEnPGN(mens)
self.siAprobado = True
def suspendido():
mens = "<b><span style=\"color:red\">%s</span></b>" % _("You must repeat the game")
self.li_info.append("")
self.li_info.append(mens)
self.muestraInformacion()
self.mensajeEnPGN(mens)
def calculaJG(jg, siinicio):
fen = jg.posicionBase.fen() if siinicio else jg.posicion.fen()
nombre = self.xjuez.nombre
tiempo = self.xjuez.motorTiempoJugada
mrm = self.dbop.get_cache_engines(nombre, tiempo, fen)
if mrm is None:
self.mensEspera()
mrm = self.xjuez.analiza(fen)
self.dbop.set_cache_engines(nombre, tiempo, fen, mrm)
rm = mrm.mejorMov()
if (" w " in fen) == self.siJugamosConBlancas:
return rm.puntos, rm.mate
else:
return -rm.puntos, -rm.mate
siCalcularInicio = True
if self.partida.siTerminada():
self.ponFinJuego()
jg = self.partida.jugada(-1)
if jg.siJaqueMate:
if jg.siBlancas() == self.siJugamosConBlancas:
aprobado()
else:
suspendido()
self.ponFinJuego()
return True
puntosFinal, mateFinal = 0, 0
else:
jg = self.partida.jugada(-1)
if jg.es_linea:
self.plies_pendientes = self.plies_control
else:
self.plies_pendientes -= 1
if self.plies_pendientes > 0:
return False
            # if the last move still belongs to the line, nothing is evaluated (we returned above)
self.mensEspera()
puntosFinal, mateFinal = calculaJG(jg, False)
        # Flag every move that does not follow the prepared lines
        # and locate the first deviating move (used as the evaluation baseline)
if siCalcularInicio:
jg_inicial = None
for njg in range(numJugadas):
jg = self.partida.jugada(njg)
fenM2 = jg.posicionBase.fenM2()
if fenM2 in self.dicFENm2:
moves = self.dicFENm2[fenM2]
if jg.movimiento() not in moves:
jg.criticaDirecta = "?!"
if jg_inicial is None:
jg_inicial = jg
elif jg_inicial is None:
jg_inicial = jg
if jg_inicial:
puntosInicio, mateInicio = calculaJG(jg_inicial, True)
else:
puntosInicio, mateInicio = 0, 0
self.li_info.append("<b>%s:</b>" %_("Score"))
template = " <b>%s</b>: %d"
def appendInfo(label, puntos, mate):
mens = template % (label, puntos)
if mate:
mens += " %s %d" % (_("Mate"), mate)
self.li_info.append(mens)
appendInfo(_("Start"), puntosInicio, mateInicio)
appendInfo(_("End"), puntosFinal, mateFinal)
perdidos = (puntosInicio-puntosFinal)
ok = perdidos < self.lost_points
if mateInicio or mateFinal:
ok = mateFinal > mateInicio
mens = template % ("(%d)-(%d)" %(puntosInicio, puntosFinal), perdidos)
mens = "%s %s %d" %(mens, "<" if ok else ">", self.lost_points)
self.li_info.append(mens)
if not ok:
if self.auto_analysis:
self.run_auto_analysis()
self.mensEspera(siFinal=True)
suspendido()
else:
self.mensEspera(siFinal=True)
aprobado()
self.ponFinJuego()
return True
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave in (k_reiniciar, k_siguiente):
self.reiniciar()
elif clave == k_peliculaRepetir:
self.dbop.setconfig("ENG_ENGINE", self.numengine)
self.reiniciar()
elif clave == k_abandonar:
self.ponFinJuego()
elif clave == k_configurar:
self.configurar(siSonidos=True)
elif clave == k_utilidades:
liMasOpciones = []
liMasOpciones.append(("libros", _("Consult a book"), Iconos.Libros()))
liMasOpciones.append((None, None, None))
liMasOpciones.append((None, _("Options"), Iconos.Opciones()))
mens = _("cancel") if self.auto_analysis else _("activate")
liMasOpciones.append(("auto_analysis", "%s: %s" % (_("Automatic analysis"), mens), Iconos.Analizar()))
liMasOpciones.append((None, None, None))
mens = _("cancel") if self.ask_movesdifferent else _("activate")
liMasOpciones.append(("ask_movesdifferent", "%s: %s" % (_("Ask when the moves are different from the line"), mens), Iconos.Pelicula_Seguir()))
            liMasOpciones.append((None, None, True))  # to exit the submenu
liMasOpciones.append((None, None, None))
liMasOpciones.append(("run_analysis", _("Specific analysis"), Iconos.Analizar()))
liMasOpciones.append((None, None, None))
liMasOpciones.append(("add_line", _("Add this line"), Iconos.OpeningLines()))
resp = self.utilidades(liMasOpciones)
if resp == "libros":
self.librosConsulta(False)
elif resp == "add_line":
numJugadas, nj, fila, siBlancas = self.jugadaActual()
partida = self.partida
if numJugadas != nj+1:
menu = QTVarios.LCMenu(self.pantalla)
menu.opcion("all", _("Add all moves"), Iconos.PuntoAzul())
menu.separador()
menu.opcion("parcial", _("Add until current move"), Iconos.PuntoVerde())
resp = menu.lanza()
if resp is None:
return
if resp == "parcial":
partida = self.partida.copia(nj)
self.dbop.append(partida)
self.dbop.updateTrainingEngines()
QTUtil2.mensaje(self.pantalla, _("Done"))
elif resp == "auto_analysis":
self.auto_analysis = not self.auto_analysis
self.trainingEngines["AUTO_ANALYSIS"] = self.auto_analysis
self.dbop.setTrainingEngines(self.trainingEngines)
elif resp == "ask_movesdifferent":
self.ask_movesdifferent = not self.ask_movesdifferent
self.trainingEngines["ASK_MOVESDIFFERENT"] = self.ask_movesdifferent
self.dbop.setTrainingEngines(self.trainingEngines)
elif resp == "run_analysis":
self.um = None
self.mensEspera()
self.run_auto_analysis()
self.mensEspera(siFinal=True)
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.tablero.restoreVisual()
self.procesador.inicio()
self.procesador.openings()
return False
def reiniciar(self):
self.reinicio(self.dbop)
def ponFinJuego(self):
self.estado = kFinJuego
self.desactivaTodas()
liOpciones = [k_mainmenu]
if self.siAprobado:
liOpciones.append(k_siguiente)
liOpciones.append(k_peliculaRepetir)
else:
liOpciones.append(k_reiniciar)
liOpciones.append(k_configurar)
liOpciones.append(k_utilidades)
self.pantalla.ponToolBar(liOpciones)
class GestorOpeningLines(Gestor.Gestor):
def inicio(self, pathFichero, modo, num_linea):
self.tablero.saveVisual()
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.tablero.dbVisual_setFichero(dbop.nomFichero)
self.reinicio(dbop, modo, num_linea)
def reinicio(self, dbop, modo, num_linea):
self.dbop = dbop
self.tipoJuego = kJugOpeningLines
self.modo = modo
self.num_linea = num_linea
self.training = self.dbop.training()
self.liGames = self.training["LIGAMES_%s" % modo.upper()]
self.game = self.liGames[num_linea]
self.liPV = self.game["LIPV"]
self.numPV = len(self.liPV)
self.calc_totalTiempo()
self.dicFENm2 = self.training["DICFENM2"]
li = self.dbop.getNumLinesPV(self.liPV)
if len(li) > 10:
mensLines = ",".join(["%d"%line for line in li[:10]]) + ", ..."
else:
mensLines = ",".join(["%d"%line for line in li])
self.liMensBasic = [
"%d/%d" % (self.num_linea+1, len(self.liGames)),
"%s: %s" % (_("Lines"), mensLines),
]
self.siAyuda = False
self.tablero.dbVisual_setShowAllways(False)
self.partida = Partida.Partida()
        self.ayudas = 9999  # so that analysis can run without restrictions
self.siJugamosConBlancas = self.training["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.pantalla.ponToolBar((k_mainmenu, k_ayuda, k_reiniciar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(self.partida.ultPosicion)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def calc_totalTiempo(self):
self.tm = 0
for game in self.liGames:
for tr in game["TRIES"]:
self.tm += tr["TIME"]
def ayuda(self):
self.siAyuda = True
self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
self.tablero.dbVisual_setShowAllways(True)
self.muestraAyuda()
self.muestraInformacion()
def muestraInformacion(self):
li = []
li.append("%s: %d" %(_("Errors"), self.errores))
if self.siAyuda:
li.append(_("Help activated"))
self.ponRotulo1("\n".join(li))
tgm = 0
for tr in self.game["TRIES"]:
tgm += tr["TIME"]
mens = "\n" + "\n".join(self.liMensBasic)
mens += "\n%s:\n %s %s\n %s %s" % (_("Working time"),
time.strftime("%H:%M:%S", time.gmtime(tgm)), _("Current"),
time.strftime("%H:%M:%S", time.gmtime(self.tm)), _("Total"))
self.ponRotulo2(mens)
if self.siAyuda:
dicNAGs = TrListas.dicNAGs()
mens3 = ""
fenM2 = self.partida.ultPosicion.fenM2()
reg = self.dbop.getfenvalue(fenM2)
if reg:
mens3 = reg.get("COMENTARIO", "")
ventaja = reg.get("VENTAJA", 0)
valoracion = reg.get("VALORACION", 0)
if ventaja:
mens3 += "\n %s" % dicNAGs[ventaja]
if valoracion:
mens3 += "\n %s" % dicNAGs[valoracion]
self.ponRotulo3(mens3 if mens3 else None)
def partidaTerminada(self, siCompleta):
self.estado = kFinJuego
tm = time.time() - self.ini_time
li = [_("Line finished.")]
if self.siAyuda:
li.append(_("Help activated"))
if self.errores > 0:
li.append("%s: %d" % (_("Errors"), self.errores))
if siCompleta:
mensaje = "\n".join(li)
self.mensajeEnPGN(mensaje)
dictry = {
"DATE": Util.hoy(),
"TIME": tm,
"AYUDA": self.siAyuda,
"ERRORS": self.errores
}
self.game["TRIES"].append(dictry)
sinError = self.errores == 0 and not self.siAyuda
if siCompleta:
if sinError:
self.game["NOERROR"] += 1
noError = self.game["NOERROR"]
if self.modo == "sequential":
salto = 2**(noError + 1)
numGames = len(self.liGames)
for x in range(salto, numGames):
game = self.liGames[x]
if game["NOERROR"] != noError:
salto = x
break
liNuevo = self.liGames[1:salto]
liNuevo.append(self.game)
if numGames > salto:
liNuevo.extend(self.liGames[salto:])
self.training["LIGAMES_SEQUENTIAL"] = liNuevo
self.pantalla.ponToolBar((k_mainmenu, k_siguiente))
else:
self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
else:
self.game["NOERROR"] -= 1
self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
else:
if not sinError:
self.game["NOERROR"] -= 1
self.game["NOERROR"] = max(0, self.game["NOERROR"])
self.dbop.setTraining(self.training)
self.estado = kFinJuego
self.calc_totalTiempo()
self.muestraInformacion()
def muestraAyuda(self):
pv = self.liPV[len(self.partida)]
self.tablero.creaFlechaMov(pv[:2], pv[2:4], "mt80")
fenM2 = self.partida.ultPosicion.fenM2()
for pv1 in self.dicFENm2[fenM2]:
if pv1 != pv:
self.tablero.creaFlechaMov(pv1[:2], pv1[2:4], "ms40")
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_reiniciar:
self.reiniciar()
elif clave == k_configurar:
self.configurar(siSonidos=True)
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.reinicio(self.dbop, self.modo, self.num_linea)
elif clave == k_ayuda:
self.ayuda()
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.tablero.restoreVisual()
self.procesador.inicio()
if self.modo == "static":
self.procesador.openingsTrainingStatic(self.pathFichero)
else:
self.procesador.openings()
return False
def reiniciar(self):
if len(self.partida) > 0 and self.estado != kFinJuego:
self.partidaTerminada(False)
self.reinicio(self.dbop, self.modo, self.num_linea)
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
numJugadas = len(self.partida)
if numJugadas >= self.numPV:
self.partidaTerminada(True)
return
pv = self.liPV[numJugadas]
if siRival:
self.desactivaTodas()
self.rmRival = XMotorRespuesta.RespuestaMotor("Apertura", self.siRivalConBlancas)
self.rmRival.desde = pv[:2]
self.rmRival.hasta = pv[2:4]
self.rmRival.coronacion = pv[4:]
self.mueveRival(self.rmRival)
self.siguienteJugada()
else:
self.activaColor(siBlancas)
self.siJuegaHumano = True
if self.siAyuda:
self.muestraAyuda()
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
pvSel = desde + hasta + coronacion
pvObj = self.liPV[len(self.partida)]
if pvSel != pvObj:
fenM2 = jg.posicionBase.fenM2()
li = self.dicFENm2.get(fenM2, [])
if pvSel in li:
mens = _("You have selected a correct move, but this line uses another one.")
QTUtil2.mensajeTemporal(self.pantalla, mens, 2, posicion="tb", background="#C3D6E8")
self.sigueHumano()
return False
self.errores += 1
mens = "%s: %d" % (_("Error"), self.errores)
QTUtil2.mensajeTemporal(self.pantalla, mens, 1.2, posicion="ad", background="#FF9B00", pmImagen=Iconos.pmError())
self.muestraInformacion()
self.sigueHumano()
return False
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.siguienteJugada()
return True
def masJugada(self, jg, siNuestra):
self.partida.append_jg(jg)
if self.partida.pendienteApertura:
self.partida.asignaApertura()
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def mueveRival(self, respMotor):
desde = respMotor.desde
hasta = respMotor.hasta
coronacion = respMotor.coronacion
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
if siBien:
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
return True
else:
self.error = mens
return False
class GestorOpeningLinesPositions(Gestor.Gestor):
def inicio(self, pathFichero):
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.reinicio(dbop)
def reinicio(self, dbop):
self.dbop = dbop
self.tipoJuego = kJugOpeningLines
self.training = self.dbop.training()
self.liTrainPositions = self.training["LITRAINPOSITIONS"]
self.trposition = self.liTrainPositions[0]
self.tm = 0
for game in self.liTrainPositions:
for tr in game["TRIES"]:
self.tm += tr["TIME"]
self.liMensBasic = [
"%s: %d" % (_("Moves"), len(self.liTrainPositions)),
]
self.siAyuda = False
self.siSaltoAutomatico = True
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self.trposition["FENM2"] + " 0 1")
self.partida = Partida.Partida(iniPosicion=cp)
        self.ayudas = 9999  # so that analysis can run without restrictions
self.siJugamosConBlancas = self.training["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.pantalla.ponToolBar((k_mainmenu, k_ayuda, k_configurar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(cp)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.quitaInformacion()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def ayuda(self):
self.siAyuda = True
self.pantalla.ponToolBar((k_mainmenu, k_configurar))
self.muestraAyuda()
self.muestraInformacion()
def muestraInformacion(self):
li = []
li.append("%s: %d" %(_("Errors"), self.errores))
if self.siAyuda:
li.append(_("Help activated"))
self.ponRotulo1("\n".join(li))
tgm = 0
for tr in self.trposition["TRIES"]:
tgm += tr["TIME"]
mas = time.time() - self.ini_time
mens = "\n" + "\n".join(self.liMensBasic)
mens += "\n%s:\n %s %s\n %s %s" % (_("Working time"),
time.strftime("%H:%M:%S", time.gmtime(tgm+mas)), _("Current"),
time.strftime("%H:%M:%S", time.gmtime(self.tm+mas)), _("Total"))
self.ponRotulo2(mens)
def posicionTerminada(self):
tm = time.time() - self.ini_time
        siSalta = self.siSaltoAutomatico and self.errores == 0 and not self.siAyuda
if not siSalta:
li = [_("Finished.")]
if self.siAyuda:
li.append(_("Help activated"))
if self.errores > 0:
li.append("%s: %d" % (_("Errors"), self.errores))
QTUtil2.mensajeTemporal(self.pantalla, "\n".join(li), 1.2)
dictry = {
"DATE": Util.hoy(),
"TIME": tm,
"AYUDA": self.siAyuda,
"ERRORS": self.errores
}
self.trposition["TRIES"].append(dictry)
sinError = self.errores == 0 and not self.siAyuda
if sinError:
self.trposition["NOERROR"] += 1
else:
self.trposition["NOERROR"] = max(0, self.trposition["NOERROR"]-1)
noError = self.trposition["NOERROR"]
salto = 2**(noError + 1) + 1
numPosics = len(self.liTrainPositions)
for x in range(salto, numPosics):
posic = self.liTrainPositions[x]
if posic["NOERROR"] != noError:
salto = x
break
liNuevo = self.liTrainPositions[1:salto]
liNuevo.append(self.trposition)
if numPosics > salto:
liNuevo.extend(self.liTrainPositions[salto:])
self.training["LITRAINPOSITIONS"] = liNuevo
self.pantalla.ponToolBar((k_mainmenu, k_siguiente, k_configurar))
self.dbop.setTraining(self.training)
self.estado = kFinJuego
self.muestraInformacion()
if siSalta:
self.reinicio(self.dbop)
def muestraAyuda(self):
liMoves = self.trposition["MOVES"]
for pv in liMoves:
self.tablero.creaFlechaMov(pv[:2], pv[2:4], "mt80")
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_configurar:
base = _("What to do after solving")
if self.siSaltoAutomatico:
liMasOpciones = [("lmo_stop", "%s: %s" % (base, _("Stop")), Iconos.PuntoRojo())]
else:
liMasOpciones = [("lmo_jump", "%s: %s" % (base, _("Jump to the next")), Iconos.PuntoVerde())]
resp = self.configurar(siSonidos=True, siCambioTutor=False, liMasOpciones=liMasOpciones)
if resp in ("lmo_stop", "lmo_jump"):
self.siSaltoAutomatico = resp == "lmo_jump"
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.reinicio(self.dbop)
elif clave == k_ayuda:
self.ayuda()
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.procesador.inicio()
self.procesador.openings()
return False
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
self.activaColor(siBlancas)
self.siJuegaHumano = True
if self.siAyuda:
self.muestraAyuda()
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
pvSel = desde + hasta + coronacion
lipvObj = self.trposition["MOVES"]
if pvSel not in lipvObj:
self.errores += 1
mens = "%s: %d" % (_("Error"), self.errores)
QTUtil2.mensajeTemporal(self.pantalla, mens, 2, posicion="ad", background="#FF9B00")
self.muestraInformacion()
self.sigueHumano()
return False
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.posicionTerminada()
return True
def masJugada(self, jg, siNuestra):
self.partida.append_jg(jg)
if self.partida.pendienteApertura:
self.partida.asignaApertura()
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
| gpl-2.0 | 7,077,392,566,974,328,000 | 33.737855 | 154 | 0.555107 | false |
threerings/splatd | splat/helpers/test/test_homeutils.py | 1 | 3306 | #!/usr/bin/env python
# test_homeutils.py vi:ts=4:sw=4:expandtab:
#
# Scalable Periodic LDAP Attribute Transmogrifier
# Author:
# Nick Barkas <[email protected]>
#
# Copyright (c) 2007 Three Rings Design, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright owner nor the names of contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
""" LDAP Unit Tests """
from twisted.trial import unittest
import ldap
import os
import splat
#from splat import plugin
from splat.ldaputils.test import slapd
from splat.ldaputils import client as ldapclient
from splat.helpers import homeutils
# Useful Constants
from splat.test import DATA_DIR
# Test Cases
class HomeUtilstestCase(unittest.TestCase):
""" Test Splat Home Directory Library """
def setUp(self):
self.slapd = slapd.LDAPServer()
self.conn = ldapclient.Connection(slapd.SLAPD_URI)
self.entry = self.conn.search(slapd.BASEDN, ldap.SCOPE_SUBTREE, '(uid=john)')[0]
def tearDown(self):
self.slapd.stop()
def test_valid_attributes(self):
""" Test getLDAPAttributes() for Valid Entry """
(home, uid, gid) = homeutils.getLDAPAttributes(self.entry, '/home', 10000, 10000)
self.assertEquals(('/home/john', 10001, 10001), (home, uid, gid))
def test_invalid_uid(self):
""" Test getLDAPAttributes() for Entry with UID Lower than Minimum """
self.assertRaises(splat.SplatError, homeutils.getLDAPAttributes, self.entry, '/home', 20000, 10000)
def test_invalid_gid(self):
""" Test getLDAPAttributes() for Entry with GID Lower than Minimum """
self.assertRaises(splat.SplatError, homeutils.getLDAPAttributes, self.entry, '/home', 10000, 20000)
def test_invalid_home(self):
""" Test getLDAPAttributes() for Entry with Invalid Home Directory """
self.assertRaises(splat.SplatError, homeutils.getLDAPAttributes, self.entry, '/tmp', 10000, 10000)
| bsd-3-clause | 8,026,593,776,843,824,000 | 41.384615 | 107 | 0.73775 | false |
Null01/detect-polygons-from-image | src/plot_edge_filter.py | 1 | 2379 | """
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, scharr, prewitt
from scipy import misc
from skimage import color
from skimage import measure
from skimage.measure import find_contours, approximate_polygon
#image = camera()
file_tile = "tile_colorizer_05.png"
fimg = misc.imread("../web/img/"+file_tile)
image = color.colorconv.rgb2grey(fimg)
contours = measure.find_contours(image, 0.75)
coords = approximate_polygon(contours[0], tolerance=0.02)
edge_roberts = roberts(image)
print edge_roberts
edge_sobel = sobel(image)
fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True,
figsize=(8, 4))
ax[0].imshow(edge_roberts, cmap=plt.cm.gray)
ax[0].set_title('Roberts Edge Detection')
ax[1].imshow(edge_sobel, cmap=plt.cm.gray)
ax[1].set_title('Sobel Edge Detection')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Different operators compute different finite-difference approximations of
# the gradient. For example, the Scharr filter results in a less rotational
# variance than the Sobel filter that is in turn better than the Prewitt
# filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters
# and the Scharr filter is illustrated below with an image that is the
# discretization of a rotation- invariant continuous function. The
# discrepancy between the Prewitt and Sobel filters, and the Scharr filter is
# stronger for regions of the image where the direction of the gradient is
# close to diagonal, and for regions with high spatial frequencies. For the
# example image the differences between the filter results are very small and
# the filter results are visually almost indistinguishable.
#
# .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
#
# .. [2] B. Jaehne, H. Scharr, and S. Koerkel. Principles of filter design.
# In Handbook of Computer Vision and Applications. Academic Press,
# 1999.
#
# .. [3] https://en.wikipedia.org/wiki/Prewitt_operator
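# A minimal sketch of the comparison described above (not in the original
# script): it reuses `image`, `edge_sobel` and the already-imported
# `scharr`/`prewitt` filters; the variable names below are illustrative.
edge_scharr = scharr(image)
edge_prewitt = prewitt(image)
diff_scharr_prewitt = edge_scharr - edge_prewitt
diff_scharr_sobel = edge_scharr - edge_sobel
fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True, figsize=(8, 4))
ax[0].imshow(diff_scharr_prewitt, cmap=plt.cm.gray)
ax[0].set_title('Scharr - Prewitt')
ax[1].imshow(diff_scharr_sobel, cmap=plt.cm.gray)
ax[1].set_title('Scharr - Sobel')
ax[0].axis('off')
ax[1].axis('off')
plt.tight_layout()
plt.show()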
| gpl-3.0 | 8,243,494,613,554,497,000 | 31.589041 | 78 | 0.720891 | false |
seecr/weightless-core | weightless/core/__init__.py | 1 | 3794 | ## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2012, 2015, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
VERSION='$Version: x.y.z$'[9:-1].strip() # Modified by package scripts
from functools import wraps
from types import GeneratorType, FunctionType
from os.path import dirname, abspath, isdir, join #DO_NOT_DISTRIBUTE
from sys import version_info #DO_NOT_DISTRIBUTE
pycmd = "python%s.%s" % version_info[:2] #DO_NOT_DISTRIBUTE
_mydir = abspath(dirname(__file__)) #DO_NOT_DISTRIBUTE
_projectdir = dirname(dirname(_mydir)) #DO_NOT_DISTRIBUTE
if isdir(join(_mydir, '.svn')) or isdir(join(_projectdir, '.git')): #DO_NOT_DISTRIBUTE
from os import system #DO_NOT_DISTRIBUTE
status = system( #DO_NOT_DISTRIBUTE
"cd %s/../..; %s setup.py build_ext --inplace" #DO_NOT_DISTRIBUTE
% (abspath(dirname(__file__)), pycmd)) #DO_NOT_DISTRIBUTE
if status > 0: #DO_NOT_DISTRIBUTE
import sys #DO_NOT_DISTRIBUTE
sys.exit(status) #DO_NOT_DISTRIBUTE
import platform
if hasattr(platform, 'python_implementation'):
cpython = platform.python_implementation() == "CPython"
elif hasattr(platform, 'system'):
cpython = platform.system() != "Java"
else:
cpython = False
try:
from os import getenv
if getenv('WEIGHTLESS_COMPOSE_TEST') == 'PYTHON':
raise ImportError('Python compose for testing purposes')
from .ext import compose as _compose, local, tostring, Yield, is_generator, DeclineMessage
cextension = True
ComposeType = _compose
except ImportError as e:
from warnings import warn
warn("Using Python version of compose(), local() and tostring()", stacklevel=2)
def is_generator(o):
return type(o) is GeneratorType
class DeclineMessage(Exception):
pass
from ._compose_py import compose as _compose, Yield
from ._local_py import local
from ._tostring_py import tostring
cextension = False
ComposeType = GeneratorType
def compose(X, *args, **kwargs):
if type(X) == FunctionType: # compose used as decorator
@wraps(X)
def helper(*args, **kwargs):
return _compose(X(*args, **kwargs))
return helper
elif is_generator(X):
return _compose(X, *args, **kwargs)
raise TypeError("compose() expects generator, got %s" % repr(X))
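# Usage sketch (illustrative only; `handler`, `sub_generator` and
# `some_generator` are placeholder names, not part of this module):
#
#   @compose                       # decorator form: wraps a generator function
#   def handler():
#       data = yield sub_generator()   # nested generators are flattened
#       yield data
#
#   g = compose(some_generator())  # direct form: wrap a generator object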
#from compose import compose, local, tostring, Yield
from .utils import identify, autostart, retval, consume, asList, asString, asBytes, return_
from ._observable import Observable, Transparent, be, methodOrMethodPartialStr, NoneOfTheObserversRespond
| gpl-2.0 | -8,486,436,413,945,769,000 | 43.116279 | 105 | 0.644439 | false |
networkjanitor/faeriawikibot | gamepedia_rw_pages.py | 1 | 13081 | import configparser
import os
import sys
import gamepedia_client
class GamepediaPagesRW:
gc = None
'''
Create new instance of GamepediaClient (required for name attribution)
'''
def create_gamepedia_client(self, username=None, password=None):
global cfg_file
if username is None:
username = cfg_file['account']['username']
if password is None:
password = cfg_file['account']['password']
self.gc = gamepedia_client.GamepediaClient(username=username, password=password)
'''
Download and save page.
'''
def download(self, path, page):
if self.gc is None:
self.create_gamepedia_client()
res = self.gc.read(page)
with open(path, 'w') as f:
f.write(res)
'''
Write text from local file to page
'''
def upload(self, path, page):
if self.gc is None:
self.create_gamepedia_client()
with open(path, 'r') as f:
res = f.read()
self.gc.write(page, res)
'''
Backup selection of pages
'''
def backup(self):
self.backup_galleries_cards()
'''
Archivate selection of pages
'''
def archivate(self):
self.download('setup/Template/Card_stats', 'Template:Card_stats')
self.download('setup/Template/Cardlist', 'Template:Cardlist')
self.download('setup/Template/Card_nav', 'Template:Card_nav')
self.download('setup/Template/Codexcontentlist', 'Template:Codexcontentlist')
self.download('setup/Lore/The_world', 'The_world')
self.download('setup/Lore/Factions', 'Factions')
self.download('setup/Lore/The_player,_the_orbs,_the_memoria', 'The_player,_the_orbs,_the_memoria')
self.download('setup/Lore/The_Faëria', 'The_Faëria')
self.download('setup/Template/Lake', 'Template:Lake')
self.download('setup/Template/Mountain', 'Template:Mountain')
self.download('setup/Template/Forest', 'Template:Forest')
self.download('setup/Template/Desert', 'Template:Desert')
self.download('setup/Template/Dpl_lake', 'Template:dpl_lake')
self.download('setup/Template/Dpl_mountain', 'Template:dpl_mountain')
self.download('setup/Template/Dpl_forest', 'Template:dpl_forest')
self.download('setup/Template/Dpl_desert', 'Template:dpl_desert')
self.download('setup/Template/Dpl_life', 'Template:Lif')
self.download('setup/Template/Dpl_power', 'Template:Pow')
self.download('setup/Template/Dpl_name', 'Template:dpl_name')
self.download('setup/Template/Dpl_display', 'Template:dpl_display')
self.download('setup/Template/Rarity', 'Template:Rarity')
self.download('setup/Template/Common', 'Template:Common')
self.download('setup/Template/Rare', 'Template:Rare')
self.download('setup/Template/Epic', 'Template:Epic')
self.download('setup/Template/Legendary', 'Template:Legendary')
self.download('setup/List/List_of_Cards', 'List_of_Cards')
self.download('setup/List/List_of_Blue_cards', 'List_of_Blue_cards')
self.download('setup/List/List_of_Green_cards', 'List_of_Green_cards')
self.download('setup/List/List_of_Red_cards', 'List_of_Red_cards')
self.download('setup/List/List_of_Yellow_cards', 'List_of_Yellow_cards')
self.download('setup/List/List_of_Human_cards', 'List_of_Human_cards')
self.download('setup/List/List_of_Common_cards', 'List_of_Common_cards')
self.download('setup/List/List_of_Rare_cards', 'List_of_Rare_cards')
self.download('setup/List/List_of_Epic_cards', 'List_of_Epic_cards')
self.download('setup/List/List_of_Legendary_cards', 'List_of_Legendary_cards')
self.download('setup/List/List_of_Creature_cards', 'List_of_Creature_cards')
self.download('setup/List/List_of_Structure_cards', 'List_of_Structure_cards')
self.download('setup/List/List_of_Event_cards', 'List_of_Event_Cards')
self.download('setup/List/List_of_Charge_X_cards', 'List_of_Charge_X_cards')
self.download('setup/List/List_of_Faeria_X_cards', 'List_of_Faeria_X_cards')
self.download('setup/List/List_of_Options_cards', 'List_of_Options_cards')
self.download('setup/List/List_of_Ranged_cards', 'List_of_Ranged_cards')
self.download('setup/List/List_of_Production_cards', 'List_of_Production_cards')
self.download('setup/List/List_of_Combat_cards', 'List_of_Combat_cards')
self.download('setup/List/List_of_Protection_cards', 'List_of_Protection_cards')
self.download('setup/List/List_of_Taunt_cards', 'List_of_Taund_cards')
self.download('setup/List/List_of_Haste_cards', 'List_of_Haste_cards')
self.download('setup/List/List_of_Last_Words_cards', 'List_of_Last_Words_cards')
self.download('setup/List/List_of_Deathtouch_cards', 'List_of_Deathtouch_cards')
self.download('setup/List/List_of_Flying_cards', 'List_of_Flying_cards')
self.download('setup/List/List_of_Jump_cards', 'List_of_Jump_cards')
self.download('setup/List/List_of_Aquatic_cards', 'List_of_Aquatic_cards')
self.download('setup/List/List_of_Activate_cards', 'List_of_Activate_cards')
self.download('setup/List/List_of_Gift_cards', 'List_of_Gift_cards')
self.download('setup/Cards/By Color/Human', 'Human')
self.download('setup/Cards/By Color/Blue', 'Blue')
self.download('setup/Cards/By Color/Green', 'Green')
self.download('setup/Cards/By Color/Red', 'Red')
self.download('setup/Cards/By Color/Yellow', 'Yellow')
self.download('setup/Cards/By Type/Creature', 'Creature')
self.download('setup/Cards/By Type/Event', 'Event')
self.download('setup/Cards/By Type/Structure', 'Structure')
self.download('setup/Cards/By Rarity/Common', 'Common')
self.download('setup/Cards/By Rarity/Rare', 'Rare')
self.download('setup/Cards/By Rarity/Epic', 'Epic')
self.download('setup/Cards/By Rarity/Legendary', 'Legendary')
self.download('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
self.download('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
self.download('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
self.download('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
self.download('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
'''
Restore selection of default pages
'''
def restore(self):
self.restore_cards_by()
self.restore_galleries_cards()
'''
Restore Cards By-X
'''
def restore_cards_by(self):
self.upload('setup/Cards/By Color/Human', 'Human')
self.upload('setup/Cards/By Color/Blue', 'Blue')
self.upload('setup/Cards/By Color/Green', 'Green')
self.upload('setup/Cards/By Color/Red', 'Red')
self.upload('setup/Cards/By Color/Yellow', 'Yellow')
self.upload('setup/Cards/By Type/Creature', 'Creature')
self.upload('setup/Cards/By Type/Event', 'Event')
self.upload('setup/Cards/By Type/Structure', 'Structure')
self.upload('setup/Cards/By Rarity/Common', 'Common')
self.upload('setup/Cards/By Rarity/Rare', 'Rare')
self.upload('setup/Cards/By Rarity/Epic', 'Epic')
self.upload('setup/Cards/By Rarity/Legendary', 'Legendary')
'''
Restore Changelog Templates
'''
def restore_templates_changelog(self):
self.upload('setup/Template/Changelog/Cl_codexcode1', 'Template:Cl_codexcode1')
self.upload('setup/Template/Changelog/Cl_codexcode2', 'Template:Cl_codexcode2')
self.upload('setup/Template/Changelog/Cl_codexcode3', 'Template:Cl_codexcode3')
self.upload('setup/Template/Changelog/Cl_color', 'Template:Cl_color')
self.upload('setup/Template/Changelog/Cl_desc', 'Template:Cl_desc')
self.upload('setup/Template/Changelog/Cl_desert', 'Template:Cl_desert')
self.upload('setup/Template/Changelog/Cl_faeria', 'Template:Cl_faeria')
self.upload('setup/Template/Changelog/Cl_forest', 'Template:Cl_forest')
self.upload('setup/Template/Changelog/Cl_lake', 'Template:Cl_lake')
self.upload('setup/Template/Changelog/Cl_life', 'Template:Cl_life')
self.upload('setup/Template/Changelog/Cl_mountain', 'Template:Cl_mountain')
self.upload('setup/Template/Changelog/Cl_name', 'Template:Cl_name')
self.upload('setup/Template/Changelog/Cl_power', 'Template:Cl_power')
self.upload('setup/Template/Changelog/Cl_rarity', 'Template:Cl_rarity')
self.upload('setup/Template/Changelog/Cl_type', 'Template:Cl_type')
self.upload('setup/Template/Changelog/Cl_unknown', 'Template:Cl_unknown')
self.upload('setup/Template/Changelog/Cl_info', 'Template:Cl_info')
'''
Restore Card Galleries
'''
def restore_galleries_cards(self):
self.upload('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
self.upload('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
self.upload('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
self.upload('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
self.upload('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
self.upload('setup/Gallery/Gallery_of_Creature_cards', 'Gallery_of_Creature_cards')
self.upload('setup/Gallery/Gallery_of_Structure_cards', 'Gallery_of_Structure_cards')
self.upload('setup/Gallery/Gallery_of_Event_cards', 'Gallery_of_Event_cards')
self.upload('setup/Gallery/Gallery_of_Common_cards', 'Gallery_of_Common_cards')
self.upload('setup/Gallery/Gallery_of_Rare_cards', 'Gallery_of_Rare_cards')
self.upload('setup/Gallery/Gallery_of_Epic_cards', 'Gallery_of_Epic_cards')
self.upload('setup/Gallery/Gallery_of_Legendary_cards', 'Gallery_of_Legendary_cards')
'''
Restore Lists of (effect) cards
'''
def restore_lists_effects(self):
self.download('setup/List/List_of_Charge_X_cards', 'List_of_Charge_X_cards')
self.download('setup/List/List_of_Faeria_X_cards', 'List_of_Faeria_X_cards')
self.download('setup/List/List_of_Options_cards', 'List_of_Options_cards')
self.download('setup/List/List_of_Ranged_cards', 'List_of_Ranged_cards')
self.download('setup/List/List_of_Production_cards', 'List_of_Production_cards')
self.download('setup/List/List_of_Combat_cards', 'List_of_Combat_cards')
self.download('setup/List/List_of_Protection_cards', 'List_of_Protection_cards')
self.download('setup/List/List_of_Taunt_cards', 'List_of_Taund_cards')
self.download('setup/List/List_of_Haste_cards', 'List_of_Haste_cards')
self.download('setup/List/List_of_Last_Words_cards', 'List_of_Last_Words_cards')
self.download('setup/List/List_of_Deathtouch_cards', 'List_of_Deathtouch_cards')
self.download('setup/List/List_of_Flying_cards', 'List_of_Flying_cards')
self.download('setup/List/List_of_Jump_cards', 'List_of_Jump_cards')
self.download('setup/List/List_of_Aquatic_cards', 'List_of_Aquatic_cards')
self.download('setup/List/List_of_Activate_cards', 'List_of_Activate_cards')
self.download('setup/List/List_of_Gift_cards', 'List_of_Gift_cards')
self.download('setup/List/List_of_Random_cards', 'List_of_Random_cards')
'''
Restore Card Galleries
'''
'''
Backup Card Galleries
'''
def backup_galleries_cards(self):
self.download('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
self.download('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
self.download('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
self.download('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
self.download('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
self.download('setup/Gallery/Gallery_of_Creature_cards', 'Gallery_of_Creature_cards')
self.download('setup/Gallery/Gallery_of_Structure_cards', 'Gallery_of_Structure_cards')
self.download('setup/Gallery/Gallery_of_Event_cards', 'Gallery_of_Event_cards')
self.download('setup/Gallery/Gallery_of_Common_cards', 'Gallery_of_Common_cards')
self.download('setup/Gallery/Gallery_of_Rare_cards', 'Gallery_of_Rare_cards')
self.download('setup/Gallery/Gallery_of_Epic_cards', 'Gallery_of_Epic_cards')
self.download('setup/Gallery/Gallery_of_Legendary_cards', 'Gallery_of_Legendary_cards')
if __name__ == '__main__':
gr = GamepediaPagesRW()
global cfg_file
cfg_file = configparser.ConfigParser()
path_to_cfg = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_cfg = os.path.join(path_to_cfg, 'faeriawikibot.conf')
cfg_file.read(path_to_cfg)
gr.restore()
| mit | -1,266,205,476,075,803,100 | 50.290196 | 106 | 0.666641 | false |
Southpaw-TACTIC/Team | src/python/Lib/site-packages/pythonwin/pywin/idle/AutoExpand.py | 1 | 2763 | import string
import re
###$ event <<expand-word>>
###$ win <Alt-slash>
###$ unix <Alt-slash>
class AutoExpand:
keydefs = {
'<<expand-word>>': ['<Alt-slash>'],
}
unix_keydefs = {
'<<expand-word>>': ['<Meta-slash>'],
}
menudefs = [
('edit', [
('E_xpand word', '<<expand-word>>'),
]),
]
wordchars = string.letters + string.digits + "_"
def __init__(self, editwin):
self.text = editwin.text
self.text.wordlist = None # XXX what is this?
self.state = None
def expand_word_event(self, event):
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
if not self.state:
words = self.getwords()
index = 0
else:
words, index, insert, line = self.state
if insert != curinsert or line != curline:
words = self.getwords()
index = 0
if not words:
self.text.bell()
return "break"
word = self.getprevword()
self.text.delete("insert - %d chars" % len(word), "insert")
newword = words[index]
index = (index + 1) % len(words)
if index == 0:
self.text.bell() # Warn we cycled around
self.text.insert("insert", newword)
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
self.state = words, index, curinsert, curline
return "break"
def getwords(self):
word = self.getprevword()
if not word:
return []
before = self.text.get("1.0", "insert wordstart")
wbefore = re.findall(r"\b" + word + r"\w+\b", before)
del before
after = self.text.get("insert wordend", "end")
wafter = re.findall(r"\b" + word + r"\w+\b", after)
del after
if not wbefore and not wafter:
return []
words = []
dict = {}
# search backwards through words before
wbefore.reverse()
for w in wbefore:
if dict.get(w):
continue
words.append(w)
dict[w] = w
# search onwards through words after
for w in wafter:
if dict.get(w):
continue
words.append(w)
dict[w] = w
words.append(word)
return words
def getprevword(self):
line = self.text.get("insert linestart", "insert")
i = len(line)
while i > 0 and line[i-1] in self.wordchars:
i = i-1
return line[i:]
| epl-1.0 | 187,856,678,564,993,900 | 28.032609 | 69 | 0.488961 | false |
effigies/mne-python | examples/time_frequency/plot_source_power_spectrum.py | 2 | 1929 | """
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <[email protected]>
#
# License: BSD (3-clause)
print(__doc__)
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
import matplotlib.pyplot as plt
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
| bsd-3-clause | -6,288,450,017,476,853,000 | 34.072727 | 79 | 0.589943 | false |
acysos/odoo-addons | edicom/models/edicom_albaran.py | 1 | 4074 | # -*- coding: utf-8 -*-
# Copyright 2020 Ignacio Ibeas <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
import time
import logging
_logger = logging.getLogger(__name__)
class EdicomAlbaran(models.Model):
_name = "edicom.albaran"
_description = "Albaran Edicom"
picking_id = fields.Many2one(
comodel_name='stock.picking', string='Albaran', required=True)
cabalb_ids = fields.One2many(
comodel_name='edicom.cabalb', inverse_name='albaran_edicom_id',
string='Datos de CABALB')
linalb_ids = fields.One2many(
comodel_name='edicom.linalb', inverse_name='albaran_edicom_id',
string='Datos de LINALB')
embalb_ids = fields.One2many(
comodel_name='edicom.embalb', inverse_name='albaran_edicom_id',
string='Datos de EMBALB')
_rec_name = 'picking_id'
@api.multi
def procesar_albaran(self):
cabalb_pool = self.env['edicom.cabalb']
linalb_pool = self.env['edicom.linalb']
embalb_pool = self.env['edicom.embalb']
for albaran_edi in self:
_logger.info('Albaran EDI ' + str(albaran_edi.picking_id.name))
albaran = albaran_edi.picking_id
if not albaran:
raise UserError(
_('No se ha indicado la albaran para generar el fichero.'))
if not (albaran.company_id and albaran.company_id.partner_id and
albaran.company_id.partner_id.codigo_edi):
raise UserError(
_('No se ha indicado el codigo edi en la compañía del '
'albaran.'))
if not (albaran.partner_id and albaran.partner_id.codigo_edi):
raise UserError(
_('No se ha indicado el codigo edi en el cliente.'))
# GENERO LA CABECERA - primero la borro si existe
cabalb_ids = cabalb_pool.search(
[('albaran_edicom_id', '=', albaran_edi.id)])
cabalb_ids.unlink()
cabalb_ids = cabalb_pool.generar(albaran_edi)
# GENERO EMBALAJES - primero la borro si existe
embalb_ids = embalb_pool.search(
[('albaran_edicom_id', '=', albaran_edi.id)])
embalb_ids.unlink()
embalb_ids = embalb_pool.generar(albaran_edi)
# GENERO LINEAS - primero la borro si existe
linalb_ids = linalb_pool.search(
[('albaran_edicom_id', '=', albaran_edi.id)])
linalb_ids.unlink()
linalb_ids = linalb_pool.generar(albaran_edi)
return True
@api.multi
def generar_ficheros(self):
cabalb_pool = self.env['edicom.cabalb']
linalb_pool = self.env['edicom.linalb']
embalb_pool = self.env['edicom.embalb']
for albaran_edi in self:
if (albaran_edi.picking_id and albaran_edi.picking_id.company_id
and albaran_edi.picking_id.company_id.edi_path):
path = albaran_edi.picking_id.company_id.edi_path
else:
raise UserError(
_('No se ha indicado la ruta para generar el fichero en '
'la compañía de la albaran.'))
out_char_sep = albaran_edi.picking_id.company_id.out_char_separator
file_suffix = albaran_edi.picking_id.name.replace('/', '')
if albaran_edi.cabalb_ids:
cabalb_pool.exportar(
albaran_edi.cabalb_ids, path, file_suffix, out_char_sep)
if albaran_edi.linalb_ids:
linalb_pool.exportar(
albaran_edi.linalb_ids, path, file_suffix, out_char_sep)
if albaran_edi.embalb_ids:
embalb_pool.exportar(
albaran_edi.embalb_ids, path, file_suffix, out_char_sep)
alert_file = open(path + '/albaranespendientes.txt', 'w')
alert_file.close()
return True
| agpl-3.0 | -6,508,898,318,081,650,000 | 36.685185 | 79 | 0.578133 | false |
raymak/contextualfeaturerecommender | phase1/analysis/user_to_aggregates.py | 1 | 8181 | #!/usr/bin/python
# input: csv-formatted stream, with each line corresponding to the data for a user
# output: one tab-separated row per (arm, feature) pair with aggregate counts, printed to stdout
# assumes the input messages from a specific user are contiguous
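# Illustrative sketch of the expected stream (hypothetical values; the real
# header carries many more columns, all tab-separated with JSON-encoded cells):
#
#   userid   arm_name   experiment_ver   num_of_extensions   ...
#   "u-001"  "control"  "2.0.0"          3                   ...
#
# The aggregate table written by printTableToCSV() follows the same
# tab-separated, JSON-encoded convention.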
import fileinput
import json
rev_inds = {}
FEATURE_NAMES = [
'closetabshortcut',
'newbookmark',
'newtabshortcut',
'newbookmarkshortcut',
'blushypage', 'facebook',
'amazon',
'youtube',
'download',
'gmail',
'reddit']
FEATURE_SUFFIXES = [
'_recommended',
'_recommended_seen',
'_secondary_used_after',
'_secondary_used_only_after',
'_secondary_used_after_to_seen',
'_secondary_used_after_seen',
'_secondary_used',
'_secondary_used_after_seen_to_seen',
'_secondary_used_before',
'_minor_used_after',
'_reaction_used',
'_reaction_used_after_seen',
'_reaction_used_after_seen_to_seen',
'_addon_ignored']
FEATURE_OFFERING_TYPES = {
'closetabshortcut': 'KEYSHORTCUT',
'newbookmark': 'ADDON',
'newtabshortcut': 'KEYSHORTCUT',
'newbookmarkshortcut': 'KEYSHORTCUT',
'blushypage': 'PRIVATEWINDOW',
'facebook': 'PINTAB',
'amazon': 'ADDON',
'youtube': 'ADDON',
'download': 'ADDON',
'gmail': 'ADDON',
'reddit': 'ADDON'
}
ARMS_ROWS_KEYS_ARR = [
'name',
'user_num',
'has_disabled',
'has_moved_button',
'median_num_of_extensions',
'median_total_recommendations'
] + [featureName + suffix for featureName in FEATURE_NAMES
for suffix in FEATURE_SUFFIXES]
ARMS_FEATURES_KEYS_ARR = [
'ARM_arm_name',
'ARM_basis',
'ARM_explanation',
'ARM_ui',
'ARM_user_num',
'ARM_has_disabled',
'ARM_has_moved_button',
'ARM_median_num_of_extensions',
'ARM_median_total_recommendations',
'FEATURE_feature_name',
'FEATURE_offering_type'
] + ['FEATURE' + suffix for suffix in FEATURE_SUFFIXES]
ARM_NAMES = ['explained-doorhanger-active',
'explained-doorhanger-passive',
'unexplained-doorhanger-active',
'unexplained-doorhanger-passive',
'control']
def main(headerLine, userLines):
table = parseCSVtoTable(headerLine, userLines)
table = basicFilter(table)
printTableToCSV(generateArmFeatureReport(table), ARMS_FEATURES_KEYS_ARR)
def basicFilter(table):
selected_indices = [i for i in range(len(table['userid']))
if table['experiment_ver'][i] == '2.0.0'
and table['num_of_extensions'][i] is not None
and not table['test_mode_enabled'][i]
and not table['browsertabsremote_enabled'][i]
]
new_table = {key: [table[key][i] for i in selected_indices] for key in table }
return new_table
def getTableByColumnValue(table, column_name, column_value):
selected_indices = [i for i in range(len(table[column_name])) if table[column_name][i] == column_value]
new_table = {key: [table[key][i] for i in selected_indices] for key in table}
return new_table
def appendRecordDictToTable(table, recordDict):
for col_name in table:
table[col_name].append(recordDict[col_name])
# mutates the given table
def generateArmFeatureReport(table):
armsFeaturesTable = {armsFeaturesKey: [] for armsFeaturesKey in ARMS_FEATURES_KEYS_ARR}
armsTables = {arm: {} for arm in ARM_NAMES}
for arm in armsTables:
armsTables[arm] = getTableByColumnValue(table, 'arm_name', arm)
recordDict = {}
for arm in ARM_NAMES:
userNum = len(armsTables[arm]['userid'])
recordDict['ARM_user_num'] = userNum
recordDict['ARM_arm_name'] = arm
recordDict['ARM_basis'] = armsTables[arm]['arm_basis'][0]
recordDict['ARM_explanation'] = armsTables[arm]['arm_explanation'][0]
recordDict['ARM_ui'] = armsTables[arm]['arm_ui'][0]
recordDict['ARM_has_disabled'] = armsTables[arm]['has_disabled'].count(True)
recordDict['ARM_has_moved_button'] = armsTables[arm]['has_moved_button'].count(True)
recordDict['ARM_median_num_of_extensions'] = sorted(armsTables[arm]['num_of_extensions'])[userNum // 2]
recordDict['ARM_median_total_recommendations'] = sorted(armsTables[arm]['total_recommendations'])[userNum //2]
for featureName in FEATURE_NAMES:
recordDict['FEATURE_feature_name'] = featureName
recordDict['FEATURE_offering_type'] = FEATURE_OFFERING_TYPES[featureName]
for featureSuffix in [
'_recommended',
'_recommended_seen',
'_secondary_used',
'_secondary_used_after',
'_secondary_used_before',
'_minor_used_after',
'_reaction_used',
'_addon_ignored']:
col_name = featureName + featureSuffix
recordDict['FEATURE' + featureSuffix] = armsTables[arm][col_name].count(True)
secondaryUsedAfter = recordDict['FEATURE' + '_secondary_used_after']
recommendedSeen = recordDict['FEATURE' + '_recommended_seen']
# 0 could mean real 0 or 0/0
recordDict['FEATURE' + '_secondary_used_after_to_seen'] = 0 if recommendedSeen == 0 else (100* secondaryUsedAfter) / recommendedSeen
recordDict['FEATURE' + '_secondary_used_only_after'] = [
armsTables[arm][featureName + '_secondary_used_after'][i]
and not armsTables[arm][featureName + '_secondary_used_before'][i]
for i in range(userNum)
].count(True)
recordDict['FEATURE' + '_secondary_used_after_seen'] = [
armsTables[arm][featureName + '_secondary_used_after'][i]
and armsTables[arm][featureName + '_recommended_seen'][i]
for i in range(userNum)
].count(True)
secondaryUsedAfterSeen = recordDict['FEATURE' + '_secondary_used_after_seen']
recordDict['FEATURE' + '_secondary_used_after_seen_to_seen'] = 0 if recommendedSeen == 0 else (100 * secondaryUsedAfterSeen) / recommendedSeen
recordDict['FEATURE' + '_reaction_used_after_seen'] = [
armsTables[arm][featureName + '_reaction_used'][i]
and armsTables[arm][featureName + '_recommended_seen'][i]
for i in range(userNum)
].count(True)
reactionUsedAfterSeen = recordDict['FEATURE' + '_reaction_used_after_seen']
recordDict['FEATURE' + '_reaction_used_after_seen_to_seen'] = 0 if recommendedSeen == 0 else (100 * reactionUsedAfterSeen) / recommendedSeen
appendRecordDictToTable(armsFeaturesTable, recordDict)
return armsFeaturesTable
def printTableToCSV(table, columnNamesArr):
printCSVTableHeader(columnNamesArr)
rowNum = len(table[columnNamesArr[0]])
for i in range(rowNum):
printTableRow(table, i, columnNamesArr)
def printTableRow(table, rowNum, columnNamesArr):
elms = [json.dumps(table[colName][rowNum])
for colName in columnNamesArr]
rowStr = '\t'.join(elms)
print rowStr
def printCSVTableHeader(keysArr):
print '\t'.join(keysArr)
def parseCSVtoTable(headerLine, rows):
table = {}
fields = headerLine.strip().split('\t')
for i in range(len(fields)):
table[fields[i]] = []
rev_inds[i] = fields[i]
for line in rows:
jsonrow = [json.loads(val) for val in line.strip().split('\t')]
for i in range(len(jsonrow)):
table[rev_inds[i]].append(jsonrow[i])
return table
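# Input format (inferred from the parsing above): tab-separated lines whose cells are
# JSON-encoded values, with the first line carrying the column names used as table keys.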
if __name__ == "__main__":
lines = fileinput.input()
main(lines.next(), lines)
| mpl-2.0 | 6,887,455,980,894,750,000 | 32.666667 | 154 | 0.577802 | false |
lizbew/code-practice | 03-weibo/base62.py | 1 | 1460 | ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def rsplit(s, count):
f = lambda x: x > 0 and x or 0
return [s[f(i - count):i] for i in range(len(s), 0, -count)]
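# Worked example: rsplit('3833781880260331', 7) == ['0260331', '3378188', '38'];
# the string is cut into chunks of `count` characters starting from the right.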
def id2mid(id):
result = ''
for i in rsplit(id, 7):
str62 = base62_encode(int(i))
result = str62.zfill(4) + result
return result.lstrip('0')
def mid2id(mid):
result = ''
for i in rsplit(mid, 4):
str10 = str(base62_decode(i)).zfill(7)
result = str10 + result
return result.lstrip('0')
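# Conversion scheme implemented above: the decimal weibo id is split into 7-digit
# chunks from the right, each chunk is base62-encoded and left-padded to 4 characters;
# mid2id reverses this, decoding 4-character chunks back to zero-padded 7-digit parts.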
def base62_encode(num, alphabet=ALPHABET):
"""Encode a number in Base X
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
"""
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def base62_decode(string, alphabet=ALPHABET):
"""Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
"""
base = len(alphabet)
strlen = len(string)
num = 0
idx = 0
for char in string:
power = (strlen - (idx + 1))
num += alphabet.index(char) * (base ** power)
idx += 1
return num
if __name__ == '__main__':
print mid2id('CeaOU15IT')
print id2mid('3833781880260331')
| apache-2.0 | -2,043,530,695,683,512,600 | 25.071429 | 75 | 0.586986 | false |
LudditeLabs/query-reform | reform/utils/scrap.py | 1 | 1277 | import re
class Scrap(object):
"""
    Scrapes method and class names from documentation index files (Java, JavaScript, Python)
"""
def __init__(self, file_path, out_name):
self.file_path = file_path
self.out_name = out_name
self.java_method_re = re.compile('^([a-z]+.+)\(')
self.js_method_re = re.compile('^([a-z]+): ')
self.python_class_re = re.compile('^([A-z]+.+) \(class in')
self.python_method_re = re.compile('^([A-z]+.+)\(\)')
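        # These patterns target documentation index lines; e.g. python_class_re matches
        # Sphinx-style entries such as "SomeClass (class in some.module)". Note that
        # [A-z] is broader than [A-Za-z]: it also admits the ASCII punctuation between
        # 'Z' and 'a'.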
def scrap_java_methods(self):
self._scrap(self.java_method_re)
def scrap_js_methods(self):
self._scrap(self.js_method_re)
def scrap_python_classes(self):
self._scrap(self.python_class_re)
def scrap_python_methods(self):
self._scrap(self.python_method_re)
def _scrap(self, scrap_re):
res = set()
with open(self.file_path) as f:
for line in f:
match = scrap_re.findall(line.strip())
if match:
res.add(match[0])
print "Found %d methods" % len(res)
with open(self.out_name, 'w') as o:
for r in res:
o.write(r + '\n')
if __name__ == '__main__':
scrapper = Scrap('../../data/raw/js_methods.txt', 'jsapimethods.txt')
scrapper.scrap_js_methods()
| apache-2.0 | -8,040,165,848,016,772,000 | 27.377778 | 73 | 0.530932 | false |
varmarakesh/devops-toolbox | devops-toolbox/ftp/install.py | 1 | 1910 | __author__ = 'rakesh.varma'
from fabric.api import *
import os
import time
class install:
fuse_git_repo = 'https://github.com/s3fs-fuse/s3fs-fuse.git'
def __init__(self, host_ip, host_user, host_key_file):
env.host_string = host_ip
env.user = host_user
env.key_filename = host_key_file
def install_s3fs(self):
print env.host_string
print env.user
print env.key_filename
sudo('yum install automake fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel')
sudo('git clone {0}'.format(self.fuse_git_repo))
sudo('./home/ec2-user/s3fs-fuse/autogen.sh; ./home/ec2-user/s3fs-fuse/configure')
sudo('/bin/make /home/ec2-user')
sudo('make install')
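        # NOTE: the './home/ec2-user/...' paths above are resolved against the remote
        # shell's current working directory, so the build only succeeds when that
        # directory matches where the repository was cloned in the previous step.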
def mount(self, access_key, secret_key):
sudo('touch /etc/passwd-s3fs && chmod 640 /etc/passwd-s3fs && echo "{0}:{1}" > /etc/passwd-s3fs'.format(access_key, secret_key))
sudo('/opt/bin/s3fs vcs-payment /home/vcsuser -o allow_other -o nonempty')
sudo('mount|grep s3fs')
def create_user(self, user, pwd):
print env.host_string
print env.user
print env.key_filename
sudo('hostname')
sudo('useradd -d /home/{0} {1}'.format(user, user))
sudo('echo -e "{0}\n{1}" | passwd {2}'.format(pwd, pwd, user))
sudo('chown -R {0} /home/{1}'.format(user, user))
def install_ftp(self, user):
sudo('yum install -y vsftpd')
sudo('chkconfig vsftpd on')
sudo('setsebool -P ftp_home_dir=1')
sudo('echo "{0}" > /etc/vsftpd/chroot_list'.format(user))
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(__location__, 'vsftpd.conf'))
vsftpd_config = f.read()
sudo('echo "{0}" > /etc/vsftpd/vsftpd.conf'.format(vsftpd_config))
sudo('service vsftpd restart') | isc | -6,202,998,654,796,628,000 | 39.659574 | 136 | 0.604188 | false |
Hybrid-Cloud/conveyor | conveyor/conveyorheat/engine/resources/aws/autoscaling/autoscaling_group.py | 1 | 19686 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
import six
from conveyor.conveyorheat.common import exception
from conveyor.conveyorheat.common import grouputils
from conveyor.conveyorheat.engine import attributes
from conveyor.conveyorheat.engine import constraints
from conveyor.conveyorheat.engine import function
from conveyor.conveyorheat.engine.notification import \
autoscaling as notification
from conveyor.conveyorheat.engine import properties
from conveyor.conveyorheat.engine import resource
from conveyor.conveyorheat.engine.resources.openstack.heat import \
instance_group as instgrp
from conveyor.conveyorheat.engine import rsrc_defn
from conveyor.conveyorheat.engine import support
from conveyor.conveyorheat.scaling import cooldown
from conveyor.conveyorheat.scaling import scalingutil as sc_util
from conveyor.i18n import _
from conveyor.i18n import _LE
from conveyor.i18n import _LI
LOG = logging.getLogger(__name__)
class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
support_status = support.SupportStatus(version='2014.1')
PROPERTIES = (
AVAILABILITY_ZONES, LAUNCH_CONFIGURATION_NAME, MAX_SIZE, MIN_SIZE,
COOLDOWN, DESIRED_CAPACITY, HEALTH_CHECK_GRACE_PERIOD,
HEALTH_CHECK_TYPE, LOAD_BALANCER_NAMES, VPCZONE_IDENTIFIER, TAGS,
INSTANCE_ID,
) = (
'AvailabilityZones', 'LaunchConfigurationName', 'MaxSize', 'MinSize',
'Cooldown', 'DesiredCapacity', 'HealthCheckGracePeriod',
'HealthCheckType', 'LoadBalancerNames', 'VPCZoneIdentifier', 'Tags',
'InstanceId',
)
_TAG_KEYS = (
TAG_KEY, TAG_VALUE,
) = (
'Key', 'Value',
)
_UPDATE_POLICY_SCHEMA_KEYS = (
ROLLING_UPDATE
) = (
'AutoScalingRollingUpdate'
)
_ROLLING_UPDATE_SCHEMA_KEYS = (
MIN_INSTANCES_IN_SERVICE, MAX_BATCH_SIZE, PAUSE_TIME
) = (
'MinInstancesInService', 'MaxBatchSize', 'PauseTime'
)
ATTRIBUTES = (
INSTANCE_LIST,
) = (
'InstanceList',
)
properties_schema = {
AVAILABILITY_ZONES: properties.Schema(
properties.Schema.LIST,
_('Not Implemented.'),
required=True
),
LAUNCH_CONFIGURATION_NAME: properties.Schema(
properties.Schema.STRING,
_('The reference to a LaunchConfiguration resource.'),
update_allowed=True
),
INSTANCE_ID: properties.Schema(
properties.Schema.STRING,
            _('The ID of an existing instance to use to '
              'create the Auto Scaling group. If this property is specified, '
              'the group will be created from an existing instance instead of '
              'a launch configuration.'),
constraints=[
constraints.CustomConstraint("nova.server")
]
),
MAX_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Maximum number of instances in the group.'),
required=True,
update_allowed=True
),
MIN_SIZE: properties.Schema(
properties.Schema.INTEGER,
_('Minimum number of instances in the group.'),
required=True,
update_allowed=True
),
COOLDOWN: properties.Schema(
properties.Schema.INTEGER,
_('Cooldown period, in seconds.'),
update_allowed=True
),
DESIRED_CAPACITY: properties.Schema(
properties.Schema.INTEGER,
_('Desired initial number of instances.'),
update_allowed=True
),
HEALTH_CHECK_GRACE_PERIOD: properties.Schema(
properties.Schema.INTEGER,
_('Not Implemented.'),
implemented=False
),
HEALTH_CHECK_TYPE: properties.Schema(
properties.Schema.STRING,
_('Not Implemented.'),
constraints=[
constraints.AllowedValues(['EC2', 'ELB']),
],
implemented=False
),
LOAD_BALANCER_NAMES: properties.Schema(
properties.Schema.LIST,
_('List of LoadBalancer resources.')
),
VPCZONE_IDENTIFIER: properties.Schema(
properties.Schema.LIST,
_('Use only with Neutron, to list the internal subnet to '
'which the instance will be attached; '
'needed only if multiple exist; '
'list length must be exactly 1.'),
schema=properties.Schema(
properties.Schema.STRING,
_('UUID of the internal subnet to which the instance '
'will be attached.')
)
),
TAGS: properties.Schema(
properties.Schema.LIST,
_('Tags to attach to this group.'),
schema=properties.Schema(
properties.Schema.MAP,
schema={
TAG_KEY: properties.Schema(
properties.Schema.STRING,
required=True
),
TAG_VALUE: properties.Schema(
properties.Schema.STRING,
required=True
),
},
)
),
}
attributes_schema = {
INSTANCE_LIST: attributes.Schema(
_("A comma-delimited list of server ip addresses. "
"(Heat extension)."),
type=attributes.Schema.STRING
),
}
rolling_update_schema = {
MIN_INSTANCES_IN_SERVICE: properties.Schema(properties.Schema.INTEGER,
default=0),
MAX_BATCH_SIZE: properties.Schema(properties.Schema.INTEGER,
default=1),
PAUSE_TIME: properties.Schema(properties.Schema.STRING,
default='PT0S')
}
update_policy_schema = {
ROLLING_UPDATE: properties.Schema(properties.Schema.MAP,
schema=rolling_update_schema)
}
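    # Illustrative (hypothetical) template fragment exercising the update policy
    # schema defined above:
    #
    #   "UpdatePolicy": {
    #     "AutoScalingRollingUpdate": {
    #       "MinInstancesInService": 1,
    #       "MaxBatchSize": 2,
    #       "PauseTime": "PT1M"
    #     }
    #   }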
def handle_create(self):
return self.create_with_template(self.child_template())
def _get_members(self, group_id):
members = []
for res in self.stack.iter_resources(cfg.CONF.max_nested_stack_depth):
if (res.type() in ['OS::Nova::Server'] and
res.status == res.COMPLETE):
members.append({
'id': res.resource_id,
'name': res.name,
'group_id': group_id
})
return members
def _add_scheduler(self, group_id):
task_args = {
'group_name': 'groupwatch',
'job_name': group_id,
'job_type': 'period',
'trigger_type': 'SIMPLE_TRIGGER',
'interval': 240,
'cover_flag': 'true',
'end_time': 4076884800000,
'meta_data': {
'group_id': group_id,
'project_id': self.context.tenant_id
}
}
rsp = self.client('scheduler').scheduler.create(**task_args)
return rsp.get('job_id')
def _create_groupwatch(self):
if not cfg.CONF.FusionSphere.groupwatch_enable:
return
group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
members = self._get_members(group_id)
job_id = self._add_scheduler(group_id)
kwargs = {
'id': group_id,
'name': self.name,
'type': 'VM',
'data': {'scheduler_job_id': job_id},
'members': members
}
self.client('groupwatch').groups.create(**kwargs)
def _make_launch_config_resource(self, name, props):
lc_res_type = 'AWS::AutoScaling::LaunchConfiguration'
lc_res_def = rsrc_defn.ResourceDefinition(name,
lc_res_type,
props)
lc_res = resource.Resource(name, lc_res_def, self.stack)
return lc_res
def _get_conf_properties(self):
instance_id = self.properties.get(self.INSTANCE_ID)
if instance_id:
server = self.client_plugin('nova').get_server(instance_id)
instance_props = {
'ImageId': server.image['id'],
'InstanceType': server.flavor['id'],
'KeyName': server.key_name,
'SecurityGroups': [sg['name']
for sg in server.security_groups]
}
conf = self._make_launch_config_resource(self.name,
instance_props)
props = function.resolve(conf.properties.data)
else:
conf, props = super(AutoScalingGroup, self)._get_conf_properties()
vpc_zone_ids = self.properties.get(self.VPCZONE_IDENTIFIER)
if vpc_zone_ids:
props['SubnetId'] = vpc_zone_ids[0]
return conf, props
def check_create_complete(self, task):
"""Update cooldown timestamp after create succeeds."""
done = super(AutoScalingGroup, self).check_create_complete(task)
if done:
self._create_groupwatch()
self._finished_scaling(
"%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
grouputils.get_size(self)))
return done
def check_update_complete(self, cookie):
"""Update the cooldown timestamp after update succeeds."""
done = super(AutoScalingGroup, self).check_update_complete(cookie)
if done:
self._finished_scaling(
"%s : %s" % (sc_util.CFN_EXACT_CAPACITY,
grouputils.get_size(self)))
return done
def _get_new_capacity(self, capacity,
adjustment,
adjustment_type=sc_util.CFN_EXACT_CAPACITY,
min_adjustment_step=None):
lower = self.properties[self.MIN_SIZE]
upper = self.properties[self.MAX_SIZE]
return sc_util.calculate_new_capacity(capacity, adjustment,
adjustment_type,
min_adjustment_step,
lower, upper)
def _update_groupwatch(self):
if not cfg.CONF.FusionSphere.groupwatch_enable:
return
group_id = self.stack.resource_by_refid(self.FnGetRefId()).resource_id
members = self._get_members(group_id)
kwargs = {
'id': group_id,
'name': self.name,
'type': 'VM',
'members': members
}
self.client('groupwatch').groups.update(group_id, **kwargs)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Updates self.properties, if Properties has changed.
If Properties has changed, update self.properties, so we get the new
values during any subsequent adjustment.
"""
if tmpl_diff:
# parse update policy
if 'UpdatePolicy' in tmpl_diff:
up = json_snippet.update_policy(self.update_policy_schema,
self.context)
self.update_policy = up
self.properties = json_snippet.properties(self.properties_schema,
self.context)
if prop_diff:
# Replace instances first if launch configuration has changed
self._try_rolling_update(prop_diff)
# Update will happen irrespective of whether auto-scaling
# is in progress or not.
capacity = grouputils.get_size(self)
desired_capacity = self.properties[self.DESIRED_CAPACITY] or capacity
new_capacity = self._get_new_capacity(capacity, desired_capacity)
self.resize(new_capacity)
def adjust(self, adjustment,
adjustment_type=sc_util.CFN_CHANGE_IN_CAPACITY,
min_adjustment_step=None):
"""Adjust the size of the scaling group if the cooldown permits."""
if not self._is_scaling_allowed():
LOG.info(_LI("%(name)s NOT performing scaling adjustment, "
"cooldown %(cooldown)s"),
{'name': self.name,
'cooldown': self.properties[self.COOLDOWN]})
raise exception.NoActionRequired()
capacity = grouputils.get_size(self)
new_capacity = self._get_new_capacity(capacity, adjustment,
adjustment_type,
min_adjustment_step)
changed_size = new_capacity != capacity
# send a notification before, on-error and on-success.
notif = {
'stack': self.stack,
'adjustment': adjustment,
'adjustment_type': adjustment_type,
'capacity': capacity,
'groupname': self.FnGetRefId(),
'message': _("Start resizing the group %(group)s") % {
'group': self.FnGetRefId()},
'suffix': 'start',
}
notification.send(**notif)
try:
self.resize(new_capacity)
except Exception as resize_ex:
with excutils.save_and_reraise_exception():
try:
notif.update({'suffix': 'error',
'message': six.text_type(resize_ex),
'capacity': grouputils.get_size(self),
})
notification.send(**notif)
except Exception:
LOG.exception(_LE('Failed sending error notification'))
else:
notif.update({
'suffix': 'end',
'capacity': new_capacity,
'message': _("End resizing the group %(group)s") % {
'group': notif['groupname']},
})
notification.send(**notif)
finally:
self._update_groupwatch()
self._finished_scaling("%s : %s" % (adjustment_type, adjustment),
changed_size=changed_size)
return changed_size
def _tags(self):
"""Add Identifying Tags to all servers in the group.
This is so the Dimensions received from cfn-push-stats all include
the groupname and stack id.
Note: the group name must match what is returned from FnGetRefId
"""
autoscaling_tag = [{self.TAG_KEY: 'metering.AutoScalingGroupName',
self.TAG_VALUE: self.FnGetRefId()}]
return super(AutoScalingGroup, self)._tags() + autoscaling_tag
def validate(self):
# check validity of group size
min_size = self.properties[self.MIN_SIZE]
max_size = self.properties[self.MAX_SIZE]
if max_size < min_size:
msg = _("MinSize can not be greater than MaxSize")
raise exception.StackValidationFailed(message=msg)
if min_size < 0:
msg = _("The size of AutoScalingGroup can not be less than zero")
raise exception.StackValidationFailed(message=msg)
if self.properties[self.DESIRED_CAPACITY] is not None:
desired_capacity = self.properties[self.DESIRED_CAPACITY]
if desired_capacity < min_size or desired_capacity > max_size:
msg = _("DesiredCapacity must be between MinSize and MaxSize")
raise exception.StackValidationFailed(message=msg)
# TODO(pasquier-s): once Neutron is able to assign subnets to
# availability zones, it will be possible to specify multiple subnets.
# For now, only one subnet can be specified. The bug #1096017 tracks
# this issue.
if (self.properties.get(self.VPCZONE_IDENTIFIER) and
len(self.properties[self.VPCZONE_IDENTIFIER]) != 1):
raise exception.NotSupported(feature=_("Anything other than one "
"VPCZoneIdentifier"))
# validate properties InstanceId and LaunchConfigurationName
# for aws auto scaling group.
# should provide just only one of
if self.type() == 'AWS::AutoScaling::AutoScalingGroup':
instanceId = self.properties.get(self.INSTANCE_ID)
launch_config = self.properties.get(
self.LAUNCH_CONFIGURATION_NAME)
if bool(instanceId) == bool(launch_config):
msg = _("Either 'InstanceId' or 'LaunchConfigurationName' "
"must be provided.")
raise exception.StackValidationFailed(message=msg)
super(AutoScalingGroup, self).validate()
def _resolve_attribute(self, name):
"""Resolves the resource's attributes.
heat extension: "InstanceList" returns comma delimited list of server
ip addresses.
"""
if name == self.INSTANCE_LIST:
return u','.join(inst.FnGetAtt('PublicIp')
for inst in grouputils.get_members(self)) or None
def child_template(self):
if self.properties[self.DESIRED_CAPACITY]:
num_instances = self.properties[self.DESIRED_CAPACITY]
else:
num_instances = self.properties[self.MIN_SIZE]
return self._create_template(num_instances)
def _delete_groupwatch(self):
if not cfg.CONF.FusionSphere.groupwatch_enable:
return
if not self.resource_id:
return
group = None
try:
group = self.client('groupwatch').groups.get(self.resource_id)
except Exception as ex:
self.client_plugin('groupwatch').ignore_not_found(ex)
return
try:
if (group and group.get('group') and
'data' in group.get('group')):
scheduler_job_id = \
group.get('group').get('data').get('scheduler_job_id')
self.client('scheduler').scheduler.delete(scheduler_job_id)
except (AttributeError, KeyError):
# do nothing
pass
except Exception as ex:
self.client_plugin('scheduler').ignore_not_found(ex)
try:
self.client('groupwatch').groups.delete(self.resource_id)
except Exception as ex:
self.client_plugin('groupwatch').ignore_not_found(ex)
def handle_delete(self):
self._delete_groupwatch()
return self.delete_nested()
def handle_metadata_reset(self):
metadata = self.metadata_get()
if 'scaling_in_progress' in metadata:
metadata['scaling_in_progress'] = False
self.metadata_set(metadata)
def resource_mapping():
return {
'AWS::AutoScaling::AutoScalingGroup': AutoScalingGroup,
}
| apache-2.0 | 6,615,088,679,688,837,000 | 37.151163 | 78 | 0.560906 | false |
qedsoftware/commcare-hq | custom/world_vision/sqldata/child_sqldata.py | 1 | 33675 | import calendar
from sqlagg import CountUniqueColumn
from sqlagg.columns import SimpleColumn
from sqlagg.filters import LT, LTE, AND, GTE, GT, EQ, NOTEQ, OR, IN
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import DatabaseColumn
from corehq.apps.reports.util import get_INFilter_bindparams
from custom.utils.utils import clean_IN_filter_value
from custom.world_vision.custom_queries import CustomMedianColumn, MeanColumnWithCasting
from custom.world_vision.sqldata import BaseSqlData
from custom.world_vision.sqldata.main_sqldata import ImmunizationOverview
from custom.world_vision.sqldata.mother_sqldata import MotherRegistrationDetails, DeliveryMothersIds
class ChildRegistrationDetails(MotherRegistrationDetails):
table_name = "fluff_WorldVisionChildFluff"
slug = 'child_registration_details'
title = 'Child Registration Details'
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
result = []
for column in self.columns:
result.append([{'sort_key': column.header, 'html': column.header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_registration_details'],
column.slug)},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]}])
return result
@property
def columns(self):
columns = [
DatabaseColumn("Total child registered ever", CountUniqueColumn('doc_id', alias="total"))
]
if 'startdate' not in self.config and 'enddate' not in self.config or 'startdate' not in self.config \
and 'enddate' in self.config:
columns.extend([
DatabaseColumn(
"Total open children cases", CountUniqueColumn(
'doc_id', alias="no_date_opened",
filters=self.filters + [EQ('closed_on', 'empty')]
)
),
DatabaseColumn(
"Total closed children cases", CountUniqueColumn(
'doc_id', alias="no_date_closed",
filters=self.filters + [NOTEQ('closed_on', 'empty')]
)
),
DatabaseColumn(
"New registrations during last 30 days", CountUniqueColumn(
'doc_id', alias="no_date_new_registrations",
filters=self.filters + [AND([GTE('opened_on', "last_month"), LTE('opened_on', "today")])]
)
)
])
else:
columns.extend([
DatabaseColumn(
"Children cases open at end period", CountUniqueColumn(
'doc_id', alias="opened",
filters=self.filters + [AND([LTE('opened_on', "stred"), OR([EQ('closed_on', 'empty'),
GT('closed_on', "stred")])])]
)
),
DatabaseColumn(
"Children cases closed during period", CountUniqueColumn(
'doc_id', alias="closed",
filters=self.filters + [AND([GTE('closed_on', "strsd"), LTE('closed_on', "stred")])]
)
),
DatabaseColumn(
"Total children followed during period", CountUniqueColumn(
'doc_id', alias="followed",
filters=self.filters + [AND([LTE('opened_on', "stred"), OR([EQ('closed_on', 'empty'),
GTE('closed_on', "strsd")])])]
)
),
DatabaseColumn(
"New registrations during period", CountUniqueColumn(
'doc_id', alias="new_registrations",
filters=self.filters + [AND([LTE('opened_on', "stred"), GTE('opened_on', "strsd")])]
)
)
])
return columns
class ClosedChildCasesBreakdown(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'closed_child_cases_breakdown'
title = 'Closed Child Cases Breakdown'
show_total = True
total_row_name = "Children cases closed during the time period"
chart_title = 'Closed Child Cases'
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_only = True
@property
def group_by(self):
return ['reason_for_child_closure']
@property
def rows(self):
from custom.world_vision import CLOSED_CHILD_CASES_BREAKDOWN
return self._get_rows(CLOSED_CHILD_CASES_BREAKDOWN, super(ClosedChildCasesBreakdown, self).rows)
@property
def filters(self):
filter = super(ClosedChildCasesBreakdown, self).filters[1:]
if 'strsd' in self.config:
filter.append(GTE('closed_on', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('closed_on', 'stred'))
filter.append(NOTEQ('reason_for_child_closure', 'empty'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Reason for closure'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Reason for closure", SimpleColumn('reason_for_child_closure')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))
]
class ChildrenDeaths(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_deaths'
title = 'Children Death Details'
total_row_name = "Total Deaths"
show_total = True
show_charts = False
chart_x_label = ''
chart_y_label = ''
custom_total_calculate = True
accordion_start = True
accordion_end = False
table_only = True
def calculate_total_row(self, rows):
total_row = []
if len(rows) > 0:
num_cols = len(rows[0])
for i in range(num_cols):
colrows = [cr[i] for cr in rows[1:] if isinstance(cr[i], dict)]
columns = [r.get('sort_key') for r in colrows if isinstance(r.get('sort_key'), (int, long))]
if len(columns):
total_row.append(reduce(lambda x, y: x + y, columns, 0))
else:
total_row.append('')
return total_row
@property
def rows(self):
result = []
total = self.data['total_deaths']
for idx, column in enumerate(self.columns[:-1]):
if idx == 0:
percent = 'n/a'
else:
percent = self.percent_fn(total, self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]},
{'sort_key': 'percentage', 'html': percent}])
return result
@property
def filters(self):
filter = []
if 'start_date' in self.config:
filter.extend([AND([GTE('date_of_death', 'startdate'), LTE('date_of_death', 'enddate')])])
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Children Death Type'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
self.config['mother_ids'] = tuple(DeliveryMothersIds(config=self.config).data.keys()) + ('',)
return [
DatabaseColumn("Total births",
CountUniqueColumn('doc_id',
filters=[AND([IN('mother_id', get_INFilter_bindparams('mother_ids', self.config['mother_ids'])),
OR([EQ('gender', 'female'), EQ('gender', 'male')])])],
alias='total_births')),
DatabaseColumn("Newborn deaths (< 1 m)",
CountUniqueColumn('doc_id', filters=self.filters + [AND(
[EQ('reason_for_child_closure', 'death'),
EQ('type_of_child_death', 'newborn_death')])], alias='newborn_death')),
DatabaseColumn("Infant deaths (< 1 y)",
CountUniqueColumn('doc_id', filters=self.filters + [AND(
[EQ('reason_for_child_closure', 'death'),
EQ('type_of_child_death', 'infant_death')])], alias='infant_death')),
DatabaseColumn("Child deaths (2-5y)",
CountUniqueColumn('doc_id', filters=self.filters + [AND(
[EQ('reason_for_child_closure', 'death'),
EQ('type_of_child_death', 'child_death')])], alias='child_death')),
DatabaseColumn("Total deaths",
CountUniqueColumn('doc_id', filters=self.filters + [EQ('reason_for_child_closure',
'death')], alias='total_deaths'))
]
@property
def filter_values(self):
return clean_IN_filter_value(super(ChildrenDeaths, self).filter_values, 'mother_ids')
class ChildrenDeathDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_death_details'
title = ''
show_total = True
total_row_name = "Total Deaths"
chart_title = 'Child Deaths'
show_charts = True
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = False
@property
def group_by(self):
return ['cause_of_death_child']
@property
def rows(self):
from custom.world_vision import CHILD_CAUSE_OF_DEATH
return self._get_rows(CHILD_CAUSE_OF_DEATH, super(ChildrenDeathDetails, self).rows)
@property
def filters(self):
filter = []
if 'start_date' in self.config:
filter.extend([AND([GTE('date_of_death', 'startdate'), LTE('date_of_death', 'enddate')])])
filter.extend([EQ('reason_for_child_closure', 'death')])
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Cause of death'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Cause of death", SimpleColumn('cause_of_death_child')),
DatabaseColumn("Number", CountUniqueColumn('doc_id')),
]
class ChildrenDeathsByMonth(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_death_by_month'
title = ''
show_charts = True
chart_title = 'Seasonal Variation of Child Deaths'
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = True
@property
def group_by(self):
return ['month_of_death', 'year_of_death']
@property
def filters(self):
filters = super(ChildrenDeathsByMonth, self).filters
filters.extend([NOTEQ('month_of_death', 'empty')])
return filters
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Month'), DataTablesColumn('Deaths'), DataTablesColumn('Percentage')])
@property
def rows(self):
rows = [[int(i), 0] for i in range(1, 13)]
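        # rows[0]..rows[11] correspond to months 1..12 (converted to month names below),
        # so the lookup rows[int(row[0])] assumes month_of_death is stored zero-based
        # (0 = January).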
sum_of_deaths = 0
for row in super(ChildrenDeathsByMonth, self).rows:
rows[int(row[0])][-1] += row[-1]['html']
sum_of_deaths += row[-1]['html']
for row in rows:
row[0] = calendar.month_name[row[0]]
row.append({'sort_key': self.percent_fn(sum_of_deaths, row[1]),
'html': self.percent_fn(sum_of_deaths, row[1])})
row[1] = {'sort_key': row[1], 'html': row[1]}
return rows
@property
def columns(self):
return [DatabaseColumn("Month", SimpleColumn('month_of_death')),
DatabaseColumn("Year", SimpleColumn('year_of_death')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))]
class NutritionMeanMedianBirthWeightDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_birth_weights_1'
title = 'Nutrition Details'
accordion_start = True
accordion_end = False
@property
def filters(self):
filters = super(NutritionMeanMedianBirthWeightDetails, self).filters
filters.append(NOTEQ('weight_birth', 'empty'))
return filters
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Mean'), DataTablesColumn('Median')])
@property
def columns(self):
return [
DatabaseColumn("Median Birth Weight",
MeanColumnWithCasting('weight_birth', alias='mean_birth_weight')
),
DatabaseColumn("Median Birth Weight",
CustomMedianColumn('weight_birth', alias='median_birth_weight')
)
]
@property
def rows(self):
return [['Birth Weight (kg)',
"%.2f" % (self.data['mean_birth_weight'] if self.data['mean_birth_weight'] else 0),
"%.2f" % (self.data['median_birth_weight'] if self.data['mean_birth_weight'] else 0)]
]
class NutritionBirthWeightDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_birth_details_2'
title = ''
show_charts = True
chart_title = 'Birth Weight'
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = False
chart_only = True
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def rows(self):
result = []
for idx, column in enumerate(self.columns):
if idx == 0 or idx == 1:
percent = 'n/a'
else:
percent = self.percent_fn(self.data['total_birthweight_known'], self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug],
'color': 'red' if column.slug == 'total_birthweight_lt_25' else 'green'},
{'sort_key': 'percentage', 'html': percent}]
)
return result
@property
def columns(self):
self.config['mother_ids'] = tuple(DeliveryMothersIds(config=self.config).data.keys()) + ('',)
columns = [
DatabaseColumn("Total children with with birthweight known",
CountUniqueColumn('doc_id', alias="total_birthweight_known",
filters=self.filters + [NOTEQ('weight_birth', 'empty')])),
DatabaseColumn("Total births",
CountUniqueColumn('doc_id',
filters=[AND([IN('mother_id', get_INFilter_bindparams('mother_ids', self.config['mother_ids'])),
OR([EQ('gender', 'female'), EQ('gender', 'male')])])],
alias='total_births'))]
columns.extend([
DatabaseColumn("Birthweight < 2.5 kg",
CountUniqueColumn('doc_id',
alias="total_birthweight_lt_25",
filters=self.filters + [AND([LT('weight_birth', 'weight_birth_25'), NOTEQ('weight_birth', 'empty')])]
)
),
DatabaseColumn("Birthweight >= 2.5 kg",
CountUniqueColumn('doc_id',
alias="total_birthweight_gte_25",
filters=self.filters + [AND([GTE('weight_birth', 'weight_birth_25'), NOTEQ('weight_birth', 'empty')])]
)
)
])
return columns
@property
def filter_values(self):
return clean_IN_filter_value(super(NutritionBirthWeightDetails, self).filter_values, 'mother_ids')
class NutritionFeedingDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_feeding_details'
title = ''
accordion_start = False
accordion_end = True
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Feeding type'), DataTablesColumn('Number'), DataTablesColumn('Total Eligible'), DataTablesColumn('Percentage')])
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
result = []
for i in range(0,4):
result.append([{'sort_key': self.columns[2*i].header, 'html': self.columns[2*i].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['nutrition_details'], self.columns[2*i].slug)},
{'sort_key': self.data[self.columns[2*i].slug], 'html': self.data[self.columns[2*i].slug]},
{'sort_key': self.data[self.columns[2*i+1].slug], 'html': self.data[self.columns[2*i + 1].slug],
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['nutrition_details'], self.columns[2*i+1].slug)},
{'sort_key': self.percent_fn(self.data[self.columns[2*i + 1].slug], self.data[self.columns[2*i].slug]),
'html': self.percent_fn(self.data[self.columns[2*i + 1].slug], self.data[self.columns[2*i].slug])}
])
return result
@property
def columns(self):
return [
DatabaseColumn("Early initiation of breastfeeding",
CountUniqueColumn('doc_id', alias="colostrum_feeding",
filters=self.filters + [EQ('breastfeed_1_hour', 'yes')])),
DatabaseColumn("Early initiation of breastfeeding Total Eligible",
CountUniqueColumn('doc_id', alias="colostrum_feeding_total_eligible",
filters=self.filters + [NOTEQ('breastfeed_1_hour', 'empty')])),
DatabaseColumn("Exclusive breastfeeding",
CountUniqueColumn('doc_id', alias="exclusive_breastfeeding",
filters=self.filters + [AND([EQ('exclusive_breastfeeding', "yes"),
GTE('dob', "today_minus_183")])])),
DatabaseColumn("Exclusive Breastfeeding (EBF) Total Eligible",
CountUniqueColumn('doc_id', alias="exclusive_breastfeeding_total_eligible",
filters=self.filters + [GTE('dob', 'today_minus_183')])),
DatabaseColumn("Supplementary feeding",
CountUniqueColumn('doc_id', alias="supplementary_feeding",
filters=self.filters + [AND([EQ('supplementary_feeding_baby', 'yes'),
GTE('dob', 'today_minus_182')])])),
DatabaseColumn("Supplementary feeding Total Eligible",
CountUniqueColumn('doc_id', alias="supplementary_feeding_total_eligible",
filters=self.filters + [GTE('dob', 'today_minus_182')])),
DatabaseColumn("Complementary feeding",
CountUniqueColumn('doc_id', alias="complementary_feeding",
filters=self.filters + [AND([EQ('comp_breastfeeding', 'yes'),
LTE('dob', 'today_minus_183'),
GTE('dob', 'today_minus_730')])])),
DatabaseColumn("Complementary feeding Total Eligible",
CountUniqueColumn('doc_id', alias="complementary_feeding_total_eligible",
filters=self.filters + [AND([LTE('dob', 'today_minus_183'),
GTE('dob', 'today_minus_730')])]))
]
class ChildHealthIndicators(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'Child_health_indicators'
title = 'Child Health Indicators'
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
result = [[{'sort_key': self.columns[0].header, 'html': self.columns[0].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[0].slug)},
{'sort_key': self.data[self.columns[0].slug], 'html': self.data[self.columns[0].slug]}],
[{'sort_key': self.columns[1].header, 'html': self.columns[1].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[1].slug)},
{'sort_key': self.data[self.columns[1].slug], 'html': self.data[self.columns[1].slug]}],
[{'sort_key': self.columns[2].header, 'html': self.columns[2].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[2].slug)},
{'sort_key': self.data[self.columns[2].slug], 'html': self.data[self.columns[2].slug]}]]
for i in range(3, 5):
result.append([{'sort_key': self.columns[i].header, 'html': self.columns[i].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[i].slug)},
{'sort_key': self.data[self.columns[i].slug], 'html': self.data[self.columns[i].slug]},
{'sort_key': self.percent_fn(self.data[self.columns[1].slug],
self.data[self.columns[i].slug]),
'html': self.percent_fn(self.data[self.columns[1].slug],
self.data[self.columns[i].slug])}])
return result
@property
def columns(self):
return [
DatabaseColumn("Total child ill",
CountUniqueColumn(
'doc_id', alias="total_child_ill",
filters=self.filters + [OR([EQ('pneumonia_since_last_visit', 'yes'),
EQ('has_diarrhea_since_last_visit', 'yes')])])),
DatabaseColumn("ARI (Pneumonia)",
CountUniqueColumn('doc_id', alias="ari_cases",
filters=self.filters + [EQ('pneumonia_since_last_visit', 'yes')])),
DatabaseColumn("Diarrhea",
CountUniqueColumn('doc_id', alias="diarrhea_cases",
filters=self.filters + [EQ('has_diarrhea_since_last_visit', 'yes')])),
DatabaseColumn("ORS given during diarrhea",
CountUniqueColumn('doc_id', alias="ors",
filters=self.filters + [EQ('dairrhea_treated_with_ors', 'yes')])),
DatabaseColumn("Zinc given during diarrhea",
CountUniqueColumn('doc_id', alias="zinc",
filters=self.filters + [EQ('dairrhea_treated_with_zinc', 'yes')]))
]
class ImmunizationDetailsFirstYear(ImmunizationOverview):
title = 'Immunization Overview (0 - 1 yrs)'
slug = 'immunization_first_year_overview'
@property
def columns(self):
columns = super(ImmunizationDetailsFirstYear, self).columns
del columns[6:8]
del columns[-2:]
cols1 = [
DatabaseColumn("OPV0",
CountUniqueColumn('doc_id', alias="opv0", filters=self.filters + [EQ('opv0', 'yes')])
),
DatabaseColumn("HEP0",
CountUniqueColumn('doc_id', alias="hep0", filters=self.filters + [EQ('hepb0', 'yes')])
),
DatabaseColumn("OPV1",
CountUniqueColumn('doc_id', alias="opv1", filters=self.filters + [EQ('opv1', 'yes')])
),
DatabaseColumn("HEP1",
CountUniqueColumn('doc_id', alias="hep1", filters=self.filters + [EQ('hepb1', 'yes')])
),
DatabaseColumn("DPT1",
CountUniqueColumn('doc_id', alias="dpt1", filters=self.filters + [EQ('dpt1', 'yes')])
),
DatabaseColumn("OPV2",
CountUniqueColumn('doc_id', alias="opv2", filters=self.filters + [EQ('opv2', 'yes')])
),
DatabaseColumn("HEP2",
CountUniqueColumn('doc_id', alias="hep2", filters=self.filters + [EQ('hepb2', 'yes')])
),
DatabaseColumn("DPT2",
CountUniqueColumn('doc_id', alias="dpt2", filters=self.filters + [EQ('dpt2', 'yes')])
),
]
cols2 = [
DatabaseColumn("OPV0 Total Eligible",
CountUniqueColumn('doc_id', alias="opv0_eligible", filters=self.filters)),
DatabaseColumn("HEP0 Total Eligible",
CountUniqueColumn('doc_id', alias="hep0_eligible", filters=self.filters)),
DatabaseColumn("OPV1 Total Eligible",
CountUniqueColumn('doc_id', alias="opv1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_40')])),
DatabaseColumn("HEP1 Total Eligible",
CountUniqueColumn('doc_id', alias="hep1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_40')])),
DatabaseColumn("DPT1 Total Eligible",
CountUniqueColumn('doc_id', alias="dpt1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_40')])),
DatabaseColumn("OPV2 Total Eligible",
CountUniqueColumn('doc_id', alias="opv2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_75')])),
DatabaseColumn("HEP2 Total Eligible",
CountUniqueColumn('doc_id', alias="hep2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_75')])),
DatabaseColumn("DPT2 Total Eligible",
CountUniqueColumn('doc_id', alias="dpt2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_75')]))
]
cols3 = [
DatabaseColumn("VitA1",
CountUniqueColumn('doc_id', alias="vita1", filters=self.filters + [EQ('vita1', 'yes')]))
]
cols4 = [
DatabaseColumn("VitA1 Total Eligible",
CountUniqueColumn('doc_id', alias="vita1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_273')]))
]
return columns[:1] + cols1 + columns[1:5] + cols3 + columns[5:-5] \
+ cols2 + columns[-5:-1] + cols4 + columns[-1:]
class ImmunizationDetailsSecondYear(ImmunizationOverview):
title = 'Immunization Overview (1 - 2 yrs)'
slug = 'immunization_second_year_overview'
@property
def columns(self):
return [
DatabaseColumn("VitA2", CountUniqueColumn('doc_id', alias="vita2",
filters=self.filters + [EQ('vita2', 'yes')])),
DatabaseColumn("DPT-OPT Booster",
CountUniqueColumn('doc_id', alias="dpt_opv_booster",
filters=self.filters + [EQ('dpt_opv_booster', 'yes')])),
DatabaseColumn("VitA3",
CountUniqueColumn('doc_id', alias="vita3",
filters=self.filters + [EQ('vita3', 'yes')])),
DatabaseColumn("VitA2 Total Eligible",
CountUniqueColumn('doc_id', alias="vita2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_547')])),
DatabaseColumn("DPT-OPT Booster Total Eligible",
CountUniqueColumn('doc_id', alias="dpt_opv_booster_eligible",
filters=self.filters + [LTE('dob', 'today_minus_548')])),
DatabaseColumn("VitA3 Total Eligible",
CountUniqueColumn('doc_id', alias="vita3_eligible",
filters=self.filters + [LTE('dob', 'today_minus_700')]))
]
class ChildDeworming(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_deworming'
title = 'Child Deworming'
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'), DataTablesColumn('Total Eligible'), DataTablesColumn('Percentage')])
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
return [[{'sort_key': self.columns[0].header, 'html': self.columns[0].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'], self.columns[0].slug)},
{'sort_key': self.data[self.columns[0].slug], 'html': self.data[self.columns[0].slug]},
{'sort_key': self.data[self.columns[1].slug], 'html': self.data[self.columns[1].slug],
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'], self.columns[1].slug)},
{'sort_key': self.percent_fn(self.data[self.columns[1].slug], self.data[self.columns[0].slug]),
'html': self.percent_fn(self.data[self.columns[1].slug], self.data[self.columns[0].slug])}
]]
@property
def columns(self):
return [
DatabaseColumn("Deworming dose in last 6 months",
CountUniqueColumn('doc_id',
alias="deworming",
filters=self.filters + [EQ('deworm', 'yes')]
)
),
DatabaseColumn("Deworming Total Eligible",
CountUniqueColumn('doc_id',
alias="deworming_total_eligible",
filters=self.filters + [LTE('dob', 'today_minus_365')]
)
),
]
class EBFStoppingDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'ebf_stopping_details'
title = 'EBF Stopping Details'
show_total = True
total_row_name = "EBF stopped"
@property
def filters(self):
filters = super(EBFStoppingDetails, self).filters
filters.append(EQ('exclusive_breastfeeding', 'no'))
filters.append(LTE('dob', 'today_minus_183'))
filters.append(NOTEQ('ebf_stop_age_month', 'empty'))
return filters
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
total = sum(v for v in self.data.values())
result = []
for column in self.columns:
percent = self.percent_fn(total, self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['ebf_stopping_details'], column.slug)},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]},
{'sort_key': 'percentage', 'html': percent}
])
return result
@property
def columns(self):
return [
DatabaseColumn("EBF stopped between 0-1 month",
CountUniqueColumn('doc_id', alias="stopped_0_1",
filters=self.filters + [LTE('ebf_stop_age_month', '1')])
),
DatabaseColumn("EBF stopped between 1-3 month",
CountUniqueColumn('doc_id', alias="stopped_1_3",
filters=self.filters + [AND([GT('ebf_stop_age_month', '1'), LTE('ebf_stop_age_month', '3')])])
),
DatabaseColumn("EBF stopped between 3-5 month",
CountUniqueColumn('doc_id', alias="stopped_3_5",
filters=self.filters + [AND([GT('ebf_stop_age_month', '3'), LTE('ebf_stop_age_month', '5')])])
),
DatabaseColumn("EBF stopped between 5-6 month",
CountUniqueColumn('doc_id', alias="stopped_5_6",
filters=self.filters + [AND([GT('ebf_stop_age_month', '5'), LTE('ebf_stop_age_month', '6')])])
)
]
| bsd-3-clause | -8,137,775,323,974,969,000 | 46.163866 | 164 | 0.529681 | false |
pokornyv/SPEpy | siam_parquet.py | 1 | 15368 | ###########################################################
# SPEpy - simplified parquet equation solver for SIAM #
# Copyright (C) 2019 Vladislav Pokorny; [email protected] #
# homepage: github.com/pokornyv/SPEpy #
# siam_parquet.py - solver for SPE #
# method described in Phys. Rev. B 100, 195114 (2019). #
###########################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import scipy as sp
from scipy.integrate import simps
from scipy.optimize import brentq
from sys import argv,exit,version_info
from os import listdir
from time import ctime,time
from parlib import *
from parlib2 import *
t = time()
hashes = '#'*80
## python version
ver = str(version_info[0])+'.'+str(version_info[1])+'.'+str(version_info[2])
## header for files so we store the parameters along with data
parline = '# U = {0: .5f}, Delta = {1: .5f}, ed = {2: .5f}, h = {3: .5f}, T = {4: .5f}'\
.format(U,Delta,ed,h,T)
parfname = str(GFtype)+'_U'+str(U)+'eps'+str(ed)+'T'+str(T)+'h'+str(h)
## print the header #######################################
if chat:
print(hashes+'\n# generated by '+str(argv[0])+', '+str(ctime()))
print('# python version: '+str(ver)+', SciPy version: '+str(sp.version.version))
print('# energy axis: [{0: .5f} ..{1: .5f}], step = {2: .5f}, length = {3: 3d}'\
.format(En_A[0],En_A[-1],dE,len(En_A)))
print(parline)
print('# Kondo temperature from Bethe ansatz: Tk ~{0: .5f}'\
.format(float(KondoTemperature(U,Delta,ed))))
if SC: print('# using partial self-consistency scheme for the self-energy')
elif FSC: print('# using full self-consistency scheme for the self-energy')
else: print('# using no self-consistency scheme for the self-energy')
if SC and FSC: SC = False
if SCsolver == 'fixed':
print('# using Steffensen fixed-point algorithm to calculate Lambda vertex')
elif SCsolver == 'root':
print('# using MINPACK root to calculate Lambda vertex')
else:
print('# using iteration algorithm to calculate Lambda vertex, mixing parameter alpha = {0: .5f}'\
.format(float(alpha)))
###########################################################
## initialize the non-interacting Green function ##########
if GFtype == 'lor':
if chat: print('# using Lorentzian non-interacting DoS')
GFlambda = lambda x: GreensFunctionLorenz(x,Delta)
DensityLambda = lambda x: DensityLorentz(x,Delta)
elif GFtype == 'semi':
if chat: print('# using semielliptic non-interacting DoS')
W = Delta ## half-bandwidth
GFlambda = lambda x: GreensFunctionSemi(x,W)
DensityLambda = lambda x: DensitySemi(x,W)
elif GFtype == 'gauss':
if chat: print('# using Gaussian non-interacting DoS')
GFlambda = lambda x: GreensFunctionGauss(x,Delta)
DensityLambda = lambda x: DensityGauss(x,Delta)
else:
print('# Error: DoS type "'+GFtype+'" not implemented.')
exit(1)
## using the Lambda from the older method as a starting point
if not Lin:
if chat: print('# calculating the fully static vertex at half-filling as a starting point:')
GFzero_A = GFlambda(En_A)
Bubble_A = TwoParticleBubble(GFzero_A,GFzero_A,'eh')
Lambda0 = CalculateLambda(Bubble_A,GFzero_A,GFzero_A)
if chat: print('# - Lambda0 = {0: .8f}'.format(Lambda0))
else:
if chat: print('# Initial guess for Lambda: {0: .6f}'.format(LIn))
########################################################
## calculate filling of the thermodynamic Green function
if chat: print('#\n# calculating the initial thermodynamic Green function:')
[nTup,nTdn] = [0.5,0.5]
[nTupOld,nTdnOld] = [1e8,1e8]
k = 1
while any([sp.fabs(nTupOld-nTup) > epsn, sp.fabs(nTdnOld-nTdn) > epsn]):
[nTupOld,nTdnOld] = [nTup,nTdn]
if T == 0.0:
nup_dens = lambda x: DensityLambda(ed+U/2.0*(x+nTdn-1.0)-h) - x
ndn_dens = lambda x: DensityLambda(ed+U/2.0*(nTup+x-1.0)+h) - x
else:
nup_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(x+nTdn-1.0)+h)) - x
ndn_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(nTup+x-1.0)-h)) - x
nTup = brentq(nup_dens,0.0,1.0,xtol = epsn)
nTdn = brentq(ndn_dens,0.0,1.0,xtol = epsn)
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nTup,nTdn))
k += 1
## fill the Green functions
GFTup_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)+h)
GFTdn_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)-h)
## write non-interacting GF to a file, development only
#WriteFileX([GFTup_A,GFTdn_A],WriteMax,WriteStep,parline,'GFTzero.dat')
if chat: print('# - norm[GTup]: {0: .8f}, n[GTup]: {1: .8f}'\
.format(float(IntDOS(GFTup_A)),float(nTup)))
if chat: print('# - norm[GTdn]: {0: .8f}, n[GTdn]: {1: .8f}'\
.format(float(IntDOS(GFTdn_A)),float(nTdn)))
if chat: print('# - nT = {0: .8f}, mT = {1: .8f}'.format(float(nTup+nTdn),float(nTup-nTdn)))
###########################################################
## calculate the Lambda vertex ############################
if chat:
if FSC: print('#\n# calculating the full self-energy using FSC scheme:')
else: print('#\n# calculating the Hartree-Fock self-energy:')
if Lin: ## reading initial values from command line
Lambda = LIn
else: ## using the static guess
Lambda = Lambda0
[nTupOld,nTdnOld] = [1e8,1e8]
[Sigma0,Sigma1] = [U*(nTup+nTdn-1.0)/2.0,Lambda*(nTdn-nTup)/2.0]
k = 1
sumsq = 1e8 if FSC else 0.0 ## convergence criterion for FSC scheme
while any([sp.fabs(nTupOld-nTup) > epsn, sp.fabs(nTdnOld-nTdn) > epsn, sumsq > 0.01]):
if chat: print('#\n# Iteration {0: 3d}'.format(k))
[nTupOld,nTdnOld] = [nTup,nTdn]
if FSC:
GFTupOld_A = sp.copy(GFTup_A)
## Lambda vertex
if chat: print('# - calculating Lambda vertex:')
Lambda = CalculateLambdaD(GFTup_A,GFTdn_A,Lambda)
if chat: print('# - - Lambda vertex: Lambda: {0: .8f}'.format(Lambda))
if True: ## print auxiliary functions, development only
# if False:
K = KvertexD(Lambda,GFTup_A,GFTdn_A)
if chat: print('# - - K vertex: K: {0: .8f}'.format(K))
## check the integrals:
XD = ReBDDFDD(GFTup_A,GFTdn_A,0)
if chat: print('# - - aux. integral: X: {0: .8f}'.format(XD))
## HF self-energy
if chat: print('# - calculating static self-energy:')
[Sigma0,Sigma1] = CalculateSigmaT(Lambda,Sigma0,Sigma1,GFlambda,DensityLambda)
if chat: print('# - - static self-energy: normal: {0: .8f}, anomalous: {1: .8f}'.format(Sigma0,Sigma1))
GFTup_A = GFlambda(En_A-ed-Sigma0+(h-Sigma1))
GFTdn_A = GFlambda(En_A-ed-Sigma0-(h-Sigma1))
## symmetrize the Green function if possible
if h == 0.0:
if chat: print('# - h = 0, averaging Green functions over spin to avoid numerical errors')
		GFTup_A = GFTdn_A = sp.copy((GFTup_A+GFTdn_A)/2.0)
Sigma1 = 0.0
## recalculate filling and magnetization
if any([ed!=0.0,h!=0.0]):
if T == 0.0:
nTup = DensityLambda(ed+Sigma0-(h-Sigma1))
nTdn = DensityLambda(ed+Sigma0+(h-Sigma1))
else:
nTup = Filling(GFTup_A)
nTdn = Filling(GFTdn_A)
else: ## ed = 0 and h = 0
nTup = nTdn = 0.5
## this is to convert complex to float, the warning is just a sanity check
if any([sp.fabs(sp.imag(nTup))>1e-6,sp.fabs(sp.imag(nTdn))>1e-6,]):
print('# Warning: non-zero imaginary part of nT, up: {0: .8f}, dn: {1: .8f}.'\
.format(sp.imag(nTup),sp.imag(nTdn)))
[nTup,nTdn] = [sp.real(nTup),sp.real(nTdn)]
if FSC:
## spectral self-energy ###################################
SigmaUp_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
GFTup_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)+(h-Sigma1)-Sigma_A)
GFTdn_A = GFlambda(En_A-ed-U/2.0*(nTup+nTdn-1.0)-(h-Sigma1)-Sigma_A)
## print output for given iteration
if chat:
print('# - thermodynamic Green function filling: nTup = {0: .8f}, nTdn = {1: .8f}'.format(nTup,nTdn))
print('# - ed = {0: .4f}, h = {1: .4f}: nT = {2: .8f}, mT = {3: .8f}'.format(ed,h,nTup+nTdn,nTup-nTdn))
print('{0: 3d}\t{1: .8f}\t{2: .8f}\t{3: .8f}\t{4: .8f}'.format(k,nTup,nTdn,nTup+nTdn,nTup-nTdn))
if FSC:
sumsq = sp.sum(sp.imag(GFTupOld_A-GFTup_A)[int(0.5*Nhalf):int(1.5*Nhalf)]**2)
if chat: print('# Sum of squares: {0: .8f}'.format(sumsq))
k+=1
if chat:
	if FSC: print('# - Calculation of the full spectral self-energy finished after {0: 3d} iterations.'.format(int(k-1)))
	else: print('# - Calculation of the Hartree-Fock self-energy finished after {0: 3d} iterations.'.format(int(k-1)))
Det_A = DeterminantGD(Lambda,GFTup_A,GFTdn_A)
Dzero = Det_A[int((len(En_A)-1)/2)]
if chat: print('# - determinant at zero energy: {0: .8f} {1:+8f}i'.format(sp.real(Dzero),sp.imag(Dzero)))
## write the determinant to a file, for development only
#WriteFileX([GFTup_A,GFTdn_A,Det_A],WriteMax,WriteStep,parline,'DetG.dat')
if SC: ## partial self-consistency between Sigma and G:
if chat: print('#\n# calculating the spectral self-energy:')
parfname = 'SC_'+ parfname
k = 1
sumsq = 1e8
GFintUp_A = sp.copy(GFTup_A)
GFintDn_A = sp.copy(GFTdn_A)
[nUp,nDn] = [nTup,nTdn]
while sumsq > 0.06:
GFintUpOld_A = sp.copy(GFintUp_A)
## spectral self-energy ###################################
if chat: print('#\n# Iteration {0: 3d}'.format(k))
SigmaUp_A = SelfEnergyD_sc(GFintUp_A,GFintDn_A,GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD_sc(GFintUp_A,GFintDn_A,GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
GFintUp_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)+(h-Sigma1)-Sigma_A)
GFintDn_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)-(h-Sigma1)-Sigma_A)
if any([ed!=0.0,h!=0.0]):
[nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
else: ## ed = 0 and h = 0
[nUp,nDn] = [0.5,0.5]
if chat: print('# densities: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nUp,nDn))
sumsq = sp.sum(sp.imag(GFintUpOld_A-GFintUp_A)[int(0.5*Nhalf):int(1.5*Nhalf)]**2)
if chat: print('# Sum of squares: {0: .8f}'.format(sumsq))
k+=1
elif FSC: ## full self-consistency between Sigma and G:
parfname = 'FSC_'+ parfname
GFintUp_A = sp.copy(GFTup_A)
GFintDn_A = sp.copy(GFTdn_A)
if any([ed!=0.0,h!=0.0]): [nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
else: [nUp,nDn] = [0.5,0.5]
else:
## spectral self-energy ###################################
if chat: print('#\n# calculating the spectral self-energy')
SigmaUp_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'up')
SigmaDn_A = SelfEnergyD2(GFTup_A,GFTdn_A,Lambda,'dn')
Sigma_A = (SigmaUp_A+SigmaDn_A)/2.0
## interacting Green function #############################
if chat: print('#\n# calculating the spectral Green function:')
if chat: print('# - iterating the final density:')
[nUp,nDn] = [nTup,nTdn]
[nUpOld,nDnOld] = [1e8,1e8]
k = 1
while any([sp.fabs(nUpOld-nUp) > epsn, sp.fabs(nDnOld-nDn) > epsn]):
[nUpOld,nDnOld] = [nUp,nDn]
nup_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(x+nDn-1.0)+(h-Sigma1)-Sigma_A)) - x
ndn_dens = lambda x: Filling(GFlambda(En_A-ed-U/2.0*(nUp+x-1.0)-(h-Sigma1)-Sigma_A)) - x
nUp = brentq(nup_dens,0.0,1.0,xtol = epsn)
nDn = brentq(ndn_dens,0.0,1.0,xtol = epsn)
if chat: print('# - - {0: 3d}: nUp: {1: .8f}, nDn: {2: .8f}'.format(k,nUp,nDn))
k += 1
GFintUp_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)+(h-Sigma1)-Sigma_A)
GFintDn_A = GFlambda(En_A-ed-U/2.0*(nUp+nDn-1.0)-(h-Sigma1)-Sigma_A)
###########################################################
## calculate properties ###################################
## quasiparticle weights
[Zup,dReSEupdw] = QuasiPWeight(sp.real(SigmaUp_A))
[Zdn,dReSEdndw] = QuasiPWeight(sp.real(SigmaDn_A))
[Z,dReSEdw] = QuasiPWeight(sp.real(Sigma_A))
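## QuasiPWeight presumably returns the Fermi-liquid weight Z = 1/(1 - dReSigma/dw|_{w=0})
## together with the derivative itself, hence the effective mass is reported as m*/m = 1/Z.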
if chat: print('# quasiparticle weight:')
if chat: print('# - Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Z),float(dReSEdw),float(1.0/Z)))
if chat and h!=0.0:
print('# - up spin: Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Zup),float(dReSEupdw),float(1.0/Zup)))
print('# - dn spin: Z = {0: .8f}, DReSE/dw[0] = {1: .8f}, m*/m = {2: .8f}'\
.format(float(Zdn),float(dReSEdndw),float(1.0/Zdn)))
## DoS at Fermi energy
DOSFup = -sp.imag(GFintUp_A[int(N/2)])/sp.pi
DOSFdn = -sp.imag(GFintDn_A[int(N/2)])/sp.pi
## filling
[nUp,nDn] = [Filling(GFintUp_A),Filling(GFintDn_A)]
if chat:
print('# - spectral Green function filling: nUp = {0: .8f}, nDn = {1: .8f}'.format(nUp,nDn))
print('# - ed = {0: .4f}, h = {1: .4f}: n = {2: .8f}, m = {3: .8f}'.format(ed,h,nUp+nDn,nUp-nDn))
## HWHM of the spectral function
[HWHMup,DOSmaxUp,wmaxUp] = CalculateHWHM(GFintUp_A)
[HWHMdn,DOSmaxDn,wmaxDn] = CalculateHWHM(GFintDn_A)
if any([HWHMup == 0.0,HWHMdn == 0.0]) and chat:
print('# - Warning: HWHM cannot be calculated, setting it to zero.')
elif any([HWHMup < dE,HWHMdn < dE]):
print('# - Warning: HWHM smaller than energy resolution.')
if chat: print('# - spin-up: DOS[0] = {0: .8f}, maximum of DoS: {1: .8f} at w = {2: .8f}'\
.format(float(DOSFup),float(DOSmaxUp),float(wmaxUp)))
if h!=0.0 and chat:
print('# - spin-dn: DOS[0] = {0: .8f}, maximum of DoS: {1: .8f} at w = {2: .8f}'\
.format(float(DOSFdn),float(DOSmaxDn),float(wmaxDn)))
if chat: print('# - HWHM: spin-up: {0: .8f}, spin-dn: {1: .8f}'.format(float(HWHMup),float(HWHMdn)))
## zero-field susceptibility
if h==0.0:
ChiT = sp.real(SusceptibilityTherm(Dzero,GFTup_A))
ChiS = sp.real(SusceptibilitySpecD(Lambda,ChiT,GFintUp_A))
if chat: print('# - thermodynamic susceptibility: {0: .8f}'.format(ChiT))
if chat: print('# - spectral susceptibility: {0: .8f}'.format(ChiS))
else:
ChiS = ChiT = 0.0
###########################################################
## write the output files #################################
if WriteGF:
header = parline+'\n# E\t\tRe GF0\t\tIm GF0\t\tRe SE\t\tIm SE\t\tRe GF\t\tIm GF'
filename = 'gfUp_'+parfname+'.dat'
WriteFileX([GFTup_A,SigmaUp_A,GFintUp_A],WriteMax,WriteStep,header,filename)
#WriteFileX([GFTup_A,SigmaUp_A,(GFintUp_A+sp.flipud(GFintUp_A))/2.0],WriteMax,WriteStep,header,'symmGF.dat')
if h!=0.0:
filename = 'gfDn_'+parfname+'.dat'
WriteFileX([GFTdn_A,SigmaDn_A,GFintDn_A],WriteMax,WriteStep,header,filename)
filename = 'gfMag_'+parfname+'.dat'
WriteFileX([GFintUp_A,GFintDn_A,Sigma_A],WriteMax,WriteStep,header,filename)
## write data to standard output
## use awk 'NR%2==0', awk 'NR%2==1' to separate the output into two blocks
print('{0: .4f}\t{1: .4f}\t{2: .4f}\t{3: .4f}\t{4: .6f}\t{5: .6f}\t{6: .6f}\t{7: .6f}\t{8: .6f}'\
.format(U,ed,T,h,sp.real(Lambda),HWHMup,Z,DOSFup,sp.real(Dzero)))
print('{0: .4f}\t{1: .4f}\t{2: .4f}\t{3: .4f}\t{4: .6f}\t{5: .6f}\t{6: .6f}\t{7: .6f}\t{8: .6f}\t{9: .6f}'\
.format(U,ed,T,h,nTup,nTdn,nUp,nDn,ChiT,ChiS))
if chat: print('# '+argv[0]+' DONE after {0: .2f} seconds.'.format(float(time()-t)))
## siam_parquet.py end ###
| gpl-3.0 | 5,412,667,270,545,219,000 | 44.333333 | 118 | 0.619534 | false |
OnroerendErfgoed/pyramid_urireferencer | pyramid_urireferencer/models.py | 1 | 4161 | # -*- coding: utf-8 -*-
import json
class RegistryResponse:
"""
Represents what the registry will send back to a client when asked if
a certain uri is used somewhere.
    :param string query_uri: Uri of the resource under survey.
:param boolean success: Were all the queries successful?
:param boolean has_references: Were any references found?
:param int count: How many references were found?
:param list applications: A list of application results.
"""
def __init__(self, query_uri, success, has_references, count, applications):
self.query_uri = query_uri
self.success = success
self.has_references = has_references
self.count = count
self.applications = applications
@staticmethod
def load_from_json(data):
"""
        Load a :class:`RegistryResponse` from a dictionary or a string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
applications = [
ApplicationResponse.load_from_json(a) for a in data['applications']
] if data['applications'] is not None else []
return RegistryResponse(
data['query_uri'], data['success'],
data['has_references'], data['count'], applications
)
def to_json(self):
return {
"query_uri": self.query_uri,
"success": self.success,
"has_references": self.has_references,
"count": self.count,
"applications": [app.to_json() for app in self.applications]
}
class ApplicationResponse:
"""
Represents what a certain application will send back to the registry when
asked if a certain uri is used by the application.
:param string title: Title of the application
:param string uri: A uri for the application, not guaranteed to be a http url.
:param string service_url: The url that answered the question
    :param boolean success: Was the query successful?
:param boolean has_references: Were any references found?
:param int count: How many references were found?
:param list items: A list of items that have a reference to the \
uri under survey. Limited to 5 items for performance reasons.
"""
def __init__(self, title, uri, service_url, success, has_references, count, items):
self.title = title
self.uri = uri
self.service_url = service_url
self.success = success
self.has_references = has_references
self.count = count
self.items = items
@staticmethod
def load_from_json(data):
"""
        Load an :class:`ApplicationResponse` from a dictionary or string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else []
return ApplicationResponse(
data['title'], data['uri'], data['service_url'],
data['success'], data['has_references'], data['count'], items
)
def to_json(self):
return {
"title": self.title,
"uri": self.uri,
"service_url": self.service_url,
"success": self.success,
"has_references": self.has_references,
"count": self.count,
"items": [item.to_json() for item in self.items] if self.items else []
}
class Item:
"""
A single item that holds a reference to the queried uri.
:param string title: Title of the item.
:param string uri: Uri of the item.
"""
def __init__(self, title, uri):
self.title = title
self.uri = uri
@staticmethod
def load_from_json(data):
"""
        Load an :class:`Item` from a dictionary or string (that will be parsed
        as json).
"""
if isinstance(data, str):
data = json.loads(data)
return Item(data['title'], data['uri'])
def to_json(self):
return {
"title": self.title,
"uri": self.uri
}
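# Minimal round-trip sketch (illustrative only, not part of the original
# module): build an ApplicationResponse from a plain dict and serialize it
# back to JSON with the loaders defined above.
if __name__ == '__main__':
    example = {
        "title": "demo app",
        "uri": "urn:demo:app",
        "service_url": "http://localhost/references",
        "success": True,
        "has_references": True,
        "count": 1,
        "items": [{"title": "demo item", "uri": "urn:demo:item:1"}]
    }
    response = ApplicationResponse.load_from_json(example)
    print(json.dumps(response.to_json()))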
| mit | -6,828,652,018,221,024,000 | 31.76378 | 100 | 0.596972 | false |
suutari-ai/shoop | shuup_tests/core/test_attributes.py | 2 | 6961 | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import datetime
from decimal import Decimal
import pytest
from django.core.exceptions import ObjectDoesNotExist
from django.test import override_settings
from django.utils.translation import get_language
from shuup.core.models import (
Attribute, AttributeType, AttributeVisibility, Product, ProductAttribute
)
from shuup.core.models._attributes import NoSuchAttributeHere
from shuup.testing.factories import (
ATTR_SPECS, create_product, get_default_attribute_set, get_default_product
)
def _populate_applied_attribute(aa):
if aa.attribute.type == AttributeType.BOOLEAN:
aa.value = True
aa.save()
assert aa.value is True, "Truth works"
assert aa.untranslated_string_value == "1", "Integer attributes save string representations"
aa.value = not 42 # (but it could be something else)
aa.save()
assert aa.value is False, "Lies work"
assert aa.untranslated_string_value == "0", "Integer attributes save string representations"
aa.value = None
aa.save()
assert aa.value is None, "None works"
assert aa.untranslated_string_value == "", "Boolean saves None"
return
if aa.attribute.type == AttributeType.INTEGER:
aa.value = 320.51
aa.save()
assert aa.value == 320, "Integer attributes get rounded down"
assert aa.untranslated_string_value == "320", "Integer attributes save string representations"
return
if aa.attribute.type == AttributeType.DECIMAL:
aa.value = Decimal("0.636") # Surface pressure of Mars
aa.save()
assert aa.value * 1000 == 636, "Decimals work like they should"
assert aa.untranslated_string_value == "0.636", "Decimal attributes save string representations"
return
if aa.attribute.type == AttributeType.TIMEDELTA:
aa.value = 86400
aa.save()
assert aa.value.days == 1, "86,400 seconds is one day"
assert aa.untranslated_string_value == "86400", "Timedeltas are seconds as strings"
aa.value = datetime.timedelta(days=4)
aa.save()
assert aa.value.days == 4, "4 days remain as 4 days"
assert aa.untranslated_string_value == "345600", "Timedeltas are still seconds as strings"
return
if aa.attribute.type == AttributeType.UNTRANSLATED_STRING:
aa.value = "Dog Hello"
aa.save()
assert aa.value == "Dog Hello", "Untranslated strings work"
assert aa.untranslated_string_value == "Dog Hello", "Untranslated strings work"
return
if aa.attribute.type == AttributeType.TRANSLATED_STRING:
assert aa.attribute.is_translated
with override_settings(LANGUAGES=[(x, x) for x in ("en", "fi", "ga", "ja")]):
versions = {
"en": u"science fiction",
"fi": u"tieteiskirjallisuus",
"ga": u"ficsean eolaíochta",
"ja": u"空想科学小説",
}
for language_code, text in versions.items():
aa.set_current_language(language_code)
aa.value = text
aa.save()
assert aa.value == text, "Translated strings work"
for language_code, text in versions.items():
assert aa.safe_translation_getter("translated_string_value", language_code=language_code) == text, "%s translation is safe" % language_code
aa.set_current_language("xx")
assert aa.value == "", "untranslated version yields an empty string"
return
if aa.attribute.type == AttributeType.DATE:
aa.value = "2014-01-01"
assert aa.value == datetime.date(2014, 1, 1), "Date parsing works"
assert aa.untranslated_string_value == "2014-01-01", "Dates are saved as strings"
return
if aa.attribute.type == AttributeType.DATETIME:
with pytest.raises(TypeError):
aa.value = "yesterday"
dt = datetime.datetime(1997, 8, 12, 14)
aa.value = dt
assert aa.value.toordinal() == 729248, "Date assignment works"
assert aa.value.time().hour == 14, "The clock still works"
assert aa.untranslated_string_value == dt.isoformat(), "Datetimes are saved as strings too"
return
raise NotImplementedError("Not implemented: populating %s" % aa.attribute.type) # pragma: no cover
@pytest.mark.django_db
def test_applied_attributes():
product = get_default_product()
for spec in ATTR_SPECS: # This loop sets each attribute twice. That's okay.
attr = Attribute.objects.get(identifier=spec["identifier"])
pa, _ = ProductAttribute.objects.get_or_create(product=product, attribute=attr)
_populate_applied_attribute(pa)
pa.save()
if not attr.is_translated:
product.set_attribute_value(attr.identifier, pa.value)
assert product.get_attribute_value("bogomips") == 320, "integer attribute loaded neatly"
product.set_attribute_value("bogomips", 480)
assert product.get_attribute_value("bogomips") == 480, "integer attribute updated neatly"
Product.cache_attributes_for_targets(
applied_attr_cls=ProductAttribute,
targets=[product],
attribute_identifiers=[a["identifier"] for a in ATTR_SPECS],
language=get_language()
)
assert (get_language(), "bogomips",) in product._attr_cache, "integer attribute in cache"
assert product.get_attribute_value("bogomips") == 480, "integer attribute value in cache"
assert product.get_attribute_value("ba:gelmips", default="Britta") == "Britta", "non-existent attributes return default value"
assert product._attr_cache[(get_language(), "ba:gelmips")] is NoSuchAttributeHere, "cache miss saved"
attr_info = product.get_all_attribute_info(language=get_language(), visibility_mode=AttributeVisibility.SHOW_ON_PRODUCT_PAGE)
assert set(attr_info.keys()) <= set(a["identifier"] for a in ATTR_SPECS), "get_all_attribute_info gets all attribute info"
@pytest.mark.django_db
def test_get_set_attribute():
product = create_product("ATTR_TEST")
product.set_attribute_value("awesome", True)
product.set_attribute_value("bogomips", 10000)
product.set_attribute_value("bogomips", None)
product.set_attribute_value("author", None)
product.set_attribute_value("genre", "Kenre", "fi")
with pytest.raises(ValueError):
product.set_attribute_value("genre", "Kenre")
with pytest.raises(ObjectDoesNotExist):
product.set_attribute_value("keppi", "stick")
def test_saving_invalid_attribute():
with pytest.raises(ValueError):
Attribute(identifier=None).save()
| agpl-3.0 | -583,852,436,404,547,100 | 41.365854 | 155 | 0.661773 | false |
googleads/google-ads-python | google/ads/googleads/v6/resources/types/currency_constant.py | 1 | 1954 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.resources",
marshal="google.ads.googleads.v6",
manifest={"CurrencyConstant",},
)
class CurrencyConstant(proto.Message):
r"""A currency constant.
Attributes:
resource_name (str):
Output only. The resource name of the currency constant.
Currency constant resource names have the form:
``currencyConstants/{code}``
code (str):
Output only. ISO 4217 three-letter currency
code, e.g. "USD".
name (str):
Output only. Full English name of the
currency.
symbol (str):
Output only. Standard symbol for describing
this currency, e.g. '$' for US Dollars.
billable_unit_micros (int):
Output only. The billable unit for this
currency. Billed amounts should be multiples of
this value.
"""
resource_name = proto.Field(proto.STRING, number=1)
code = proto.Field(proto.STRING, number=6, optional=True)
name = proto.Field(proto.STRING, number=7, optional=True)
symbol = proto.Field(proto.STRING, number=8, optional=True)
billable_unit_micros = proto.Field(proto.INT64, number=9, optional=True)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 | -3,993,784,063,970,901,500 | 32.118644 | 76 | 0.660184 | false |
santoshghimire/IL-Jobcrawl | jobcrawl/dump_client_changes.py | 1 | 4316 | import warnings
warnings.filterwarnings("ignore")
DATE_FMT = "%d/%m/%Y"
def parse_dates(sd, ed):
if not ed:
ed = datetime.today().strftime(DATE_FMT)
return datetime.strptime(sd, DATE_FMT), datetime.strptime(ed, DATE_FMT)
def main(site, start_date, end_date):
start_date, end_date = parse_dates(start_date, end_date)
if start_date > end_date:
print("Start date is greater than end date")
return
print("\nGetting data from {} to {}\n".format(
start_date.strftime(DATE_FMT), end_date.strftime(DATE_FMT)))
conn = pymysql.connect(
host=settings.MYSQL_HOST, port=3306, user=settings.MYSQL_USER,
passwd=settings.MYSQL_PASSWORD, db=settings.MYSQL_DBNAME,
charset='utf8'
)
df_all = []
current_date = None
while True:
if current_date is None:
current_date = start_date - timedelta(days=1)
current_date_str = current_date.strftime(DATE_FMT)
sql = """SELECT distinct(Company) FROM sites_datas
WHERE Site='%s' and Crawl_Date='%s'""" % (site, current_date_str)
data_df = pd.read_sql(sql, conn)
print("Date: {}, Unique company size = {}".format(
current_date_str, data_df.shape[0]))
df_all.append((current_date_str, data_df))
if current_date >= end_date:
break
current_date += timedelta(days=1)
print("\nTotal df retrieved = {}".format(len(df_all)))
print("Dates of all dfs = {}\n".format([i[0] for i in df_all]))
yest_df = None
new_companies = pd.DataFrame.from_dict({'Company': [], 'Report Date': []})
removed_companies = pd.DataFrame.from_dict({'Company': [], 'Report Date': []})
for date_str, df in df_all:
if yest_df is None:
yest_df = df
continue
yest_list = yest_df['Company'].tolist()
# if None in yest_list:
# yest_list.remove(None)
today_list = df['Company'].tolist()
# if None in today_list:
# today_list.remove(None)
new = list(set(today_list) - set(yest_list))
removed = list(set(yest_list) - set(today_list))
new_temp = pd.DataFrame.from_dict({'Company': new,
'Report Date': [date_str] * len(new)})
removed_temp = pd.DataFrame.from_dict({'Company': removed,
'Report Date': [date_str] * len(removed)})
print("Report: Date {}: New={}, Removed={}".format(
date_str, new_temp.shape[0], removed_temp.shape[0]))
new_companies = new_companies.append(new_temp, ignore_index=True)
removed_companies = removed_companies.append(removed_temp, ignore_index=True)
print("Combined Report: Date {}: New={}, Removed={}".format(
date_str, new_companies.shape[0], removed_companies.shape[0]))
yest_df = df
prefix = "{}_to_{}".format(
start_date.strftime("%d-%m-%y"), end_date.strftime("%d-%m-%y"))
new_companies.to_csv("{}_{}".format(prefix, "new_company_report_dump.csv"),
index=False, encoding='utf-8')
removed_companies.to_csv("{}_{}".format(prefix, "removed_company_report_dump.csv"),
index=False, encoding='utf-8')
total_new = new_companies['Company'].tolist()
total_removed = removed_companies['Company'].tolist()
total_new_distinct = set(total_new)
total_removed_distinct = set(total_removed)
print("Distinct companies in New companies report = {}".format(
len(total_new_distinct)))
print("Distinct companies in Removed companies report = {}".format(
len(total_removed_distinct)))
print("\nDone")
if __name__ == '__main__':
import argparse
from datetime import datetime, timedelta
import pymysql
import pandas as pd
from jobcrawl import settings
parser = argparse.ArgumentParser(description='Dump Client Changes')
parser.add_argument('-s', '--site', help="Site", required=True)
parser.add_argument('-sd', '--start_date', help="Start Date (dd/mm/yyyy)",
required=True)
parser.add_argument('-ed', '--end_date', help="End Date (dd/mm/yyyy)",
required=False)
args = parser.parse_args()
main(args.site, args.start_date, args.end_date)
| mit | -7,969,661,471,214,422,000 | 38.236364 | 87 | 0.593605 | false |
tchakravarty/PythonExamples | Code/Miscellaneous/FileReadWithNA.py | 1 | 1710 | __author__ = 'tirthankar'
import pandas as pd
import xlrd as xl
import numpy as np
# straight read
pdata = pd.read_csv(
"Code/Miscellaneous/Data/pwt71_11302012version/pwt71_wo_country_names_wo_g_vars.csv")
# passing a string
pdata2 = pd.read_csv("Code/Miscellaneous/Data/pwt71_11302012version/pwt71_wo_country_names_wo_g_vars.csv",
na_values = "AFG")
pdata2["isocode"]
# passing a list
pdata3 = pd.read_csv("Code/Miscellaneous/Data/pwt71_11302012version/pwt71_wo_country_names_wo_g_vars.csv",
na_values = ["AFG"])
pdata3["isocode"]
# read the file directly from Excel using xlrd
file_location = "Code/Miscellaneous/Data/pwt71_11302012version/pwt71_vars_forWeb.xls"
xlPWT = xl.open_workbook(file_location)
xlPWT1 = xlPWT.sheet_by_index(0)
xlPWT1.cell_value(3, 1)
xlPWT1.nrows
xlPWT1.ncols
# read file directly using pd.read_excel
pmetadata = pd.read_excel("Code/Miscellaneous/Data/pwt71_11302012version/pwt71_vars_forWeb.xls")
pd.read_csv("Code/Miscellaneous/Data/pwt.csv", na_values = ["na"])
textPWT = """
country ccode year Pop XRAT currency ppp t1
Afghanistan AFG 1950 8150.368 na na na na
Afghanistan AFG 1951 8284.473 na na na na
Afghanistan AFG 1952 8425.333 na na na na
Afghanistan AFG 1953 8573.217 na na na na
"""
liPWT = textPWT.split("\n")
liPWT = [x.split() for x in liPWT][1:6]
npPWT = np.array(liPWT)
pdPWT = pd.DataFrame(npPWT[1:, :], columns=npPWT[0, :])
pdPWT = pdPWT.replace('na', np.nan, regex=True)
pdPWT = pdPWT.convert_objects(convert_numeric=True)
| apache-2.0 | -7,479,459,970,993,196,000 | 24.147059 | 106 | 0.645614 | false |
CarterBain/AlephNull | alephnull/sources/data_frame_source.py | 1 | 4637 |
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import pandas as pd
from alephnull.gens.utils import hash_args
from alephnull.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : filter to remove the sids
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.columns)
self.start = kwargs.get('start', data.index[0])
self.end = kwargs.get('end', data.index[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iterkv():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
'price': price,
'volume': 1000,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
class DataPanelSource(DataSource):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : filter to remove the sids
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.items)
self.start = kwargs.get('start', data.major_axis[0])
self.end = kwargs.get('end', data.major_axis[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iterkv():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
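# Illustrative usage sketch (not part of the original module): DataFrameSource
# expects a price DataFrame indexed by a DatetimeIndex, one column per sid;
# iterating raw_data yields one event dict per (timestamp, sid) pair.
if __name__ == '__main__':
    index = pd.date_range('2013-01-01', periods=3)
    prices = pd.DataFrame({'AAPL': [10.0, 10.5, 11.0]}, index=index)
    source = DataFrameSource(prices)
    for event in source.raw_data:
        print(event)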
| apache-2.0 | 4,802,013,538,821,213,000 | 29.11039 | 74 | 0.574941 | false |
CMUSV-VisTrails/WorkflowRecommendation | vistrails/gui/modules/query_configuration.py | 1 | 6088 | ###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtCore, QtGui
from core.utils import any, expression
from core import system
from constant_configuration import StandardConstantWidget, ColorWidget
############################################################################
class QueryWidgetMixin(object):
def __init__(self, contents=None, query_method=None):
self._last_contents = contents
self._last_query_method = query_method
# updateMethod intercepts calls from a child widget like the
# contents_widget
def updateMethod(self):
self.update_parent()
def update_parent(self):
new_contents = self.contents()
new_query_method = self.query_method()
if (new_contents != self._last_contents or
new_query_method != self._last_query_method):
if self.parent() and hasattr(self.parent(), 'updateMethod'):
self.parent().updateMethod()
self._last_contents = new_contents
self._last_query_method = new_query_method
self.emit(QtCore.SIGNAL('contentsChanged'), (self,new_contents))
class BaseQueryWidget(QtGui.QWidget, QueryWidgetMixin):
def __init__(self, contents_klass, query_methods, param, parent=None):
QtGui.QWidget.__init__(self, parent)
QueryWidgetMixin.__init__(self, param.strValue, param.queryMethod)
contents = param.strValue
queryMethod = param.queryMethod
layout = QtGui.QHBoxLayout()
self.op_button = QtGui.QToolButton()
self.op_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.op_button.setArrowType(QtCore.Qt.NoArrow)
action_group = QtGui.QActionGroup(self.op_button)
actions = []
checked_exists = False
for method in query_methods:
action = QtGui.QAction(method, self)
action.setCheckable(True)
action_group.addAction(action)
if method == queryMethod:
action.setChecked(True)
checked_exists = True
actions.append(action)
if not checked_exists:
actions[0].setChecked(True)
self._last_query_method = str(actions[0].text())
menu = QtGui.QMenu(self.op_button)
menu.addActions(actions)
self.op_button.setMenu(menu)
self.op_button.setText(action_group.checkedAction().text())
self.contents_widget = contents_klass(param)
self.contents_widget.setContents(contents)
layout.setMargin(0)
layout.setSpacing(0)
layout.addWidget(self.op_button)
layout.addWidget(self.contents_widget)
self.setLayout(layout)
self.connect(self.op_button, QtCore.SIGNAL('triggered(QAction*)'),
self.update_action)
def contents(self):
return self.contents_widget.contents()
def setContents(self, strValue, silent=True):
self.contents_widget.setContents(strValue)
if not silent:
self.update_parent()
def update_action(self, action):
self.op_button.setText(action.text())
self.update_parent()
def query_method(self):
for action in self.op_button.menu().actions():
if action.isChecked():
return str(action.text())
class StandardQueryWidget(BaseQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, StandardConstantWidget, ["==", "!="],
param, parent)
class StringQueryWidget(StandardQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, StandardConstantWidget,
["*[]*", "==", "=~"],
param, parent)
class NumericQueryWidget(StandardQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, StandardConstantWidget,
["==", "<", ">", "<=", ">="],
param, parent)
class ColorQueryWidget(StandardQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, ColorWidget, ["2.3", "5", "10", "50"],
param, parent)
| bsd-3-clause | -6,465,648,025,933,796,000 | 40.69863 | 79 | 0.621058 | false |
NicoVarg99/daf-recipes | ckan/ckan/ckan/ckan/tests/legacy/functional/api/model/test_ratings.py | 2 | 4089 | # encoding: utf-8
from nose.tools import assert_equal
from nose.plugins.skip import SkipTest
from ckan import model
from ckan.lib.create_test_data import CreateTestData
from ckan.tests.legacy.functional.api.base import BaseModelApiTestCase
from ckan.tests.legacy.functional.api.base import Api1TestCase as Version1TestCase
from ckan.tests.legacy.functional.api.base import Api2TestCase as Version2TestCase
class RatingsTestCase(BaseModelApiTestCase):
@classmethod
def setup_class(cls):
CreateTestData.create()
cls.testsysadmin = model.User.by_name(u'testsysadmin')
cls.comment = u'Comment umlaut: \xfc.'
cls.user_name = u'annafan' # created in CreateTestData
cls.init_extra_environ(cls.user_name)
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_register_get(self):
raise SkipTest('"Rating register get" functionality is not implemented')
rating1 = model.Rating(user_ip_address='1.2.3.4',
package=self.anna,
rating=4.0)
rating2 = model.Rating(user=model.User.by_name(u'annafan'),
package=self.anna,
rating=2.0)
model.Session.add_all((rating1, rating2))
model.repo.commit_and_remove()
offset = self.rating_offset()
res = self.app.get(offset, status=[200])
def test_entity_get(self):
raise SkipTest('"Rating entity get" functionality is not implemented')
rating = model.Rating(user_ip_address='1.2.3.4',
package=self.anna,
rating=4.0)
model.Session.add(rating)
model.repo.commit_and_remove()
offset = self.rating_offset(self.anna.name)
res = self.app.get(offset, status=[200])
assert_equal(res, rating_opts['rating'])
def test_register_post(self):
# Test Rating Register Post 200.
self.clear_all_tst_ratings()
offset = self.rating_offset()
rating_opts = {'package':u'warandpeace',
'rating':5}
pkg_name = rating_opts['package']
postparams = '%s=1' % self.dumps(rating_opts)
res = self.app.post(offset, params=postparams, status=[201],
extra_environ=self.extra_environ)
model.Session.remove()
pkg = self.get_package_by_name(pkg_name)
assert pkg
assert len(pkg.ratings) == 1
assert pkg.ratings[0].rating == rating_opts['rating'], pkg.ratings
# Get package to see rating
offset = self.package_offset(pkg_name)
res = self.app.get(offset, status=[200])
assert pkg_name in res, res
assert '"ratings_average": %s.0' % rating_opts['rating'] in res, res
assert '"ratings_count": 1' in res, res
model.Session.remove()
# Rerate package
offset = self.rating_offset()
postparams = '%s=1' % self.dumps(rating_opts)
res = self.app.post(offset, params=postparams, status=[201],
extra_environ=self.extra_environ)
model.Session.remove()
pkg = self.get_package_by_name(pkg_name)
assert pkg
assert len(pkg.ratings) == 1
assert pkg.ratings[0].rating == rating_opts['rating'], pkg.ratings
def test_entity_post_invalid(self):
self.clear_all_tst_ratings()
offset = self.rating_offset()
rating_opts = {'package':u'warandpeace',
'rating':0}
postparams = '%s=1' % self.dumps(rating_opts)
res = self.app.post(offset, params=postparams, status=[409],
extra_environ=self.extra_environ)
self.assert_json_response(res, 'rating')
model.Session.remove()
pkg = self.get_package_by_name(rating_opts['package'])
assert pkg
assert len(pkg.ratings) == 0
class TestRatingsVersion1(Version1TestCase, RatingsTestCase): pass
class TestRatingsVersion2(Version2TestCase, RatingsTestCase): pass
| gpl-3.0 | 3,643,774,490,865,021,400 | 38.317308 | 83 | 0.60944 | false |
flyingfrog81/zmqnumpy | setup.py | 1 | 1739 | from distutils.core import setup
setup(name='zmqnumpy',
version='0.2',
author="Marco Bartolini",
author_email = "[email protected]",
url = "https://github.com/flyingfrog81/zmqnumpy/",
download_url = "",
license = "mit",
py_modules = ['zmqnumpy'],
requires = ["numpy", "pyzmq"],
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
description="numpy array over zmq sockets",
long_description = """
Zmqnumpy module implements a series of functions used to exchange
numpy ndarrays between U{zeromq<http://www.zeromq.org>} sockets.
    Serialization of numpy arrays happens using the numpy.ndarray.tostring method
which preserves portability to standard C binary format,
enabling data exchange with different programming languages.
    A very simple protocol is defined in order to exchange array data; the
multipart messages will be composed of:
1. identifier string name
2. the numpy array element type (dtype) in its string representation
3. numpy array shape encoded as a binary numpy.int32 array
4. the array data encoded as string using numpy.ndarray.tostring()
This protocol guarantees that numpy array can be carried around and
recostructed uniquely without errors on both ends of a connected pair enabling
an efficient interchange of data between processes and nodes.
@author: Marco Bartolini
@contact: [email protected]
@version: 0.2
""",
)
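# Illustrative sketch of the message layout described in long_description above
# (identifier, dtype string, shape as numpy.int32 bytes, raw array bytes). Kept
# as a comment so it never runs during packaging; the helper name array_to_msg
# is made up for illustration and is not part of the zmqnumpy API.
#
#   import numpy
#
#   def array_to_msg(name, array):
#       return [name,
#               str(array.dtype),
#               numpy.array(array.shape, dtype=numpy.int32).tostring(),
#               array.tostring()]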
| mit | 8,255,093,838,087,761,000 | 38.522727 | 78 | 0.699252 | false |
snufiks/nmap2db | nmap2db/database.py | 1 | 27070 | #!/usr/bin/env python
#
# Copyright (c) 2014 Rafael Martinez Guerrero (PostgreSQL-es)
# [email protected] / http://www.postgresql.org.es/
#
# This file is part of Nmap2db
# https://github.com/rafaelma/nmap2db
#
# Nmap2db is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nmap2db is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nmap2db. If not, see <http://www.gnu.org/licenses/>.
import sys
import psycopg2
import psycopg2.extensions
from psycopg2.extras import wait_select
from nmap2db.prettytable import *
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
#
# Class: nmap2db_db
#
# This class is used to interact with a postgreSQL database
# It is used to open and close connections to the database
# and to set/get some information for/of the connection.
#
class nmap2db_db():
"""This class is used to interact with a postgreSQL database"""
# ############################################
# Constructor
# ############################################
def __init__(self, dsn,logs,application):
""" The Constructor."""
self.dsn = dsn
self.logs = logs
self.application = application
self.conn = None
self.server_version = None
self.cur = None
self.output_format = 'table'
# ############################################
# Method pg_connect()
#
# A generic function to connect to PostgreSQL using Psycopg2
# We will define the application_name parameter if it is not
# defined in the DSN and the postgreSQL server version >= 9.0
# ############################################
def pg_connect(self):
"""A generic function to connect to PostgreSQL using Psycopg2"""
try:
self.conn = psycopg2.connect(self.dsn)
if self.conn:
self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
wait_select(self.conn)
self.cur = self.conn.cursor()
self.server_version = self.conn.server_version
if (self.server_version >= 90000 and 'application_name=' not in self.dsn):
try:
self.cur.execute('SET application_name TO %s',(self.application,))
self.conn.commit()
except psycopg2.Error as e:
self.logs.logger.error('Could not define the application_name parameter: - %s', e)
except psycopg2.Error as e:
raise e
# ############################################
# Method pg_close()
# ############################################
def pg_close(self):
"""A generic function to close a postgreSQL connection using Psycopg2"""
if self.cur:
try:
self.cur.close()
except psycopg2.Error as e:
print "\n* ERROR - Could not close the cursor used in this connection: \n%s" % e
if self.conn:
try:
self.conn.close()
except psycopg2.Error as e:
print "\n* ERROR - Could not close the connection to the database: \n%s" % e
# ############################################
# Method
# ############################################
def show_network_definitions(self):
"""A function to get a list with the networks defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT * FROM show_network_definitions')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["Network","Remarks"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_scan_definitions(self):
"""A function to get a list with the scans defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT * FROM show_scan_definitions')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["ScanID","Remarks","Arguments"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_scan_jobs(self,network_cidr):
"""A function to get a list with the scans jobs defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
if network_cidr == 'ALL':
self.cur.execute('SELECT * FROM show_scan_jobs')
self.conn.commit()
else:
self.cur.execute('SELECT * FROM show_scan_jobs WHERE "Network" = %s',(network_cidr,))
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["ScanID","Remarks","Arguments"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_host_reports(self,host,from_timestamp,to_timestamp):
"""A function to get a list or scan reports for a host"""
try:
self.pg_connect()
if self.cur:
try:
if (host.replace('.','')).replace('/','').isdigit():
self.cur.execute('SELECT * FROM show_host_reports WHERE "IPaddress" = %s AND "Registered" >= %s AND "Registered" <= %s',(host,from_timestamp, to_timestamp))
else:
self.cur.execute('SELECT * FROM show_host_reports WHERE "Hostname" @> %s AND "Registered" >= %s AND "Registered" <= %s',([host],from_timestamp, to_timestamp))
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["ScanID","Finished","Duration","IPaddress","Hostname","State"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_host_details(self,report_id):
"""A function to get host details for a reportID"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT * FROM show_host_details WHERE "ReportID" = %s',(report_id,))
self.conn.commit()
x = PrettyTable([".",".."],header = False)
x.align["."] = "r"
x.align[".."] = "l"
x.padding_width = 1
for record in self.cur:
x.add_row(["ReportID:",record[0]])
x.add_row(["Registered:",str(record[1])])
x.add_row(["ScanID:",record[2]])
x.add_row(["",""])
x.add_row(["Network:",record[3]])
x.add_row(["Network info:",record[4]])
x.add_row(["",""])
x.add_row(["IPaddress:",record[5]])
x.add_row(["Addrtype:",record[6]])
x.add_row(["Hostname:",record[7]])
x.add_row(["Hostname type:",record[8]])
x.add_row(["",""])
x.add_row(["OStype:",record[9]])
x.add_row(["OSvendor:",record[10]])
x.add_row(["OSfamily:",record[11]])
x.add_row(["OSgen:",record[12]])
x.add_row(["OSname:",record[13]])
x.add_row(["",""])
x.add_row(["State:",record[14]])
x.add_row(["State reason:",record[15]])
print x
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_services_details(self,report_id):
"""A function to get a list of services found in a scan report"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT "Prot","Port","State","Reason","Service","Method","Product","Prod.ver","Prod.info" FROM show_services_details WHERE report_id = %s',(report_id,))
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["Port","State","Reason","Service","Method","Product","Prod.ver","Prod.info"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_ports(self,network_list,port_list,service_list,from_timestamp,to_timestamp):
"""A function to get a list of ports"""
try:
self.pg_connect()
if self.cur:
try:
if network_list != None:
network_sql = 'AND (FALSE '
for network in network_list:
network_sql = network_sql + 'OR "IPaddress" <<= \'' + network + '\' '
network_sql = network_sql + ') '
else:
network_sql = ''
if port_list != None:
port_sql = 'AND "Port" IN (' + ','.join(port_list) + ') '
else:
port_sql = ''
if service_list != None:
service_sql = 'AND (FALSE '
for service in service_list:
service_sql = service_sql + 'OR "Service" LIKE \'' + service + '\' '
service_sql = service_sql + ') '
else:
service_sql = ''
self.cur.execute('WITH port_list AS(' +
'SELECT DISTINCT ON ("Port","Prot","IPaddress") ' +
'"IPaddress",' +
'"Port",' +
'"Prot",' +
'"State",' +
'"Service",' +
'"Product",' +
'"Prod.ver",' +
'"Prod.info" ' +
'FROM show_ports ' +
'WHERE registered >= \'' + str(from_timestamp) + '\' AND registered <= \'' + str(to_timestamp) + '\' ' +
network_sql +
port_sql +
service_sql + ')' +
'SELECT DISTINCT ON ("Port","Prot","IPaddress") ' +
'a."IPaddress",' +
'array_to_string(b.hostname,\' \') AS "Hostname",' +
'a."Port",' +
'a."Prot",' +
'a."State",' +
'a."Service",' +
'a."Product",' +
'a."Prod.ver",' +
'a."Prod.info" ' +
'FROM port_list a ' +
'JOIN host_info b ON a."IPaddress" = b.hostaddr ' +
'WHERE b.registered >= \'' + str(from_timestamp) + '\' AND b.registered <= \'' + str(to_timestamp) + '\' '
)
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IPaddress","Hostname","Port","Prot","State","Service","Product","Prod.ver","Prod.info"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_os(self,network_list,os_list,from_timestamp,to_timestamp):
"""A function to get a list og hostnames running an OS"""
try:
self.pg_connect()
if self.cur:
try:
if network_list != None:
network_sql = 'AND (FALSE '
for network in network_list:
network_sql = network_sql + 'OR "Network" <<= \'' + network + '\' '
network_sql = network_sql + ') '
else:
network_sql = ''
if os_list != None:
os_sql = 'AND (FALSE '
for osname in os_list:
os_sql = os_sql + 'OR "OSname" LIKE \'' + osname + '\' '
os_sql = os_sql + ') '
else:
os_sql = ''
fullstmt = 'SELECT DISTINCT ON ("IPaddress") "Registered", "IPaddress", "Hostname",' + '"OSname" ' + 'FROM show_host_details ' + 'WHERE "Registered" >= \'' + str(from_timestamp) + '\' AND "Registered" <= \'' + str(to_timestamp) + '\' ' + network_sql + os_sql + 'ORDER BY "IPaddress"'
self.logs.logger.info(fullstmt)
self.cur.execute(fullstmt)
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IPaddress","Hostname","OSname"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_host_without_hostname(self):
"""A function to get a list of host without a hostname"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT "IPaddress","State","Last registration" FROM show_host_without_hostname')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IPaddress","State","Last registration"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def register_network(self,network_cidr,remarks):
"""A method to register a network_cidr"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT register_network(%s,%s)',(network_cidr,remarks))
self.conn.commit()
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def register_scan_job(self,network_cidr,scan_id,execution_interval,is_active):
"""A method to register a scan job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT register_scan_job(%s,%s,%s,%s)',(network_cidr,scan_id,execution_interval,is_active))
self.conn.commit()
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def get_next_scan_job(self):
"""A method to get the next scan job to run"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_next_scan_job()')
self.conn.commit()
scan_job_id = self.cur.fetchone()[0]
return scan_job_id
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def get_scan_job_args(self,scan_job_id):
"""A method to get the arguments for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_args(%s)',(scan_job_id,))
self.conn.commit()
scan_job_args = self.cur.fetchone()[0]
return scan_job_args
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method get_scan_job_command
# ############################################
def get_scan_job_command(self,scan_job_id):
"""A method to get the scan executable for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_command(%s)',(scan_job_id,))
self.conn.commit()
scan_job_command = self.cur.fetchone()[0]
return scan_job_command
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method get_scan_job_parsemethod
# ############################################
def get_scan_job_parsemethod(self,scan_job_id):
"""A method to get the parse method for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_parsemethod(%s)',(scan_job_id,))
self.conn.commit()
scan_job_parsemethod = self.cur.fetchone()[0]
return scan_job_parsemethod
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def get_scan_job_network(self,scan_job_id):
"""A method to get the network for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_network_addr(%s)',(scan_job_id,))
self.conn.commit()
scan_job_network = self.cur.fetchone()[0]
return scan_job_network
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method get_scan_job_scope
# ############################################
def get_scan_job_scope(self,scan_job_id):
"""A method to get the scan scope for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
query = "SELECT scan_scope FROM scan_job WHERE id=%s"
self.logs.logger.info("SELECT scan_scope FROM scan_job WHERE id=%d",scan_job_id)
self.cur.execute(query, [scan_job_id])
self.conn.commit()
scan_job_scope = self.cur.fetchone()[0]
return scan_job_scope
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def save_scan_report(self,scan_job_id, report, scan_type):
"""A method to save a scan report"""
# self.logs.logger.info("save_scan_report PRE _%s_", scan_type)
sqlstmt = False
if scan_type == 'nmap_default':
# self.logs.logger.info("save_scan_report (nmap_default) _%s_", scan_type)
sqlstmt = 'SELECT save_scan_report_xml(%s,%s)'
elif scan_type == 'testssl':
# self.logs.logger.info("save_scan_report (testssl) _%s_", scan_type)
sqlstmt = 'SELECT save_ssl_report_json(%s,%s)'
else:
self.logs.logger.info("save_scan_report ELSE _%s_", scan_type)
# self.logs.logger.info("save_scan_report (report) _%s_", report)
# self.logs.logger.info("save_scan_report (scan_job_id) _%s_", scan_job_id)
try:
self.pg_connect()
if self.cur and sqlstmt:
try:
# self.logs.logger.info("save_scan_report (sqlstmt) _%s_", sqlstmt)
self.cur.execute(sqlstmt, (scan_job_id, report))
self.conn.commit()
return True
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def expand_network(self,scan_job_network):
"""A method to get all IPs in a network"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT expand_network(%s)',(scan_job_network,))
self.conn.commit()
return self.cur
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def print_results(self,cur,colnames,left_columns):
'''A function to print a table with sql results'''
if self.output_format == 'table':
x = PrettyTable(colnames)
x.padding_width = 1
for column in left_columns:
x.align[column] = "l"
for records in cur:
columns = []
for index in range(len(colnames)):
columns.append(records[index])
x.add_row(columns)
print x.get_string()
print
elif self.output_format == 'csv':
for records in cur:
columns = []
for index in range(len(colnames)):
columns.append(str(records[index]))
print ','.join(columns)
# ############################################
# Method
# ############################################
def show_hosts(self):
"""A function to get a list with the scans defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT "IP-address","Hostname","Last registration", "First registration" FROM show_hosts')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IP-address","Hostname","Last registration", "First registration"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
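    # ############################################
    # Illustrative usage sketch (not part of the original module). The
    # constructor takes a libpq-style DSN string, a logs wrapper exposing a
    # .logger attribute (as used throughout the methods above) and an
    # application name; dsn and logs below are placeholder assumptions.
    #
    #   db = nmap2db_db(dsn, logs, 'nmap2db-cli')
    #   db.output_format = 'csv'
    #   db.show_hosts()
    # ############################################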
| gpl-3.0 | -4,005,357,262,043,700,700 | 32.669154 | 305 | 0.412893 | false |
itohnobue/domanager | build_pkg_mac.py | 1 | 2220 |
from setuptools import setup
import os, shutil, sys
srcPath = os.path.abspath(os.path.join("source"))
sys.path.append(srcPath)
# Remove the build folder
shutil.rmtree("build", ignore_errors=True)
shutil.rmtree("dist", ignore_errors=True)
APP = ['run.py']
DATA_FILES = [os.path.join("source", "domanager", "resources")]
OPTIONS = {'argv_emulation': True,
'iconfile': os.path.join("source", "domanager",
"resources", "main_logo_color.icns"),
'includes': ["domanager", "sip", "PyQt5.QtCore", "PyQt5.QtWidgets", "PyQt5.QtGui"],
"qt_plugins": ["imageformats/*", "platforms/*"],
'excludes': ["numpy", "sqlalchemy", 'h5py', 'cx_Freeze', 'coverage',
'Enginio', 'PyQt5.QtBluetooth', 'PyQt5.QtHelp', 'PyQt5.QtMultimediaWidgets',
'PyQt5.QtWebChannel', 'PyQt5.QtWebEngineWidgets',
'PyQt5.QtPositioning', 'PyQt5.QtQml', 'PyQt5.QtQuick', 'PyQt5.QtQuickWidgets',
'PyQt5.QtSensors', 'PyQt5.QtSerialPort', 'PyQt5.QtWebKitWidgets',
'PyQt5.QtDesigner', 'PyQt5.QtMultimedia', 'PyQt5.QtOpenGL',
'PyQt5.QtSvg', 'PyQt5.QtSql', 'PyQt5.QtXml', 'PyQt5.QtXmlPatterns',
'PyQt5.QtWebKit', 'PyQt5.QtTest', 'PyQt5.QtScript', 'PyQt5.QtScriptTools',
'PyQt5.QtDeclarative', 'PyQt5.QtWebSockets',
'_gtkagg', '_tkagg', 'bsddb', 'curses', 'pywin.debugger',
'pywin.debugger.dbgcon', 'pywin.dialogs', 'tcl', 'test',
'Tkinter', 'xml', 'pywinauto.tests', 'unittest', 'Tkconstants',
'pdb', 'dummy_thread', 'doctest', 'PIL', 'PpmImagePlugin',
'BmpImagePlugin', 'GifImagePlugin', 'GimpGradientFile',
'GimpPaletteFile', 'JpegImagePlugin', 'PngImagePlugin',
'TiffImagePlugin', 'TiffTags', 'Image', 'ImageGrab', 'bz2'],
'plist': {'LSUIElement': True},
}
setup(
name = "DO_Manager",
app=APP,
data_files=DATA_FILES,
options={'py2app': OPTIONS},
setup_requires=['py2app'],
)
| gpl-2.0 | 4,955,059,193,898,313,000 | 44.306122 | 102 | 0.549099 | false |
TunnelBlanket/Spirit | Spirit/Data/User.py | 1 | 1753 | # coding: utf-8
from sqlalchemy import Column, Integer, String, Boolean, Text, text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class User(Base):
__tablename__ = 'users'
Id = Column(Integer, primary_key=True)
Username = Column(String(12, u'utf8mb4_unicode_ci'), nullable=False, unique=True)
Password = Column(String(128, u'utf8mb4_unicode_ci'), nullable=False)
Swid = Column(String(39, u'utf8mb4_unicode_ci'), nullable=False)
LoginKey = Column(String(32, u'utf8mb4_unicode_ci'))
ConfirmationHash = Column(String(128, u'utf8mb4_unicode_ci'))
Avatar = Column(Integer, nullable=False)
AvatarAttributes = Column(String(98, u'utf8mb4_unicode_ci'), nullable=False,server_default=text(
"""'{"spriteScale":100,"spriteSpeed":100,"ignoresBlockLayer":false,"invisible":false,"floating":false}'"""))
Coins = Column(Integer, nullable=False, server_default=text("'10000'"))
Moderator = Column(Boolean, nullable=False, default=False)
Inventory = Column(Text(collation=u'utf8mb4_unicode_ci'), nullable=False)
Color = Column(Integer, nullable=False, server_default=text("'1'"))
Head = Column(Integer, nullable=False, server_default=text("'0'"))
Face = Column(Integer, nullable=False, server_default=text("'0'"))
Neck = Column(Integer, nullable=False, server_default=text("'0'"))
Body = Column(Integer, nullable=False, server_default=text("'0'"))
Hands = Column(Integer, nullable=False, server_default=text("'0'"))
Feet = Column(Integer, nullable=False, server_default=text("'0'"))
Photo = Column(Integer, nullable=False, server_default=text("'0'"))
Pin = Column(Integer, nullable=False, server_default=text("'0'"))
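# Minimal usage sketch (not part of the original file): create the users table
# on a fresh database; the connection URL below is a placeholder assumption.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    engine = create_engine("mysql+pymysql://user:password@localhost/spirit")
    Base.metadata.create_all(engine)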
| gpl-3.0 | 4,836,467,860,719,150,000 | 55.548387 | 113 | 0.705077 | false |
matrix-org/synapse | synapse/push/httppusher.py | 1 | 16725 | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import urllib.parse
from typing import TYPE_CHECKING, Any, Dict, Iterable, Optional, Union
from prometheus_client import Counter
from twisted.internet.error import AlreadyCalled, AlreadyCancelled
from twisted.internet.interfaces import IDelayedCall
from synapse.api.constants import EventTypes
from synapse.events import EventBase
from synapse.logging import opentracing
from synapse.metrics.background_process_metrics import run_as_background_process
from synapse.push import Pusher, PusherConfig, PusherConfigException
from . import push_rule_evaluator, push_tools
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
http_push_processed_counter = Counter(
"synapse_http_httppusher_http_pushes_processed",
"Number of push notifications successfully sent",
)
http_push_failed_counter = Counter(
"synapse_http_httppusher_http_pushes_failed",
"Number of push notifications which failed",
)
http_badges_processed_counter = Counter(
"synapse_http_httppusher_badge_updates_processed",
"Number of badge updates successfully sent",
)
http_badges_failed_counter = Counter(
"synapse_http_httppusher_badge_updates_failed",
"Number of badge updates which failed",
)
class HttpPusher(Pusher):
INITIAL_BACKOFF_SEC = 1 # in seconds because that's what Twisted takes
MAX_BACKOFF_SEC = 60 * 60
# This one's in ms because we compare it against the clock
GIVE_UP_AFTER_MS = 24 * 60 * 60 * 1000
def __init__(self, hs: "HomeServer", pusher_config: PusherConfig):
super().__init__(hs, pusher_config)
self.storage = self.hs.get_storage()
self.app_display_name = pusher_config.app_display_name
self.device_display_name = pusher_config.device_display_name
self.pushkey_ts = pusher_config.ts
self.data = pusher_config.data
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.failing_since = pusher_config.failing_since
self.timed_call = None # type: Optional[IDelayedCall]
self._is_processing = False
self._group_unread_count_by_room = hs.config.push_group_unread_count_by_room
self._pusherpool = hs.get_pusherpool()
self.data = pusher_config.data
if self.data is None:
raise PusherConfigException("'data' key can not be null for HTTP pusher")
self.name = "%s/%s/%s" % (
pusher_config.user_name,
pusher_config.app_id,
pusher_config.pushkey,
)
# Validate that there's a URL and it is of the proper form.
if "url" not in self.data:
raise PusherConfigException("'url' required in data for HTTP pusher")
url = self.data["url"]
if not isinstance(url, str):
raise PusherConfigException("'url' must be a string")
url_parts = urllib.parse.urlparse(url)
# Note that the specification also says the scheme must be HTTPS, but
# it isn't up to the homeserver to verify that.
if url_parts.path != "/_matrix/push/v1/notify":
raise PusherConfigException(
"'url' must have a path of '/_matrix/push/v1/notify'"
)
self.url = url
self.http_client = hs.get_proxied_blacklisted_http_client()
self.data_minus_url = {}
self.data_minus_url.update(self.data)
del self.data_minus_url["url"]
def on_started(self, should_check_for_notifs: bool) -> None:
"""Called when this pusher has been started.
Args:
should_check_for_notifs: Whether we should immediately
check for push to send. Set to False only if it's known there
is nothing to send
"""
if should_check_for_notifs:
self._start_processing()
def on_new_receipts(self, min_stream_id: int, max_stream_id: int) -> None:
# Note that the min here shouldn't be relied upon to be accurate.
# We could check the receipts are actually m.read receipts here,
# but currently that's the only type of receipt anyway...
run_as_background_process("http_pusher.on_new_receipts", self._update_badge)
async def _update_badge(self) -> None:
# XXX as per https://github.com/matrix-org/matrix-doc/issues/2627, this seems
# to be largely redundant. perhaps we can remove it.
badge = await push_tools.get_badge_count(
self.hs.get_datastore(),
self.user_id,
group_by_room=self._group_unread_count_by_room,
)
await self._send_badge(badge)
def on_timer(self) -> None:
self._start_processing()
def on_stop(self) -> None:
if self.timed_call:
try:
self.timed_call.cancel()
except (AlreadyCalled, AlreadyCancelled):
pass
self.timed_call = None
def _start_processing(self) -> None:
if self._is_processing:
return
run_as_background_process("httppush.process", self._process)
async def _process(self) -> None:
# we should never get here if we are already processing
assert not self._is_processing
try:
self._is_processing = True
# if the max ordering changes while we're running _unsafe_process,
# call it again, and so on until we've caught up.
while True:
starting_max_ordering = self.max_stream_ordering
try:
await self._unsafe_process()
except Exception:
logger.exception("Exception processing notifs")
if self.max_stream_ordering == starting_max_ordering:
break
finally:
self._is_processing = False
async def _unsafe_process(self) -> None:
"""
Looks for unset notifications and dispatch them, in order
Never call this directly: use _process which will only allow this to
run once per pusher.
"""
unprocessed = (
await self.store.get_unread_push_actions_for_user_in_range_for_http(
self.user_id, self.last_stream_ordering, self.max_stream_ordering
)
)
logger.info(
"Processing %i unprocessed push actions for %s starting at "
"stream_ordering %s",
len(unprocessed),
self.name,
self.last_stream_ordering,
)
for push_action in unprocessed:
with opentracing.start_active_span(
"http-push",
tags={
"authenticated_entity": self.user_id,
"event_id": push_action["event_id"],
"app_id": self.app_id,
"app_display_name": self.app_display_name,
},
):
processed = await self._process_one(push_action)
if processed:
http_push_processed_counter.inc()
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.last_stream_ordering = push_action["stream_ordering"]
pusher_still_exists = (
await self.store.update_pusher_last_stream_ordering_and_success(
self.app_id,
self.pushkey,
self.user_id,
self.last_stream_ordering,
self.clock.time_msec(),
)
)
if not pusher_still_exists:
# The pusher has been deleted while we were processing, so
# lets just stop and return.
self.on_stop()
return
if self.failing_since:
self.failing_since = None
await self.store.update_pusher_failing_since(
self.app_id, self.pushkey, self.user_id, self.failing_since
)
else:
http_push_failed_counter.inc()
if not self.failing_since:
self.failing_since = self.clock.time_msec()
await self.store.update_pusher_failing_since(
self.app_id, self.pushkey, self.user_id, self.failing_since
)
if (
self.failing_since
and self.failing_since
< self.clock.time_msec() - HttpPusher.GIVE_UP_AFTER_MS
):
# we really only give up so that if the URL gets
# fixed, we don't suddenly deliver a load
# of old notifications.
logger.warning(
"Giving up on a notification to user %s, pushkey %s",
self.user_id,
self.pushkey,
)
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.last_stream_ordering = push_action["stream_ordering"]
await self.store.update_pusher_last_stream_ordering(
self.app_id,
self.pushkey,
self.user_id,
self.last_stream_ordering,
)
self.failing_since = None
await self.store.update_pusher_failing_since(
self.app_id, self.pushkey, self.user_id, self.failing_since
)
else:
logger.info("Push failed: delaying for %ds", self.backoff_delay)
self.timed_call = self.hs.get_reactor().callLater(
self.backoff_delay, self.on_timer
)
self.backoff_delay = min(
self.backoff_delay * 2, self.MAX_BACKOFF_SEC
)
break
async def _process_one(self, push_action: dict) -> bool:
if "notify" not in push_action["actions"]:
return True
tweaks = push_rule_evaluator.tweaks_for_actions(push_action["actions"])
badge = await push_tools.get_badge_count(
self.hs.get_datastore(),
self.user_id,
group_by_room=self._group_unread_count_by_room,
)
event = await self.store.get_event(push_action["event_id"], allow_none=True)
if event is None:
return True # It's been redacted
rejected = await self.dispatch_push(event, tweaks, badge)
if rejected is False:
return False
if isinstance(rejected, (list, tuple)):
for pk in rejected:
if pk != self.pushkey:
# for sanity, we only remove the pushkey if it
# was the one we actually sent...
logger.warning(
("Ignoring rejected pushkey %s because we didn't send it"),
pk,
)
else:
logger.info("Pushkey %s was rejected: removing", pk)
await self._pusherpool.remove_pusher(self.app_id, pk, self.user_id)
return True
async def _build_notification_dict(
self, event: EventBase, tweaks: Dict[str, bool], badge: int
) -> Dict[str, Any]:
priority = "low"
if (
event.type == EventTypes.Encrypted
or tweaks.get("highlight")
or tweaks.get("sound")
):
# HACK send our push as high priority only if it generates a sound, highlight
# or may do so (i.e. is encrypted so has unknown effects).
priority = "high"
# This was checked in the __init__, but mypy doesn't seem to know that.
assert self.data is not None
if self.data.get("format") == "event_id_only":
d = {
"notification": {
"event_id": event.event_id,
"room_id": event.room_id,
"counts": {"unread": badge},
"prio": priority,
"devices": [
{
"app_id": self.app_id,
"pushkey": self.pushkey,
"pushkey_ts": int(self.pushkey_ts / 1000),
"data": self.data_minus_url,
}
],
}
}
return d
ctx = await push_tools.get_context_for_event(self.storage, event, self.user_id)
d = {
"notification": {
"id": event.event_id, # deprecated: remove soon
"event_id": event.event_id,
"room_id": event.room_id,
"type": event.type,
"sender": event.user_id,
"prio": priority,
"counts": {
"unread": badge,
# 'missed_calls': 2
},
"devices": [
{
"app_id": self.app_id,
"pushkey": self.pushkey,
"pushkey_ts": int(self.pushkey_ts / 1000),
"data": self.data_minus_url,
"tweaks": tweaks,
}
],
}
}
if event.type == "m.room.member" and event.is_state():
d["notification"]["membership"] = event.content["membership"]
d["notification"]["user_is_target"] = event.state_key == self.user_id
if self.hs.config.push_include_content and event.content:
d["notification"]["content"] = event.content
# We no longer send aliases separately, instead, we send the human
# readable name of the room, which may be an alias.
if "sender_display_name" in ctx and len(ctx["sender_display_name"]) > 0:
d["notification"]["sender_display_name"] = ctx["sender_display_name"]
if "name" in ctx and len(ctx["name"]) > 0:
d["notification"]["room_name"] = ctx["name"]
return d
async def dispatch_push(
self, event: EventBase, tweaks: Dict[str, bool], badge: int
) -> Union[bool, Iterable[str]]:
notification_dict = await self._build_notification_dict(event, tweaks, badge)
if not notification_dict:
return []
try:
resp = await self.http_client.post_json_get_json(
self.url, notification_dict
)
except Exception as e:
logger.warning(
"Failed to push event %s to %s: %s %s",
event.event_id,
self.name,
type(e),
e,
)
return False
rejected = []
if "rejected" in resp:
rejected = resp["rejected"]
return rejected
async def _send_badge(self, badge):
"""
Args:
badge (int): number of unread messages
"""
logger.debug("Sending updated badge count %d to %s", badge, self.name)
d = {
"notification": {
"id": "",
"type": None,
"sender": "",
"counts": {"unread": badge},
"devices": [
{
"app_id": self.app_id,
"pushkey": self.pushkey,
"pushkey_ts": int(self.pushkey_ts / 1000),
"data": self.data_minus_url,
}
],
}
}
try:
await self.http_client.post_json_get_json(self.url, d)
http_badges_processed_counter.inc()
except Exception as e:
logger.warning(
"Failed to send badge count to %s: %s %s", self.name, type(e), e
)
http_badges_failed_counter.inc()
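# --- Illustrative payload sketch (added for clarity; not part of the original module) ---
# dispatch_push() POSTs a JSON body shaped roughly like the one below to self.url.
# All values here are made-up examples; the exact fields depend on the pusher's
# "format" option and on hs.config.push_include_content.
#
#   {
#     "notification": {
#       "event_id": "$143273582443PhrSn:example.org",
#       "room_id": "!abc123:example.org",
#       "type": "m.room.message",
#       "sender": "@alice:example.org",
#       "prio": "high",
#       "counts": {"unread": 2},
#       "devices": [
#         {"app_id": "im.example.app", "pushkey": "...",
#          "pushkey_ts": 1600000000, "data": {}, "tweaks": {"sound": "default"}}
#       ]
#     }
#   }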
| apache-2.0 | 4,157,670,346,115,659,300 | 37.625866 | 89 | 0.534589 | false |
qguv/config | weechat/plugins/python/imap_status.py | 1 | 5857 | # -*- coding: utf-8 -*-
# Copyright (c) 2009-2015 by xt <[email protected]>
# (this script requires WeeChat 0.4.2 or newer)
#
# History:
# 2019-01-26, nils_2@freenode
# version 0.9: make script python3 compatible
# : remove option "message_color" and "separator_color"
# 2016-05-07, Sebastien Helleu <[email protected]>:
# version 0.8: add options "mailbox_color", "separator", "separator_color",
# remove extra colon in bar item content, use hook_process
# to prevent any freeze in WeeChat >= 1.5
# 2015-01-09, nils_2
# version 0.7: use eval_expression()
# 2010-07-12, TenOfTen
# version 0.6: beautify notification area
# 2010-03-17, xt
# version 0.5: fix caching of return message
# 2010-01-19, xt
# version 0.4: only run check when timer expired
# 2009-11-03, xt
# version 0.3: multiple mailbox support
# 2009-11-02, xt
# version 0.2: remove the imap "client" buffer, just do the unread count
# 2009-06-18, xt <[email protected]>
# version 0.1: initial release.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Usage: put [imap] in your status bar items (or any other bar to your liking):
"/set weechat.bar.status.items".
'''
import imaplib as i
import re
import weechat as w
SCRIPT_NAME = "imap_status"
SCRIPT_AUTHOR = "xt <[email protected]>"
SCRIPT_VERSION = "0.9"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Bar item with unread imap messages count"
WEECHAT_VERSION = 0
IMAP_UNREAD = ''
# script options
settings = {
'username': '',
'password': '',
'hostname': '', # gmail uses imap.gmail.com
'port': '993',
'mailboxes': 'INBOX', # comma separated list of mailboxes (gmail: "Inbox")
'message': '${color:default}Mail: ',
'mailbox_color': 'default',
'separator': '${color:default}, ',
'count_color': 'default',
'interval': '5',
}
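# --- Illustrative configuration (added sketch; not part of the original script) ---
# The options above become WeeChat plugin settings once the script is loaded;
# example values (the hostname and account below are placeholders):
#   /set plugins.var.python.imap_status.hostname "imap.example.org"
#   /set plugins.var.python.imap_status.username "[email protected]"
#   /set plugins.var.python.imap_status.password "secret"
#   /set plugins.var.python.imap_status.mailboxes "INBOX,Lists"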
def string_eval_expression(text):
return w.string_eval_expression(text, {}, {}, {})
class Imap(object):
"""Simple helper class for interfacing with IMAP server."""
iRe = re.compile(br"UNSEEN (\d+)")
conn = False
def __init__(self):
'''Connect and login.'''
username = string_eval_expression(w.config_get_plugin('username'))
password = string_eval_expression(w.config_get_plugin('password'))
hostname = string_eval_expression(w.config_get_plugin('hostname'))
port = int(w.config_get_plugin('port'))
if username and password and hostname and port:
M = i.IMAP4_SSL(hostname, port)
M.login(username, password)
self.conn = M
def unreadCount(self, mailbox='INBOX'):
if self.conn:
unreadCount = int(
self.iRe.search(
self.conn.status(mailbox, "(UNSEEN)")[1][0]).group(1))
return unreadCount
else:
w.prnt('', 'Problem with IMAP connection. Please check settings.')
return 0
def logout(self):
if not self.conn:
return
try:
self.conn.close()
except Exception:
self.conn.logout()
def imap_get_unread(data):
"""Return the unread count."""
imap = Imap()
if not w.config_get_plugin('message'):
output = ""
else:
output = '%s' % (
string_eval_expression(w.config_get_plugin('message')))
any_with_unread = False
mailboxes = w.config_get_plugin('mailboxes').split(',')
count = []
for mailbox in mailboxes:
mailbox = mailbox.strip()
unreadCount = imap.unreadCount(mailbox)
if unreadCount > 0:
any_with_unread = True
count.append('%s%s: %s%s' % (
w.color(w.config_get_plugin('mailbox_color')),
mailbox,
w.color(w.config_get_plugin('count_color')),
unreadCount))
imap.logout()
sep = '%s' % (
string_eval_expression(w.config_get_plugin('separator')))
output = output + sep.join(count) + w.color('reset')
return output if any_with_unread else ''
def imap_item_cb(data, item, window):
return IMAP_UNREAD
def imap_update_content(content):
global IMAP_UNREAD
if content != IMAP_UNREAD:
IMAP_UNREAD = content
w.bar_item_update('imap')
def imap_process_cb(data, command, rc, out, err):
if rc == 0:
imap_update_content(out)
return w.WEECHAT_RC_OK
def imap_timer_cb(data, remaining_calls):
"""Timer callback to update imap bar item."""
if WEECHAT_VERSION >= 0x01050000:
w.hook_process('func:imap_get_unread', 30 * 1000,
'imap_process_cb', '')
else:
imap_update_content(imap_get_unread(None)) # this can block WeeChat!
return w.WEECHAT_RC_OK
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, '', ''):
for option, default_value in settings.items():
if not w.config_is_set_plugin(option):
w.config_set_plugin(option, default_value)
WEECHAT_VERSION = int(w.info_get("version_number", "") or 0)
w.bar_item_new('imap', 'imap_item_cb', '')
imap_timer_cb(None, None)
w.hook_timer(
int(w.config_get_plugin('interval'))*1000*60,
0,
0,
'imap_timer_cb',
'')
| gpl-3.0 | 1,555,020,943,178,162,000 | 30.320856 | 79 | 0.616869 | false |
google/deepvariant | third_party/nucleus/util/sequence_utils.py | 1 | 4197 | # Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for manipulating DNA sequences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Error(Exception):
"""Base error class."""
def _add_lowercase(d):
"""Returns a dictionary with the lowercase keys and values entered."""
retval = d.copy()
retval.update({k.lower(): v.lower() for k, v in d.items()})
return retval
STRICT_DNA_COMPLEMENT_UPPER = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
DNA_COMPLEMENT_UPPER = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
IUPAC_DNA_COMPLEMENT_UPPER = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'R': 'Y', # R is A/G
'Y': 'R', # Y is C/T
'S': 'S', # S is C/G
'W': 'W', # W is A/T
'K': 'M', # K is G/T
'M': 'K', # M is A/C
'B': 'V', # B is C/G/T
'V': 'B', # V is A/C/G
'D': 'H', # D is A/G/T
'H': 'D', # H is A/C/T
'N': 'N', # N is any base
}
IUPAC_TO_CANONICAL_BASES_UPPER = {
'A': ['A'],
'T': ['T'],
'C': ['C'],
'G': ['G'],
'R': ['A', 'G'],
'Y': ['C', 'T'],
'S': ['C', 'G'],
'W': ['A', 'T'],
'K': ['G', 'T'],
'M': ['A', 'C'],
'B': ['C', 'G', 'T'],
'V': ['A', 'C', 'G'],
'D': ['A', 'G', 'T'],
'H': ['A', 'C', 'T'],
'N': ['A', 'C', 'G', 'T'],
}
STRICT_DNA_COMPLEMENT = _add_lowercase(STRICT_DNA_COMPLEMENT_UPPER)
DNA_COMPLEMENT = _add_lowercase(DNA_COMPLEMENT_UPPER)
IUPAC_DNA_COMPLEMENT = _add_lowercase(IUPAC_DNA_COMPLEMENT_UPPER)
STRICT_DNA_BASES_UPPER = frozenset(['A', 'C', 'G', 'T'])
STRICT_DNA_BASES = frozenset(['a', 'c', 'g', 't', 'A', 'C', 'G', 'T'])
DNA_BASES_UPPER = frozenset(['A', 'C', 'G', 'T', 'N'])
DNA_BASES = frozenset(['a', 'c', 'g', 't', 'n', 'A', 'C', 'G', 'T', 'N'])
def reverse_complement(sequence, complement_dict=None):
"""Returns the reverse complement of a DNA sequence.
By default this will successfully reverse complement sequences comprised
solely of A, C, G, and T letters. Other complement dictionaries can be
passed in for more permissive matching.
Args:
sequence: str. The input sequence to reverse complement.
complement_dict: dict[str, str]. The lookup dictionary holding the
complement base pairs.
Returns:
The reverse complement DNA sequence.
Raises:
Error: The sequence contains letters not present in complement_dict.
"""
if complement_dict is None:
complement_dict = STRICT_DNA_COMPLEMENT_UPPER
try:
return ''.join(complement_dict[nt] for nt in reversed(sequence))
except KeyError:
raise Error('Unknown base in {}, cannot reverse complement using {}'.format(
sequence, str(complement_dict)))
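# --- Illustrative usage (added sketch; not part of the original module) ---
# The default table is strict A/C/G/T; pass one of the permissive dictionaries
# above to allow N or IUPAC ambiguity codes.
if __name__ == '__main__':
    assert reverse_complement('AACG') == 'CGTT'
    assert reverse_complement('ACGTN', DNA_COMPLEMENT_UPPER) == 'NACGT'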
| bsd-3-clause | -3,947,212,933,746,330,000 | 33.68595 | 80 | 0.635692 | false |
faircloth-lab/sh_t | sh_t/core.py | 1 | 3611 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2014 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 21 April 2014 20:54 PDT (-0700)
"""
import os
import sys
import glob
import shutil
import argparse
import subprocess
from Bio import AlignIO
import pdb
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
class CreateDir(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# get the full path
d = os.path.abspath(os.path.expanduser(values))
# check to see if directory exists
if os.path.exists(d):
answer = raw_input("[WARNING] Output directory exists, REMOVE [Y/n]? ")
if answer == "Y":
shutil.rmtree(d)
else:
print "[QUIT]"
sys.exit()
# create the new directory
os.makedirs(d)
# return the full path
setattr(namespace, self.dest, d)
class GroupError(Exception):
def __init__(self, message, group, alignment):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
# Now for your custom code...
self.group = group
self.alignment = alignment
def is_dir(dirname):
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
def is_file(filename):
    if not os.path.isfile(filename):
msg = "{0} is not a file".format(filename)
raise argparse.ArgumentTypeError(msg)
else:
return filename
def which(prog):
cmd = ["which", prog]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
if stderr:
        raise EnvironmentError("Program {} does not appear to be installed".format(prog))
else:
return stdout.strip()
def get_alignments(alignment_dir):
alignments = []
for ftype in ('.phylip', '.phy'):
alignments.extend(glob.glob(os.path.join(alignment_dir, "*{}".format(ftype))))
return alignments
def satisfy_one_taxon_group(taxa_in_align, taxon_group):
    # isinstance() never raises, so check the type explicitly rather than via try/except
    if not isinstance(taxon_group, list):
        raise AssertionError("Taxon group is not a list.")
group_set = set(taxon_group)
# ensure there is at least one member in each group
if len(taxa_in_align.intersection(group_set)) >= 1:
return True
else:
return False
def get_taxa_in_alignment(alignment):
aln = AlignIO.read(alignment, "phylip-relaxed")
taxa_in_align = set([taxon.id for taxon in aln])
return taxa_in_align
def satisfy_all_taxon_groups(alignment, taxon_groups):
"""given an input alignment, see if any taxa in list are in file"""
taxa_in_align = get_taxa_in_alignment(alignment)
taxa_present = []
for group_name, taxon_group in taxon_groups.iteritems():
if satisfy_one_taxon_group(taxa_in_align, taxon_group):
taxa_present.append(True)
else:
taxa_present.append(False)
if all(taxa_present):
return True
else:
raise GroupError(
"Not all taxa present in Group",
group_name,
os.path.basename(alignment),
)
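# --- Illustrative usage (added sketch; not part of the original module) ---
# satisfy_one_taxon_group() needs only one member of the group to be present in
# the alignment; the taxon names below are made up for demonstration.
if __name__ == '__main__':
    taxa_in_align = set(['gallus_gallus', 'taeniopygia_guttata'])
    print(satisfy_one_taxon_group(taxa_in_align, ['gallus_gallus', 'homo_sapiens']))  # True
    print(satisfy_one_taxon_group(taxa_in_align, ['homo_sapiens']))                   # False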
| bsd-3-clause | -438,796,830,149,700,200 | 26.356061 | 86 | 0.626696 | false |
ciudadanointeligente/votainteligente-portal-electoral | proposal_subscriptions/migrations/0001_initial.py | 1 | 1395 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-27 21:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import picklefield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SearchSubscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keyword_args', picklefield.fields.PickledObjectField(editable=False)),
('search_params', picklefield.fields.PickledObjectField(editable=False)),
('filter_class_module', models.CharField(max_length=254)),
('filter_class_name', models.CharField(max_length=254)),
('oftenity', models.DurationField()),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('last_run', models.DateTimeField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| gpl-3.0 | -2,690,039,399,188,640,000 | 38.857143 | 118 | 0.62724 | false |
JoshData/django-annotator-store | annotator/views.py | 1 | 4909 | from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseServerError, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseForbidden
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
from django.conf import settings
import json, re
from annotator.models import Document, Annotation
class BaseStorageView(View):
def dispatch(self, request, *args, **kwargs):
# All PUT/POST requests must contain a JSON body. We decode that here and
# interpolate the value into the view argument list.
if request.method in ('PUT', 'POST'):
if not re.match("application/json(; charset=UTF-8)?", request.META['CONTENT_TYPE'], re.I):
return HttpResponseBadRequest("Request must have application/json content type.")
try:
body = json.loads(request.body.decode("utf8"))
except:
return HttpResponseBadRequest("Request body is not JSON.")
if not isinstance(body, dict):
return HttpResponseBadRequest("Request body is not a JSON object.")
# Interpolate the parsed JSON body into the arg list.
args = [body] + list(args)
# All requets return JSON on success, or some other HttpResponse.
try:
ret = super(BaseStorageView, self).dispatch(request, *args, **kwargs)
if isinstance(ret, HttpResponse):
return ret
# DELETE requests, when successful, return a 204 NO CONTENT.
if request.method == 'DELETE':
return HttpResponse(status=204)
ret = json.dumps(ret)
resp = HttpResponse(ret, mimetype="application/json")
resp["Content-Length"] = len(ret)
return resp
except ValueError as e:
return HttpResponseBadRequest(str(e))
except PermissionDenied as e:
return HttpResponseForbidden(str(e))
except ObjectDoesNotExist as e:
return HttpResponseNotFound(str(e))
except Exception as e:
if settings.DEBUG: raise # when debugging, don't trap
return HttpResponseServerError(str(e))
return ret
class Root(BaseStorageView):
http_method_names = ['get']
def get(self, request):
return {
"name": "Django Annotator Store",
"version": "0.0.1",
}
class Index(BaseStorageView):
http_method_names = ['get', 'post']
def get(self, request):
# index. Returns ALL annotation objects. Seems kind of not scalable.
return Annotation.as_list()
def post(self, request, client_data):
# create. Creates an annotation object and returns a 303.
obj = Annotation()
obj.owner = request.user if request.user.is_authenticated() else None
try:
obj.document = Document.objects.get(id=client_data.get("document"))
except:
raise ValueError("Invalid or missing 'document' value passed in annotation data.")
obj.set_guid()
obj.data = "{ }"
obj.update_from_json(client_data)
obj.save()
return obj.as_json(request.user) # Spec wants redirect but warns of browser bugs, so return the object.
class Annot(BaseStorageView):
http_method_names = ['get', 'put', 'delete']
def get(self, request, guid):
# read. Returns the annotation.
obj = Annotation.objects.get(guid=guid) # exception caught by base view
return obj.as_json(request.user)
def put(self, request, client_data, guid):
# update. Updates the annotation.
obj = Annotation.objects.get(guid=guid) # exception caught by base view
if not obj.can_edit(request.user):
raise PermissionDenied("You do not have permission to modify someone else's annotation.")
obj.update_from_json(client_data)
obj.save()
return obj.as_json(request.user) # Spec wants redirect but warns of browser bugs, so return the object.
def delete(self, request, guid):
obj = Annotation.objects.get(guid=guid) # exception caught by base view
if not obj.can_edit(request.user):
raise PermissionDenied("You do not have permission to delete someone else's annotation.")
obj.delete()
return None # response handled by the base view
class Search(BaseStorageView):
http_method_names = ['get']
def get(self, request):
try:
document = Document.objects.get(id=request.GET.get("document"))
except:
raise ValueError("Invalid or missing 'document' value passed in the query string.")
qs = Annotation.objects.filter(document=document)
return {
"total": qs.count(),
"rows": Annotation.as_list(qs=qs, user=request.user)
}
class EditorView(TemplateView):
template_name = 'annotator/editor.html'
def get_context_data(self, **kwargs):
context = super(EditorView, self).get_context_data(**kwargs)
context['storage_api_base_url'] = reverse('annotator.root')[0:-1] # chop off trailing slash
context['document'] = get_object_or_404(Document, id=kwargs['doc_id'])
return context
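# --- Illustrative request sketch (added for clarity; not part of the original module) ---
# The storage views above exchange JSON. The concrete URL paths are assumptions
# (they depend on the project's urls.py), but an interaction could look like:
#
#   POST <root>/annotations        body: {"document": 1, "text": "a note", "ranges": [...]}
#   GET  <root>/search?document=1  ->    {"total": 1, "rows": [...]}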
| unlicense | -49,917,196,215,574,370 | 34.316547 | 130 | 0.727032 | false |
bbxyard/bbxyard | yard/skills/36-spider/spider-so/stackoverflow/spiders/stackoverflow_spider.py | 1 | 2417 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import scrapy
from stackoverflow.spiders.items import StackoverflowItem
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('monitor')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('monitor.log')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
class StackoverflowSpider(scrapy.Spider):
name = "stackoverflow"
def __init__(self):
self.count = 1
def start_requests(self):
_url = 'https://stackoverflow.com/questions?page={page}&sort=votes&pagesize=50'
urls = [_url.format(page=page) for page in range(1, 100001)]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
nodeList = response.xpath('//*[@id="questions"]/*[@class="question-summary"]')
for sel in nodeList:
self.count += 1
if self.count % 100 == 0:
logger.info(self.count)
item = StackoverflowItem()
item['votes'] = sel.xpath('./*/div[@class="stats"]/div[@class="vote"]/div[@class="votes"]/span/strong/text()').extract()[0]
item['answers'] = sel.xpath('./*/div[@class="stats"]/div[2]/strong/text()').extract()[0]
item['views'] = sel.xpath('./*/div[@class="views supernova"]/@title').extract()[0].split()[0].replace(',','')
item['questions'] = sel.xpath('./div[@class="summary"]/*/a[@class="question-hyperlink"]/text()').extract()[0]
item['links'] = sel.xpath('./div[@class="summary"]/*/a[@class="question-hyperlink"]/@href').extract()[0]
item['tags'] = sel.xpath('./div[@class="summary"]/div[2]/a/text()').extract()
yield item
# item[''] = sel.xpath('//div[@class="votes"]/span/strong/text()').extract()
# for index in range(1, 51):
# sel = response.xpath('//*[@id="questions"]/div[{index}]'.format(index=index))
# item = StackoverflowItem()
# item['votes'] = sel.xpath(
# 'div[1]/div[2]/div[1]/div[1]/span/strong/text()').extract()
# item['answers'] = sel.xpath(
# 'div[1]/div[2]/div[2]/strong/text()').extract()
# item['links'] = "".join(
# sel.xpath('div[2]/h3/a/@href').extract()).split("/")[2]
| apache-2.0 | 7,355,543,177,940,404,000 | 36.184615 | 135 | 0.570542 | false |
hying-caritas/ibsuite | ibpy/ibpy/assembler.py | 1 | 9511 | #
# Copyright 2008 Huang Ying <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
import os
import Image, ImageDraw
from image import PageImageRef
from divide import HLine, PageSpace, SegPart
from util import *
class Assembler(object):
def __init__(self, config):
object.__init__(self)
self.config = config
self.img = Image.new("L", config.out_size)
self.draw = ImageDraw.Draw(self.img)
self.fill_img()
self.segs = []
self.opimg_refs = []
def fill_img(self):
ow, oh = self.img.size
self.img.paste(255, [0, 0, ow, oh])
def output_img(self, oi):
def real_output_img():
if not config.crop:
opimg = self.img.copy()
else:
rt = max(ow * 3/4, rt_ink) + config.margin
btm = max(oh * 3/4, btm_ink) + config.margin
opimg = self.img.crop((0, 0, rt, btm)).copy()
opimg_ref = PageImageRef(page.page_no, page.out_no, opimg)
self.opimg_refs.append(opimg_ref)
page.out_no = page.out_no + 1
self.fill_img()
rt_ink = 0
btm_ink = 0
config = self.config
ow, oh = config.out_size
seg = self.segs[0]
page = seg.get_page()
sh = seg.pheight()
rh = oh - 2 * config.opedge_ex
if sh >= rh:
poverlap = nround(config.overlap * rh)
sy = 0
remain = sh
while remain >= rh:
h = min(remain, rh)
crop = seg.get_img(sy, nceil(h))
txo = nround(seg.pout_x)
self.img.paste(crop, (txo, 0))
rt_ink = txo + crop.size[0]
btm_ink = crop.size[1]
real_output_img()
if remain == h:
remain = 0
break
remain = remain - h + poverlap
sy = sy + h - poverlap
del self.segs[0]
if remain != 0:
self.segs.insert(0, SegPart(seg, sy))
return
y = config.margin
for i in range(oi):
seg = self.segs[i]
page = seg.get_page()
y = y + seg.pout_bl
ny = nround(y)
if isinstance(seg, HLine):
self.draw.line(((0, ny), (ow, ny)), fill = 0)
y = y + 1
elif isinstance(seg, PageSpace):
self.draw.line(((0, ny), (ow, ny)), fill = 0)
y = y + 1 + seg.pout_bl
else:
sh = seg.pheight()
crop1 = seg.get_img(0, nceil(sh))
(porgx, porgy) = seg.porg()
pxo = min(porgx, seg.pout_x)
pyo = min(porgy, seg.pout_bl/2.)
pxo = nround(pxo)
pyo = nround(pyo)
szx, szy = crop1.size
if porgx != pxo or porgy != pyo:
crop = crop1.crop((porgx - pxo, porgy - pyo, szx, szy))
else:
crop = crop1
txo = nround(seg.pout_x) - pxo
tyo = ny - pyo
self.img.paste(crop, (txo, tyo))
rt_ink = max(txo + crop.size[0], rt_ink)
btm_ink = max(tyo + crop.size[1], btm_ink)
y = y + sh
real_output_img()
del self.segs[0:oi]
def kick_page(self):
while len(self.segs) != 0 and \
isinstance(self.segs[0], PageSpace):
del self.segs[0]
if len(self.segs) == 0:
return
page = self.segs[0].get_page()
config = self.config
self.segs[0].pout_bl = min((self.segs[0].pout_bl+1)/2, config.opedge_ex)
oh = config.out_size[1]
oh34 = oh * 3 / 4
h = config.margin * 2 + config.opedge_ex
nl = -1
nh = -1
for i, seg in enumerate(self.segs):
t = h + seg.pout_bl + seg.pheight()
if nl == -1 and t > oh34:
nl = i
if t > oh:
nh = i
break
h = t
if nh == -1:
return
if nl == nh:
if nh == 0:
nh = nh + 1
self.output_img(nh)
return self.kick_page()
oi = -1
oal = 0
for i in range(nl, nh):
seg = self.segs[i]
nseg = self.segs[i+1]
if seg.is_line_end():
al = nseg.pout_bl
if al >= oal:
oi = i
oal = al
if oi == -1:
oi = nh - 1
self.output_img(oi+1)
self.kick_page()
def flush_page(self):
self.kick_page()
n = len(self.segs)
if n > 0:
self.output_img(n)
def start_page(self, page):
self.page = page
self.page.out_no = 0
def put_hline(self, pil):
hl = HLine(self.page, pil)
self.segs.append(hl)
def put_seg(self, seg, il):
config = self.config
page = self.page
px = self.page.norm2opxl(seg.out_x)
seg.pout_x = px + self.config.margin + config.opedge_ex
seg.pout_bl = self.page.norm2opxl(il)
self.segs.append(seg)
def end_page(self):
if self.config.run_pages:
oh = self.config.out_size[1]
ps = PageSpace(self.page, oh/20)
self.segs.append(ps)
self.kick_page()
else:
self.flush_page()
def assemble(self, segs):
if len(segs) == 0:
return []
page = segs[0].get_page()
self.start_page(page)
for seg in segs:
if isinstance(seg, HLine):
self.put_hline(seg.pout_bl)
else:
self.put_seg(seg, seg.out_bl)
self.end_page()
opimg_refs = self.opimg_refs
self.opimg_refs = []
return opimg_refs
def end(self):
self.flush_page()
opimg_refs = self.opimg_refs
self.opimg_refs = []
return opimg_refs
class SimpleAssembler(object):
def __init__(self, config):
object.__init__(self)
self.config = config
self.img = Image.new("L", config.out_size)
self.draw = ImageDraw.Draw(self.img)
self.fill_img()
self.segs = []
def fill_img(self):
ow, oh = self.img.size
self.img.paste(255, [0, 0, ow, oh])
def output_img(self, seg):
config = self.config
oc = config.out_center
ow, oh = config.out_size
owp = ow - config.margin * 2
ohp = oh - config.margin * 2
img = seg.get_img(0, seg.pheight())
iw, ih = img.size
if iw > owp or ih > ohp:
nw = owp
nh = nround(float(ih) / iw * nw)
if nh > ohp:
nh = ohp
nw = nround(float(iw) / ih * nh)
img = img.resize((nw, nh), Image.ANTIALIAS)
iw, ih = img.size
if oc:
oleft = (ow - iw) / 2
otop = (oh - ih) / 2
else:
oleft, otop = (config.margin, config.margin)
opimg = self.img.copy()
opimg.paste(img, (oleft, otop))
page = seg.get_page()
opimg_ref = PageImageRef(page.page_no, 0, opimg)
return opimg_ref
def assemble(self, segs):
opimg_refs = []
for seg in segs:
opimg_ref = self.output_img(seg)
opimg_refs.append(opimg_ref)
return opimg_refs
def end(self):
return []
class CropAssembler(object):
def __init__(self, config):
object.__init__(self)
self.config = config
self.img = Image.new("L", config.out_size)
self.draw = ImageDraw.Draw(self.img)
self.fill_img()
self.segs = []
self.right_align = config.right_align
self.min_out_width_in = config.min_out_width_in
self.min_out_height_in = config.min_out_height_in
def fill_img(self):
ow, oh = self.img.size
self.img.paste(255, [0, 0, ow, oh])
def output_img(self, seg):
img = seg.get_img(0, seg.pheight())
page = seg.get_page()
iw, ih = img.size
ow, oh = page.out_size
mow = nround(ow * self.min_out_width_in)
moh = nround(oh * self.min_out_height_in)
if iw < mow or ih < moh:
mow = max(mow, iw)
moh = max(moh, ih)
nimg = Image.new("L", (mow, moh))
nimg.paste(255, [0, 0, mow, moh])
if self.right_align:
oleft = mow - iw
else:
oleft = 0
nimg.paste(img, (oleft, 0))
img = nimg
else:
img = img.copy()
opimg_ref = PageImageRef(page.page_no, 0, img)
return opimg_ref
def assemble(self, segs):
opimg_refs = []
for seg in segs:
opimg_ref = self.output_img(seg)
opimg_refs.append(opimg_ref)
return opimg_refs
def end(self):
return []
def create_assembler(config):
if config.assembler == 'simple':
return SimpleAssembler(config)
elif config.assembler == 'crop':
return CropAssembler(config)
else:
return Assembler(config)
| gpl-2.0 | -6,730,597,192,314,136,000 | 31.35034 | 80 | 0.478604 | false |
iJebus/CITS4406-Assignment2 | data.py | 1 | 8727 | """Reads CSV file for information, provides basic cleaning of data and then
runs analysis on said data."""
import csv
import re
from collections import Counter
from statistics import mean, mode, median_low, median, median_high, \
StatisticsError, Decimal
# Config
threshold = 0.9
invalid_values = ['-', '*', '_']
re_float = re.compile('^\d*?\.\d+$')
re_int = re.compile('^[1-9]\d*$')
class Analyser(object):
"""Base analysis class object. Initiate the object, and assigns the
statistical mode, if any.
Class variables:
mode -- Returns the mode of the column analysed.
Child Classes and associated variables:
StringAnalyser -- String column analysis.
EnumAnalyser -- Enumerated column analysis.
NumericalAnalyser - String/Float column analysis.
min -- Minimum value in column values.
max -- Maximum value in column values.
mean -- Mean value in column values.
median_low -- Low median for column values.
median -- Median value for column values.
median_high -- High median for column values.
"""
def __init__(self, values):
try:
self.mode = mode(values)
except StatisticsError:
self.mode = 'N/A'
class StringAnalyser(Analyser):
"""Run string analysis."""
def __init__(self, values):
super().__init__(values)
# TODO Implement some string exclusive statistics.
class EnumAnalyser(Analyser):
"""Run enumeration analysis."""
def __init__(self, values):
super().__init__(values)
# TODO Implement some enum exclusive statistics.
class NumericalAnalyser(Analyser):
"""Runs numeric analysis."""
def __init__(self, values):
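        # values arrive from the CSV as strings; eval() turns them back into
        # int/float so the statistics functions below can operate on numbers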
values = [eval(i) for i in values]
super().__init__(values)
self.min = min(values)
self.max = max(values)
self.mean = Decimal(mean(values)).quantize(Decimal('.00000'))
self.median_low = median_low(values)
self.median = median(values)
self.median_high = median_high(values)
class Column(object):
"""Object to hold data from each column within the provided CSV file.
Methods:
change_misc_values -- Removes misc/unclear values from column
values.
drop_greater_than -- Removes '<', '>' from column values.
define_most_common -- Sets object variable to hold 15 most common values
for that column.
define_type -- Sets object variable to type (e.g., String) according
to column values.
Variables:
most_common -- <= 15 most common results within the column values.
empty -- Boolean value of whether the column holds values or not.
header -- Column header/title.
type -- The type of data in column, e.g., String, Float, Integer,
Enumerated.
values -- List of CSV values for the column.
analysis -- Analysis object associated with this column.
outliers -- List of values in column but outside threshold of column type.
"""
def __init__(self, header=''):
self.most_common = []
self.empty = False
self.header = header
self.type = ''
self.values = []
self.analysis = None
self.outliers = []
# Todo: Does initialising as None even make sense?
def change_misc_values(self):
"""
        Replaces identified values of unclear meaning or inexact value (e.g.,
        '-') with an agreed value.
"""
for index, value in enumerate(self.values):
if value in invalid_values:
self.values[index] = ''
def drop_greater_than(self):
pass
# Todo: Implement method to handle (strip?) '<', '>'.
def define_most_common(self):
"""Set 15 most common results to class variable, and set object variable
empty if appropriate.
"""
self.most_common = Counter(self.values).most_common(15)
if self.most_common[0][0] == '' \
and self.most_common[0][1] / len(self.values) >= threshold:
self.empty = True
def define_type(self):
"""Run column data against regex filters and assign object variable type
as appropriate.
"""
float_count = 0
int_count = 0
boolean = ['true', 'false']
# Todo: Define date type.
for value in self.values:
if re_float.match(value):
float_count += 1
elif re_int.match(value):
int_count += 1
if float_count / len(self.values) >= threshold:
self.type = 'Float'
elif int_count / len(self.values) >= threshold:
self.type = 'Integer'
elif len(self.most_common) <= 2:
if self.most_common[0][0].lower() in boolean:
self.type = 'Bool'
elif len(self.most_common) < 10:
self.type = 'Enum'
else:
self.type = 'String'
def define_outliers(self):
if self.type == 'Float':
for value in self.values:
if not re_float.match(value):
self.outliers.append(value)
elif self.type == 'Integer':
for value in self.values:
if not re_int.match(value):
self.outliers.append(value)
class Data(object):
"""Main store for CSV data, reading the data from the CSV file and then
assigning out to relevant variables.
Methods:
read -- Reads the CSV file and outputs to raw_data variable.
remove_invalid -- Reads from raw_data variable and assigns rows to
valid_rows or invalid_rows according to their length.
    create_columns -- Creates column objects according to valid_rows, assigning
column header and column values.
clean -- Calls column cleaning methods to run 'cleaning' on all columns.
analyse -- Calls column analysis methods to run 'analysis' on all columns.
Variables:
columns -- List of column objects.
headers -- List of column headers.
invalid_rows -- List of invalid rows (i.e., more or less columns than
number of headers).
raw_data -- List of raw CSV data as rows.
valid_rows -- List of valid rows (i.e., same number of columns as headers).
"""
def __init__(self, csv_file):
self.columns = []
self.headers = []
self.invalid_rows = []
self.raw_data = []
self.valid_rows = []
self.read(csv_file)
self.remove_invalid()
self.create_columns()
def read(self, csv_file):
"""Opens and reads the CSV file, line by line, to raw_data variable."""
f = csv.reader(open(csv_file))
for row in f:
self.raw_data.append(row)
def remove_invalid(self):
"""For each row in raw_data variable, checks row length and appends to
valid_rows variable if same length as headers, else appends to
invalid_rows variable.
"""
for index, row in enumerate(self.raw_data):
if len(row) != len(self.raw_data[0]):
self.invalid_rows.append([index + 1, row])
else:
self.valid_rows.append(row)
def create_columns(self):
"""For each row in raw_data variable, assigns the first value to the
headers variable and creates a Column object with that header provided.
Then removes header row from valid_rows. (Todo: Maybe can read straight
from valid rows? Why/Why not?). Then for each row in valid_rows,
populates relevant column object with row data.
"""
for value in self.raw_data[0]:
self.columns.append(Column(header=value))
self.headers.append(value)
self.valid_rows.pop(0)
for row in self.valid_rows:
for index, value in enumerate(row):
self.columns[index].values.append(value)
def clean(self):
"""Calls cleaning methods on all columns."""
for column in self.columns:
column.change_misc_values()
column.drop_greater_than()
def analyse(self):
"""Calls analysis methods on all columns, checking if they are empty
first.
"""
analysers = {'String': StringAnalyser, 'Integer': NumericalAnalyser,
'Float': NumericalAnalyser, 'Enum': EnumAnalyser}
for column in self.columns:
column.define_most_common()
if not column.empty:
column.define_type()
column.define_outliers()
if column.type in analysers:
column.analysis = analysers[column.type](column.values)
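# --- Illustrative usage (added sketch; not part of the original module) ---
# Typical call order is construct -> clean() -> analyse(); 'example.csv' is a
# placeholder path.
if __name__ == '__main__':
    data = Data('example.csv')
    data.clean()
    data.analyse()
    for column in data.columns:
        print(column.header, column.type, column.analysis)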
| mit | -7,077,307,649,676,576,000 | 34.620408 | 81 | 0.595623 | false |
septag/termite | scripts/texture-tools/etc2pack.py | 1 | 9820 | import os
import sys
import subprocess
import shutil
import optparse
import lz4.block
import json
import hashlib
import traceback
import timeit
import tempfile
from PIL import Image
ARG_InputFile = ''
ARG_ListFile = ''
ARG_OutputDir = '.'
ARG_Encoder = 'etc2_alpha'
ARG_Quality = 'normal'
ARG_FixImageSizeModulo = 4
C_TexturePackerPath = 'TexturePacker'
C_EtcToolPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'EtcTool')
gFileHashes = {} # key: filepath, value: sha1
gProcessedFileCount = 0
def readListFile():
global ARG_ListFile
with open(ARG_ListFile) as f:
lines = f.readlines()
f.close()
return tuple([l.strip() for l in lines])
def readHashFile():
global ARG_ListFile
global gFileHashes
hashFilepath = ARG_ListFile + '.sha1'
if not os.path.isfile(hashFilepath):
return
with open(hashFilepath) as f:
lines = f.readlines()
stripLines = [l.strip() for l in lines]
for l in stripLines:
key, value = l.split(';', 1)
gFileHashes[key] = value
f.close()
def writeHashFile():
global ARG_ListFile
global gFileHashes
with open(ARG_ListFile + '.sha1', 'w') as f:
for key, value in gFileHashes.items():
f.write(key + ';' + value + '\n')
f.close()
def compressLz4(filepath):
with open(filepath, 'rb') as f:
srcData = f.read()
srcDataLen = len(srcData)
f.close()
compressed = lz4.block.compress(srcData, mode='high_compression', compression=9, store_size=True)
os.remove(filepath)
with open(filepath + '.lz4', 'wb') as f:
f.write(compressed)
f.close()
compressedLen = len(compressed)
print('\tLZ4 compressed (%dkb -> %dkb), Ratio: %.1f' % (srcDataLen/1024, compressedLen/1024,
srcDataLen/compressedLen))
def encodeEtc2(filepath):
global ARG_OutputDir, ARG_Quality, ARG_Encoder, ARG_ListFile, ARG_FixImageSizeModulo
global C_EtcToolPath
global gFileHashes, gProcessedFileCount
if not os.path.isfile(filepath):
print("Image file '%s' does not exist" % filepath)
return False
filedir = os.path.dirname(filepath)
destdir = os.path.join(ARG_OutputDir, filedir)
if not os.path.isdir(destdir):
os.makedirs(destdir, exist_ok=True)
# Check source file hash with the data we cached
# If file didn't change, return immediately
if ARG_ListFile:
sha1 = hashlib.sha1()
sha1.update(open(filepath, 'rb').read())
hashVal = sha1.hexdigest()
if filepath in gFileHashes and gFileHashes[filepath] == hashVal:
return True
tpFmt = ''
if ARG_Encoder == 'etc2':
tpFmt = 'RGB8'
elif ARG_Encoder == 'etc2_alpha':
tpFmt = 'RGBA8'
tpQuality = ''
if ARG_Quality == 'low':
tpQuality = ['-effort', '30']
elif ARG_Quality == 'normal':
tpQuality = ['-effort', '60']
elif ARG_Quality == 'high':
tpQuality = ['-effort', '100']
filename, fileext = os.path.splitext(filepath)
outputFilepath = os.path.join(destdir, os.path.basename(filename)) + '.ktx'
print(filepath + ' -> ' + os.path.relpath(outputFilepath, ARG_OutputDir))
modifiedFilepath = filepath
# check if have a json file with the same name (TexturePacker spritesheet)
# then change it's size in the json too, or just copy the file to target path
spritesheet_filepath = filename + '.json'
if (os.path.isfile(spritesheet_filepath)):
jdata = json.load(open(spritesheet_filepath))
else:
jdata = None
# Open the image file, check the size to be a modulo of the argument
if (ARG_FixImageSizeModulo != 0):
img = Image.open(filepath)
width, height = img.size
if (width % ARG_FixImageSizeModulo != 0 or height % ARG_FixImageSizeModulo != 0):
prevWidth = width
prevHeight = height
if (width % ARG_FixImageSizeModulo != 0):
width = width + (ARG_FixImageSizeModulo - (width % ARG_FixImageSizeModulo))
if (height % ARG_FixImageSizeModulo != 0):
height = height + (ARG_FixImageSizeModulo - (height % ARG_FixImageSizeModulo))
print('\tFixing size (%d, %d) -> (%d, %d)' % (prevWidth, prevHeight, width, height))
tmpImageFilepath = os.path.join(tempfile.gettempdir(), os.path.basename(filename)) + fileext
newImage = Image.new('RGBA', (width, height))
newImage.paste(img)
newImage.save(tmpImageFilepath, fileext[1:])
modifiedFilepath = tmpImageFilepath
# modify image size inside the spritesheet 'meta' tag
if jdata:
jdata['meta']['size']['w'] = width
jdata['meta']['size']['h'] = height
# trim/modify spritesheet json data for the image, and put them into target
if jdata:
json_filepath = os.path.join(destdir, os.path.basename(filename)) + '.json'
with open(json_filepath, 'w', encoding='utf8') as f:
f.write(json.dumps(jdata, sort_keys=False))
f.close()
print('\t' + spritesheet_filepath + ' -> ' + os.path.relpath(json_filepath, ARG_OutputDir))
# ETC2 convert the file
args = [C_EtcToolPath, modifiedFilepath, '-j', '4']
if tpFmt:
args.extend(['-format', tpFmt])
if tpQuality:
args.extend(tpQuality)
args.extend(['-errormetric', 'rec709'])
#args.extend(['-m', '2'])
args.extend(['-output', outputFilepath])
r = subprocess.call(args)
if r == 0:
compressLz4(outputFilepath)
if ARG_ListFile:
gFileHashes[filepath] = hashVal
gProcessedFileCount = gProcessedFileCount + 1
if modifiedFilepath != filepath:
os.remove(modifiedFilepath)
return (r == 0)
def encodeWithTexturePacker(filepath):
global ARG_OutputDir
global C_TexturePackerPath
filename, fileext = os.path.splitext(filepath)
outputFilepath = os.path.join(ARG_OutputDir, os.path.basename(filename)) + '.json'
args = [C_TexturePackerPath, '--data', outputFilepath, filepath]
r = subprocess.call(args)
if r == 0:
# read json and extract output file
jdata = json.load(open(outputFilepath))
imgfile = jdata['meta']['image']
imgdir = os.path.dirname(outputFilepath)
imgFilepath = os.path.join(imgdir, imgfile)
res = encodeEtc2(imgFilepath)
os.remove(imgFilepath)
return res
else:
return False
def encodeFile(filepath):
# determine the file type (TexturePacker or plain image)
filename, fileext = os.path.splitext(filepath)
if fileext == '.tps':
return encodeWithTexturePacker(filepath)
if fileext == '.png' or fileext == '.jpg':
return encodeEtc2(filepath)
else:
return False
def main():
global ARG_ListFile, ARG_Quality, ARG_Encoder, ARG_OutputDir, ARG_InputFile, ARG_FixImageSizeModulo
global gProcessedFileCount
cmdParser = optparse.OptionParser()
cmdParser.add_option('--file', action='store', type='string', dest='ARG_InputFile',
help = 'Input image file', default=ARG_InputFile)
cmdParser.add_option('--listfile', action='store', type='string', dest='ARG_ListFile',
help = 'Text file which lists input image files', default=ARG_ListFile)
cmdParser.add_option('--outdir', action='store', type='string', dest='ARG_OutputDir',
help = 'Output file(s) directory', default=ARG_OutputDir)
cmdParser.add_option('--enc', action='store', type='choice', dest='ARG_Encoder',
choices=['etc2', 'etc2_alpha'], help = 'Choose encoder', default=ARG_Encoder)
cmdParser.add_option('--quality', action='store', type='choice', dest='ARG_Quality',
choices = ['low', 'normal', 'high'], help = '', default=ARG_Quality)
cmdParser.add_option('--msize', action='store', type='int', dest='ARG_FixImageSizeModulo',
default=4, help='Fix output image size to be a multiply of specified argument')
cmdParser.add_option('--exclude-hd', action='store_true', default=False, dest='ARG_ExcludeHD')
(options, args) = cmdParser.parse_args()
if options.ARG_InputFile:
ARG_InputFile = os.path.abspath(options.ARG_InputFile)
if options.ARG_ListFile:
ARG_ListFile = os.path.abspath(options.ARG_ListFile)
ARG_OutputDir = os.path.abspath(options.ARG_OutputDir)
ARG_Encoder = options.ARG_Encoder
ARG_Quality = options.ARG_Quality
ARG_FixImageSizeModulo = options.ARG_FixImageSizeModulo
if not ARG_InputFile and not ARG_ListFile:
raise Exception('Must provide either --file or --listfile arguments. See --help')
if not os.path.isdir(ARG_OutputDir):
raise Exception(ARG_OutputDir + ' is not a valid directory')
startTm = timeit.default_timer()
if ARG_ListFile:
readHashFile()
files = readListFile()
        # When --exclude-hd is given, skip images that have an '-sd' counterpart and encode only the rest
if (options.ARG_ExcludeHD):
for f in files:
(first_part, ext) = os.path.splitext(f)
sd_version = first_part + '-sd' + ext
if not os.path.isfile(sd_version):
encodeFile(os.path.normpath(f))
else:
for f in files:
encodeFile(os.path.normpath(f))
writeHashFile()
elif ARG_InputFile:
encodeFile(ARG_InputFile)
print('Total %d file(s) processed' % gProcessedFileCount)
print('Took %.3f secs' % (timeit.default_timer() - startTm))
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Error:')
print(e)
print('CallStack:')
traceback.print_exc(file=sys.stdout)
except:
raise
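# --- Illustrative invocations (added sketch; not part of the original script) ---
# Example command lines; the paths and file names are placeholders:
#   python etc2pack.py --file sprites/hero.png --outdir build/assets --enc etc2_alpha --quality high
#   python etc2pack.py --listfile textures.txt --outdir build/assets --msize 4 --exclude-hd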
| bsd-2-clause | -2,274,651,020,613,943,800 | 34.839416 | 104 | 0.626782 | false |
dneiter/exabgp | lib/exabgp/configuration/experimental/__init__.py | 2 | 3727 | # encoding: utf-8
"""
registry.py
Created by Thomas Mangin on 2014-06-22.
Copyright (c) 2014-2015 Exa Networks. All rights reserved.
"""
from exabgp.configuration.experimental.engine.raised import Raised
from exabgp.configuration.experimental.engine.section import Section
from exabgp.configuration.experimental.engine.reader import Reader
from exabgp.configuration.experimental.engine.tokeniser import Tokeniser
from collections import defaultdict
from exabgp.util.dictionary import Dictionary
from StringIO import StringIO
import time
# ===================================================================== Registry
# The class where all configuration callback are registered to
class Configuration (object):
def __init__ (self):
self.stack = []
self._klass = {}
self._handler = {}
self._parser = None
self.section = None
# self.location set by Registry
def register (self, cls, location):
cls.register(self,location)
def register_class (self, cls):
print
print "class %s" % cls.__name__
print "-"*40
if not cls in self._klass:
self._klass[cls] = cls()
print
def register_hook (self, cls, action, position, function):
key = '/'.join(position)
if action in self._handler:
raise Exception('conflicting handlers')
self._handler.setdefault(key,{})[action] = getattr(cls,function)
print "%-50s %-7s %s.%s" % (key if key else 'root',action,cls.__name__,function)
def iterate (self, tokeniser):
        # each section can register named configuration for reference here
Section.configuration[tokeniser.name] = defaultdict(Dictionary)
def run (search, section, location):
key = '/'.join(search)
function = self._handler.get(key,{}).get(section,None)
if function:
print 'hit %s/%s' % (key,section)
instance = self._klass.setdefault(function.im_class,function.im_class())
instance.location = location
return function(instance,tokeniser) is None
return False
while True:
token = tokeniser()
if not token: break
location = self.stack+[token,]
# if we have both a section and a action, try the action first
if run(location,'action',location):
yield None
continue
if run(location,'enter',self.stack):
self.stack.append(token)
yield None
continue
if token != '}':
print
print 'Available paths are .....'
print
for path in sorted(self._handler):
for action in sorted(self._handler[path]):
print '/%-50s %s' % (path,action)
print '....'
print
print '/'.join(location)
# we need the line and position at this level
raise Raised(tokeniser,'no parser for the location /%s' % ('/'.join(location)))
if run(self.stack,'exit',self.stack[:-1]):
self.stack.pop()
yield None
continue
# we need the line and position at this level
raise Exception('application error, no exit code registered for %s, please report with your configuration' % '/'.join(self.stack))
data = Section.configuration[tokeniser.name]
del Section.configuration[tokeniser.name]
yield data
def parse_tokeniser (self, tokeniser):
if self._parser is None:
self._parser = self.iterate(tokeniser)
next = self._parser.next()
if next is None:
return None
self._parser = None
return next
def parse_file (self, fname):
with Reader(fname) as r:
tokeniser = Tokeniser('configuration',r)
parsed = None
while parsed is None:
parsed = self.parse_tokeniser(tokeniser)
return parsed
def parse_string (self, string):
name = 'command-%d' % int(int(time.time()*1000) % (365*24*60*60*1000))
sio = StringIO(string)
tokeniser = Tokeniser(name,sio)
parsed = None
while parsed is None:
parsed = self.parse_tokeniser(tokeniser)
return parsed
| bsd-3-clause | 6,378,954,307,960,287,000 | 26.813433 | 133 | 0.681782 | false |
igraph/python-igraph | tests/test_edgeseq.py | 1 | 16063 | # vim:ts=4 sw=4 sts=4:
import unittest
from igraph import *
from .utils import is_pypy
try:
import numpy as np
except ImportError:
np = None
class EdgeTests(unittest.TestCase):
def setUp(self):
self.g = Graph.Full(10)
def testHash(self):
data = {}
n = self.g.ecount()
for i in range(n):
code1 = hash(self.g.es[i])
code2 = hash(self.g.es[i])
self.assertEqual(code1, code2)
data[self.g.es[i]] = i
for i in range(n):
self.assertEqual(i, data[self.g.es[i]])
def testRichCompare(self):
idxs = [2, 5, 9, 13, 42]
g2 = Graph.Full(10)
for i in idxs:
for j in idxs:
self.assertEqual(i == j, self.g.es[i] == self.g.es[j])
self.assertEqual(i != j, self.g.es[i] != self.g.es[j])
self.assertEqual(i < j, self.g.es[i] < self.g.es[j])
self.assertEqual(i > j, self.g.es[i] > self.g.es[j])
self.assertEqual(i <= j, self.g.es[i] <= self.g.es[j])
self.assertEqual(i >= j, self.g.es[i] >= self.g.es[j])
self.assertFalse(self.g.es[i] == g2.es[j])
self.assertFalse(self.g.es[i] != g2.es[j])
self.assertFalse(self.g.es[i] < g2.es[j])
self.assertFalse(self.g.es[i] > g2.es[j])
self.assertFalse(self.g.es[i] <= g2.es[j])
self.assertFalse(self.g.es[i] >= g2.es[j])
self.assertFalse(self.g.es[2] == self.g.vs[2])
def testRepr(self):
output = repr(self.g.es[0])
self.assertEqual(output, "igraph.Edge(%r, 0, {})" % self.g)
self.g.es["weight"] = list(range(10, 0, -1))
output = repr(self.g.es[3])
self.assertEqual(output, "igraph.Edge(%r, 3, {'weight': 7})" % self.g)
def testUpdateAttributes(self):
e = self.g.es[0]
e.update_attributes(a=2)
self.assertEqual(e["a"], 2)
e.update_attributes([("a", 3), ("b", 4)], c=5, d=6)
self.assertEqual(e.attributes(), dict(a=3, b=4, c=5, d=6))
e.update_attributes(dict(b=44, c=55))
self.assertEqual(e.attributes(), dict(a=3, b=44, c=55, d=6))
def testPhantomEdge(self):
e = self.g.es[self.g.ecount() - 1]
e.delete()
# v is now a phantom edge; try to freak igraph out now :)
self.assertRaises(ValueError, e.update_attributes, a=2)
self.assertRaises(ValueError, e.__getitem__, "a")
self.assertRaises(ValueError, e.__setitem__, "a", 4)
self.assertRaises(ValueError, e.__delitem__, "a")
self.assertRaises(ValueError, e.attributes)
self.assertRaises(ValueError, getattr, e, "source")
self.assertRaises(ValueError, getattr, e, "source_vertex")
self.assertRaises(ValueError, getattr, e, "target")
self.assertRaises(ValueError, getattr, e, "target_vertex")
self.assertRaises(ValueError, getattr, e, "tuple")
self.assertRaises(ValueError, getattr, e, "vertex_tuple")
@unittest.skipIf(is_pypy, "skipped on PyPy because we do not have access to docstrings")
def testProxyMethods(self):
g = Graph.GRG(10, 0.5)
e = g.es[0]
# - delete() is ignored because it mutates the graph
ignore = "delete"
ignore = set(ignore.split())
# Methods not listed here are expected to return an int or a float
return_types = {}
for name in Edge.__dict__:
if name in ignore:
continue
func = getattr(e, name)
docstr = func.__doc__
if not docstr.startswith("Proxy method"):
continue
result = func()
self.assertEqual(
getattr(g, name)(e.index),
result,
msg=("Edge.%s proxy method misbehaved" % name),
)
return_type = return_types.get(name, (int, float))
self.assertTrue(
isinstance(result, return_type),
msg=("Edge.%s proxy method did not return %s" % (name, return_type)),
)
class EdgeSeqTests(unittest.TestCase):
def assert_edges_unique_in(self, es):
pairs = sorted(e.tuple for e in es)
self.assertEqual(pairs, sorted(set(pairs)))
def setUp(self):
self.g = Graph.Full(10)
self.g.es["test"] = list(range(45))
def testCreation(self):
self.assertTrue(len(EdgeSeq(self.g)) == 45)
self.assertTrue(len(EdgeSeq(self.g, 2)) == 1)
self.assertTrue(len(EdgeSeq(self.g, [1, 2, 3])) == 3)
self.assertTrue(EdgeSeq(self.g, [1, 2, 3]).indices == [1, 2, 3])
self.assertRaises(ValueError, EdgeSeq, self.g, 112)
self.assertRaises(ValueError, EdgeSeq, self.g, [112])
self.assertTrue(self.g.es.graph == self.g)
def testIndexing(self):
n = self.g.ecount()
for i in range(n):
self.assertEqual(i, self.g.es[i].index)
self.assertEqual(n - i - 1, self.g.es[-i - 1].index)
self.assertRaises(IndexError, self.g.es.__getitem__, n)
self.assertRaises(IndexError, self.g.es.__getitem__, -n - 1)
self.assertRaises(TypeError, self.g.es.__getitem__, 1.5)
@unittest.skipIf(np is None, "test case depends on NumPy")
def testNumPyIndexing(self):
n = self.g.ecount()
for i in range(n):
arr = np.array([i])
self.assertEqual(i, self.g.es[arr[0]].index)
arr = np.array([n])
self.assertRaises(IndexError, self.g.es.__getitem__, arr[0])
arr = np.array([-n - 1])
self.assertRaises(IndexError, self.g.es.__getitem__, arr[0])
arr = np.array([1.5])
self.assertRaises(TypeError, self.g.es.__getitem__, arr[0])
ind = [1, 3, 5, 8, 3, 2]
arr = np.array(ind)
self.assertEqual(ind, [edge.index for edge in self.g.es[arr.tolist()]])
self.assertEqual(ind, [edge.index for edge in self.g.es[list(arr)]])
def testPartialAttributeAssignment(self):
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
only_even["test"] = [0] * len(only_even)
expected = [[0, i][i % 2] for i in range(self.g.ecount())]
self.assertTrue(self.g.es["test"] == expected)
only_even["test2"] = list(range(23))
expected = [[i // 2, None][i % 2] for i in range(self.g.ecount())]
self.assertTrue(self.g.es["test2"] == expected)
def testSequenceReusing(self):
if "test" in self.g.edge_attributes():
del self.g.es["test"]
self.g.es["test"] = ["A", "B", "C"]
self.assertTrue(self.g.es["test"] == ["A", "B", "C"] * 15)
self.g.es["test"] = "ABC"
self.assertTrue(self.g.es["test"] == ["ABC"] * 45)
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
only_even["test"] = ["D", "E"]
expected = ["D", "ABC", "E", "ABC"] * 12
expected = expected[0:45]
self.assertTrue(self.g.es["test"] == expected)
del self.g.es["test"]
only_even["test"] = ["D", "E"]
expected = ["D", None, "E", None] * 12
expected = expected[0:45]
self.assertTrue(self.g.es["test"] == expected)
def testAllSequence(self):
self.assertTrue(len(self.g.es) == 45)
self.assertTrue(self.g.es["test"] == list(range(45)))
def testEmptySequence(self):
empty_es = self.g.es.select(None)
self.assertTrue(len(empty_es) == 0)
self.assertRaises(IndexError, empty_es.__getitem__, 0)
self.assertRaises(KeyError, empty_es.__getitem__, "nonexistent")
self.assertTrue(empty_es["test"] == [])
empty_es = self.g.es[[]]
self.assertTrue(len(empty_es) == 0)
empty_es = self.g.es[()]
self.assertTrue(len(empty_es) == 0)
def testCallableFilteringFind(self):
edge = self.g.es.find(lambda e: (e.index % 2 == 1))
self.assertTrue(edge.index == 1)
self.assertRaises(IndexError, self.g.es.find, lambda e: (e.index % 2 == 3))
def testCallableFilteringSelect(self):
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
self.assertTrue(len(only_even) == 23)
self.assertRaises(KeyError, only_even.__getitem__, "nonexistent")
self.assertTrue(only_even["test"] == [i * 2 for i in range(23)])
def testChainedCallableFilteringSelect(self):
only_div_six = self.g.es.select(
lambda e: (e.index % 2 == 0), lambda e: (e.index % 3 == 0)
)
self.assertTrue(len(only_div_six) == 8)
self.assertTrue(only_div_six["test"] == [0, 6, 12, 18, 24, 30, 36, 42])
only_div_six = self.g.es.select(lambda e: (e.index % 2 == 0)).select(
lambda e: (e.index % 3 == 0)
)
self.assertTrue(len(only_div_six) == 8)
self.assertTrue(only_div_six["test"] == [0, 6, 12, 18, 24, 30, 36, 42])
def testIntegerFilteringFind(self):
self.assertEqual(self.g.es.find(3).index, 3)
self.assertEqual(self.g.es.select(2, 3, 4, 2).find(3).index, 2)
self.assertRaises(IndexError, self.g.es.find, 178)
def testIntegerFilteringSelect(self):
subset = self.g.es.select(2, 3, 4, 2)
self.assertTrue(len(subset) == 4)
self.assertTrue(subset["test"] == [2, 3, 4, 2])
self.assertRaises(TypeError, self.g.es.select, 2, 3, 4, 2, None)
subset = self.g.es[2, 3, 4, 2]
self.assertTrue(len(subset) == 4)
self.assertTrue(subset["test"] == [2, 3, 4, 2])
def testIterableFilteringSelect(self):
subset = self.g.es.select(list(range(5, 8)))
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [5, 6, 7])
def testSliceFilteringSelect(self):
subset = self.g.es.select(slice(5, 8))
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [5, 6, 7])
subset = self.g.es[40:56:2]
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [40, 42, 44])
def testKeywordFilteringSelect(self):
g = Graph.Barabasi(1000, 2)
g.es["betweenness"] = g.edge_betweenness()
g.es["parity"] = [i % 2 for i in range(g.ecount())]
self.assertTrue(len(g.es(betweenness_gt=10)) < 2000)
self.assertTrue(len(g.es(betweenness_gt=10, parity=0)) < 2000)
def testSourceTargetFiltering(self):
g = Graph.Barabasi(1000, 2, directed=True)
es1 = set(e.source for e in g.es.select(_target_in=[2, 4]))
es2 = set(v1 for v1, v2 in g.get_edgelist() if v2 in [2, 4])
self.assertTrue(es1 == es2)
def testWithinFiltering(self):
g = Graph.Lattice([10, 10])
vs = [0, 1, 2, 10, 11, 12, 20, 21, 22]
vs2 = (0, 1, 10, 11)
es1 = g.es.select(_within=vs)
es2 = g.es.select(_within=VertexSeq(g, vs))
for es in [es1, es2]:
self.assertTrue(len(es) == 12)
self.assertTrue(all(e.source in vs and e.target in vs for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_within=vs2)
self.assertTrue(len(es_filtered) == 4)
self.assertTrue(
all(e.source in vs2 and e.target in vs2 for e in es_filtered)
)
self.assert_edges_unique_in(es_filtered)
def testBetweenFiltering(self):
g = Graph.Lattice([10, 10])
vs1, vs2 = [10, 11, 12], [20, 21, 22]
es1 = g.es.select(_between=(vs1, vs2))
es2 = g.es.select(_between=(VertexSeq(g, vs1), VertexSeq(g, vs2)))
for es in [es1, es2]:
self.assertTrue(len(es) == 3)
self.assertTrue(
all(
(e.source in vs1 and e.target in vs2)
or (e.target in vs1 and e.source in vs2)
for e in es
)
)
self.assert_edges_unique_in(es)
def testIncidentFiltering(self):
g = Graph.Lattice([10, 10], circular=False)
vs = (0, 1, 10, 11)
vs2 = (11, 0, 24)
vs3 = sorted(set(vs).intersection(set(vs2)))
es = g.es.select(_incident=vs)
self.assertEqual(8, len(es))
self.assertTrue(all((e.source in vs or e.target in vs) for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_incident=vs2)
self.assertEqual(6, len(es_filtered))
self.assertTrue(all((e.source in vs3 or e.target in vs3) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
def testIncidentFilteringByNames(self):
g = Graph.Lattice([10, 10], circular=False)
vs = (0, 1, 10, 11)
g.vs[vs]["name"] = ["A", "B", "C", "D"]
vs2 = (11, 0, 24)
g.vs[24]["name"] = "X"
vs3 = sorted(set(vs).intersection(set(vs2)))
es = g.es.select(_incident=("A", "B", "C", "D"))
self.assertEqual(8, len(es))
self.assertTrue(all((e.source in vs or e.target in vs) for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_incident=("D", "A", "X"))
self.assertEqual(6, len(es_filtered))
self.assertTrue(all((e.source in vs3 or e.target in vs3) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
es_filtered = es_filtered.select(_from="A")
self.assertEqual(2, len(es_filtered))
self.assertTrue(all((e.source == 0 or e.target == 0) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
def testSourceAndTargetFilteringForUndirectedGraphs(self):
g = Graph.Lattice([10, 10], circular=False)
vs = (0, 1, 10, 11)
vs2 = (11, 0, 24)
vs3 = sorted(set(vs).intersection(set(vs2)))
es = g.es.select(_from=vs)
self.assertEqual(8, len(es))
self.assertTrue(all((e.source in vs or e.target in vs) for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_to_in=vs2)
self.assertEqual(6, len(es_filtered))
self.assertTrue(all((e.source in vs3 or e.target in vs3) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
es_filtered = es_filtered.select(_from_eq=0)
self.assertEqual(2, len(es_filtered))
self.assertTrue(all((e.source == 0 or e.target == 0) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
def testIndexOutOfBoundsSelect(self):
g = Graph.Full(3)
self.assertRaises(ValueError, g.es.select, 4)
self.assertRaises(ValueError, g.es.select, 4, 5)
self.assertRaises(ValueError, g.es.select, (4, 5))
self.assertRaises(ValueError, g.es.select, 2, -1)
self.assertRaises(ValueError, g.es.select, (2, -1))
self.assertRaises(ValueError, g.es.__getitem__, (0, 1000000))
def testIndexAndKeywordFilteringFind(self):
self.assertRaises(ValueError, self.g.es.find, 2, test=4)
self.assertTrue(self.g.es.find(2, test=2) == self.g.es[2])
def testGraphMethodProxying(self):
idxs = [1, 3, 5, 7, 9]
g = Graph.Barabasi(100)
es = g.es(*idxs)
ebs = g.edge_betweenness()
self.assertEqual([ebs[i] for i in idxs], es.edge_betweenness())
idxs = [1, 3]
g = Graph([(0, 1), (1, 2), (2, 0), (1, 0)], directed=True)
es = g.es(*idxs)
mutual = g.is_mutual(es)
self.assertEqual(mutual, es.is_mutual())
for e, m in zip(es, mutual):
self.assertEqual(e.is_mutual(), m)
def testIsAll(self):
g = Graph.Full(5)
self.assertTrue(g.es.is_all())
self.assertFalse(g.es.select(1, 2, 3).is_all())
self.assertFalse(g.es.select(_within=[1, 2, 3]).is_all())
def suite():
edge_suite = unittest.makeSuite(EdgeTests)
es_suite = unittest.makeSuite(EdgeSeqTests)
return unittest.TestSuite([edge_suite, es_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
| gpl-2.0 | -759,501,945,597,758,200 | 36.355814 | 92 | 0.560481 | false |
DailyActie/Surrogate-Model | surrogate/crossover/tests/test_cxUniform.py | 1 | 1730 | # MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <[email protected]>
# License: MIT License
# Create: 2016-12-02
import numpy as np
from surrogate.crossover import cxUniform
print '\nTest.crossover.cxUniform: cxUniform'
ind1 = np.array(range(0, 10))
ind2 = np.array(range(10, 20))
# ind2 = np.array(range(9,-1,-1))
print '\tInput: ind1_desVar=\t' + '\t'.join(map(str, ind1)) + ''
print '\tInput: ind2_desVar=\t' + '\t'.join(map(str, ind2)) + ''
[out1, out2] = cxUniform(var1=ind1.tolist(), var2=ind2.tolist())
print '\tOutput: out1_desVar=\t' + '\t'.join(map(str, out1)) + ''
print '\tOutput: out2_desVar=\t' + '\t'.join(map(str, out2)) + ''
| mit | -7,992,576,133,490,520,000 | 43.358974 | 80 | 0.731792 | false |
atomman/nmrglue | examples/jbnmr_examples/s7-s9_s3e_processing/convert.py | 4 | 1126 | import nmrglue as ng
# read in the sum data set
dic, data = ng.varian.read('.', fid_file='fid_sum', as_2d=True)
# set the spectral parameters
udic = ng.varian.guess_udic(dic, data)
udic[1]['size'] = 1500 ; udic[0]['size'] = 256
udic[1]['complex'] = True ; udic[0]['complex'] = True
udic[1]['encoding'] = 'direct' ; udic[0]['encoding'] = 'states'
udic[1]['sw'] = 50000.000 ; udic[0]['sw'] = 5000.0
udic[1]['obs'] = 125.690 ; udic[0]['obs'] = 50.648
udic[1]['car'] = 174.538 * 125.690; udic[0]['car'] = 119.727 * 50.648
udic[1]['label'] = 'C13' ; udic[0]['label'] = 'N15'
# convert to NMRPipe format
C = ng.convert.converter()
C.from_varian(dic, data, udic)
pdic, pdata = C.to_pipe()
# write out the NMRPipe file
ng.pipe.write("test_sum.fid", pdic, pdata, overwrite=True)
# repeat for the difference data set
dic, data = ng.varian.read('.', fid_file='fid_dif', as_2d=True)
C = ng.convert.converter()
C.from_varian(dic, data, udic)
pdic, pdata = C.to_pipe()
ng.pipe.write("test_dif.fid", pdic, pdata, overwrite=True)
| bsd-3-clause | 718,106,159,891,570,200 | 37.827586 | 79 | 0.581705 | false |
KFGisIT/gsa-bpa-django | app/settings.py | 1 | 2975 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
SECRET_KEY = '_*zjhswt9umayc3hl4(a3trs3fz+zgh9l@o^1(bo#%jl@t4jqu'
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
TEST_PROJECT_APPS = (
'app',
)
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Pipeline
'pipeline',
# Bower
'djangobower',
) + TEST_PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'djangobower.finders.BowerFinder',
)
# Pipeline settings
STATICFILES_STORAGE = 'pipeline.storage.PipelineCachedStorage'
PIPELINE_JS_COMPRESSOR = 'pipeline.compressors.uglifyjs.UglifyJSCompressor'
# Static
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Bower
BOWER_COMPONENTS_ROOT = os.path.join(BASE_DIR, 'app/static')
# Pipeline
PIPELINE_CSS = {
# Libraries
'libraries': {
'source_filenames': (
'bower_components/font-awesome/css/font-awesome.min.css',
'bower_components/bootstrap/dist/css/bootstrap.css',
'bower_components/select2/dist/css/select2.min.css',
),
'output_filename': 'css/libs.min.css',
},
# Base styles
'base': {
'source_filenames': (
'css/app.css',
),
'output_filename': 'css/main.min.css',
},
}
PIPELINE_JS = {
# Libraries
'libraries': {
'source_filenames': (
'bower_components/jquery/dist/jquery.js',
'bower_components/bootstrap/dist/js/bootstrap.min.js',
'bower_components/select2/dist/js/select2.min.js',
'bower_components/bootpag/lib/jquery.bootpag.js',
'js/pubsub.js',
'js/ajax-helpers.js',
'js/app.js',
),
'output_filename': 'js/libs.min.js',
}
}
PASSWORD_HASHERS = (
'django.contrib.auth.hashers.BCryptPasswordHasher',
)
AXES_LOGIN_FAILURE_LIMIT = 10
AXES_USE_USER_AGENT = True
AXES_COOLOFF_TIME = 1
AXES_LOCKOUT_TEMPLATE = '403.html'
| mit | 6,532,810,265,487,909,000 | 22.611111 | 75 | 0.647059 | false |
YannThorimbert/ThePhantomRacer | levelgen.py | 1 | 1323 | import random
import parameters
import track
import obstacle
class LevelGenerator:
def __init__(self, zfinish, nx, ny):
self.zfinish = zfinish
self.nx = nx
self.ny = ny
self.track = track.Track(zfinish,nx,ny)
parameters.scene.track = self.track
def add_static_obstacles(self, density, zmin, zmax, objects):
"""Density: average number of obstacles per 100 m"""
n = density * self.zfinish / 100.
done = set([])
i = 0
while i < n:
x = random.randint(0,self.nx-1)
y = random.randint(0,self.ny-1)
z = random.randint(zmin,zmax)
if (x,y,z) not in done:
done.add((x,y,z))
obj = random.choice(objects).get_copy()
damage = 1
obstacle.Obstacle(damage,x,y,z,obj)
i += 1
def random_gen(self, nparts, objects, min_density=0.1, max_density=1.8):
zpart = self.zfinish // nparts
for i in range(nparts):
density = random.random()*(max_density-min_density) + min_density
print("random gen", density)
if i == 0:
begin = 50
else:
begin = i*zpart
self.add_static_obstacles(density, begin, (i+1)*zpart, objects)
| mit | -7,094,742,456,078,290,000 | 30.5 | 77 | 0.530612 | false |
tbachman/group-based-policy | gbpservice/neutron/tests/unit/services/servicechain/test_servicechain_plugin.py | 1 | 11662 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import collections
from neutron import context as n_ctx
from oslo_config import cfg
from oslo_serialization import jsonutils
from gbpservice.neutron.services.servicechain.plugins.msc import context
from gbpservice.neutron.tests.unit.db.grouppolicy import (
test_servicechain_db as test_servicechain_db)
cfg.CONF.import_opt(
'servicechain_drivers',
'gbpservice.neutron.services.servicechain.plugins.msc.config',
group='servicechain')
SC_PLUGIN_KLASS = (
"gbpservice.neutron.services.servicechain.plugins.msc.plugin."
"ServiceChainPlugin")
class ServiceChainPluginTestCase(test_servicechain_db.ServiceChainDbTestCase):
def setUp(self, core_plugin=None, sc_plugin=None, gp_plugin=None):
if not sc_plugin:
sc_plugin = SC_PLUGIN_KLASS
super(ServiceChainPluginTestCase, self).setUp(core_plugin=core_plugin,
sc_plugin=sc_plugin,
gp_plugin=gp_plugin)
class TestGroupPolicyPluginGroupResources(
ServiceChainPluginTestCase,
test_servicechain_db.TestServiceChainResources):
def test_spec_shared(self):
# Shared spec can only point shared nodes
node = self._create_profiled_servicechain_node(
'LOADBALANCER', shared=True, shared_profile=True,
profile_tenant_id='admin', tenant_id='admin')['servicechain_node']
self.create_servicechain_spec(nodes=[node['id']], shared=True,
expected_res_status=201)
self.create_servicechain_spec(nodes=[node['id']], shared=False,
tenant_id='admin',
expected_res_status=201)
node = self._create_profiled_servicechain_node(
'LOADBALANCER', shared=False, profile_tenant_id='nonadmin',
tenant_id='nonadmin')['servicechain_node']
self.create_servicechain_spec(nodes=[node['id']], shared=True,
expected_res_status=404)
self.create_servicechain_spec(nodes=[node['id']], shared=True,
tenant_id='nonadmin',
expected_res_status=400)
self.create_servicechain_spec(nodes=[node['id']], shared=False,
tenant_id='nonadmin',
expected_res_status=201)
def test_node_shared(self):
# Shared node can only point shared profile
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
tenant_id='admin')['service_profile']
to_update = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
expected_res_status=201)['servicechain_node']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False, tenant_id='admin',
expected_res_status=201)
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=False,
tenant_id='admin')['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
expected_res_status=404)
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
tenant_id='admin', expected_res_status=400)
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False,
tenant_id='admin', expected_res_status=201)
self.create_servicechain_spec(nodes=[to_update['id']], shared=True,
tenant_id='nonadmin',
expected_res_status=201)
data = {'servicechain_node': {'shared': False}}
req = self.new_update_request('servicechain_nodes', data,
to_update['id'])
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertEqual('InvalidSharedAttributeUpdate',
res['NeutronError']['type'])
def test_profile_shared(self):
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
tenant_id='admin')['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
expected_res_status=201)
data = {'service_profile': {'shared': False}}
req = self.new_update_request('service_profiles', data,
prof['id'])
res = req.get_response(self.ext_api)
self.assertEqual(400, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertEqual('InvalidSharedAttributeUpdate',
res['NeutronError']['type'])
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=False)['service_profile']
self.create_servicechain_node(
service_profile_id=prof['id'], shared=False,
expected_res_status=201)
data = {'service_profile': {'shared': True}}
req = self.new_update_request('service_profiles', data,
prof['id'])
res = req.get_response(self.ext_api)
self.assertEqual(200, res.status_int)
res = self.deserialize(self.fmt, res)
self.assertTrue(res['service_profile']['shared'])
def test_node_context_profile(self):
# Current node with profile
plugin_context = n_ctx.get_admin_context()
plugin_context.is_admin = plugin_context.is_advsvc = False
plugin_context.tenant_id = 'test-tenant'
prof = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
current = self.create_servicechain_node(
service_profile_id=prof['id'],
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current)
self.assertIsNone(ctx.original)
self.assertIsNone(ctx.original_profile)
self.assertEqual(ctx.current['id'], current['id'])
self.assertEqual(ctx.current_profile['id'], prof['id'])
# Original node with profile
prof2 = self.create_service_profile(
service_type='LOADBALANCER')['service_profile']
original = self.create_servicechain_node(
service_profile_id=prof2['id'],
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current, original)
self.assertEqual(ctx.original['id'], original['id'])
self.assertEqual(ctx.original_profile['id'], prof2['id'])
self.assertEqual(ctx.current['id'], current['id'])
self.assertEqual(ctx.current_profile['id'], prof['id'])
def test_node_context_no_profile(self):
plugin_context = n_ctx.get_admin_context()
plugin_context.is_admin = plugin_context.is_advsvc = False
plugin_context.tenant_id = 'test_tenant'
current = self.create_servicechain_node(
service_type='TEST',
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current)
self.assertIsNone(ctx.original)
self.assertIsNone(ctx.original_profile)
self.assertEqual(ctx.current['id'], current['id'])
self.assertIsNone(ctx.current_profile)
original = self.create_servicechain_node(
service_type='TEST',
expected_res_status=201)['servicechain_node']
ctx = context.ServiceChainNodeContext(self.plugin, plugin_context,
current, original)
self.assertEqual(ctx.original['id'], original['id'])
self.assertIsNone(ctx.original_profile)
self.assertEqual(ctx.current['id'], current['id'])
self.assertIsNone(ctx.current_profile)
def test_spec_parameters(self):
params_node_1 = ['p1', 'p2', 'p3']
params_node_2 = ['p4', 'p5', 'p6']
params_node_3 = ['p7', 'p8', 'p9']
def params_dict(params):
return jsonutils.dumps({'Parameters':
dict((x, {}) for x in params)})
prof = self.create_service_profile(
service_type='LOADBALANCER', shared=True,
tenant_id='admin')['service_profile']
# Create 2 nodes with different parameters
node1 = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
config=params_dict(params_node_1),
expected_res_status=201)['servicechain_node']
node2 = self.create_servicechain_node(
service_profile_id=prof['id'], shared=True,
config=params_dict(params_node_2),
expected_res_status=201)['servicechain_node']
# Create SC spec with the nodes assigned
spec = self.create_servicechain_spec(
nodes=[node1['id'], node2['id']], shared=True,
expected_res_status=201)['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_1 + params_node_2),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
# Update the spec removing one node
self.update_servicechain_spec(spec['id'], nodes=[node1['id']],
expected_res_status=200)
spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_1),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
# Update the spec without modifying the node list
self.update_servicechain_spec(spec['id'],
name='new_name',
expected_res_status=200)
spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_1),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
# Update a node with new config params
self.update_servicechain_node(node1['id'],
config=params_dict(params_node_3),
expected_res_status=200)
spec = self.show_servicechain_spec(spec['id'])['servicechain_spec']
# Verify param names correspondence
self.assertEqual(
collections.Counter(params_node_3),
collections.Counter(ast.literal_eval(spec['config_param_names'])))
| apache-2.0 | 7,955,415,843,362,950,000 | 42.842105 | 78 | 0.594409 | false |
boldprogressives/trac-GitolitePlugin | trac_gitolite/repo_manager.py | 1 | 2805 | import getpass
import pkg_resources
from trac.admin import IAdminPanelProvider
from trac.core import *
from trac.config import Option, BoolOption
from trac.perm import IPermissionRequestor
from trac.util.translation import _
from trac.web.chrome import ITemplateProvider
from trac.web.chrome import add_notice, add_warning
from trac_gitolite import utils
class GitoliteRepositoryManager(Component):
implements(IPermissionRequestor, IAdminPanelProvider, ITemplateProvider)
gitolite_admin_reponame = Option('trac-gitolite', 'admin_reponame',
default="gitolite-admin")
gitolite_admin_ssh_path = Option('trac-gitolite', 'admin_ssh_path',
default="git@localhost:gitolite-admin.git")
def read_config(self):
node = utils.get_repo_node(self.env, self.gitolite_admin_reponame,
"conf/gitolite.conf")
fp = node.get_content()
return utils.read_config(fp)
## IPermissionRequestor methods
def get_permission_actions(self):
return [('VERSIONCONTROL_ADMIN', ['REPOSITORY_CREATE']),
'REPOSITORY_CREATE']
## IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'REPOSITORY_CREATE' in req.perm:
yield ('versioncontrol', _('Version Control'), 'gitolite',
_('Gitolite Repositories'))
def render_admin_panel(self, req, category, page, path_info):
req.perm.require('REPOSITORY_CREATE')
if req.method == 'POST':
repo_name = req.args['name']
perms = self.read_config()
if repo_name in perms:
add_warning(req, _('A repository named %s already exists; maybe you just need to tell Trac about it using the Repositories panel?'))
req.redirect(req.href.admin(category, page))
perms[repo_name] = repo_perms = {}
trac_user = getpass.getuser()
for perm in ['R', 'W', '+']:
repo_perms[perm] = [trac_user]
utils.save_file(self.gitolite_admin_ssh_path, 'conf/gitolite.conf',
utils.to_string(perms),
_('Adding new repository %s' % repo_name))
add_notice(req, _('Repository "%s" has been created. Now you should give some users permissions on it using the Version Control Permissions panel.' % repo_name))
req.redirect(req.href.admin(category, page))
data = {'repos': sorted(self.read_config())}
return 'admin_repository_gitolite.html', data
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac_gitolite', 'templates')]
| bsd-3-clause | 6,899,371,258,026,229,000 | 39.652174 | 174 | 0.62139 | false |
DaveBackus/Data_Bootcamp | Code/Lab/SPF_forecasts.py | 1 | 1126 | """
Survey of Professional Forecasters
The Philly Fed has been polling forecasters for years and posting both
summary statistics (mean forecasts, for example) and individual numbers
(suitably anonymized). We take a look at the recent data, see what's there.
Link
* https://www.philadelphiafed.org/research-and-data/real-time-center/survey-of-professional-forecasters/
Prepared for Data Bootcamp course at NYU
* http://databootcamp.nyuecon.com/
* https://github.com/DaveBackus/Data_Bootcamp/Code/Lab
Written by Dave Backus and Chase Coleman, March 2016
Created with Python 3.5
"""
"""
import packages, check versions
"""
import sys
import pandas as pd
#import numpy as np
#import matplotlib.pyplot as plt
print('\nPython version: ', sys.version)
print('Pandas version: ', pd.__version__, '\n')
#%%
"""
read data
"""
url1 = 'https://www.philadelphiafed.org/-/media/research-and-data/'
url2 = 'real-time-center/survey-of-professional-forecasters/'
url3 = 'historical-data/micro5.xls'
url = url1 + url2 + url3
spf = pd.read_excel(url)
print('Dimensions:', spf.shape)
print('\nData types:\n', spf.dtypes, sep='')
#%%
#%%
| mit | 8,940,016,088,585,971,000 | 24.590909 | 104 | 0.737123 | false |
parksandwildlife/wastd | wastd/observations/migrations/0021_auto_20200622_1218.py | 1 | 1991 | # Generated by Django 2.2.10 on 2020-06-22 04:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('observations', '0020_auto_20200622_1045'),
]
operations = [
migrations.AddField(
model_name='encounter',
name='location_accuracy_m',
field=models.FloatField(blank=True, help_text='The accuracy of the supplied location in metres, if given.', null=True, verbose_name='Location accuracy (m)'),
),
migrations.AlterField(
model_name='encounter',
name='location_accuracy',
field=models.CharField(choices=[('10', 'GPS reading at exact location (10 m)'), ('1000', 'Site centroid or place name (1 km)'), ('10000', 'Rough estimate (10 km)')], default='1000', help_text='The source of the supplied location implies a rough location accuracy.', max_length=300, verbose_name='Location accuracy class (m)'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='light_sources_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Light sources present during emergence'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='outlier_tracks_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Outlier tracks present'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceoutlierobservation',
name='outlier_group_size',
field=models.PositiveIntegerField(blank=True, help_text='', null=True, verbose_name='Number of tracks in outlier group'),
),
]
| mit | -4,406,606,444,078,129,700 | 51.394737 | 338 | 0.637368 | false |
maartenbreddels/vaex | packages/vaex-jupyter/vaex/jupyter/ipyleaflet.py | 1 | 1541 | import ipyleaflet as ll
import traitlets
import ipywidgets as widgets
import vaex.image
class IpyleafletImage(traitlets.HasTraits):
x_min = traitlets.CFloat()
x_max = traitlets.CFloat()
y_min = traitlets.CFloat(None, allow_none=True)
y_max = traitlets.CFloat(None, allow_none=True)
x_label = traitlets.Unicode()
y_label = traitlets.Unicode()
tool = traitlets.Unicode(None, allow_none=True)
def __init__(self, output, presenter, map=None, zoom=12, **kwargs):
super().__init__(**kwargs)
self.output = output
self.presenter = presenter
self.map = map
self._zoom = zoom
self.last_image_layer = None
center = self.x_min + (self.x_max - self.x_min) / 2, self.y_min + (self.y_max - self.y_min) / 2
center = center[1], center[0]
self.map = ll.Map(center=center, zoom=self._zoom)
widgets.dlink((self.map, 'west'), (self, 'x_min'))
widgets.dlink((self.map, 'east'), (self, 'x_max'))
widgets.dlink((self.map, 'north'), (self, 'y_min'))
widgets.dlink((self.map, 'south'), (self, 'y_max'))
self.widget = self.map
def set_rgb_image(self, rgb_image):
with self.output:
if self.last_image_layer:
self.map.remove_layer(self.last_image_layer)
url = vaex.image.rgba_to_url(rgb_image[::-1, ::].copy())
image = ll.ImageOverlay(url=url, bounds=list(self.map.bounds))
self.map.add_layer(image)
self.last_image_layer = image
| mit | 3,515,014,371,890,492,400 | 34.837209 | 103 | 0.598313 | false |
jokajak/itweb | data/env/lib/python2.6/site-packages/transaction-1.1.1-py2.6.egg/transaction/tests/test_register_compat.py | 1 | 4007 | ##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test backwards compatibility for resource managers using register().
The transaction package supports several different APIs for resource
managers. The original ZODB3 API was implemented by ZODB.Connection.
The Connection passed persistent objects to a Transaction's register()
method. It's possible that third-party code also used this API, hence
these tests that the code that adapts the old interface to the current
API works.
These tests use a TestConnection object that implements the old API.
They check that the right methods are called and in roughly the right
order.
Common cases
------------
First, check that a basic transaction commit works.
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(Object())
>>> cn.register(Object())
>>> transaction.commit()
>>> len(cn.committed)
3
>>> len(cn.aborted)
0
>>> cn.calls
['begin', 'vote', 'finish']
Second, check that a basic transaction abort works. If the
application calls abort(), then the transaction never gets into the
two-phase commit. It just aborts each object.
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(Object())
>>> cn.register(Object())
>>> transaction.abort()
>>> len(cn.committed)
0
>>> len(cn.aborted)
3
>>> cn.calls
[]
Error handling
--------------
The tricky part of the implementation is recovering from an error that
occurs during the two-phase commit. We override the commit() and
abort() methods of Object to cause errors during commit.
Note that the implementation uses lists internally, so that objects
are committed in the order they are registered. (In the presence of
multiple resource managers, objects from a single resource manager are
committed in order. I'm not sure if this is an accident of the
implementation or a feature that should be supported by any
implementation.)
The order of resource managers depends on sortKey().
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(CommitError())
>>> cn.register(Object())
>>> transaction.commit()
Traceback (most recent call last):
...
RuntimeError: commit
>>> len(cn.committed)
1
>>> len(cn.aborted)
3
Clean up:
>>> transaction.abort()
"""
import doctest
import transaction
class Object(object):
def commit(self):
pass
def abort(self):
pass
class CommitError(Object):
def commit(self):
raise RuntimeError("commit")
class AbortError(Object):
def abort(self):
raise RuntimeError("abort")
class BothError(CommitError, AbortError):
pass
class TestConnection:
def __init__(self):
self.committed = []
self.aborted = []
self.calls = []
def register(self, obj):
obj._p_jar = self
transaction.get().register(obj)
def sortKey(self):
return str(id(self))
def tpc_begin(self, txn):
self.calls.append("begin")
def tpc_vote(self, txn):
self.calls.append("vote")
def tpc_finish(self, txn):
self.calls.append("finish")
def tpc_abort(self, txn):
self.calls.append("abort")
def commit(self, obj, txn):
obj.commit()
self.committed.append(obj)
def abort(self, obj, txn):
obj.abort()
self.aborted.append(obj)
def test_suite():
return doctest.DocTestSuite()
# additional_tests is for setuptools "setup.py test" support
additional_tests = test_suite
| gpl-3.0 | 2,778,489,074,686,703,000 | 24.685897 | 78 | 0.670826 | false |
lvapeab/nmt-keras | examples/configs/config_transformer.py | 1 | 22866 |
def load_parameters():
"""
Loads the defined hyperparameters
:return parameters: Dictionary of loaded parameters
"""
# Input data params
TASK_NAME = 'my_task' # Task name
DATASET_NAME = TASK_NAME # Dataset name
SRC_LAN = 'fr' # Language of the source text
TRG_LAN = 'en' # Language of the target text
DATA_ROOT_PATH = '/DATA/%s/%s/joint_bpe/' % (TASK_NAME, SRC_LAN+TRG_LAN) # Path where data is stored
# SRC_LAN or TRG_LAN will be added to the file names
TEXT_FILES = {'train': 'training.', # Data files
'val': 'dev.',
'test': 'test.'}
# Dataset class parameters
INPUTS_IDS_DATASET = ['source_text', 'state_below'] # Corresponding inputs of the dataset
OUTPUTS_IDS_DATASET = ['target_text'] # Corresponding outputs of the dataset
INPUTS_IDS_MODEL = ['source_text', 'state_below'] # Corresponding inputs of the built model
OUTPUTS_IDS_MODEL = ['target_text'] # Corresponding outputs of the built model
# Evaluation params
METRICS = ['coco'] # Metric used for evaluating the model
EVAL_ON_SETS = ['val'] # Possible values: 'train', 'val' and 'test' (external evaluator)
EVAL_ON_SETS_KERAS = [] # Possible values: 'train', 'val' and 'test' (Keras' evaluator). Untested.
START_EVAL_ON_EPOCH = 1 # First epoch to start the model evaluation
EVAL_EACH_EPOCHS = False # Select whether evaluate between N epochs or N updates
EVAL_EACH = 3750 # Sets the evaluation frequency (epochs or updates)
# Search parameters
SAMPLING = 'max_likelihood' # Possible values: multinomial or max_likelihood (recommended)
TEMPERATURE = 1 # Multinomial sampling parameter
BEAM_SEARCH = True # Switches on-off the beam search procedure
BEAM_SIZE = 6 # Beam size (in case of BEAM_SEARCH == True)
OPTIMIZED_SEARCH = True # Compute annotations only a single time per sample
SEARCH_PRUNING = False # Apply pruning strategies to the beam search method.
# It will likely increase decoding speed, but decrease quality.
MAXLEN_GIVEN_X = True # Generate translations of similar length to the source sentences
MAXLEN_GIVEN_X_FACTOR = 1.7 # The hypotheses will have (as maximum) the number of words of the
# source sentence * LENGTH_Y_GIVEN_X_FACTOR
MINLEN_GIVEN_X = True # Generate translations of similar length to the source sentences
MINLEN_GIVEN_X_FACTOR = 2 # The hypotheses will have (as minimum) the number of words of the
# source sentence / LENGTH_Y_GIVEN_X_FACTOR
# Apply length and coverage decoding normalizations.
# See Section 7 from Wu et al. (2016) (https://arxiv.org/abs/1609.08144)
LENGTH_PENALTY = False # Apply length penalty
LENGTH_NORM_FACTOR = 0.2 # Length penalty factor
COVERAGE_PENALTY = False # Apply source coverage penalty
COVERAGE_NORM_FACTOR = 0.2 # Coverage penalty factor
# Alternative (simple) length normalization.
NORMALIZE_SAMPLING = False # Normalize hypotheses scores according to their length:
ALPHA_FACTOR = .6 # Normalization according to |h|**ALPHA_FACTOR
# Sampling params: Show some samples during training
SAMPLE_ON_SETS = ['train', 'val'] # Possible values: 'train', 'val' and 'test'
N_SAMPLES = 5 # Number of samples generated
START_SAMPLING_ON_EPOCH = 2 # First epoch where to start the sampling counter
SAMPLE_EACH_UPDATES = 10000 # Sampling frequency (always in #updates)
# Unknown words treatment
POS_UNK = True # Enable POS_UNK strategy for unknown words
HEURISTIC = 0 # Heuristic to follow:
# 0: Replace the UNK by the correspondingly aligned source
# 1: Replace the UNK by the translation (given by an external
# dictionary) of the correspondingly aligned source
# 2: Replace the UNK by the translation (given by an external
# dictionary) of the correspondingly aligned source only if it
# starts with a lowercase. Otherwise, copies the source word.
ALIGN_FROM_RAW = True # Align using the full vocabulary or the short_list
MAPPING = DATA_ROOT_PATH + '/mapping.%s_%s.pkl' % (SRC_LAN, TRG_LAN) # Source -- Target pkl mapping (used for heuristics 1--2)
# Word representation params
TOKENIZATION_METHOD = 'tokenize_none' # Select which tokenization we'll apply.
# See Dataset class (from stager_keras_wrapper) for more info.
BPE_CODES_PATH = DATA_ROOT_PATH + '/training_codes.joint' # If TOKENIZATION_METHOD = 'tokenize_bpe',
# sets the path to the learned BPE codes.
DETOKENIZATION_METHOD = 'detokenize_bpe' # Select which de-tokenization method we'll apply
APPLY_DETOKENIZATION = True # Wheter we apply a detokenization method
TOKENIZE_HYPOTHESES = True # Whether we tokenize the hypotheses using the
# previously defined tokenization method
TOKENIZE_REFERENCES = True # Whether we tokenize the references using the
# previously defined tokenization method
# Input image parameters
DATA_AUGMENTATION = False # Apply data augmentation on input data (still unimplemented for text inputs)
# Text parameters
FILL = 'end' # Whether we pad the 'end' or the 'start' of the sentence with 0s
PAD_ON_BATCH = True # Whether we take as many timesteps as the longest sequence of
# the batch or a fixed size (MAX_OUTPUT_TEXT_LEN)
# Input text parameters
INPUT_VOCABULARY_SIZE = 0 # Size of the input vocabulary. Set to 0 for using all,
# otherwise it will be truncated to these most frequent words.
MIN_OCCURRENCES_INPUT_VOCAB = 0 # Minimum number of occurrences allowed for the words in the input vocabulary.
# Set to 0 for using them all.
MAX_INPUT_TEXT_LEN = 70 # Maximum length of the input sequence
# Output text parameters
OUTPUT_VOCABULARY_SIZE = 0 # Size of the input vocabulary. Set to 0 for using all,
# otherwise it will be truncated to these most frequent words.
MIN_OCCURRENCES_OUTPUT_VOCAB = 0 # Minimum number of occurrences allowed for the words in the output vocabulary.
MAX_OUTPUT_TEXT_LEN = 70 # Maximum length of the output sequence
# set to 0 if we want to use the whole answer as a single class
MAX_OUTPUT_TEXT_LEN_TEST = MAX_OUTPUT_TEXT_LEN * 3 # Maximum length of the output sequence during test time
# Optimizer parameters (see model.compile() function).
LOSS = 'categorical_crossentropy'
CLASSIFIER_ACTIVATION = 'softmax'
SAMPLE_WEIGHTS = True # Select whether we use a weights matrix (mask) for the data outputs
LABEL_SMOOTHING = 0.1 # Epsilon value for label smoothing. Only valid for 'categorical_crossentropy' loss. See arxiv.org/abs/1512.00567.
OPTIMIZER = 'Adam' # Optimizer. Supported optimizers: SGD, RMSprop, Adagrad, Adadelta, Adam, Adamax, Nadam.
LR = 0.004 # Learning rate. Recommended values - Adam 0.0002 - Adadelta 1.0.
CLIP_C = 5. # During training, clip L2 norm of gradients to this value (0. means deactivated).
CLIP_V = 0. # During training, clip absolute value of gradients to this value (0. means deactivated).
USE_TF_OPTIMIZER = True # Use native Tensorflow's optimizer (only for the Tensorflow backend).
# Advanced parameters for optimizers. Default values are usually effective.
MOMENTUM = 0. # Momentum value (for SGD optimizer).
NESTEROV_MOMENTUM = False # Use Nesterov momentum (for SGD optimizer).
RHO = 0.9 # Rho value (for Adadelta and RMSprop optimizers).
BETA_1 = 0.9 # Beta 1 value (for Adam, Adamax Nadam optimizers).
BETA_2 = 0.999 # Beta 2 value (for Adam, Adamax Nadam optimizers).
AMSGRAD = False # Whether to apply the AMSGrad variant of Adam (see https://openreview.net/pdf?id=ryQu7f-RZ).
EPSILON = 1e-7 # Optimizers epsilon value.
# Learning rate annealing
LR_DECAY = 1 # Frequency (number of epochs or updates) between LR annealings. Set to None for not decay the learning rate
LR_GAMMA = 1 # Multiplier used for decreasing the LR
LR_REDUCE_EACH_EPOCHS = False # Reduce each LR_DECAY number of epochs or updates
LR_START_REDUCTION_ON_EPOCH = 0 # Epoch to start the reduction
LR_REDUCER_TYPE = 'noam' # Function to reduce. 'linear' and 'exponential' implemented.
# Linear reduction: new_lr = lr * LR_GAMMA
# Exponential reduction: new_lr = lr * LR_REDUCER_EXP_BASE ** (current_nb / LR_HALF_LIFE) * LR_GAMMA
# Noam reduction: new_lr = lr * min(current_nb ** LR_REDUCER_EXP_BASE, current_nb * LR_HALF_LIFE ** WARMUP_EXP)
LR_REDUCER_EXP_BASE = -0.5 # Base for the exponential decay.
LR_HALF_LIFE = 4000 # Factor/warmup steps for exponenital/noam decay.
WARMUP_EXP = -1.5 # Warmup steps for noam decay.
# Training parameters
MAX_EPOCH = 500 # Stop when computed this number of epochs.
BATCH_SIZE = 50 # Size of each minibatch.
N_GPUS = 1 # Number of GPUs to use. Only for Tensorflow backend. Each GPU will receive mini-batches of BATCH_SIZE / N_GPUS.
HOMOGENEOUS_BATCHES = False # Use batches with homogeneous output lengths (Dangerous!!).
JOINT_BATCHES = 4 # When using homogeneous batches, get this number of batches to sort.
PARALLEL_LOADERS = 1 # Parallel data batch loaders. Somewhat untested if > 1.
EPOCHS_FOR_SAVE = 1 # Number of epochs between model saves.
WRITE_VALID_SAMPLES = True # Write valid samples in file.
SAVE_EACH_EVALUATION = True # Save each time we evaluate the model.
# Early stop parameters
EARLY_STOP = True # Turns on/off the early stop protocol.
PATIENCE = 10 # We'll stop if the val STOP_METRIC does not improve after this.
# number of evaluations.
STOP_METRIC = 'Bleu_4' # Metric for the stop.
# Model parameters
MODEL_TYPE = 'Transformer' # Model to train. See model_zoo.py for more info.
# Supported architectures: 'AttentionRNNEncoderDecoder' and 'Transformer'.
# Hyperparameters common to all models
# # # # # # # # # # # # # # # # # # # # # # # #
TRAINABLE_ENCODER = True # Whether the encoder's weights should be modified during training.
TRAINABLE_DECODER = True # Whether the decoder's weights should be modified during training.
# Initializers (see keras/initializations.py).
INIT_FUNCTION = 'glorot_uniform' # General initialization function for matrices.
INNER_INIT = 'orthogonal' # Initialization function for inner RNN matrices.
INIT_ATT = 'glorot_uniform' # Initialization function for attention mechism matrices
SOURCE_TEXT_EMBEDDING_SIZE = 512 # Source language word embedding size.
SRC_PRETRAINED_VECTORS = None # Path to pretrained vectors (e.g.: DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % SRC_LAN).
# Set to None if you don't want to use pretrained vectors.
# When using pretrained word embeddings. this parameter must match with the word embeddings size
SRC_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors.
TARGET_TEXT_EMBEDDING_SIZE = 512 # Source language word embedding size.
TRG_PRETRAINED_VECTORS = None # Path to pretrained vectors. (e.g. DATA_ROOT_PATH + '/DATA/word2vec.%s.npy' % TRG_LAN)
# Set to None if you don't want to use pretrained vectors.
# When using pretrained word embeddings, the size of the pretrained word embeddings must match with the word embeddings size.
TRG_PRETRAINED_VECTORS_TRAINABLE = True # Finetune or not the target word embedding vectors.
SCALE_SOURCE_WORD_EMBEDDINGS = True # Scale source word embeddings by Sqrt(SOURCE_TEXT_EMBEDDING_SIZE)
SCALE_TARGET_WORD_EMBEDDINGS = True # Scale target word embeddings by Sqrt(TARGET_TEXT_EMBEDDING_SIZE)
N_LAYERS_ENCODER = 6 # Stack this number of encoding layers.
N_LAYERS_DECODER = 6 # Stack this number of decoding layers.
# Additional Fully-Connected layers applied before softmax.
# Here we should specify the activation function and the output dimension.
# (e.g DEEP_OUTPUT_LAYERS = [('tanh', 600), ('relu', 400), ('relu', 200)])
DEEP_OUTPUT_LAYERS = [('linear', TARGET_TEXT_EMBEDDING_SIZE)]
# # # # # # # # # # # # # # # # # # # # # # # #
# AttentionRNNEncoderDecoder model hyperparameters
# # # # # # # # # # # # # # # # # # # # # # # #
ENCODER_RNN_TYPE = 'LSTM' # Encoder's RNN unit type ('LSTM' and 'GRU' supported).
USE_CUDNN = True # Use CuDNN's implementation of GRU and LSTM (only for Tensorflow backend).
DECODER_RNN_TYPE = 'ConditionalLSTM' # Decoder's RNN unit type.
# ('LSTM', 'GRU', 'ConditionalLSTM' and 'ConditionalGRU' supported).
ATTENTION_MODE = 'add' # Attention mode. 'add' (Bahdanau-style) or 'dot' (Luong-style).
# Encoder configuration
ENCODER_HIDDEN_SIZE = 512 # For models with RNN encoder.
BIDIRECTIONAL_ENCODER = True # Use bidirectional encoder.
BIDIRECTIONAL_DEEP_ENCODER = True # Use bidirectional encoder in all encoding layers.
BIDIRECTIONAL_MERGE_MODE = 'concat' # Merge function for bidirectional layers.
# Fully-Connected layers for initializing the first decoder RNN state.
# Here we should only specify the activation function of each layer (as they have a potentially fixed size)
# (e.g INIT_LAYERS = ['tanh', 'relu'])
INIT_LAYERS = ['tanh']
# Decoder configuration
DECODER_HIDDEN_SIZE = 512 # For models with RNN decoder.
ATTENTION_SIZE = DECODER_HIDDEN_SIZE
# Skip connections parameters
SKIP_VECTORS_HIDDEN_SIZE = TARGET_TEXT_EMBEDDING_SIZE # Hidden size.
ADDITIONAL_OUTPUT_MERGE_MODE = 'Add' # Merge mode for the skip-connections (see keras.layers.merge.py).
SKIP_VECTORS_SHARED_ACTIVATION = 'tanh' # Activation for the skip vectors.
# # # # # # # # # # # # # # # # # # # # # # # #
# Transformer model hyperparameters
# # # # # # # # # # # # # # # # # # # # # # # #
MODEL_SIZE = 512 # Transformer model size (d_{model} in de paper).
MULTIHEAD_ATTENTION_ACTIVATION = 'linear' # Activation the input projections in the Multi-Head Attention blocks.
FF_SIZE = MODEL_SIZE * 4 # Size of the feed-forward layers of the Transformer model.
N_HEADS = 8 # Number of parallel attention layers of the Transformer model.
# # # # # # # # # # # # # # # # # # # # # # # #
# Regularizers
REGULARIZATION_FN = 'L2' # Regularization function. 'L1', 'L2' and 'L1_L2' supported.
WEIGHT_DECAY = 1e-4 # L2 regularization
RECURRENT_WEIGHT_DECAY = 0. # L2 regularization in recurrent layers
DROPOUT_P = 0.1 # Percentage of units to drop (0 means no dropout).
RECURRENT_INPUT_DROPOUT_P = 0. # Percentage of units to drop in input cells of recurrent layers.
RECURRENT_DROPOUT_P = 0. # Percentage of units to drop in recurrent layers.
ATTENTION_DROPOUT_P = 0.1 # Percentage of units to drop in attention layers (0 means no dropout).
USE_NOISE = True # Use gaussian noise during training
NOISE_AMOUNT = 0.01 # Amount of noise
USE_BATCH_NORMALIZATION = True # If True it is recommended to deactivate Dropout
BATCH_NORMALIZATION_MODE = 1 # See documentation in Keras' BN
USE_PRELU = False # use PReLU activations as regularizer.
USE_L1 = False # L1 normalization on the features.
USE_L2 = False # L2 normalization on the features.
DOUBLE_STOCHASTIC_ATTENTION_REG = 0.0 # Doubly stochastic attention (Eq. 14 from arXiv:1502.03044)
# Results plot and models storing parameters.
EXTRA_NAME = '' # This will be appended to the end of the model name.
if MODEL_TYPE == 'AttentionRNNEncoderDecoder':
MODEL_NAME = TASK_NAME + '_' + SRC_LAN + TRG_LAN + '_' + MODEL_TYPE + \
'_src_emb_' + str(SOURCE_TEXT_EMBEDDING_SIZE) + \
'_bidir_' + str(BIDIRECTIONAL_ENCODER) + \
'_enc_' + ENCODER_RNN_TYPE + '_' + str(ENCODER_HIDDEN_SIZE) + \
'_dec_' + DECODER_RNN_TYPE + '_' + str(DECODER_HIDDEN_SIZE) + \
'_deepout_' + '_'.join([layer[0] for layer in DEEP_OUTPUT_LAYERS]) + \
'_trg_emb_' + str(TARGET_TEXT_EMBEDDING_SIZE) + \
'_' + OPTIMIZER + '_' + str(LR)
elif MODEL_TYPE == 'Transformer':
MODEL_NAME = TASK_NAME + '_' + SRC_LAN + TRG_LAN + '_' + MODEL_TYPE + \
'_model_size_' + str(MODEL_SIZE) + \
'_ff_size_' + str(FF_SIZE) + \
'_num_heads_' + str(N_HEADS) + \
'_encoder_blocks_' + str(N_LAYERS_ENCODER) + \
'_decoder_blocks_' + str(N_LAYERS_DECODER) + \
'_deepout_' + '_'.join([layer[0] for layer in DEEP_OUTPUT_LAYERS]) + \
'_' + OPTIMIZER + '_' + str(LR)
else:
MODEL_NAME = TASK_NAME + '_' + SRC_LAN + TRG_LAN + '_' +\
MODEL_TYPE + '_' + OPTIMIZER + '_' + str(LR)
MODEL_NAME += EXTRA_NAME
STORE_PATH = 'trained_models/' + MODEL_NAME + '/' # Models and evaluation results will be stored here.
DATASET_STORE_PATH = 'datasets/' # Dataset instance will be stored here.
# Tensorboard configuration. Only if the backend is Tensorflow. Otherwise, it will be ignored.
TENSORBOARD = True # Switches On/Off the tensorboard callback.
LOG_DIR = 'tensorboard_logs'                     # Directory to store the Tensorboard logs. Will be created inside STORE_PATH.
EMBEDDINGS_FREQ = 1 # Frequency (in epochs) at which selected embedding layers will be saved.
SAMPLING_SAVE_MODE = 'list' # 'list': Store in a text file, one sentence per line.
VERBOSE = 1 # Verbosity level.
RELOAD = 0                                        # If 0 start training from scratch, otherwise the model
                                                  # saved on epoch 'RELOAD' will be used.
RELOAD_EPOCH = True # Select whether we reload epoch or update number.
REBUILD_DATASET = True # Build again or use stored instance.
MODE = 'training' # 'training' or 'sampling' (if 'sampling' then RELOAD must
# be greater than 0 and EVAL_ON_SETS will be used).
# Extra parameters for special trainings. In most cases, they should be set to `False`
TRAIN_ON_TRAINVAL = False # train the model on both training and validation sets combined.
FORCE_RELOAD_VOCABULARY = False # force building a new vocabulary from the training samples
# applicable if RELOAD > 1
# ================================================ #
parameters = locals().copy()
return parameters
| mit | 2,040,581,547,170,478,800 | 71.360759 | 175 | 0.539578 | false |
intuition-io/insights | insights/plugins/hipchat.py | 1 | 2386 | # -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
hipchat Bot
-----------
:copyright (c) 2014 Xavier Bruhiere.
:license: Apache 2.0, see LICENSE for more details.
'''
import os
import requests
import dna.logging
log = dna.logging.logger(__name__)
class Bot(object):
'''
Hipchat api client that sends notifications to a specified room
Doc: https://www.hipchat.com/docs/api
'''
api_key = os.environ.get('HIPCHAT_API')
api_url = 'https://api.hipchat.com/v1'
name = 'Intuition Bot'
bg_color = 'green'
intro = 'Hey guys, I detected an opportunity'
def __init__(self, room_id, name=None, api_key=None):
self.room_id = room_id
if api_key:
self.api_key = api_key
if name:
self.name = name
def _test_token(self):
''' TODO '''
pass
def _api_call(self, path, data={}, http_method=requests.get):
''' Process an http call against the hipchat api '''
log.info('performing api request', path=path)
response = http_method('/'.join([self.api_url, path]),
params={'auth_token': self.api_key},
data=data)
log.debug('{} remaining calls'.format(
response.headers['x-ratelimit-remaining']))
return response.json()
def message(self, body, room_id, style='text'):
''' Send a message to the given room '''
# TODO Automatically detect body format ?
path = 'rooms/message'
data = {
'room_id': room_id,
'message': body,
'from': self.name,
'notify': 1,
'message_format': style,
'color': self.bg_color
}
log.info('sending message to hipchat', message=body, room=room_id)
feedback = self._api_call(path, data, requests.post)
log.debug(feedback)
return feedback
def notify(self, datetime, orderbook):
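        # `orderbook` maps a sid to the quantity to trade; render it as an
        # HTML list and post it to the configured room.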
# TODO Same flood security as mobile
if orderbook:
body = '<strong>{} - {}</strong><ul><li>{}</li></ul>'.format(
datetime,
self.intro,
'</li><li>'.join(
['{}: {}'.format(sid, quantity)
for sid, quantity in orderbook.iteritems()])
)
self.message(body, self.room_id, style='html')
| apache-2.0 | -3,800,319,069,206,980,000 | 28.825 | 74 | 0.532272 | false |
Valeureux/wezer-exchange | __unreviewed__/community_project/community_project.py | 1 | 2428 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron and Valeureux Copyright Valeureux.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class GroupsView(orm.Model):
"""
Add group in user simplified form
"""
_inherit = 'res.groups'
def get_simplified_groups_by_application(self, cr, uid, context=None):
""" return all groups classified by application (module category),
as a list of pairs: [(app, kind, [group, ...]), ...],
where app and group are browse records, and kind is either 'boolean'
or 'selection'. Applications are given in sequence order. If kind is
'selection', the groups are given in reverse implication order.
"""
model = self.pool.get('ir.model.data')
res = super(GroupsView, self).get_simplified_groups_by_application(
cr, uid, context=context
)
# We need to catch the exception for the community module installation,
# the records are not created at this point
try:
category = model.get_object(
cr, uid, 'base', 'module_category_project_management'
)
group_project_user = model.get_object(
cr, uid, 'project', 'group_project_user'
)
group_project_manager = model.get_object(
cr, uid, 'project', 'group_project_manager'
)
res.append((
category, 'selection',
[group_project_user, group_project_manager]
))
except ValueError:
pass
return res
| agpl-3.0 | 9,197,470,633,125,033,000 | 36.353846 | 79 | 0.583196 | false |
tensorflow/federated | tensorflow_federated/experimental/python/learning/jax_components.py | 1 | 5080 | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental federated learning components for JAX."""
import collections
import jax
import numpy as np
from tensorflow_federated.experimental.python.core.api import computations as experimental_computations
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import iterative_process
# TODO(b/175888145): Evolve this to reach parity with TensorFlow-specific helper
# and eventually unify the two.
def build_jax_federated_averaging_process(batch_type, model_type, loss_fn,
step_size):
"""Constructs an iterative process that implements simple federated averaging.
Args:
batch_type: An instance of `tff.Type` that represents the type of a single
batch of data to use for training. This type should be constructed with
standard Python containers (such as `collections.OrderedDict`) of the sort
that are expected as parameters to `loss_fn`.
model_type: An instance of `tff.Type` that represents the type of the model.
      Similarly to `batch_type`, this type should be constructed with standard
Python containers (such as `collections.OrderedDict`) of the sort that are
expected as parameters to `loss_fn`.
loss_fn: A loss function for the model. Must be a Python function that takes
two parameters, one of them being the model, and the other being a single
batch of data (with types matching `batch_type` and `model_type`).
step_size: The step size to use during training (an `np.float32`).
Returns:
An instance of `tff.templates.IterativeProcess` that implements federated
training in JAX.
"""
batch_type = computation_types.to_type(batch_type)
model_type = computation_types.to_type(model_type)
py_typecheck.check_type(batch_type, computation_types.Type)
py_typecheck.check_type(model_type, computation_types.Type)
py_typecheck.check_callable(loss_fn)
py_typecheck.check_type(step_size, np.float)
def _tensor_zeros(tensor_type):
return jax.numpy.zeros(
tensor_type.shape.dims, dtype=tensor_type.dtype.as_numpy_dtype)
@experimental_computations.jax_computation
def _create_zero_model():
model_zeros = structure.map_structure(_tensor_zeros, model_type)
return type_conversions.type_to_py_container(model_zeros, model_type)
@computations.federated_computation
def _create_zero_model_on_server():
return intrinsics.federated_eval(_create_zero_model, placements.SERVER)
def _apply_update(model_param, param_delta):
return model_param - step_size * param_delta
@experimental_computations.jax_computation(model_type, batch_type)
def _train_on_one_batch(model, batch):
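    # One local gradient step: differentiate loss_fn w.r.t. the model with
    # jax.api.grad, apply params - step_size * grads, and repack the result
    # into the original model structure.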
params = structure.flatten(structure.from_container(model, recursive=True))
grads = structure.flatten(
structure.from_container(jax.api.grad(loss_fn)(model, batch)))
updated_params = [_apply_update(x, y) for (x, y) in zip(params, grads)]
trained_model = structure.pack_sequence_as(model_type, updated_params)
return type_conversions.type_to_py_container(trained_model, model_type)
local_dataset_type = computation_types.SequenceType(batch_type)
@computations.federated_computation(model_type, local_dataset_type)
def _train_on_one_client(model, batches):
return intrinsics.sequence_reduce(batches, model, _train_on_one_batch)
@computations.federated_computation(
computation_types.FederatedType(model_type, placements.SERVER),
computation_types.FederatedType(local_dataset_type, placements.CLIENTS))
def _train_one_round(model, federated_data):
locally_trained_models = intrinsics.federated_map(
_train_on_one_client,
collections.OrderedDict([('model',
intrinsics.federated_broadcast(model)),
('batches', federated_data)]))
return intrinsics.federated_mean(locally_trained_models)
return iterative_process.IterativeProcess(
initialize_fn=_create_zero_model_on_server, next_fn=_train_one_round)
| apache-2.0 | 357,904,846,135,631,740 | 46.037037 | 103 | 0.745276 | false |
kcl-ddh/chopin-online | ocve/imagetools.py | 1 | 5457 | import re
import urllib
from ocve.models import PageLegacy, SourceLegacy
from ocve.models import PageImage
from django.conf import settings
from django.utils.html import escape
import logging
__author__ = 'Elliot'
logger = logging.getLogger(__name__)
def buildOldPath(pi):
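    # Rebuild the legacy jp2 path of a PageImage from its PageLegacy record:
    # CFEO images derive the path from the filename prefix, OCVE images from
    # storageStructure combined with the witness key of the source.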
p = pi.page
pl = PageLegacy.objects.get(pageimage=pi)
oldPath = 'ERRor'
if pl.cfeoKey > 0:
path = re.search("(.*?)_(.*)", pl.filename)
if path is not None:
oldPath = path.group(1) + "/" + path.group(1) + "_" + path.group(2) + ".jp2"
elif pl.storageStructure is not None:
path = re.search("(\d+)\/.*?\/(.*)", pl.storageStructure)
if path is not None:
sl = SourceLegacy.objects.get(source=p.sourcecomponent.source)
oldPath = path.group(1) + "/" + str(sl.witnessKey) + "/" + path.group(2) + ".jp2"
return oldPath
#Use the iip server to get width/height of an image
#Param full url to the image in iip format
def getImageDimensions(fullurl):
meta = urllib.urlopen(fullurl+ '&obj=IIP,1.0&obj=Max-size&obj=Tile-size&obj=Resolution-number')
dimensions={'width':0,'height':0}
for line in meta.readlines():
m = re.search("Max-size:\s*(\d+)\s*(\d+)", line)
if m is not None:
width = int(m.group(1))
height = int(m.group(2))
dimensions['width']=width
dimensions['height']=height
if dimensions['width'] == 0:
logger.error('Image at '+fullurl+' not found')
return dimensions
#Uses the iip server to make sure the dimensions stored in the db are correct
#pi=pageimage to check
def verifyImageDimensions(pi, oldPath):
found=0
try:
fullurl = settings.IMAGE_SERVER_URL + '?FIF='
fullurl = fullurl + oldPath
dimensions=getImageDimensions(fullurl)
if dimensions['width'] >0:
if pi.width != dimensions['width'] or pi.height != dimensions['height']:
pi.width = dimensions['width']
pi.height= dimensions['height']
pi.permissionnote = ''
pl=PageLegacy.objects.filter(pageimage=pi)
if pl.count() >0:
if pl[0].jp2 == 'UNVERIFIED':
pl[0].jp2=oldPath
pl[0].save()
pi.save()
found=1
except IOError:
print("Could not contact server at "+fullurl)
return found
#Request image information from the iip server
#to verify image paths are correct
#http://ocve2-stg.cch.kcl.ac.uk/iip/iipsrv.fcgi?FIF=jp2/ocvejp2-proc/20/1/01TP/20-1-BH_GBLbl_p01TP.jp2&obj=IIP,1.0&obj=Max-size&obj=Tile-size&obj=Resolution-number
#iipsrv.fcgi?FIF=jp2/ocvejp2-proc/20/1/01TP/20-1-BH_GBLbl_p01TP.jp2&obj=IIP,1.0&obj=Max-size&obj=Tile-size&obj=Resolution-number
#jp2/ocvejp2-proc/20/1/02B/20-1-BH_GBLbl_p02B.jp2
def verifyImagesViaIIP():
log = '<html><head>IMAGE REPORT</head><body><ul>'
fullurl = settings.IMAGE_SERVER_URL + '?FIF=jp2/' #'http://ocve2-stg.cch.kcl.ac.uk/iip/iipsrv.fcgi?FIF=jp2/'
allpages = PageImage.objects.filter(pagelegacy__jp2='UNVERIFIED')
count=0
for pi in allpages:
#build old path
oldPath = buildOldPath(pi)
fullurl = settings.IMAGE_SERVER_URL + '?FIF=jp2/' #'http://ocve2-stg.cch.kcl.ac.uk/iip/iipsrv.fcgi?FIF=jp2/'
        #Request image information from iip
pl = PageLegacy.objects.get(pageimage=pi)
if pl.cfeoKey > 0:
fullurl = 'jp2/cfeojp2-proc/' + oldPath + '&obj=IIP,1.0&obj=Max-size'
else:
fullurl = 'jp2/ocvejp2-proc/' + oldPath + '&obj=IIP,1.0&obj=Max-size'
meta = urllib.urlopen(fullurl)
# found=0
# for line in meta.readlines():
# m = re.search("Max-size:\s*(\d+)\s*(\d+)", line)
# if m is not None:
# found=1
        found = verifyImageDimensions(pi, oldPath)
        if found == 0:
found=0
if pl.cfeoKey > 0:
#Check the _loose directory, they might be in there
pi.width=0
verifyImageDimensions(pi,'/_loose/'+pl.filename+'.jp2')
if pi.width>0:
pl.jp2='cfeojp2-proc/_loose/'+pl.filename+'.jp2'
if pl.storageStructure is None:
pl.storageStructure=''
pl.save()
found=1
#log=log+'<li>FOUND IN _loose: '+s.label+': '+pi.page.label+' key:'+str(pi.id)+' at path '+oldPath+':'+pl.filename+'</li>'
            if found == 0:
#Image not found, write to log
s=pi.page.sourcecomponent.source
print str(pi.id)+' not found'
try:
log=log+'<li>'+s.label+': '+pi.page.label+' key:'+str(pi.id)+' at path '+oldPath+':'+pl.filename+'</li>'
except TypeError:
log=log+'<li> key:'+str(pi.id)+' </li>'
count+=1
else:
#Record correct path in pagelegacy.jp2
if pl.cfeoKey > 0:
pl.jp2='cfeojp2-proc/' + oldPath
else:
pl.jp2='ocvejp2-proc/' + oldPath
if pl.storageStructure is None:
pl.storageStructure=''
pl.save()
return log + '</ul><h2>Total: ' + str(count) + '</h2></body>'
| gpl-3.0 | -8,405,203,046,656,316,000 | 40.302326 | 163 | 0.550852 | false |
jptomo/rpython-lang-scheme | rpython/jit/codewriter/test/test_support.py | 1 | 5479 | import py, sys
from rpython.rtyper.lltypesystem import lltype
from rpython.rtyper.annlowlevel import llstr
from rpython.flowspace.model import Variable, Constant, SpaceOperation
from rpython.jit.codewriter.support import decode_builtin_call, LLtypeHelpers
from rpython.jit.codewriter.support import _ll_1_int_abs
def newconst(x):
return Constant(x, lltype.typeOf(x))
def voidconst(x):
return Constant(x, lltype.Void)
# ____________________________________________________________
def test_decode_builtin_call_nomethod():
def myfoobar(i, marker, c):
assert marker == 'mymarker'
return i * ord(c)
myfoobar.oopspec = 'foobar(2, c, i)'
TYPE = lltype.FuncType([lltype.Signed, lltype.Void, lltype.Char],
lltype.Signed)
fnobj = lltype.functionptr(TYPE, 'foobar', _callable=myfoobar)
vi = Variable('i')
vi.concretetype = lltype.Signed
vc = Variable('c')
vc.concretetype = lltype.Char
v_result = Variable('result')
v_result.concretetype = lltype.Signed
op = SpaceOperation('direct_call', [newconst(fnobj),
vi,
voidconst('mymarker'),
vc],
v_result)
oopspec, opargs = decode_builtin_call(op)
assert oopspec == 'foobar'
assert opargs == [newconst(2), vc, vi]
#impl = runner.get_oopspec_impl('foobar', lltype.Signed)
#assert impl(2, 'A', 5) == 5 * ord('A')
def test_decode_builtin_call_method():
A = lltype.GcArray(lltype.Signed)
def myfoobar(a, i, marker, c):
assert marker == 'mymarker'
return a[i] * ord(c)
myfoobar.oopspec = 'spam.foobar(a, 2, c, i)'
TYPE = lltype.FuncType([lltype.Ptr(A), lltype.Signed,
lltype.Void, lltype.Char],
lltype.Signed)
fnobj = lltype.functionptr(TYPE, 'foobar', _callable=myfoobar)
vi = Variable('i')
vi.concretetype = lltype.Signed
vc = Variable('c')
vc.concretetype = lltype.Char
v_result = Variable('result')
v_result.concretetype = lltype.Signed
myarray = lltype.malloc(A, 10)
myarray[5] = 42
op = SpaceOperation('direct_call', [newconst(fnobj),
newconst(myarray),
vi,
voidconst('mymarker'),
vc],
v_result)
oopspec, opargs = decode_builtin_call(op)
assert oopspec == 'spam.foobar'
assert opargs == [newconst(myarray), newconst(2), vc, vi]
#impl = runner.get_oopspec_impl('spam.foobar', lltype.Ptr(A))
#assert impl(myarray, 2, 'A', 5) == 42 * ord('A')
def test_streq_slice_checknull():
p1 = llstr("hello world")
p2 = llstr("wor")
func = LLtypeHelpers._ll_4_str_eq_slice_checknull.im_func
assert func(p1, 6, 3, p2) == True
assert func(p1, 6, 2, p2) == False
assert func(p1, 5, 3, p2) == False
assert func(p1, 2, 1, llstr(None)) == False
def test_streq_slice_nonnull():
p1 = llstr("hello world")
p2 = llstr("wor")
func = LLtypeHelpers._ll_4_str_eq_slice_nonnull.im_func
assert func(p1, 6, 3, p2) == True
assert func(p1, 6, 2, p2) == False
assert func(p1, 5, 3, p2) == False
py.test.raises(AttributeError, func, p1, 2, 1, llstr(None))
def test_streq_slice_char():
p1 = llstr("hello world")
func = LLtypeHelpers._ll_4_str_eq_slice_char.im_func
assert func(p1, 6, 3, "w") == False
assert func(p1, 6, 0, "w") == False
assert func(p1, 6, 1, "w") == True
assert func(p1, 6, 1, "x") == False
def test_streq_nonnull():
p1 = llstr("wor")
p2 = llstr("wor")
assert p1 != p2
func = LLtypeHelpers._ll_2_str_eq_nonnull.im_func
assert func(p1, p1) == True
assert func(p1, p2) == True
assert func(p1, llstr("wrl")) == False
assert func(p1, llstr("world")) == False
assert func(p1, llstr("w")) == False
py.test.raises(AttributeError, func, p1, llstr(None))
py.test.raises(AttributeError, func, llstr(None), p2)
def test_streq_nonnull_char():
func = LLtypeHelpers._ll_2_str_eq_nonnull_char.im_func
assert func(llstr("wor"), "x") == False
assert func(llstr("w"), "x") == False
assert func(llstr(""), "x") == False
assert func(llstr("x"), "x") == True
py.test.raises(AttributeError, func, llstr(None), "x")
def test_streq_checknull_char():
func = LLtypeHelpers._ll_2_str_eq_checknull_char.im_func
assert func(llstr("wor"), "x") == False
assert func(llstr("w"), "x") == False
assert func(llstr(""), "x") == False
assert func(llstr("x"), "x") == True
assert func(llstr(None), "x") == False
def test_streq_lengthok():
p1 = llstr("wor")
p2 = llstr("wor")
assert p1 != p2
func = LLtypeHelpers._ll_2_str_eq_lengthok.im_func
assert func(p1, p1) == True
assert func(p1, p2) == True
assert func(p1, llstr("wrl")) == False
py.test.raises(IndexError, func, p1, llstr("w"))
py.test.raises(AttributeError, func, p1, llstr(None))
py.test.raises(AttributeError, func, llstr(None), p2)
def test_int_abs():
assert _ll_1_int_abs(0) == 0
assert _ll_1_int_abs(1) == 1
assert _ll_1_int_abs(10) == 10
assert _ll_1_int_abs(sys.maxint) == sys.maxint
assert _ll_1_int_abs(-1) == 1
assert _ll_1_int_abs(-10) == 10
assert _ll_1_int_abs(-sys.maxint) == sys.maxint
| mit | 7,485,920,560,243,705,000 | 36.786207 | 77 | 0.580215 | false |
bhupennewalkar1337/erpnext | erpnext/utilities/transaction_base.py | 1 | 5809 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.share
from frappe import _
from frappe.utils import cstr, now_datetime, cint, flt
from erpnext.controllers.status_updater import StatusUpdater
class UOMMustBeIntegerError(frappe.ValidationError): pass
class TransactionBase(StatusUpdater):
def load_notification_message(self):
dt = self.doctype.lower().replace(" ", "_")
if int(frappe.db.get_value("Notification Control", None, dt) or 0):
self.set("__notification_message",
frappe.db.get_value("Notification Control", None, dt + "_message"))
def validate_posting_time(self):
if not self.posting_time:
self.posting_time = now_datetime().strftime('%H:%M:%S')
def add_calendar_event(self, opts, force=False):
if cstr(self.contact_by) != cstr(self._prev.contact_by) or \
cstr(self.contact_date) != cstr(self._prev.contact_date) or force:
self.delete_events()
self._add_calendar_event(opts)
def delete_events(self):
events = frappe.db.sql_list("""select name from `tabEvent`
where ref_type=%s and ref_name=%s""", (self.doctype, self.name))
if events:
frappe.db.sql("delete from `tabEvent` where name in (%s)"
.format(", ".join(['%s']*len(events))), tuple(events))
frappe.db.sql("delete from `tabEvent Role` where parent in (%s)"
.format(", ".join(['%s']*len(events))), tuple(events))
def _add_calendar_event(self, opts):
opts = frappe._dict(opts)
if self.contact_date:
event = frappe.get_doc({
"doctype": "Event",
"owner": opts.owner or self.owner,
"subject": opts.subject,
"description": opts.description,
"starts_on": self.contact_date,
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
})
event.insert(ignore_permissions=True)
if frappe.db.exists("User", self.contact_by):
frappe.share.add("Event", event.name, self.contact_by,
flags={"ignore_share_permission": True})
def validate_uom_is_integer(self, uom_field, qty_fields):
validate_uom_is_integer(self, uom_field, qty_fields)
def validate_with_previous_doc(self, ref):
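		# `ref` maps a reference doctype to a dict with:
		#   ref_dn_field   - field on this document's items linking to that doctype
		#   compare_fields - list of (fieldname, condition) pairs validated
		#                    against the referenced document
		#   is_child_table / allow_duplicate_prev_row_id - optional flags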
for key, val in ref.items():
is_child = val.get("is_child_table")
ref_doc = {}
item_ref_dn = []
for d in self.get_all_children(self.doctype + " Item"):
ref_dn = d.get(val["ref_dn_field"])
if ref_dn:
if is_child:
self.compare_values({key: [ref_dn]}, val["compare_fields"], d)
if ref_dn not in item_ref_dn:
item_ref_dn.append(ref_dn)
elif not val.get("allow_duplicate_prev_row_id"):
frappe.throw(_("Duplicate row {0} with same {1}").format(d.idx, key))
elif ref_dn:
ref_doc.setdefault(key, [])
if ref_dn not in ref_doc[key]:
ref_doc[key].append(ref_dn)
if ref_doc:
self.compare_values(ref_doc, val["compare_fields"])
def compare_values(self, ref_doc, fields, doc=None):
for reference_doctype, ref_dn_list in ref_doc.items():
for reference_name in ref_dn_list:
prevdoc_values = frappe.db.get_value(reference_doctype, reference_name,
[d[0] for d in fields], as_dict=1)
if not prevdoc_values:
frappe.throw(_("Invalid reference {0} {1}").format(reference_doctype, reference_name))
for field, condition in fields:
if prevdoc_values[field] is not None:
self.validate_value(field, condition, prevdoc_values[field], doc)
def validate_rate_with_reference_doc(self, ref_details):
for ref_dt, ref_dn_field, ref_link_field in ref_details:
for d in self.get("items"):
if d.get(ref_link_field):
ref_rate = frappe.db.get_value(ref_dt + " Item", d.get(ref_link_field), "rate")
if abs(flt(d.rate - ref_rate, d.precision("rate"))) >= .01:
frappe.throw(_("Row #{0}: Rate must be same as {1}: {2} ({3} / {4}) ")
.format(d.idx, ref_dt, d.get(ref_dn_field), d.rate, ref_rate))
def get_link_filters(self, for_doctype):
if hasattr(self, "prev_link_mapper") and self.prev_link_mapper.get(for_doctype):
fieldname = self.prev_link_mapper[for_doctype]["fieldname"]
values = filter(None, tuple([item.as_dict()[fieldname] for item in self.items]))
if values:
ret = {
for_doctype : {
"filters": [[for_doctype, "name", "in", values]]
}
}
else:
ret = None
else:
ret = None
return ret
def delink_advance_entries(self, linked_doc_name):
total_allocated_amount = 0
for adv in self.advances:
consider_for_total_advance = True
if adv.reference_name == linked_doc_name:
frappe.db.sql("""delete from `tab{0} Advance`
where name = %s""".format(self.doctype), adv.name)
consider_for_total_advance = False
if consider_for_total_advance:
total_allocated_amount += flt(adv.allocated_amount, adv.precision("allocated_amount"))
frappe.db.set_value(self.doctype, self.name, "total_advance", total_allocated_amount, update_modified=False)
def delete_events(ref_type, ref_name):
frappe.delete_doc("Event", frappe.db.sql_list("""select name from `tabEvent`
where ref_type=%s and ref_name=%s""", (ref_type, ref_name)), for_reload=True)
def validate_uom_is_integer(doc, uom_field, qty_fields, child_dt=None):
if isinstance(qty_fields, basestring):
qty_fields = [qty_fields]
distinct_uoms = list(set([d.get(uom_field) for d in doc.get_all_children()]))
integer_uoms = filter(lambda uom: frappe.db.get_value("UOM", uom,
"must_be_whole_number") or None, distinct_uoms)
if not integer_uoms:
return
for d in doc.get_all_children(parenttype=child_dt):
if d.get(uom_field) in integer_uoms:
for f in qty_fields:
if d.get(f):
if cint(d.get(f))!=d.get(f):
frappe.throw(_("Quantity cannot be a fraction in row {0}").format(d.idx), UOMMustBeIntegerError)
| gpl-3.0 | -7,810,579,147,257,779,000 | 34.638037 | 110 | 0.667413 | false |
NeCTAR-RC/heat | heat/common/config.py | 1 | 12042 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Routines for configuring Heat
"""
import copy
import logging as sys_logging
import os
from eventlet.green import socket
from oslo.config import cfg
from heat.common import wsgi
from heat.openstack.common import log as logging
from heat.openstack.common import rpc
logger = logging.getLogger(__name__)
paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [
cfg.StrOpt('flavor',
help=_("The flavor to use.")),
cfg.StrOpt('api_paste_config', default="api-paste.ini",
help=_("The API paste config file to use."))]
service_opts = [
cfg.IntOpt('periodic_interval',
default=60,
help='Seconds between running periodic tasks.'),
cfg.StrOpt('heat_metadata_server_url',
default="",
help='URL of the Heat metadata server.'),
cfg.StrOpt('heat_waitcondition_server_url',
default="",
help='URL of the Heat waitcondition server.'),
cfg.StrOpt('heat_watch_server_url',
default="",
help='URL of the Heat CloudWatch server.'),
cfg.StrOpt('instance_connection_is_secure',
default="0",
help='Instance connection to CFN/CW API via https.'),
cfg.StrOpt('instance_connection_https_validate_certificates',
default="1",
help='Instance connection to CFN/CW API validate certs if SSL '
'is used.'),
cfg.StrOpt('region_name_for_services',
default=None,
help='Default region name used to get services endpoints.'),
cfg.StrOpt('heat_stack_user_role',
default="heat_stack_user",
help='Keystone role for heat template-defined users.'),
cfg.StrOpt('stack_user_domain',
help='Keystone domain ID which contains heat template-defined '
'users.'),
cfg.StrOpt('stack_domain_admin',
help='Keystone username, a user with roles sufficient to '
'manage users and projects in the stack_user_domain.'),
cfg.StrOpt('stack_domain_admin_password',
help='Keystone password for stack_domain_admin user.'),
cfg.IntOpt('max_template_size',
default=524288,
help='Maximum raw byte size of any template.'),
cfg.IntOpt('max_nested_stack_depth',
default=3,
help='Maximum depth allowed when using nested stacks.')]
engine_opts = [
cfg.StrOpt('instance_user',
default='ec2-user',
help="The default user for new instances. This option "
"is deprecated and will be removed in the Juno release. "
"If it's empty, Heat will use the default user set up "
"with your cloud image (for OS::Nova::Server) or "
"'ec2-user' (for AWS::EC2::Instance)."),
cfg.StrOpt('instance_driver',
default='heat.engine.nova',
help='Driver to use for controlling instances.'),
cfg.ListOpt('plugin_dirs',
default=['/usr/lib64/heat', '/usr/lib/heat'],
help='List of directories to search for plug-ins.'),
cfg.StrOpt('environment_dir',
default='/etc/heat/environment.d',
help='The directory to search for environment files.'),
cfg.StrOpt('deferred_auth_method',
choices=['password', 'trusts'],
default='password',
help=_('Select deferred auth method, '
'stored password or trusts.')),
cfg.ListOpt('trusts_delegated_roles',
default=['heat_stack_owner'],
help=_('Subset of trustor roles to be delegated to heat.')),
cfg.IntOpt('max_resources_per_stack',
default=1000,
help='Maximum resources allowed per top-level stack.'),
cfg.IntOpt('max_stacks_per_tenant',
default=100,
help=_('Maximum number of stacks any one tenant may have'
' active at one time.')),
cfg.IntOpt('event_purge_batch_size',
default=10,
help=_('Controls how many events will be pruned whenever a '
' stack\'s events exceed max_events_per_stack. Set this'
' lower to keep more events at the expense of more'
' frequent purges.')),
cfg.IntOpt('max_events_per_stack',
default=1000,
help=_('Maximum events that will be available per stack. Older'
' events will be deleted when this is reached. Set to 0'
' for unlimited events per stack.')),
cfg.IntOpt('stack_action_timeout',
default=3600,
help=_('Timeout in seconds for stack action (ie. create or'
' update).')),
cfg.IntOpt('engine_life_check_timeout',
default=2,
help=_('RPC timeout for the engine liveness check that is used'
' for stack locking.')),
cfg.StrOpt('onready',
help=_('onready allows you to send a notification when the'
' heat processes are ready to serve. This is either a'
' module with the notify() method or a shell command. '
' To enable notifications with systemd, one may use'
' the \'systemd-notify --ready\' shell command or'
' the \'heat.common.systemd\' notification module.'))]
rpc_opts = [
cfg.StrOpt('host',
default=socket.gethostname(),
help='Name of the engine node. '
'This can be an opaque identifier. '
'It is not necessarily a hostname, FQDN, or IP address.')]
auth_password_group = cfg.OptGroup('auth_password')
auth_password_opts = [
cfg.BoolOpt('multi_cloud',
default=False,
help=_('Allow orchestration of multiple clouds.')),
cfg.ListOpt('allowed_auth_uris',
default=[],
help=_('Allowed keystone endpoints for auth_uri when '
'multi_cloud is enabled. At least one endpoint needs '
'to be specified.'))]
clients_opts = [
cfg.StrOpt('endpoint_type',
default='publicURL',
help=_(
'Type of endpoint in Identity service catalog to use '
'for communication with the OpenStack service.')),
cfg.StrOpt('ca_file',
help=_('Optional CA cert file to use in SSL connections.')),
cfg.StrOpt('cert_file',
help=_('Optional PEM-formatted certificate chain file.')),
cfg.StrOpt('key_file',
help=_('Optional PEM-formatted file that contains the '
'private key.')),
cfg.BoolOpt('insecure',
default=False,
help=_("If set, then the server's certificate will not "
"be verified."))]
def register_clients_opts():
cfg.CONF.register_opts(clients_opts, group='clients')
for client in ('nova', 'swift', 'neutron', 'cinder',
'ceilometer', 'keystone', 'heat', 'trove'):
client_specific_group = 'clients_' + client
# register opts copy and put it to globals in order to
# generate_sample.sh to work
opts_copy = copy.deepcopy(clients_opts)
if client == 'heat':
opts_copy.append(
cfg.StrOpt('url',
help=_('Optional heat url in format like'
' http://0.0.0.0:8004/v1/%(tenant_id)s.')))
globals()[client_specific_group + '_opts'] = opts_copy
cfg.CONF.register_opts(opts_copy, group=client_specific_group)
revision_group = cfg.OptGroup('revision')
revision_opts = [
cfg.StrOpt('heat_revision',
default='unknown',
help=_('Heat build revision. '
'If you would prefer to manage your build revision '
'separately, you can move this section to a different '
'file and add it as another config option.'))]
cfg.CONF.register_opts(engine_opts)
cfg.CONF.register_opts(service_opts)
cfg.CONF.register_opts(rpc_opts)
rpc.set_defaults(control_exchange='heat')
cfg.CONF.register_group(paste_deploy_group)
cfg.CONF.register_opts(paste_deploy_opts, group=paste_deploy_group)
cfg.CONF.register_group(auth_password_group)
cfg.CONF.register_opts(auth_password_opts, group=auth_password_group)
cfg.CONF.register_group(revision_group)
cfg.CONF.register_opts(revision_opts, group=revision_group)
register_clients_opts()
# A bit of history:
# This was added initially by jianingy, then it got added
# to oslo by Luis. Then it was recently removed from the
# default list again.
# I am not sure we can (or should) rely on oslo to keep
# our exceptions class in the defaults list.
allowed_rpc_exception_modules = cfg.CONF.allowed_rpc_exception_modules
allowed_rpc_exception_modules.append('heat.common.exception')
cfg.CONF.set_default(name='allowed_rpc_exception_modules',
default=allowed_rpc_exception_modules)
if cfg.CONF.instance_user:
logger.warn(_('The "instance_user" option in heat.conf is deprecated and '
'will be removed in the Juno release.'))
def _get_deployment_flavor():
"""
Retrieve the paste_deploy.flavor config item, formatted appropriately
for appending to the application name.
"""
flavor = cfg.CONF.paste_deploy.flavor
return '' if not flavor else ('-' + flavor)
def _get_deployment_config_file():
"""
Retrieve the deployment_config_file config item, formatted as an
absolute pathname.
"""
config_path = cfg.CONF.find_file(
cfg.CONF.paste_deploy['api_paste_config'])
if config_path is None:
return None
return os.path.abspath(config_path)
def load_paste_app(app_name=None):
"""
Builds and returns a WSGI app from a paste config file.
We assume the last config file specified in the supplied ConfigOpts
object is the paste config file.
:param app_name: name of the application to load
:raises RuntimeError when config file cannot be located or application
cannot be loaded from config file
"""
if app_name is None:
app_name = cfg.CONF.prog
# append the deployment flavor to the application name,
# in order to identify the appropriate paste pipeline
app_name += _get_deployment_flavor()
conf_file = _get_deployment_config_file()
if conf_file is None:
raise RuntimeError(_("Unable to locate config file"))
try:
app = wsgi.paste_deploy_app(conf_file, app_name, cfg.CONF)
# Log the options used when starting if we're in debug mode...
if cfg.CONF.debug:
cfg.CONF.log_opt_values(logging.getLogger(app_name),
sys_logging.DEBUG)
return app
except (LookupError, ImportError) as e:
raise RuntimeError(_("Unable to load %(app_name)s from "
"configuration file %(conf_file)s."
"\nGot: %(e)r") % {'app_name': app_name,
'conf_file': conf_file,
'e': e})
| apache-2.0 | -3,818,074,077,992,978,400 | 40.524138 | 78 | 0.590101 | false |
openstack/glance_store | glance_store/tests/unit/test_cinder_store.py | 1 | 19827 | # Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import hashlib
import os
from unittest import mock
import six
import socket
import sys
import tempfile
import time
import uuid
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_utils.secretutils import md5
from oslo_utils import units
from glance_store import exceptions
from glance_store import location
from glance_store.tests import base
from glance_store.tests.unit import test_store_capabilities
sys.modules['glance_store.common.fs_mount'] = mock.Mock()
from glance_store._drivers import cinder # noqa
class FakeObject(object):
def __init__(self, **kwargs):
for name, value in kwargs.items():
setattr(self, name, value)
class TestCinderStore(base.StoreBaseTest,
test_store_capabilities.TestStoreCapabilitiesChecking):
def setUp(self):
super(TestCinderStore, self).setUp()
self.store = cinder.Store(self.conf)
self.store.configure()
self.register_store_schemes(self.store, 'cinder')
self.store.READ_CHUNKSIZE = 4096
self.store.WRITE_CHUNKSIZE = 4096
fake_sc = [{u'endpoints': [{u'publicURL': u'http://foo/public_url'}],
u'endpoints_links': [],
u'name': u'cinder',
u'type': u'volumev3'}]
self.context = FakeObject(service_catalog=fake_sc,
user_id='fake_user',
auth_token='fake_token',
project_id='fake_project')
self.hash_algo = 'sha256'
cinder._reset_cinder_session()
def test_get_cinderclient(self):
cc = self.store.get_cinderclient(self.context)
self.assertEqual('fake_token', cc.client.auth.token)
self.assertEqual('http://foo/public_url', cc.client.auth.endpoint)
def _test_get_cinderclient_with_user_overriden(self):
self.config(cinder_store_user_name='test_user')
self.config(cinder_store_password='test_password')
self.config(cinder_store_project_name='test_project')
self.config(cinder_store_auth_address='test_address')
cc = self.store.get_cinderclient(self.context)
self.assertEqual('test_project', cc.client.session.auth.project_name)
self.assertEqual('Default', cc.client.session.auth.project_domain_name)
return cc
def test_get_cinderclient_with_user_overriden(self):
self._test_get_cinderclient_with_user_overriden()
def test_get_cinderclient_with_user_overriden_and_region(self):
self.config(cinder_os_region_name='test_region')
cc = self._test_get_cinderclient_with_user_overriden()
self.assertEqual('test_region', cc.client.region_name)
def test_temporary_chown(self):
class fake_stat(object):
st_uid = 1
with mock.patch.object(os, 'stat', return_value=fake_stat()), \
mock.patch.object(os, 'getuid', return_value=2), \
mock.patch.object(processutils, 'execute') as mock_execute, \
mock.patch.object(cinder.Store, 'get_root_helper',
return_value='sudo'):
with self.store.temporary_chown('test'):
pass
expected_calls = [mock.call('chown', 2, 'test', run_as_root=True,
root_helper='sudo'),
mock.call('chown', 1, 'test', run_as_root=True,
root_helper='sudo')]
self.assertEqual(expected_calls, mock_execute.call_args_list)
@mock.patch.object(time, 'sleep')
def test_wait_volume_status(self, mock_sleep):
fake_manager = FakeObject(get=mock.Mock())
volume_available = FakeObject(manager=fake_manager,
id='fake-id',
status='available')
volume_in_use = FakeObject(manager=fake_manager,
id='fake-id',
status='in-use')
fake_manager.get.side_effect = [volume_available, volume_in_use]
self.assertEqual(volume_in_use,
self.store._wait_volume_status(
volume_available, 'available', 'in-use'))
fake_manager.get.assert_called_with('fake-id')
mock_sleep.assert_called_once_with(0.5)
@mock.patch.object(time, 'sleep')
def test_wait_volume_status_unexpected(self, mock_sleep):
fake_manager = FakeObject(get=mock.Mock())
volume_available = FakeObject(manager=fake_manager,
id='fake-id',
status='error')
fake_manager.get.return_value = volume_available
self.assertRaises(exceptions.BackendException,
self.store._wait_volume_status,
volume_available, 'available', 'in-use')
fake_manager.get.assert_called_with('fake-id')
@mock.patch.object(time, 'sleep')
def test_wait_volume_status_timeout(self, mock_sleep):
fake_manager = FakeObject(get=mock.Mock())
volume_available = FakeObject(manager=fake_manager,
id='fake-id',
status='available')
fake_manager.get.return_value = volume_available
self.assertRaises(exceptions.BackendException,
self.store._wait_volume_status,
volume_available, 'available', 'in-use')
fake_manager.get.assert_called_with('fake-id')
def _test_open_cinder_volume(self, open_mode, attach_mode, error,
multipath_supported=False,
enforce_multipath=False,
encrypted_nfs=False):
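        # Drives Store._open_cinder_volume with a mocked cinderclient and
        # os-brick connector: the volume should be attached, the device
        # connected and later disconnected, and the volume detached again
        # (encrypted NFS-backed volumes are rejected and cleaned up instead).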
self.config(cinder_mount_point_base=None)
fake_volume = mock.MagicMock(id=str(uuid.uuid4()), status='available')
fake_volumes = FakeObject(get=lambda id: fake_volume,
detach=mock.Mock())
fake_client = FakeObject(volumes=fake_volumes)
_, fake_dev_path = tempfile.mkstemp(dir=self.test_dir)
fake_devinfo = {'path': fake_dev_path}
fake_connector = FakeObject(
connect_volume=mock.Mock(return_value=fake_devinfo),
disconnect_volume=mock.Mock())
@contextlib.contextmanager
def fake_chown(path, backend=None):
yield
def do_open():
with self.store._open_cinder_volume(
fake_client, fake_volume, open_mode):
if error:
raise error
def fake_factory(protocol, root_helper, **kwargs):
self.assertEqual(fake_volume.initialize_connection.return_value,
kwargs['conn'])
return fake_connector
root_helper = "sudo glance-rootwrap /etc/glance/rootwrap.conf"
with mock.patch.object(cinder.Store,
'_wait_volume_status',
return_value=fake_volume), \
mock.patch.object(cinder.Store, 'temporary_chown',
side_effect=fake_chown), \
mock.patch.object(cinder.Store, 'get_root_helper',
return_value=root_helper), \
mock.patch.object(connector.InitiatorConnector, 'factory',
side_effect=fake_factory) as fake_conn_obj:
with mock.patch.object(connector,
'get_connector_properties') as mock_conn:
if error:
self.assertRaises(error, do_open)
elif encrypted_nfs:
fake_volume.initialize_connection.return_value = {
'driver_volume_type': 'nfs'
}
fake_volume.encrypted = True
try:
with self.store._open_cinder_volume(
fake_client, fake_volume, open_mode):
pass
except exceptions.BackendException:
self.assertEqual(1,
fake_volume.unreserve.call_count)
self.assertEqual(1,
fake_volume.delete.call_count)
else:
do_open()
if not encrypted_nfs:
mock_conn.assert_called_once_with(
root_helper, socket.gethostname(),
multipath_supported, enforce_multipath)
fake_connector.connect_volume.assert_called_once_with(
mock.ANY)
fake_connector.disconnect_volume.assert_called_once_with(
mock.ANY, fake_devinfo)
fake_volume.attach.assert_called_once_with(
None, 'glance_store', attach_mode,
host_name=socket.gethostname())
fake_volumes.detach.assert_called_once_with(fake_volume)
fake_conn_obj.assert_called_once_with(
mock.ANY, root_helper, conn=mock.ANY,
use_multipath=multipath_supported)
def test_open_cinder_volume_rw(self):
self._test_open_cinder_volume('wb', 'rw', None)
def test_open_cinder_volume_ro(self):
self._test_open_cinder_volume('rb', 'ro', None)
def test_open_cinder_volume_error(self):
self._test_open_cinder_volume('wb', 'rw', IOError)
def test_open_cinder_volume_multipath_supported(self):
self.config(cinder_use_multipath=True)
self._test_open_cinder_volume('wb', 'rw', None,
multipath_supported=True)
def test_open_cinder_volume_enforce_multipath(self):
self.config(cinder_use_multipath=True)
self.config(cinder_enforce_multipath=True)
self._test_open_cinder_volume('wb', 'rw', None,
multipath_supported=True,
enforce_multipath=True)
def test_open_cinder_volume_nfs_encrypted(self):
self._test_open_cinder_volume('rb', 'ro', None, encrypted_nfs=True)
def test_cinder_configure_add(self):
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context, None)
self.assertRaises(exceptions.BadStoreConfiguration,
self.store._check_context,
FakeObject(service_catalog=None))
self.store._check_context(FakeObject(service_catalog='fake'))
def test_cinder_get(self):
expected_size = 5 * units.Ki
expected_file_contents = b"*" * expected_size
volume_file = six.BytesIO(expected_file_contents)
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
fake_volume = mock.MagicMock(id=fake_volume_uuid,
metadata={'image_size': expected_size},
status='available')
fake_volume.manager.get.return_value = fake_volume
fake_volumes = FakeObject(get=lambda id: fake_volume)
@contextlib.contextmanager
def fake_open(client, volume, mode):
self.assertEqual('rb', mode)
yield volume_file
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
mock.patch.object(self.store, '_open_cinder_volume',
side_effect=fake_open):
mock_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = "cinder://%s" % fake_volume_uuid
loc = location.get_location_from_uri(uri, conf=self.conf)
(image_file, image_size) = self.store.get(loc,
context=self.context)
expected_num_chunks = 2
data = b""
num_chunks = 0
for chunk in image_file:
num_chunks += 1
data += chunk
self.assertEqual(expected_num_chunks, num_chunks)
self.assertEqual(expected_file_contents, data)
def test_cinder_get_size(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
fake_volume = FakeObject(size=5, metadata={})
fake_volumes = {fake_volume_uuid: fake_volume}
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = 'cinder://%s' % fake_volume_uuid
loc = location.get_location_from_uri(uri, conf=self.conf)
image_size = self.store.get_size(loc, context=self.context)
self.assertEqual(fake_volume.size * units.Gi, image_size)
def test_cinder_get_size_with_metadata(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
expected_image_size = 4500 * units.Mi
fake_volume = FakeObject(size=5,
metadata={'image_size': expected_image_size})
fake_volumes = {fake_volume_uuid: fake_volume}
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = 'cinder://%s' % fake_volume_uuid
loc = location.get_location_from_uri(uri, conf=self.conf)
image_size = self.store.get_size(loc, context=self.context)
self.assertEqual(expected_image_size, image_size)
def _test_cinder_add(self, fake_volume, volume_file, size_kb=5,
verifier=None):
expected_image_id = str(uuid.uuid4())
expected_size = size_kb * units.Ki
expected_file_contents = b"*" * expected_size
image_file = six.BytesIO(expected_file_contents)
expected_checksum = md5(expected_file_contents,
usedforsecurity=False).hexdigest()
expected_multihash = hashlib.sha256(expected_file_contents).hexdigest()
expected_location = 'cinder://%s' % fake_volume.id
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume.manager.get.return_value = fake_volume
fake_volumes = FakeObject(create=mock.Mock(return_value=fake_volume))
self.config(cinder_volume_type='some_type')
@contextlib.contextmanager
def fake_open(client, volume, mode):
self.assertEqual('wb', mode)
yield volume_file
with mock.patch.object(cinder.Store, 'get_cinderclient') as mock_cc, \
mock.patch.object(self.store, '_open_cinder_volume',
side_effect=fake_open):
mock_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
loc, size, checksum, multihash, _ = self.store.add(
expected_image_id, image_file, expected_size, self.hash_algo,
self.context, verifier)
self.assertEqual(expected_location, loc)
self.assertEqual(expected_size, size)
self.assertEqual(expected_checksum, checksum)
self.assertEqual(expected_multihash, multihash)
fake_volumes.create.assert_called_once_with(
1,
name='image-%s' % expected_image_id,
metadata={'image_owner': self.context.project_id,
'glance_image_id': expected_image_id,
'image_size': str(expected_size)},
volume_type='some_type')
def test_cinder_add(self):
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
volume_file = six.BytesIO()
self._test_cinder_add(fake_volume, volume_file)
def test_cinder_add_with_verifier(self):
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
volume_file = six.BytesIO()
verifier = mock.MagicMock()
self._test_cinder_add(fake_volume, volume_file, 1, verifier)
verifier.update.assert_called_with(b"*" * units.Ki)
def test_cinder_add_volume_full(self):
e = IOError()
volume_file = six.BytesIO()
e.errno = errno.ENOSPC
fake_volume = mock.MagicMock(id=str(uuid.uuid4()),
status='available',
size=1)
with mock.patch.object(volume_file, 'write', side_effect=e):
self.assertRaises(exceptions.StorageFull,
self._test_cinder_add, fake_volume, volume_file)
fake_volume.delete.assert_called_once_with()
def test_cinder_delete(self):
fake_client = FakeObject(auth_token=None, management_url=None)
fake_volume_uuid = str(uuid.uuid4())
fake_volume = FakeObject(delete=mock.Mock())
fake_volumes = {fake_volume_uuid: fake_volume}
with mock.patch.object(cinder.Store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(client=fake_client,
volumes=fake_volumes)
uri = 'cinder://%s' % fake_volume_uuid
loc = location.get_location_from_uri(uri, conf=self.conf)
self.store.delete(loc, context=self.context)
fake_volume.delete.assert_called_once_with()
def test_set_url_prefix(self):
self.assertEqual('cinder://', self.store._url_prefix)
def test_configure_add(self):
def fake_volume_type(name):
if name != 'some_type':
raise cinder.cinder_exception.NotFound(code=404)
with mock.patch.object(self.store, 'get_cinderclient') as mocked_cc:
mocked_cc.return_value = FakeObject(volume_types=FakeObject(
find=fake_volume_type))
self.config(cinder_volume_type='some_type')
# If volume type exists, no exception is raised
self.store.configure_add()
# setting cinder_volume_type to non-existent value will log a
# warning
self.config(cinder_volume_type='some_random_type')
with mock.patch.object(cinder, 'LOG') as mock_log:
self.store.configure_add()
mock_log.warning.assert_called_with(
"Invalid `cinder_volume_type some_random_type`")
| apache-2.0 | 6,477,931,125,047,552,000 | 44.474771 | 79 | 0.564584 | false |
Patola/patolascripts | cam.py | 1 | 7929 | #!/usr/bin/python
from gi.repository import Gtk
import sys,re,os,time
import urllib2
import subprocess,signal
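# video mode name -> (value sent as camera_control param=0 to select the
# resolution, list of bitrate values accepted for that mode)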
bitRates={
"QVGA":(1,[128,256,384,512]),
"VGA":(0,[128,256,384,512,640,768,896,1024]),
"720P":(3,[128,256,384,512,640,768,896,1024,1280,1536,1792,2048,2560,3072,3584,4096])
}
frameRates=range(1,31)
# default values
wCamDefaultPort=81
wCamDefaultMode="VGA"
wCamDefaultBitRate=1024
wCamDefaultFrameRate=15
wCamAddress="10.0.0.54" # wCamAddress is mandatory
wCamDefaultUser="admin"
wCamDefaultPassWord="888888"
wCamPort = wCamDefaultPort
wCamMode=wCamDefaultMode
wCamBitRate=wCamDefaultBitRate
wCamFrameRate=wCamDefaultFrameRate
wCamAddress=None # wCamAddress is mandatory
wCamUser=wCamDefaultUser
wCamPassWord=wCamDefaultPassWord
wCamTitle=None
mplayerPid=None
def usage():
print "Usage : %s <OPTIONS>" % (sys.argv[0])
print "-h, --help show this help"
print "-u, --user=<user> set user ( default is [%s] )" % (wCamDefaultUser)
print "-x, --password=<password> set password ( default is [%s] )" % (wCamDefaultPassWord)
print "-a, --address=<webcam address> set webcam address e.g -i 192.168.0.253 or -i=starcam.myhome.lan ( mandatory )"
print "-p, --port=<webcam ip address> set webcam port e.g. -p 81 (default is [%s])" % (wCamDefaultPort)
print "-m, --mode=<mode> set output resolution: allowed values: QVGA, VGA, 720P e.g. -m VGA (default is [%s])" % (wCamDefaultMode)
print "-b, --bitrate=<bitrate> set bitrate: allowed values depends from mode: (default is [%s])" % (wCamDefaultBitRate)
for mode,rates in bitRates.iteritems():
print " for %s: %s" % (mode,rates[1])
print "-f, --framerate=<fps> set framerate: allowed values %s e.g -f 25 (default is [%s])" % (frameRates,wCamDefaultFrameRate)
sys.exit(1)
def kill_child_processes(parent_pid,sig=signal.SIGTERM):
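    # Recursively send `sig` to every process below parent_pid (children are
    # found via `ps --ppid`); parent_pid itself is only signalled when it has
    # no children.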
cmd="ps -o pid --ppid %d --noheaders" % (parent_pid)
print "cmd [%s]" % (cmd)
ps_command = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
ps_output = ps_command.stdout.read()
retcode = ps_command.wait()
if retcode == 0:
for pid_str in ps_output.split("\n")[:-1]:
kill_child_processes (int(pid_str))
print "child kill pid %s" % (pid_str)
try:
os.kill(int(pid_str), sig)
except:
pass
else:
try:
os.kill(parent_pid, sig)
except:
pass
# http://starcam/camera_control.cgi?loginuse=admin&loginpas=888888¶m=13&value=512&140094356 38360.6156135550700128&_=140094356 3838
def httpGet(uri,params):
import random
import time
# params="%s&%f" % (params,time.time()*1000+random.random())
url="http://%s:%s/%s?loginuse=%s&loginpas=%s&%s" % (wCamAddress,wCamPort,uri,wCamUser,wCamPassWord,params)
print url
sock=urllib2.urlopen (url,None,4)
response = sock.read()
sock.close()
print response
class CamWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="CAM control")
self.set_border_width(10)
self.set_title(wCamTitle)
# http://python-gtk-3-tutorial.readthedocs.org/en/latest/layout.html#grid
grid = Gtk.Grid()
self.add(grid)
top = Gtk.Button("Up")
top.connect("pressed", self._pressed,"decoder_control.cgi","command=0&onestep=0")
top.connect("released", self._released,"decoder_control.cgi","command=1&onestep=0")
grid.attach(top, 1, 0, 1, 1)
left = Gtk.Button("Left")
left.connect("pressed", self._pressed,"decoder_control.cgi","command=4&onestep=0")
left.connect("released", self._released,"decoder_control.cgi","command=5&onestep=0")
grid.attach(left, 0, 1, 1, 1)
right = Gtk.Button("Right")
right.connect("pressed", self._pressed,"decoder_control.cgi","command=6&onestep=0")
right.connect("released", self._released,"decoder_control.cgi","command=7&onestep=0")
grid.attach(right, 2, 1, 1, 1)
bottom = Gtk.Button("Down")
bottom.connect("pressed", self._pressed,"decoder_control.cgi","command=2&onestep=0")
bottom.connect("released", self._released,"decoder_control.cgi","command=3&onestep=0")
grid.attach(bottom, 1, 2, 1, 1)
zoomout = Gtk.Button("Zoom Out")
zoomout.connect("pressed", self._pressed,"camera_control.cgi","param=17&value=1")
zoomout.connect("released", self._released,"camera_control.cgi","param=17&value=0")
grid.attach(zoomout, 3, 2, 1, 1)
zoomin = Gtk.Button("Zoom In")
zoomin.connect("pressed", self._pressed,"camera_control.cgi","param=18&value=1")
zoomin.connect("released", self._released,"camera_control.cgi","param=18&value=0")
grid.attach(zoomin, 3, 0, 1, 1)
def _pressed(self, button,uri,params):
print("press")
httpGet (uri,params)
def _released(self, button,uri,params):
print("release")
httpGet (uri,params)
def on_close_clicked(self, button):
print("Closing application")
Gtk.main_quit()
def go():
win = CamWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
if __name__ == '__main__':
import getopt
try:
        opts, args = getopt.getopt(sys.argv[1:], "u:x:a:p:m:b:f:vh", ["help", "user=", "password=", "address=", "port=", "mode=", "bitrate=", "framerate=", "verbose"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
verbose = False
for o, a in opts:
if o in ( "-u","--user"):
wCamUser = a
if o in ( "-x","--password"):
wCamPassWord = a
if o in ( "-a","--address"):
wCamAddress = a
if o in ( "-p","--port"):
wCamPort = a
if o in ( "-m","--mode"):
wCamMode = a
if o in ( "-b","--bitrate"):
wCamBitRate = int(a)
if o in ( "-f","--framerate"):
wCamFrameRate = int(a)
if o in ( "-v","--verbose"):
            verbose = True
if o in ("-h","--help"):
usage()
if (not wCamAddress):
usage()
if verbose:
print "Verbose is [%d]" % (verbose)
if wCamMode not in bitRates.keys():
print "Invalid Mode [%s]" % (wCamMode)
usage()
else:
if not wCamBitRate in bitRates[wCamMode][1]:
print "Invalid bitRate [%s] for mode [%s]" % (wCamBitRate, wCamMode)
usage()
if wCamFrameRate not in frameRates:
print "Invalid frameRate [%s]" % (wCamFrameRate)
usage()
wCamTitle="%s:%s" % (wCamAddress,wCamPort)
print "Using user %s:%s %s:%s, mode %s, bitrate %s, framerate %s" % (wCamUser,wCamPassWord,wCamAddress,wCamPort,wCamMode,wCamBitRate,wCamFrameRate)
# set framerate
httpGet ("camera_control.cgi","param=6&value=%d" % (wCamFrameRate))
time.sleep(1)
#httpGet ("get_camera_params.cgi","")
# set video format
httpGet ("camera_control.cgi","param=0&value=%s" % (bitRates[wCamMode][0]))
time.sleep(1)
httpGet ("get_camera_params.cgi","")
streamingUrl="http://%s:%s/livestream.cgi?user=%s&pwd=%s&streamid=0&audio=0&filename=" % (wCamAddress,wCamPort,wCamUser,wCamPassWord)
cmd="curl -s \"%s\" | mplayer -title \"%s\" -quiet -nocache -vc ffh264 -demuxer h264es -fps %s -noextbased -" % (streamingUrl,wCamTitle,wCamFrameRate)
mplayerPid=os.fork()
print "player pid %d" % (mplayerPid)
if not mplayerPid:
os.system (cmd)
else:
time.sleep(4)
# set bitrate
httpGet ("camera_control.cgi","param=13&value=%d" % (wCamBitRate))
go()
kill_child_processes (mplayerPid)
#os.kill (mplayerPid,signal.SIGTERM)
| apache-2.0 | 4,252,622,409,302,086,000 | 29.496154 | 154 | 0.599319 | false |
JackDanger/sentry | src/sentry/models/project.py | 1 | 10877 | """
sentry.models.project
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import six
import warnings
from bitfield import BitField
from django.conf import settings
from django.db import models
from django.db.models import F
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import locks
from sentry.constants import ObjectStatus
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.colors import get_hashed_color
from sentry.utils.http import absolute_uri
from sentry.utils.retries import TimedRetryPolicy
# TODO(dcramer): pull in enum library
ProjectStatus = ObjectStatus
class ProjectManager(BaseManager):
# TODO(dcramer): we might want to cache this per user
def get_for_user(self, team, user, scope=None, _skip_team_check=False):
from sentry.models import Team
if not (user and user.is_authenticated()):
return []
if not _skip_team_check:
team_list = Team.objects.get_for_user(
organization=team.organization,
user=user,
scope=scope,
)
try:
team = team_list[team_list.index(team)]
except ValueError:
logging.info('User does not have access to team: %s', team.id)
return []
base_qs = self.filter(
team=team,
status=ProjectStatus.VISIBLE,
)
project_list = []
for project in base_qs:
project.team = team
project_list.append(project)
return sorted(project_list, key=lambda x: x.name.lower())
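# Illustrative usage sketch (not part of the original module); `team` and
# `request.user` are assumed to come from a view handling an authenticated
# request, and the scope string is only an example:
#
#     projects = Project.objects.get_for_user(team, request.user,
#                                             scope='project:read')
#     for project in projects:
#         ...  # each returned project has .team bound to the given team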
class Project(Model):
"""
    Projects are permission-based namespaces which generally
    are the top-level entry point for all data.
"""
__core__ = True
slug = models.SlugField(null=True)
name = models.CharField(max_length=200)
forced_color = models.CharField(max_length=6, null=True, blank=True)
organization = FlexibleForeignKey('sentry.Organization')
team = FlexibleForeignKey('sentry.Team')
public = models.BooleanField(default=False)
date_added = models.DateTimeField(default=timezone.now)
status = BoundedPositiveIntegerField(default=0, choices=(
(ObjectStatus.VISIBLE, _('Active')),
(ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
(ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
), db_index=True)
# projects that were created before this field was present
# will have their first_event field set to date_added
first_event = models.DateTimeField(null=True)
flags = BitField(flags=(
('has_releases', 'This Project has sent release data'),
), default=0, null=True)
objects = ProjectManager(cache_fields=[
'pk',
'slug',
])
class Meta:
app_label = 'sentry'
db_table = 'sentry_project'
unique_together = (('team', 'slug'), ('organization', 'slug'))
__repr__ = sane_repr('team_id', 'name', 'slug')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def next_short_id(self):
from sentry.models import Counter
return Counter.increment(self)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:project', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
super(Project, self).save(*args, **kwargs)
else:
super(Project, self).save(*args, **kwargs)
def get_absolute_url(self):
return absolute_uri('/{}/{}/'.format(self.organization.slug, self.slug))
def merge_to(self, project):
from sentry.models import (
Group, GroupTagValue, Event, TagValue
)
if not isinstance(project, Project):
project = Project.objects.get_from_cache(pk=project)
for group in Group.objects.filter(project=self):
try:
other = Group.objects.get(
project=project,
)
except Group.DoesNotExist:
group.update(project=project)
GroupTagValue.objects.filter(
project_id=self.id,
group_id=group.id,
).update(project_id=project.id)
else:
Event.objects.filter(
group_id=group.id,
).update(group_id=other.id)
for obj in GroupTagValue.objects.filter(group=group):
obj2, created = GroupTagValue.objects.get_or_create(
project_id=project.id,
group_id=group.id,
key=obj.key,
value=obj.value,
defaults={'times_seen': obj.times_seen}
)
if not created:
obj2.update(times_seen=F('times_seen') + obj.times_seen)
for fv in TagValue.objects.filter(project=self):
TagValue.objects.get_or_create(project=project, key=fv.key, value=fv.value)
fv.delete()
self.delete()
def is_internal_project(self):
for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
if six.text_type(self.id) == six.text_type(value) or six.text_type(self.slug) == six.text_type(value):
return True
return False
def get_tags(self, with_internal=True):
from sentry.models import TagKey
if not hasattr(self, '_tag_cache'):
tags = self.get_option('tags', None)
if tags is None:
tags = [
t for t in TagKey.objects.all_keys(self)
if with_internal or not t.startswith('sentry:')
]
self._tag_cache = tags
return self._tag_cache
# TODO: Make these a mixin
def update_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.set_value(self, *args, **kwargs)
def get_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.get_value(self, *args, **kwargs)
def delete_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.unset_value(self, *args, **kwargs)
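    # Illustrative sketch (not part of the original module): the three helpers
    # above proxy per-project settings through ProjectOption.  The key shown
    # here is made up for the example:
    #
    #     project.update_option('mail:subject_prefix', '[ops]')
    #     prefix = project.get_option('mail:subject_prefix')
    #     project.delete_option('mail:subject_prefix')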
@property
def callsign(self):
return self.slug.upper()
@property
def color(self):
if self.forced_color is not None:
return '#%s' % self.forced_color
return get_hashed_color(self.callsign or self.slug)
@property
def member_set(self):
from sentry.models import OrganizationMember
return self.organization.member_set.filter(
id__in=OrganizationMember.objects.filter(
organizationmemberteam__is_active=True,
organizationmemberteam__team=self.team,
).values('id'),
user__is_active=True,
).distinct()
def has_access(self, user, access=None):
from sentry.models import AuthIdentity, OrganizationMember
warnings.warn('Project.has_access is deprecated.', DeprecationWarning)
queryset = self.member_set.filter(user=user)
if access is not None:
queryset = queryset.filter(type__lte=access)
try:
member = queryset.get()
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
user=member.user_id,
)
except AuthIdentity.DoesNotExist:
return True
return auth_identity.is_valid(member)
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
'public': self.public,
}
def get_full_name(self):
if self.team.name not in self.name:
return '%s %s' % (self.team.name, self.name)
return self.name
def get_notification_recipients(self, user_option):
from sentry.models import UserOption
alert_settings = dict(
(o.user_id, int(o.value))
for o in UserOption.objects.filter(
project=self,
key=user_option,
)
)
disabled = set(u for u, v in six.iteritems(alert_settings) if v == 0)
member_set = set(self.member_set.exclude(
user__in=disabled,
).values_list('user', flat=True))
# determine members default settings
members_to_check = set(u for u in member_set if u not in alert_settings)
if members_to_check:
disabled = set((
uo.user_id for uo in UserOption.objects.filter(
key='subscribe_by_default',
user__in=members_to_check,
)
if uo.value == '0'
))
member_set = [x for x in member_set if x not in disabled]
return member_set
def get_mail_alert_subscribers(self):
user_ids = self.get_notification_recipients('mail:alert')
if not user_ids:
return []
from sentry.models import User
return list(User.objects.filter(id__in=user_ids))
def is_user_subscribed_to_mail_alerts(self, user):
from sentry.models import UserOption
is_enabled = UserOption.objects.get_value(
user,
'mail:alert',
project=self
)
if is_enabled is None:
is_enabled = UserOption.objects.get_value(
user,
'subscribe_by_default',
'1'
) == '1'
else:
is_enabled = bool(is_enabled)
return is_enabled
def is_user_subscribed_to_workflow(self, user):
from sentry.models import UserOption, UserOptionValue
opt_value = UserOption.objects.get_value(
user,
'workflow:notifications',
project=self
)
if opt_value is None:
opt_value = UserOption.objects.get_value(
user,
'workflow:notifications',
UserOptionValue.all_conversations
)
return opt_value == UserOptionValue.all_conversations
| bsd-3-clause | -3,192,964,341,637,174,300 | 31.861027 | 114 | 0.583249 | false |
ldoktor/virt-test | virttest/utils_test.py | 1 | 61357 | """
High-level KVM test utility functions.
This module is meant to reduce code size by performing common test procedures.
Generally, code here should look like test code.
More specifically:
- Functions in this module should raise exceptions if things go wrong
(unlike functions in kvm_utils.py and kvm_vm.py which report failure via
their returned values).
- Functions in this module may use logging.info(), in addition to
logging.debug() and logging.error(), to log messages the user may be
interested in (unlike kvm_utils.py and kvm_vm.py which use
logging.debug() for anything that isn't an error).
- Functions in this module typically use functions and classes from
lower-level modules (e.g. kvm_utils.py, kvm_vm.py, kvm_subprocess.py).
- Functions in this module should not be used by lower-level modules.
- Functions in this module should be used in the right context.
For example, a function should not be used where it may display
misleading or inaccurate info or debug messages.
@copyright: 2008-2009 Red Hat Inc.
"""
import time, os, logging, re, signal, imp, tempfile, commands
import threading, shelve
from Queue import Queue
from autotest.client.shared import error, global_config
from autotest.client import utils
from autotest.client.tools import scan_results
from autotest.client.shared.syncdata import SyncData, SyncListenServer
import aexpect, utils_misc, virt_vm, remote, storage, env_process
GLOBAL_CONFIG = global_config.global_config
def get_living_vm(env, vm_name):
"""
Get a VM object from the environment and make sure it's alive.
@param env: Dictionary with test environment.
@param vm_name: Name of the desired VM object.
@return: A VM object.
"""
vm = env.get_vm(vm_name)
if not vm:
raise error.TestError("VM '%s' not found in environment" % vm_name)
if not vm.is_alive():
raise error.TestError("VM '%s' seems to be dead; test requires a "
"living VM" % vm_name)
return vm
def wait_for_login(vm, nic_index=0, timeout=240, start=0, step=2, serial=None):
"""
Try logging into a VM repeatedly. Stop on success or when timeout expires.
@param vm: VM object.
@param nic_index: Index of NIC to access in the VM.
@param timeout: Time to wait before giving up.
@param serial: Whether to use a serial connection instead of a remote
(ssh, rss) one.
@return: A shell session object.
"""
end_time = time.time() + timeout
session = None
if serial:
mode = 'serial'
logging.info("Trying to log into guest %s using serial connection,"
" timeout %ds", vm.name, timeout)
time.sleep(start)
while time.time() < end_time:
try:
session = vm.serial_login()
break
except remote.LoginError, e:
logging.debug(e)
time.sleep(step)
else:
mode = 'remote'
logging.info("Trying to log into guest %s using remote connection,"
" timeout %ds", vm.name, timeout)
time.sleep(start)
while time.time() < end_time:
try:
session = vm.login(nic_index=nic_index)
break
except (remote.LoginError, virt_vm.VMError), e:
logging.debug(e)
time.sleep(step)
if not session:
raise error.TestFail("Could not log into guest %s using %s connection" %
(vm.name, mode))
logging.info("Logged into guest %s using %s connection", vm.name, mode)
return session
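# Illustrative usage sketch (values are hypothetical, not from a real test):
#
#     vm = get_living_vm(env, params["main_vm"])
#     session = wait_for_login(vm, nic_index=0, timeout=240, serial=False)
#     try:
#         session.cmd("uname -a")
#     finally:
#         session.close()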
def reboot(vm, session, method="shell", sleep_before_reset=10, nic_index=0,
timeout=240):
"""
Reboot the VM and wait for it to come back up by trying to log in until
timeout expires.
@param vm: VM object.
@param session: A shell session object.
@param method: Reboot method. Can be "shell" (send a shell reboot
command) or "system_reset" (send a system_reset monitor command).
@param nic_index: Index of NIC to access in the VM, when logging in after
rebooting.
@param timeout: Time to wait before giving up (after rebooting).
@return: A new shell session object.
"""
if method == "shell":
# Send a reboot command to the guest's shell
session.sendline(vm.get_params().get("reboot_command"))
logging.info("Reboot command sent. Waiting for guest to go down")
elif method == "system_reset":
# Sleep for a while before sending the command
time.sleep(sleep_before_reset)
# Clear the event list of all QMP monitors
monitors = [m for m in vm.monitors if m.protocol == "qmp"]
for m in monitors:
m.clear_events()
# Send a system_reset monitor command
vm.monitor.cmd("system_reset")
logging.info("Monitor command system_reset sent. Waiting for guest to "
"go down")
# Look for RESET QMP events
time.sleep(1)
for m in monitors:
if not m.get_event("RESET"):
raise error.TestFail("RESET QMP event not received after "
"system_reset (monitor '%s')" % m.name)
else:
logging.info("RESET QMP event received")
else:
logging.error("Unknown reboot method: %s", method)
# Wait for the session to become unresponsive and close it
if not utils_misc.wait_for(lambda: not session.is_responsive(timeout=30),
120, 0, 1):
raise error.TestFail("Guest refuses to go down")
session.close()
# Try logging into the guest until timeout expires
logging.info("Guest is down. Waiting for it to go up again, timeout %ds",
timeout)
session = vm.wait_for_login(nic_index, timeout=timeout)
logging.info("Guest is up again")
return session
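# Illustrative sketch: both reboot flavours take an existing session and
# return a fresh one; only the reboot method differs (hypothetical call, not
# from a real test):
#
#     session = reboot(vm, session, method="shell", timeout=240)
#     # or, going through the monitor instead of the guest shell:
#     session = reboot(vm, session, method="system_reset",
#                      sleep_before_reset=10, timeout=240)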
@error.context_aware
def update_boot_option(vm, args_removed=None, args_added=None,
need_reboot=True):
"""
Update guest default kernel option.
@param vm: The VM object.
    @param args_removed: Kernel options to remove.
    @param args_added: Kernel options to add.
    @param need_reboot: Whether the VM needs to be rebooted.
    @raise error.TestError: Raised if updating the guest kernel cmdline fails.
"""
if vm.params.get("os_type") == 'windows':
# this function is only for linux, if we need to change
# windows guest's boot option, we can use a function like:
# update_win_bootloader(args_removed, args_added, reboot)
        # (this function is not implemented.)
# here we just:
return
login_timeout = int(vm.params.get("login_timeout"))
session = vm.wait_for_login(timeout=login_timeout)
msg = "Update guest kernel cmdline. "
cmd = "grubby --update-kernel=`grubby --default-kernel` "
if args_removed is not None:
msg += " remove args: %s." % args_removed
        cmd += '--remove-args="%s" ' % args_removed
if args_added is not None:
msg += " add args: %s" % args_added
cmd += '--args="%s"' % args_added
error.context(msg, logging.info)
s, o = session.cmd_status_output(cmd)
if s != 0:
logging.error(o)
raise error.TestError("Fail to modify guest kernel cmdline")
if need_reboot:
error.context("Rebooting guest ...", logging.info)
vm.reboot(session=session, timeout=login_timeout)
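# Illustrative sketch (the kernel arguments below are only an example):
#
#     update_boot_option(vm, args_removed="rhgb quiet",
#                        args_added="console=ttyS0", need_reboot=True)
#
# On Windows guests the call is a no-op, as noted in the function body.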
def migrate(vm, env=None, mig_timeout=3600, mig_protocol="tcp",
mig_cancel=False, offline=False, stable_check=False,
clean=False, save_path=None, dest_host='localhost', mig_port=None):
"""
Migrate a VM locally and re-register it in the environment.
@param vm: The VM to migrate.
@param env: The environment dictionary. If omitted, the migrated VM will
not be registered.
@param mig_timeout: timeout value for migration.
@param mig_protocol: migration protocol
@param mig_cancel: Test migrate_cancel or not when protocol is tcp.
@param dest_host: Destination host (defaults to 'localhost').
@param mig_port: Port that will be used for migration.
@return: The post-migration VM, in case of same host migration, True in
case of multi-host migration.
"""
def mig_finished():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return "status: active" not in o
else:
return o.get("status") != "active"
def mig_succeeded():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return "status: completed" in o
else:
return o.get("status") == "completed"
def mig_failed():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return "status: failed" in o
else:
return o.get("status") == "failed"
def mig_cancelled():
o = vm.monitor.info("migrate")
if isinstance(o, str):
return ("Migration status: cancelled" in o or
"Migration status: canceled" in o)
else:
return (o.get("status") == "cancelled" or
o.get("status") == "canceled")
def wait_for_migration():
if not utils_misc.wait_for(mig_finished, mig_timeout, 2, 2,
"Waiting for migration to finish"):
raise error.TestFail("Timeout expired while waiting for migration "
"to finish")
if dest_host == 'localhost':
dest_vm = vm.clone()
if (dest_host == 'localhost') and stable_check:
# Pause the dest vm after creation
dest_vm.params['extra_params'] = (dest_vm.params.get('extra_params','')
+ ' -S')
if dest_host == 'localhost':
dest_vm.create(migration_mode=mig_protocol, mac_source=vm)
try:
try:
if mig_protocol == "tcp":
if dest_host == 'localhost':
uri = "tcp:0:%d" % dest_vm.migration_port
else:
uri = 'tcp:%s:%d' % (dest_host, mig_port)
elif mig_protocol == "unix":
uri = "unix:%s" % dest_vm.migration_file
elif mig_protocol == "exec":
uri = '"exec:nc localhost %s"' % dest_vm.migration_port
if offline:
vm.pause()
vm.monitor.migrate(uri)
if mig_cancel:
time.sleep(2)
vm.monitor.cmd("migrate_cancel")
if not utils_misc.wait_for(mig_cancelled, 60, 2, 2,
"Waiting for migration "
"cancellation"):
raise error.TestFail("Failed to cancel migration")
if offline:
vm.resume()
if dest_host == 'localhost':
dest_vm.destroy(gracefully=False)
return vm
else:
wait_for_migration()
if (dest_host == 'localhost') and stable_check:
save_path = None or "/tmp"
save1 = os.path.join(save_path, "src")
save2 = os.path.join(save_path, "dst")
vm.save_to_file(save1)
dest_vm.save_to_file(save2)
# Fail if we see deltas
md5_save1 = utils.hash_file(save1)
md5_save2 = utils.hash_file(save2)
if md5_save1 != md5_save2:
raise error.TestFail("Mismatch of VM state before "
"and after migration")
if (dest_host == 'localhost') and offline:
dest_vm.resume()
except Exception:
if dest_host == 'localhost':
dest_vm.destroy()
raise
finally:
if (dest_host == 'localhost') and stable_check and clean:
logging.debug("Cleaning the state files")
if os.path.isfile(save1):
os.remove(save1)
if os.path.isfile(save2):
os.remove(save2)
# Report migration status
if mig_succeeded():
logging.info("Migration finished successfully")
elif mig_failed():
raise error.TestFail("Migration failed")
else:
raise error.TestFail("Migration ended with unknown status")
if dest_host == 'localhost':
if dest_vm.monitor.verify_status("paused"):
logging.debug("Destination VM is paused, resuming it")
dest_vm.resume()
# Kill the source VM
vm.destroy(gracefully=False)
# Replace the source VM with the new cloned VM
if (dest_host == 'localhost') and (env is not None):
env.register_vm(vm.name, dest_vm)
# Return the new cloned VM
if dest_host == 'localhost':
return dest_vm
else:
return vm
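# Illustrative usage sketch for a local TCP migration with a stable-state
# check (timeouts and the save path are made up for the example):
#
#     vm = migrate(vm, env, mig_timeout=3600, mig_protocol="tcp",
#                  offline=False, stable_check=True, clean=True,
#                  save_path="/tmp")
#
# When migrating on localhost the returned clone replaces the original VM in
# `env`, so later code should keep using the return value.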
def guest_active(vm):
o = vm.monitor.info("status")
if isinstance(o, str):
return "status: running" in o
else:
if "status" in o:
return o.get("status") == "running"
else:
return o.get("running")
class MigrationData(object):
def __init__(self, params, srchost, dsthost, vms_name, params_append):
"""
Class that contains data needed for one migration.
"""
self.params = params.copy()
self.params.update(params_append)
self.source = False
if params.get("hostid") == srchost:
self.source = True
self.destination = False
if params.get("hostid") == dsthost:
self.destination = True
self.src = srchost
self.dst = dsthost
self.hosts = [srchost, dsthost]
self.mig_id = {'src': srchost, 'dst': dsthost, "vms": vms_name}
self.vms_name = vms_name
self.vms = []
self.vm_ports = None
def is_src(self):
"""
@return: True if host is source.
"""
return self.source
def is_dst(self):
"""
@return: True if host is destination.
"""
return self.destination
class MultihostMigration(object):
"""
Class that provides a framework for multi-host migration.
Migration can be run both synchronously and asynchronously.
To specify what is going to happen during the multi-host
migration, it is necessary to reimplement the method
migration_scenario. It is possible to start multiple migrations
in separate threads, since self.migrate is thread safe.
Only one test using multihost migration framework should be
started on one machine otherwise it is necessary to solve the
problem with listen server port.
Multihost migration starts SyncListenServer through which
    all messages are transferred, since the multiple hosts can
    be in different states.
Class SyncData is used to transfer data over network or
synchronize the migration process. Synchronization sessions
are recognized by session_id.
It is important to note that, in order to have multi-host
migration, one needs shared guest image storage. The simplest
case is when the guest images are on an NFS server.
Example:
class TestMultihostMigration(utils_misc.MultihostMigration):
def __init__(self, test, params, env):
super(testMultihostMigration, self).__init__(test, params, env)
def migration_scenario(self):
srchost = self.params.get("hosts")[0]
dsthost = self.params.get("hosts")[1]
def worker(mig_data):
vm = env.get_vm("vm1")
session = vm.wait_for_login(timeout=self.login_timeout)
session.sendline("nohup dd if=/dev/zero of=/dev/null &")
session.cmd("killall -0 dd")
def check_worker(mig_data):
vm = env.get_vm("vm1")
session = vm.wait_for_login(timeout=self.login_timeout)
session.cmd("killall -9 dd")
# Almost synchronized migration, waiting to end it.
# Work is started only on first VM.
self.migrate_wait(["vm1", "vm2"], srchost, dsthost,
worker, check_worker)
# Migration started in different threads.
# It allows to start multiple migrations simultaneously.
mig1 = self.migrate(["vm1"], srchost, dsthost,
worker, check_worker)
mig2 = self.migrate(["vm2"], srchost, dsthost)
mig2.join()
mig1.join()
mig = TestMultihostMigration(test, params, env)
mig.run()
"""
def __init__(self, test, params, env, preprocess_env=True):
self.test = test
self.params = params
self.env = env
self.hosts = params.get("hosts")
self.hostid = params.get('hostid', "")
self.comm_port = int(params.get("comm_port", 13234))
vms_count = len(params["vms"].split())
self.login_timeout = int(params.get("login_timeout", 360))
self.disk_prepare_timeout = int(params.get("disk_prepare_timeout",
160 * vms_count))
self.finish_timeout = int(params.get("finish_timeout",
120 * vms_count))
self.new_params = None
if params.get("clone_master") == "yes":
self.clone_master = True
else:
self.clone_master = False
self.mig_timeout = int(params.get("mig_timeout"))
# Port used to communicate info between source and destination
self.regain_ip_cmd = params.get("regain_ip_cmd", "dhclient")
self.vm_lock = threading.Lock()
self.sync_server = None
if self.clone_master:
self.sync_server = SyncListenServer()
if preprocess_env:
self.preprocess_env()
self._hosts_barrier(self.hosts, self.hosts, 'disk_prepared',
self.disk_prepare_timeout)
def migration_scenario(self):
"""
Multi Host migration_scenario is started from method run where the
exceptions are checked. It is not necessary to take care of
cleaning up after test crash or finish.
"""
raise NotImplementedError
def migrate_vms_src(self, mig_data):
"""
Migrate vms source.
        @param mig_data: Data for migration.
        To change the way the machine migrates it is necessary to
        reimplement this method.
"""
def mig_wrapper(vm, dsthost, vm_ports):
vm.migrate(dest_host=dsthost, remote_port=vm_ports[vm.name])
logging.info("Start migrating now...")
multi_mig = []
for vm in mig_data.vms:
multi_mig.append((mig_wrapper, (vm, mig_data.dst,
mig_data.vm_ports)))
utils_misc.parallel(multi_mig)
def migrate_vms_dest(self, mig_data):
"""
Migrate vms destination. This function is started on dest host during
migration.
        @param mig_data: Data for migration.
"""
pass
def __del__(self):
if self.sync_server:
self.sync_server.close()
def master_id(self):
return self.hosts[0]
def _hosts_barrier(self, hosts, session_id, tag, timeout):
logging.debug("Barrier timeout: %d tags: %s" % (timeout, tag))
tags = SyncData(self.master_id(), self.hostid, hosts,
"%s,%s,barrier" % (str(session_id), tag),
self.sync_server).sync(tag, timeout)
logging.debug("Barrier tag %s" % (tags))
def preprocess_env(self):
"""
Prepare env to start vms.
"""
storage.preprocess_images(self.test.bindir, self.params, self.env)
def _check_vms_source(self, mig_data):
for vm in mig_data.vms:
vm.wait_for_login(timeout=self.login_timeout)
sync = SyncData(self.master_id(), self.hostid, mig_data.hosts,
mig_data.mig_id, self.sync_server)
mig_data.vm_ports = sync.sync(timeout=120)[mig_data.dst]
logging.info("Received from destination the migration port %s",
str(mig_data.vm_ports))
def _check_vms_dest(self, mig_data):
mig_data.vm_ports = {}
for vm in mig_data.vms:
logging.info("Communicating to source migration port %s",
vm.migration_port)
mig_data.vm_ports[vm.name] = vm.migration_port
SyncData(self.master_id(), self.hostid,
mig_data.hosts, mig_data.mig_id,
self.sync_server).sync(mig_data.vm_ports, timeout=120)
def _prepare_params(self, mig_data):
"""
Prepare separate params for vm migration.
        @param mig_data: MigrationData object holding the list of vms.
"""
new_params = mig_data.params.copy()
new_params["vms"] = " ".join(mig_data.vms_name)
return new_params
def _check_vms(self, mig_data):
"""
Check if vms are started correctly.
        @param mig_data: MigrationData object with the vms to check.
"""
logging.info("Try check vms %s" % (mig_data.vms_name))
for vm in mig_data.vms_name:
if not self.env.get_vm(vm) in mig_data.vms:
mig_data.vms.append(self.env.get_vm(vm))
for vm in mig_data.vms:
logging.info("Check vm %s on host %s" % (vm.name, self.hostid))
vm.verify_alive()
if mig_data.is_src():
self._check_vms_source(mig_data)
else:
self._check_vms_dest(mig_data)
def prepare_for_migration(self, mig_data, migration_mode):
"""
        Prepare the machines taking part in the migration.
        @param mig_data: Class with data necessary for migration.
        @param migration_mode: Migration mode used to prepare the machine.
"""
new_params = self._prepare_params(mig_data)
new_params['migration_mode'] = migration_mode
new_params['start_vm'] = 'yes'
self.vm_lock.acquire()
env_process.process(self.test, new_params, self.env,
env_process.preprocess_image,
env_process.preprocess_vm)
self.vm_lock.release()
self._check_vms(mig_data)
def migrate_vms(self, mig_data):
"""
Migrate vms.
"""
if mig_data.is_src():
self.migrate_vms_src(mig_data)
else:
self.migrate_vms_dest(mig_data)
def check_vms(self, mig_data):
"""
Check vms after migrate.
@param mig_data: object with migration data.
"""
for vm in mig_data.vms:
if not guest_active(vm):
raise error.TestFail("Guest not active after migration")
logging.info("Migrated guest appears to be running")
logging.info("Logging into migrated guest after migration...")
for vm in mig_data.vms:
session_serial = vm.wait_for_serial_login(timeout=
self.login_timeout)
            # It sometimes happens that the system prints a message on the
            # serial console and the IP renewal command blocks the test, so a
            # "sleep" has to be included in the IP renewal command.
session_serial.cmd(self.regain_ip_cmd)
vm.wait_for_login(timeout=self.login_timeout)
def postprocess_env(self):
"""
Kill vms and delete cloned images.
"""
storage.postprocess_images(self.test.bindir, self.params)
def migrate(self, vms_name, srchost, dsthost, start_work=None,
check_work=None, mig_mode="tcp", params_append=None):
"""
Migrate machine from srchost to dsthost. It executes start_work on
source machine before migration and executes check_work on dsthost
after migration.
Migration execution progress:
source host | dest host
--------------------------------------------------------
prepare guest on both sides of migration
- start machine and check if machine works
- synchronize transfer data needed for migration
--------------------------------------------------------
start work on source guests | wait for migration
--------------------------------------------------------
migrate guest to dest host.
wait on finish migration synchronization
--------------------------------------------------------
| check work on vms
--------------------------------------------------------
wait for sync on finish migration
@param vms_name: List of vms.
@param srchost: src host id.
@param dsthost: dst host id.
@param start_work: Function started before migration.
@param check_work: Function started after migration.
@param mig_mode: Migration mode.
@param params_append: Append params to self.params only for migration.
"""
def migrate_wrap(vms_name, srchost, dsthost, start_work=None,
check_work=None, params_append=None):
logging.info("Starting migrate vms %s from host %s to %s" %
(vms_name, srchost, dsthost))
error = None
mig_data = MigrationData(self.params, srchost, dsthost,
vms_name, params_append)
try:
try:
if mig_data.is_src():
self.prepare_for_migration(mig_data, None)
elif self.hostid == dsthost:
self.prepare_for_migration(mig_data, mig_mode)
else:
return
if mig_data.is_src():
if start_work:
start_work(mig_data)
self.migrate_vms(mig_data)
timeout = 30
if not mig_data.is_src():
timeout = self.mig_timeout
self._hosts_barrier(mig_data.hosts, mig_data.mig_id,
'mig_finished', timeout)
if mig_data.is_dst():
self.check_vms(mig_data)
if check_work:
check_work(mig_data)
except:
error = True
raise
finally:
if not error:
self._hosts_barrier(self.hosts,
mig_data.mig_id,
'test_finihed',
self.finish_timeout)
def wait_wrap(vms_name, srchost, dsthost):
mig_data = MigrationData(self.params, srchost, dsthost, vms_name,
None)
timeout = (self.login_timeout + self.mig_timeout +
self.finish_timeout)
self._hosts_barrier(self.hosts, mig_data.mig_id,
'test_finihed', timeout)
if (self.hostid in [srchost, dsthost]):
mig_thread = utils.InterruptedThread(migrate_wrap, (vms_name,
srchost,
dsthost,
start_work,
check_work,
params_append))
else:
mig_thread = utils.InterruptedThread(wait_wrap, (vms_name,
srchost,
dsthost))
mig_thread.start()
return mig_thread
def migrate_wait(self, vms_name, srchost, dsthost, start_work=None,
check_work=None, mig_mode="tcp", params_append=None):
"""
Migrate machine from srchost to dsthost and wait for finish.
It executes start_work on source machine before migration and executes
check_work on dsthost after migration.
@param vms_name: List of vms.
@param srchost: src host id.
@param dsthost: dst host id.
@param start_work: Function which is started before migration.
@param check_work: Function which is started after
done of migration.
"""
self.migrate(vms_name, srchost, dsthost, start_work, check_work,
mig_mode, params_append).join()
def cleanup(self):
"""
Cleanup env after test.
"""
if self.clone_master:
self.sync_server.close()
self.postprocess_env()
def run(self):
"""
Start multihost migration scenario.
After scenario is finished or if scenario crashed it calls postprocess
machines and cleanup env.
"""
try:
self.migration_scenario()
self._hosts_barrier(self.hosts, self.hosts, 'all_test_finihed',
self.finish_timeout)
finally:
self.cleanup()
def stop_windows_service(session, service, timeout=120):
"""
Stop a Windows service using sc.
If the service is already stopped or is not installed, do nothing.
@param service: The name of the service
@param timeout: Time duration to wait for service to stop
@raise error.TestError: Raised if the service can't be stopped
"""
end_time = time.time() + timeout
while time.time() < end_time:
o = session.cmd_output("sc stop %s" % service, timeout=60)
# FAILED 1060 means the service isn't installed.
# FAILED 1062 means the service hasn't been started.
if re.search(r"\bFAILED (1060|1062)\b", o, re.I):
break
time.sleep(1)
else:
raise error.TestError("Could not stop service '%s'" % service)
def start_windows_service(session, service, timeout=120):
"""
Start a Windows service using sc.
If the service is already running, do nothing.
If the service isn't installed, fail.
@param service: The name of the service
@param timeout: Time duration to wait for service to start
@raise error.TestError: Raised if the service can't be started
"""
end_time = time.time() + timeout
while time.time() < end_time:
o = session.cmd_output("sc start %s" % service, timeout=60)
# FAILED 1060 means the service isn't installed.
if re.search(r"\bFAILED 1060\b", o, re.I):
raise error.TestError("Could not start service '%s' "
"(service not installed)" % service)
# FAILED 1056 means the service is already running.
if re.search(r"\bFAILED 1056\b", o, re.I):
break
time.sleep(1)
else:
raise error.TestError("Could not start service '%s'" % service)
def get_time(session, time_command, time_filter_re, time_format):
"""
Return the host time and guest time. If the guest time cannot be fetched
a TestError exception is raised.
Note that the shell session should be ready to receive commands
(i.e. should "display" a command prompt and should be done with all
previous commands).
@param session: A shell session.
@param time_command: Command to issue to get the current guest time.
@param time_filter_re: Regex filter to apply on the output of
time_command in order to get the current time.
@param time_format: Format string to pass to time.strptime() with the
result of the regex filter.
@return: A tuple containing the host time and guest time.
"""
if len(re.findall("ntpdate|w32tm", time_command)) == 0:
host_time = time.time()
s = session.cmd_output(time_command)
try:
s = re.findall(time_filter_re, s)[0]
except IndexError:
logging.debug("The time string from guest is:\n%s", s)
raise error.TestError("The time string from guest is unexpected.")
except Exception, e:
logging.debug("(time_filter_re, time_string): (%s, %s)",
time_filter_re, s)
raise e
guest_time = time.mktime(time.strptime(s, time_format))
else:
o = session.cmd(time_command)
if re.match('ntpdate', time_command):
offset = re.findall('offset (.*) sec', o)[0]
host_main, host_mantissa = re.findall(time_filter_re, o)[0]
host_time = (time.mktime(time.strptime(host_main, time_format)) +
float("0.%s" % host_mantissa))
guest_time = host_time - float(offset)
else:
guest_time = re.findall(time_filter_re, o)[0]
offset = re.findall("o:(.*)s", o)[0]
if re.match('PM', guest_time):
hour = re.findall('\d+ (\d+):', guest_time)[0]
hour = str(int(hour) + 12)
                guest_time = re.sub(r'(\d+\s)\d+:', r'\g<1>%s:' % hour,
                                    guest_time)[:-3]
else:
guest_time = guest_time[:-3]
guest_time = time.mktime(time.strptime(guest_time, time_format))
host_time = guest_time + float(offset)
return (host_time, guest_time)
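# Illustrative sketch for a plain `date` based query; the command, regex and
# format string below are assumptions chosen to match each other, not values
# taken from an existing test configuration:
#
#     host_t, guest_t = get_time(session,
#                                time_command='date +"%Y-%m-%d %H:%M:%S"',
#                                time_filter_re=r"\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}",
#                                time_format="%Y-%m-%d %H:%M:%S")
#     drift = abs(host_t - guest_t)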
def get_memory_info(lvms):
"""
Get memory information from host and guests in format:
Host: memfree = XXXM; Guests memsh = {XXX,XXX,...}
@params lvms: List of VM objects
@return: String with memory info report
"""
if not isinstance(lvms, list):
raise error.TestError("Invalid list passed to get_stat: %s " % lvms)
try:
meminfo = "Host: memfree = "
meminfo += str(int(utils.freememtotal()) / 1024) + "M; "
meminfo += "swapfree = "
mf = int(utils.read_from_meminfo("SwapFree")) / 1024
meminfo += str(mf) + "M; "
except Exception, e:
raise error.TestFail("Could not fetch host free memory info, "
"reason: %s" % e)
meminfo += "Guests memsh = {"
for vm in lvms:
shm = vm.get_shared_meminfo()
if shm is None:
raise error.TestError("Could not get shared meminfo from "
"VM %s" % vm)
meminfo += "%dM; " % shm
meminfo = meminfo[0:-2] + "}"
return meminfo
def domstat_cgroup_cpuacct_percpu(domain, qemu_path="/libvirt/qemu/"):
"""
Get a list of domain-specific per CPU stats from cgroup cpuacct controller.
@param domain: Domain name
@param qemu_path: Default: "/libvirt/qemu/".
Please refer OS doc to pass the correct qemu path.
$CGRP_MNTPT/cpuacct/<$qemu_path>/<domain>..
"""
percpu_act_file = (utils.get_cgroup_mountpoint("cpuacct") + qemu_path +
domain + "/cpuacct.usage_percpu")
try:
f_percpu_act = open(percpu_act_file, "rU")
cpuacct_usage_percpu = f_percpu_act.readline().split()
f_percpu_act.close()
return cpuacct_usage_percpu
except IOError:
raise error.TestError("Failed to get per cpu stat from %s" %
percpu_act_file)
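# Illustrative sketch (the domain name is an assumption; the default
# qemu_path matches a typical libvirt cgroup hierarchy):
#
#     per_cpu = domstat_cgroup_cpuacct_percpu("rhel6-vm1")
#     total_ns = sum(int(v) for v in per_cpu)   # cpuacct usage is in nanoseconds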
def run_file_transfer(test, params, env):
"""
Transfer a file back and forth between host and guest.
1) Boot up a VM.
2) Create a large file by dd on host.
3) Copy this file from host to guest.
4) Copy this file from guest to host.
    5) Check if the file transfers finished successfully.
@param test: KVM test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
"""
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
login_timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=login_timeout)
dir_name = test.tmpdir
transfer_timeout = int(params.get("transfer_timeout"))
transfer_type = params.get("transfer_type")
tmp_dir = params.get("tmp_dir", "/tmp/")
clean_cmd = params.get("clean_cmd", "rm -f")
filesize = int(params.get("filesize", 4000))
count = int(filesize / 10)
if count == 0:
count = 1
host_path = os.path.join(dir_name, "tmp-%s" %
utils_misc.generate_random_string(8))
host_path2 = host_path + ".2"
cmd = "dd if=/dev/zero of=%s bs=10M count=%d" % (host_path, count)
guest_path = (tmp_dir + "file_transfer-%s" %
utils_misc.generate_random_string(8))
try:
logging.info("Creating %dMB file on host", filesize)
utils.run(cmd)
if transfer_type == "remote":
logging.info("Transfering file host -> guest, timeout: %ss",
transfer_timeout)
t_begin = time.time()
vm.copy_files_to(host_path, guest_path, timeout=transfer_timeout)
t_end = time.time()
throughput = filesize / (t_end - t_begin)
logging.info("File transfer host -> guest succeed, "
"estimated throughput: %.2fMB/s", throughput)
logging.info("Transfering file guest -> host, timeout: %ss",
transfer_timeout)
t_begin = time.time()
vm.copy_files_from(guest_path, host_path2, timeout=transfer_timeout)
t_end = time.time()
throughput = filesize / (t_end - t_begin)
logging.info("File transfer guest -> host succeed, "
"estimated throughput: %.2fMB/s", throughput)
else:
raise error.TestError("Unknown test file transfer mode %s" %
transfer_type)
if (utils.hash_file(host_path, method="md5") !=
utils.hash_file(host_path2, method="md5")):
raise error.TestFail("File changed after transfer host -> guest "
"and guest -> host")
finally:
logging.info('Cleaning temp file on guest')
session.cmd("%s %s" % (clean_cmd, guest_path))
logging.info('Cleaning temp files on host')
try:
os.remove(host_path)
os.remove(host_path2)
except OSError:
pass
session.close()
def run_autotest(vm, session, control_path, timeout, outputdir, params):
"""
Run an autotest control file inside a guest (linux only utility).
@param vm: VM object.
@param session: A shell session on the VM provided.
@param control_path: A path to an autotest control file.
@param timeout: Timeout under which the autotest control file must complete.
@param outputdir: Path on host where we should copy the guest autotest
results to.
The following params is used by the migration
@param params: Test params used in the migration test
"""
def copy_if_hash_differs(vm, local_path, remote_path):
"""
Copy a file to a guest if it doesn't exist or if its MD5sum differs.
@param vm: VM object.
@param local_path: Local path.
@param remote_path: Remote path.
@return: Whether the hash differs (True) or not (False).
"""
hash_differs = False
local_hash = utils.hash_file(local_path)
basename = os.path.basename(local_path)
output = session.cmd_output("md5sum %s" % remote_path)
if "such file" in output:
remote_hash = "0"
elif output:
remote_hash = output.split()[0]
else:
logging.warning("MD5 check for remote path %s did not return.",
remote_path)
# Let's be a little more lenient here and see if it wasn't a
# temporary problem
remote_hash = "0"
if remote_hash != local_hash:
hash_differs = True
logging.debug("Copying %s to guest "
"(remote hash: %s, local hash:%s)",
basename, remote_hash, local_hash)
vm.copy_files_to(local_path, remote_path)
return hash_differs
def extract(vm, remote_path, dest_dir):
"""
Extract the autotest .tar.bz2 file on the guest, ensuring the final
destination path will be dest_dir.
@param vm: VM object
@param remote_path: Remote file path
@param dest_dir: Destination dir for the contents
"""
basename = os.path.basename(remote_path)
logging.debug("Extracting %s on VM %s", basename, vm.name)
session.cmd("rm -rf %s" % dest_dir)
dirname = os.path.dirname(remote_path)
session.cmd("cd %s" % dirname)
session.cmd("mkdir -p %s" % os.path.dirname(dest_dir))
e_cmd = "tar xjvf %s -C %s" % (basename, os.path.dirname(dest_dir))
output = session.cmd(e_cmd, timeout=120)
autotest_dirname = ""
for line in output.splitlines():
autotest_dirname = line.split("/")[0]
break
if autotest_dirname != os.path.basename(dest_dir):
session.cmd("cd %s" % os.path.dirname(dest_dir))
session.cmd("mv %s %s" %
(autotest_dirname, os.path.basename(dest_dir)))
def get_results(guest_autotest_path):
"""
Copy autotest results present on the guest back to the host.
"""
logging.debug("Trying to copy autotest results from guest")
guest_results_dir = os.path.join(outputdir, "guest_autotest_results")
if not os.path.exists(guest_results_dir):
os.mkdir(guest_results_dir)
vm.copy_files_from("%s/results/default/*" % guest_autotest_path,
guest_results_dir)
def get_results_summary():
"""
Get the status of the tests that were executed on the guest.
NOTE: This function depends on the results copied to host by
get_results() function, so call get_results() first.
"""
status_path = os.path.join(outputdir,
"guest_autotest_results/*/status")
try:
output = utils.system_output("cat %s" % status_path)
except error.CmdError, e:
logging.error("Error getting guest autotest status file: %s", e)
return None
try:
results = scan_results.parse_results(output)
# Report test results
logging.info("Results (test, status, duration, info):")
for result in results:
logging.info("\t %s", str(result))
return results
except Exception, e:
logging.error("Error processing guest autotest results: %s", e)
return None
if not os.path.isfile(control_path):
raise error.TestError("Invalid path to autotest control file: %s" %
control_path)
migrate_background = params.get("migrate_background") == "yes"
if migrate_background:
mig_timeout = float(params.get("mig_timeout", "3600"))
mig_protocol = params.get("migration_protocol", "tcp")
compressed_autotest_path = "/tmp/autotest.tar.bz2"
destination_autotest_path = GLOBAL_CONFIG.get_config_value('COMMON',
'autotest_top_path')
# To avoid problems, let's make the test use the current AUTODIR
# (autotest client path) location
try:
autotest_path = os.environ['AUTODIR']
except KeyError:
autotest_path = os.environ['AUTOTEST_PATH']
autotest_path = os.path.join(autotest_path, 'client')
autotest_basename = os.path.basename(autotest_path)
autotest_parentdir = os.path.dirname(autotest_path)
# tar the contents of bindir/autotest
cmd = ("cd %s; tar cvjf %s %s/*" %
(autotest_parentdir, compressed_autotest_path, autotest_basename))
# Until we have nested virtualization, we don't need the kvm test :)
cmd += " --exclude=%s/tests/kvm" % autotest_basename
cmd += " --exclude=%s/results" % autotest_basename
cmd += " --exclude=%s/tmp" % autotest_basename
cmd += " --exclude=%s/control*" % autotest_basename
cmd += " --exclude=*.pyc"
cmd += " --exclude=*.svn"
cmd += " --exclude=*.git"
utils.run(cmd)
# Copy autotest.tar.bz2
update = copy_if_hash_differs(vm, compressed_autotest_path,
compressed_autotest_path)
# Extract autotest.tar.bz2
if update:
extract(vm, compressed_autotest_path, destination_autotest_path)
g_fd, g_path = tempfile.mkstemp(dir='/tmp/')
aux_file = os.fdopen(g_fd, 'w')
config = GLOBAL_CONFIG.get_section_values(('CLIENT', 'COMMON'))
config.write(aux_file)
aux_file.close()
global_config_guest = os.path.join(destination_autotest_path,
'global_config.ini')
vm.copy_files_to(g_path, global_config_guest)
os.unlink(g_path)
vm.copy_files_to(control_path,
os.path.join(destination_autotest_path, 'control'))
# Run the test
logging.info("Running autotest control file %s on guest, timeout %ss",
os.path.basename(control_path), timeout)
session.cmd("cd %s" % destination_autotest_path)
try:
session.cmd("rm -f control.state")
session.cmd("rm -rf results/*")
session.cmd("rm -rf tmp/*")
except aexpect.ShellError:
pass
try:
bg = None
try:
logging.info("---------------- Test output ----------------")
if migrate_background:
mig_timeout = float(params.get("mig_timeout", "3600"))
mig_protocol = params.get("migration_protocol", "tcp")
bg = utils.InterruptedThread(session.cmd_output,
kwargs={'cmd': "./autotest control",
'timeout': timeout,
'print_func': logging.info})
bg.start()
while bg.isAlive():
logging.info("Autotest job did not end, start a round of "
"migration")
vm.migrate(timeout=mig_timeout, protocol=mig_protocol)
else:
session.cmd_output("./autotest control", timeout=timeout,
print_func=logging.info)
finally:
logging.info("------------- End of test output ------------")
if migrate_background and bg:
bg.join()
except aexpect.ShellTimeoutError:
if vm.is_alive():
get_results(destination_autotest_path)
get_results_summary()
raise error.TestError("Timeout elapsed while waiting for job to "
"complete")
else:
raise error.TestError("Autotest job on guest failed "
"(VM terminated during job)")
except aexpect.ShellProcessTerminatedError:
get_results(destination_autotest_path)
raise error.TestError("Autotest job on guest failed "
"(Remote session terminated during job)")
get_results(destination_autotest_path)
results = get_results_summary()
# Make a list of FAIL/ERROR/ABORT results (make sure FAIL results appear
# before ERROR results, and ERROR results appear before ABORT results)
bad_results = [r[0] for r in results if r[1] == "FAIL"]
bad_results += [r[0] for r in results if r[1] == "ERROR"]
bad_results += [r[0] for r in results if r[1] == "ABORT"]
# Fail the test if necessary
if not results:
raise error.TestFail("Autotest control file run did not produce any "
"recognizable results")
if bad_results:
if len(bad_results) == 1:
e_msg = ("Test %s failed during control file execution" %
bad_results[0])
else:
e_msg = ("Tests %s failed during control file execution" %
" ".join(bad_results))
raise error.TestFail(e_msg)
def get_loss_ratio(output):
"""
Get the packet loss ratio from the output of ping
.
@param output: Ping output.
"""
try:
return int(re.findall('(\d+)% packet loss', output)[0])
except IndexError:
logging.debug(output)
return -1
def raw_ping(command, timeout, session, output_func):
"""
Low-level ping command execution.
@param command: Ping command.
@param timeout: Timeout of the ping command.
@param session: Local executon hint or session to execute the ping command.
"""
if session is None:
process = aexpect.run_bg(command, output_func=output_func,
timeout=timeout)
# Send SIGINT signal to notify the timeout of running ping process,
# Because ping have the ability to catch the SIGINT signal so we can
# always get the packet loss ratio even if timeout.
if process.is_alive():
utils_misc.kill_process_tree(process.get_pid(), signal.SIGINT)
status = process.get_status()
output = process.get_output()
process.close()
return status, output
else:
output = ""
try:
output = session.cmd_output(command, timeout=timeout,
print_func=output_func)
except aexpect.ShellTimeoutError:
# Send ctrl+c (SIGINT) through ssh session
session.send("\003")
try:
output2 = session.read_up_to_prompt(print_func=output_func)
output += output2
except aexpect.ExpectTimeoutError, e:
output += e.output
# We also need to use this session to query the return value
session.send("\003")
session.sendline(session.status_test_command)
try:
o2 = session.read_up_to_prompt()
except aexpect.ExpectError:
status = -1
else:
try:
status = int(re.findall("\d+", o2)[0])
except Exception:
status = -1
return status, output
def ping(dest=None, count=None, interval=None, interface=None,
packetsize=None, ttl=None, hint=None, adaptive=False,
broadcast=False, flood=False, timeout=0,
output_func=logging.debug, session=None):
"""
Wrapper of ping.
@param dest: Destination address.
@param count: Count of icmp packet.
@param interval: Interval of two icmp echo request.
@param interface: Specified interface of the source address.
@param packetsize: Packet size of icmp.
@param ttl: IP time to live.
@param hint: Path mtu discovery hint.
@param adaptive: Adaptive ping flag.
@param broadcast: Broadcast ping flag.
@param flood: Flood ping flag.
@param timeout: Timeout for the ping command.
@param output_func: Function used to log the result of ping.
    @param session: Local execution hint or session to execute the ping command.
"""
if dest is not None:
command = "ping %s " % dest
else:
command = "ping localhost "
if count is not None:
command += " -c %s" % count
if interval is not None:
command += " -i %s" % interval
if interface is not None:
command += " -I %s" % interface
if packetsize is not None:
command += " -s %s" % packetsize
if ttl is not None:
command += " -t %s" % ttl
if hint is not None:
command += " -M %s" % hint
if adaptive:
command += " -A"
if broadcast:
command += " -b"
if flood:
command += " -f -q"
command = "sleep %s && kill -2 `pidof ping` & %s" % (timeout, command)
output_func = None
timeout += 1
return raw_ping(command, timeout, session, output_func)
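# Illustrative sketch: a bounded ping run from the host side (the destination
# address and interface are made up):
#
#     status, output = ping("10.0.0.2", count=10, interface="eth0",
#                           packetsize=1500, timeout=30, session=None)
#     if status == 0 and get_loss_ratio(output) == 0:
#         ...  # no packet loss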
def get_linux_ifname(session, mac_address):
"""
Get the interface name through the mac address.
@param session: session to the virtual machine
    @mac_address: the MAC address of the NIC
"""
output = session.cmd_output("ifconfig -a")
try:
ethname = re.findall("(\w+)\s+Link.*%s" % mac_address, output,
re.IGNORECASE)[0]
return ethname
except Exception:
return None
def restart_guest_network(session, nic_name=None):
"""
Restart guest's network via serial console.
@param session: session to virtual machine
@nic_name: nic card name in guest to restart
"""
if_list = []
if not nic_name:
# initiate all interfaces on guest.
o = session.cmd_output("ip link")
if_list = re.findall(r"\d+: (eth\d+):", o)
else:
if_list.append(nic_name)
if if_list:
session.sendline("killall dhclient && "
"dhclient %s &" % ' '.join(if_list))
def run_virt_sub_test(test, params, env, sub_type=None, tag=None):
"""
Call another test script in one test script.
@param test: KVM test object.
@param params: Dictionary with the test parameters.
@param env: Dictionary with test environment.
@param sub_type: Type of called test script.
@param tag: Tag for get the sub_test params
"""
if sub_type is None:
raise error.TestError("No sub test is found")
virt_dir = os.path.dirname(test.virtdir)
subtest_dir_virt = os.path.join(virt_dir, "tests")
subtest_dir_specific = os.path.join(test.bindir, params.get('vm_type'),
"tests")
subtest_dir = None
for d in [subtest_dir_specific, subtest_dir_virt]:
module_path = os.path.join(d, "%s.py" % sub_type)
if os.path.isfile(module_path):
subtest_dir = d
break
if subtest_dir is None:
raise error.TestError("Could not find test file %s.py "
"on either %s or %s directory" % (sub_type,
subtest_dir_specific, subtest_dir_virt))
f, p, d = imp.find_module(sub_type, [subtest_dir])
test_module = imp.load_module(sub_type, f, p, d)
f.close()
# Run the test function
run_func = getattr(test_module, "run_%s" % sub_type)
if tag is not None:
params = params.object_params(tag)
run_func(test, params, env)
def pin_vm_threads(vm, node):
"""
Pin VM threads to single cpu of a numa node
@param vm: VM object
@param node: NumaNode object
"""
for i in vm.vhost_threads:
logging.info("pin vhost thread(%s) to cpu(%s)" % (i, node.pin_cpu(i)))
for i in vm.vcpu_threads:
logging.info("pin vcpu thread(%s) to cpu(%s)" % (i, node.pin_cpu(i)))
def service_setup(vm, session, directory):
params = vm.get_params()
rh_perf_envsetup_script = params.get("rh_perf_envsetup_script")
rebooted = params.get("rebooted", "rebooted")
if rh_perf_envsetup_script:
src = os.path.join(directory, rh_perf_envsetup_script)
vm.copy_files_to(src, "/tmp/rh_perf_envsetup.sh")
logging.info("setup perf environment for host")
commands.getoutput("bash %s host %s" % (src, rebooted))
logging.info("setup perf environment for guest")
session.cmd("bash /tmp/rh_perf_envsetup.sh guest %s" % rebooted)
def cmd_runner_monitor(vm, monitor_cmd, test_cmd, guest_path, timeout=300):
"""
    Record environment information such as CPU utilization and meminfo
    while running a guest test inside the guest.
    @vm: Guest object
    @monitor_cmd: monitor command to run in the background
    @test_cmd: test suite run command
@guest_path: path in guest to store the test result and monitor data
@timeout: longest time for monitor running
Return: tag the suffix of the results
"""
def thread_kill(cmd, p_file):
fd = shelve.open(p_file)
o = commands.getoutput("pstree -p %s" % fd["pid"])
tmp = re.split("\s+", cmd)[0]
pid = re.findall("%s.(\d+)" % tmp, o)[0]
s, o = commands.getstatusoutput("kill -9 %s" % pid)
fd.close()
return (s, o)
def monitor_thread(m_cmd, p_file, r_file):
fd = shelve.open(p_file)
fd["pid"] = os.getpid()
fd.close()
os.system("%s &> %s" % (m_cmd, r_file))
def test_thread(session, m_cmd, t_cmd, p_file, flag, timeout):
flag.put(True)
s, o = session.cmd_status_output(t_cmd, timeout)
if s != 0:
raise error.TestFail("Test failed or timeout: %s" % o)
if not flag.empty():
flag.get()
thread_kill(m_cmd, p_file)
kill_thread_flag = Queue(1)
session = wait_for_login(vm, 0, 300, 0, 2)
tag = vm.instance
pid_file = "/tmp/monitor_pid_%s" % tag
result_file = "/tmp/host_monitor_result_%s" % tag
monitor = threading.Thread(target=monitor_thread,args=(monitor_cmd,
pid_file, result_file))
test_runner = threading.Thread(target=test_thread, args=(session,
monitor_cmd, test_cmd, pid_file,
kill_thread_flag, timeout))
monitor.start()
test_runner.start()
monitor.join(int(timeout))
if not kill_thread_flag.empty():
kill_thread_flag.get()
thread_kill(monitor_cmd, pid_file)
thread_kill("sh", pid_file)
guest_result_file = "/tmp/guest_result_%s" % tag
guest_monitor_result_file = "/tmp/guest_monitor_result_%s" % tag
vm.copy_files_from(guest_path, guest_result_file)
vm.copy_files_from("%s_monitor" % guest_path, guest_monitor_result_file)
return tag
def aton(sr):
"""
    Transform a string into a number (float or int). If the string is not in
    the form of a number, return False.
    @sr: string to transform
Return: float, int or False for failed transform
"""
try:
return int(sr)
except ValueError:
try:
return float(sr)
except ValueError:
return False
def summary_up_result(result_file, ignore, row_head, column_mark):
"""
    Summarize the monitor results or other kinds of results. It calculates
    the average value for each item in the results. It fits records that are
    in matrix form.
    @result_file: file which needs to be processed
    @ignore: pattern for comments in the results which should be thrown away
@row_head: pattern for the items in row
@column_mark: pattern for the first line in matrix which used to generate
the items in column
Return: A dictionary with the average value of results
"""
head_flag = False
result_dict = {}
column_list = {}
row_list = []
fd = open(result_file, "r")
for eachLine in fd:
if len(re.findall(ignore, eachLine)) == 0:
if len(re.findall(column_mark, eachLine)) != 0 and not head_flag:
column = 0
_, row, eachLine = re.split(row_head, eachLine)
for i in re.split("\s+", eachLine):
if i:
result_dict[i] = {}
column_list[column] = i
column += 1
head_flag = True
elif len(re.findall(column_mark, eachLine)) == 0:
column = 0
_, row, eachLine = re.split(row_head, eachLine)
row_flag = False
for i in row_list:
if row == i:
row_flag = True
if row_flag == False:
row_list.append(row)
for i in result_dict:
result_dict[i][row] = []
for i in re.split("\s+", eachLine):
if i:
result_dict[column_list[column]][row].append(i)
column += 1
fd.close()
# Calculate the average value
average_list = {}
for i in column_list:
average_list[column_list[i]] = {}
for j in row_list:
average_list[column_list[i]][j] = {}
check = result_dict[column_list[i]][j][0]
if aton(check) or aton(check) == 0.0:
count = 0
for k in result_dict[column_list[i]][j]:
count += aton(k)
average_list[column_list[i]][j] = "%.2f" % (count /
len(result_dict[column_list[i]][j]))
return average_list
| gpl-2.0 | 3,545,727,355,655,549,000 | 36.321776 | 80 | 0.560621 | false |
tovrstra/sympy | sympy/matrices/matrices.py | 1 | 67422 | import warnings
from sympy import Basic, Symbol, Integer
from sympy.core import sympify
from sympy.core.basic import S
from sympy.polys import Poly, roots, cancel
from sympy.simplify import simplify
from sympy.utilities import any
# from sympy.printing import StrPrinter /cyclic/
import random
class NonSquareMatrixException(Exception):
pass
class ShapeError(ValueError):
"""Wrong matrix shape"""
pass
class MatrixError(Exception):
pass
def _dims_to_nm(dims):
"""Converts dimensions tuple (or any object with length 1 or 2) or scalar
in dims to matrix dimensions n and m."""
try:
l = len(dims)
except TypeError:
dims = (dims,)
l = 1
# This will work for nd-array too when they are added to sympy.
try:
for dim in dims:
assert (dim > 0)
except AssertionError:
raise ValueError("Matrix dimensions should be positive integers!")
if l == 2:
n, m = map(int, dims)
elif l == 1:
n = m = int(dims[0])
else:
raise ValueError("Matrix dimensions should be a two-element tuple of ints or a single int!")
return n, m
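# Illustrative examples (added commentary, not part of the original file):
# >>> _dims_to_nm((2, 3))   # explicit (rows, cols) tuple
# (2, 3)
# >>> _dims_to_nm(4)        # a scalar means a square matrix
# (4, 4)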
def _iszero(x):
return x == 0
class DeferredVector(object):
def __init__(self,name):
self.name=name
def __getitem__(self,i):
component_name = '%s[%d]'%(self.name,i)
return Symbol(component_name)
def __str__(self):
return StrPrinter.doprint(self)
def __repr__(self):
return StrPrinter.doprint(self)
class Matrix(object):
# Added just for numpy compatibility
# TODO: investigate about __array_priority__
__array_priority__ = 10.0
def __init__(self, *args):
"""
Matrix can be constructed with values or a rule.
>>> from sympy import Matrix, I
>>> Matrix( ((1,2+I), (3,4)) ) #doctest:+NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> Matrix(2, 2, lambda i,j: (i+1)*j ) #doctest:+NORMALIZE_WHITESPACE
[0, 1]
[0, 2]
"""
if len(args) == 3 and callable(args[2]):
operation = args[2]
self.rows = int(args[0])
self.cols = int(args[1])
self.mat = []
for i in range(self.rows):
for j in range(self.cols):
self.mat.append(sympify(operation(i, j)))
elif len(args)==3 and isinstance(args[2], (list, tuple)):
self.rows=args[0]
self.cols=args[1]
mat = args[2]
if len(mat) != self.rows*self.cols:
raise MatrixError('List length should be equal to rows*columns')
self.mat = map(lambda i: sympify(i), mat)
elif len(args) == 1:
mat = args[0]
if isinstance(mat, Matrix):
self.rows = mat.rows
self.cols = mat.cols
self.mat = mat[:]
return
elif hasattr(mat, "__array__"):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = mat.__array__()
if len(arr.shape) == 2:
self.rows, self.cols = arr.shape[0], arr.shape[1]
self.mat = map(lambda i: sympify(i), arr.ravel())
return
elif len(arr.shape) == 1:
self.rows, self.cols = 1, arr.shape[0]
self.mat = [0]*self.cols
for i in xrange(len(arr)):
self.mat[i] = sympify(arr[i])
return
else:
raise NotImplementedError("Sympy supports just 1D and 2D matrices")
elif not isinstance(mat, (list, tuple)):
raise TypeError("Matrix constructor doesn't accept %s as input" % str(type(mat)))
self.rows = len(mat)
if len(mat) != 0:
if not isinstance(mat[0], (list, tuple)):
self.cols = 1
self.mat = map(lambda i: sympify(i), mat)
return
self.cols = len(mat[0])
else:
self.cols = 0
self.mat = []
for j in xrange(self.rows):
assert len(mat[j])==self.cols
for i in xrange(self.cols):
self.mat.append(sympify(mat[j][i]))
elif len(args) == 0:
# Empty Matrix
self.rows = self.cols = 0
self.mat = []
else:
# TODO: on 0.7.0 delete this and uncomment the last line
mat = args
if not isinstance(mat[0], (list, tuple)):
# make each element a singleton
mat = [ [element] for element in mat ]
warnings.warn("Deprecated constructor, use brackets: Matrix(%s)" % str(mat))
self.rows=len(mat)
self.cols=len(mat[0])
self.mat=[]
for j in xrange(self.rows):
assert len(mat[j])==self.cols
for i in xrange(self.cols):
self.mat.append(sympify(mat[j][i]))
#raise TypeError("Data type not understood")
def key2ij(self,key):
"""Converts key=(4,6) to 4,6 and ensures the key is correct."""
if not (isinstance(key,(list, tuple)) and len(key) == 2):
raise TypeError("wrong syntax: a[%s]. Use a[i,j] or a[(i,j)]"
%repr(key))
i,j=key
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
print self.rows, " ", self.cols
raise IndexError("Index out of range: a[%s]"%repr(key))
return i,j
def transpose(self):
"""
Matrix transposition.
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m.transpose() #doctest: +NORMALIZE_WHITESPACE
[ 1, 3]
[2 + I, 4]
>>> m.T == m.transpose()
True
"""
a = [0]*self.cols*self.rows
for i in xrange(self.cols):
a[i*self.rows:(i+1)*self.rows] = self.mat[i::self.cols]
return Matrix(self.cols,self.rows,a)
T = property(transpose,None,None,"Matrix transposition.")
def conjugate(self):
"""By-element conjugation."""
out = Matrix(self.rows,self.cols,
lambda i,j: self[i,j].conjugate())
return out
C = property(conjugate,None,None,"By-element conjugation.")
@property
def H(self):
"""
Hermite conjugation.
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m.H #doctest: +NORMALIZE_WHITESPACE
[ 1, 3]
[2 - I, 4]
"""
out = self.T.C
return out
@property
def D(self):
"""Dirac conjugation."""
from sympy.physics.matrices import mgamma
out = self.H * mgamma(0)
return out
def __getitem__(self,key):
"""
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m[1,0]
3
>>> m.H[1,0]
2 - I
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
return self.submatrix(key)
else:
# a2idx inlined
try:
i = i.__int__()
except AttributeError:
try:
i = i.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
# a2idx inlined
try:
j = j.__int__()
except AttributeError:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
raise IndexError("Index out of range: a[%s]" % (key,))
else:
return self.mat[i*self.cols + j]
else:
# row-wise decomposition of matrix
if type(key) is slice:
return self.mat[key]
else:
k = a2idx(key)
if k is not None:
return self.mat[k]
raise IndexError("Invalid index: a[%s]" % repr(key))
def __setitem__(self, key, value):
"""
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m[1,0]=9
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[9, 4]
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
return
if isinstance(value, (list, tuple)):
self.copyin_list(key, value)
return
else:
# a2idx inlined
try:
i = i.__int__()
except AttributeError:
try:
i = i.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
# a2idx inlined
try:
j = j.__int__()
except AttributeError:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
raise IndexError("Index out of range: a[%s]" % (key,))
else:
self.mat[i*self.cols + j] = sympify(value)
return
else:
# row-wise decomposition of matrix
if type(key) is slice:
raise IndexError("Vector slices not implemented yet.")
else:
k = a2idx(key)
if k is not None:
self.mat[k] = sympify(value)
return
raise IndexError("Invalid index: a[%s]"%repr(key))
def __array__(self):
return matrix2numpy(self)
def tolist(self):
"""
Return the Matrix converted in a python list.
>>> from sympy import Matrix
>>> m=Matrix(3, 3, range(9))
>>> m
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
ret = [0]*self.rows
for i in xrange(self.rows):
ret[i] = self.mat[i*self.cols:(i+1)*self.cols]
return ret
def copyin_matrix(self, key, value):
rlo, rhi = self.slice2bounds(key[0], self.rows)
clo, chi = self.slice2bounds(key[1], self.cols)
assert value.rows == rhi - rlo and value.cols == chi - clo
for i in range(value.rows):
for j in range(value.cols):
self[i+rlo, j+clo] = sympify(value[i,j])
def copyin_list(self, key, value):
assert isinstance(value, (list, tuple))
self.copyin_matrix(key, Matrix(value))
def hash(self):
"""Compute a hash every time, because the matrix elements
could change."""
return hash(self.__str__() )
@property
def shape(self):
return (self.rows, self.cols)
def __rmul__(self,a):
if hasattr(a, "__array__") and a.shape != ():
return matrix_multiply(a,self)
out = Matrix(self.rows,self.cols,map(lambda i: a*i,self.mat))
return out
def expand(self):
out = Matrix(self.rows,self.cols,map(lambda i: i.expand(), self.mat))
return out
def combine(self):
out = Matrix(self.rows,self.cols,map(lambda i: i.combine(),self.mat))
return out
def subs(self, *args):
out = Matrix(self.rows,self.cols,map(lambda i: i.subs(*args),self.mat))
return out
def __sub__(self,a):
return self + (-a)
def __mul__(self,a):
if hasattr(a, "__array__") and a.shape != ():
return matrix_multiply(self,a)
out = Matrix(self.rows,self.cols,map(lambda i: i*a,self.mat))
return out
def __pow__(self, num):
if not self.is_square:
raise NonSquareMatrixException()
if isinstance(num, int) or isinstance(num, Integer):
n = int(num)
if n < 0:
return self.inv() ** -n # A**-2 = (A**-1)**2
a = eye(self.cols)
while n:
if n % 2:
a = a * self
n -= 1
self = self * self
n = n // 2
return a
raise NotImplementedError('Can only raise to the power of an integer for now')
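# Illustrative example (added commentary, not part of the original file;
# output shown schematically). The loop above is binary exponentiation, so,
# for instance, a shear matrix cubed:
# >>> from sympy import Matrix
# >>> Matrix([[1, 1], [0, 1]])**3
# [1, 3]
# [0, 1]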
def __add__(self,a):
return matrix_add(self,a)
def __radd__(self,a):
return matrix_add(a,self)
def __div__(self,a):
return self * (S.One/a)
def __truediv__(self,a):
return self.__div__(a)
def multiply(self,b):
"""Returns self*b """
return matrix_multiply(self,b)
def add(self,b):
"""Return self+b """
return matrix_add(self,b)
def __neg__(self):
return -1*self
def __eq__(self, a):
if not isinstance(a, (Matrix, Basic)):
a = sympify(a)
if isinstance(a, Matrix):
return self.hash() == a.hash()
else:
return False
def __ne__(self,a):
if not isinstance(a, (Matrix, Basic)):
a = sympify(a)
if isinstance(a, Matrix):
return self.hash() != a.hash()
else:
return True
def _format_str(self, strfunc, rowsep='\n'):
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
string = strfunc(self[i,j])
res[-1].append(string)
maxlen[j] = max(len(string), maxlen[j])
# Patch strings together
for i, row in enumerate(res):
for j, elem in enumerate(row):
# Pad each element up to maxlen so the columns line up
row[j] = elem.rjust(maxlen[j])
res[i] = "[" + ", ".join(row) + "]"
return rowsep.join(res)
def __str__(self):
return StrPrinter.doprint(self)
def __repr__(self):
return StrPrinter.doprint(self)
def inv(self, method="GE", iszerofunc=_iszero, try_block_diag=False):
"""
Calculates the matrix inverse.
According to the "method" parameter, it calls the appropriate method:
GE .... inverse_GE()
LU .... inverse_LU()
ADJ ... inverse_ADJ()
According to the "try_block_diag" parameter, it will try to form block
diagonal matrices using the method get_diag_blocks(), invert these
individually, and then reconstruct the full inverse matrix.
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
be provided by setting the iszerofunc argument to a function that
should return True if its argument is zero.
"""
assert self.cols==self.rows
if try_block_diag:
blocks = self.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return block_diag(r)
if method == "GE":
return self.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
return self.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
return self.inverse_ADJ()
else:
raise ValueError("Inversion method unrecognized")
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i,j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def row(self, i, f):
"""Elementary row operation using functor"""
for j in range(0, self.cols):
self[i, j] = f(self[i, j], j)
def col(self, j, f):
"""Elementary column operation using functor"""
for i in range(0, self.rows):
self[i, j] = f(self[i, j], i)
def row_swap(self, i, j):
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def col_swap(self, i, j):
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_del(self, i):
self.mat = self.mat[:i*self.cols] + self.mat[(i+1)*self.cols:]
self.rows -= 1
def col_del(self, i):
"""
>>> import sympy
>>> M = sympy.matrices.eye(3)
>>> M.col_del(1)
>>> M #doctest: +NORMALIZE_WHITESPACE
[1, 0]
[0, 0]
[0, 1]
"""
for j in range(self.rows-1, -1, -1):
del self.mat[i+j*self.cols]
self.cols -= 1
def row_join(self, rhs):
"""
Concatenates two matrices along self's last and rhs's first column
>>> from sympy import Matrix
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> V = Matrix(3,1,lambda i,j: 3+i+j)
>>> M.row_join(V)
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
"""
assert self.rows == rhs.rows
newmat = self.zeros((self.rows, self.cols + rhs.cols))
newmat[:,:self.cols] = self[:,:]
newmat[:,self.cols:] = rhs
return newmat
def col_join(self, bott):
"""
Concatenates two matrices along self's last and bott's first row
>>> from sympy import Matrix
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> V = Matrix(1,3,lambda i,j: 3+i+j)
>>> M.col_join(V)
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
"""
assert self.cols == bott.cols
newmat = self.zeros((self.rows+bott.rows, self.cols))
newmat[:self.rows,:] = self[:,:]
newmat[self.rows:,:] = bott
return newmat
def row_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((1, 3))
>>> V
[0, 0, 0]
>>> M.row_insert(1,V)
[0, 1, 2]
[0, 0, 0]
[1, 2, 3]
[2, 3, 4]
"""
if pos == 0:
return mti.col_join(self)
assert self.cols == mti.cols
newmat = self.zeros((self.rows + mti.rows, self.cols))
newmat[:pos,:] = self[:pos,:]
newmat[pos:pos+mti.rows,:] = mti[:,:]
newmat[pos+mti.rows:,:] = self[pos:,:]
return newmat
def col_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((3, 1))
>>> V
[0]
[0]
[0]
>>> M.col_insert(1,V)
[0, 0, 1, 2]
[1, 0, 2, 3]
[2, 0, 3, 4]
"""
if pos == 0:
return mti.row_join(self)
assert self.rows == mti.rows
newmat = self.zeros((self.rows, self.cols + mti.cols))
newmat[:,:pos] = self[:,:pos]
newmat[:,pos:pos+mti.cols] = mti[:,:]
newmat[:,pos+mti.cols:] = self[:,pos:]
return newmat
def trace(self):
assert self.cols == self.rows
trace = 0
for i in range(self.cols):
trace += self[i,i]
return trace
def submatrix(self, keys):
"""
>>> from sympy import Matrix
>>> m = Matrix(4,4,lambda i,j: i+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
[3, 4, 5, 6]
>>> m[0:1, 1] #doctest: +NORMALIZE_WHITESPACE
[1]
>>> m[0:2, 0:1] #doctest: +NORMALIZE_WHITESPACE
[0]
[1]
>>> m[2:4, 2:4] #doctest: +NORMALIZE_WHITESPACE
[4, 5]
[5, 6]
"""
assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
outLines, outCols = rhi-rlo, chi-clo
outMat = [0]*outLines*outCols
for i in xrange(outLines):
outMat[i*outCols:(i+1)*outCols] = self.mat[(i+rlo)*self.cols+clo:(i+rlo)*self.cols+chi]
return Matrix(outLines,outCols,outMat)
def slice2bounds(self, key, defmax):
"""
Takes slice or number and returns (min,max) for iteration
Takes a default maxval to deal with the slice ':' which is (none, none)
"""
if isinstance(key, slice):
lo, hi = 0, defmax
if key.start != None:
if key.start >= 0:
lo = key.start
else:
lo = defmax+key.start
if key.stop != None:
if key.stop >= 0:
hi = key.stop
else:
hi = defmax+key.stop
return lo, hi
elif isinstance(key, int):
if key >= 0:
return key, key+1
else:
return defmax+key, defmax+key+1
else:
raise IndexError("Improper index type")
def applyfunc(self, f):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,2,lambda i,j: i*2+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1]
[2, 3]
>>> m.applyfunc(lambda i: 2*i) #doctest: +NORMALIZE_WHITESPACE
[0, 2]
[4, 6]
"""
assert callable(f)
out = Matrix(self.rows,self.cols,map(f,self.mat))
return out
def evalf(self, prec=None, **options):
if prec is None:
return self.applyfunc(lambda i: i.evalf(**options))
else:
return self.applyfunc(lambda i: i.evalf(prec, **options))
def reshape(self, _rows, _cols):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,3,lambda i,j: 1)
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1]
[1, 1, 1]
>>> m.reshape(1,6) #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1, 1, 1, 1]
>>> m.reshape(3,2) #doctest: +NORMALIZE_WHITESPACE
[1, 1]
[1, 1]
[1, 1]
"""
if self.rows*self.cols != _rows*_cols:
print "Invalid reshape parameters %d %d" % (_rows, _cols)
return Matrix(_rows, _cols, lambda i,j: self.mat[i*_cols + j])
def print_nonzero (self, symb="X"):
"""
Shows location of non-zero entries for fast shape lookup
>>> from sympy import Matrix, matrices
>>> m = Matrix(2,3,lambda i,j: i*3+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2]
[3, 4, 5]
>>> m.print_nonzero() #doctest: +NORMALIZE_WHITESPACE
[ XX]
[XXX]
>>> m = matrices.eye(4)
>>> m.print_nonzero("x") #doctest: +NORMALIZE_WHITESPACE
[x ]
[ x ]
[ x ]
[ x]
"""
s="";
for i in range(self.rows):
s+="["
for j in range(self.cols):
if self[i,j] == 0:
s+=" "
else:
s+= symb+""
s+="]\n"
print s
def LUsolve(self, rhs, iszerofunc=_iszero):
"""
Solve the linear system Ax = b for x.
self is the coefficient matrix A and rhs is the right side b.
This is for symbolic matrices, for real or complex ones use
sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.
"""
assert rhs.rows == self.rows
A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm)
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
# backward substitution
for i in range(n-1,-1,-1):
for j in range(i+1, n):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
b.row(i, lambda x,k: x / A[i,i])
return b
def LUdecomposition(self, iszerofunc=_iszero):
"""
Returns the decomposition LU and the row swaps p.
"""
combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i,j] = combined[i,j]
else:
if i == j:
L[i,i] = 1
U[i,j] = combined[i,j]
return L, U, p
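# Illustrative example (added commentary, not part of the original file;
# output shown schematically, exact alignment may differ):
# >>> from sympy import Matrix
# >>> L, U, p = Matrix(((4, 3), (6, 3))).LUdecomposition()
# >>> L
# [  1, 0]
# [3/2, 1]
# >>> U
# [4,    3]
# [0, -3/2]
# >>> p   # no row swaps were needed
# []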
def LUdecomposition_Simple(self, iszerofunc=_iszero):
"""
Returns A comprised of L,U (L's diag entries are 1) and
p which is the list of the row swaps (in order).
"""
assert self.rows == self.cols
n = self.rows
A = self[:,:]
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
pivot = -1
for i in range(j,n):
for k in range(j):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and not iszerofunc(A[i,j]):
pivot = i
if pivot < 0:
raise ValueError("Error: non-invertible matrix passed to LUdecomposition_Simple()")
if pivot != j: # row must be swapped
A.row_swap(pivot,j)
p.append([pivot,j])
assert not iszerofunc(A[j,j])
scale = 1 / A[j,j]
for i in range(j+1,n):
A[i,j] = A[i,j] * scale
return A, p
def LUdecompositionFF(self):
"""
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
From the paper "fraction-free matrix factors..." by Zhou and Jeffrey
"""
n, m = self.rows, self.cols
U, L, P = self[:,:], eye(n), eye(n)
DD = zeros(n) # store it smarter since it's just diagonal
oldpivot = 1
for k in range(n-1):
if U[k,k] == 0:
kpivot = k+1
Notfound = True
while kpivot < n and Notfound:
if U[kpivot, k] != 0:
Notfound = False
else:
kpivot = kpivot + 1
if Notfound:  # no non-zero pivot exists below row k
raise ValueError("Matrix is not full rank")
else:
swap = U[k, k:]
U[k,k:] = U[kpivot,k:]
U[kpivot, k:] = swap
swap = P[k, k:]
P[k, k:] = P[kpivot, k:]
P[kpivot, k:] = swap
assert U[k, k] != 0
L[k,k] = U[k,k]
DD[k,k] = oldpivot * U[k,k]
assert DD[k,k] != 0
Ukk = U[k,k]
for i in range(k+1, n):
L[i,k] = U[i,k]
Uik = U[i,k]
for j in range(k+1, m):
U[i,j] = (Ukk * U[i,j] - U[k,j]*Uik) / oldpivot
U[i,k] = 0
oldpivot = U[k,k]
DD[n-1,n-1] = oldpivot
return P, L, DD, U
def cofactorMatrix(self, method="berkowitz"):
out = Matrix(self.rows, self.cols, lambda i,j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
assert 0 <= i < self.rows and 0 <= j < self.cols
return self.minorMatrix(i,j).det(method)
def minorMatrix(self, i, j):
assert 0 <= i < self.rows and 0 <= j < self.cols
return self.delRowCol(i,j)
def cofactor(self, i, j, method="berkowitz"):
if (i+j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1 * self.minorEntry(i, j, method)
def jacobian(self, X):
"""
Calculates the Jacobian matrix (derivative of a vectorial function).
*self*
A vector of expressions representing functions f_i(x_1, ..., x_n).
*X*
The set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(jacobian() should always work).
Examples::
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
[ 2*rho, 0]
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
"""
if not isinstance(X, Matrix):
X = Matrix(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
assert len(self.shape) == 2
assert len(X.shape) == 2
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return Matrix(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
"""
Return Q,R where A = Q*R, Q is orthogonal and R is upper triangular.
Assumes full-rank square (for now).
"""
assert self.rows == self.cols
n = self.rows
Q, R = self.zeros(n), self.zeros(n)
for j in range(n): # for each column vector
tmp = self[:,j] # take original v
for i in range(j):
# subtract the project of self on new vector
tmp -= Q[:,i] * self[:,j].dot(Q[:,i])
tmp.expand()
# normalize it
R[j,j] = tmp.norm()
Q[:,j] = tmp / R[j,j]
assert Q[:,j].norm() == 1
for i in range(j):
R[i,j] = Q[:,i].dot(self[:,j])
return Q,R
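# Illustrative example (added commentary, not part of the original file;
# output shown schematically). This is the classic textbook QR example,
# worked out by hand under exact arithmetic:
# >>> from sympy import Matrix
# >>> A = Matrix([[12, -51, 4], [6, 167, -68], [-4, 24, -41]])
# >>> Q, R = A.QRdecomposition()
# >>> Q
# [ 6/7, -69/175, -58/175]
# [ 3/7, 158/175,   6/175]
# [-2/7,    6/35,  -33/35]
# >>> R
# [14,  21, -14]
# [ 0, 175, -70]
# [ 0,   0,  35]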
def QRsolve(self, b):
"""
Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
However, LUsolve usually uses an exact arithmetic, so you don't need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use sympy.mpmath.qr_solve.
"""
Q, R = self.QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n-1, -1, -1):
tmp = y[j,:]
for k in range(j+1, n):
tmp -= R[j,k] * x[n-1-k]
x.append(tmp/R[j,j])
return Matrix([row.mat for row in reversed(x)])
# Utility functions
def simplify(self):
"""Simplify the elements of a matrix in place."""
for i in xrange(len(self.mat)):
self.mat[i] = simplify(self.mat[i])
#def evaluate(self): # no more eval() so should be removed
# for i in range(self.rows):
# for j in range(self.cols):
# self[i,j] = self[i,j].eval()
def cross(self, b):
assert isinstance(b, (list, tuple, Matrix))
if not (self.rows == 1 and self.cols == 3 or \
self.rows == 3 and self.cols == 1 ) and \
(b.rows == 1 and b.cols == 3 or \
b.rows == 3 and b.cols == 1):
raise ValueError("Dimensions incorrect for cross product")
else:
return Matrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
assert isinstance(b, (list, tuple, Matrix))
if isinstance(b, (list, tuple)):
m = len(b)
else:
m = b.rows * b.cols
assert self.cols*self.rows == m
prod = 0
for i in range(m):
prod += self[i] * b[i]
return prod
def norm(self):
assert self.rows == 1 or self.cols == 1
out = sympify(0)
for i in range(self.rows * self.cols):
out += self[i]*self[i]
return out**S.Half
def normalized(self):
assert self.rows == 1 or self.cols == 1
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Project onto v."""
return v * (self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
copy = self[:,:]
for i in range(len(perm)-1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
copy = self[:,:]
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def delRowCol(self, i, j):
# used only for cofactors, makes a copy
M = self[:,:]
M.row_del(i)
M.col_del(j)
return M
def zeronm(self, n, m):
# used so that certain functions above can use this
# then only this func need be overloaded in subclasses
warnings.warn( 'Deprecated: use zeros() instead.' )
return Matrix(n,m,[S.Zero]*n*m)
def zero(self, n):
"""Returns a n x n matrix of zeros."""
warnings.warn( 'Deprecated: use zeros() instead.' )
return Matrix(n,n,[S.Zero]*n*n)
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return Matrix(n,m,[S.Zero]*n*m)
def eye(self, n):
"""Returns the identity matrix of size n."""
tmp = self.zeros(n)
for i in range(tmp.rows):
tmp[i,i] = S.One
return tmp
@property
def is_square(self):
return self.rows == self.cols
def is_upper(self):
for i in range(self.cols):
for j in range(self.rows):
if i > j and self[i,j] != 0:
return False
return True
def is_lower(self):
for i in range(self.cols):
for j in range(self.rows):
if i < j and self[i, j] != 0:
return False
return True
def is_symbolic(self):
for i in range(self.cols):
for j in range(self.rows):
if self[i,j].atoms(Symbol):
return True
return False
def clone(self):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j])
def det(self, method="bareis"):
"""
Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
"""
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
else:
raise ValueError("Determinant method unrecognized")
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
a minimal number of fractions. This means that less term
rewriting is needed on the resulting formulae.
TODO: Implement algorithm for sparse matrices (SFF).
"""
if not self.is_square:
raise NonSquareMatrixException()
M, n = self[:,:], self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
D /= M[k-1, k-1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign * M[n-1, n-1]
return det.expand()
def adjugate(self, method="berkowitz"):
"""
Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See also: .cofactorMatrix(), .T
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""
Calculates the inverse using LU decomposition.
"""
return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
"""
Calculates the inverse using Gaussian elimination.
"""
assert self.rows == self.cols
assert self.det() != 0
big = self.row_join(self.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc)
return red[0][:,big.rows:]
def inverse_ADJ(self):
"""
Calculates the inverse using the adjugate matrix and a determinant.
"""
assert self.rows == self.cols
d = self.berkowitz_det()
assert d != 0
return self.adjugate()/d
def rref(self,simplified=False, iszerofunc=_iszero):
"""
Take any matrix and return reduced row-echelon form and indices of pivot vars
To simplify elements before finding nonzero pivots set simplified=True
"""
# TODO: rewrite inverse_GE to use this
pivots, r = 0, self[:,:] # pivot: index of next row to contain a pivot
pivotlist = [] # indices of pivot variables (non-free)
for i in range(r.cols):
if pivots == r.rows:
break
if simplified:
r[pivots,i] = simplify(r[pivots,i])
if iszerofunc(r[pivots,i]):
for k in range(pivots, r.rows):
if simplified and k>pivots:
r[k,i] = simplify(r[k,i])
if not iszerofunc(r[k,i]):
break
if k == r.rows - 1 and iszerofunc(r[k,i]):
continue
r.row_swap(pivots,k)
scale = r[pivots,i]
r.row(pivots, lambda x, _: x/scale)
for j in range(r.rows):
if j == pivots:
continue
scale = r[j,i]
r.row(j, lambda x, k: x - r[pivots,k]*scale)
pivotlist.append(i)
pivots += 1
return r, pivotlist
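# Illustrative example (added commentary, not part of the original file;
# output shown schematically):
# >>> from sympy import Matrix
# >>> r, pivots = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).rref()
# >>> r
# [1, 0, -1]
# [0, 1,  2]
# [0, 0,  0]
# >>> pivots
# [0, 1]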
def nullspace(self,simplified=False):
"""
Returns list of vectors (Matrix objects) that span nullspace of self
"""
reduced, pivots = self.rref(simplified)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros((self.cols, 1)))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i,0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i+1, self.cols):
line = pivots.index(i)
if reduced[line, j] != 0:
assert j not in pivots
basis[basiskey.index(j)][i,0] = -1 * reduced[line, j]
return basis
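# Illustrative example (added commentary, not part of the original file;
# output shown schematically). The rank-1 matrix below has a one-dimensional
# nullspace spanned by (-2, 1):
# >>> from sympy import Matrix
# >>> basis = Matrix([[1, 2], [2, 4]]).nullspace()
# >>> basis[0]
# [-2]
# [ 1]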
def berkowitz(self):
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently
coefficients of characteristic polynomials of 'self' and all
its square sub-matrices composed by removing both i-th row
and column, without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial, when 'self'
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and
I is N x N identity matrix, then the following
definition of the characteristic polynomial is being used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([ [x,y,z], [1,0,0], [y,z,x] ])
>>> p, q, r = M.berkowitz()
>>> print p # 1 x 1 M's sub-matrix
(1, -x)
>>> print q # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> print r # 3 x 3 M's sub-matrix
(1, -2*x, -y - y*z + x**2, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
if not self.is_square:
raise NonSquareMatrixException()
A, N = self, self.rows
transforms = [0] * (N-1)
for n in xrange(N, 1, -1):
T, k = zeros((n+1,n)), n - 1
R, C = -A[k,:k], A[:k,k]
A, a = A[:k,:k], -A[k,k]
items = [ C ]
for i in xrange(0, n-2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0,0]
items = [ S.One, a ] + items
for i in xrange(n):
T[i:,i] = items[:n-i+1]
transforms[k-1] = T
polys = [ Matrix([S.One, -A[0,0]]) ]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return tuple(map(tuple, polys))
def berkowitz_det(self):
"""Computes determinant using Berkowitz method."""
poly = self.berkowitz()[-1]
sign = (-1)**(len(poly)-1)
return sign * poly[-1]
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method."""
sign, minors = S.NegativeOne, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x):
"""Computes characteristic polynomial minors using Berkowitz method."""
coeffs, monoms = self.berkowitz()[-1], range(self.rows+1)
return Poly(dict(zip(reversed(monoms), coeffs)), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method. """
return roots(self.berkowitz_charpoly(Symbol('x', dummy=True)), **flags)
eigenvals = berkowitz_eigenvals
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis)."""
if 'multiple' in flags:
del flags['multiple']
out, vlist = [], self.eigenvals(**flags)
for r, k in vlist.iteritems():
tmp = self - eye(self.rows)*r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplified=True)
out.append((r, k, basis))
return out
def fill(self, value):
"""Fill the matrix with the scalar value."""
self.mat = [value] * self.rows * self.cols
def __getattr__(self, attr):
if attr in ('diff','integrate','limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc( item_doit )
return doit
else:
raise AttributeError()
def vec(self):
"""
Return the Matrix converted into a one column matrix by stacking columns
>>> from sympy import Matrix
>>> m=Matrix([ [1,3], [2,4] ])
>>> m
[1, 3]
[2, 4]
>>> m.vec()
[1]
[2]
[3]
[4]
"""
return Matrix(self.cols*self.rows, 1, self.transpose().mat)
def vech(self, diagonal=True, check_symmetry=True):
"""
Return the unique elements of a symmetric Matrix as a one column matrix
by stacking
the elements in the lower triangle
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
>>> from sympy import Matrix
>>> m=Matrix([ [1,2], [2,3] ])
>>> m
[1, 2]
[2, 3]
>>> m.vech()
[1]
[2]
[3]
>>> m.vech(diagonal=False)
[2]
"""
c = self.cols
if c != self.rows:
raise TypeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros( (c * (c + 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j,c):
v[count] = self[i,j]
count += 1
else:
v = zeros( (c * (c - 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j+1,c):
v[count] = self[i,j]
count += 1
return v
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Example:
>>> from sympy import Matrix, symbols
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
[1, 3]
[y, z**2]
>>> a2
[x]
>>> a3
[0]
>>>
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[0:i, i:]
to_the_bottom = M[i:, 0:i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[0:i, 0:i])
if M.shape == M[0:i, 0:i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def matrix_multiply(A, B):
"""
Matrix product A*B.
A and B must be of appropriate dimensions. If A is a m x k matrix, and B
is a k x n matrix, the product will be an m x n matrix.
Example:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
[30, 36, 42]
[66, 81, 96]
>>> B*A
Traceback (most recent call last):
...
ShapeError
>>>
"""
# The following implementation is equivalent, but about 5% slower
#ma, na = A.shape
#mb, nb = B.shape
#
#if na != mb:
# raise ShapeError()
#product = Matrix(ma, nb, lambda i,j: 0)
#for i in xrange(ma):
# for j in xrange(nb):
# s = 0
# for k in range(na):
# s += A[i, k]*B[k, j]
# product[i, j] = s
#return product
if A.shape[1] != B.shape[0]:
raise ShapeError()
blst = B.T.tolist()
alst = A.tolist()
return Matrix(A.shape[0], B.shape[1], lambda i, j:
reduce(lambda k, l: k+l,
map(lambda n, m: n*m,
alst[i],
blst[j])))
def matrix_add(A,B):
"""Return A+B"""
if A.shape != B.shape:
raise ShapeError()
alst = A.tolist()
blst = B.tolist()
ret = [0]*A.shape[0]
for i in xrange(A.shape[0]):
ret[i] = map(lambda j,k: j+k, alst[i], blst[i])
return Matrix(ret)
def zero(n):
"""Create square zero matrix n x n"""
warnings.warn( 'Deprecated: use zeros() instead.' )
return zeronm(n,n)
def zeronm(n,m):
"""Create zero matrix n x m"""
warnings.warn( 'Deprecated: use zeros() instead.' )
assert n>0
assert m>0
return Matrix(n,m,[S.Zero]*m*n)
def zeros(dims):
"""Create zero matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm(dims)
return Matrix(n, m, [S.Zero]*m*n)
def one(n):
"""Create square all-one matrix n x n"""
warnings.warn( 'Deprecated: use ones() instead.' )
return Matrix(n,n,[S.One]*n*n)
def ones(dims):
"""Create all-one matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm( dims )
return Matrix(n, m, [S.One]*m*n)
def eye(n):
"""Create square identity matrix n x n"""
n = int(n)
out = zeros(n)
for i in range(n):
out[i, i] = S.One
return out
def randMatrix(r,c,min=0,max=99,seed=[]):
"""Create random matrix r x c"""
if seed == []:
prng = random.Random() # use system time
else:
prng = random.Random(seed)
return Matrix(r,c,lambda i,j: prng.randint(min,max))
def hessian(f, varlist):
"""Compute Hessian matrix for a function f
see: http://en.wikipedia.org/wiki/Hessian_matrix
"""
# f is the expression representing a function f, return regular matrix
if isinstance(varlist, (list, tuple)):
m = len(varlist)
elif isinstance(varlist, Matrix):
m = varlist.cols
assert varlist.rows == 1
else:
raise ValueError("Improper variable list in hessian function")
assert m > 0
try:
f.diff(varlist[0]) # check differentiability
except AttributeError:
raise ValueError("Function %d is not differentiable" % i)
out = zeros(m)
for i in range(m):
for j in range(i,m):
out[i,j] = f.diff(varlist[i]).diff(varlist[j])
for i in range(m):
for j in range(i):
out[i,j] = out[j,i]
return out
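# Illustrative example (added commentary, not part of the original file;
# output shown schematically):
# >>> from sympy.abc import x, y
# >>> hessian(x**2*y, (x, y))
# [2*y, 2*x]
# [2*x,   0]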
def GramSchmidt(vlist, orthog=False):
out = []
m = len(vlist)
for i in range(m):
tmp = vlist[i]
for j in range(i):
tmp -= vlist[i].project(out[j])
if tmp == Matrix([[0,0,0]]):
raise ValueError("GramSchmidt: vector set not linearly independent")
out.append(tmp)
if orthog:
for i in range(len(out)):
out[i] = out[i].normalized()
return out
def wronskian(functions, var, method='bareis'):
"""Compute Wronskian for [] of functions
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1,...,fn) = | . . . . |
| . . . . |
| n n n |
| D(f1) D(f2) ... D(fn)|
see: http://en.wikipedia.org/wiki/Wronskian
"""
for index in xrange(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i,j: functions[i].diff(var, j) )
return W.det(method)
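# Illustrative example (added commentary, not part of the original file):
# >>> from sympy.abc import x
# >>> wronskian([x, x**2], x)   # det of [[x, 1], [x**2, 2*x]]
# x**2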
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
denoted as C(a, b, ..., z), does not vanish for n = 0.
Casoratian is defined by k x k determinant:
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis.
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
seqs = map(sympify, seqs)
if not zero:
f = lambda i, j: seqs[j].subs(n, n+i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def block_diag(matrices):
"""
Constructs a block diagonal matrix from a list of square matrices.
Example:
>>> from sympy import block_diag, symbols, Matrix
>>> from sympy.abc import a, b, c, x, y, z
>>> a = Matrix([[1, 2], [2, 3]])
>>> b = Matrix([[3, x], [y, 3]])
>>> block_diag([a, b, b])
[1, 2, 0, 0, 0, 0]
[2, 3, 0, 0, 0, 0]
[0, 0, 3, x, 0, 0]
[0, 0, y, 3, 0, 0]
[0, 0, 0, 0, 3, x]
[0, 0, 0, 0, y, 3]
"""
rows = 0
for m in matrices:
assert m.rows == m.cols, "All matrices must be square."
rows += m.rows
A = zeros((rows, rows))
i = 0
for m in matrices:
A[i+0:i+m.rows, i+0:i+m.cols] = m
i += m.rows
return A
class SMatrix(Matrix):
"""Sparse matrix"""
def __init__(self, *args):
if len(args) == 3 and callable(args[2]):
op = args[2]
assert isinstance(args[0], int) and isinstance(args[1], int)
self.rows = args[0]
self.cols = args[1]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(op(i,j))
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], (list, tuple)):
self.rows = args[0]
self.cols = args[1]
mat = args[2]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(mat[i*self.cols+j])
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], dict):
self.rows = args[0]
self.cols = args[1]
self.mat = {}
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
self.mat[key] = args[2][key]
else:
if len(args) == 1:
mat = args[0]
else:
mat = args
if not isinstance(mat[0], (list, tuple)):
mat = [ [element] for element in mat ]
self.rows = len(mat)
self.cols = len(mat[0])
self.mat = {}
for i in range(self.rows):
assert len(mat[i]) == self.cols
for j in range(self.cols):
value = sympify(mat[i][j])
if value != 0:
self.mat[(i,j)] = value
def __getitem__(self, key):
if isinstance(key, slice) or isinstance(key, int):
lo, hi = self.slice2bounds(key, self.rows*self.cols)
L = []
for i in range(lo, hi):
m,n = self.rowdecomp(i)
if self.mat.has_key((m,n)):
L.append(self.mat[(m,n)])
else:
L.append(0)
if len(L) == 1:
return L[0]
else:
return L
assert len(key) == 2
if isinstance(key[0], int) and isinstance(key[1], int):
i,j=self.key2ij(key)
if (i, j) in self.mat:
return self.mat[(i,j)]
else:
return 0
elif isinstance(key[0], slice) or isinstance(key[1], slice):
return self.submatrix(key)
else:
raise IndexError("Index out of range: a[%s]"%repr(key))
def rowdecomp(self, num):
assert (0 <= num < self.rows * self.cols) or \
(0 <= -1*num < self.rows * self.cols)
i, j = 0, num
while j >= self.cols:
j -= self.cols
i += 1
return i,j
def __setitem__(self, key, value):
# almost identical, need to test for 0
assert len(key) == 2
if isinstance(key[0], slice) or isinstance(key[1], slice):
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
if isinstance(value, (list, tuple)):
self.copyin_list(key, value)
else:
i,j=self.key2ij(key)
testval = sympify(value)
if testval != 0:
self.mat[(i,j)] = testval
elif self.mat.has_key((i,j)):
del self.mat[(i,j)]
def row_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if i==k:
pass
elif i > k:
newD[i-1,j] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.rows -= 1
def col_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if j==k:
pass
elif j > k:
newD[i,j-1] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.cols -= 1
def toMatrix(self):
l = []
for i in range(self.rows):
c = []
l.append(c)
for j in range(self.cols):
if (i, j) in self.mat:
c.append(self[i, j])
else:
c.append(0)
return Matrix(l)
# from here to end all functions are same as in matrices.py
# with Matrix replaced with SMatrix
def copyin_list(self, key, value):
assert isinstance(value, (list, tuple))
self.copyin_matrix(key, SMatrix(value))
def multiply(self,b):
"""Returns self*b """
def dotprod(a,b,i,j):
assert a.cols == b.rows
r=0
for x in range(a.cols):
r+=a[i,x]*b[x,j]
return r
r = SMatrix(self.rows, b.cols, lambda i,j: dotprod(self,b,i,j))
if r.rows == 1 and r.cols ==1:
return r[0,0]
return r
def submatrix(self, keys):
assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
return SMatrix(rhi-rlo, chi-clo, lambda i,j: self[i+rlo, j+clo])
def reshape(self, _rows, _cols):
if self.rows*self.cols != _rows*_cols:
print "Invalid reshape parameters %d %d" % (_rows, _cols)
newD = {}
for i in range(_rows):
for j in range(_cols):
m,n = self.rowdecomp(i*_cols + j)
if self.mat.has_key((m,n)):
newD[(i,j)] = self.mat[(m,n)]
return SMatrix(_rows, _cols, newD)
def cross(self, b):
assert isinstance(b, (list, tuple, Matrix))
if not (self.rows == 1 and self.cols == 3 or \
self.rows == 3 and self.cols == 1 ) and \
(b.rows == 1 and b.cols == 3 or \
b.rows == 3 and b.cols == 1):
raise ValueError("Dimensions incorrect for cross product")
else:
return SMatrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def zeronm(self,n,m):
warnings.warn( 'Deprecated: use zeros() instead.' )
return SMatrix(n,m,{})
def zero(self, n):
warnings.warn( 'Deprecated: use zeros() instead.' )
return SMatrix(n,n,{})
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return SMatrix(n,m,{})
def eye(self, n):
tmp = SMatrix(n,n,lambda i,j:0)
for i in range(tmp.rows):
tmp[i,i] = 1
return tmp
def list2numpy(l):
"""Converts python list of SymPy expressions to a NumPy array."""
from numpy import empty
a = empty(len(l), dtype=object)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m):
"""Converts SymPy's matrix to a NumPy array."""
from numpy import empty
a = empty(m.shape, dtype=object)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
def a2idx(a):
"""
Tries to convert "a" to an index, returns None on failure.
The result of a2idx() (if not None) can be safely used as an index to
arrays/matrices.
"""
if hasattr(a, "__int__"):
return int(a)
if hasattr(a, "__index__"):
return a.__index__()
def symarray(prefix, shape):
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named prefix_i1_i2_... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as Sympy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
Examples
--------
>> from sympy import symarray
>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>> a = symarray('', 3)
>> b = symarray('', 3)
>> a[0] is b[0]
True
>> a = symarray('a', 3)
>> b = symarray('b', 3)
>> a[0] is b[0]
False
Creating symarrays with a prefix:
>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>> symarray('a', (2,3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>> symarray('a', (2,3,2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
"""
try:
import numpy as np
except ImportError:
raise ImportError("symarray requires numpy to be installed")
arr = np.empty(shape, dtype=object)
for index in np.ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))))
return arr
| bsd-3-clause | -3,508,994,818,285,606,000 | 30.698166 | 100 | 0.484604 | false |
rgkirch/check-for-duplicates | walk-and-hash.py | 1 | 2040 | import os
import sys
import hashlib
# os.makedirs(dir) to make a dir
# hashfile source
# http://www.pythoncentral.io/finding-duplicate-files-with-python/
def hashfile(path, blocksize = 65536):
infile = open(path, 'rb')
hasher = hashlib.md5()
buf = infile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = infile.read(blocksize)
infile.close()
return hasher.hexdigest()
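# Illustrative usage (added commentary, not part of the original script; the
# file name is hypothetical):
# >>> hashfile("empty.txt")   # an empty file
# 'd41d8cd98f00b204e9800998ecf8427e'   # the md5 digest of empty input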
def which_dir():
print "default dir is current dir (./)"
raw = raw_input("enter alternate dir:")
if raw:
if os.path.exists(str(raw)):
print "path exists"
return str(raw)
elif os.access(os.path.dirname(str(raw)), os.W_OK):
print "path does not exist but write privileges are given"
return str(raw)
else:
print "error, invalid path"
print "must have write privileges"
else:
print "using default dir (./)"
return "./"
if __name__ == '__main__':
startDir = which_dir()
all_hashes_once = {}
all_duplicates = {}
for dirName, dirList, fileList in os.walk(startDir):
print "checking", dirName
for filename in fileList:
# print filename
path = os.path.join(dirName, filename)
# file_hash = hashfile(dirName + "/" + filename)
file_hash = hashfile(path)
if file_hash in all_hashes_once:
print "->", filename
if file_hash in all_duplicates:
all_duplicates[file_hash].append(path)
else:
all_duplicates[file_hash] = [all_hashes_once[file_hash], path]
else:
all_hashes_once[file_hash] = path
# print all_hashes_once
print "done checking"
if all_duplicates:
print "duplicates found"
else:
print "no duplicates found"
print
for hash_value in all_duplicates:
for item in all_duplicates[hash_value]:
print item
print
print
| gpl-2.0 | 629,394,333,001,483,300 | 28.142857 | 82 | 0.567647 | false |
felipedau/blueberrywsn | blueberrywsn/pi.py | 1 | 1992 | from copy import deepcopy
from threading import Event, Lock, Thread
import bluetooth as bt
from constants import UUID
from receiver import Receiver
class Pi(Thread):
def __init__(self):
Thread.__init__(self)
self._devices = {}
self._lock_devices = Lock()
self.server_sock = bt.BluetoothSocket(bt.RFCOMM)
self.server_sock.bind(('', bt.PORT_ANY))
self.server_sock.listen(1)
self.done = None
port = self.server_sock.getsockname()[1]
bt.advertise_service(self.server_sock, 'SampleServer',
service_id=UUID,
service_classes=[UUID, bt.SERIAL_PORT_CLASS],
profiles=[bt.SERIAL_PORT_PROFILE])
print('Waiting for connection on RFCOMM channel %d' % port)
@property
def devices(self):
self._lock_devices.acquire()
devs = deepcopy(self._devices)
self._lock_devices.release()
return devs
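# Note (added commentary, not part of the original module): devices returns a
# deepcopy taken while holding the lock, so callers get an independent
# snapshot that stays consistent even while Receiver threads keep calling
# update_device() concurrently.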
def run(self):
self.done = Event()
while not self.done.isSet():
print('Waiting for clients')
client_sock, client_info = self.server_sock.accept()
r = Receiver(self, client_sock, client_info)
r.daemon = True
r.start()
self.server_sock.close()
print('The server socket has been closed')
def stop(self):
try:
self.done.set()
except AttributeError:
print('The server cannot be stopped. It is not running')
else:
print('The server has been stopped')
def update_device(self, device, data):
self._lock_devices.acquire()
self._devices[device] = data
self._lock_devices.release()
def main():
p = Pi()
p.start()
try:
raw_input('Press "enter" or "ctrl + c" to stop the server\n')
except KeyboardInterrupt:
print('')
finally:
p.stop()
if __name__ == '__main__':
main()
| gpl-3.0 | 8,016,502,288,322,077,000 | 25.56 | 74 | 0.562249 | false |
google/personfinder | tests/views/view_tests_base.py | 1 | 3874 | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tools to help run tests against the Django app."""
import const
import datetime
import modelmodule.admin_acls as admin_acls_model
import utils
import scrape
import testutils.base
class ViewTestsBase(testutils.base.ServerTestsBase):
"""A base class for tests for the Django app."""
def setUp(self):
super(ViewTestsBase, self).setUp()
self._xsrf_tool = utils.XsrfTool()
self.data_generator.admin_permission(
repo_id='global', email_address='[email protected]',
access_level=(
admin_acls_model.AdminPermission.AccessLevel.SUPERADMIN),
expiration_date=datetime.datetime(2051, 1, 20))
self.data_generator.admin_permission(
repo_id='global', email_address='[email protected]',
access_level=admin_acls_model.AdminPermission.AccessLevel.MANAGER,
expiration_date=datetime.datetime(2051, 1, 20))
self.data_generator.admin_permission(
repo_id='global', email_address='[email protected]',
access_level=admin_acls_model.AdminPermission.AccessLevel.MODERATOR,
expiration_date=datetime.datetime(2051, 1, 20))
self._current_user_id = None
def login_as_superadmin(self):
self.testbed.setup_env(
user_email='[email protected]',
user_id='z',
user_is_admin='0',
overwrite=True)
self._current_user_id = 'z'
def login_as_manager(self):
self.testbed.setup_env(
user_email='[email protected]',
user_id='k',
user_is_admin='0',
overwrite=True)
self._current_user_id = 'k'
def login_as_moderator(self):
self.testbed.setup_env(
user_email='[email protected]',
user_id='j',
user_is_admin='0',
overwrite=True)
self._current_user_id = 'j'
def login_as_nonadmin(self):
self.testbed.setup_env(
user_email='[email protected]',
user_id='frank',
user_is_admin='0',
overwrite=True)
self._current_user_id = 'frank'
def xsrf_token(self, action_id):
return self._xsrf_tool.generate_token(self._current_user_id, action_id)
def to_doc(self, response):
"""Produces a scrape.Document from the Django test response.
Args:
response (Response): A response from a Django test client.
Returns:
scrape.Document: A wrapper around the response's contents to help
with examining it.
"""
# TODO(nworden): when everything's on Django, make some changes to
# scrape.py so it better fits Django's test framework.
return scrape.Document(
content_bytes=response.content,
# The Django test Response objects don't include the URL, but that's
# ok: the Document's url field is only used by scrape.Session, which
# we're not using with the Django tests.
url=None,
status=response.status_code,
# We aren't using this, at least not in the Django tests.
message=None,
# The response headers are accessed directly through the Response
# object.
headers=response,
charset=const.CHARSET_UTF8)
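# Illustrative usage sketch (added commentary, not part of the original file;
# the URL and the `self.client` Django test-client attribute are assumptions
# here):
# doc = self.to_doc(self.client.get('/global/admin'))
# assert doc.status == 200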
| apache-2.0 | 4,467,310,917,029,568,000 | 35.205607 | 80 | 0.620031 | false |
ahaldane/numpy | setup.py | 1 | 15754 | #!/usr/bin/env python
""" NumPy is the fundamental package for array computing with Python.
It provides:
- a powerful N-dimensional array object
- sophisticated (broadcasting) functions
- tools for integrating C/C++ and Fortran code
- useful linear algebra, Fourier transform, and random number capabilities
- and much more
Besides its obvious scientific uses, NumPy can also be used as an efficient
multi-dimensional container of generic data. Arbitrary data-types can be
defined. This allows NumPy to seamlessly and speedily integrate with a wide
variety of databases.
All NumPy wheels distributed on PyPI are BSD licensed.
"""
from __future__ import division, print_function
DOCLINES = (__doc__ or '').split("\n")
import os
import sys
import subprocess
import textwrap
if sys.version_info[:2] < (3, 5):
raise RuntimeError("Python version >= 3.5 required.")
import builtins
CLASSIFIERS = """\
Development Status :: 5 - Production/Stable
Intended Audience :: Science/Research
Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
Operating System :: Microsoft :: Windows
Operating System :: POSIX
Operating System :: Unix
Operating System :: MacOS
"""
MAJOR = 1
MINOR = 18
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH', 'HOME']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except (subprocess.SubprocessError, OSError):
GIT_REVISION = "Unknown"
return GIT_REVISION
# BEFORE importing setuptools, remove MANIFEST. Otherwise it may not be
# properly updated when the contents of directories change (true for distutils,
# not sure about setuptools).
if os.path.exists('MANIFEST'):
os.remove('MANIFEST')
# This is a bit hackish: we are setting a global variable so that the main
# numpy __init__ can detect if it is being loaded by the setup routine, to
# avoid attempting to load components that aren't built yet. While ugly, it's
# a lot more robust than what was previously being used.
builtins.__NUMPY_SETUP__ = True
def get_version_info():
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists('numpy/version.py'):
# must be a source distribution, use existing version file
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
raise ImportError("Unable to import git_revision. Try removing "
"numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
return FULLVERSION, GIT_REVISION
def write_version_py(filename='numpy/version.py'):
cnt = """
# THIS FILE IS GENERATED FROM NUMPY SETUP.PY
#
# To compare versions robustly, use `numpy.lib.NumpyVersion`
short_version = '%(version)s'
version = '%(version)s'
full_version = '%(full_version)s'
git_revision = '%(git_revision)s'
release = %(isrelease)s
if not release:
version = full_version
"""
FULLVERSION, GIT_REVISION = get_version_info()
a = open(filename, 'w')
try:
a.write(cnt % {'version': VERSION,
'full_version': FULLVERSION,
'git_revision': GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(None, parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
config.add_subpackage('numpy')
config.add_data_files(('numpy', 'LICENSE.txt'))
config.get_version('numpy/version.py') # sets config.version
return config
def check_submodules():
""" verify that the submodules are checked out and clean
use `git submodule update --init`; on failure
"""
if not os.path.exists('.git'):
return
with open('.gitmodules') as f:
for l in f:
if 'path' in l:
p = l.split('=')[-1].strip()
if not os.path.exists(p):
raise ValueError('Submodule %s missing' % p)
proc = subprocess.Popen(['git', 'submodule', 'status'],
stdout=subprocess.PIPE)
status, _ = proc.communicate()
status = status.decode("ascii", "replace")
for line in status.splitlines():
if line.startswith('-') or line.startswith('+'):
raise ValueError('Submodule not clean: %s' % line)
class concat_license_files():
"""Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation
Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see
gh-13447). This makes GitHub state correctly how NumPy is licensed.
"""
def __init__(self):
self.f1 = 'LICENSE.txt'
self.f2 = 'LICENSES_bundled.txt'
def __enter__(self):
"""Concatenate files and remove LICENSES_bundled.txt"""
with open(self.f1, 'r') as f1:
self.bsd_text = f1.read()
with open(self.f1, 'a') as f1:
with open(self.f2, 'r') as f2:
self.bundled_text = f2.read()
f1.write('\n\n')
f1.write(self.bundled_text)
def __exit__(self, exception_type, exception_value, traceback):
"""Restore content of both files"""
with open(self.f1, 'w') as f:
f.write(self.bsd_text)
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
with concat_license_files():
sdist.run(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
for d in ('random',):
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'numpy/{0}'.format(d)],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
"""Check the commands and respond appropriately. Disable broken commands.
Return a boolean value for whether or not to run the build or not (avoid
parsing Cython and template files if False).
"""
args = sys.argv[1:]
if not args:
# User forgot to give an argument probably, let setuptools handle that.
return True
info_commands = ['--help-commands', '--name', '--version', '-V',
'--fullname', '--author', '--author-email',
'--maintainer', '--maintainer-email', '--contact',
'--contact-email', '--url', '--license', '--description',
'--long-description', '--platforms', '--classifiers',
'--keywords', '--provides', '--requires', '--obsoletes']
for command in info_commands:
if command in args:
return False
# Note that 'alias', 'saveopts' and 'setopt' commands also seem to work
# fine as they are, but are usually used together with one of the commands
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
for command in good_commands:
if command in args:
return True
# The following commands are supported, but we need to show more
# useful messages to the user
if 'install' in args:
print(textwrap.dedent("""
Note: if you need reliable uninstall behavior, then install
with pip instead of using `setup.py install`:
- `pip install .` (from a git repo or downloaded source
release)
- `pip install numpy` (last NumPy release on PyPi)
"""))
return True
if '--help' in args or '-h' in sys.argv[1]:
print(textwrap.dedent("""
NumPy-specific help
-------------------
To install NumPy from here with reliable uninstall, we recommend
that you use `pip install .`. To install the latest NumPy release
from PyPi, use `pip install numpy`.
For help with build/installation issues, please ask on the
numpy-discussion mailing list. If you are sure that you have run
into a bug, please report it at https://github.com/numpy/numpy/issues.
Setuptools commands help
------------------------
"""))
return False
# The following commands aren't supported. They can only be executed when
# the user explicitly adds a --force command-line argument.
bad_commands = dict(
test="""
`setup.py test` is not supported. Use one of the following
instead:
- `python runtests.py` (to build and test)
- `python runtests.py --no-build` (to test installed numpy)
- `>>> numpy.test()` (run tests for installed numpy
from within an interpreter)
""",
upload="""
`setup.py upload` is not supported, because it's insecure.
Instead, build what you want to upload and upload those files
with `twine upload -s <filenames>` instead.
""",
upload_docs="`setup.py upload_docs` is not supported",
easy_install="`setup.py easy_install` is not supported",
clean="""
`setup.py clean` is not supported, use one of the following instead:
- `git clean -xdf` (cleans all files)
- `git clean -Xdf` (cleans all versioned files, doesn't touch
files that aren't checked into the git repo)
""",
check="`setup.py check` is not supported",
register="`setup.py register` is not supported",
bdist_dumb="`setup.py bdist_dumb` is not supported",
bdist="`setup.py bdist` is not supported",
build_sphinx="""
`setup.py build_sphinx` is not supported, use the
Makefile under doc/""",
flake8="`setup.py flake8` is not supported, use flake8 standalone",
)
bad_commands['nosetests'] = bad_commands['test']
for command in ('upload_docs', 'easy_install', 'bdist', 'bdist_dumb',
'register', 'check', 'install_data', 'install_headers',
'install_lib', 'install_scripts', ):
bad_commands[command] = "`setup.py %s` is not supported" % command
for command in bad_commands.keys():
if command in args:
print(textwrap.dedent(bad_commands[command]) +
"\nAdd `--force` to your command to use it anyway if you "
"must (unsupported).\n")
sys.exit(1)
# Commands that do more than print info, but also don't need Cython and
# template parsing.
other_commands = ['egg_info', 'install_egg_info', 'rotate']
for command in other_commands:
if command in args:
return False
# If we got here, we didn't detect what setup.py command was given
import warnings
warnings.warn("Unrecognized setuptools command, proceeding with "
"generating Cython sources and expanding templates", stacklevel=2)
return True
def setup_package():
src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
# Rewrite the version file everytime
write_version_py()
# The f2py scripts that will be installed
if sys.platform == 'win32':
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
]
else:
f2py_cmds = [
'f2py = numpy.f2py.f2py2e:main',
'f2py%s = numpy.f2py.f2py2e:main' % sys.version_info[:1],
'f2py%s.%s = numpy.f2py.f2py2e:main' % sys.version_info[:2],
]
metadata = dict(
name = 'numpy',
maintainer = "NumPy Developers",
maintainer_email = "[email protected]",
description = DOCLINES[0],
long_description = "\n".join(DOCLINES[2:]),
url = "https://www.numpy.org",
author = "Travis E. Oliphant et al.",
download_url = "https://pypi.python.org/pypi/numpy",
project_urls={
"Bug Tracker": "https://github.com/numpy/numpy/issues",
"Documentation": "https://docs.scipy.org/doc/numpy/",
"Source Code": "https://github.com/numpy/numpy",
},
license = 'BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
cmdclass={"sdist": sdist_checked},
python_requires='>=3.5',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds
},
)
if "--force" in sys.argv:
run_build = True
sys.argv.remove('--force')
else:
# Raise errors for unsupported commands, improve help output, etc.
run_build = parse_setuppy_commands()
from setuptools import setup
if run_build:
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
# Generate Cython sources, unless building from source release
generate_cython()
metadata['configuration'] = configuration
else:
# Version number is added to metadata inside configuration() if build
# is run.
metadata['version'] = get_version_info()[0]
try:
setup(**metadata)
finally:
del sys.path[0]
os.chdir(old_path)
return
if __name__ == '__main__':
setup_package()
# This may avoid problems where numpy is installed via ``*_requires`` by
# setuptools, the global namespace isn't reset properly, and then numpy is
# imported later (which will then fail to load numpy extension modules).
# See gh-7956 for details
del builtins.__NUMPY_SETUP__
| bsd-3-clause | -7,317,157,568,309,640,000 | 34.165179 | 84 | 0.593627 | false |
mdmintz/SeleniumBase | examples/translations/russian_test_1.py | 1 | 1369 | # Russian Language Test
from seleniumbase.translate.russian import ТестНаСелен # noqa
class МойТестовыйКласс(ТестНаСелен):
def test_пример_1(self):
self.открыть("https://ru.wikipedia.org/wiki/")
self.подтвердить_элемент('[title="Русский язык"]')
self.подтвердить_текст("Википедия", "h2.main-wikimedia-header")
self.введите("#searchInput", "МГУ")
self.нажмите("#searchButton")
self.подтвердить_текст("университет", "#firstHeading")
self.подтвердить_элемент('img[alt="МГУ, вид с воздуха.jpg"]')
self.введите("#searchInput", "приключения Шурика")
self.нажмите("#searchButton")
self.подтвердить_текст("Операция «Ы» и другие приключения Шурика")
self.подтвердить_элемент('img[alt="Постер фильма"]')
self.назад()
self.подтвердить_правду("университет" in self.получить_текущий_URL())
self.вперед()
self.подтвердить_правду("Шурика" in self.получить_текущий_URL())
| mit | -8,395,393,105,277,526,000 | 43.727273 | 77 | 0.671748 | false |
georgemarshall/django | tests/utils_tests/test_text.py | 4 | 12578 | import json
import sys
from django.test import SimpleTestCase, ignore_warnings
from django.utils import text
from django.utils.deprecation import RemovedInDjango40Warning
from django.utils.functional import lazystr
from django.utils.text import format_lazy
from django.utils.translation import gettext_lazy, override
IS_WIDE_BUILD = (len('\U0001F4A9') == 1)
class TestUtilsText(SimpleTestCase):
def test_get_text_list(self):
self.assertEqual(text.get_text_list(['a', 'b', 'c', 'd']), 'a, b, c or d')
self.assertEqual(text.get_text_list(['a', 'b', 'c'], 'and'), 'a, b and c')
self.assertEqual(text.get_text_list(['a', 'b'], 'and'), 'a and b')
self.assertEqual(text.get_text_list(['a']), 'a')
self.assertEqual(text.get_text_list([]), '')
with override('ar'):
self.assertEqual(text.get_text_list(['a', 'b', 'c']), "a، b أو c")
def test_smart_split(self):
testdata = [
('This is "a person" test.',
['This', 'is', '"a person"', 'test.']),
('This is "a person\'s" test.',
['This', 'is', '"a person\'s"', 'test.']),
('This is "a person\\"s" test.',
['This', 'is', '"a person\\"s"', 'test.']),
('"a \'one',
['"a', "'one"]),
('all friends\' tests',
['all', 'friends\'', 'tests']),
('url search_page words="something else"',
['url', 'search_page', 'words="something else"']),
("url search_page words='something else'",
['url', 'search_page', "words='something else'"]),
('url search_page words "something else"',
['url', 'search_page', 'words', '"something else"']),
('url search_page words-"something else"',
['url', 'search_page', 'words-"something else"']),
('url search_page words=hello',
['url', 'search_page', 'words=hello']),
('url search_page words="something else',
['url', 'search_page', 'words="something', 'else']),
("cut:','|cut:' '",
["cut:','|cut:' '"]),
(lazystr("a b c d"), # Test for #20231
['a', 'b', 'c', 'd']),
]
for test, expected in testdata:
self.assertEqual(list(text.smart_split(test)), expected)
def test_truncate_chars(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.chars(100)),
self.assertEqual('The quick brown fox …', truncator.chars(21)),
self.assertEqual('The quick brown fo.....', truncator.chars(23, '.....')),
self.assertEqual('.....', truncator.chars(4, '.....')),
nfc = text.Truncator('o\xfco\xfco\xfco\xfc')
nfd = text.Truncator('ou\u0308ou\u0308ou\u0308ou\u0308')
self.assertEqual('oüoüoüoü', nfc.chars(8))
self.assertEqual('oüoüoüoü', nfd.chars(8))
self.assertEqual('oü…', nfc.chars(3))
self.assertEqual('oü…', nfd.chars(3))
# Ensure the final length is calculated correctly when there are
# combining characters with no precomposed form, and that combining
# characters are not split up.
truncator = text.Truncator('-B\u030AB\u030A----8')
self.assertEqual('-B\u030A…', truncator.chars(3))
self.assertEqual('-B\u030AB\u030A-…', truncator.chars(5))
self.assertEqual('-B\u030AB\u030A----8', truncator.chars(8))
# Ensure the length of the end text is correctly calculated when it
# contains combining characters with no precomposed form.
truncator = text.Truncator('-----')
self.assertEqual('---B\u030A', truncator.chars(4, 'B\u030A'))
self.assertEqual('-----', truncator.chars(5, 'B\u030A'))
# Make a best effort to shorten to the desired length, but requesting
# a length shorter than the ellipsis shouldn't break
self.assertEqual('…', text.Truncator('asdf').chars(0))
# lazy strings are handled correctly
self.assertEqual(text.Truncator(lazystr('The quick brown fox')).chars(10), 'The quick…')
def test_truncate_chars_html(self):
perf_test_values = [
(('</a' + '\t' * 50000) + '//>', None),
('&' * 50000, '&' * 9 + '…'),
('_X<<<<<<<<<<<>', None),
]
for value, expected in perf_test_values:
with self.subTest(value=value):
truncator = text.Truncator(value)
self.assertEqual(expected if expected else value, truncator.chars(10, html=True))
def test_truncate_words(self):
truncator = text.Truncator('The quick brown fox jumped over the lazy dog.')
self.assertEqual('The quick brown fox jumped over the lazy dog.', truncator.words(10))
self.assertEqual('The quick brown fox…', truncator.words(4))
self.assertEqual('The quick brown fox[snip]', truncator.words(4, '[snip]'))
# lazy strings are handled correctly
truncator = text.Truncator(lazystr('The quick brown fox jumped over the lazy dog.'))
self.assertEqual('The quick brown fox…', truncator.words(4))
def test_truncate_html_words(self):
truncator = text.Truncator(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>'
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox jumped over the lazy dog.</em></strong></p>',
truncator.words(10, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox…</em></strong></p>',
truncator.words(4, html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox....</em></strong></p>',
truncator.words(4, '....', html=True)
)
self.assertEqual(
'<p id="par"><strong><em>The quick brown fox</em></strong></p>',
truncator.words(4, '', html=True)
)
# Test with new line inside tag
truncator = text.Truncator(
'<p>The quick <a href="xyz.html"\n id="mylink">brown fox</a> jumped over the lazy dog.</p>'
)
self.assertEqual(
'<p>The quick <a href="xyz.html"\n id="mylink">brown…</a></p>',
truncator.words(3, html=True)
)
# Test self-closing tags
truncator = text.Truncator('<br/>The <hr />quick brown fox jumped over the lazy dog.')
self.assertEqual('<br/>The <hr />quick brown…', truncator.words(3, html=True))
truncator = text.Truncator('<br>The <hr/>quick <em>brown fox</em> jumped over the lazy dog.')
self.assertEqual('<br>The <hr/>quick <em>brown…</em>', truncator.words(3, html=True))
# Test html entities
truncator = text.Truncator('<i>Buenos días! ¿Cómo está?</i>')
self.assertEqual('<i>Buenos días! ¿Cómo…</i>', truncator.words(3, html=True))
truncator = text.Truncator('<p>I <3 python, what about you?</p>')
self.assertEqual('<p>I <3 python,…</p>', truncator.words(3, html=True))
perf_test_values = [
('</a' + '\t' * 50000) + '//>',
'&' * 50000,
'_X<<<<<<<<<<<>',
]
for value in perf_test_values:
with self.subTest(value=value):
truncator = text.Truncator(value)
self.assertEqual(value, truncator.words(50, html=True))
def test_wrap(self):
digits = '1234 67 9'
self.assertEqual(text.wrap(digits, 100), '1234 67 9')
self.assertEqual(text.wrap(digits, 9), '1234 67 9')
self.assertEqual(text.wrap(digits, 8), '1234 67\n9')
self.assertEqual(text.wrap('short\na long line', 7), 'short\na long\nline')
self.assertEqual(text.wrap('do-not-break-long-words please? ok', 8), 'do-not-break-long-words\nplease?\nok')
long_word = 'l%sng' % ('o' * 20)
self.assertEqual(text.wrap(long_word, 20), long_word)
self.assertEqual(text.wrap('a %s word' % long_word, 10), 'a\n%s\nword' % long_word)
self.assertEqual(text.wrap(lazystr(digits), 100), '1234 67 9')
def test_normalize_newlines(self):
self.assertEqual(text.normalize_newlines("abc\ndef\rghi\r\n"), "abc\ndef\nghi\n")
self.assertEqual(text.normalize_newlines("\n\r\r\n\r"), "\n\n\n\n")
self.assertEqual(text.normalize_newlines("abcdefghi"), "abcdefghi")
self.assertEqual(text.normalize_newlines(""), "")
self.assertEqual(text.normalize_newlines(lazystr("abc\ndef\rghi\r\n")), "abc\ndef\nghi\n")
def test_phone2numeric(self):
numeric = text.phone2numeric('0800 flowers')
self.assertEqual(numeric, '0800 3569377')
lazy_numeric = lazystr(text.phone2numeric('0800 flowers'))
self.assertEqual(lazy_numeric, '0800 3569377')
def test_slugify(self):
items = (
# given - expected - unicode?
('Hello, World!', 'hello-world', False),
('spam & eggs', 'spam-eggs', False),
('spam & ıçüş', 'spam-ıçüş', True),
('foo ıç bar', 'foo-ıç-bar', True),
(' foo ıç bar', 'foo-ıç-bar', True),
('你好', '你好', True),
)
for value, output, is_unicode in items:
self.assertEqual(text.slugify(value, allow_unicode=is_unicode), output)
# interning the result may be useful, e.g. when fed to Path.
self.assertEqual(sys.intern(text.slugify('a')), 'a')
@ignore_warnings(category=RemovedInDjango40Warning)
def test_unescape_entities(self):
items = [
('', ''),
('foo', 'foo'),
('&', '&'),
('&am;', '&am;'),
('&', '&'),
('&#xk;', '&#xk;'),
('&', '&'),
('foo & bar', 'foo & bar'),
('foo & bar', 'foo & bar'),
]
for value, output in items:
self.assertEqual(text.unescape_entities(value), output)
self.assertEqual(text.unescape_entities(lazystr(value)), output)
def test_unescape_entities_deprecated(self):
msg = (
'django.utils.text.unescape_entities() is deprecated in favor of '
'html.unescape().'
)
with self.assertWarnsMessage(RemovedInDjango40Warning, msg):
text.unescape_entities('foo')
def test_unescape_string_literal(self):
items = [
('"abc"', 'abc'),
("'abc'", 'abc'),
('"a \"bc\""', 'a "bc"'),
("'\'ab\' c'", "'ab' c"),
]
for value, output in items:
self.assertEqual(text.unescape_string_literal(value), output)
self.assertEqual(text.unescape_string_literal(lazystr(value)), output)
def test_get_valid_filename(self):
filename = "^&'@{}[],$=!-#()%+~_123.txt"
self.assertEqual(text.get_valid_filename(filename), "-_123.txt")
self.assertEqual(text.get_valid_filename(lazystr(filename)), "-_123.txt")
def test_compress_sequence(self):
data = [{'key': i} for i in range(10)]
seq = list(json.JSONEncoder().iterencode(data))
seq = [s.encode() for s in seq]
actual_length = len(b''.join(seq))
out = text.compress_sequence(seq)
compressed_length = len(b''.join(out))
self.assertLess(compressed_length, actual_length)
def test_format_lazy(self):
self.assertEqual('django/test', format_lazy('{}/{}', 'django', lazystr('test')))
self.assertEqual('django/test', format_lazy('{0}/{1}', *('django', 'test')))
self.assertEqual('django/test', format_lazy('{a}/{b}', **{'a': 'django', 'b': 'test'}))
self.assertEqual('django/test', format_lazy('{a[0]}/{a[1]}', a=('django', 'test')))
t = {}
s = format_lazy('{0[a]}-{p[a]}', t, p=t)
t['a'] = lazystr('django')
self.assertEqual('django-django', s)
t['a'] = 'update'
self.assertEqual('update-update', s)
# The format string can be lazy. (string comes from contrib.admin)
s = format_lazy(
gettext_lazy('Added {name} “{object}”.'),
name='article', object='My first try',
)
with override('fr'):
self.assertEqual('Ajout de article «\xa0My first try\xa0».', s)
| bsd-3-clause | -940,907,584,277,542,100 | 44.631387 | 116 | 0.558906 | false |
daerty0153/crazyHN | hn_main/hn_main/wsgi.py | 1 | 1422 | """
WSGI config for hn_main project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "hn_main.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "hn_main.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| mit | -7,970,751,244,716,074,000 | 43.4375 | 79 | 0.791139 | false |
lexdene/pavel | pavel/grammar/parser.py | 1 | 11358 | from ply import yacc
from . import lexer
class Parser:
def __init__(self):
self._debug = False
import os
if os.environ.get('PARSER_DEBUG') == 'on':
self._debug = True
def _create_lexer(self):
return lexer.Lexer()
def parse(self, source):
if self._debug:
self._debug_parse_tokens(source)
self.__parser = yacc.yacc(module=self)
debug = 0
else:
self.__parser = yacc.yacc(
module=self,
debug=False,
write_tables=False
)
debug = 0
result = self.__parser.parse(
source,
lexer=self._create_lexer(),
debug=debug
)
if self._debug:
import pprint
pprint.pprint(result, indent=4)
return result
def _debug_parse_tokens(self, source):
_lexer = self._create_lexer()
print(' ==== debug begin ==== ')
print(_lexer.tokens)
print(source)
print(repr(source))
_lexer.input(source)
for tok in _lexer:
print(
'%15s, %40s %3d %3d' % (
tok.type, repr(tok.value), tok.lineno, tok.lexpos
)
)
print(' ==== debug end ==== ')
print('')
tokens = lexer.Lexer.tokens
precedence = (
('nonassoc', 'CMP'),
('left', '+', '-'),
('left', '*', '/'),
)
def p_first_rule(self, p):
'''
first_rule : multi_lines
'''
p[0] = p[1]
def p_error(self, p):
raise ValueError(p)
def p_multi_lines(self, p):
'''
multi_lines : empty
| line
| multi_lines line
'''
if len(p) == 2:
if p[1] is None:
p[0] = (
'multi_lines',
dict(
lines=[]
),
)
else:
p[0] = (
'multi_lines',
dict(
lines=[p[1]]
),
)
elif len(p) == 3:
line_list = p[1][1]['lines'] + [p[2]]
p[0] = (
'multi_lines',
dict(
lines=line_list
),
)
else:
raise ValueError('len is %d' % len(p))
def p_empty(self, p):
'empty :'
p[0] = None
def p_line(self, p):
'''
line : expression NEWLINE
| assign NEWLINE
| if_struct NEWLINE
| for_struct NEWLINE
| while_struct NEWLINE
| function_struct NEWLINE
'''
p[0] = p[1]
def p_one_item_expression(self, p):
'''
expression : number
| keyword
| string
| function_call
| member_function_call
| anonymous_function_struct
| block
'''
p[0] = p[1]
def p_three_items_expression(self, p):
'''
expression : expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression
| expression CMP expression
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2]),
args=(
p[1],
p[3]
)
)
)
def p_keyword_expression(self, p):
'''
expression : expression keyword expression
'''
p[0] = (
'function_call',
dict(
function=p[2],
params=[
p[1],
p[3],
]
)
)
def p_assign(self, p):
'''
assign : keyword ASSIGN expression
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2]),
args=(
p[1],
p[3]
)
)
)
def p_get_attr_expression(self, p):
'''
expression : expression '.' keyword
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2]),
args=(
p[1],
p[3]
)
)
)
def p_set_attr_expression(self, p):
'''
expression : expression '.' keyword ASSIGN expression
'''
p[0] = (
'expression',
dict(
operator=('operator', 'set_attr'),
args=(
p[1],
p[3],
p[5],
)
)
)
def p_get_item_expression(self, p):
'''
expression : expression '[' expression ']'
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2] + p[4]),
args=(
p[1],
p[3]
)
)
)
def p_number(self, p):
'''
number : NUMBER
'''
p[0] = ('number', p[1])
def p_keyword(self, p):
'''
keyword : KEYWORD
'''
p[0] = (
'keyword',
dict(
name=p[1]
)
)
def p_string(self, p):
'''
string : STRING
'''
p[0] = ('string', p[1])
def p_if_struct(self, p):
'''
if_struct : IF '(' expression ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'if_struct',
dict(
condition=p[3],
then_block=p[6],
else_block=None,
)
)
def p_if_with_block(self, p):
'''
if_struct : IF INDENT multi_lines OUTDENT NEWLINE THEN INDENT multi_lines OUTDENT
'''
p[0] = (
'if_struct',
dict(
condition=p[3],
then_block=p[8],
else_block=None
)
)
def p_if_with_else(self, p):
'''
if_struct : if_struct NEWLINE ELSE INDENT multi_lines OUTDENT
'''
p[0] = (
'if_struct',
dict(
condition=p[1][1]['condition'],
then_block=p[1][1]['then_block'],
else_block=p[5],
)
)
def p_for_struct(self, p):
'''
for_struct : FOR '(' keyword IN expression ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'for_struct',
dict(
keyword=p[3],
expression=p[5],
body=p[8],
)
)
def p_while_struct(self, p):
'''
while_struct : WHILE '(' expression ')' block
'''
p[0] = (
'while_struct',
dict(
condition=p[3],
body=p[5],
)
)
def p_function_struct(self, p):
'''
function_struct : FUNCTION keyword '(' formal_param_list ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=p[2],
params=p[4],
body=p[7],
)
)
def p_no_param_function_struct(self, p):
'''
function_struct : FUNCTION keyword '(' ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=p[2],
params=[],
body=p[6],
)
)
def p_formal_param_list_with_one_item(self, p):
'''
formal_param_list : keyword
'''
p[0] = [p[1]]
def p_formal_param_list_with_multi_items(self, p):
'''
formal_param_list : formal_param_list ',' keyword
'''
        # extend the already-reduced list with the new keyword
        p[0] = p[1] + [p[3]]
def p_member_function_call(self, p):
'''
member_function_call : expression '.' keyword '(' comma_expression_list ')'
'''
p[0] = (
'member_function_call',
dict(
this_object=p[1],
name=p[3],
params=p[5],
)
)
def p_no_param_member_function_call(self, p):
'''
member_function_call : expression '.' keyword '(' ')'
'''
p[0] = (
'member_function_call',
dict(
this_object=p[1],
name=p[3],
params=[],
)
)
def p_function_call(self, p):
'''
function_call : expression '(' comma_expression_list ')'
'''
p[0] = (
'function_call',
dict(
function=p[1],
params=p[3],
)
)
def p_no_param_function_call(self, p):
'''
function_call : expression '(' ')'
'''
p[0] = (
'function_call',
dict(
function=p[1],
params=[]
)
)
def p_call_block(self, p):
'''
function_call : expression block
'''
p[0] = (
'function_call',
dict(
function=p[1],
params=[p[2]]
)
)
def p_actual_param_list_with_one_item(self, p):
'''
comma_expression_list : expression
'''
p[0] = [p[1]]
def p_actual_param_list_with_multi_items(self, p):
'''
comma_expression_list : comma_expression_list ',' expression
| comma_expression_list ',' expression NEWLINE
'''
p[0] = p[1] + [p[3]]
def p_anonymous_function_struct(self, p):
'''
anonymous_function_struct : FUNCTION '(' formal_param_list ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=None,
params=p[3],
body=p[6]
)
)
def p_anonymous_function_without_param(self, p):
'''
anonymous_function_struct : FUNCTION '(' ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=None,
params=[],
body=p[5],
)
)
def p_anonymous_function_struct_without_param(self, p):
'''
block : INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=None,
params=[],
body=p[2]
)
)
| gpl-3.0 | -1,472,556,201,478,067,700 | 22.911579 | 101 | 0.36318 | false |
rwl/PyCIM | CIM15/IEC61970/Meas/AccumulatorLimit.py | 1 | 2420 | # Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Meas.Limit import Limit
class AccumulatorLimit(Limit):
"""Limit values for Accumulator measurementsLimit values for Accumulator measurements
"""
def __init__(self, value=0, LimitSet=None, *args, **kw_args):
"""Initialises a new 'AccumulatorLimit' instance.
@param value: The value to supervise against. The value is positive.
@param LimitSet: The set of limits.
"""
#: The value to supervise against. The value is positive.
self.value = value
self._LimitSet = None
self.LimitSet = LimitSet
super(AccumulatorLimit, self).__init__(*args, **kw_args)
_attrs = ["value"]
_attr_types = {"value": int}
_defaults = {"value": 0}
_enums = {}
_refs = ["LimitSet"]
_many_refs = []
def getLimitSet(self):
"""The set of limits.
"""
return self._LimitSet
def setLimitSet(self, value):
if self._LimitSet is not None:
filtered = [x for x in self.LimitSet.Limits if x != self]
self._LimitSet._Limits = filtered
self._LimitSet = value
if self._LimitSet is not None:
if self not in self._LimitSet._Limits:
self._LimitSet._Limits.append(self)
LimitSet = property(getLimitSet, setLimitSet)
| mit | 4,569,718,613,417,140,700 | 36.8125 | 89 | 0.681818 | false |
ResolveWang/algrithm_qa | 分类代表题目/字符串/数字翻译成字符串(动态规划).py | 1 | 1178 | """
Given a number, translate it into a string according to the following rule:
0 translates to "a", 1 translates to "b", ..., 25 translates to "z". A number can
have several different translations; for example 12258 has 5 of them, namely
"bccfi", "bwfi", "bczi", "mcfi" and "mzfi".
For a given number, how many different translations does it have?
Approach:
The standard trick is to count, for each position, how many ways the prefix ending
there can be translated, which can be solved with dynamic programming:
dp[i] = dp[i-1] + tmp  (tmp = dp[i-2] when num_str[index-1:index+1] can be
translated into a valid letter, otherwise tmp = 0)
"""
class Num2Str:
def get_total_res(self, num):
if num < 0:
return 0
if len(str(num)) == 1:
return 1
str_num = str(num)
dp = [0 for _ in range(len(str_num))]
dp[0] = 1
if int(str_num[0:2]) > 25:
dp[1] = 1
else:
dp[1] = 2
index = 2
while index < len(str_num):
tmp = 0
if int(str_num[index-1: index+1]) <= 25:
tmp = dp[index-2]
dp[index] = dp[index-1] + tmp
index += 1
return dp[-1]
if __name__ == '__main__':
print(Num2Str().get_total_res(12258)) | mit | -2,510,281,898,307,719,700 | 21.414634 | 60 | 0.502179 | false |
marcocamma/trx | trx/cell.py | 1 | 3337 | # -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
import collections
import itertools
import numpy as np
from numpy import sin,cos
class Triclinic(object):
def __init__(self,a=1,b=1,c=1,alpha=90,beta=90,gamma=90):
self.a = a
self.b = b
self.c = c
alpha = alpha*np.pi/180
beta = beta*np.pi/180
gamma = gamma*np.pi/180
self.alpha = alpha
self.beta = beta
self.gamma = gamma
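        # coefficients of the general triclinic interplanar-spacing formula:
        # 1/d^2 = (s11*h^2 + s22*k^2 + s33*l^2 + 2*s12*h*k + 2*s23*k*l + 2*s13*h*l) / V^2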
self._s11 = b**2 * c**2 * sin(alpha)**2
self._s22 = a**2 * c**2 * sin(beta)**2
self._s33 = a**2 * b**2 * sin(gamma)**2
self._s12 = a*b*c**2*(cos(alpha) * cos(beta) - cos(gamma))
self._s23 = a**2*b*c*(cos(beta) * cos(gamma) - cos(alpha))
self._s13 = a*b**2*c*(cos(gamma) * cos(alpha) - cos(beta))
self.V = (a*b*c)*np.sqrt(1-cos(alpha)**2 - cos(beta)**2 - cos(gamma)**2 + 2*cos(alpha)*cos(beta)*cos(gamma))
def __call__(self,h,k,l): return self.q(h,k,l)
def d(self,h,k,l):
temp = self._s11*h**2 + \
self._s22*k**2 + \
self._s33*l**2 + \
2*self._s12*h*k+ \
2*self._s23*k*l+ \
2*self._s13*h*l
d = self.V/np.sqrt(temp)
return d
def Q(self,h,k,l):
return 2*np.pi/self.d(h,k,l)
def reflection_list(self,maxQ=3,lim=10):
ret=dict()
# prepare hkl
i = range(-lim,lim+1)
prod = itertools.product( i,i,i )
hkl = np.asarray( list( itertools.product( i,i,i ) ) )
h,k,l = hkl.T
q = self.Q(h,k,l)
idx = q<maxQ;
q = q[idx]
hkl = hkl[idx]
q = np.round(q,12)
qunique = np.unique(q)
ret = []
for qi in qunique:
reflec = hkl[ q == qi ]
ret.append( (qi,tuple(np.abs(reflec)[0]),len(reflec),reflec) )
return qunique,ret
# for h in range(-lim,lim+1):
# for j in range(-lim,lim+1):
class Orthorombic(Triclinic):
def __init__(self,a=1,b=1,c=1):
Triclinic.__init__(self,a=a,b=b,c=c,alpha=90,beta=90,gamma=90)
class Cubic(Orthorombic):
def __init__(self,a=1):
Orthorombic.__init__(self,a=a,b=a,c=a)
class Monoclinic(Triclinic):
def __init__(self,a=1,b=1,c=1,beta=90.):
Triclinic.__init__(self,a=a,b=b,c=c,alpha=90,beta=beta,gamma=90)
def plotReflections(cell_instance,maxQ=3,ax=None,line_kw=dict(),text_kw=dict()):
import matplotlib.pyplot as plt
from matplotlib import lines
import matplotlib.transforms as transforms
_,refl_info = cell_instance.reflection_list(maxQ=maxQ)
if ax is None: ax = plt.gca()
# the x coords of this transformation are data, and the
# y coord are axes
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
txt_kw = dict( horizontalalignment='center', rotation=45)
txt_kw.update(**text_kw)
for reflection in refl_info[1:]:
q,hkl,n,_ = reflection
line = lines.Line2D( [q,q],[1,1.1],transform=trans,**line_kw)
line.set_clip_on(False)
ax.add_line(line)
ax.text(q,1.15,str(hkl),transform=trans,**txt_kw)
ti3o5_lambda = Triclinic(a = 9.83776, b = 3.78674, c = 9.97069, beta = 91.2567)
ti3o5_beta = Triclinic(a = 9.7382 , b = 3.8005 , c = 9.4333 , beta = 91.496)
#ti3o5_beta = Monoclinic(a = 9.7382 , b = 3.8005 , c = 9.4333 , beta = 91.496)
ti3o5_alpha = Triclinic(a = 9.8372, b = 3.7921, c = 9.9717)
ti3o5_alpha1 = Orthorombic(a = 9.8372, b = 3.7921, c = 9.9717)
si = Cubic(a=5.431020504)
| mit | 8,474,517,120,379,409,000 | 30.481132 | 115 | 0.594246 | false |
ada-x/respect_mah_authoritay | movies_project.py | 1 | 2198 | import movies # my file with the class definition
import fresh_tomatoes # renders site
pi_movie = movies.Movie('Pi',
'https://www.youtube.com/watch?v=jo18VIoR2xU',
'a mathematician makes an incredible discovery',
'http://images.moviepostershop.com/pi-movie-poster-1998-1020474533.jpg')
big_fish = movies.Movie('Big Fish',
'https://www.youtube.com/watch?v=M3YVTgTl-F0',
'a story about the stories between a father and son',
'http://www.gstatic.com/tv/thumb/movieposters/32942/p32942_p_v8_aa.jpg')
gone_in_60_seconds = movies.Movie('Gone In 60 Seconds',
'https://www.youtube.com/watch?v=o6AyAM1buQ8',
'A reformed car thief is given three days to steal 50 pristine autos',
'http://www.gstatic.com/tv/thumb/movieposters/25612/p25612_p_v8_aa.jpg')
lauberge_espagnole = movies.Movie('L\'auberge Espagnole',
'https://www.youtube.com/watch?v=CCs6AzLeNQI',
'a student\'s adventures living in Barcelona',
'http://www.gstatic.com/tv/thumb/dvdboxart/30919/p30919_d_v8_aa.jpg')
lilo_and_stitch = movies.Movie('Lilo and Stitch',
'https://www.youtube.com/watch?v=hu9bERy7XGY',
'a lonely little girl gets an extra-terrestrial friend',
'http://img.lum.dolimg.com/v1/images/open-uri20150422-12561-1dajwj_23920e88.jpeg?region=0%2C0%2C1000%2C1409')
idiocracy = movies.Movie('Idiocracy',
'https://www.youtube.com/watch?v=BBvIweCIgwk',
'an average american wakes up in the future',
'http://www.gstatic.com/tv/thumb/dvdboxart/159395/p159395_d_v8_aa.jpg')
movies_list = [pi_movie, lilo_and_stitch, lauberge_espagnole,
gone_in_60_seconds, big_fish, idiocracy]
# print(movies_list)
# pi_movie.show_trailer()
# opens and renders display
fresh_tomatoes.open_movies_page(movies_list)
| unlicense | 8,596,849,178,169,684,000 | 51.333333 | 140 | 0.572793 | false |
garrettr/onionshare | onionshare_gui/onionshare_gui.py | 1 | 5020 | import os, sys, subprocess, inspect, platform, argparse
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
if platform.system() == 'Darwin':
onionshare_gui_dir = os.path.dirname(__file__)
else:
onionshare_gui_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
try:
import onionshare
except ImportError:
sys.path.append(os.path.abspath(onionshare_gui_dir+"/.."))
import onionshare
from onionshare import translated
import webapp
window_icon = None
class Application(QApplication):
def __init__(self):
platform = onionshare.get_platform()
if platform == 'Tails' or platform == 'Linux':
self.setAttribute(Qt.AA_X11InitThreads, True)
QApplication.__init__(self, sys.argv)
class WebAppThread(QThread):
def __init__(self, webapp_port):
QThread.__init__(self)
self.webapp_port = webapp_port
def run(self):
webapp.app.run(port=self.webapp_port)
class Window(QWebView):
def __init__(self, basename, webapp_port):
global window_icon
QWebView.__init__(self)
self.setWindowTitle("{0} | OnionShare".format(basename))
self.resize(580, 400)
self.setMinimumSize(580, 400)
self.setMaximumSize(580, 400)
self.setWindowIcon(window_icon)
self.load(QUrl("http://127.0.0.1:{0}".format(webapp_port)))
def alert(msg, icon=QMessageBox.NoIcon):
global window_icon
dialog = QMessageBox()
dialog.setWindowTitle("OnionShare")
dialog.setWindowIcon(window_icon)
dialog.setText(msg)
dialog.setIcon(icon)
dialog.exec_()
def select_file(strings, filename=None):
# get filename, either from argument or file chooser dialog
if not filename:
args = {}
if onionshare.get_platform() == 'Tails':
args['directory'] = '/home/amnesia'
filename = QFileDialog.getOpenFileName(caption=translated('choose_file'), options=QFileDialog.ReadOnly, **args)
if not filename:
return False, False
filename = str(filename)
# validate filename
if not os.path.isfile(filename):
alert(translated("not_a_file").format(filename), QMessageBox.Warning)
return False, False
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
return filename, basename
def main():
onionshare.strings = onionshare.load_strings()
# start the Qt app
app = Application()
# check for root in Tails
if onionshare.get_platform() == 'Tails' and not onionshare.is_root():
subprocess.call(['/usr/bin/gksudo']+sys.argv)
return
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--local-only', action='store_true', dest='local_only', help='Do not attempt to use tor: for development only')
parser.add_argument('--stay-open', action='store_true', dest='stay_open', help='Keep hidden service running after download has finished')
parser.add_argument('--debug', action='store_true', dest='debug', help='Log errors to disk')
parser.add_argument('filename', nargs='?', help='File to share')
args = parser.parse_args()
filename = args.filename
local_only = args.local_only
stay_open = bool(args.stay_open)
debug = bool(args.debug)
onionshare.set_stay_open(stay_open)
# create the onionshare icon
global window_icon, onionshare_gui_dir
window_icon = QIcon("{0}/onionshare-icon.png".format(onionshare_gui_dir))
# try starting hidden service
onionshare_port = onionshare.choose_port()
local_host = "127.0.0.1:{0}".format(onionshare_port)
if not local_only:
try:
onion_host = onionshare.start_hidden_service(onionshare_port)
except onionshare.NoTor as e:
alert(e.args[0], QMessageBox.Warning)
return
onionshare.tails_open_port(onionshare_port)
# select file to share
filename, basename = select_file(onionshare.strings, filename)
if not filename:
return
# initialize the web app
webapp.onionshare = onionshare
webapp.onionshare_port = onionshare_port
webapp.filename = filename
webapp.qtapp = app
webapp.clipboard = app.clipboard()
webapp.stay_open = stay_open
if not local_only:
webapp.onion_host = onion_host
else:
webapp.onion_host = local_host
if debug:
webapp.debug_mode()
# run the web app in a new thread
webapp_port = onionshare.choose_port()
onionshare.tails_open_port(webapp_port)
webapp_thread = WebAppThread(webapp_port)
webapp_thread.start()
# clean up when app quits
def shutdown():
onionshare.tails_close_port(onionshare_port)
onionshare.tails_close_port(webapp_port)
app.connect(app, SIGNAL("aboutToQuit()"), shutdown)
# launch the window
web = Window(basename, webapp_port)
web.show()
# all done
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| gpl-3.0 | 1,164,837,602,835,972,400 | 30.572327 | 141 | 0.65996 | false |
openweave/openweave-core | src/tools/simnet/lib/simnet/layouts/two-hans-shared-host-gateway.py | 1 | 3105 | #
# Simnet Network Layout: Two HANs with shared gateways implemented on host
#
# This simnet configuration defines two HANs, each with its own WiFi and Thread networks.
# Both HANs contain a single Weave device connected to the respective WiFi/Thread networks.
# The HANs also contain separate Gateway nodes that are implemented, in unison, by the
# host (i.e. the host acts as both Gateways simultaneously). The gateways use the host's
# default interface (typically eth0) as their outside interface allowing the HANs to
# access the internet if the host has internet access.
#
# Note: In order for this configuration to work, the two Gateway nodes must use distinct
# IPv4 subnets on their inside interfaces, even though in a real scenario they could use
# the same subnet.
#
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#===============================================================================
# HAN-1
#===============================================================================
WiFiNetwork(
name = 'han-1-wifi',
)
ThreadNetwork(
name = 'han-1-thread',
meshLocalPrefix = 'fd24:2424:2424::/64'
)
# Gateway in HAN-1 implemented on host, with outside access via host's default interface.
Gateway(
name = 'han-1-gw',
outsideNetwork = None,
outsideInterface = 'host-default',
useHost = True,
insideNetwork = 'han-1-wifi',
insideIP4Subnet = '192.168.168.0/24',
isIP4DefaultGateway = True
)
# Weave device in HAN-1 connected to HAN-1 WiFi and Thread networks
WeaveDevice(
name = 'han-1-dev',
weaveNodeId = 1,
weaveFabricId = 1,
wifiNetwork = 'han-1-wifi',
threadNetwork = 'han-1-thread'
)
#===============================================================================
# HAN-2
#===============================================================================
WiFiNetwork(
name = 'han-2-wifi',
)
ThreadNetwork(
name = 'han-2-thread',
meshLocalPrefix = 'fd42:4242:4242::/64'
)
# Gateway in HAN-2 implemented on host, with outside access via host's default interface.
Gateway(
name = 'han-2-gw',
outsideNetwork = None,
outsideInterface = 'host-default',
useHost = True,
insideNetwork = 'han-2-wifi',
insideIP4Subnet = '192.168.167.0/24',
isIP4DefaultGateway = True
)
# Weave device in HAN-2 connected to HAN-2 WiFi and Thread networks
WeaveDevice(
name = 'han-2-dev',
weaveNodeId = 2,
weaveFabricId = 2,
wifiNetwork = 'han-2-wifi',
threadNetwork = 'han-2-thread'
)
| apache-2.0 | -7,368,057,710,601,936,000 | 31.010309 | 91 | 0.629308 | false |
dulichan/iot-ref-arch | python-agent/agent/Agent.py | 1 | 5037 | '''
Copyright (c) 2005-2011, WSO2 Inc. (http://www.wso2.org) All Rights Reserved.
WSO2 Inc. licenses this file to you under the Apache License,
Version 2.0 (the "License"); you may not use this file except
in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
'''
import core.Manager as Manager
#from custom.publishers.TemperaturePublisher import TemperaturePublisher
import time
import threading
import ConfigParser
import os
import pkgutil
import sys
import argparse
class Agent:
def start(self):
self.load_manager()
arguments = self.pass_arguments()
if(arguments.dmURL):
self.configure_dm_url(arguments.dmURL)
else:
self.configure_dm_url("https://localhost:9453/")
if(arguments.token):
# if the token doesn't exists - ask the agent to enroll the device
self.manager.enroll(self, arguments.token)
else:
enroll = self.agent_params['enroll']
if(enroll):
print "Device was enrolled to Device Manager previously"
else:
self.manager.enroll(self)
#self.add_process(TemperaturePublisher())
self.execute()
def pass_arguments(self):
'''
Parse command line arguments for the token
'''
parser = argparse.ArgumentParser()
parser.add_argument("--token")
parser.add_argument("--dmURL")
args = parser.parse_args()
# args.token = "sdfsdf"
return args
def __init__(self):
'''
Parse config files and setup agent variables
'''
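        # Illustrative config.conf layout (an assumption inferred from the keys
        # read below; the project documentation is authoritative):
        #   [agent]
        #   timer = True
        #   timer_interval = 5.0
        #   autoload = True
        #   enroll = False
        #   [security]
        #   access_token = <token issued by the device manager>
        #   refresh_token = <token issued by the device manager>
        #   enroll = True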
self.config = ConfigParser.ConfigParser()
self.config.read("config.conf")
self.process_list = []
self.agent_params = {}
self.configs = {}
# Runtime configs
self.agent_params['timer'] = self.config.get('agent', 'timer')
self.agent_params['timer_interval'] = float(self.config.get('agent', 'timer_interval'))
self.agent_params['autoload'] = self.config.get('agent', 'autoload')
if(self.config.has_option('agent', 'enroll')):
self.agent_params['enroll'] = self.config.get('agent', 'enroll')
else:
self.agent_params['enroll'] = False
#self.configs['deviceId'] = self.config.get('agent', 'enrollment')
# Security code
if(self.config.has_section('security')):
self.agent_params['access_token'] = self.config.get('security', 'access_token')
self.agent_params['refresh_token'] = self.config.get('security', 'refresh_token')
self.agent_params['enroll'] = self.config.get('security', 'enroll')
def configure_dm_url(self, dm_url):
self.manager.configure_dm_url(dm_url)
def load_manager(self):
'''
Load the Platform specific Device Manager implementation using the core Device Manager
'''
#self.manager = RaspberryPiManager()
self.manager = Manager.get_device_manager()
if(self.manager==None):
raise Exception("No Device Manager found for Platform")
def add_process(self, process):
self.process_list.append(process)
print "Adding process " + str(process)
def execute(self):
'''
The execution will run periodically based on the timer property in
the config.conf file
'''
# print len(self.process_list)
for process in self.process_list:
print "Executing process"
process.run()
if self.agent_params['autoload'] == 'True':
self.reload()
if self.agent_params['timer'] == 'True':
threading.Timer(self.agent_params['timer_interval'], self.execute).start()
def reload(self):
'''
Reload processors
'''
path = os.path.join(os.path.dirname(__file__), "custom/publishers")
modules = pkgutil.iter_modules(path=[path])
for loader, mod_name, ispkg in modules:
# Ensure that module isn't already loaded
# print mod_name not in sys.modules
if "custom.publishers." + mod_name not in sys.modules:
# Import module
loaded_mod = __import__(
"custom.publishers" + "." + mod_name, fromlist=[mod_name])
# Load class from imported module
class_name = mod_name
loaded_class = getattr(loaded_mod, class_name)
# Create an instance of the class
instance = loaded_class()
self.add_process(instance)
| apache-2.0 | 8,433,987,660,126,421,000 | 34.978571 | 98 | 0.600754 | false |
alienfluid/db.py | db/queries/mysql.py | 1 | 2424 | queries = {
"column": {
"head": "select %s from %s limit %d;",
"all": "select %s from %s;",
"unique": "select distinct %s from %s;",
"sample": "select %s from %s order by rand() limit %d;"
},
"table": {
"select": "select %s from %s;",
"head": "select * from %s limit %d;",
"all": "select * from %s;",
"unique": "select distinct %s from %s;",
"sample": "select * from %s order by rand() limit %d;"
},
"system": {
"schema_no_system": """
select
table_name
, column_name
, data_type
from
information_schema.columns
where
table_schema not in ('information_schema', 'performance_schema', 'mysql')
""",
"schema_with_system": """
select
table_name
, column_name
, data_type
from
information_schema.columns;
""",
"schema_specified": """
select
table_name
, column_name
                , data_type
from
information_schema.columns
where table_schema in (%s);
""",
"foreign_keys_for_table": """
select
column_name
, referenced_table_name
, referenced_column_name
from
information_schema.key_column_usage
where
table_name = '%s'
and referenced_column_name IS NOT NULL;
""",
"foreign_keys_for_column": """
select
column_name
, referenced_table_name
, referenced_column_name
from
information_schema.key_column_usage
where
table_name = '%s'
and column_name = '%s'
and referenced_column_name IS NOT NULL;
""",
"ref_keys_for_table": """
select
referenced_column_name
, table_name
, column_name
from
information_schema.key_column_usage
where
referenced_table_name = '%s'
and referenced_column_name IS NOT NULL;
"""
}
}
| bsd-2-clause | -2,213,053,112,435,723,500 | 30.076923 | 93 | 0.420792 | false |
ps-jay/temper-python | temperusb/snmp.py | 1 | 3612 | # encoding: utf-8
#
# Run snmp_temper.py as a pass-persist module for NetSNMP.
# See README.md for instructions.
#
# Copyright 2012-2014 Philipp Adelt <[email protected]>
#
# This code is licensed under the GNU public license (GPL). See LICENSE.md for details.
import os
import sys
import syslog
import threading
import snmp_passpersist as snmp
from temperusb.temper import TemperHandler, TemperDevice
ERROR_TEMPERATURE = 9999
def _unbuffered_handle(fd):
return os.fdopen(fd.fileno(), 'w', 0)
class LogWriter():
def __init__(self, ident='temper-python', facility=syslog.LOG_DAEMON):
syslog.openlog(ident, 0, facility)
def write_log(self, message, prio=syslog.LOG_INFO):
syslog.syslog(prio, message)
class Updater():
def __init__(self, pp, logger, testmode=False):
self.logger = logger
self.pp = pp
self.testmode = testmode
self.usb_lock = threading.Lock() # used to stop reinitialization interfering with update-thread
self._initialize()
def _initialize(self):
with self.usb_lock:
try:
self.th = TemperHandler()
self.devs = self.th.get_devices()
self.logger.write_log('Found %i thermometer devices.' % len(self.devs))
for i, d in enumerate(self.devs):
self.logger.write_log('Initial temperature of device #%i: %0.1f degree celsius' % (i, d.get_temperature()))
except Exception as e:
self.logger.write_log('Exception while initializing: %s' % str(e))
def _reinitialize(self):
# Tries to close all known devices and starts over.
self.logger.write_log('Reinitializing devices')
with self.usb_lock:
for i,d in enumerate(self.devs):
try:
d.close()
except Exception as e:
self.logger.write_log('Exception closing device #%i: %s' % (i, str(e)))
self._initialize()
def update(self):
if self.testmode:
# APC Internal/Battery Temperature
self.pp.add_int('318.1.1.1.2.2.2.0', 99)
# Cisco devices temperature OIDs
self.pp.add_int('9.9.13.1.3.1.3.1', 97)
self.pp.add_int('9.9.13.1.3.1.3.2', 98)
self.pp.add_int('9.9.13.1.3.1.3.3', 99)
else:
try:
with self.usb_lock:
temperatures = [d.get_temperature() for d in self.devs]
self.pp.add_int('318.1.1.1.2.2.2.0', int(max(temperatures)))
for i, temperature in enumerate(temperatures[:3]): # use max. first 3 devices
self.pp.add_int('9.9.13.1.3.1.3.%i' % (i+1), int(temperature))
except Exception as e:
self.logger.write_log('Exception while updating data: %s' % str(e))
# Report an exceptionally large temperature to set off all alarms.
# snmp_passpersist does not expose an API to remove an OID.
for oid in ('318.1.1.1.2.2.2.0', '9.9.13.1.3.1.3.1', '9.9.13.1.3.1.3.2', '9.9.13.1.3.1.3.3'):
self.pp.add_int(oid, ERROR_TEMPERATURE)
self.logger.write_log('Starting reinitialize after error on update')
self._reinitialize()
def main():
sys.stdout = _unbuffered_handle(sys.stdout)
pp = snmp.PassPersist(".1.3.6.1.4.1")
logger = LogWriter()
upd = Updater(pp, logger, testmode=('--testmode' in sys.argv))
pp.start(upd.update, 5) # update every 5s
if __name__ == '__main__':
main()
| gpl-3.0 | -4,935,427,862,383,892,000 | 37.425532 | 127 | 0.580288 | false |
rs2/bokeh | sphinx/source/docs/user_guide/examples/extensions_example_tool.py | 1 | 1483 | from bokeh.core.properties import Instance
from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource, Tool
from bokeh.plotting import figure
output_file('tool.html')
JS_CODE = """
import * as p from "core/properties"
import {GestureTool, GestureToolView} from "models/tools/gestures/gesture_tool"
export class DrawToolView extends GestureToolView
# this is executed when the pan/drag event starts
_pan_start: (e) ->
@model.source.data = {x: [], y: []}
# this is executed on subsequent mouse/touch moves
_pan: (e) ->
frame = @plot_model.frame
{sx, sy} = e.bokeh
if not frame.bbox.contains(sx, sy)
return null
x = frame.xscales['default'].invert(sx)
y = frame.yscales['default'].invert(sy)
@model.source.data.x.push(x)
@model.source.data.y.push(y)
@model.source.change.emit()
# this is executed then the pan/drag ends
_pan_end: (e) -> return null
export class DrawTool extends GestureTool
default_view: DrawToolView
type: "DrawTool"
tool_name: "Drag Span"
icon: "bk-tool-icon-lasso-select"
event_type: "pan"
default_order: 12
@define { source: [ p.Instance ] }
"""
class DrawTool(Tool):
__implementation__ = JS_CODE
source = Instance(ColumnDataSource)
source = ColumnDataSource(data=dict(x=[], y=[]))
plot = figure(x_range=(0,10), y_range=(0,10), tools=[DrawTool(source=source)])
plot.title.text ="Drag to draw on the plot"
plot.line('x', 'y', source=source)
show(plot)
| bsd-3-clause | -8,073,249,730,603,915,000 | 24.568966 | 79 | 0.685772 | false |
amitay/samba | buildtools/wafsamba/irixcc.py | 1 | 1943 |
# compiler definition for irix/MIPSpro cc compiler
# based on suncc.py from waf
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
from compiler_cc import c_compiler
c_compiler['irix'] = ['gcc', 'irixcc']
@conftest
def find_irixcc(conf):
v = conf.env
cc = None
if v['CC']: cc = v['CC']
elif 'CC' in conf.environ: cc = conf.environ['CC']
if not cc: cc = conf.find_program('cc', var='CC')
if not cc: conf.fatal('irixcc was not found')
cc = conf.cmd_to_list(cc)
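	# The IRIX MIPSpro cc is expected to print nothing to stdout for '-version'; any output means a different compiler answered.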
try:
if Utils.cmd_output(cc + ['-version']) != '':
conf.fatal('irixcc %r was not found' % cc)
except ValueError:
conf.fatal('irixcc -v could not be executed')
v['CC'] = cc
v['CC_NAME'] = 'irix'
@conftest
def irixcc_common_flags(conf):
v = conf.env
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['-c', '-o', '']
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = ''
v['CCLNK_TGT_F'] = ['-o', '']
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['CCDEFINES_ST'] = '-D%s'
# v['SONAME_ST'] = '-Wl,-h -Wl,%s'
# v['SHLIB_MARKER'] = '-Bdynamic'
# v['STATICLIB_MARKER'] = '-Bstatic'
# program
v['program_PATTERN'] = '%s'
# shared library
# v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
# v['shlib_LINKFLAGS'] = ['-G']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
# v['staticlib_LINKFLAGS'] = ['-Bstatic']
# v['staticlib_PATTERN'] = 'lib%s.a'
detect = '''
find_irixcc
find_cpp
find_ar
irixcc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
| gpl-3.0 | 6,208,172,338,209,144,000 | 24.233766 | 73 | 0.529593 | false |
mhl/mysociety-cvs | sitestats/pylib/sitestats/newsletters/tests/hfymp.py | 1 | 1131 | import unittest
from sitestats.newsletters.models.hfymp import HFYMPNewsletter
from tests import example_dir
from newsletter import MockPiwik, MockGoogle, newsletter_date
class HFYMPNewsletterTests(unittest.TestCase):
def setUp(self):
self.sources = {'piwik' : MockPiwik(), 'google' : MockGoogle()}
self.hfymp = HFYMPNewsletter()
self.hfymp.set_site_id = lambda sources: None
self.hfymp.base_url = 'http://www.hearfromyourmp.com'
def testRenderedToHTMLTemplateCorrectly(self):
html = self.hfymp.render('html', self.sources, date=newsletter_date()).strip()
expected_html = open(example_dir() + 'hfymp.html').read().strip()
self.assertEqual(expected_html, html, 'render produces correct output in HTML for example data')
def testRenderedToTextTemplateCorrectly(self):
text = self.hfymp.render('text', self.sources, date=newsletter_date()).strip()
expected_text = open(example_dir() + 'hfymp.txt').read().strip()
        self.assertEqual(expected_text, text, 'render produces correct output in text for example data')
| agpl-3.0 | 3,400,418,588,539,802,600 | 50.454545 | 104 | 0.691424 | false |
melinath/django-graph-api | django_graph_api/tests/conftest.py | 1 | 1399 | import pytest
from test_app.models import (
Droid,
Episode,
Human,
)
@pytest.fixture
def starwars_data(transactional_db):
luke, _ = Human.objects.get_or_create(
id=1000,
name='Luke Skywalker',
)
darth_vader, _ = Human.objects.get_or_create(
id=1001,
name='Darth Vader',
)
han, _ = Human.objects.get_or_create(
id=1002,
name='Han Solo',
)
leia, _ = Human.objects.get_or_create(
id=1003,
name='Leia Organa',
)
c3po, _ = Droid.objects.get_or_create(
id=2000,
name='C-3PO',
primary_function='Protocol',
)
r2d2, _ = Droid.objects.get_or_create(
id=2001,
name='R2-D2',
primary_function='Astromech',
)
for friend in (han, leia, c3po, r2d2):
luke.friends.add(friend)
han.friends.add(leia)
han.friends.add(r2d2)
leia.friends.add(c3po)
leia.friends.add(r2d2)
c3po.friends.add(r2d2)
a_new_hope, _ = Episode.objects.get_or_create(
id=1,
name='A New Hope',
number=4
)
empire_strikes_back, _ = Episode.objects.get_or_create(
id=2,
name='The Empire Strikes Back',
number=5
)
for character in (luke, han, leia, c3po, r2d2, darth_vader):
a_new_hope.characters.add(character)
empire_strikes_back.characters.add(character)
| mit | -1,655,201,041,344,370,000 | 21.934426 | 64 | 0.567548 | false |
cherepaha/PyDLV | pydlv/dl_plotter.py | 1 | 2699 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
class DLPlotter:
'''
    This class is responsible for plotting decision landscapes. Matplotlib is used as the plotting backend.
'''
figsize = (10.5, 6) # in inches, at 100 dpi
# figsize = (14, 8) # in inches, at 100 dpi
legendFontSize = 24
tickLabelFontSize = 18
axisLabelFontSize = 24
lw=2.0
def __init__(self, elev=27, azim=130, ax=None):
if ax is None:
fig = plt.figure(figsize=self.figsize)
self.ax = fig.add_subplot(111, projection='3d')
# self.ax = fig.gca(projection='3d')
else:
self.ax = ax
self.set_axis_params(elev, azim)
def set_axis_params(self, elev=27, azim=130):
self.ax.xaxis.set_major_locator(MaxNLocator(5))
self.ax.yaxis.set_major_locator(MaxNLocator(5))
self.ax.zaxis.set_major_locator(MaxNLocator(1))
self.ax.set_xlabel(r'x coordinate', fontsize=self.axisLabelFontSize, labelpad=20)
self.ax.set_ylabel(r'y coordinate', fontsize=self.axisLabelFontSize, labelpad=20)
self.ax.tick_params(axis='both', which='major', labelsize=self.tickLabelFontSize)
self.ax.view_init(elev, azim)
def plot_surface(self, x_grid, y_grid, z, cmap=cm.viridis, color=None, scale_z=True,
view=None, alpha=1.0, shade=False, linewidth=0.1, aa=True, plot_marble=True):
n_cells=100
x, y = np.meshgrid((x_grid[1:]+x_grid[:-1])/2, (y_grid[1:]+y_grid[:-1])/2)
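        # Replace NaNs in the landscape grid with zeros so plot_surface can draw a complete surface.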
z = np.nan_to_num(z)
if scale_z:
self.ax.set_zlim([np.min(z), 0])
norm = mpl.colors.Normalize(vmin=np.min(z), vmax=0, clip=False)
if plot_marble:
self.ax.plot([0.], [0.], [0.], marker='o', markersize=15, color='black')
if color is None:
self.ax.plot_surface(x, y, z, cmap=cmap, norm=norm, alpha=alpha, shade=shade,
rcount=n_cells, ccount=n_cells, linewidth=linewidth, edgecolors='k', antialiased=aa)
else:
self.ax.plot_surface(x, y, z, color=color, alpha=alpha, shade=shade, rcount=n_cells,
ccount=n_cells, linewidth=linewidth, edgecolors='k', antialiased=aa)
if view == 'top right':
self.ax.view_init(elev=27, azim=40)
return self.ax
def add_legend(self, colors, labels):
patches = [mpl.patches.Patch(color=color, linewidth=0) for color in colors]
        self.ax.legend(patches, labels, fontsize=self.legendFontSize)
| gpl-3.0 | 8,262,736,209,481,907,000 | 41.857143 | 113 | 0.607262 | false |
elkingtonmcb/bcbio-nextgen | bcbio/variation/varscan.py | 1 | 16073 | """Provide variant calling with VarScan from TGI at Wash U.
http://varscan.sourceforge.net/
"""
from collections import namedtuple
import contextlib
from distutils.version import LooseVersion
import os
import shutil
from bcbio import broad, utils
from bcbio.distributed.transaction import file_transaction, tx_tmpdir
from bcbio.pipeline import config_utils
from bcbio.provenance import do, programs
from bcbio.utils import file_exists, append_stem
from bcbio.variation import freebayes, samtools, vcfutils
from bcbio.variation.vcfutils import (combine_variant_files, write_empty_vcf,
get_paired_bams, is_paired_analysis,
bgzip_and_index, move_vcf)
import pysam
import vcf
def run_varscan(align_bams, items, ref_file, assoc_files,
region=None, out_file=None):
paired = get_paired_bams(align_bams, items)
if paired and paired.normal_bam and paired.tumor_bam:
call_file = samtools.shared_variantcall(_varscan_paired, "varscan",
align_bams, ref_file, items,
assoc_files, region, out_file)
else:
vcfutils.check_paired_problems(items)
call_file = samtools.shared_variantcall(_varscan_work, "varscan",
align_bams, ref_file,
items, assoc_files,
region, out_file)
return call_file
def _get_varscan_opts(config, tmp_dir):
"""Retrieve common options for running VarScan.
Handles jvm_opts, setting user and country to English to avoid issues
with different locales producing non-compliant VCF.
"""
resources = config_utils.get_resources("varscan", config)
jvm_opts = resources.get("jvm_opts", ["-Xmx750m", "-Xmx2g"])
jvm_opts = config_utils.adjust_opts(jvm_opts,
{"algorithm": {"memory_adjust":
{"magnitude": 1.1, "direction": "decrease"}}})
jvm_opts += ["-Duser.language=en", "-Duser.country=US"]
jvm_opts += broad.get_default_jvm_opts(tmp_dir)
return " ".join(jvm_opts)
def _varscan_paired(align_bams, ref_file, items, target_regions, out_file):
"""Run a paired VarScan analysis, also known as "somatic". """
max_read_depth = "1000"
config = items[0]["config"]
version = programs.jar_versioner("varscan", "VarScan")(config)
if LooseVersion(version) < LooseVersion("v2.3.6"):
raise IOError(
"Please install version 2.3.6 or better of VarScan with support "
"for multisample calling and indels in VCF format.")
varscan_jar = config_utils.get_jar(
"VarScan",
config_utils.get_program("varscan", config, "dir"))
remove_zerocoverage = "grep -v -P '\t0\t\t$'"
# No need for names in VarScan, hence the "_"
paired = get_paired_bams(align_bams, items)
if not paired.normal_bam:
affected_batch = items[0]["metadata"]["batch"]
message = ("Batch {} requires both tumor and normal BAM files for"
" VarScan cancer calling").format(affected_batch)
raise ValueError(message)
if not file_exists(out_file):
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
base, ext = utils.splitext_plus(out_file)
cleanup_files = []
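        # Write one mpileup per BAM; the normal pileup is created first and the tumor second,
        # matching the positional arguments VarScan somatic expects below.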
for fname, mpext in [(paired.normal_bam, "normal"), (paired.tumor_bam, "tumor")]:
mpfile = "%s-%s.mpileup" % (base, mpext)
cleanup_files.append(mpfile)
with file_transaction(config, mpfile) as mpfile_tx:
mpileup = samtools.prep_mpileup([fname], ref_file,
config, max_read_depth,
target_regions=target_regions,
want_bcf=False)
cmd = "{mpileup} > {mpfile_tx}"
cmd = cmd.format(**locals())
do.run(cmd, "samtools mpileup".format(**locals()), None,
[do.file_exists(mpfile_tx)])
# Sometimes mpileup writes an empty file: in this case we
# just skip the rest of the analysis (VarScan will hang otherwise)
if any(os.stat(filename).st_size == 0 for filename in cleanup_files):
write_empty_vcf(orig_out_file, config)
return
# First index is normal, second is tumor
normal_tmp_mpileup = cleanup_files[0]
tumor_tmp_mpileup = cleanup_files[1]
indel_file = base + ".indel.vcf"
snp_file = base + ".snp.vcf"
cleanup_files.append(indel_file)
cleanup_files.append(snp_file)
with file_transaction(config, indel_file, snp_file) as (tx_indel, tx_snp):
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_varscan_opts(config, tmp_dir)
fix_ambig = vcfutils.fix_ambiguous_cl()
tx_snp_in = "%s-orig" % os.path.splitext(tx_snp)[0]
tx_indel_in = "%s-orig" % os.path.splitext(tx_indel)[0]
varscan_cmd = ("java {jvm_opts} -jar {varscan_jar} somatic"
" {normal_tmp_mpileup} {tumor_tmp_mpileup} "
"--output-snp {tx_snp_in} --output-indel {tx_indel_in} "
" --output-vcf --min-coverage 5 --p-value 0.98 "
"--strand-filter 1 ")
# add minimum AF
if "--min-var-freq" not in varscan_cmd:
min_af = float(utils.get_in(paired.tumor_config, ("algorithm",
"min_allele_fraction"),10)) / 100.0
varscan_cmd += "--min-var-freq {min_af} "
do.run(varscan_cmd.format(**locals()), "Varscan", None, None)
for orig_fname, fname in [(tx_snp_in, tx_snp), (tx_indel_in, tx_indel)]:
cmd = "vcfuniqalleles {orig_fname}.vcf | {fix_ambig} > {fname}"
do.run(cmd.format(**locals()), "Varscan paired fix")
# VarScan files need to be corrected to match the VCF specification
# We do this before combining them otherwise merging may fail
# if there are invalid records
to_combine = []
if do.file_exists(snp_file):
to_combine.append(snp_file)
_fix_varscan_vcf(snp_file, paired.normal_name, paired.tumor_name, config)
if do.file_exists(indel_file):
to_combine.append(indel_file)
_fix_varscan_vcf(indel_file, paired.normal_name, paired.tumor_name, config)
if not to_combine:
write_empty_vcf(orig_out_file, config)
return
out_file = combine_variant_files([snp_file, indel_file],
out_file, ref_file, config,
region=target_regions)
# Remove cleanup files
for extra_file in cleanup_files:
for ext in ["", ".gz", ".gz.tbi"]:
if os.path.exists(extra_file + ext):
os.remove(extra_file + ext)
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
if orig_out_file.endswith(".gz"):
out_file = bgzip_and_index(out_file, config)
_add_reject_flag(out_file, config)
def _fix_varscan_vcf(orig_file, normal_name, tumor_name, config):
"""Fixes issues with the standard VarScan VCF output.
- Remap sample names back to those defined in the input BAM file.
- Convert indels into correct VCF representation.
"""
tmp_file = append_stem(orig_file, "-origsample")
if not file_exists(tmp_file):
shutil.move(orig_file, tmp_file)
with file_transaction(config, orig_file) as tx_out_file:
with open(tmp_file) as in_handle:
with open(tx_out_file, "w") as out_handle:
for line in in_handle:
line = _clean_varscan_line(_fix_varscan_output(line, normal_name, tumor_name))
if not line:
continue
out_handle.write(line)
def _add_reject_flag(in_file, config):
"""Add REJECT flag to all records that aren't flagged somatic
(SS=2)"""
Filter = namedtuple('Filter', ['id', 'desc'])
reject_filter = Filter(id='REJECT',
desc='Rejected as non-SOMATIC or by quality')
# NOTE: PyVCF will write an uncompressed VCF
base, ext = utils.splitext_plus(in_file)
name = "rejectfix"
out_file = "{0}-{1}{2}".format(base, name, ".vcf")
if utils.file_exists(in_file):
reader = vcf.VCFReader(filename=in_file)
# Add info to the header of the reader
reader.filters["REJECT"] = reject_filter
with file_transaction(config, out_file) as tx_out_file:
with open(tx_out_file, "wb") as handle:
writer = vcf.VCFWriter(handle, template=reader)
for record in reader:
if "SS" in record.INFO:
# VarScan encodes it as a string
# TODO: Set it as integer when cleaning
if record.INFO["SS"] != "2":
record.add_filter("REJECT")
writer.write_record(record)
# Re-compress the file
out_file = bgzip_and_index(out_file, config)
move_vcf(in_file, "{0}.orig".format(in_file))
move_vcf(out_file, in_file)
with open(out_file, "w") as out_handle:
out_handle.write("Moved to {0}".format(in_file))
def _fix_varscan_output(line, normal_name, tumor_name):
"""Fix a varscan VCF line
Fixes the ALT column and also fixes floating point values
    output as strings so that they become Floats: FREQ, SSC.
    :param line: a raw VarScan VCF line (it is stripped and split here)
This function was contributed by Sean Davis <[email protected]>,
with minor modifications by Luca Beltrame <[email protected]>.
"""
line = line.strip()
tofix = ("##INFO=<ID=SSC", "##FORMAT=<ID=FREQ")
if(line.startswith("##")):
if line.startswith(tofix):
line = line.replace('Number=1,Type=String',
'Number=1,Type=Float')
return line + "\n"
line = line.split("\t")
mapping = {"NORMAL": normal_name, "TUMOR": tumor_name}
if(line[0].startswith("#CHROM")):
base_header = line[:9]
old_samples = line[9:]
if len(old_samples) == 0:
return "\t".join(line) + "\n"
samples = [mapping[sample_name] for sample_name in old_samples]
assert len(old_samples) == len(samples)
return "\t".join(base_header + samples) + "\n"
try:
REF, ALT = line[3:5]
except ValueError:
return "\t".join(line) + "\n"
Ifreq = line[8].split(":").index("FREQ")
ndat = line[9].split(":")
tdat = line[10].split(":")
somatic_status = line[7].split(";") # SS=<number>
# HACK: The position of the SS= changes, so we just search for it
somatic_status = [item for item in somatic_status
if item.startswith("SS=")][0]
somatic_status = int(somatic_status.split("=")[1]) # Get the number
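    # FREQ is reported as a percentage string (e.g. "12.5%"); convert it to a 0-1 fraction.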
ndat[Ifreq] = str(float(ndat[Ifreq].rstrip("%")) / 100)
tdat[Ifreq] = str(float(tdat[Ifreq].rstrip("%")) / 100)
line[9] = ":".join(ndat)
line[10] = ":".join(tdat)
#FIXME: VarScan also produces invalid REF records (e.g. CAA/A)
# This is not handled yet.
if somatic_status == 5:
# "Unknown" states are broken in current versions of VarScan
# so we just bail out here for now
return
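    # VarScan writes indels as +SEQ (insertion) or -SEQ (deletion) relative to REF;
    # convert them to the standard VCF representation with a shared anchor base.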
if "+" in ALT or "-" in ALT:
if "/" not in ALT:
if ALT[0] == "+":
R = REF
A = REF + ALT[1:]
elif ALT[0] == "-":
R = REF + ALT[1:]
A = REF
else:
Ins = [p[1:] for p in ALT.split("/") if p[0] == "+"]
Del = [p[1:] for p in ALT.split("/") if p[0] == "-"]
if len(Del):
REF += sorted(Del, key=lambda x: len(x))[-1]
A = ",".join([REF[::-1].replace(p[::-1], "", 1)[::-1]
for p in Del] + [REF + p for p in Ins])
R = REF
REF = R
ALT = A
else:
ALT = ALT.replace('/', ',')
line[3] = REF
line[4] = ALT
return "\t".join(line) + "\n"
def _create_sample_list(in_bams, vcf_file):
"""Pull sample names from input BAMs and create input sample list.
"""
out_file = "%s-sample_list.txt" % os.path.splitext(vcf_file)[0]
with open(out_file, "w") as out_handle:
for in_bam in in_bams:
with contextlib.closing(pysam.Samfile(in_bam, "rb")) as work_bam:
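                # The SM tag of each @RG header line names the sample for that BAM.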
for rg in work_bam.header.get("RG", []):
out_handle.write("%s\n" % rg["SM"])
return out_file
def _varscan_work(align_bams, ref_file, items, target_regions, out_file):
"""Perform SNP and indel genotyping with VarScan.
"""
config = items[0]["config"]
orig_out_file = out_file
out_file = orig_out_file.replace(".vcf.gz", ".vcf")
max_read_depth = "1000"
version = programs.jar_versioner("varscan", "VarScan")(config)
    if LooseVersion(version) < LooseVersion("v2.3.6"):
raise IOError("Please install version 2.3.6 or better of VarScan"
" with support for multisample calling and indels"
" in VCF format.")
varscan_jar = config_utils.get_jar("VarScan",
config_utils.get_program("varscan", config, "dir"))
sample_list = _create_sample_list(align_bams, out_file)
mpileup = samtools.prep_mpileup(align_bams, ref_file, config, max_read_depth,
target_regions=target_regions, want_bcf=False)
# VarScan fails to generate a header on files that start with
# zerocoverage calls; strip these with grep, we're not going to
# call on them
remove_zerocoverage = "grep -v -P '\t0\t\t$'"
# write a temporary mpileup file so we can check if empty
mpfile = "%s.mpileup" % os.path.splitext(out_file)[0]
with file_transaction(config, mpfile) as mpfile_tx:
cmd = ("{mpileup} | {remove_zerocoverage} > {mpfile_tx}")
do.run(cmd.format(**locals()), "mpileup for Varscan")
if os.path.getsize(mpfile) == 0:
write_empty_vcf(out_file)
else:
with tx_tmpdir(items[0]) as tmp_dir:
jvm_opts = _get_varscan_opts(config, tmp_dir)
fix_ambig = vcfutils.fix_ambiguous_cl()
cmd = ("cat {mpfile} "
"| java {jvm_opts} -jar {varscan_jar} mpileup2cns --min-coverage 5 --p-value 0.98 "
" --vcf-sample-list {sample_list} --output-vcf --variants "
"| {fix_ambig} | vcfuniqalleles > {out_file}")
do.run(cmd.format(**locals()), "Varscan", None,
[do.file_exists(out_file)])
os.remove(sample_list)
os.remove(mpfile)
# VarScan can create completely empty files in regions without
# variants, so we create a correctly formatted empty file
if os.path.getsize(out_file) == 0:
write_empty_vcf(out_file)
else:
freebayes.clean_vcf_output(out_file, _clean_varscan_line, config)
if orig_out_file.endswith(".gz"):
vcfutils.bgzip_and_index(out_file, config)
def _clean_varscan_line(line):
"""Avoid lines with non-GATC bases, ambiguous output bases make GATK unhappy.
"""
if line and not line.startswith("#"):
parts = line.split("\t")
alleles = [x.strip() for x in parts[4].split(",")] + [parts[3].strip()]
for a in alleles:
if len(set(a) - set("GATCgatc")) > 0:
return None
return line
| mit | -5,961,469,110,258,043,000 | 39.1825 | 105 | 0.554657 | false |
frozstone/mcatunification | mathmlpres_to_string_test.py | 1 | 1562 | from lxml import etree
from mathml_to_string import MathML2String
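# Example NTCIR Presentation MathML fragment; the mws:qvar elements are MathWebSearch query-variable wildcards.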
s1 = '''<math xmlns="http://ntcir-math.nii.ac.jp/" xmlns:m="http://www.w3.org/1998/Math/MathML">
<m:mrow xml:id="m22.1.10.pmml" xref="m22.1.10">
<m:mo xml:id="m22.1.1.pmml" xref="m22.1.1">-</m:mo>
<m:mrow xml:id="m22.1.10.1.pmml" xref="m22.1.10.1">
<m:mi xml:id="m22.1.2.pmml" xref="m22.1.2">t</m:mi>
<m:mo xml:id="m22.1.10.1.1.pmml" xref="m22.1.10.1.1"></m:mo>
<m:mi xml:id="m22.1.3.pmml" xref="m22.1.3">r</m:mi>
<m:mo xml:id="m22.1.10.1.1a.pmml" xref="m22.1.10.1.1"></m:mo>
<m:mrow xml:id="m22.1.10.1.2.pmml" xref="m22.1.10.1.2">
<m:mo xml:id="m22.1.10.1.2a.pmml" xref="m22.1.10.1.2">(</m:mo>
<m:mrow xml:id="m22.1.10.1.2b.pmml" xref="m22.1.10.1.2">
<mws:qvar xmlns:mws="http://search.mathweb.org/ns" name="x"/>
<m:mo xml:id="m22.1.10.1.2.1.pmml" xref="m22.1.10.1.2.1"></m:mo>
<m:mi xml:id="m22.1.6.pmml" xref="m22.1.6">l</m:mi>
<m:mo xml:id="m22.1.10.1.2.1a.pmml" xref="m22.1.10.1.2.1"></m:mo>
<m:mi xml:id="m22.1.7.pmml" xref="m22.1.7">n</m:mi>
<m:mo xml:id="m22.1.10.1.2.1b.pmml" xref="m22.1.10.1.2.1"></m:mo>
<mws:qvar xmlns:mws="http://search.mathweb.org/ns" name="x"/>
</m:mrow>
<m:mo xml:id="m22.1.10.1.2c.pmml" xref="m22.1.10.1.2">)</m:mo>
</m:mrow>
</m:mrow>
</m:mrow>
</math>
'''
d1 = etree.fromstring(s1.encode("utf-8"))
| mit | 475,348,248,501,275,300 | 49.064516 | 96 | 0.510309 | false |