repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
(column types: repo_name string 5 to 100 chars; path string 4 to 299 chars; copies string 990 classes; size string 4 to 7 chars; content string 666 to 1.03M chars; license string 15 classes; hash int64 -9,223,351,895,964,839,000 to 9,223,297,778B; line_mean float64 3.17 to 100; line_max int64 7 to 1k; alpha_frac float64 0.25 to 0.98; autogenerated bool 1 class)

MobinRanjbar/hue | apps/oozie/src/oozie/migrations/0027_auto__chg_field_node_name__chg_field_job_name.py | 21 | 25573 |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Node.name'
db.alter_column(u'oozie_node', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
# Changing field 'Job.name'
db.alter_column(u'oozie_job', 'name', self.gf('django.db.models.fields.CharField')(max_length=255))
def backwards(self, orm):
# Changing field 'Node.name'
db.alter_column(u'oozie_node', 'name', self.gf('django.db.models.fields.CharField')(max_length=40))
# Changing field 'Job.name'
db.alter_column(u'oozie_job', 'name', self.gf('django.db.models.fields.CharField')(max_length=40))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'oozie.bundle': {
'Meta': {'object_name': 'Bundle', '_ormbases': [u'oozie.Job']},
'coordinators': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['oozie.Coordinator']", 'through': u"orm['oozie.BundledCoordinator']", 'symmetrical': 'False'}),
u'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'kick_off_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 28, 0, 0)'})
},
u'oozie.bundledcoordinator': {
'Meta': {'object_name': 'BundledCoordinator'},
'bundle': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Bundle']"}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Coordinator']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''})
},
u'oozie.coordinator': {
'Meta': {'object_name': 'Coordinator', '_ormbases': [u'oozie.Job']},
'concurrency': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'end': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 31, 0, 0)'}),
'execution': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
u'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 28, 0, 0)'}),
'throttle': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timeout': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Workflow']", 'null': 'True'})
},
u'oozie.datainput': {
'Meta': {'object_name': 'DataInput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Dataset']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'oozie.dataoutput': {
'Meta': {'object_name': 'DataOutput'},
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Coordinator']"}),
'dataset': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Dataset']", 'unique': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'oozie.dataset': {
'Meta': {'object_name': 'Dataset'},
'advanced_end_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128', 'blank': 'True'}),
'advanced_start_instance': ('django.db.models.fields.CharField', [], {'default': "'0'", 'max_length': '128'}),
'coordinator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Coordinator']"}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'done_flag': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'blank': 'True'}),
'frequency_number': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'frequency_unit': ('django.db.models.fields.CharField', [], {'default': "'days'", 'max_length': '20'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_choice': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '10'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'start': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 8, 28, 0, 0)'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "'America/Los_Angeles'", 'max_length': '24'}),
'uri': ('django.db.models.fields.CharField', [], {'default': "'/data/${YEAR}${MONTH}${DAY}'", 'max_length': '1024'})
},
u'oozie.decision': {
'Meta': {'object_name': 'Decision'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.decisionend': {
'Meta': {'object_name': 'DecisionEnd'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.distcp': {
'Meta': {'object_name': 'DistCp'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
u'oozie.email': {
'Meta': {'object_name': 'Email'},
'body': ('django.db.models.fields.TextField', [], {'default': "''"}),
'cc': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'subject': ('django.db.models.fields.TextField', [], {'default': "''"}),
'to': ('django.db.models.fields.TextField', [], {'default': "''"})
},
u'oozie.end': {
'Meta': {'object_name': 'End'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.fork': {
'Meta': {'object_name': 'Fork'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.fs': {
'Meta': {'object_name': 'Fs'},
'chmods': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'deletes': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'mkdirs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
'moves': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'touchzs': ('django.db.models.fields.TextField', [], {'default': "'[]'", 'blank': 'True'})
},
u'oozie.generic': {
'Meta': {'object_name': 'Generic'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {'default': "''"})
},
u'oozie.history': {
'Meta': {'object_name': 'History'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Job']"}),
'oozie_job_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'properties': ('django.db.models.fields.TextField', [], {}),
'submission_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'oozie.hive': {
'Meta': {'object_name': 'Hive'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "'hive-config.xml'", 'max_length': '512', 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'oozie.java': {
'Meta': {'object_name': 'Java'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'args': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'java_opts': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'main_class': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
u'oozie.job': {
'Meta': {'object_name': 'Job'},
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'deployment_dir': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_shared': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_trashed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'parameters': ('django.db.models.fields.TextField', [], {'default': '\'[{"name":"oozie.use.system.libpath","value":"true"}]\''}),
'schema_version': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'oozie.join': {
'Meta': {'object_name': 'Join'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.kill': {
'Meta': {'object_name': 'Kill'},
'message': ('django.db.models.fields.CharField', [], {'default': "'Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]'", 'max_length': '256'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.link': {
'Meta': {'object_name': 'Link'},
'child': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parent_node'", 'to': u"orm['oozie.Node']"}),
'comment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'child_node'", 'to': u"orm['oozie.Node']"})
},
u'oozie.mapreduce': {
'Meta': {'object_name': 'Mapreduce'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'jar_path': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
u'oozie.node': {
'Meta': {'object_name': 'Node'},
'children': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'parents'", 'symmetrical': 'False', 'through': u"orm['oozie.Link']", 'to': u"orm['oozie.Node']"}),
'data': ('django.db.models.fields.TextField', [], {'default': "'{}'", 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'node_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'workflow': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['oozie.Workflow']"})
},
u'oozie.pig': {
'Meta': {'object_name': 'Pig'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
u'oozie.shell': {
'Meta': {'object_name': 'Shell'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"})
},
u'oozie.sqoop': {
'Meta': {'object_name': 'Sqoop'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'prepares': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'script_path': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
u'oozie.ssh': {
'Meta': {'object_name': 'Ssh'},
'capture_output': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'command': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'params': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'user': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'oozie.start': {
'Meta': {'object_name': 'Start'},
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'})
},
u'oozie.streaming': {
'Meta': {'object_name': 'Streaming'},
'archives': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'files': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
'mapper': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'reducer': ('django.db.models.fields.CharField', [], {'max_length': '512'})
},
u'oozie.subworkflow': {
'Meta': {'object_name': 'SubWorkflow'},
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
u'node_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Node']", 'unique': 'True', 'primary_key': 'True'}),
'propagate_configuration': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'sub_workflow': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['oozie.Workflow']", 'null': 'True', 'blank': 'True'})
},
u'oozie.workflow': {
'Meta': {'object_name': 'Workflow', '_ormbases': [u'oozie.Job']},
'end': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'end_workflow'", 'null': 'True', 'to': u"orm['oozie.End']"}),
'is_single': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'job_properties': ('django.db.models.fields.TextField', [], {'default': "'[]'"}),
u'job_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['oozie.Job']", 'unique': 'True', 'primary_key': 'True'}),
'job_xml': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'start': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'start_workflow'", 'null': 'True', 'to': u"orm['oozie.Start']"})
}
}
complete_apps = ['oozie']
| apache-2.0 | 5,193,480,082,138,684,000 | 77.207951 | 196 | 0.534548 | false |
nerandell/vyked | vyked/bus.py | 1 | 12997 |
import asyncio
from collections import defaultdict
from functools import partial
import json
import logging
import random
import uuid
from again.utils import unique_hex
import aiohttp
from retrial.retrial import retry
from .services import TCPServiceClient, HTTPServiceClient
from .pubsub import PubSub
from .packet import ControlPacket, MessagePacket
from .protocol_factory import get_vyked_protocol
from .utils.jsonencoder import VykedEncoder
from .exceptions import ClientNotFoundError, ClientDisconnected
HTTP = 'http'
TCP = 'tcp'
def _retry_for_pub(result):
return not result
def _retry_for_exception(_):
return True
class HTTPBus:
def __init__(self, registry_client):
self._registry_client = registry_client
def send_http_request(self, app: str, service: str, version: str, method: str, entity: str, params: dict):
"""
A convenience method that allows you to send a well formatted http request to another service
"""
host, port, node_id, service_type = self._registry_client.resolve(service, version, entity, HTTP)
url = 'http://{}:{}{}'.format(host, port, params.pop('path'))
http_keys = ['data', 'headers', 'cookies', 'auth', 'allow_redirects', 'compress', 'chunked']
kwargs = {k: params[k] for k in http_keys if k in params}
query_params = params.pop('params', {})
if app is not None:
query_params['app'] = app
query_params['version'] = version
query_params['service'] = service
response = yield from aiohttp.request(method, url, params=query_params, **kwargs)
return response
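# Illustrative usage sketch (not part of the original module); `registry` and the
# service/params values below are placeholders. Because send_http_request uses
# `yield from`, it has to be driven from a coroutine running on the event loop:
#
#   bus = HTTPBus(registry)
#   response = yield from bus.send_http_request(
#       app=None, service='accounts', version='1.0.0', method='get',
#       entity='user_id', params={'path': '/users/42', 'params': {}})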
class TCPBus:
def __init__(self, registry_client):
registry_client.conn_handler = self
self._registry_client = registry_client
self._client_protocols = {}
self._pingers = {}
self._node_clients = {}
self._service_clients = []
self.tcp_host = None
self.http_host = None
self._host_id = unique_hex()
self._ronin = False
self._registered = False
self._logger = logging.getLogger(__name__)
def _create_service_clients(self):
futures = []
for sc in self._service_clients:
for host, port, node_id, service_type in self._registry_client.get_all_addresses(*sc.properties):
if service_type == 'tcp':
self._node_clients[node_id] = sc
future = self._connect_to_client(host, node_id, port, service_type, sc)
futures.append(future)
return asyncio.gather(*futures, return_exceptions=False)
def connect(self):
clients = self.tcp_host.clients if self.tcp_host else self.http_host.clients
for client in clients:
if isinstance(client, (TCPServiceClient, HTTPServiceClient)):
client.bus = self
self._service_clients = clients
yield from self._registry_client.connect()
def register(self):
if self.tcp_host:
self._registry_client.register(self.tcp_host.host, self.tcp_host.port, self.tcp_host.name,
self.tcp_host.version, self.tcp_host.node_id, self.tcp_host.clients, 'tcp')
if self.http_host:
self._registry_client.register(self.http_host.host, self.http_host.port, self.http_host.name,
self.http_host.version, self.http_host.node_id, self.http_host.clients,
'http')
def registration_complete(self):
if not self._registered:
self._create_service_clients()
self._registered = True
def new_instance(self, service, version, host, port, node_id, type):
sc = next(sc for sc in self._service_clients if sc.name == service and sc.version == version)
if type == 'tcp':
self._node_clients[node_id] = sc
asyncio.async(self._connect_to_client(host, node_id, port, type, sc))
def send(self, packet: dict):
packet['from'] = self._host_id
func = getattr(self, '_' + packet['type'] + '_sender')
asyncio.async(func(packet))
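# Dispatch is by naming convention: a packet whose 'type' is 'request' is routed to
# self._request_sender() here, and the receiving bus routes it to the matching
# '_<type>_receiver' method in receive() below.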
@retry(should_retry_for_result=lambda x: not x, should_retry_for_exception=lambda x: True, timeout=None,
max_attempts=5, multiplier=2)
def _request_sender(self, packet: dict):
"""
Sends a request to a server from a ServiceClient
auto dispatch method called from self.send()
"""
node_id = self._get_node_id_for_packet(packet)
client_protocol = self._client_protocols.get(node_id)
if node_id and client_protocol:
if client_protocol.is_connected():
packet['to'] = node_id
client_protocol.send(packet)
return True
else:
self._logger.error('Client protocol is not connected for packet %s', packet)
raise ClientDisconnected()
else:
# No node found to send request
self._logger.error('Out of %s, Client Not found for packet %s', self._client_protocols.keys(), packet)
raise ClientNotFoundError()
def _connect_to_client(self, host, node_id, port, service_type, service_client):
future = asyncio.async(
asyncio.get_event_loop().create_connection(partial(get_vyked_protocol, service_client), host, port,
ssl=service_client._ssl_context))
future.add_done_callback(
partial(self._service_client_connection_callback, self._node_clients[node_id], node_id, service_type))
return future
def _service_client_connection_callback(self, sc, node_id, service_type, future):
_, protocol = future.result()
# TODO : handle pinging
# if service_type == TCP:
# pinger = Pinger(self, asyncio.get_event_loop())
# self._pingers[node_id] = pinger
# pinger.register_tcp_service(protocol, node_id)
# asyncio.async(pinger.start_ping())
self._client_protocols[node_id] = protocol
@staticmethod
def _create_json_service_name(app, service, version):
return {'app': app, 'name': service, 'version': version}
@staticmethod
def _handle_ping(packet, protocol):
protocol.send(ControlPacket.pong(packet['node_id']))
def _handle_pong(self, node_id, count):
pinger = self._pingers[node_id]
asyncio.async(pinger.pong_received(count))
def _get_node_id_for_packet(self, packet):
service, version, entity = packet['name'], packet['version'], packet['entity']
node = self._registry_client.resolve(service, version, entity, TCP)
return node[2] if node else None
def handle_ping_timeout(self, node_id):
self._logger.info("Service client connection timed out {}".format(node_id))
self._pingers.pop(node_id, None)
service_props = self._registry_client.get_for_node(node_id)
self._logger.info('service client props {}'.format(service_props))
if service_props is not None:
host, port, _node_id, _type = service_props
asyncio.async(self._connect_to_client(host, _node_id, port, _type))
def receive(self, packet: dict, protocol, transport):
if packet['type'] == 'ping':
self._handle_ping(packet, protocol)
elif packet['type'] == 'pong':
self._handle_pong(packet['node_id'], packet['count'])
elif packet['type'] == 'publish':
self._handle_publish(packet, protocol)
else:
if self.tcp_host.is_for_me(packet['name'], packet['version']):
func = getattr(self, '_' + packet['type'] + '_receiver')
func(packet, protocol)
else:
self._logger.warn('wrongly routed packet: %s', packet)
def _request_receiver(self, packet, protocol):
api_fn = getattr(self.tcp_host, packet['endpoint'])
if api_fn.is_api:
from_node_id = packet['from']
entity = packet['entity']
future = asyncio.async(api_fn(from_id=from_node_id, entity=entity, **packet['payload']))
def send_result(f):
result_packet = f.result()
protocol.send(result_packet)
future.add_done_callback(send_result)
else:
print('no api found for packet: ', packet)
def _handle_publish(self, packet, protocol):
service, version, endpoint, payload, publish_id = (packet['name'], packet['version'], packet['endpoint'],
packet['payload'], packet['publish_id'])
for client in self._service_clients:
if client.name == service and client.version == version:
fun = getattr(client, endpoint)
asyncio.async(fun(payload))
protocol.send(MessagePacket.ack(publish_id))
def handle_connected(self):
if self.tcp_host:
yield from self.tcp_host.initiate()
if self.http_host:
yield from self.http_host.initiate()
class PubSubBus:
PUBSUB_DELAY = 5
def __init__(self, pubsub_host, pubsub_port, registry_client, ssl_context=None):
self._host = pubsub_host
self._port = pubsub_port
self._pubsub_handler = None
self._registry_client = registry_client
self._clients = None
self._pending_publishes = {}
self._ssl_context = ssl_context
def create_pubsub_handler(self):
self._pubsub_handler = PubSub(self._host, self._port)
yield from self._pubsub_handler.connect()
def register_for_subscription(self, host, port, node_id, clients):
self._clients = clients
subscription_list = []
xsubscription_list = []
for client in clients:
if isinstance(client, TCPServiceClient):
for each in dir(client):
fn = getattr(client, each)
if callable(fn) and getattr(fn, 'is_subscribe', False):
subscription_list.append(self._get_pubsub_key(client.name, client.version, fn.__name__))
elif callable(fn) and getattr(fn, 'is_xsubscribe', False):
xsubscription_list.append((client.name, client.version, fn.__name__, getattr(fn, 'strategy')))
self._registry_client.x_subscribe(host, port, node_id, xsubscription_list)
yield from self._pubsub_handler.subscribe(subscription_list, handler=self.subscription_handler)
def publish(self, service, version, endpoint, payload):
endpoint_key = self._get_pubsub_key(service, version, endpoint)
asyncio.async(self._pubsub_handler.publish(endpoint_key, json.dumps(payload, cls=VykedEncoder)))
asyncio.async(self.xpublish(service, version, endpoint, payload))
def xpublish(self, service, version, endpoint, payload):
subscribers = yield from self._registry_client.get_subscribers(service, version, endpoint)
strategies = defaultdict(list)
for subscriber in subscribers:
strategies[(subscriber['name'], subscriber['version'])].append(
(subscriber['host'], subscriber['port'], subscriber['node_id'], subscriber['strategy']))
for key, value in strategies.items():
publish_id = str(uuid.uuid4())
future = asyncio.async(
self._connect_and_publish(publish_id, service, version, endpoint, value, payload))
self._pending_publishes[publish_id] = future
def receive(self, packet, transport, protocol):
if packet['type'] == 'ack':
future = self._pending_publishes.pop(packet['request_id'], None)
if future:
future.cancel()
transport.close()
def subscription_handler(self, endpoint, payload):
service, version, endpoint = endpoint.split('/')
client = [sc for sc in self._clients if (sc.name == service and sc.version == version)][0]
func = getattr(client, endpoint)
asyncio.async(func(**json.loads(payload)))
@staticmethod
def _get_pubsub_key(service, version, endpoint):
return '/'.join((service, str(version), endpoint))
def _connect_and_publish(self, publish_id, service, version, endpoint, subscribers, payload):
if subscribers[0][3] == 'LEADER':
host, port = subscribers[0][0], subscribers[0][1]
else:
random_metadata = random.choice(subscribers)
host, port = random_metadata[0], random_metadata[1]
transport, protocol = yield from asyncio.get_event_loop().create_connection(
partial(get_vyked_protocol, self), host, port)
packet = MessagePacket.publish(publish_id, service, version, endpoint, payload)
protocol.send(packet)
yield from asyncio.sleep(self.PUBSUB_DELAY)
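# Note on the xpublish path above: subscribers are grouped per (service, version) and
# each group gets a single message, delivered to the first listed node when the
# subscription strategy is 'LEADER' and to a randomly chosen node otherwise. The
# publish future is kept in _pending_publishes until the subscriber's 'ack' arrives,
# at which point receive() cancels it.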
| mit | -2,944,843,209,032,914,400 | 41.613115 | 118 | 0.608525 | false |
JTCunning/sentry | src/sentry/migrations/0126_auto__add_field_option_last_updated.py | 36 | 25153 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Option.last_updated'
db.add_column('sentry_option', 'last_updated',
self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Option.last_updated'
db.delete_column('sentry_option', 'last_updated')
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause | -5,423,655,050,034,848,000 | 83.40604 | 223 | 0.558422 | false |
immstudios/notify | scripts/subscriber.py | 1 | 1544 |
#!/usr/bin/env python
import json
import thread
import uuid
try:
    from urllib.request import urlopen
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse
    from urllib2 import urlopen
__all__ = ["NotifySubscriber"]
class NotifySubscriber():
    def __init__(self, **kwargs):
        self.channel = kwargs.get("channel", self.default_channel)
        self.host = kwargs.get("host", "localhost")
        self.port = kwargs.get("port", 80)
        self.ssl = kwargs.get("ssl", False)
        assert type(self.port) == int
        assert self.ssl in [True, False]
        thread.start_new_thread(self.listen, ())

    @property
    def default_channel(self):
        return str(uuid.uuid1())

    @property
    def url(self):
        return "{protocol}://{host}:{port}/sub/{channel}".format(
            protocol=["http","https"][self.ssl],
            host=self.host,
            port=self.port,
            channel=self.channel
        )

    def listen(self):
        print "listening", self.url
        feed = urlopen(self.url, timeout=2)
        buff = ""
        while True:
            ch = feed.read(1)
            if ch == "\n":
                self.handler(buff)
                buff = ""
            else:
                buff += ch

    def handler(self, message):
        print message
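# A consumer would typically subclass NotifySubscriber and override handler();
# a minimal sketch, where process() stands in for application code:
#
#   class MySubscriber(NotifySubscriber):
#       def handler(self, message):
#           process(message)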
if __name__ == "__main__":
import time
config = json.load(open("local_settings.json"))
notify = NotifySubscriber(**config)
while True:
time.sleep(1)
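# A minimal usage sketch (illustrative only; the host and channel values below
# are made-up, and in practice you would subclass NotifySubscriber rather than
# rely on the default handler, which just prints each line):
#
#   class MySubscriber(NotifySubscriber):
#       def handler(self, message):
#           print "got event:", message
#
#   sub = MySubscriber(host="notify.example.com", port=80, channel="renders")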
| gpl-3.0 | 8,703,723,381,790,196,000 | 23.903226 | 66 | 0.551166 | false |
danicarrion/python-sepa | sepa/debit/core.py | 1 | 6958 | # -*- coding: utf-8 -*-
import gettext
import os
import random
import string
from datetime import datetime
from jinja2 import Environment, PackageLoader
_ = gettext.translation("sepa", os.path.join(os.path.dirname(os.path.abspath(__file__)), "../locale"), ["es"]).gettext
SEQUENCE_TYPES = ("FRST", "RCUR", "FNAL", "OOFF")
def prep_str(original_string):
final_string = ""
for char in original_string or "":
if char == "á" or char == "Á":
final_string += "A"
elif char == "é" or char == "É":
final_string += "E"
elif char == "í" or char == "Í":
final_string += "I"
elif char == "ó" or char == "Ó":
final_string += "O"
elif char == "ú" or char == "Ú":
final_string += "U"
elif char == "ü" or char == "Ü":
final_string += "U"
elif char == "ñ" or char == "Ñ":
final_string += "N"
elif char == "º" or char == "ª":
continue
elif char == "ç" or char == "Ç":
final_string += "C"
else:
final_string += char.upper()
return final_string
class Payment(object):
total_amount = 0
total_invoices = 0
errors = []
def __init__(self, company, invoices, name_map=None, backend="django"):
self.company = company
self.invoices = invoices
self.name_map = name_map
self.backend = backend
self.current_time = datetime.now()
self.env = Environment(loader=PackageLoader('sepa', 'templates'))
self.template = self.env.get_template('core.xml')
def get_key(self, name):
if self.name_map is not None and name in self.name_map:
return self.name_map[name]
else:
return name
def get_value(self, model, name):
name = self.get_key(name)
for model_name in name.split(".")[:-1]:
model = getattr(model, model_name)
return getattr(model, name, None)
def append_payment_info(self, invoices, payment_infos, sequence_type):
if len(invoices) == 0:
return
payment_info = {
"seq_tp": sequence_type,
"pmt_inf_id": "".join(random.choice(string.ascii_uppercase + string.digits) for x in range(35)),
"pmt_mtd": "DD",
"cd__sl": "SEPA",
"cd__li": "CORE",
"reqd_colltn_dt": self.current_time.strftime("%Y-%m-%d"), # Add 2 days?
"creditor_scheme_id": self.company.creditor_scheme_id,
"creditor_scheme_property": "SEPA",
}
total_amount = 0
total_invoices = 0
payment_info["transaction_infos"] = []
for invoice in invoices:
transaction_info = {}
debtor = prep_str(self.get_value(invoice, "debtor"))
if not debtor:
if self.backend == "django":
self.errors.append("%s: %s" % (invoice.__str__(), _("Invoice without debtor.")))
else:
self.errors.append(_("Invoice without debtor."))
continue
amount = self.get_value(invoice, "amount")
if amount is None:
self.errors.append(u"%s: %s" % (debtor, _("Invalid amount.")))
continue
total_amount += amount
iban = self.get_value(invoice, "iban")
if not iban:
self.errors.append(u"%s: %s" % (debtor, _("Invalid IBAN.")))
continue
bic = self.get_value(invoice, "bic")
remittance_information = prep_str(self.get_value(invoice, "remittance_information"))
if not remittance_information:
self.errors.append(u"%s: %s" % (debtor, _("Invalid remittance information.")))
continue
mandate_reference = prep_str(self.get_value(invoice, "mandate_reference"))
if not mandate_reference:
self.errors.append(u"%s: %s" % (debtor, _("Invalid mandate reference.")))
continue
mandate_date_of_signature = self.get_value(invoice, "mandate_date_of_signature")
if not mandate_date_of_signature:
self.errors.append(u"%s: %s" % (debtor, _("Invalid mandate's date of signature.")))
continue
transaction_info["end_to_end_id"] = "".join(random.choice(string.ascii_uppercase + string.digits) for x in range(35))
transaction_info["instd_amt"] = "%.02f" % amount
transaction_info["nm"] = debtor
transaction_info["iban"] = iban
transaction_info["bic"] = bic
transaction_info["ustrd"] = remittance_information
transaction_info["mndt_id"] = mandate_reference
if mandate_date_of_signature is not None:
transaction_info["dt_of_sgntr"] = mandate_date_of_signature.strftime("%Y-%m-%d")
payment_info["transaction_infos"].append(transaction_info)
total_invoices += 1
payment_info["ctrl_sum"] = total_amount
payment_info["nb_of_txs"] = total_invoices
self.total_amount += total_amount
self.total_invoices += total_invoices
payment_infos.append(payment_info)
def filter_invoices_by_sequence_type(self, sequence_type):
if self.backend == "django":
return self.invoices.filter(**{self.get_key("sequence_type"): sequence_type})
else:
return self.invoices # ignore sequence_type for testing only (example.py)
def render_xml(self):
self.errors = []
context = {}
context["payment_infos"] = []
for sequence_type in SEQUENCE_TYPES:
self.append_payment_info(self.filter_invoices_by_sequence_type(sequence_type), context["payment_infos"],
sequence_type)
# Header group definition
context["msg_id"] = "".join(random.choice(string.ascii_uppercase + string.digits) for x in range(35))
context["cre_dt_tm"] = self.current_time.strftime("%Y-%m-%dT%H:%M:%S")
context["ctrl_sum"] = "%.02f" % self.total_amount
context["nb_of_txs"] = str(self.total_invoices)
nm = prep_str(self.get_value(self.company, "name"))
if not nm:
self.errors.append(_("Invalid company name."))
vatin = prep_str(self.get_value(self.company, "vatin"))
if not vatin:
self.errors.append(_("Invalid company VATIN."))
iban = self.get_value(self.company, "iban")
if not iban:
self.errors.append(_("Invalid company IBAN."))
bic = self.get_value(self.company, "bic")
if not bic:
self.errors.append(_("Invalid company BIC."))
context["nm"] = nm
context["vatin"] = vatin
context["iban"] = iban
context["bic"] = bic
return self.template.render(**context)
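# A minimal usage sketch (illustrative only): the ``company`` and ``invoice``
# objects and the field names used in ``name_map`` below are assumptions, not
# part of this module. Any backend other than "django" skips the
# sequence-type filtering, as in example.py.
#
#   payment = Payment(company, invoices,
#                     name_map={"debtor": "customer.name",
#                               "amount": "total_due"},
#                     backend="plain")
#   xml = payment.render_xml()
#   for error in payment.errors:
#       print(error)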
| mit | 8,724,976,275,508,185,000 | 35.145833 | 129 | 0.549712 | false |
GheRivero/ansible | lib/ansible/modules/web_infrastructure/rundeck_project.py | 99 | 6314 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module to manage rundeck projects
# (c) 2017, Loic Blot <[email protected]>
# Sponsored by Infopro Digital. http://www.infopro-digital.com/
# Sponsored by E.T.A.I. http://www.etai.fr/
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rundeck_project
short_description: Manage Rundeck projects.
description:
- Create and remove Rundeck projects through HTTP API.
version_added: "2.4"
author: "Loic Blot (@nerzhul)"
options:
state:
description:
- Create or remove Rundeck project.
choices: ['present', 'absent']
default: 'present'
name:
description:
- Sets the project name.
required: True
url:
description:
- Sets the rundeck instance URL.
required: True
api_version:
description:
- Sets the API version used by module.
- API version must be at least 14.
default: 14
token:
description:
- Sets the token to authenticate against Rundeck API.
required: True
'''
EXAMPLES = '''
- name: Create a rundeck project
rundeck_project:
name: "Project_01"
api_version: 18
url: "https://rundeck.example.org"
token: "mytoken"
state: present
- name: Remove a rundeck project
rundeck_project:
name: "Project_02"
url: "https://rundeck.example.org"
token: "mytoken"
state: absent
'''
RETURN = '''
rundeck_response:
description: Rundeck response when a failure occurs
returned: failed
type: string
before:
    description: dictionary containing project information before modification
returned: success
type: dict
after:
    description: dictionary containing project information after modification
returned: success
type: dict
'''
# import module snippets
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import fetch_url
import json
class RundeckProjectManager(object):
def __init__(self, module):
self.module = module
def handle_http_code_if_needed(self, infos):
if infos["status"] == 403:
self.module.fail_json(msg="Token not allowed. Please ensure token is allowed or has the correct "
"permissions.", rundeck_response=infos["body"])
elif infos["status"] >= 500:
self.module.fail_json(msg="Fatal Rundeck API error.", rundeck_response=infos["body"])
def request_rundeck_api(self, query, data=None, method="GET"):
resp, info = fetch_url(self.module,
"%s/api/%d/%s" % (self.module.params["url"], self.module.params["api_version"], query),
data=json.dumps(data),
method=method,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
"X-Rundeck-Auth-Token": self.module.params["token"]
})
self.handle_http_code_if_needed(info)
if resp is not None:
resp = resp.read()
if resp != "":
try:
json_resp = json.loads(resp)
return json_resp, info
except ValueError as e:
self.module.fail_json(msg="Rundeck response was not a valid JSON. Exception was: %s. "
"Object was: %s" % (to_native(e), resp))
return resp, info
def get_project_facts(self):
resp, info = self.request_rundeck_api("project/%s" % self.module.params["name"])
return resp
def create_or_update_project(self):
facts = self.get_project_facts()
if facts is None:
# If in check mode don't create project, simulate a fake project creation
if self.module.check_mode:
self.module.exit_json(changed=True, before={}, after={"name": self.module.params["name"]})
resp, info = self.request_rundeck_api("projects", method="POST", data={
"name": self.module.params["name"],
"config": {}
})
if info["status"] == 201:
self.module.exit_json(changed=True, before={}, after=self.get_project_facts())
else:
self.module.fail_json(msg="Unhandled HTTP status %d, please report the bug" % info["status"],
before={}, after=self.get_project_facts())
else:
self.module.exit_json(changed=False, before=facts, after=facts)
def remove_project(self):
facts = self.get_project_facts()
if facts is None:
self.module.exit_json(changed=False, before={}, after={})
else:
# If not in check mode, remove the project
if not self.module.check_mode:
self.request_rundeck_api("project/%s" % self.module.params["name"], method="DELETE")
self.module.exit_json(changed=True, before=facts, after={})
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(type='str', choices=['present', 'absent'], default='present'),
name=dict(required=True, type='str'),
url=dict(required=True, type='str'),
api_version=dict(type='int', default=14),
token=dict(required=True, type='str', no_log=True),
),
supports_check_mode=True
)
if module.params["api_version"] < 14:
module.fail_json(msg="API version should be at least 14")
rundeck = RundeckProjectManager(module)
if module.params['state'] == 'present':
rundeck.create_or_update_project()
elif module.params['state'] == 'absent':
rundeck.remove_project()
if __name__ == '__main__':
main()
| gpl-3.0 | -1,655,150,778,085,865,200 | 33.315217 | 118 | 0.575863 | false |
mulby/edx-idea | edx/idea/workflow.py | 1 | 1760 |
from edx.idea.common.identifier import generate_uuid
class Workflow(object):
def __init__(self, phases=None, name=None):
self.phases = phases or []
self.name = name or ('workflow_' + generate_uuid())
def __repr__(self):
return 'Workflow(phases={0}, name={1})'.format(
repr(self.phases),
repr(self.name)
)
def __str__(self):
return 'Workflow[{}]'.format(self.name)
@staticmethod
def from_struct(struct):
phases = [Phase.from_struct(p) for p in struct['phases']]
return Workflow(phases=phases, name=struct.get('name'))
class Phase(object):
def __init__(self, tasks=None, name=None):
self.tasks = tasks or []
self.name = name or ('phase_' + generate_uuid())
def __repr__(self):
return 'Phase(tasks={0}, name={1})'.format(
repr(self.tasks),
repr(self.name)
)
def __str__(self):
return 'Phase[{}]'.format(self.name)
@staticmethod
def from_struct(struct):
tasks = [Task.from_struct(p) for p in struct['tasks']]
return Phase(tasks=tasks, name=struct.get('name'))
class Task(object):
def __init__(self, path, args=None, name=None):
self.path = path
self.args = args or []
self.name = name or ('task_' + generate_uuid())
def __repr__(self):
return 'Task(path={0}, args={1}, name={2})'.format(
repr(self.path),
repr(self.args),
repr(self.name)
)
def __str__(self):
return 'Task[{0}:{1}]'.format(self.name, self.path)
@staticmethod
def from_struct(struct):
return Task(path=struct['path'], args=struct.get('args'), name=struct.get('name'))
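# A minimal sketch of building a workflow from a plain dict (the task path and
# arguments below are made-up values, not part of this module):
#
#   wf = Workflow.from_struct({
#       'name': 'nightly-report',
#       'phases': [
#           {'tasks': [{'path': 's3://bucket/compute.py', 'args': ['--all']}]},
#       ],
#   })
#   print(wf)                        # -> Workflow[nightly-report]
#   print(wf.phases[0].tasks[0].path)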
| agpl-3.0 | -6,575,030,221,146,370,000 | 25.666667 | 90 | 0.553977 | false |
OsirisSPS/osiris-sps | client/share/plugins/AF9A4C281070FDB0F34CF417CDB168AB38C8A388/lib/gzip.py | 78 | 18226 | """Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import io
import __builtin__
__all__ = ["GzipFile","open"]
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2
def write32u(output, value):
# The L format writes the bit pattern correctly whether signed
# or unsigned.
output.write(struct.pack("<L", value))
def read32(input):
return struct.unpack("<I", input.read(4))[0]
def open(filename, mode="rb", compresslevel=9):
"""Shorthand for GzipFile(filename, mode, compresslevel).
The filename argument is required; mode defaults to 'rb'
and compresslevel defaults to 9.
"""
return GzipFile(filename, mode, compresslevel)
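# A short round-trip example from the caller's side (the path is arbitrary):
#
#   import gzip
#   f = gzip.open('/tmp/example.txt.gz', 'wb')
#   f.write('hello world\n')
#   f.close()
#   assert gzip.open('/tmp/example.txt.gz', 'rb').read() == 'hello world\n'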
class GzipFile(io.BufferedIOBase):
"""The GzipFile class simulates most of the methods of a file object with
the exception of the readinto() and truncate() methods.
"""
myfileobj = None
max_read_chunk = 10 * 1024 * 1024 # 10Mb
def __init__(self, filename=None, mode=None,
compresslevel=9, fileobj=None, mtime=None):
"""Constructor for the GzipFile class.
At least one of fileobj and filename must be given a
non-trivial value.
The new class instance is based on fileobj, which can be a regular
file, a StringIO object, or any other object which simulates a file.
It defaults to None, in which case filename is opened to provide
a file object.
When fileobj is not None, the filename argument is only used to be
    included in the gzip file header, which may include the original
filename of the uncompressed file. It defaults to the filename of
fileobj, if discernible; otherwise, it defaults to the empty string,
and in this case the original filename is not included in the header.
The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
depending on whether the file will be read or written. The default
is the mode of fileobj if discernible; otherwise, the default is 'rb'.
Be aware that only the 'rb', 'ab', and 'wb' values should be used
for cross-platform portability.
The compresslevel argument is an integer from 1 to 9 controlling the
level of compression; 1 is fastest and produces the least compression,
and 9 is slowest and produces the most compression. The default is 9.
The mtime argument is an optional numeric timestamp to be written
to the stream when compressing. All gzip compressed streams
are required to contain a timestamp. If omitted or None, the
current time is used. This module ignores the timestamp when
decompressing; however, some programs, such as gunzip, make use
of it. The format of the timestamp is the same as that of the
return value of time.time() and of the st_mtime member of the
object returned by os.stat().
"""
# guarantee the file is opened in binary mode on platforms
# that care about that sort of thing
if mode and 'b' not in mode:
mode += 'b'
if fileobj is None:
fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
if filename is None:
if hasattr(fileobj, 'name'): filename = fileobj.name
else: filename = ''
if mode is None:
if hasattr(fileobj, 'mode'): mode = fileobj.mode
else: mode = 'rb'
if mode[0:1] == 'r':
self.mode = READ
# Set flag indicating start of a new member
self._new_member = True
# Buffer data read from gzip file. extrastart is offset in
# stream where buffer starts. extrasize is number of
# bytes remaining in buffer from current stream position.
self.extrabuf = ""
self.extrasize = 0
self.extrastart = 0
self.name = filename
# Starts small, scales exponentially
self.min_readsize = 100
elif mode[0:1] == 'w' or mode[0:1] == 'a':
self.mode = WRITE
self._init_write(filename)
self.compress = zlib.compressobj(compresslevel,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
0)
else:
raise IOError, "Mode " + mode + " not supported"
self.fileobj = fileobj
self.offset = 0
self.mtime = mtime
if self.mode == WRITE:
self._write_gzip_header()
@property
def filename(self):
import warnings
warnings.warn("use the name attribute", DeprecationWarning, 2)
if self.mode == WRITE and self.name[-3:] != ".gz":
return self.name + ".gz"
return self.name
def __repr__(self):
s = repr(self.fileobj)
return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
def _check_closed(self):
"""Raises a ValueError if the underlying file object has been closed.
"""
if self.closed:
raise ValueError('I/O operation on closed file.')
def _init_write(self, filename):
self.name = filename
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
self.writebuf = []
self.bufsize = 0
def _write_gzip_header(self):
self.fileobj.write('\037\213') # magic header
self.fileobj.write('\010') # compression method
fname = os.path.basename(self.name)
if fname.endswith(".gz"):
fname = fname[:-3]
flags = 0
if fname:
flags = FNAME
self.fileobj.write(chr(flags))
mtime = self.mtime
if mtime is None:
mtime = time.time()
write32u(self.fileobj, long(mtime))
self.fileobj.write('\002')
self.fileobj.write('\377')
if fname:
self.fileobj.write(fname + '\000')
def _init_read(self):
self.crc = zlib.crc32("") & 0xffffffffL
self.size = 0
def _read_gzip_header(self):
magic = self.fileobj.read(2)
if magic != '\037\213':
raise IOError, 'Not a gzipped file'
method = ord( self.fileobj.read(1) )
if method != 8:
raise IOError, 'Unknown compression method'
flag = ord( self.fileobj.read(1) )
self.mtime = read32(self.fileobj)
# extraflag = self.fileobj.read(1)
# os = self.fileobj.read(1)
self.fileobj.read(2)
if flag & FEXTRA:
# Read & discard the extra field, if present
xlen = ord(self.fileobj.read(1))
xlen = xlen + 256*ord(self.fileobj.read(1))
self.fileobj.read(xlen)
if flag & FNAME:
# Read and discard a null-terminated string containing the filename
while True:
s = self.fileobj.read(1)
if not s or s=='\000':
break
if flag & FCOMMENT:
# Read and discard a null-terminated string containing a comment
while True:
s = self.fileobj.read(1)
if not s or s=='\000':
break
if flag & FHCRC:
self.fileobj.read(2) # Read & discard the 16-bit header CRC
def write(self,data):
self._check_closed()
if self.mode != WRITE:
import errno
raise IOError(errno.EBADF, "write() on read-only GzipFile object")
if self.fileobj is None:
raise ValueError, "write() on closed GzipFile object"
# Convert data type if called by io.BufferedWriter.
if isinstance(data, memoryview):
data = data.tobytes()
if len(data) > 0:
self.size = self.size + len(data)
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
self.fileobj.write( self.compress.compress(data) )
self.offset += len(data)
return len(data)
def read(self, size=-1):
self._check_closed()
if self.mode != READ:
import errno
raise IOError(errno.EBADF, "read() on write-only GzipFile object")
if self.extrasize <= 0 and self.fileobj is None:
return ''
readsize = 1024
if size < 0: # get the whole thing
try:
while True:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
size = self.extrasize
else: # just get some more of it
try:
while size > self.extrasize:
self._read(readsize)
readsize = min(self.max_read_chunk, readsize * 2)
except EOFError:
if size > self.extrasize:
size = self.extrasize
offset = self.offset - self.extrastart
chunk = self.extrabuf[offset: offset + size]
self.extrasize = self.extrasize - size
self.offset += size
return chunk
def _unread(self, buf):
self.extrasize = len(buf) + self.extrasize
self.offset -= len(buf)
def _read(self, size=1024):
if self.fileobj is None:
raise EOFError, "Reached EOF"
if self._new_member:
# If the _new_member flag is set, we have to
# jump to the next member, if there is one.
#
# First, check if we're at the end of the file;
# if so, it's time to stop; no more members to read.
pos = self.fileobj.tell() # Save current position
self.fileobj.seek(0, 2) # Seek to end of file
if pos == self.fileobj.tell():
raise EOFError, "Reached EOF"
else:
self.fileobj.seek( pos ) # Return to original position
self._init_read()
self._read_gzip_header()
self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
self._new_member = False
# Read a chunk of data from the file
buf = self.fileobj.read(size)
# If the EOF has been reached, flush the decompression object
# and mark this object as finished.
if buf == "":
uncompress = self.decompress.flush()
self._read_eof()
self._add_read_data( uncompress )
raise EOFError, 'Reached EOF'
uncompress = self.decompress.decompress(buf)
self._add_read_data( uncompress )
if self.decompress.unused_data != "":
# Ending case: we've come to the end of a member in the file,
# so seek back to the start of the unused data, finish up
# this member, and read a new gzip header.
# (The number of bytes to seek back is the length of the unused
# data, minus 8 because _read_eof() will rewind a further 8 bytes)
self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
# Check the CRC and file size, and set the flag so we read
# a new member on the next call
self._read_eof()
self._new_member = True
def _add_read_data(self, data):
self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
offset = self.offset - self.extrastart
self.extrabuf = self.extrabuf[offset:] + data
self.extrasize = self.extrasize + len(data)
self.extrastart = self.offset
self.size = self.size + len(data)
def _read_eof(self):
# We've read to the end of the file, so we have to rewind in order
# to reread the 8 bytes containing the CRC and the file size.
# We check the that the computed CRC and size of the
# uncompressed data matches the stored values. Note that the size
# stored is the true file size mod 2**32.
self.fileobj.seek(-8, 1)
crc32 = read32(self.fileobj)
isize = read32(self.fileobj) # may exceed 2GB
if crc32 != self.crc:
raise IOError("CRC check failed %s != %s" % (hex(crc32),
hex(self.crc)))
elif isize != (self.size & 0xffffffffL):
raise IOError, "Incorrect length of data produced"
# Gzip files can be padded with zeroes and still have archives.
# Consume all zero bytes and set the file position to the first
# non-zero byte. See http://www.gzip.org/#faq8
c = "\x00"
while c == "\x00":
c = self.fileobj.read(1)
if c:
self.fileobj.seek(-1, 1)
@property
def closed(self):
return self.fileobj is None
def close(self):
if self.fileobj is None:
return
if self.mode == WRITE:
self.fileobj.write(self.compress.flush())
write32u(self.fileobj, self.crc)
# self.size may exceed 2GB, or even 4GB
write32u(self.fileobj, self.size & 0xffffffffL)
self.fileobj = None
elif self.mode == READ:
self.fileobj = None
if self.myfileobj:
self.myfileobj.close()
self.myfileobj = None
def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
self._check_closed()
if self.mode == WRITE:
# Ensure the compressor's buffer is flushed
self.fileobj.write(self.compress.flush(zlib_mode))
self.fileobj.flush()
def fileno(self):
"""Invoke the underlying file object's fileno() method.
This will raise AttributeError if the underlying file object
doesn't support fileno().
"""
return self.fileobj.fileno()
def rewind(self):
'''Return the uncompressed stream file position indicator to the
beginning of the file'''
if self.mode != READ:
raise IOError("Can't rewind in write mode")
self.fileobj.seek(0)
self._new_member = True
self.extrabuf = ""
self.extrasize = 0
self.extrastart = 0
self.offset = 0
def readable(self):
return self.mode == READ
def writable(self):
return self.mode == WRITE
def seekable(self):
return True
def seek(self, offset, whence=0):
if whence:
if whence == 1:
offset = self.offset + offset
else:
raise ValueError('Seek from end not supported')
if self.mode == WRITE:
if offset < self.offset:
raise IOError('Negative seek in write mode')
count = offset - self.offset
for i in range(count // 1024):
self.write(1024 * '\0')
self.write((count % 1024) * '\0')
elif self.mode == READ:
if offset < self.offset:
# for negative seek, rewind and do positive seek
self.rewind()
count = offset - self.offset
for i in range(count // 1024):
self.read(1024)
self.read(count % 1024)
return self.offset
def readline(self, size=-1):
if size < 0:
# Shortcut common case - newline found in buffer.
offset = self.offset - self.extrastart
i = self.extrabuf.find('\n', offset) + 1
if i > 0:
self.extrasize -= i - offset
self.offset += i - offset
return self.extrabuf[offset: i]
size = sys.maxint
readsize = self.min_readsize
else:
readsize = size
bufs = []
while size != 0:
c = self.read(readsize)
i = c.find('\n')
# We set i=size to break out of the loop under two
# conditions: 1) there's no newline, and the chunk is
# larger than size, or 2) there is a newline, but the
# resulting line would be longer than 'size'.
if (size <= i) or (i == -1 and len(c) > size):
i = size - 1
if i >= 0 or c == '':
bufs.append(c[:i + 1]) # Add portion of last chunk
self._unread(c[i + 1:]) # Push back rest of chunk
break
# Append chunk to list, decrease 'size',
bufs.append(c)
size = size - len(c)
readsize = min(size, readsize * 2)
if readsize > self.min_readsize:
self.min_readsize = min(readsize, self.min_readsize * 2, 512)
return ''.join(bufs) # Return resulting line
def _test():
# Act like gzip; with -d, act like gunzip.
# The input file is not deleted, however, nor are any other gzip
# options or features supported.
args = sys.argv[1:]
decompress = args and args[0] == "-d"
if decompress:
args = args[1:]
if not args:
args = ["-"]
for arg in args:
if decompress:
if arg == "-":
f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
g = sys.stdout
else:
if arg[-3:] != ".gz":
print "filename doesn't end in .gz:", repr(arg)
continue
f = open(arg, "rb")
g = __builtin__.open(arg[:-3], "wb")
else:
if arg == "-":
f = sys.stdin
g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
else:
f = __builtin__.open(arg, "rb")
g = open(arg + ".gz", "wb")
while True:
chunk = f.read(1024)
if not chunk:
break
g.write(chunk)
if g is not sys.stdout:
g.close()
if f is not sys.stdin:
f.close()
if __name__ == '__main__':
_test()
| gpl-3.0 | 7,786,892,701,775,080,000 | 34.807466 | 79 | 0.547295 | false |
mahak/nova | nova/policies/server_diagnostics.py | 3 | 1211 | # Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
BASE_POLICY_NAME = 'os_compute_api:os-server-diagnostics'
server_diagnostics_policies = [
policy.DocumentedRuleDefault(
name=BASE_POLICY_NAME,
check_str=base.SYSTEM_ADMIN,
description="Show the usage data for a server",
operations=[
{
'method': 'GET',
'path': '/servers/{server_id}/diagnostics'
}
],
scope_types=['system', 'project']),
]
def list_rules():
return server_diagnostics_policies
| apache-2.0 | -4,643,886,886,718,087,000 | 29.275 | 78 | 0.668043 | false |
CedricVallee/pythonFinancialAnalyst | FinancialAnalystV2/extractMDA.py | 1 | 2088 | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 02 2016
@author: Cedric Vallee
Inspired by Chong Wee Tan
"""
import os
import Helper as helper
import Scraper as scraper
from textblob import TextBlob
from bs4 import BeautifulSoup
def getMDAfromText(filename,text):
try:
soup = BeautifulSoup(text, "lxml")
fullText = scraper.scrapeByAnchorTag(soup)
if fullText is not None:
print("{0}\tScraped By Anchor".format(filename))
return fullText
fullText = scraper.scrapeByRegex(soup)
if fullText is not None:
print("{0}\tScraped By Regex".format(filename))
return fullText
if fullText is None:
print("{0}\tUnable to scrape".format(filename))
text = ''.join(soup.findAll(text=True))
text.replace("’","'")
helper.writeToDirectoryFile("debug",filename,text)
return None
except UnicodeEncodeError:
print("{0}\tUnicodeEncodeError".format(filename))
helper.writeToDirectoryFile("debug",filename,text)
return None
# Function to create a folder named 'mda' with all the MDAs extracted from reports in the 'data' folder, using the previous function [path = "../data/"]
def createMDAfiles(path):
for filename in os.listdir(path):
text = open(path + filename).read()
mdaText = getMDAfromText(filename,text)
if mdaText is not None:
helper.writeToDirectoryFile("../mda/",filename,mdaText)
# Function to extract sentiment using Textblob, on all documents present in a folder [path = "../mda/"]
def gaugePolarities(path):
for filename in os.listdir(path):
mdaText = helper.readFromFile("mda",filename)
blob = TextBlob(mdaText)
pol = blob.sentiment.polarity
        if pol > 0:
            os.rename(os.path.join(path, filename), os.path.join(path, filename + "_pos"))
        else:
            os.rename(os.path.join(path, filename), os.path.join(path, filename + "_neg"))
# Main function
createMDAfiles("../data/")
# gaugePolarities("../mda/") | mit | -5,543,405,821,135,839,000 | 34.666667 | 152 | 0.616858 | false |
csachs/openmicroscopy | components/tools/OmeroPy/src/omero/plugins/delete.py | 9 | 2773 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Startup plugin for command-line deletes
Copyright 2009-2015 Glencoe Software, Inc. All rights reserved.
Use is subject to license terms supplied in LICENSE.txt
"""
import sys
from omero.cli import CLI, GraphControl
HELP = """Delete OMERO data.
Remove entire graphs of data based on the ID of the top-node.
By default linked tag, file and term annotations are not deleted.
To delete linked annotations they must be explicitly included.
Examples:
# Delete an image but not its linked tag, file and term annotations
omero delete Image:50
# Delete an image including linked tag, file and term annotations
omero delete Image:51 --include TagAnnotation,FileAnnotation,TermAnnotation
# Delete an image including all linked annotations
omero delete Image:52 --include Annotation
# Delete three images and two datasets including their contents
    omero delete Image:101,102,103 Dataset:201,202
# Delete a project excluding contained datasets and linked annotations
omero delete Project:101 --exclude Dataset,Annotation
# Delete all images contained under a project
omero delete Project/Dataset/Image:53
# Delete all images contained under two projects
omero delete Project/Image:201,202
# Do a dry run of a delete reporting the outcome if the delete had been run
omero delete Dataset:53 --dry-run
# Do a dry run of a delete, reporting all the objects
# that would have been deleted
omero delete Dataset:53 --dry-run --report
"""
class DeleteControl(GraphControl):
def cmd_type(self):
import omero
import omero.all
return omero.cmd.Delete2
def print_detailed_report(self, req, rsp, status):
import omero
if isinstance(rsp, omero.cmd.DoAllRsp):
for response in rsp.responses:
if isinstance(response, omero.cmd.Delete2Response):
self.print_delete_response(response)
elif isinstance(rsp, omero.cmd.Delete2Response):
self.print_delete_response(rsp)
def print_delete_response(self, rsp):
if rsp.deletedObjects:
self.ctx.out("Deleted objects")
objIds = self._get_object_ids(rsp.deletedObjects)
for k in objIds:
self.ctx.out(" %s:%s" % (k, objIds[k]))
def default_exclude(self):
"""
Don't delete these three types of Annotation by default
"""
return ["TagAnnotation", "TermAnnotation", "FileAnnotation"]
try:
register("delete", DeleteControl, HELP)
except NameError:
if __name__ == "__main__":
cli = CLI()
cli.register("delete", DeleteControl, HELP)
cli.invoke(sys.argv[1:])
| gpl-2.0 | 8,406,148,015,071,239,000 | 31.244186 | 79 | 0.677605 | false |
Ichag/odoo | addons/l10n_fr_rib/__init__.py | 433 | 1046 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#    Copyright (C) 2011 Numérigraphe SARL.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import bank
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 1,136,495,127,297,405,400 | 40.8 | 78 | 0.617225 | false |
luiseduardohdbackup/odoo | addons/crm_partner_assign/crm_lead.py | 221 | 3039 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.tools.safe_eval import safe_eval as eval
class crm_lead(osv.osv):
_inherit = 'crm.lead'
def get_interested_action(self, cr, uid, interested, context=None):
try:
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'crm_partner_assign', 'crm_lead_channel_interested_act')
except ValueError:
raise osv.except_osv(_('Error!'), _("The CRM Channel Interested Action is missing"))
action = self.pool[model].read(cr, uid, [action_id], context=context)[0]
action_context = eval(action['context'])
action_context['interested'] = interested
action['context'] = str(action_context)
return action
def case_interested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, True, context=context)
def case_disinterested(self, cr, uid, ids, context=None):
return self.get_interested_action(cr, uid, False, context=context)
def assign_salesman_of_assigned_partner(self, cr, uid, ids, context=None):
salesmans_leads = {}
for lead in self.browse(cr, uid, ids, context=context):
if (lead.stage_id.probability > 0 and lead.stage_id.probability < 100) or lead.stage_id.sequence == 1:
if lead.partner_assigned_id and lead.partner_assigned_id.user_id and lead.partner_assigned_id.user_id != lead.user_id:
salesman_id = lead.partner_assigned_id.user_id.id
if salesmans_leads.get(salesman_id):
salesmans_leads[salesman_id].append(lead.id)
else:
salesmans_leads[salesman_id] = [lead.id]
for salesman_id, lead_ids in salesmans_leads.items():
salesteam_id = self.on_change_user(cr, uid, lead_ids, salesman_id, context=None)['value'].get('section_id')
self.write(cr, uid, lead_ids, {'user_id': salesman_id, 'section_id': salesteam_id}, context=context)
| agpl-3.0 | -4,883,371,584,449,180,000 | 50.508475 | 148 | 0.624218 | false |
lidavidm/sympy | sympy/physics/quantum/piab.py | 124 | 1756 | """1D quantum particle in a box."""
from __future__ import print_function, division
from sympy import Symbol, pi, sqrt, sin, Interval, S
from sympy.physics.quantum.operator import HermitianOperator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.constants import hbar
from sympy.functions.special.tensor_functions import KroneckerDelta
from sympy.physics.quantum.hilbert import L2
m = Symbol('m')
L = Symbol('L')
__all__ = [
'PIABHamiltonian',
'PIABKet',
'PIABBra'
]
class PIABHamiltonian(HermitianOperator):
"""Particle in a box Hamiltonian operator."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity, S.Infinity))
def _apply_operator_PIABKet(self, ket, **options):
n = ket.label[0]
return (n**2*pi**2*hbar**2)/(2*m*L**2)*ket
class PIABKet(Ket):
"""Particle in a box eigenket."""
@classmethod
def _eval_hilbert_space(cls, args):
return L2(Interval(S.NegativeInfinity, S.Infinity))
@classmethod
def dual_class(self):
return PIABBra
def _represent_default_basis(self, **options):
return self._represent_XOp(None, **options)
def _represent_XOp(self, basis, **options):
x = Symbol('x')
n = Symbol('n')
subs_info = options.get('subs', {})
return sqrt(2/L)*sin(n*pi*x/L).subs(subs_info)
def _eval_innerproduct_PIABBra(self, bra):
return KroneckerDelta(bra.label[0], self.label[0])
class PIABBra(Bra):
"""Particle in a box eigenbra."""
@classmethod
def _eval_hilbert_space(cls, label):
return L2(Interval(S.NegativeInfinity, S.Infinity))
@classmethod
def dual_class(self):
return PIABKet
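# A short illustration (not part of the original module): applying the
# Hamiltonian to an eigenket reproduces the usual particle-in-a-box energies.
#
#   from sympy.physics.quantum.qapply import qapply
#   n = Symbol('n')
#   qapply(PIABHamiltonian('H')*PIABKet(n))
#   # -> (n**2*pi**2*hbar**2)/(2*m*L**2) * |n>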
| bsd-3-clause | 8,298,416,204,019,598,000 | 24.449275 | 67 | 0.657745 | false |
s20121035/rk3288_android5.1_repo | external/wpa_supplicant_8/wpa_supplicant/examples/wpas-test.py | 189 | 2571 | #!/usr/bin/python
import dbus
import sys, os
import time
WPAS_DBUS_SERVICE = "fi.epitest.hostap.WPASupplicant"
WPAS_DBUS_INTERFACE = "fi.epitest.hostap.WPASupplicant"
WPAS_DBUS_OPATH = "/fi/epitest/hostap/WPASupplicant"
WPAS_DBUS_INTERFACES_INTERFACE = "fi.epitest.hostap.WPASupplicant.Interface"
WPAS_DBUS_INTERFACES_OPATH = "/fi/epitest/hostap/WPASupplicant/Interfaces"
WPAS_DBUS_BSSID_INTERFACE = "fi.epitest.hostap.WPASupplicant.BSSID"
def byte_array_to_string(s):
import urllib
r = ""
for c in s:
if c >= 32 and c < 127:
r += "%c" % c
else:
r += urllib.quote(chr(c))
return r
def main():
if len(sys.argv) != 2:
print "Usage: wpas-test.py <interface>"
os._exit(1)
ifname = sys.argv[1]
bus = dbus.SystemBus()
wpas_obj = bus.get_object(WPAS_DBUS_SERVICE, WPAS_DBUS_OPATH)
wpas = dbus.Interface(wpas_obj, WPAS_DBUS_INTERFACE)
# See if wpa_supplicant already knows about this interface
path = None
try:
path = wpas.getInterface(ifname)
except dbus.dbus_bindings.DBusException, exc:
if str(exc) != "wpa_supplicant knows nothing about this interface.":
raise exc
try:
path = wpas.addInterface(ifname, {'driver': dbus.Variant('wext')})
except dbus.dbus_bindings.DBusException, exc:
if str(exc) != "wpa_supplicant already controls this interface.":
raise exc
if_obj = bus.get_object(WPAS_DBUS_SERVICE, path)
iface = dbus.Interface(if_obj, WPAS_DBUS_INTERFACES_INTERFACE)
iface.scan()
# Should really wait for the "scanResults" signal instead of sleeping
time.sleep(5)
res = iface.scanResults()
print "Scanned wireless networks:"
for opath in res:
net_obj = bus.get_object(WPAS_DBUS_SERVICE, opath)
net = dbus.Interface(net_obj, WPAS_DBUS_BSSID_INTERFACE)
props = net.properties()
# Convert the byte-array for SSID and BSSID to printable strings
bssid = ""
for item in props["bssid"]:
bssid = bssid + ":%02x" % item
bssid = bssid[1:]
ssid = byte_array_to_string(props["ssid"])
wpa = "no"
if props.has_key("wpaie"):
wpa = "yes"
wpa2 = "no"
if props.has_key("rsnie"):
wpa2 = "yes"
freq = 0
if props.has_key("frequency"):
freq = props["frequency"]
caps = props["capabilities"]
qual = props["quality"]
level = props["level"]
noise = props["noise"]
maxrate = props["maxrate"] / 1000000
print " %s :: ssid='%s' wpa=%s wpa2=%s quality=%d%% rate=%d freq=%d" % (bssid, ssid, wpa, wpa2, qual, maxrate, freq)
wpas.removeInterface(dbus.ObjectPath(path))
# Should fail here with unknown interface error
iface.scan()
if __name__ == "__main__":
main()
| gpl-3.0 | 6,988,988,108,679,617,000 | 27.252747 | 126 | 0.683392 | false |
woodpecker1/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/common/system/filesystem.py | 126 | 9517 | # Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Wrapper object for the file system / source tree."""
import codecs
import errno
import exceptions
import glob
import hashlib
import os
import shutil
import sys
import tempfile
import time
class FileSystem(object):
"""FileSystem interface for webkitpy.
Unless otherwise noted, all paths are allowed to be either absolute
or relative."""
sep = os.sep
pardir = os.pardir
def abspath(self, path):
return os.path.abspath(path)
def realpath(self, path):
return os.path.realpath(path)
def path_to_module(self, module_name):
"""A wrapper for all calls to __file__ to allow easy unit testing."""
# FIXME: This is the only use of sys in this file. It's possible this function should move elsewhere.
return sys.modules[module_name].__file__ # __file__ is always an absolute path.
def expanduser(self, path):
return os.path.expanduser(path)
def basename(self, path):
return os.path.basename(path)
def chdir(self, path):
return os.chdir(path)
def copyfile(self, source, destination):
shutil.copyfile(source, destination)
def dirname(self, path):
return os.path.dirname(path)
def exists(self, path):
return os.path.exists(path)
def files_under(self, path, dirs_to_skip=[], file_filter=None):
"""Return the list of all files under the given path in topdown order.
Args:
dirs_to_skip: a list of directories to skip over during the
traversal (e.g., .svn, resources, etc.)
file_filter: if not None, the filter will be invoked
with the filesystem object and the dirname and basename of
each file found. The file is included in the result if the
callback returns True.
"""
def filter_all(fs, dirpath, basename):
return True
file_filter = file_filter or filter_all
files = []
if self.isfile(path):
if file_filter(self, self.dirname(path), self.basename(path)):
files.append(path)
return files
if self.basename(path) in dirs_to_skip:
return []
for (dirpath, dirnames, filenames) in os.walk(path):
for d in dirs_to_skip:
if d in dirnames:
dirnames.remove(d)
for filename in filenames:
if file_filter(self, dirpath, filename):
files.append(self.join(dirpath, filename))
return files
def getcwd(self):
return os.getcwd()
def glob(self, path):
return glob.glob(path)
def isabs(self, path):
return os.path.isabs(path)
def isfile(self, path):
return os.path.isfile(path)
def isdir(self, path):
return os.path.isdir(path)
def join(self, *comps):
return os.path.join(*comps)
def listdir(self, path):
return os.listdir(path)
def mkdtemp(self, **kwargs):
"""Create and return a uniquely named directory.
This is like tempfile.mkdtemp, but if used in a with statement
the directory will self-delete at the end of the block (if the
directory is empty; non-empty directories raise errors). The
directory can be safely deleted inside the block as well, if so
desired.
Note that the object returned is not a string and does not support all of the string
methods. If you need a string, coerce the object to a string and go from there.
"""
class TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = tempfile.mkdtemp(**self._kwargs)
def __str__(self):
return self._directory_path
def __enter__(self):
return self._directory_path
def __exit__(self, type, value, traceback):
# Only self-delete if necessary.
# FIXME: Should we delete non-empty directories?
if os.path.exists(self._directory_path):
os.rmdir(self._directory_path)
return TemporaryDirectory(**kwargs)
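    # A brief illustration of the self-deleting behaviour described above
    # (the suffix and file name are arbitrary examples):
    #
    #   fs = FileSystem()
    #   with fs.mkdtemp(suffix='-webkitpy') as d:
    #       fs.write_text_file(fs.join(d, 'note.txt'), u'hi')
    #       fs.remove(fs.join(d, 'note.txt'))
    #   # the now-empty directory is removed on exit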
def maybe_make_directory(self, *path):
"""Create the specified directory if it doesn't already exist."""
try:
os.makedirs(self.join(*path))
except OSError, e:
if e.errno != errno.EEXIST:
raise
def move(self, source, destination):
shutil.move(source, destination)
def mtime(self, path):
return os.stat(path).st_mtime
def normpath(self, path):
return os.path.normpath(path)
def open_binary_tempfile(self, suffix):
"""Create, open, and return a binary temp file. Returns a tuple of the file and the name."""
temp_fd, temp_name = tempfile.mkstemp(suffix)
f = os.fdopen(temp_fd, 'wb')
return f, temp_name
def open_binary_file_for_reading(self, path):
return codecs.open(path, 'rb')
def read_binary_file(self, path):
"""Return the contents of the file at the given path as a byte string."""
with file(path, 'rb') as f:
return f.read()
def write_binary_file(self, path, contents):
with file(path, 'wb') as f:
f.write(contents)
def open_text_file_for_reading(self, path):
# Note: There appears to be an issue with the returned file objects
# not being seekable. See http://stackoverflow.com/questions/1510188/can-seek-and-tell-work-with-utf-8-encoded-documents-in-python .
return codecs.open(path, 'r', 'utf8')
def open_text_file_for_writing(self, path):
return codecs.open(path, 'w', 'utf8')
def read_text_file(self, path):
"""Return the contents of the file at the given path as a Unicode string.
The file is read assuming it is a UTF-8 encoded file with no BOM."""
with codecs.open(path, 'r', 'utf8') as f:
return f.read()
def write_text_file(self, path, contents):
"""Write the contents to the file at the given location.
The file is written encoded as UTF-8 with no BOM."""
with codecs.open(path, 'w', 'utf8') as f:
f.write(contents)
def sha1(self, path):
contents = self.read_binary_file(path)
return hashlib.sha1(contents).hexdigest()
def relpath(self, path, start='.'):
return os.path.relpath(path, start)
class _WindowsError(exceptions.OSError):
"""Fake exception for Linux and Mac."""
pass
def remove(self, path, osremove=os.remove):
"""On Windows, if a process was recently killed and it held on to a
file, the OS will hold on to the file for a short while. This makes
attempts to delete the file fail. To work around that, this method
will retry for a few seconds until Windows is done with the file."""
try:
exceptions.WindowsError
except AttributeError:
exceptions.WindowsError = FileSystem._WindowsError
retry_timeout_sec = 3.0
sleep_interval = 0.1
while True:
try:
osremove(path)
return True
except exceptions.WindowsError, e:
time.sleep(sleep_interval)
retry_timeout_sec -= sleep_interval
if retry_timeout_sec < 0:
raise e
def rmtree(self, path):
"""Delete the directory rooted at path, whether empty or not."""
shutil.rmtree(path, ignore_errors=True)
def copytree(self, source, destination):
shutil.copytree(source, destination)
def split(self, path):
"""Return (dirname, basename + '.' + ext)"""
return os.path.split(path)
def splitext(self, path):
"""Return (dirname + os.sep + basename, '.' + ext)"""
return os.path.splitext(path)
| bsd-3-clause | 6,424,022,748,345,570,000 | 34.379182 | 140 | 0.630241 | false |
pjdelport/django-devserver | devserver/modules/request.py | 13 | 2777 | import urllib
from devserver.modules import DevServerModule
class SessionInfoModule(DevServerModule):
"""
Displays information about the currently authenticated user and session.
"""
logger_name = 'session'
def process_request(self, request):
self.has_session = bool(getattr(request, 'session', False))
        if self.has_session:
self._save = request.session.save
self.session = request.session
request.session.save = self.handle_session_save
def process_response(self, request, response):
if getattr(self, 'has_session', False):
if getattr(request, 'user', None) and request.user.is_authenticated():
user = '%s (id:%s)' % (request.user.username, request.user.pk)
else:
user = '(Anonymous)'
self.logger.info('Session %s authenticated by %s', request.session.session_key, user)
request.session.save = self._save
self._save = None
self.session = None
self.has_session = False
def handle_session_save(self, *args, **kwargs):
self._save(*args, **kwargs)
self.logger.info('Session %s has been saved.', self.session.session_key)
class RequestDumpModule(DevServerModule):
"""
Dumps the request headers and variables.
"""
logger_name = 'request'
def process_request(self, request):
req = self.logger.style.SQL_KEYWORD('%s %s %s\n' % (request.method, '?'.join((request.META['PATH_INFO'], request.META['QUERY_STRING'])), request.META['SERVER_PROTOCOL']))
for var, val in request.META.items():
if var.startswith('HTTP_'):
var = var[5:].replace('_', '-').title()
req += '%s: %s\n' % (self.logger.style.SQL_KEYWORD(var), val)
if request.META['CONTENT_LENGTH']:
req += '%s: %s\n' % (self.logger.style.SQL_KEYWORD('Content-Length'), request.META['CONTENT_LENGTH'])
if request.POST:
req += '\n%s\n' % self.logger.style.HTTP_INFO(urllib.urlencode(dict((k, v.encode('utf8')) for k, v in request.POST.items())))
if request.FILES:
req += '\n%s\n' % self.logger.style.HTTP_NOT_MODIFIED(urllib.urlencode(request.FILES))
self.logger.info('Full request:\n%s', req)
class ResponseDumpModule(DevServerModule):
"""
    Dumps the response status code and headers.
"""
logger_name = 'response'
def process_response(self, request, response):
res = self.logger.style.SQL_FIELD('Status code: %s\n' % response.status_code)
res += '\n'.join(['%s: %s' % (self.logger.style.SQL_FIELD(k), v)
for k, v in response._headers.values()])
self.logger.info('Full response:\n%s', res)
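# These classes are activated through the DEVSERVER_MODULES setting in
# settings.py (setting name per django-devserver's README); a sketch, where
# the particular selection is just an example:
#
#   DEVSERVER_MODULES = (
#       'devserver.modules.request.SessionInfoModule',
#       'devserver.modules.request.RequestDumpModule',
#       'devserver.modules.request.ResponseDumpModule',
#   )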
| bsd-3-clause | 5,146,468,413,408,474,000 | 39.246377 | 178 | 0.60641 | false |
af1rst/bite-project | deps/gdata-python-client/src/gdata/tlslite/Session.py | 359 | 4733 | """Class representing a TLS session."""
from utils.compat import *
from mathtls import *
from constants import *
class Session:
"""
This class represents a TLS session.
TLS distinguishes between connections and sessions. A new
handshake creates both a connection and a session. Data is
transmitted over the connection.
The session contains a more permanent record of the handshake. The
session can be inspected to determine handshake results. The
session can also be used to create a new connection through
"session resumption". If the client and server both support this,
they can create a new connection based on an old session without
the overhead of a full handshake.
The session for a L{tlslite.TLSConnection.TLSConnection} can be
retrieved from the connection's 'session' attribute.
@type srpUsername: str
@ivar srpUsername: The client's SRP username (or None).
@type sharedKeyUsername: str
@ivar sharedKeyUsername: The client's shared-key username (or
None).
@type clientCertChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@ivar clientCertChain: The client's certificate chain (or None).
@type serverCertChain: L{tlslite.X509CertChain.X509CertChain} or
L{cryptoIDlib.CertChain.CertChain}
@ivar serverCertChain: The server's certificate chain (or None).
"""
def __init__(self):
self.masterSecret = createByteArraySequence([])
self.sessionID = createByteArraySequence([])
self.cipherSuite = 0
self.srpUsername = None
self.sharedKeyUsername = None
self.clientCertChain = None
self.serverCertChain = None
self.resumable = False
self.sharedKey = False
def _clone(self):
other = Session()
other.masterSecret = self.masterSecret
other.sessionID = self.sessionID
other.cipherSuite = self.cipherSuite
other.srpUsername = self.srpUsername
other.sharedKeyUsername = self.sharedKeyUsername
other.clientCertChain = self.clientCertChain
other.serverCertChain = self.serverCertChain
other.resumable = self.resumable
other.sharedKey = self.sharedKey
return other
def _calcMasterSecret(self, version, premasterSecret, clientRandom,
serverRandom):
if version == (3,0):
self.masterSecret = PRF_SSL(premasterSecret,
concatArrays(clientRandom, serverRandom), 48)
elif version in ((3,1), (3,2)):
self.masterSecret = PRF(premasterSecret, "master secret",
concatArrays(clientRandom, serverRandom), 48)
else:
raise AssertionError()
def valid(self):
"""If this session can be used for session resumption.
@rtype: bool
@return: If this session can be used for session resumption.
"""
return self.resumable or self.sharedKey
def _setResumable(self, boolean):
#Only let it be set if this isn't a shared key
if not self.sharedKey:
#Only let it be set to True if the sessionID is non-null
if (not boolean) or (boolean and self.sessionID):
self.resumable = boolean
def getCipherName(self):
"""Get the name of the cipher used with this connection.
@rtype: str
@return: The name of the cipher used with this connection.
Either 'aes128', 'aes256', 'rc4', or '3des'.
"""
if self.cipherSuite in CipherSuite.aes128Suites:
return "aes128"
elif self.cipherSuite in CipherSuite.aes256Suites:
return "aes256"
elif self.cipherSuite in CipherSuite.rc4Suites:
return "rc4"
elif self.cipherSuite in CipherSuite.tripleDESSuites:
return "3des"
else:
return None
def _createSharedKey(self, sharedKeyUsername, sharedKey):
if len(sharedKeyUsername)>16:
raise ValueError()
if len(sharedKey)>47:
raise ValueError()
self.sharedKeyUsername = sharedKeyUsername
self.sessionID = createByteArrayZeros(16)
for x in range(len(sharedKeyUsername)):
self.sessionID[x] = ord(sharedKeyUsername[x])
premasterSecret = createByteArrayZeros(48)
sharedKey = chr(len(sharedKey)) + sharedKey
for x in range(48):
premasterSecret[x] = ord(sharedKey[x % len(sharedKey)])
self.masterSecret = PRF(premasterSecret, "shared secret",
createByteArraySequence([]), 48)
self.sharedKey = True
return self
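# A small sketch of inspecting a finished connection's session (the
# ``connection`` object would come from tlslite's TLSConnection, not from
# this module):
#
#   session = connection.session
#   if session.valid():
#       print "cipher:", session.getCipherName()
#       print "resumable:", session.resumable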
| apache-2.0 | 2,835,819,671,686,261,000 | 35.129771 | 77 | 0.646736 | false |
KnowNo/reviewboard | reviewboard/ssh/storage.py | 8 | 8054 | from __future__ import unicode_literals
import logging
import os
from django.utils.translation import ugettext_lazy as _
import paramiko
from reviewboard.ssh.errors import MakeSSHDirError, UnsupportedSSHKeyError
class SSHStorage(object):
def __init__(self, namespace=None):
self.namespace = namespace
def read_user_key(self):
"""Reads the user key.
        This will return an instance of :py:class:`paramiko.PKey` representing
the user key, if one exists. Otherwise, it will return None.
"""
raise NotImplementedError
def write_user_key(self, key):
"""Writes a user key.
The user key will be stored, and can be accessed later by
read_user_key.
This will raise UnsupportedSSHKeyError if ``key`` isn't a
        :py:class:`paramiko.RSAKey` or :py:class:`paramiko.DSSKey`.
        It may also raise :py:class:`paramiko.SSHException` for key-related
errors.
"""
raise NotImplementedError
def delete_user_key(self, key):
"""Deletes a user key.
The user key, if it exists, will be removed from storage.
If no user key exists, this will do nothing.
"""
raise NotImplementedError
def read_authorized_keys(self):
"""Reads a list of authorized keys.
The authorized keys are returned as a list of raw key data, which
can then be converted into classes as needed.
"""
raise NotImplementedError
def read_host_keys(self):
"""Reads a list of known host keys.
        The known host keys are returned as a list of raw key data, which
can then be converted into classes as needed.
"""
raise NotImplementedError
def add_host_key(self, hostname, key):
"""Adds a known key for a given host.
This will store a mapping of the key and hostname so that future
access to the server will know the host is legitimate.
"""
raise NotImplementedError
def replace_host_key(self, hostname, old_key, new_key):
"""Replaces a host key in the known hosts list with another.
This is used for replacing host keys that have changed.
"""
raise NotImplementedError
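# A minimal sketch of how a concrete backend is used (the generated key is an
# illustrative value; FileSSHStorage is defined below):
#
#   import paramiko
#   storage = FileSSHStorage()
#   if storage.read_user_key() is None:
#       storage.write_user_key(paramiko.RSAKey.generate(2048))
#   for line in storage.read_host_keys():
#       print line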
class FileSSHStorage(SSHStorage):
DEFAULT_KEY_FILES = (
(paramiko.RSAKey, 'id_rsa'),
(paramiko.DSSKey, 'id_dsa'),
)
SSH_DIRS = ('.ssh', 'ssh')
_ssh_dir = None
def get_user_key_info(self):
for cls, filename in self.DEFAULT_KEY_FILES:
# Paramiko looks in ~/.ssh and ~/ssh, depending on the platform,
# so check both.
for sshdir in self.SSH_DIRS:
path = os.path.join(self.get_ssh_dir(sshdir), filename)
if os.path.isfile(path):
return cls, path
return None, None
def read_user_key(self):
cls, path = self.get_user_key_info()
if path:
return cls.from_private_key_file(path)
return None
def write_user_key(self, key):
key_filename = None
for cls, filename in self.DEFAULT_KEY_FILES:
if isinstance(key, cls):
key_filename = filename
if not key_filename:
raise UnsupportedSSHKeyError()
sshdir = self.ensure_ssh_dir()
filename = os.path.join(sshdir, key_filename)
key.write_private_key_file(filename)
def delete_user_key(self):
cls, path = self.get_user_key_info()
if path:
# Allow any exceptions to bubble up.
os.unlink(path)
def read_authorized_keys(self):
filename = os.path.join(self.get_ssh_dir(), 'authorized_keys')
try:
fp = open(filename, 'r')
lines = fp.readlines()
fp.close()
return lines
except IOError as e:
logging.warning('Unable to read SSH authorized_keys file %s: %s'
% (filename, e))
raise
def read_host_keys(self):
filename = self.get_host_keys_filename()
lines = []
if os.path.exists(filename):
try:
with open(filename, 'r') as f:
for line in f:
line = line.strip()
if line and line[0] != '#':
lines.append(line)
except IOError as e:
logging.error('Unable to read host keys file %s: %s'
% (filename, e))
return lines
def add_host_key(self, hostname, key):
self.ensure_ssh_dir()
filename = self.get_host_keys_filename()
try:
with open(filename, 'a') as fp:
fp.write('%s %s %s\n' % (hostname, key.get_name(),
key.get_base64()))
except IOError as e:
raise IOError(
_('Unable to write host keys file %(filename)s: %(error)s') % {
'filename': filename,
'error': e,
})
def replace_host_key(self, hostname, old_key, new_key):
filename = self.get_host_keys_filename()
if not os.path.exists(filename):
self.add_host_key(hostname, new_key)
return
try:
with open(filename, 'r') as fp:
lines = fp.readlines()
old_key_base64 = old_key.get_base64()
except IOError as e:
raise IOError(
_('Unable to read host keys file %(filename)s: %(error)s') % {
'filename': filename,
'error': e,
})
try:
with open(filename, 'w') as fp:
for line in lines:
parts = line.strip().split(" ")
if parts[-1] == old_key_base64:
parts[1] = new_key.get_name()
parts[-1] = new_key.get_base64()
fp.write(' '.join(parts) + '\n')
except IOError as e:
raise IOError(
_('Unable to write host keys file %(filename)s: %(error)s') % {
'filename': filename,
'error': e,
})
def get_host_keys_filename(self):
"""Returns the path to the known host keys file."""
return os.path.join(self.get_ssh_dir(), 'known_hosts')
def get_ssh_dir(self, ssh_dir_name=None):
"""Returns the path to the SSH directory on the system.
By default, this will attempt to find either a .ssh or ssh directory.
If ``ssh_dir_name`` is specified, the search will be skipped, and we'll
use that name instead.
"""
path = self._ssh_dir
if not path or ssh_dir_name:
path = os.path.expanduser('~')
if not ssh_dir_name:
ssh_dir_name = None
for name in self.SSH_DIRS:
if os.path.exists(os.path.join(path, name)):
ssh_dir_name = name
break
if not ssh_dir_name:
ssh_dir_name = self.SSH_DIRS[0]
path = os.path.join(path, ssh_dir_name)
if not ssh_dir_name:
self.__class__._ssh_dir = path
if self.namespace:
return os.path.join(path, self.namespace)
else:
return path
def ensure_ssh_dir(self):
"""Ensures the existance of the .ssh directory.
If the directory doesn't exist, it will be created.
The full path to the directory will be returned.
Callers are expected to handle any exceptions. This may raise
IOError for any problems in creating the directory.
"""
sshdir = self.get_ssh_dir()
if not os.path.exists(sshdir):
try:
os.makedirs(sshdir, 0o700)
except OSError:
raise MakeSSHDirError(sshdir)
return sshdir
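# --- Illustrative example (not part of the original Review Board module) ---
# A minimal sketch of registering a known host key with FileSSHStorage; the
# hostname and base64-encoded public key value are hypothetical.
def _example_store_host_key(hostname, base64_key):
    """Store a known host key and return the updated key list (sketch only)."""
    import base64  # local import; base64 is not otherwise used in this module
    storage = FileSSHStorage()
    key = paramiko.RSAKey(data=base64.b64decode(base64_key))
    storage.add_host_key(hostname, key)
    return storage.read_host_keys()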
| mit | 5,059,854,025,311,192,000 | 29.164794 | 79 | 0.537124 | false |
kingvuplus/enigma2 | lib/python/Components/PackageInfo.py | 43 | 12858 | import xml.sax
from Tools.Directories import crawlDirectory, resolveFilename, SCOPE_CONFIG, SCOPE_SKIN, copyfile, copytree
from Components.NimManager import nimmanager
from Components.Ipkg import IpkgComponent
from Components.config import config, configfile
from Tools.HardwareInfo import HardwareInfo
from enigma import eConsoleAppContainer, eDVBDB
import os
class InfoHandlerParseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class InfoHandler(xml.sax.ContentHandler):
def __init__(self, prerequisiteMet, directory):
self.attributes = {}
self.directory = directory
self.list = []
self.globalprerequisites = {}
self.prerequisites = {}
self.elements = []
self.validFileTypes = ["skin", "config", "services", "favourites", "package"]
self.prerequisitesMet = prerequisiteMet
self.data = ""
def printError(self, error):
raise InfoHandlerParseError, error
def startElement(self, name, attrs):
self.elements.append(name)
if name in ("hardware", "bcastsystem", "satellite", "tag", "flag"):
if not attrs.has_key("type"):
self.printError(str(name) + " tag with no type attribute")
if self.elements[-3] in ("default", "package"):
prerequisites = self.globalprerequisites
else:
prerequisites = self.prerequisites
if not prerequisites.has_key(name):
prerequisites[name] = []
prerequisites[name].append(str(attrs["type"]))
if name == "info":
self.foundTranslation = None
self.data = ""
if name == "files":
if attrs.has_key("type"):
if attrs["type"] == "directories":
self.attributes["filestype"] = "directories"
elif attrs["type"] == "package":
self.attributes["filestype"] = "package"
if name == "file":
self.prerequisites = {}
if not attrs.has_key("type"):
self.printError("file tag with no type attribute")
else:
if not attrs.has_key("name"):
self.printError("file tag with no name attribute")
else:
if not attrs.has_key("directory"):
directory = self.directory
type = attrs["type"]
if not type in self.validFileTypes:
self.printError("file tag with invalid type attribute")
else:
self.filetype = type
self.fileattrs = attrs
if name == "package":
if attrs.has_key("details"):
self.attributes["details"] = str(attrs["details"])
if attrs.has_key("name"):
self.attributes["name"] = str(attrs["name"])
if attrs.has_key("packagename"):
self.attributes["packagename"] = str(attrs["packagename"])
if attrs.has_key("packagetype"):
self.attributes["packagetype"] = str(attrs["packagetype"])
if attrs.has_key("needsRestart"):
self.attributes["needsRestart"] = str(attrs["needsRestart"])
if attrs.has_key("shortdescription"):
self.attributes["shortdescription"] = str(attrs["shortdescription"])
if name == "screenshot":
if attrs.has_key("src"):
self.attributes["screenshot"] = str(attrs["src"])
def endElement(self, name):
self.elements.pop()
if name == "file":
if len(self.prerequisites) == 0 or self.prerequisitesMet(self.prerequisites):
if not self.attributes.has_key(self.filetype):
self.attributes[self.filetype] = []
if self.fileattrs.has_key("directory"):
directory = str(self.fileattrs["directory"])
if len(directory) < 1 or directory[0] != "/":
directory = self.directory + directory
else:
directory = self.directory
self.attributes[self.filetype].append({ "name": str(self.fileattrs["name"]), "directory": directory })
if name in ( "default", "package" ):
self.list.append({"attributes": self.attributes, 'prerequisites': self.globalprerequisites})
self.attributes = {}
self.globalprerequisites = {}
def characters(self, data):
if self.elements[-1] == "author":
self.attributes["author"] = str(data)
if self.elements[-1] == "name":
self.attributes["name"] = str(data)
if self.elements[-1] == "packagename":
self.attributes["packagename"] = str(data)
if self.elements[-1] == "needsRestart":
self.attributes["needsRestart"] = str(data)
if self.elements[-1] == "shortdescription":
self.attributes["shortdescription"] = str(data)
if self.elements[-1] == "description":
self.data += data.strip()
self.attributes["description"] = str(self.data)
class PackageInfoHandler:
STATUS_WORKING = 0
STATUS_DONE = 1
STATUS_ERROR = 2
STATUS_INIT = 4
def __init__(self, statusCallback, blocking = False, neededTag = None, neededFlag = None):
self.directory = "/"
self.neededTag = neededTag
self.neededFlag = neededFlag
		# Caution: blocking should only be used if further execution in enigma2
		# depends on the outcome of the installer!
self.blocking = blocking
self.currentlyInstallingMetaIndex = None
self.console = eConsoleAppContainer()
self.console.appClosed.append(self.installNext)
self.reloadFavourites = False
self.statusCallback = statusCallback
self.setStatus(self.STATUS_INIT)
self.packageslist = []
self.packagesIndexlist = []
self.packageDetails = []
def readInfo(self, directory, file):
handler = InfoHandler(self.prerequisiteMet, directory)
try:
xml.sax.parse(file, handler)
for entry in handler.list:
self.packageslist.append((entry,file))
except InfoHandlerParseError:
pass
def readIndex(self, directory, file):
handler = InfoHandler(self.prerequisiteMet, directory)
try:
xml.sax.parse(file, handler)
for entry in handler.list:
self.packagesIndexlist.append((entry,file))
except InfoHandlerParseError:
pass
def readDetails(self, directory, file):
self.packageDetails = []
handler = InfoHandler(self.prerequisiteMet, directory)
try:
xml.sax.parse(file, handler)
for entry in handler.list:
self.packageDetails.append((entry,file))
except InfoHandlerParseError:
pass
def fillPackagesList(self, prerequisites = True):
self.packageslist = []
packages = []
if not isinstance(self.directory, list):
self.directory = [self.directory]
for directory in self.directory:
packages += crawlDirectory(directory, ".*\.info$")
for package in packages:
self.readInfo(package[0] + "/", package[0] + "/" + package[1])
if prerequisites:
for package in self.packageslist[:]:
if not self.prerequisiteMet(package[0]["prerequisites"]):
self.packageslist.remove(package)
return self.packageslist
def fillPackagesIndexList(self, prerequisites = True):
self.packagesIndexlist = []
indexfileList = []
if not isinstance(self.directory, list):
self.directory = [self.directory]
for indexfile in os.listdir(self.directory[0]):
if indexfile.startswith("index-"):
if indexfile.endswith(".xml"):
if indexfile[-7:-6] == "_":
continue
indexfileList.append(indexfile)
if len(indexfileList):
for file in indexfileList:
neededFile = self.directory[0] + "/" + file
if os.path.isfile(neededFile):
self.readIndex(self.directory[0] + "/" , neededFile)
if prerequisites:
for package in self.packagesIndexlist[:]:
if not self.prerequisiteMet(package[0]["prerequisites"]):
self.packagesIndexlist.remove(package)
return self.packagesIndexlist
def fillPackageDetails(self, details = None):
self.packageDetails = []
detailsfile = details
if not isinstance(self.directory, list):
self.directory = [self.directory]
self.readDetails(self.directory[0] + "/", self.directory[0] + "/" + detailsfile)
return self.packageDetails
def prerequisiteMet(self, prerequisites):
met = True
if self.neededTag is None:
if prerequisites.has_key("tag"):
return False
elif self.neededTag == 'ALL_TAGS':
return True
else:
if prerequisites.has_key("tag"):
if not self.neededTag in prerequisites["tag"]:
return False
else:
return False
if self.neededFlag is None:
if prerequisites.has_key("flag"):
return False
else:
if prerequisites.has_key("flag"):
if not self.neededFlag in prerequisites["flag"]:
return False
else:
return True
if prerequisites.has_key("satellite"):
for sat in prerequisites["satellite"]:
if int(sat) not in nimmanager.getConfiguredSats():
return False
if prerequisites.has_key("bcastsystem"):
has_system = False
for bcastsystem in prerequisites["bcastsystem"]:
if nimmanager.hasNimType(bcastsystem):
has_system = True
if not has_system:
return False
if prerequisites.has_key("hardware"):
hardware_found = False
for hardware in prerequisites["hardware"]:
if hardware == HardwareInfo().device_name:
hardware_found = True
if not hardware_found:
return False
return True
def installPackages(self, indexes):
if len(indexes) == 0:
self.setStatus(self.STATUS_DONE)
return
self.installIndexes = indexes
self.currentlyInstallingMetaIndex = 0
self.installPackage(self.installIndexes[self.currentlyInstallingMetaIndex])
def installPackage(self, index):
if len(self.packageslist) <= index:
return
attributes = self.packageslist[index][0]["attributes"]
self.installingAttributes = attributes
self.attributeNames = ["skin", "config", "favourites", "package", "services"]
self.currentAttributeIndex = 0
self.currentIndex = -1
self.installNext()
def setStatus(self, status):
self.status = status
self.statusCallback(self.status, None)
def installNext(self, *args, **kwargs):
if self.reloadFavourites:
self.reloadFavourites = False
db = eDVBDB.getInstance().reloadBouquets()
self.currentIndex += 1
attributes = self.installingAttributes
if self.currentAttributeIndex >= len(self.attributeNames):
if self.currentlyInstallingMetaIndex is None or self.currentlyInstallingMetaIndex >= len(self.installIndexes) - 1:
self.setStatus(self.STATUS_DONE)
return
else:
self.currentlyInstallingMetaIndex += 1
self.currentAttributeIndex = 0
self.installPackage(self.installIndexes[self.currentlyInstallingMetaIndex])
return
self.setStatus(self.STATUS_WORKING)
currentAttribute = self.attributeNames[self.currentAttributeIndex]
if attributes.has_key(currentAttribute):
if self.currentIndex >= len(attributes[currentAttribute]):
self.currentIndex = -1
self.currentAttributeIndex += 1
self.installNext()
return
else:
self.currentIndex = -1
self.currentAttributeIndex += 1
self.installNext()
return
if currentAttribute == "skin":
skin = attributes["skin"][self.currentIndex]
self.installSkin(skin["directory"], skin["name"])
elif currentAttribute == "config":
if self.currentIndex == 0:
from Components.config import configfile
configfile.save()
config = attributes["config"][self.currentIndex]
self.mergeConfig(config["directory"], config["name"])
elif currentAttribute == "favourites":
favourite = attributes["favourites"][self.currentIndex]
self.installFavourites(favourite["directory"], favourite["name"])
elif currentAttribute == "package":
package = attributes["package"][self.currentIndex]
self.installIPK(package["directory"], package["name"])
elif currentAttribute == "services":
service = attributes["services"][self.currentIndex]
self.mergeServices(service["directory"], service["name"])
def readfile(self, filename):
if not os.path.isfile(filename):
return []
fd = open(filename)
lines = fd.readlines()
fd.close()
return lines
def mergeConfig(self, directory, name, merge = True):
if os.path.isfile(directory + name):
config.loadFromFile(directory + name, base_file=False)
configfile.save()
self.installNext()
def installIPK(self, directory, name):
if self.blocking:
os.system("opkg install " + directory + name)
self.installNext()
else:
self.ipkg = IpkgComponent()
self.ipkg.addCallback(self.ipkgCallback)
self.ipkg.startCmd(IpkgComponent.CMD_INSTALL, {'package': directory + name})
def ipkgCallback(self, event, param):
if event == IpkgComponent.EVENT_DONE:
self.installNext()
elif event == IpkgComponent.EVENT_ERROR:
self.installNext()
def installSkin(self, directory, name):
if self.blocking:
copytree(directory, resolveFilename(SCOPE_SKIN))
self.installNext()
else:
if self.console.execute("cp -a %s %s" % (directory, resolveFilename(SCOPE_SKIN))):
self.installNext()
def mergeServices(self, directory, name, merge = False):
if os.path.isfile(directory + name):
db = eDVBDB.getInstance()
db.reloadServicelist()
db.loadServicelist(directory + name)
db.saveServicelist()
self.installNext()
def installFavourites(self, directory, name):
self.reloadFavourites = True
if self.blocking:
copyfile(directory + name, resolveFilename(SCOPE_CONFIG))
self.installNext()
else:
if self.console.execute("cp %s %s" % ((directory + name), resolveFilename(SCOPE_CONFIG))):
self.installNext()
| gpl-2.0 | 8,949,410,646,252,805,000 | 30.592138 | 117 | 0.704542 | false |
SymbiFlow/python-fpga-interchange | fpga_interchange/constraints/sat.py | 1 | 11109 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
class AssertStateVariable():
""" Abstract asserted state variable. """
def __init__(self, parent, state):
self.parent = parent
self.state = state
def variable_name(self):
return '{}.{}'.format(self.parent.prefix, self.state)
def variable(self, solver):
return solver.get_variable(self.variable_name())
def __str__(self):
return self.variable_name()
class DeassertStateVariable():
""" Abstract deasserted state variable. """
def __init__(self, parent, state):
self.parent = parent
self.state = state
def variable_name(self):
return '{}.NOT.{}'.format(self.parent.prefix, self.state)
def variable(self, solver):
return solver.get_variable(self.variable_name())
def __str__(self):
return self.variable_name()
class Not():
""" Abstract inverted variable. """
def __init__(self, variable):
self.a_variable = variable
def variable_name(self):
return self.a_variable.variable_name()
def variable(self, solver):
return -solver.get_variable(self.variable_name())
def __str__(self):
return '!' + self.variable_name()
class Xor():
""" Abstract XOR SAT clause. """
def __init__(self, variable_a, variable_b):
self.variable_a = variable_a
self.variable_b = variable_b
def clauses(self):
yield [self.variable_a, self.variable_b]
yield [Not(self.variable_a), Not(self.variable_b)]
def __str__(self):
return '{} xor {}'.format(self.variable_a.variable_name(),
self.variable_b.variable_name())
class Implies():
""" Abstract implies (->) SAT clause. """
def __init__(self, source_variable, target_variable):
self.source_variable = source_variable
self.target_variable = target_variable
def clauses(self):
yield [Not(self.source_variable), self.target_variable]
def __str__(self):
return '{} -> {}'.format(self.source_variable.variable_name(),
self.target_variable.variable_name())
class Or():
""" Abstract OR SAT clause. """
def __init__(self, variables):
self.variables = variables
def clauses(self):
yield self.variables
def __str__(self):
return 'sum({})'.format(', '.join(str(var) for var in self.variables))
class ExclusiveStateGroup():
""" A group of states that have at most 1 state selected. """
def __init__(self, prefix, default):
self.prefix = prefix
self.states = set()
self.default = default
def name(self):
""" Return name of state group. """
return self.prefix
def add_state(self, state):
""" Add a state to this group. """
self.states.add(state)
def assert_state(self, state):
""" Return a SAT variable that asserts that a state must be asserted. """
return AssertStateVariable(self, state)
def deassert_state(self, state):
""" Return a SAT variable that asserts that a state must be deasserted. """
return DeassertStateVariable(self, state)
def select_one(self):
""" Yields SAT clauses that ensure that one variable from this state group is selected. """
yield Or([self.assert_state(state) for state in self.states])
def implies_clause(self, source_variable, state):
""" Yields SAT clauses that ensure if source_variable is true, then state is asserted from this group. """
assert state in self.states, state
yield Implies(source_variable, self.assert_state(state))
def implies_not_clause(self, source_variable, state):
""" Yields SAT clauses that ensure if source_variable is true, then state is deassert from this group. """
assert state in self.states
yield Implies(source_variable, self.deassert_state(state))
def requires_clause(self, source_variable, states):
""" Yields SAT clauses that ensure if source_variable is true, then one of the supplied states must be asserted from this group. """
for other_state in self.states - states:
yield self.implies_not_clause(source_variable, other_state)
def variables(self):
""" Yields SAT variables generated from this state group. """
for state in self.states:
yield self.assert_state(state)
yield self.deassert_state(state)
def clauses(self):
""" Yield SAT clauses that ensure this state group selects at most one state. """
for state in self.states:
yield Xor(
AssertStateVariable(self, state),
DeassertStateVariable(self, state))
for other_state in (self.states - set([state])):
yield Implies(
AssertStateVariable(self, state),
DeassertStateVariable(self, other_state))
def get_state(self, variables_for_state_group):
""" Return state for this group based on true SAT variables relevant to this group. """
state = None
for variable in variables_for_state_group:
assert variable.startswith(self.prefix + '.')
data_portion = variable[len(self.prefix) + 1:]
not_set = False
if data_portion.startswith('NOT.'):
data_portion = data_portion[len('NOT.'):]
not_set = True
assert data_portion in self.states
if not_set:
continue
if state is None:
state = data_portion
else:
assert False, (state, data_portion)
if state is None:
state = self.default
return state
class Solver():
""" Abstract SAT solver, where each SAT variable is a string.
    Clauses used in this class are "abstract" clauses that can yield more than
    one clause.
"""
def __init__(self):
self.variable_names = set()
self.variable_name_to_index = None
self.abstract_clauses = []
self.state_group_names = set()
self.state_groups = []
self.variable_to_state_group = {}
def add_state_group(self, state_group):
""" Adds a state group to the solver.
state_group (ExclusiveStateGroup) - State group.
"""
assert state_group.name() not in self.state_group_names
self.state_group_names.add(state_group.name())
self.state_groups.append(state_group)
def add_variable_names(self, variables):
""" Adds a variable names to this Solver.
        These variable names must not already be part of the Solver.
"""
new_variable_names = set()
for variable in variables:
new_variable_names.add(variable)
assert len(self.variable_names & variables) == 0
self.variable_names |= new_variable_names
def add_clause(self, clause):
""" Add an abstract clause to the Solver.
        The interface for an abstract clause should have one method that yields
        a list of abstract variable objects.
        Abstract variable objects should have a method called variable that
        takes a Solver object.
"""
self.abstract_clauses.append(clause)
def get_variable(self, variable_name):
""" Return SAT variable index for a variable name. """
assert self.variable_name_to_index is not None
return self.variable_name_to_index[variable_name]
def get_variable_name(self, variable_index):
""" Return a SAT variable name for a given variable index. """
return self.variable_names[variable_index - 1]
def prepare_for_sat(self):
""" Convert SAT clauses using variable name strings to SAT indicies """
for state_group in self.state_groups:
new_variables = set()
for variable in state_group.variables():
new_variables.add(variable.variable_name())
self.add_variable_names(new_variables)
for variable in new_variables:
assert variable not in self.variable_to_state_group
self.variable_to_state_group[variable] = state_group
for clause in state_group.clauses():
self.add_clause(clause)
self.variable_names = sorted(self.variable_names)
self.variable_name_to_index = {}
        # Assign SAT variable indices to variable names
for idx, variable_name in enumerate(self.variable_names):
assert variable_name not in self.variable_name_to_index
self.variable_name_to_index[variable_name] = idx + 1
# Convert abstract clauses using variable names to SAT clauses
concrete_clauses = set()
for abstract_clause in self.abstract_clauses:
for clause in abstract_clause.clauses():
concrete_clause = []
for part in clause:
concrete_clause.append(part.variable(self))
assert len(set(concrete_clause)) == len(concrete_clause)
concrete_clauses.add(tuple(sorted(concrete_clause)))
return sorted(concrete_clauses)
def decode_solution_model(self, sat_model):
""" Decode a solution from a SAT solver.
Returns a dict of state group states and a set of SAT variables that
don't belong to state group states.
"""
state_group_variables = {}
other_variables = set()
for idx in sat_model:
if idx < 0:
continue
variable = self.get_variable_name(idx)
if variable in self.variable_to_state_group:
state_group = self.variable_to_state_group[variable]
state_group_name = state_group.name()
if state_group_name not in state_group_variables:
state_group_variables[state_group_name] = set()
state_group_variables[state_group_name].add(variable)
else:
other_variables.add(variable)
state_group_results = {}
for state_group_name, variables in state_group_variables.items():
state_group = self.variable_to_state_group[list(variables)[0]]
state_group_results[state_group_name] = state_group.get_state(
variables)
return state_group_results, other_variables
def print_debug(self):
""" Print debugging information for the abstract SAT solver. """
print()
print("Variable names ({} total):".format(len(self.variable_names)))
print()
for variable in self.variable_names:
print(variable)
print()
print("Clauses:")
print()
for clause in self.abstract_clauses:
print(clause)
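# --- Illustrative example (not part of the original module) ---
# A minimal sketch of wiring an ExclusiveStateGroup into a Solver and
# producing concrete SAT clauses; the group and state names are hypothetical.
def _example_build_solver():
    group = ExclusiveStateGroup('mode', default='idle')
    group.add_state('idle')
    group.add_state('active')
    solver = Solver()
    solver.add_state_group(group)
    for clause in group.select_one():
        solver.add_clause(clause)
    # Returns the list of concrete clauses ready for a SAT solver backend.
    return solver.prepare_for_sat()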
| isc | -1,200,851,189,210,134,500 | 31.673529 | 140 | 0.606085 | false |
takeshineshiro/horizon | openstack_dashboard/dashboards/identity/projects/panel.py | 43 | 1040 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils.translation import ugettext_lazy as _
import horizon
class Tenants(horizon.Panel):
name = _("Projects")
slug = 'projects'
policy_rules = (("identity", "identity:list_projects"),
("identity", "identity:list_user_projects"))
| apache-2.0 | 384,950,350,010,431,100 | 36.142857 | 78 | 0.716346 | false |
hedaoyuan/Paddle | python/paddle/trainer_config_helpers/tests/configs/projections.py | 8 | 1705 | '''
Test mixed layer, projections and operators.
'''
from paddle.trainer_config_helpers import *
settings(batch_size=1000, learning_rate=1e-4)
din = data_layer(name='test', size=100)
din = embedding_layer(input=din, size=256)
with mixed_layer(size=100) as m1:
m1 += full_matrix_projection(input=din)
with mixed_layer(size=100) as m2:
m2 += table_projection(input=m1)
with mixed_layer(size=100) as m3:
m3 += identity_projection(input=m2)
with mixed_layer(size=100) as m4:
m4 += dotmul_projection(input=m3)
with mixed_layer() as m5:
m5 += context_projection(input=m4, context_len=3)
with mixed_layer() as m6:
m6 += dotmul_operator(a=m3, b=m4)
m6 += scaling_projection(m3)
img = data_layer(name='img', size=32 * 32)
flt = data_layer(name='filter', size=3 * 3 * 1 * 64)
with mixed_layer() as m7:
m7 += conv_operator(
img=img, filter=flt, num_filters=64, num_channels=1, filter_size=3)
m7 += conv_projection(img, filter_size=3, num_filters=64, num_channels=1)
with mixed_layer() as m8:
m8 += conv_operator(
img=img,
filter=flt,
num_filters=64,
num_channels=1,
filter_size=3,
stride=2,
padding=1,
trans=True)
m8 += conv_projection(
img,
filter_size=3,
num_filters=64,
num_channels=1,
stride=2,
padding=1,
trans=True)
end = mixed_layer(
input=[
full_matrix_projection(input=m5),
trans_full_matrix_projection(input=m6),
full_matrix_projection(input=m7), full_matrix_projection(input=m8)
],
size=100,
layer_attr=ExtraAttr(
drop_rate=0.5, error_clipping_threshold=40))
outputs(end)
| apache-2.0 | 7,697,053,768,494,347,000 | 24.447761 | 77 | 0.626393 | false |
msarana/selenium_python | ENV/Lib/site-packages/pip/_vendor/requests/packages/urllib3/contrib/ntlmpool.py | 199 | 4546 | """
NTLM authenticating pool, contributed by erikcederstran
Issue #10, see: http://code.google.com/p/urllib3/issues/detail?id=10
"""
from __future__ import absolute_import
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
from logging import getLogger
from ntlm import ntlm
from urllib3 import HTTPSConnectionPool
log = getLogger(__name__)
class NTLMConnectionPool(HTTPSConnectionPool):
"""
Implements an NTLM authentication version of an urllib3 connection pool
"""
scheme = 'https'
def __init__(self, user, pw, authurl, *args, **kwargs):
"""
authurl is a random URL on the server that is protected by NTLM.
user is the Windows user, probably in the DOMAIN\\username format.
pw is the password for the user.
"""
super(NTLMConnectionPool, self).__init__(*args, **kwargs)
self.authurl = authurl
self.rawuser = user
user_parts = user.split('\\', 1)
self.domain = user_parts[0].upper()
self.user = user_parts[1]
self.pw = pw
def _new_conn(self):
# Performs the NTLM handshake that secures the connection. The socket
# must be kept open while requests are performed.
self.num_connections += 1
log.debug('Starting NTLM HTTPS connection no. %d: https://%s%s' %
(self.num_connections, self.host, self.authurl))
headers = {}
headers['Connection'] = 'Keep-Alive'
req_header = 'Authorization'
resp_header = 'www-authenticate'
conn = HTTPSConnection(host=self.host, port=self.port)
# Send negotiation message
headers[req_header] = (
'NTLM %s' % ntlm.create_NTLM_NEGOTIATE_MESSAGE(self.rawuser))
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
reshdr = dict(res.getheaders())
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % reshdr)
log.debug('Response data: %s [...]' % res.read(100))
# Remove the reference to the socket, so that it can not be closed by
# the response object (we want to keep the socket open)
res.fp = None
# Server should respond with a challenge message
auth_header_values = reshdr[resp_header].split(', ')
auth_header_value = None
for s in auth_header_values:
if s[:5] == 'NTLM ':
auth_header_value = s[5:]
if auth_header_value is None:
raise Exception('Unexpected %s response header: %s' %
(resp_header, reshdr[resp_header]))
# Send authentication message
ServerChallenge, NegotiateFlags = \
ntlm.parse_NTLM_CHALLENGE_MESSAGE(auth_header_value)
auth_msg = ntlm.create_NTLM_AUTHENTICATE_MESSAGE(ServerChallenge,
self.user,
self.domain,
self.pw,
NegotiateFlags)
headers[req_header] = 'NTLM %s' % auth_msg
log.debug('Request headers: %s' % headers)
conn.request('GET', self.authurl, None, headers)
res = conn.getresponse()
log.debug('Response status: %s %s' % (res.status, res.reason))
log.debug('Response headers: %s' % dict(res.getheaders()))
log.debug('Response data: %s [...]' % res.read()[:100])
if res.status != 200:
if res.status == 401:
raise Exception('Server rejected request: wrong '
'username or password')
raise Exception('Wrong server response: %s %s' %
(res.status, res.reason))
res.fp = None
log.debug('Connection established')
return conn
def urlopen(self, method, url, body=None, headers=None, retries=3,
redirect=True, assert_same_host=True):
if headers is None:
headers = {}
headers['Connection'] = 'Keep-Alive'
return super(NTLMConnectionPool, self).urlopen(method, url, body,
headers, retries,
redirect,
assert_same_host)
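# --- Illustrative example (not part of the original urllib3 contrib module) ---
# A minimal sketch of issuing a request through the NTLM pool; the host, the
# credentials and the protected URL below are hypothetical placeholders.
def _example_ntlm_request():
    pool = NTLMConnectionPool('EXAMPLE\\someuser', 'secret', authurl='/protected/',
                              host='intranet.example.com', port=443)
    return pool.urlopen('GET', '/protected/')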
| apache-2.0 | 7,307,572,819,719,631,000 | 38.530435 | 77 | 0.554333 | false |
mitchcapper/mythbox | resources/lib/IMDbPY/imdb/_compat.py | 128 | 2753 | """
_compat module (imdb package).
This module provides compatibility functions used by the imdb package
to deal with unusual environments.
Copyright 2008-2010 Davide Alberani <[email protected]>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
# TODO: now we're heavily using the 'logging' module, which was not
# present in Python 2.2. To work in a Symbian environment, we
# need to create a fake 'logging' module (its functions may call
# the 'warnings' module, or do nothing at all).
import os
# If true, we're working on a Symbian device.
if os.name == 'e32':
# Replace os.path.expandvars and os.path.expanduser, if needed.
def _noact(x):
"""Ad-hoc replacement for IMDbPY."""
return x
try:
os.path.expandvars
except AttributeError:
os.path.expandvars = _noact
try:
os.path.expanduser
except AttributeError:
os.path.expanduser = _noact
# time.strptime is missing, on Symbian devices.
import time
try:
time.strptime
except AttributeError:
import re
_re_web_time = re.compile(r'Episode dated (\d+) (\w+) (\d+)')
_re_ptdf_time = re.compile(r'\((\d+)-(\d+)-(\d+)\)')
_month2digit = {'January': '1', 'February': '2', 'March': '3',
'April': '4', 'May': '5', 'June': '6', 'July': '7',
'August': '8', 'September': '9', 'October': '10',
'November': '11', 'December': '12'}
def strptime(s, format):
"""Ad-hoc strptime replacement for IMDbPY."""
try:
if format.startswith('Episode'):
res = _re_web_time.findall(s)[0]
return (int(res[2]), int(_month2digit[res[1]]), int(res[0]),
0, 0, 0, 0, 1, 0)
else:
res = _re_ptdf_time.findall(s)[0]
return (int(res[0]), int(res[1]), int(res[2]),
0, 0, 0, 0, 1, 0)
except:
raise ValueError('error in IMDbPY\'s ad-hoc strptime!')
time.strptime = strptime
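        # Illustrative note (not part of the original module): the ad-hoc
        # replacement above only understands the two patterns used by IMDbPY,
        # e.g. (values hypothetical):
        #   strptime('Episode dated 12 May 2004', 'Episode dated %d %B %Y')
        #   -> (2004, 5, 12, 0, 0, 0, 0, 1, 0)
        #   strptime('(2004-5-12)', '(%Y-%m-%d)')
        #   -> (2004, 5, 12, 0, 0, 0, 0, 1, 0)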
| gpl-2.0 | 3,267,902,793,357,768,700 | 37.236111 | 80 | 0.600799 | false |
jackTheRipper/iotrussia | web_server/lib/werkzeug-master/examples/simplewiki/actions.py | 45 | 6428 | # -*- coding: utf-8 -*-
"""
simplewiki.actions
~~~~~~~~~~~~~~~~~~
The per page actions. The actions are defined in the URL with the
`action` parameter and directly dispatched to the functions in this
module. In the module the actions are prefixed with 'on_', so be
careful not to name any other objects in the module with the same
prefix unless you want to act them as actions.
:copyright: (c) 2009 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD.
"""
from difflib import unified_diff
from simplewiki.utils import Response, generate_template, parse_creole, \
href, redirect, format_datetime
from simplewiki.database import RevisionedPage, Page, Revision, session
def on_show(request, page_name):
"""Displays the page the user requests."""
revision_id = request.args.get('rev', type=int)
query = RevisionedPage.query.filter_by(name=page_name)
if revision_id:
query = query.filter_by(revision_id=revision_id)
revision_requested = True
else:
query = query.order_by(RevisionedPage.revision_id.desc())
revision_requested = False
page = query.first()
if page is None:
return page_missing(request, page_name, revision_requested)
return Response(generate_template('action_show.html',
page=page
))
def on_edit(request, page_name):
"""Edit the current revision of a page."""
change_note = error = ''
revision = Revision.query.filter(
(Page.name == page_name) &
(Page.page_id == Revision.page_id)
).order_by(Revision.revision_id.desc()).first()
if revision is None:
page = None
else:
page = revision.page
if request.method == 'POST':
text = request.form.get('text')
if request.form.get('cancel') or \
revision and revision.text == text:
return redirect(href(page.name))
elif not text:
error = 'You cannot save empty revisions.'
else:
change_note = request.form.get('change_note', '')
if page is None:
page = Page(page_name)
session.add(page)
session.add(Revision(page, text, change_note))
session.commit()
return redirect(href(page.name))
return Response(generate_template('action_edit.html',
revision=revision,
page=page,
new=page is None,
page_name=page_name,
change_note=change_note,
error=error
))
def on_log(request, page_name):
"""Show the list of recent changes."""
page = Page.query.filter_by(name=page_name).first()
if page is None:
return page_missing(request, page_name, False)
return Response(generate_template('action_log.html',
page=page
))
def on_diff(request, page_name):
"""Show the diff between two revisions."""
old = request.args.get('old', type=int)
new = request.args.get('new', type=int)
error = ''
diff = page = old_rev = new_rev = None
if not (old and new):
error = 'No revisions specified.'
else:
revisions = dict((x.revision_id, x) for x in Revision.query.filter(
(Revision.revision_id.in_((old, new))) &
(Revision.page_id == Page.page_id) &
(Page.name == page_name)
))
if len(revisions) != 2:
error = 'At least one of the revisions requested ' \
'does not exist.'
else:
new_rev = revisions[new]
old_rev = revisions[old]
page = old_rev.page
diff = unified_diff(
(old_rev.text + '\n').splitlines(True),
(new_rev.text + '\n').splitlines(True),
page.name, page.name,
format_datetime(old_rev.timestamp),
format_datetime(new_rev.timestamp),
3
)
return Response(generate_template('action_diff.html',
error=error,
old_revision=old_rev,
new_revision=new_rev,
page=page,
diff=diff
))
def on_revert(request, page_name):
"""Revert an old revision."""
rev_id = request.args.get('rev', type=int)
old_revision = page = None
error = 'No such revision'
if request.method == 'POST' and request.form.get('cancel'):
return redirect(href(page_name))
if rev_id:
old_revision = Revision.query.filter(
(Revision.revision_id == rev_id) &
(Revision.page_id == Page.page_id) &
(Page.name == page_name)
).first()
if old_revision:
new_revision = Revision.query.filter(
(Revision.page_id == Page.page_id) &
(Page.name == page_name)
).order_by(Revision.revision_id.desc()).first()
if old_revision == new_revision:
error = 'You tried to revert the current active ' \
'revision.'
elif old_revision.text == new_revision.text:
error = 'There are no changes between the current ' \
'revision and the revision you want to ' \
'restore.'
else:
error = ''
page = old_revision.page
if request.method == 'POST':
change_note = request.form.get('change_note', '')
change_note = 'revert' + (change_note and ': ' +
change_note or '')
session.add(Revision(page, old_revision.text,
change_note))
session.commit()
return redirect(href(page_name))
return Response(generate_template('action_revert.html',
error=error,
old_revision=old_revision,
page=page
))
def page_missing(request, page_name, revision_requested, protected=False):
"""Displayed if page or revision does not exist."""
return Response(generate_template('page_missing.html',
page_name=page_name,
revision_requested=revision_requested,
protected=protected
), status=404)
def missing_action(request, action):
"""Displayed if a user tried to access a action that does not exist."""
return Response(generate_template('missing_action.html',
action=action
), status=404)
| gpl-2.0 | 5,793,568,842,011,693,000 | 33.374332 | 76 | 0.569384 | false |
greyg00s/googletest | scripts/common.py | 1180 | 2919 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Shared utilities for writing scripts for Google Test/Mock."""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
# Matches the line from 'svn info .' output that describes what SVN
# path the current local directory corresponds to. For example, in
# a googletest SVN workspace's trunk/test directory, the output will be:
#
# URL: https://googletest.googlecode.com/svn/trunk/test
_SVN_INFO_URL_RE = re.compile(r'^URL: https://(\w+)\.googlecode\.com/svn(.*)')
def GetCommandOutput(command):
"""Runs the shell command and returns its stdout as a list of lines."""
f = os.popen(command, 'r')
lines = [line.strip() for line in f.readlines()]
f.close()
return lines
def GetSvnInfo():
"""Returns the project name and the current SVN workspace's root path."""
for line in GetCommandOutput('svn info .'):
m = _SVN_INFO_URL_RE.match(line)
if m:
project = m.group(1) # googletest or googlemock
rel_path = m.group(2)
root = os.path.realpath(rel_path.count('/') * '../')
return project, root
return None, None
def GetSvnTrunk():
"""Returns the current SVN workspace's trunk root path."""
_, root = GetSvnInfo()
return root + '/trunk' if root else None
def IsInGTestSvn():
project, _ = GetSvnInfo()
return project == 'googletest'
def IsInGMockSvn():
project, _ = GetSvnInfo()
return project == 'googlemock'
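# --- Illustrative example (not part of the original script) ---
# A minimal sketch combining the helpers above; the output text is hypothetical.
def _ExamplePrintWorkspaceInfo():
  project, root = GetSvnInfo()
  if project:
    print 'Inside the %s SVN workspace rooted at %s' % (project, root)
  else:
    print 'Not inside a Google Test/Mock SVN workspace.'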
| bsd-3-clause | 5,745,368,047,817,061,000 | 34.168675 | 78 | 0.730045 | false |
CharlesZhong/Blog | app/main/forms.py | 2 | 2058 | from flask.ext.wtf import Form
from wtforms import StringField, TextAreaField, BooleanField, SelectField,\
SubmitField
from wtforms.validators import Required, Length, Email, Regexp
from wtforms import ValidationError
from ..models import Role, User
class NameForm(Form):
name = StringField('What is your name?', validators=[Required()])
submit = SubmitField('Submit')
class EditProfileForm(Form):
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
class EditProfileAdminForm(Form):
email = StringField('Email', validators=[Required(), Length(1, 64),
Email()])
username = StringField('Username', validators=[
Required(), Length(1, 64), Regexp('^[A-Za-z][A-Za-z0-9_.]*$', 0,
'Usernames must have only letters, '
'numbers, dots or underscores')])
confirmed = BooleanField('Confirmed')
role = SelectField('Role', coerce=int)
name = StringField('Real name', validators=[Length(0, 64)])
location = StringField('Location', validators=[Length(0, 64)])
about_me = TextAreaField('About me')
submit = SubmitField('Submit')
def __init__(self, user, *args, **kwargs):
super(EditProfileAdminForm, self).__init__(*args, **kwargs)
self.role.choices = [(role.id, role.name)
for role in Role.query.order_by(Role.name).all()]
self.user = user
def validate_email(self, field):
if field.data != self.user.email and \
User.query.filter_by(email=field.data).first():
raise ValidationError('Email already registered.')
def validate_username(self, field):
if field.data != self.user.username and \
User.query.filter_by(username=field.data).first():
raise ValidationError('Username already in use.')
| gpl-2.0 | -8,898,752,411,423,584,000 | 41 | 78 | 0.617104 | false |
openhatch/oh-mainline | vendor/packages/Django/django/core/serializers/json.py | 113 | 3461 | """
Serialize data to/from JSON
"""
# Avoid shadowing the standard library json module
from __future__ import absolute_import
import datetime
import decimal
import json
from django.core.serializers.base import DeserializationError
from django.core.serializers.python import Serializer as PythonSerializer
from django.core.serializers.python import Deserializer as PythonDeserializer
from django.utils import six
from django.utils.timezone import is_aware
class Serializer(PythonSerializer):
"""
Convert a queryset to JSON.
"""
internal_use_only = False
def start_serialization(self):
if json.__version__.split('.') >= ['2', '1', '3']:
# Use JS strings to represent Python Decimal instances (ticket #16850)
self.options.update({'use_decimal': False})
self._current = None
self.json_kwargs = self.options.copy()
self.json_kwargs.pop('stream', None)
self.json_kwargs.pop('fields', None)
self.stream.write("[")
def end_serialization(self):
if self.options.get("indent"):
self.stream.write("\n")
self.stream.write("]")
if self.options.get("indent"):
self.stream.write("\n")
def end_object(self, obj):
# self._current has the field data
indent = self.options.get("indent")
if not self.first:
self.stream.write(",")
if not indent:
self.stream.write(" ")
if indent:
self.stream.write("\n")
json.dump(self.get_dump_object(obj), self.stream,
cls=DjangoJSONEncoder, **self.json_kwargs)
self._current = None
def getvalue(self):
# Grand-parent super
return super(PythonSerializer, self).getvalue()
def Deserializer(stream_or_string, **options):
"""
Deserialize a stream or string of JSON data.
"""
if not isinstance(stream_or_string, (bytes, six.string_types)):
stream_or_string = stream_or_string.read()
if isinstance(stream_or_string, bytes):
stream_or_string = stream_or_string.decode('utf-8')
try:
objects = json.loads(stream_or_string)
for obj in PythonDeserializer(objects, **options):
yield obj
except GeneratorExit:
raise
except Exception as e:
# Map to deserializer error
raise DeserializationError(e)
class DjangoJSONEncoder(json.JSONEncoder):
"""
JSONEncoder subclass that knows how to encode date/time and decimal types.
"""
def default(self, o):
# See "Date Time String Format" in the ECMA-262 specification.
if isinstance(o, datetime.datetime):
r = o.isoformat()
if o.microsecond:
r = r[:23] + r[26:]
if r.endswith('+00:00'):
r = r[:-6] + 'Z'
return r
elif isinstance(o, datetime.date):
return o.isoformat()
elif isinstance(o, datetime.time):
if is_aware(o):
raise ValueError("JSON can't represent timezone-aware times.")
r = o.isoformat()
if o.microsecond:
r = r[:12]
return r
elif isinstance(o, decimal.Decimal):
return str(o)
else:
return super(DjangoJSONEncoder, self).default(o)
# Older, deprecated class name (for backwards compatibility purposes).
DateTimeAwareJSONEncoder = DjangoJSONEncoder
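# --- Illustrative example (not part of the original Django module) ---
# A minimal sketch showing DjangoJSONEncoder serializing values the stdlib
# encoder rejects; the sample data is hypothetical.
def _example_dump():
    sample = {'when': datetime.datetime(2012, 1, 1, 12, 30),
              'price': decimal.Decimal('19.99')}
    return json.dumps(sample, cls=DjangoJSONEncoder)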
| agpl-3.0 | -3,624,757,502,479,542,000 | 31.345794 | 82 | 0.60965 | false |
mpetyx/pychatbot | AIML/howie-src-0.6.0/howie/frontends/xmpp/filetransfer.py | 1 | 8172 | ## filetransfer.py
##
## Copyright (C) 2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: filetransfer.py,v 1.3 2004/10/12 17:32:01 cort Exp $
from protocol import *
from dispatcher import PlugIn
import base64
class IBB(PlugIn):
def __init__(self):
PlugIn.__init__(self)
self.DBG_LINE='ibb'
self._exported_methods=[self.OpenStream]
self._streams={}
self._ampnode=Node(NS_AMP+' amp',payload=[Node('rule',{'condition':'deliver-at','value':'stored','action':'error'}),Node('rule',{'condition':'match-resource','value':'exact','action':'error'})])
def plugin(self,owner):
self._owner.RegisterHandlerOnce('iq',self.StreamOpenReplyHandler) # Move to StreamOpen and specify stanza id
self._owner.RegisterHandler('iq',self.IqHandler,ns=NS_IBB)
self._owner.RegisterHandler('message',self.ReceiveHandler,ns=NS_IBB)
def IqHandler(self,conn,stanza):
typ=stanza.getType()
self.DEBUG('IqHandler called typ->%s'%typ,'info')
if typ=='set' and stanza.getTag('open',namespace=NS_IBB): self.StreamOpenHandler(conn,stanza)
elif typ=='set' and stanza.getTag('close',namespace=NS_IBB): self.StreamCloseHandler(conn,stanza)
elif typ=='result': self.StreamCommitHandler(conn,stanza)
elif typ=='error': self.StreamOpenReplyHandler(conn,stanza)
else: conn.send(Error(stanza,ERR_BAD_REQUEST))
raise NodeProcessed
def StreamOpenHandler(self,conn,stanza):
"""
<iq type='set'
from='[email protected]/orchard'
to='[email protected]/balcony'
id='inband_1'>
<open sid='mySID'
block-size='4096'
xmlns='http://jabber.org/protocol/ibb'/>
</iq>
"""
err=None
sid,blocksize=stanza.getTagAttr('open','sid'),stanza.getTagAttr('open','block-size')
self.DEBUG('StreamOpenHandler called sid->%s blocksize->%s'%(sid,blocksize),'info')
try: blocksize=int(blocksize)
except: err=ERR_BAD_REQUEST
if not sid or not blocksize: err=ERR_BAD_REQUEST
elif sid in self._streams.keys(): err=ERR_UNEXPECTED_REQUEST
if err: rep=Error(stanza,err)
else:
self.DEBUG("Opening stream: id %s, block-size %s"%(sid,blocksize),'info')
rep=Protocol('iq',stanza.getFrom(),'result',stanza.getTo(),{'id':stanza.getID()})
self._streams[sid]={'direction':'<'+str(stanza.getFrom()),'block-size':blocksize,'fp':open('/tmp/xmpp_file_'+sid,'w'),'seq':0,'syn_id':stanza.getID()}
conn.send(rep)
def OpenStream(self,sid,to,fp,blocksize=4096):
if sid in self._streams.keys(): return
if not JID(to).getResource(): return
self._streams[sid]={'direction':'|>'+to,'block-size':blocksize,'fp':fp,'seq':0}
self._owner.RegisterCycleHandler(self.SendHandler)
syn=Protocol('iq',to,'set',payload=[Node(NS_IBB+' open',{'sid':sid,'block-size':blocksize})])
self._owner.send(syn)
self._streams[sid]['syn_id']=syn.getID()
return self._streams[sid]
def SendHandler(self,conn):
self.DEBUG('SendHandler called','info')
for sid in self._streams.keys():
stream=self._streams[sid]
if stream['direction'][:2]=='|>': cont=1
elif stream['direction'][0]=='>':
chunk=stream['fp'].read(stream['block-size'])
if chunk:
datanode=Node(NS_IBB+' data',{'sid':sid,'seq':stream['seq']},base64.encodestring(chunk))
stream['seq']+=1
if stream['seq']==65536: stream['seq']=0
conn.send(Protocol('message',stream['direction'][1:],payload=[datanode,self._ampnode]))
else:
""" notify the other side about stream closing
notify the local user about sucessfull send
delete the local stream"""
conn.send(Protocol('iq',stream['direction'][1:],'set',payload=[Node(NS_IBB+' close',{'sid':sid})]))
conn.Event(self.DBG_LINE,'SUCCESSFULL SEND',stream)
del self._streams[sid]
self._owner.UnregisterCycleHandler(self.SendHandler)
"""
<message from='[email protected]/orchard' to='[email protected]/balcony' id='msg1'>
<data xmlns='http://jabber.org/protocol/ibb' sid='mySID' seq='0'>
qANQR1DBwU4DX7jmYZnncmUQB/9KuKBddzQH+tZ1ZywKK0yHKnq57kWq+RFtQdCJ
WpdWpR0uQsuJe7+vh3NWn59/gTc5MDlX8dS9p0ovStmNcyLhxVgmqS8ZKhsblVeu
IpQ0JgavABqibJolc3BKrVtVV1igKiX/N7Pi8RtY1K18toaMDhdEfhBRzO/XB0+P
AQhYlRjNacGcslkhXqNjK5Va4tuOAPy2n1Q8UUrHbUd0g+xJ9Bm0G0LZXyvCWyKH
kuNEHFQiLuCY6Iv0myq6iX6tjuHehZlFSh80b5BVV9tNLwNR5Eqz1klxMhoghJOA
</data>
<amp xmlns='http://jabber.org/protocol/amp'>
<rule condition='deliver-at' value='stored' action='error'/>
<rule condition='match-resource' value='exact' action='error'/>
</amp>
</message>
"""
def ReceiveHandler(self,conn,stanza):
sid,seq,data=stanza.getTagAttr('data','sid'),stanza.getTagAttr('data','seq'),stanza.getTagData('data')
self.DEBUG('ReceiveHandler called sid->%s seq->%s'%(sid,seq),'info')
try: seq=int(seq); data=base64.decodestring(data)
except: seq=''; data=''
err=None
if not sid in self._streams.keys(): err=ERR_ITEM_NOT_FOUND
else:
stream=self._streams[sid]
if not data: err=ERR_BAD_REQUEST
elif seq<>stream['seq']: err=ERR_UNEXPECTED_REQUEST
else:
                self.DEBUG('Successful receive sid->%s %s+%s bytes'%(sid,stream['fp'].tell(),len(data)),'ok')
stream['seq']+=1
stream['fp'].write(data)
if err:
self.DEBUG('Error on receive: %s'%err,'error')
conn.send(Error(Iq(to=stanza.getFrom(),frm=stanza.getTo(),payload=[Node(NS_IBB+' close')]),err,reply=0))
def StreamCloseHandler(self,conn,stanza):
sid=stanza.getTagAttr('close','sid')
self.DEBUG('StreamCloseHandler called sid->%s'%sid,'info')
if sid in self._streams.keys():
conn.send(stanza.buildReply('result'))
conn.Event(self.DBG_LINE,'SUCCESSFULL RECEIVE',self._streams[sid])
del self._streams[sid]
else: conn.send(Error(stanza,ERR_ITEM_NOT_FOUND))
def StreamBrokenHandler(self,conn,stanza):
syn_id=stanza.getID()
self.DEBUG('StreamBrokenHandler called syn_id->%s'%syn_id,'info')
for sid in self._streams.keys():
stream=self._streams[sid]
if stream['syn_id']==syn_id:
if stream['direction'][0]=='<': conn.Event(self.DBG_LINE,'ERROR ON RECEIVE',stream)
else: conn.Event(self.DBG_LINE,'ERROR ON SEND',stream)
del self._streams[sid]
def StreamOpenReplyHandler(self,conn,stanza):
syn_id=stanza.getID()
self.DEBUG('StreamOpenReplyHandler called syn_id->%s'%syn_id,'info')
for sid in self._streams.keys():
stream=self._streams[sid]
if stream['syn_id']==syn_id:
if stanza.getType()=='error':
if stream['direction'][0]=='<': conn.Event(self.DBG_LINE,'ERROR ON RECEIVE',stream)
else: conn.Event(self.DBG_LINE,'ERROR ON SEND',stream)
del self._streams[sid]
elif stanza.getType()=='result':
if stream['direction'][0]=='|':
stream['direction']=stream['direction'][1:]
conn.Event(self.DBG_LINE,'STREAM COMMITTED',stream)
else: conn.send(Error(stanza,ERR_UNEXPECTED_REQUEST))
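# --- Illustrative example (not part of the original module) ---
# A minimal sketch of initiating an in-band bytestream once the plugin is
# attached to a connected xmpppy client; the JID, sid and file name are
# hypothetical.
#
# ibb = IBB()
# ibb.PlugIn(client)
# client.OpenStream('mySID', '[email protected]/laptop', open('photo.jpg', 'rb'))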
| apache-2.0 | 303,146,057,119,499,200 | 47.642857 | 202 | 0.613681 | false |
ratschlab/ASP | examples/undocumented/python_static/kernel_polymatchword.py | 22 | 1186 | from tools.load import LoadMatrix
from sg import sg
lm=LoadMatrix()
traindna=lm.load_dna('../data/fm_train_dna.dat')
testdna=lm.load_dna('../data/fm_test_dna.dat')
trainlabel=lm.load_labels('../data/label_train_dna.dat')
parameter_list=[[traindna,testdna,trainlabel,10,2,True,True,3,0,'n'],
[traindna,testdna,trainlabel,11,3,True,True,4,0,'n']]
def kernel_polymatchword (fm_train_dna=traindna,fm_test_dna=testdna,
label_train_dna=trainlabel,size_cache=10,
degree=2,inhomogene=True,normalize=True,
order=3,gap=0,reverse='n'):
sg('add_preproc', 'SORTWORDSTRING')
sg('set_features', 'TRAIN', fm_train_dna, 'DNA')
sg('convert', 'TRAIN', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TRAIN')
sg('set_features', 'TEST', fm_test_dna, 'DNA')
sg('convert', 'TEST', 'STRING', 'CHAR', 'STRING', 'WORD', order, order-1, gap, reverse)
sg('attach_preproc', 'TEST')
sg('set_kernel', 'POLYMATCH', 'WORD', size_cache, degree, inhomogene, normalize)
km=sg('get_kernel_matrix', 'TRAIN')
km=sg('get_kernel_matrix', 'TEST')
return km
if __name__=='__main__':
print('PolyMatchWord')
kernel_polymatchword(*parameter_list[0])
| gpl-2.0 | 3,845,931,131,152,108,500 | 36.0625 | 89 | 0.677909 | false |
pmarques/ansible | test/units/module_utils/urls/test_fetch_url.py | 40 | 8423 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import socket
from ansible.module_utils.six import StringIO
from ansible.module_utils.six.moves.http_cookiejar import Cookie
from ansible.module_utils.six.moves.http_client import HTTPMessage
from ansible.module_utils.urls import fetch_url, urllib_error, ConnectionError, NoSSLError, httplib
import pytest
from mock import MagicMock
class AnsibleModuleExit(Exception):
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
class ExitJson(AnsibleModuleExit):
pass
class FailJson(AnsibleModuleExit):
pass
@pytest.fixture
def open_url_mock(mocker):
return mocker.patch('ansible.module_utils.urls.open_url')
@pytest.fixture
def fake_ansible_module():
return FakeAnsibleModule()
class FakeAnsibleModule:
def __init__(self):
self.params = {}
self.tmpdir = None
def exit_json(self, *args, **kwargs):
raise ExitJson(*args, **kwargs)
def fail_json(self, *args, **kwargs):
raise FailJson(*args, **kwargs)
def test_fetch_url_no_urlparse(mocker, fake_ansible_module):
mocker.patch('ansible.module_utils.urls.HAS_URLPARSE', new=False)
with pytest.raises(FailJson):
fetch_url(fake_ansible_module, 'http://ansible.com/')
def test_fetch_url(open_url_mock, fake_ansible_module):
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
dummy, kwargs = open_url_mock.call_args
open_url_mock.assert_called_once_with('http://ansible.com/', client_cert=None, client_key=None, cookies=kwargs['cookies'], data=None,
follow_redirects='urllib2', force=False, force_basic_auth='', headers=None,
http_agent='ansible-httpget', last_mod_time=None, method=None, timeout=10, url_password='', url_username='',
use_proxy=True, validate_certs=True, use_gssapi=False, unix_socket=None, ca_path=None)
def test_fetch_url_params(open_url_mock, fake_ansible_module):
fake_ansible_module.params = {
'validate_certs': False,
'url_username': 'user',
'url_password': 'passwd',
'http_agent': 'ansible-test',
'force_basic_auth': True,
'follow_redirects': 'all',
'client_cert': 'client.pem',
'client_key': 'client.key',
}
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
dummy, kwargs = open_url_mock.call_args
open_url_mock.assert_called_once_with('http://ansible.com/', client_cert='client.pem', client_key='client.key', cookies=kwargs['cookies'], data=None,
follow_redirects='all', force=False, force_basic_auth=True, headers=None,
http_agent='ansible-test', last_mod_time=None, method=None, timeout=10, url_password='passwd', url_username='user',
use_proxy=True, validate_certs=False, use_gssapi=False, unix_socket=None, ca_path=None)
def test_fetch_url_cookies(mocker, fake_ansible_module):
def make_cookies(*args, **kwargs):
cookies = kwargs['cookies']
r = MagicMock()
try:
r.headers = HTTPMessage()
add_header = r.headers.add_header
except TypeError:
# PY2
r.headers = HTTPMessage(StringIO())
add_header = r.headers.addheader
r.info.return_value = r.headers
for name, value in (('Foo', 'bar'), ('Baz', 'qux')):
cookie = Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain="ansible.com",
domain_specified=True,
domain_initial_dot=False,
path="/",
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
cookies.set_cookie(cookie)
add_header('Set-Cookie', '%s=%s' % (name, value))
return r
mocker = mocker.patch('ansible.module_utils.urls.open_url', new=make_cookies)
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
assert info['cookies'] == {'Baz': 'qux', 'Foo': 'bar'}
# Python sorts cookies in order of most specific (ie. longest) path first
# items with the same path are reversed from response order
assert info['cookies_string'] == 'Baz=qux; Foo=bar'
# The key here has a `-` as opposed to what we see in the `uri` module that converts to `_`
# Note: this is response order, which differs from cookies_string
assert info['set-cookie'] == 'Foo=bar, Baz=qux'
def test_fetch_url_nossl(open_url_mock, fake_ansible_module, mocker):
mocker.patch('ansible.module_utils.urls.get_distribution', return_value='notredhat')
open_url_mock.side_effect = NoSSLError
with pytest.raises(FailJson) as excinfo:
fetch_url(fake_ansible_module, 'http://ansible.com/')
assert 'python-ssl' not in excinfo.value.kwargs['msg']
mocker.patch('ansible.module_utils.urls.get_distribution', return_value='redhat')
open_url_mock.side_effect = NoSSLError
with pytest.raises(FailJson) as excinfo:
fetch_url(fake_ansible_module, 'http://ansible.com/')
assert 'python-ssl' in excinfo.value.kwargs['msg']
assert 'http://ansible.com/' == excinfo.value.kwargs['url']
assert excinfo.value.kwargs['status'] == -1
def test_fetch_url_connectionerror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = ConnectionError('TESTS')
with pytest.raises(FailJson) as excinfo:
fetch_url(fake_ansible_module, 'http://ansible.com/')
assert excinfo.value.kwargs['msg'] == 'TESTS'
assert 'http://ansible.com/' == excinfo.value.kwargs['url']
assert excinfo.value.kwargs['status'] == -1
open_url_mock.side_effect = ValueError('TESTS')
with pytest.raises(FailJson) as excinfo:
fetch_url(fake_ansible_module, 'http://ansible.com/')
assert excinfo.value.kwargs['msg'] == 'TESTS'
assert 'http://ansible.com/' == excinfo.value.kwargs['url']
assert excinfo.value.kwargs['status'] == -1
def test_fetch_url_httperror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = urllib_error.HTTPError(
'http://ansible.com/',
500,
'Internal Server Error',
{'Content-Type': 'application/json'},
StringIO('TESTS')
)
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
assert info == {'msg': 'HTTP Error 500: Internal Server Error', 'body': 'TESTS',
'status': 500, 'url': 'http://ansible.com/', 'content-type': 'application/json'}
def test_fetch_url_urlerror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = urllib_error.URLError('TESTS')
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
assert info == {'msg': 'Request failed: <urlopen error TESTS>', 'status': -1, 'url': 'http://ansible.com/'}
def test_fetch_url_socketerror(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = socket.error('TESTS')
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
assert info == {'msg': 'Connection failure: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
def test_fetch_url_exception(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = Exception('TESTS')
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
exception = info.pop('exception')
assert info == {'msg': 'An unknown error occurred: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
assert "Exception: TESTS" in exception
def test_fetch_url_badstatusline(open_url_mock, fake_ansible_module):
open_url_mock.side_effect = httplib.BadStatusLine('TESTS')
r, info = fetch_url(fake_ansible_module, 'http://ansible.com/')
assert info == {'msg': 'Connection failure: connection was closed before a valid response was received: TESTS', 'status': -1, 'url': 'http://ansible.com/'}
| gpl-3.0 | 8,721,435,920,171,849,000 | 37.286364 | 159 | 0.627449 | false |
robhudson/django | tests/gis_tests/relatedapp/tests.py | 39 | 15582 | from __future__ import unicode_literals
from django.contrib.gis.db.models import F, Collect, Count, Extent, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.geos import GEOSGeometry, MultiPoint, Point
from django.db import connection
from django.test import TestCase, skipUnlessDBFeature
from django.test.utils import override_settings
from django.utils import timezone
from ..utils import no_oracle
from .models import (
Article, Author, Book, City, DirectoryEntry, Event, Location, Parcel,
)
@skipUnlessDBFeature("gis_enabled")
class RelatedGeoModelTest(TestCase):
fixtures = ['initial']
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.order_by('id')
qs2 = City.objects.order_by('id').select_related()
qs3 = City.objects.order_by('id').select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@skipUnlessDBFeature("has_transform_method")
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
            # Doing this implicitly sets `select_related` to select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_aggregate(self):
"Testing the `Extent` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.aggregate(Extent('location__point'))['location__point__extent']
e2 = City.objects.exclude(state='NM').aggregate(Extent('location__point'))['location__point__extent']
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e):
self.assertAlmostEqual(ref_val, e_val, tol)
@skipUnlessDBFeature("supports_extent_aggr")
def test_related_extent_annotate(self):
"""
Test annotation with Extent GeoAggregate.
"""
cities = City.objects.annotate(points_extent=Extent('location__point')).order_by('name')
tol = 4
self.assertAlmostEqual(
cities[0].points_extent,
(-97.516111, 33.058333, -97.516111, 33.058333),
tol
)
@skipUnlessDBFeature("has_unionagg_method")
def test_related_union_aggregate(self):
"Testing the `Union` aggregate on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
        # union that is returned. Each point corresponds to a City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
        # words a `.filter()` precedes the call to `.aggregate(Union())`).
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.aggregate(Union('location__point'))['location__point__union']
u2 = City.objects.exclude(
name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth'),
).aggregate(Union('location__point'))['location__point__union']
u3 = aggs['location__point__union']
self.assertEqual(type(u1), MultiPoint)
self.assertEqual(type(u3), MultiPoint)
# Ordering of points in the result of the union is not defined and
# implementation-dependent (DB backend, GEOS version)
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u1})
self.assertSetEqual({p.ewkt for p in ref_u2}, {p.ewkt for p in u2})
self.assertSetEqual({p.ewkt for p in ref_u1}, {p.ewkt for p in u3})
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry(
'POLYGON((-97.501205 33.052520,-97.501205 33.052576,'
'-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))',
srid=4326
)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
# in different coordinate systems. The center points are also the
# same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if connection.features.supports_transform:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if connection.features.supports_transform:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.assertIsInstance(d['point'], Geometry)
self.assertIsInstance(t[1], Geometry)
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
@override_settings(USE_TZ=True)
def test_07b_values(self):
"Testing values() and values_list() with aware datetime. See #21565."
Event.objects.create(name="foo", when=timezone.now())
list(Event.objects.values_list('when'))
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
# TODO: fix on Oracle -- qs2 returns an empty result for an unknown reason
@no_oracle
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.assertIn('Aurora', names)
self.assertIn('Kecksburg', names)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a values(), see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test13c_count(self):
"Testing `Count` aggregate with `.values()`. See #15305."
qs = Location.objects.filter(id=5).annotate(num_cities=Count('city')).values('id', 'point', 'num_cities')
self.assertEqual(1, len(qs))
self.assertEqual(2, qs[0]['num_cities'])
self.assertIsInstance(qs[0]['point'], GEOSGeometry)
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertIsNone(b.author)
@skipUnlessDBFeature("supports_collect_aggr")
def test_collect(self):
"""
Testing the `Collect` aggregate.
"""
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry(
'MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,'
'-95.363151 29.763374,-96.801611 32.782057)'
)
coll = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertTrue(ref_geom.equals(coll))
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
str(qs.query)
def test16_annotated_date_queryset(self):
"Ensure annotated date querysets work if spatial backend is used. See #14648."
birth_years = [dt.year for dt in
list(Author.objects.annotate(num_books=Count('books')).dates('dob', 'year'))]
birth_years.sort()
self.assertEqual([1950, 1974], birth_years)
# TODO: Related tests for KML, GML, and distance lookups.
| bsd-3-clause | -2,444,144,809,718,204,400 | 46.944615 | 113 | 0.637017 | false |
xlqian/navitia | source/jormungandr/jormungandr/compat.py | 3 | 2439 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
"""
This module is here for handling compatibility with versions of dependencies other than the ones we used at first.
Ideally the code should be adapted directly; this module should only be used to ease the transition.
"""
from __future__ import absolute_import, print_function, unicode_literals, division
from functools import wraps
from flask_restful import reqparse
from werkzeug.exceptions import BadRequest
import logging
def replace_parse_arg(func):
@wraps(func)
def _exec(*args, **kw):
try:
return func(*args, **kw)
except BadRequest as e:
if hasattr(e, "data") and isinstance(e.data, dict) and 'message' in e.data:
e.data['message'] = list(e.data['message'].values())[0]
raise e
return _exec
def patch_reqparse():
"""
    Newer versions of flask-restful return a dict in place of a str when args validation fails.
    This is a lot nicer but would break the interface, so for now we want to go back to the previous errors.
"""
logging.getLogger(__name__).info('monkey patching parse_args of RequestParser')
reqparse.RequestParser.parse_args = replace_parse_arg(reqparse.RequestParser.parse_args)
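# Illustrative usage sketch (not part of the original module): patch_reqparse()
# is meant to be called once at application start-up, before any RequestParser
# is built, e.g. (the module path and argument name below are assumptions):
#
#     from jormungandr.compat import patch_reqparse
#     patch_reqparse()
#     parser = reqparse.RequestParser()
#     parser.add_argument('count', type=int, required=True)
#     args = parser.parse_args()  # on failure, the error 'message' is a plain string again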
| agpl-3.0 | 8,438,755,299,339,916,000 | 38.33871 | 114 | 0.724067 | false |
cmsj/pibus | EPD.py | 1 | 4018 | # Copyright 2013-2015 Pervasive Displays, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http:#www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language
# governing permissions and limitations under the License.
from PIL import Image
from PIL import ImageOps
import re
import os
class EPDError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class EPD(object):
"""EPD E-Ink interface
to use:
from EPD import EPD
epd = EPD([path='/path/to/epd'], [auto=boolean])
image = Image.new('1', epd.size, 0)
# draw on image
epd.clear() # clear the panel
  epd.display(image)  # transfer image data
  epd.update()        # refresh the panel image - not needed if auto=true
"""
PANEL_RE = re.compile('^([A-Za-z]+)\s+(\d+\.\d+)\s+(\d+)x(\d+)\s+COG\s+(\d+)\s+FILM\s+(\d+)\s*$', flags=0)
def __init__(self, *args, **kwargs):
self._epd_path = '/dev/epd'
self._width = 200
self._height = 96
self._panel = 'EPD 2.0'
self._cog = 0
self._film = 0
self._auto = False
if len(args) > 0:
self._epd_path = args[0]
elif 'epd' in kwargs:
self._epd_path = kwargs['epd']
if ('auto' in kwargs) and kwargs['auto']:
self._auto = True
with open(os.path.join(self._epd_path, 'version')) as f:
self._version = f.readline().rstrip('\n')
with open(os.path.join(self._epd_path, 'panel')) as f:
line = f.readline().rstrip('\n')
m = self.PANEL_RE.match(line)
if None == m:
raise EPDError('invalid panel string')
self._panel = m.group(1) + ' ' + m.group(2)
self._width = int(m.group(3))
self._height = int(m.group(4))
self._cog = int(m.group(5))
self._film = int(m.group(6))
if self._width < 1 or self._height < 1:
raise EPDError('invalid panel geometry')
@property
def size(self):
return (self._width, self._height)
@property
def width(self):
return self._width
@property
def height(self):
return self._height
@property
def panel(self):
return self._panel
@property
def version(self):
return self._version
@property
def cog(self):
return self._cog
@property
def film(self):
return self._film
@property
def auto(self):
return self._auto
@auto.setter
def auto(self, flag):
if flag:
self._auto = True
else:
self._auto = False
def display(self, image):
        # attempt grayscale conversion, and then to single bit
        # better to do this before calling this method if the image is to
        # be displayed several times
if image.mode != "1":
image = ImageOps.grayscale(image).convert("1", dither=Image.FLOYDSTEINBERG)
if image.mode != "1":
raise EPDError('only single bit images are supported')
if image.size != self.size:
raise EPDError('image size mismatch')
with open(os.path.join(self._epd_path, 'LE', 'display_inverse'), 'r+b') as f:
f.write(image.tobytes())
if self.auto:
self.update()
def update(self):
self._command('U')
def partial_update(self):
self._command('P')
def clear(self):
self._command('C')
def _command(self, c):
with open(os.path.join(self._epd_path, 'command'), 'wb') as f:
f.write(bytes(c, 'UTF-8'))
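# Illustrative sketch (not part of the original file): a typical refresh cycle
# with the class above, assuming the EPD kernel/fuse driver is mounted at the
# default /dev/epd path and 'image' is built as in the class docstring:
#
#     epd = EPD(auto=False)
#     epd.clear()
#     epd.display(image)      # transfer the frame buffer
#     epd.partial_update()    # quick partial refresh instead of a full update()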
| mit | -287,905,039,462,322,270 | 24.75641 | 110 | 0.56894 | false |
SaikWolf/gnuradio | grc/core/utils/expr_utils.py | 17 | 5486 | """
Copyright 2008-2011 Free Software Foundation, Inc.
This file is part of GNU Radio
GNU Radio Companion is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
GNU Radio Companion is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
"""
import string
VAR_CHARS = string.letters + string.digits + '_'
class graph(object):
"""
Simple graph structure held in a dictionary.
"""
def __init__(self): self._graph = dict()
def __str__(self): return str(self._graph)
def add_node(self, node_key):
if node_key in self._graph:
return
self._graph[node_key] = set()
def remove_node(self, node_key):
if node_key not in self._graph:
return
for edges in self._graph.values():
if node_key in edges:
edges.remove(node_key)
self._graph.pop(node_key)
def add_edge(self, src_node_key, dest_node_key):
self._graph[src_node_key].add(dest_node_key)
def remove_edge(self, src_node_key, dest_node_key):
self._graph[src_node_key].remove(dest_node_key)
def get_nodes(self):
return self._graph.keys()
def get_edges(self, node_key):
return self._graph[node_key]
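# Minimal usage sketch of the graph class above (added for illustration only,
# not used elsewhere in this module; the node names are made up):
def _graph_example():
    g = graph()
    for node in ('samp_rate', 'freq'):
        g.add_node(node)
    g.add_edge('samp_rate', 'freq')  # freq depends on samp_rate
    return g.get_edges('samp_rate')  # -> set(['freq'])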
def expr_split(expr, var_chars=VAR_CHARS):
"""
    Split up an expression on non-alphanumeric characters, where underscore
    counts as part of an identifier. Leave quoted strings intact.
#TODO ignore escaped quotes, use raw strings.
Args:
expr: an expression string
Returns:
a list of string tokens that form expr
"""
toks = list()
tok = ''
quote = ''
for char in expr:
if quote or char in var_chars:
if char == quote:
quote = ''
tok += char
elif char in ("'", '"'):
toks.append(tok)
tok = char
quote = char
else:
toks.append(tok)
toks.append(char)
tok = ''
toks.append(tok)
return filter(lambda t: t, toks)
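# Illustrative sketch (added for clarity, not part of the original API): shows
# how expr_split tokenizes an expression while keeping quoted strings together.
# The sample expression is made up.
def _expr_split_example():
    tokens = expr_split("samp_rate*fft_size+len('a,b')")
    # tokens -> ['samp_rate', '*', 'fft_size', '+', 'len', '(', "'a,b'", ')']
    return tokens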
def expr_replace(expr, replace_dict):
"""
    Search for tokens in the expression and replace them using replace_dict.
Args:
expr: an expression string
replace_dict: a dict of find:replace
Returns:
        a new expression with the replacements applied
"""
expr_splits = expr_split(expr, var_chars=VAR_CHARS + '.')
for i, es in enumerate(expr_splits):
if es in replace_dict.keys():
expr_splits[i] = replace_dict[es]
return ''.join(expr_splits)
def get_variable_dependencies(expr, vars):
"""
Return a set of variables used in this expression.
Args:
expr: an expression string
vars: a list of variable names
Returns:
a subset of vars used in the expression
"""
expr_toks = expr_split(expr)
return set(var for var in vars if var in expr_toks)
def get_graph(exprs):
"""
Get a graph representing the variable dependencies
Args:
exprs: a mapping of variable name to expression
Returns:
a graph of variable deps
"""
vars = exprs.keys()
# Get dependencies for each expression, load into graph
var_graph = graph()
for var in vars:
var_graph.add_node(var)
for var, expr in exprs.iteritems():
for dep in get_variable_dependencies(expr, vars):
if dep != var:
var_graph.add_edge(dep, var)
return var_graph
def sort_variables(exprs):
"""
Get a list of variables in order of dependencies.
Args:
exprs: a mapping of variable name to expression
Returns:
a list of variable names
@throws Exception circular dependencies
"""
var_graph = get_graph(exprs)
sorted_vars = list()
# Determine dependency order
while var_graph.get_nodes():
# Get a list of nodes with no edges
indep_vars = filter(lambda var: not var_graph.get_edges(var), var_graph.get_nodes())
if not indep_vars:
raise Exception('circular dependency caught in sort_variables')
# Add the indep vars to the end of the list
sorted_vars.extend(sorted(indep_vars))
# Remove each edge-less node from the graph
for var in indep_vars:
var_graph.remove_node(var)
return reversed(sorted_vars)
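# Minimal sketch of the dependency ordering performed above (for illustration
# only; the variable names and expressions are made up):
def _sort_variables_example():
    exprs = {'a': 'b + 1', 'b': 'c * 2', 'c': '3'}
    # 'a' needs 'b' and 'b' needs 'c', so dependencies come out first:
    return list(sort_variables(exprs))  # -> ['c', 'b', 'a']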
def sort_objects(objects, get_id, get_expr):
"""
Sort a list of objects according to their expressions.
Args:
objects: the list of objects to sort
get_id: the function to extract an id from the object
get_expr: the function to extract an expression from the object
Returns:
a list of sorted objects
"""
id2obj = dict([(get_id(obj), obj) for obj in objects])
# Map obj id to expression code
id2expr = dict([(get_id(obj), get_expr(obj)) for obj in objects])
# Sort according to dependency
sorted_ids = sort_variables(id2expr)
# Return list of sorted objects
return [id2obj[id] for id in sorted_ids]
| gpl-3.0 | 1,450,718,963,967,682,000 | 27.722513 | 92 | 0.628873 | false |
s3nk4s/flaskTutorials | FlaskApp/FlaskApp/venv/local/lib/python2.7/encodings/ptcp154.py | 647 | 8950 | """ Python Character Mapping Codec generated from 'PTCP154.txt' with gencodec.py.
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
(c) Copyright 2000 Guido van Rossum.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_map)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_map)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_map)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_map)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ptcp154',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
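# Illustrative note (not part of the generated codec): once this module is
# importable as encodings.ptcp154 (or registered through a codecs search
# function), it behaves like any other codec, e.g.:
#
#     import codecs
#     codecs.lookup('ptcp154').name    # -> 'ptcp154'
#     u'\u0496'.encode('ptcp154')      # -> '\x80'
#     '\x80'.decode('ptcp154')         # -> u'\u0496'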
### Decoding Map
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
0x0080: 0x0496, # CYRILLIC CAPITAL LETTER ZHE WITH DESCENDER
0x0081: 0x0492, # CYRILLIC CAPITAL LETTER GHE WITH STROKE
0x0082: 0x04ee, # CYRILLIC CAPITAL LETTER U WITH MACRON
0x0083: 0x0493, # CYRILLIC SMALL LETTER GHE WITH STROKE
0x0084: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
0x0085: 0x2026, # HORIZONTAL ELLIPSIS
0x0086: 0x04b6, # CYRILLIC CAPITAL LETTER CHE WITH DESCENDER
0x0087: 0x04ae, # CYRILLIC CAPITAL LETTER STRAIGHT U
0x0088: 0x04b2, # CYRILLIC CAPITAL LETTER HA WITH DESCENDER
0x0089: 0x04af, # CYRILLIC SMALL LETTER STRAIGHT U
0x008a: 0x04a0, # CYRILLIC CAPITAL LETTER BASHKIR KA
0x008b: 0x04e2, # CYRILLIC CAPITAL LETTER I WITH MACRON
0x008c: 0x04a2, # CYRILLIC CAPITAL LETTER EN WITH DESCENDER
0x008d: 0x049a, # CYRILLIC CAPITAL LETTER KA WITH DESCENDER
0x008e: 0x04ba, # CYRILLIC CAPITAL LETTER SHHA
0x008f: 0x04b8, # CYRILLIC CAPITAL LETTER CHE WITH VERTICAL STROKE
0x0090: 0x0497, # CYRILLIC SMALL LETTER ZHE WITH DESCENDER
0x0091: 0x2018, # LEFT SINGLE QUOTATION MARK
0x0092: 0x2019, # RIGHT SINGLE QUOTATION MARK
0x0093: 0x201c, # LEFT DOUBLE QUOTATION MARK
0x0094: 0x201d, # RIGHT DOUBLE QUOTATION MARK
0x0095: 0x2022, # BULLET
0x0096: 0x2013, # EN DASH
0x0097: 0x2014, # EM DASH
0x0098: 0x04b3, # CYRILLIC SMALL LETTER HA WITH DESCENDER
0x0099: 0x04b7, # CYRILLIC SMALL LETTER CHE WITH DESCENDER
0x009a: 0x04a1, # CYRILLIC SMALL LETTER BASHKIR KA
0x009b: 0x04e3, # CYRILLIC SMALL LETTER I WITH MACRON
0x009c: 0x04a3, # CYRILLIC SMALL LETTER EN WITH DESCENDER
0x009d: 0x049b, # CYRILLIC SMALL LETTER KA WITH DESCENDER
0x009e: 0x04bb, # CYRILLIC SMALL LETTER SHHA
0x009f: 0x04b9, # CYRILLIC SMALL LETTER CHE WITH VERTICAL STROKE
0x00a1: 0x040e, # CYRILLIC CAPITAL LETTER SHORT U (Byelorussian)
0x00a2: 0x045e, # CYRILLIC SMALL LETTER SHORT U (Byelorussian)
0x00a3: 0x0408, # CYRILLIC CAPITAL LETTER JE
0x00a4: 0x04e8, # CYRILLIC CAPITAL LETTER BARRED O
0x00a5: 0x0498, # CYRILLIC CAPITAL LETTER ZE WITH DESCENDER
0x00a6: 0x04b0, # CYRILLIC CAPITAL LETTER STRAIGHT U WITH STROKE
0x00a8: 0x0401, # CYRILLIC CAPITAL LETTER IO
0x00aa: 0x04d8, # CYRILLIC CAPITAL LETTER SCHWA
0x00ad: 0x04ef, # CYRILLIC SMALL LETTER U WITH MACRON
0x00af: 0x049c, # CYRILLIC CAPITAL LETTER KA WITH VERTICAL STROKE
0x00b1: 0x04b1, # CYRILLIC SMALL LETTER STRAIGHT U WITH STROKE
0x00b2: 0x0406, # CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b3: 0x0456, # CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
0x00b4: 0x0499, # CYRILLIC SMALL LETTER ZE WITH DESCENDER
0x00b5: 0x04e9, # CYRILLIC SMALL LETTER BARRED O
0x00b8: 0x0451, # CYRILLIC SMALL LETTER IO
0x00b9: 0x2116, # NUMERO SIGN
0x00ba: 0x04d9, # CYRILLIC SMALL LETTER SCHWA
0x00bc: 0x0458, # CYRILLIC SMALL LETTER JE
0x00bd: 0x04aa, # CYRILLIC CAPITAL LETTER ES WITH DESCENDER
0x00be: 0x04ab, # CYRILLIC SMALL LETTER ES WITH DESCENDER
0x00bf: 0x049d, # CYRILLIC SMALL LETTER KA WITH VERTICAL STROKE
0x00c0: 0x0410, # CYRILLIC CAPITAL LETTER A
0x00c1: 0x0411, # CYRILLIC CAPITAL LETTER BE
0x00c2: 0x0412, # CYRILLIC CAPITAL LETTER VE
0x00c3: 0x0413, # CYRILLIC CAPITAL LETTER GHE
0x00c4: 0x0414, # CYRILLIC CAPITAL LETTER DE
0x00c5: 0x0415, # CYRILLIC CAPITAL LETTER IE
0x00c6: 0x0416, # CYRILLIC CAPITAL LETTER ZHE
0x00c7: 0x0417, # CYRILLIC CAPITAL LETTER ZE
0x00c8: 0x0418, # CYRILLIC CAPITAL LETTER I
0x00c9: 0x0419, # CYRILLIC CAPITAL LETTER SHORT I
0x00ca: 0x041a, # CYRILLIC CAPITAL LETTER KA
0x00cb: 0x041b, # CYRILLIC CAPITAL LETTER EL
0x00cc: 0x041c, # CYRILLIC CAPITAL LETTER EM
0x00cd: 0x041d, # CYRILLIC CAPITAL LETTER EN
0x00ce: 0x041e, # CYRILLIC CAPITAL LETTER O
0x00cf: 0x041f, # CYRILLIC CAPITAL LETTER PE
0x00d0: 0x0420, # CYRILLIC CAPITAL LETTER ER
0x00d1: 0x0421, # CYRILLIC CAPITAL LETTER ES
0x00d2: 0x0422, # CYRILLIC CAPITAL LETTER TE
0x00d3: 0x0423, # CYRILLIC CAPITAL LETTER U
0x00d4: 0x0424, # CYRILLIC CAPITAL LETTER EF
0x00d5: 0x0425, # CYRILLIC CAPITAL LETTER HA
0x00d6: 0x0426, # CYRILLIC CAPITAL LETTER TSE
0x00d7: 0x0427, # CYRILLIC CAPITAL LETTER CHE
0x00d8: 0x0428, # CYRILLIC CAPITAL LETTER SHA
0x00d9: 0x0429, # CYRILLIC CAPITAL LETTER SHCHA
0x00da: 0x042a, # CYRILLIC CAPITAL LETTER HARD SIGN
0x00db: 0x042b, # CYRILLIC CAPITAL LETTER YERU
0x00dc: 0x042c, # CYRILLIC CAPITAL LETTER SOFT SIGN
0x00dd: 0x042d, # CYRILLIC CAPITAL LETTER E
0x00de: 0x042e, # CYRILLIC CAPITAL LETTER YU
0x00df: 0x042f, # CYRILLIC CAPITAL LETTER YA
0x00e0: 0x0430, # CYRILLIC SMALL LETTER A
0x00e1: 0x0431, # CYRILLIC SMALL LETTER BE
0x00e2: 0x0432, # CYRILLIC SMALL LETTER VE
0x00e3: 0x0433, # CYRILLIC SMALL LETTER GHE
0x00e4: 0x0434, # CYRILLIC SMALL LETTER DE
0x00e5: 0x0435, # CYRILLIC SMALL LETTER IE
0x00e6: 0x0436, # CYRILLIC SMALL LETTER ZHE
0x00e7: 0x0437, # CYRILLIC SMALL LETTER ZE
0x00e8: 0x0438, # CYRILLIC SMALL LETTER I
0x00e9: 0x0439, # CYRILLIC SMALL LETTER SHORT I
0x00ea: 0x043a, # CYRILLIC SMALL LETTER KA
0x00eb: 0x043b, # CYRILLIC SMALL LETTER EL
0x00ec: 0x043c, # CYRILLIC SMALL LETTER EM
0x00ed: 0x043d, # CYRILLIC SMALL LETTER EN
0x00ee: 0x043e, # CYRILLIC SMALL LETTER O
0x00ef: 0x043f, # CYRILLIC SMALL LETTER PE
0x00f0: 0x0440, # CYRILLIC SMALL LETTER ER
0x00f1: 0x0441, # CYRILLIC SMALL LETTER ES
0x00f2: 0x0442, # CYRILLIC SMALL LETTER TE
0x00f3: 0x0443, # CYRILLIC SMALL LETTER U
0x00f4: 0x0444, # CYRILLIC SMALL LETTER EF
0x00f5: 0x0445, # CYRILLIC SMALL LETTER HA
0x00f6: 0x0446, # CYRILLIC SMALL LETTER TSE
0x00f7: 0x0447, # CYRILLIC SMALL LETTER CHE
0x00f8: 0x0448, # CYRILLIC SMALL LETTER SHA
0x00f9: 0x0449, # CYRILLIC SMALL LETTER SHCHA
0x00fa: 0x044a, # CYRILLIC SMALL LETTER HARD SIGN
0x00fb: 0x044b, # CYRILLIC SMALL LETTER YERU
0x00fc: 0x044c, # CYRILLIC SMALL LETTER SOFT SIGN
0x00fd: 0x044d, # CYRILLIC SMALL LETTER E
0x00fe: 0x044e, # CYRILLIC SMALL LETTER YU
0x00ff: 0x044f, # CYRILLIC SMALL LETTER YA
})
### Encoding Map
encoding_map = codecs.make_encoding_map(decoding_map)
| mit | -2,846,169,505,267,492,000 | 50.142857 | 81 | 0.616648 | false |
klml/kohrsupply | settings.example.py | 1 | 3449 | """
Django settings for kohrsupply project.
Generated by 'django-admin startproject' using Django 1.10.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '0a1lbb-4iff4@clml6^wk8j_x1t@+0qfp%bsa1o4@rz%p+o_9b'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = [
'127.0.0.1'
]
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = ''
# Application definition
INSTALLED_APPS = [
'kohrsupply.apps.kohrsupplyConfig',
'django.contrib.sites',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'kohrsupply.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
"kohrsupply.context_processors.userstate",
"kohrsupply.context_processors.usersLocations",
"kohrsupply.context_processors.settings",
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'kohrsupply.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'de-de'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# kohrsupply
KOHR_MAP = True
| mit | 5,635,828,932,064,017,000 | 23.820144 | 91 | 0.682227 | false |
aayush2911/Fibonaccio | web2py/gluon/packages/dal/pydal/adapters/imap.py | 8 | 43003 | # -*- coding: utf-8 -*-
import datetime
import re
import sys
from .._globals import IDENTITY, GLOBAL_LOCKER
from .._compat import PY2, integer_types, basestring
from ..connection import ConnectionPool
from ..objects import Field, Query, Expression
from ..helpers.classes import SQLALL
from ..helpers.methods import use_common_filters
from .base import NoSQLAdapter
long = integer_types[-1]
class IMAPAdapter(NoSQLAdapter):
""" IMAP server adapter
This class is intended as an interface with
email IMAP servers to perform simple queries in the
web2py DAL query syntax, so email read, search and
    other related IMAP mail services (such as those implemented
    by providers like Google(r) and Yahoo!(r))
    can be managed from web2py applications.
The code uses examples by Yuji Tomita on this post:
http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137
and is based in docs for Python imaplib, python email
and email IETF's (i.e. RFC2060 and RFC3501)
This adapter was tested with a small set of operations with Gmail(r). Other
services requests could raise command syntax and response data issues.
It creates its table and field names "statically",
meaning that the developer should leave the table and field
definitions to the DAL instance by calling the adapter's
.define_tables() method. The tables are defined with the
IMAP server mailbox list information.
.define_tables() returns a dictionary mapping dal tablenames
to the server mailbox names with the following structure:
{<tablename>: str <server mailbox name>}
Here is a list of supported fields:
=========== ============== ===========
Field Type Description
=========== ============== ===========
uid string
answered boolean Flag
created date
content list:string A list of dict text or html parts
to string
cc string
bcc string
size integer the amount of octets of the message*
deleted boolean Flag
draft boolean Flag
flagged boolean Flag
sender string
recent boolean Flag
seen boolean Flag
subject string
mime string The mime header declaration
email string The complete RFC822 message (*)
attachments list Each non text part as dict
encoding string The main detected encoding
=========== ============== ===========
(*) At the application side it is measured as the length of the RFC822
message string
WARNING: As row id's are mapped to email sequence numbers,
make sure your imap client web2py app does not delete messages
during select or update actions, to prevent
updating or deleting different messages.
Sequence numbers change whenever the mailbox is updated.
    To avoid these sequence number issues, the use of uid fields
    in query references is recommended (although the update and delete
    in separate actions rule still applies).
::
# This is the code recommended to start imap support
# at the app's model:
imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
imapdb.define_tables()
Here is an (incomplete) list of possible imap commands::
# Count today's unseen messages
# smaller than 6000 octets from the
# inbox mailbox
q = imapdb.INBOX.seen == False
q &= imapdb.INBOX.created == datetime.date.today()
q &= imapdb.INBOX.size < 6000
unread = imapdb(q).count()
# Fetch last query messages
rows = imapdb(q).select()
# it is also possible to filter query select results with limitby and
# sequences of mailbox fields
set.select(<fields sequence>, limitby=(<int>, <int>))
# Mark last query messages as seen
messages = [row.uid for row in rows]
seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)
# Delete messages in the imap database that have mails from mr. Gumby
deleted = 0
for mailbox in imapdb.tables
deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
    # It is also possible to mark messages for deletion instead of erasing them
# directly with set.update(deleted=True)
# This object give access
# to the adapter auto mailbox
# mapped names (which native
# mailbox has what table name)
imapdb.mailboxes <dict> # tablename, server native name pairs
# To retrieve a table native mailbox name use:
imapdb.<table>.mailbox
### New features v2.4.1:
# Declare mailboxes statically with tablename, name pairs
# This avoids the extra server names retrieval
imapdb.define_tables({"inbox": "INBOX"})
# Selects without content/attachments/email columns will only
# fetch header and flags
imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
"""
drivers = ('imaplib',)
types = {
'string': str,
'text': str,
'date': datetime.date,
'datetime': datetime.datetime,
'id': long,
'boolean': bool,
'integer': int,
'bigint': long,
'blob': str,
'list:string': str
}
dbengine = 'imap'
REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$')
def __init__(self,
db,
uri,
pool_size=0,
folder=None,
db_codec ='UTF-8',
credential_decoder=IDENTITY,
driver_args={},
adapter_args={},
do_connect=True,
after_connection=None):
super(IMAPAdapter, self).__init__(
db=db,
uri=uri,
pool_size=pool_size,
folder=folder,
db_codec=db_codec,
credential_decoder=credential_decoder,
driver_args=driver_args,
adapter_args=adapter_args,
do_connect=do_connect,
after_connection=after_connection)
# db uri: [email protected]:[email protected]:123
# TODO: max size adapter argument for preventing large mail transfers
if do_connect: self.find_driver(adapter_args)
self.credential_decoder = credential_decoder
self.driver_args = driver_args
self.adapter_args = adapter_args
self.mailbox_size = None
self.static_names = None
self.charset = sys.getfilesystemencoding()
# imap class
self.imap4 = None
uri = uri.split("://")[1]
""" MESSAGE is an identifier for sequence number"""
self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
'flagged': '\\Flagged', 'recent': '\\Recent',
'seen': '\\Seen', 'answered': '\\Answered'}
self.search_fields = {
'id': 'MESSAGE', 'created': 'DATE',
'uid': 'UID', 'sender': 'FROM',
'to': 'TO', 'cc': 'CC',
'bcc': 'BCC', 'content': 'TEXT',
'size': 'SIZE', 'deleted': '\\Deleted',
'draft': '\\Draft', 'flagged': '\\Flagged',
'recent': '\\Recent', 'seen': '\\Seen',
'subject': 'SUBJECT', 'answered': '\\Answered',
'mime': None, 'email': None,
'attachments': None
}
db['_lastsql'] = ''
m = self.REGEX_URI.match(uri)
user = m.group('user')
password = m.group('password')
host = m.group('host')
port = int(m.group('port'))
over_ssl = False
if port==993:
over_ssl = True
driver_args.update(host=host,port=port, password=password, user=user)
def connector(driver_args=driver_args):
            # it is assumed successful authentication always
# TODO: support direct connection and login tests
if over_ssl:
self.imap4 = self.driver.IMAP4_SSL
else:
self.imap4 = self.driver.IMAP4
connection = self.imap4(driver_args["host"], driver_args["port"])
data = connection.login(driver_args["user"], driver_args["password"])
# static mailbox list
connection.mailbox_names = None
# dummy dbapi functions
connection.cursor = lambda : self.fake_cursor
connection.close = lambda : None
connection.commit = lambda : None
return connection
self.db.define_tables = self.define_tables
self.connector = connector
if do_connect: self.reconnect()
def reconnect(self, f=None):
"""
IMAP4 Pool connection method
        The imap connection lacks a cursor command of its own.
A custom command should be provided as a replacement
for connection pooling to prevent uncaught remote session
closing
"""
if getattr(self, 'connection', None) is not None:
return
if f is None:
f = self.connector
if not self.pool_size:
self.connection = f()
self.cursor = self.connection.cursor()
else:
POOLS = ConnectionPool.POOLS
uri = self.uri
while True:
GLOBAL_LOCKER.acquire()
if not uri in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
self.cursor = self.connection.cursor()
if self.cursor and self.check_active_connection:
try:
# check if connection is alive or close it
result, data = self.connection.list()
except:
# Possible connection reset error
# TODO: read exception class
self.connection = f()
break
else:
GLOBAL_LOCKER.release()
self.connection = f()
self.cursor = self.connection.cursor()
break
self.after_connection_hook()
def get_last_message(self, tablename):
last_message = None
# request mailbox list to the server if needed.
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
try:
result = self.connection.select(
self.connection.mailbox_names[tablename])
last_message = int(result[1][0])
# Last message must be a positive integer
if last_message == 0:
last_message = 1
except (IndexError, ValueError, TypeError, KeyError):
e = sys.exc_info()[1]
self.db.logger.debug("Error retrieving the last mailbox" +
" sequence number. %s" % str(e))
return last_message
def get_uid_bounds(self, tablename):
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
# fetch first and last messages
# return (first, last) messages uid's
last_message = self.get_last_message(tablename)
result, data = self.connection.uid("search", None, "(ALL)")
uid_list = data[0].strip().split()
if len(uid_list) <= 0:
return None
else:
return (uid_list[0], uid_list[-1])
    def convert_date(self, date, add=None, imf=False):
        """ Convert a date object to a string
            with d-Mon-Y style for IMAP or the inverse
            case
        add <timedelta> adds to the date object
        """
        if add is None:
            add = datetime.timedelta()
months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
"JUL", "AUG","SEP","OCT","NOV","DEC"]
if isinstance(date, basestring):
# Prevent unexpected date response format
try:
if "," in date:
dayname, datestring = date.split(",")
else:
dayname, datestring = None, date
date_list = datestring.strip().split()
year = int(date_list[2])
month = months.index(date_list[1].upper())
day = int(date_list[0])
hms = list(map(int, date_list[3].split(":")))
return datetime.datetime(year, month, day,
hms[0], hms[1], hms[2]) + add
except (ValueError, AttributeError, IndexError) as e:
self.db.logger.error("Could not parse date text: %s. %s" %
(date, e))
return None
elif isinstance(date, (datetime.date, datetime.datetime)):
if imf: date_format = "%a, %d %b %Y %H:%M:%S %z"
else: date_format = "%d-%b-%Y"
return (date + add).strftime(date_format)
else:
return None
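    # Illustrative sketch (added for clarity, not part of the original class):
    # convert_date works in both directions, e.g. (the dates below are made up,
    # and the %b output depends on the locale):
    #
    #     adapter.convert_date(datetime.date(2015, 7, 23))
    #     # -> "23-Jul-2015"
    #     adapter.convert_date("Thu, 23 Jul 2015 12:01:02 +0000")
    #     # -> datetime.datetime(2015, 7, 23, 12, 1, 2)
    #
    # Passing add=datetime.timedelta(days=1) shifts the result by one day.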
@staticmethod
def header_represent(f, r):
from email.header import decode_header
text, encoding = decode_header(f)[0]
if encoding:
text = text.decode(encoding).encode('utf-8')
return text
def encode_text(self, text, charset, errors="replace"):
""" convert text for mail to unicode"""
if text is None:
text = ""
if PY2:
if isinstance(text, str):
if charset is None:
text = unicode(text, "utf-8", errors)
else:
text = unicode(text, charset, errors)
else:
raise Exception("Unsupported mail text type %s" % type(text))
return text.encode("utf-8")
else:
if isinstance(text, bytes):
return text.decode("utf-8")
return text
def get_charset(self, message):
charset = message.get_content_charset()
return charset
def get_mailboxes(self):
""" Query the mail database for mailbox names """
if self.static_names:
# statically defined mailbox names
self.connection.mailbox_names = self.static_names
return self.static_names.keys()
mailboxes_list = self.connection.list()
self.connection.mailbox_names = dict()
mailboxes = list()
x = 0
for item in mailboxes_list[1]:
x = x + 1
item = item.strip()
if not "NOSELECT" in item.upper():
sub_items = item.split("\"")
sub_items = [sub_item for sub_item in sub_items \
if len(sub_item.strip()) > 0]
# mailbox = sub_items[len(sub_items) -1]
mailbox = sub_items[-1].strip()
# remove unwanted characters and store original names
# Don't allow leading non alphabetic characters
mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
mailboxes.append(mailbox_name)
self.connection.mailbox_names[mailbox_name] = mailbox
return mailboxes
def get_query_mailbox(self, query):
nofield = True
tablename = None
attr = query
while nofield:
if hasattr(attr, "first"):
attr = attr.first
if isinstance(attr, Field):
return attr.tablename
elif isinstance(attr, Query):
pass
else:
return None
else:
return None
return tablename
def is_flag(self, flag):
if self.search_fields.get(flag, None) in self.flags.values():
return True
else:
return False
def define_tables(self, mailbox_names=None):
"""
        Auto create common IMAP fields
This function creates fields definitions "statically"
meaning that custom fields as in other adapters should
not be supported and definitions handled on a service/mode
basis (local syntax for Gmail(r), Ymail(r)
Returns a dictionary with tablename, server native mailbox name
pairs.
"""
if mailbox_names:
# optional statically declared mailboxes
self.static_names = mailbox_names
else:
self.static_names = None
if not isinstance(self.connection.mailbox_names, dict):
self.get_mailboxes()
names = self.connection.mailbox_names.keys()
for name in names:
self.db.define_table("%s" % name,
Field("uid", writable=False),
Field("created", "datetime", writable=False),
Field("content", "text", writable=False),
Field("to", writable=False),
Field("cc", writable=False),
Field("bcc", writable=False),
Field("sender", writable=False),
Field("size", "integer", writable=False),
Field("subject", writable=False),
Field("mime", writable=False),
Field("email", "text", writable=False, readable=False),
Field("attachments", "text", writable=False, readable=False),
Field("encoding", writable=False),
Field("answered", "boolean"),
Field("deleted", "boolean"),
Field("draft", "boolean"),
Field("flagged", "boolean"),
Field("recent", "boolean", writable=False),
Field("seen", "boolean")
)
# Set a special _mailbox attribute for storing
# native mailbox names
self.db[name].mailbox = \
self.connection.mailbox_names[name]
# decode quoted printable
self.db[name].to.represent = self.db[name].cc.represent = \
self.db[name].bcc.represent = self.db[name].sender.represent = \
self.db[name].subject.represent = self.header_represent
# Set the db instance mailbox collections
self.db.mailboxes = self.connection.mailbox_names
return self.db.mailboxes
def create_table(self, *args, **kwargs):
# not implemented
# but required by DAL
pass
def select(self, query, fields, attributes):
""" Searches and Fetches records and return web2py rows
"""
# move this statement elsewhere (upper-level)
if use_common_filters(query):
query = self.common_filter(query, [self.get_query_mailbox(query),])
import email
# get records from imap server with search + fetch
# convert results to a dictionary
tablename = None
fetch_results = list()
if isinstance(query, Query):
tablename = self.get_table(query)
mailbox = self.connection.mailbox_names.get(tablename, None)
if mailbox is None:
raise ValueError("Mailbox name not found: %s" % mailbox)
else:
# select with readonly
result, selected = self.connection.select(mailbox, True)
if result != "OK":
raise Exception("IMAP error: %s" % selected)
self.mailbox_size = int(selected[0])
search_query = "(%s)" % str(query).strip()
search_result = self.connection.uid("search", None, search_query)
# Normal IMAP response OK is assumed (change this)
if search_result[0] == "OK":
# For "light" remote server responses just get the first
# ten records (change for non-experimental implementation)
# However, light responses are not guaranteed with this
# approach, just fewer messages.
limitby = attributes.get('limitby', None)
messages_set = search_result[1][0].split()
# descending order
messages_set.reverse()
if limitby is not None:
# TODO: orderby, asc/desc, limitby from complete message set
messages_set = messages_set[int(limitby[0]):int(limitby[1])]
# keep the requests small for header/flags
if any([(field.name in ["content", "size",
"attachments", "email"]) for
field in fields]):
imap_fields = "(RFC822 FLAGS)"
else:
imap_fields = "(RFC822.HEADER FLAGS)"
if len(messages_set) > 0:
# create fetch results object list
                        # fetch each remote message and store it in memory
# (change to multi-fetch command syntax for faster
# transactions)
for uid in messages_set:
# fetch the RFC822 message body
typ, data = self.connection.uid("fetch", uid, imap_fields)
if typ == "OK":
fr = {"message": int(data[0][0].split()[0]),
"uid": long(uid),
"email": email.message_from_string(data[0][1]),
"raw_message": data[0][1]}
fr["multipart"] = fr["email"].is_multipart()
# fetch flags for the message
if PY2:
fr["flags"] = self.driver.ParseFlags(data[1])
else:
fr["flags"] = self.driver.ParseFlags(
bytes(data[1], "utf-8"))
fetch_results.append(fr)
else:
# error retrieving the message body
raise Exception("IMAP error retrieving the body: %s" % data)
else:
raise Exception("IMAP search error: %s" % search_result[1])
elif isinstance(query, (Expression, basestring)):
raise NotImplementedError()
else:
raise TypeError("Unexpected query type")
imapqry_dict = {}
imapfields_dict = {}
if len(fields) == 1 and isinstance(fields[0], SQLALL):
allfields = True
elif len(fields) == 0:
allfields = True
else:
allfields = False
if allfields:
colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
else:
colnames = ["%s.%s" % (tablename, field.name) for field in fields]
for k in colnames:
imapfields_dict[k] = k
imapqry_list = list()
imapqry_array = list()
for fr in fetch_results:
attachments = []
content = []
size = 0
n = int(fr["message"])
item_dict = dict()
message = fr["email"]
uid = fr["uid"]
charset = self.get_charset(message)
flags = fr["flags"]
raw_message = fr["raw_message"]
# Return messages data mapping static fields
# and fetched results. Mapping should be made
# outside the select function (with auxiliary
# instance methods)
            # pending: search flags states through the email message
# instances for correct output
# preserve subject encoding (ASCII/quoted printable)
if "%s.id" % tablename in colnames:
item_dict["%s.id" % tablename] = n
if "%s.created" % tablename in colnames:
item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
if "%s.uid" % tablename in colnames:
item_dict["%s.uid" % tablename] = uid
if "%s.sender" % tablename in colnames:
# If there is no encoding found in the message header
# force utf-8 replacing characters (change this to
# module's defaults). Applies to .sender, .to, .cc and .bcc fields
item_dict["%s.sender" % tablename] = message["From"]
if "%s.to" % tablename in colnames:
item_dict["%s.to" % tablename] = message["To"]
if "%s.cc" % tablename in colnames:
if "Cc" in message.keys():
item_dict["%s.cc" % tablename] = message["Cc"]
else:
item_dict["%s.cc" % tablename] = ""
if "%s.bcc" % tablename in colnames:
if "Bcc" in message.keys():
item_dict["%s.bcc" % tablename] = message["Bcc"]
else:
item_dict["%s.bcc" % tablename] = ""
if "%s.deleted" % tablename in colnames:
item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
if "%s.draft" % tablename in colnames:
item_dict["%s.draft" % tablename] = "\\Draft" in flags
if "%s.flagged" % tablename in colnames:
item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
if "%s.recent" % tablename in colnames:
item_dict["%s.recent" % tablename] = "\\Recent" in flags
if "%s.seen" % tablename in colnames:
item_dict["%s.seen" % tablename] = "\\Seen" in flags
if "%s.subject" % tablename in colnames:
item_dict["%s.subject" % tablename] = message["Subject"]
if "%s.answered" % tablename in colnames:
item_dict["%s.answered" % tablename] = "\\Answered" in flags
if "%s.mime" % tablename in colnames:
item_dict["%s.mime" % tablename] = message.get_content_type()
if "%s.encoding" % tablename in colnames:
item_dict["%s.encoding" % tablename] = charset
# Here goes the whole RFC822 body as an email instance
# for controller side custom processing
# The message is stored as a raw string
# >> email.message_from_string(raw string)
# returns a Message object for enhanced object processing
if "%s.email" % tablename in colnames:
# WARNING: no encoding performed (raw message)
item_dict["%s.email" % tablename] = raw_message
# Size measure as suggested in a Velocity Reviews post
# by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports don't match
# To retrieve the server size for representation would add a new
# fetch transaction to the process
for part in message.walk():
maintype = part.get_content_maintype()
if ("%s.attachments" % tablename in colnames) or \
("%s.content" % tablename in colnames):
payload = part.get_payload(decode=True)
if payload:
filename = part.get_filename()
values = {"mime": part.get_content_type()}
if ((filename or not "text" in maintype) and
("%s.attachments" % tablename in colnames)):
values.update({"payload": payload,
"filename": filename,
"encoding": part.get_content_charset(),
"disposition": part["Content-Disposition"]})
attachments.append(values)
elif (("text" in maintype) and
("%s.content" % tablename in colnames)):
values.update({"text": self.encode_text(payload,
self.get_charset(part))})
content.append(values)
if "%s.size" % tablename in colnames:
if part is not None:
size += len(str(part))
item_dict["%s.content" % tablename] = content
item_dict["%s.attachments" % tablename] = attachments
item_dict["%s.size" % tablename] = size
imapqry_list.append(item_dict)
# extra object mapping for the sake of rows object
# creation (sends an array or lists)
for item_dict in imapqry_list:
imapqry_array_item = list()
for fieldname in colnames:
imapqry_array_item.append(item_dict[fieldname])
imapqry_array.append(imapqry_array_item)
# parse result and return a rows object
colnames = colnames
processor = attributes.get('processor',self.parse)
return processor(imapqry_array, fields, colnames)
def insert(self, table, fields):
def add_payload(message, obj):
payload = Message()
encoding = obj.get("encoding", "utf-8")
if encoding and (encoding.upper() in
("BASE64", "7BIT", "8BIT", "BINARY")):
payload.add_header("Content-Transfer-Encoding", encoding)
else:
payload.set_charset(encoding)
mime = obj.get("mime", None)
if mime:
payload.set_type(mime)
if "text" in obj:
payload.set_payload(obj["text"])
elif "payload" in obj:
payload.set_payload(obj["payload"])
if "filename" in obj and obj["filename"]:
payload.add_header("Content-Disposition",
"attachment", filename=obj["filename"])
message.attach(payload)
mailbox = table.mailbox
d = dict(((k.name, v) for k, v in fields))
date_time = d.get("created") or datetime.datetime.now()
struct_time = date_time.timetuple()
if len(d) > 0:
message = d.get("email", None)
attachments = d.get("attachments", [])
content = d.get("content", [])
flags = " ".join(["\\%s" % flag.capitalize() for flag in
("answered", "deleted", "draft", "flagged",
"recent", "seen") if d.get(flag, False)])
if not message:
from email.message import Message
mime = d.get("mime", None)
charset = d.get("encoding", None)
message = Message()
message["from"] = d.get("sender", "")
message["subject"] = d.get("subject", "")
message["date"] = self.convert_date(date_time, imf=True)
if mime:
message.set_type(mime)
if charset:
message.set_charset(charset)
for item in ("to", "cc", "bcc"):
value = d.get(item, "")
if isinstance(value, basestring):
message[item] = value
else:
message[item] = ";".join([i for i in
value])
if (not message.is_multipart() and
(not message.get_content_type().startswith(
"multipart"))):
if isinstance(content, basestring):
message.set_payload(content)
elif len(content) > 0:
message.set_payload(content[0]["text"])
else:
[add_payload(message, c) for c in content]
[add_payload(message, a) for a in attachments]
message = message.as_string()
result, data = self.connection.append(mailbox, flags, struct_time, message)
if result == "OK":
uid = int(re.findall("\d+", str(data))[-1])
return self.db(table.uid==uid).select(table.id).first().id
else:
raise Exception("IMAP message append failed: %s" % data)
else:
raise NotImplementedError("IMAP empty insert is not implemented")
def update(self, tablename, query, fields):
# TODO: the adapter should implement an .expand method
commands = list()
rowcount = 0
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
mark = []
unmark = []
if query:
for item in fields:
field = item[0]
name = field.name
value = item[1]
if self.is_flag(name):
flag = self.search_fields[name]
if (value is not None) and (flag != "\\Recent"):
if value:
mark.append(flag)
else:
unmark.append(flag)
result, data = self.connection.select(
self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split()
if item.strip().isdigit()]
# build commands for marked flags
for number in store_list:
result = None
if len(mark) > 0:
commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
if len(unmark) > 0:
commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
for command in commands:
result, data = self.connection.store(*command)
if result == "OK":
rowcount += 1
else:
raise Exception("IMAP storing error: %s" % data)
return rowcount
def count(self,query,distinct=None):
counter = 0
tablename = self.get_query_mailbox(query)
if query and tablename is not None:
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
result, data = self.connection.select(self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
counter = len(store_list)
return counter
def delete(self, tablename, query):
counter = 0
if query:
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
result, data = self.connection.select(self.connection.mailbox_names[tablename])
string_query = "(%s)" % query
result, data = self.connection.search(None, string_query)
store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
for number in store_list:
result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
if result == "OK":
counter += 1
else:
raise Exception("IMAP store error: %s" % data)
if counter > 0:
result, data = self.connection.expunge()
return counter
def BELONGS(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
values = [str(val) for val in second if str(val).isdigit()]
result = "%s" % ",".join(values).strip()
elif name == "UID":
values = [str(val) for val in second if str(val).isdigit()]
result = "UID %s" % ",".join(values).strip()
else:
raise Exception("Operation not supported")
# result = "(%s %s)" % (self.expand(first), self.expand(second))
return result
def CONTAINS(self, first, second, case_sensitive=False):
# silently ignore, only case sensitive
result = None
name = self.search_fields[first.name]
if name in ("FROM", "TO", "SUBJECT", "TEXT"):
result = "%s \"%s\"" % (name, self.expand(second))
else:
if first.name in ("cc", "bcc"):
result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
elif first.name == "mime":
result = "HEADER Content-Type \"%s\"" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
lower_limit = int(self.expand(second)) + 1
except (ValueError, TypeError):
e = sys.exc_info()[1]
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
elif name == "SIZE":
result = "LARGER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def GE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
last_message = self.get_last_message(first.tablename)
result = "%s:%s" % (self.expand(second), last_message)
elif name == "UID":
# GT and LT may not return
# expected sets depending on
# the uid format implemented
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
lower_limit = self.expand(second)
result = "UID %s:%s" % (lower_limit, threshold)
elif name == "DATE":
result = "SINCE %s" % self.convert_date(second)
else:
raise Exception("Operation not supported")
return result
def LT(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, int(self.expand(second)) - 1)
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
try:
upper_limit = int(self.expand(second)) - 1
except (ValueError, TypeError):
e = sys.exc_info()[1]
raise Exception("Operation not supported (non integer UID)")
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second)
elif name == "SIZE":
result = "SMALLER %s" % self.expand(second)
else:
raise Exception("Operation not supported")
return result
def LE(self, first, second):
result = None
name = self.search_fields[first.name]
if name == "MESSAGE":
result = "%s:%s" % (1, self.expand(second))
elif name == "UID":
try:
pedestal, threshold = self.get_uid_bounds(first.tablename)
except TypeError:
e = sys.exc_info()[1]
self.db.logger.debug("Error requesting uid bounds: %s", str(e))
return ""
upper_limit = int(self.expand(second))
result = "UID %s:%s" % (pedestal, upper_limit)
elif name == "DATE":
result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
else:
raise Exception("Operation not supported")
return result
def NE(self, first, second=None):
if (second is None) and isinstance(first, Field):
# All records special table query
if first.type == "id":
return self.GE(first, 1)
result = self.NOT(self.EQ(first, second))
result = result.replace("NOT NOT", "").strip()
return result
def EQ(self,first,second):
name = self.search_fields[first.name]
result = None
if name is not None:
if name == "MESSAGE":
# query by message sequence number
result = "%s" % self.expand(second)
elif name == "UID":
result = "UID %s" % self.expand(second)
elif name == "DATE":
result = "ON %s" % self.convert_date(second)
elif name in self.flags.values():
if second:
result = "%s" % (name.upper()[1:])
else:
result = "NOT %s" % (name.upper()[1:])
else:
raise Exception("Operation not supported")
else:
raise Exception("Operation not supported")
return result
def AND(self, first, second):
result = "%s %s" % (self.expand(first), self.expand(second))
return result
def OR(self, first, second):
result = "OR %s %s" % (self.expand(first), self.expand(second))
return "%s" % result.replace("OR OR", "OR")
def NOT(self, first):
result = "NOT %s" % self.expand(first)
return result
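# --- illustrative usage sketch (not part of the adapter) --------------------
# A minimal sketch of how this IMAP adapter is typically driven through the
# DAL. The connection URI, the "INBOX" mailbox and the credentials below are
# assumptions for demonstration only, and the exact entry points may differ
# between DAL versions; the select()/update() methods defined above do the
# actual IMAP work.
def _example_imap_adapter_usage():  # never executed; documentation only
    imapdb = DAL("imap://user:password@mail.example.com:993", pool_size=1)
    imapdb.define_tables()  # one table per mailbox, e.g. imapdb.INBOX
    # unread messages, newest first, translated into an IMAP SEARCH + FETCH
    unread = imapdb(imapdb.INBOX.seen == False).select(limitby=(0, 10))
    subjects = [row.subject for row in unread]
    return subjects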
| gpl-2.0 | 3,524,233,606,457,079,000 | 39.838557 | 110 | 0.519173 | false |
cmdunkers/DeeperMind | PythonEnv/lib/python2.7/site-packages/pip/_vendor/requests/cookies.py | 413 | 17191 | # -*- coding: utf-8 -*-
"""
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
"""Wraps a `requests.Request` to mimic a `urllib2.Request`.
The code in `cookielib.CookieJar` expects this interface in order to correctly
manage cookie policies, i.e., determine whether a cookie can be set, given the
domains of the request and the cookie.
The original request object is read-only. The client is responsible for collecting
the new headers via `get_new_headers()` and interpreting them appropriately. You
probably want `get_cookie_header`, defined below.
"""
def __init__(self, request):
self._r = request
self._new_headers = {}
self.type = urlparse(self._r.url).scheme
def get_type(self):
return self.type
def get_host(self):
return urlparse(self._r.url).netloc
def get_origin_req_host(self):
return self.get_host()
def get_full_url(self):
# Only return the response's URL if the user hadn't set the Host
# header
if not self._r.headers.get('Host'):
return self._r.url
# If they did set it, retrieve it and reconstruct the expected domain
host = self._r.headers['Host']
parsed = urlparse(self._r.url)
# Reconstruct the URL as we expect it
return urlunparse([
parsed.scheme, host, parsed.path, parsed.params, parsed.query,
parsed.fragment
])
def is_unverifiable(self):
return True
def has_header(self, name):
return name in self._r.headers or name in self._new_headers
def get_header(self, name, default=None):
return self._r.headers.get(name, self._new_headers.get(name, default))
def add_header(self, key, val):
"""cookielib has no legitimate use for this method; add it back if you find one."""
raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")
def add_unredirected_header(self, name, value):
self._new_headers[name] = value
def get_new_headers(self):
return self._new_headers
@property
def unverifiable(self):
return self.is_unverifiable()
@property
def origin_req_host(self):
return self.get_origin_req_host()
@property
def host(self):
return self.get_host()
class MockResponse(object):
"""Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.
...what? Basically, expose the parsed HTTP headers from the server response
the way `cookielib` expects to see them.
"""
def __init__(self, headers):
"""Make a MockResponse for `cookielib` to read.
:param headers: a httplib.HTTPMessage or analogous carrying the headers
"""
self._headers = headers
def info(self):
return self._headers
def getheaders(self, name):
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
"""Extract the cookies from the response into a CookieJar.
:param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
:param request: our own requests.Request object
:param response: urllib3.HTTPResponse object
"""
if not (hasattr(response, '_original_response') and
response._original_response):
return
# the _original_response field is the wrapped httplib.HTTPResponse object,
req = MockRequest(request)
# pull out the HTTPMessage with the headers and put it in the mock:
res = MockResponse(response._original_response.msg)
jar.extract_cookies(res, req)
def get_cookie_header(jar, request):
"""Produce an appropriate Cookie header string to be sent with `request`, or None."""
r = MockRequest(request)
jar.add_cookie_header(r)
return r.get_new_headers().get('Cookie')
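# Illustrative sketch (not part of requests): how get_cookie_header() and a
# plain cookielib.CookieJar cooperate. The _FakeRequest class is an assumption
# standing in for a prepared requests.Request with .url and .headers.
def _example_cookie_header_usage():  # never executed; documentation only
    class _FakeRequest(object):
        url = 'http://example.com/'
        headers = {}
    jar = cookielib.CookieJar()
    header = get_cookie_header(jar, _FakeRequest())
    # None while the jar is empty; once extract_cookies_to_jar() has run
    # against a real response, the same call yields "name=value" pairs.
    return header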
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
"""Unsets a cookie by name, by default over all domains and paths.
Wraps CookieJar.clear(), is O(n).
"""
clearables = []
for cookie in cookiejar:
if cookie.name == name:
if domain is None or domain == cookie.domain:
if path is None or path == cookie.path:
clearables.append((cookie.domain, cookie.path, cookie.name))
for domain, path, name in clearables:
cookiejar.clear(domain, path, name)
class CookieConflictError(RuntimeError):
"""There are two cookies that meet the criteria specified in the cookie jar.
Use .get and .set and include domain and path args in order to be more specific."""
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
"""Compatibility class; is a cookielib.CookieJar, but exposes a dict
interface.
This is the CookieJar we create by default for requests and sessions that
don't specify one, since some clients may expect response.cookies and
session.cookies to support dict operations.
Requests does not use the dict interface internally; it's just for
compatibility with external client code. All requests code should work
out of the box with externally provided instances of ``CookieJar``, e.g.
``LWPCookieJar`` and ``FileCookieJar``.
Unlike a regular CookieJar, this class is pickleable.
.. warning:: dictionary operations that are normally O(1) may be O(n).
"""
def get(self, name, default=None, domain=None, path=None):
"""Dict-like get() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains.
.. warning:: operation is O(n), not O(1)."""
try:
return self._find_no_duplicates(name, domain, path)
except KeyError:
return default
def set(self, name, value, **kwargs):
"""Dict-like set() that also supports optional domain and path args in
order to resolve naming collisions from using one cookie jar over
multiple domains."""
# support client code that unsets cookies by assignment of a None value:
if value is None:
remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
return
if isinstance(value, Morsel):
c = morsel_to_cookie(value)
else:
c = create_cookie(name, value, **kwargs)
self.set_cookie(c)
return c
def iterkeys(self):
"""Dict-like iterkeys() that returns an iterator of names of cookies
from the jar. See itervalues() and iteritems()."""
for cookie in iter(self):
yield cookie.name
def keys(self):
"""Dict-like keys() that returns a list of names of cookies from the
jar. See values() and items()."""
return list(self.iterkeys())
def itervalues(self):
"""Dict-like itervalues() that returns an iterator of values of cookies
from the jar. See iterkeys() and iteritems()."""
for cookie in iter(self):
yield cookie.value
def values(self):
"""Dict-like values() that returns a list of values of cookies from the
jar. See keys() and items()."""
return list(self.itervalues())
def iteritems(self):
"""Dict-like iteritems() that returns an iterator of name-value tuples
from the jar. See iterkeys() and itervalues()."""
for cookie in iter(self):
yield cookie.name, cookie.value
def items(self):
"""Dict-like items() that returns a list of name-value tuples from the
jar. See keys() and values(). Allows client-code to call
``dict(RequestsCookieJar)`` and get a vanilla python dict of key value
pairs."""
return list(self.iteritems())
def list_domains(self):
"""Utility method to list all the domains in the jar."""
domains = []
for cookie in iter(self):
if cookie.domain not in domains:
domains.append(cookie.domain)
return domains
def list_paths(self):
"""Utility method to list all the paths in the jar."""
paths = []
for cookie in iter(self):
if cookie.path not in paths:
paths.append(cookie.path)
return paths
def multiple_domains(self):
"""Returns True if there are multiple domains in the jar.
Returns False otherwise."""
domains = []
for cookie in iter(self):
if cookie.domain is not None and cookie.domain in domains:
return True
domains.append(cookie.domain)
return False # there is only one domain in jar
def get_dict(self, domain=None, path=None):
"""Takes as an argument an optional domain and path and returns a plain
old Python dict of name-value pairs of cookies that meet the
requirements."""
dictionary = {}
for cookie in iter(self):
if (domain is None or cookie.domain == domain) and (path is None
or cookie.path == path):
dictionary[cookie.name] = cookie.value
return dictionary
def __getitem__(self, name):
"""Dict-like __getitem__() for compatibility with client code. Throws
exception if there are more than one cookie with name. In that case,
use the more explicit get() method instead.
.. warning:: operation is O(n), not O(1)."""
return self._find_no_duplicates(name)
def __setitem__(self, name, value):
"""Dict-like __setitem__ for compatibility with client code. Throws
exception if there is already a cookie of that name in the jar. In that
case, use the more explicit set() method instead."""
self.set(name, value)
def __delitem__(self, name):
"""Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
``remove_cookie_by_name()``."""
remove_cookie_by_name(self, name)
def set_cookie(self, cookie, *args, **kwargs):
if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
cookie.value = cookie.value.replace('\\"', '')
return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)
def update(self, other):
"""Updates this jar with cookies from another CookieJar or dict-like"""
if isinstance(other, cookielib.CookieJar):
for cookie in other:
self.set_cookie(copy.copy(cookie))
else:
super(RequestsCookieJar, self).update(other)
def _find(self, name, domain=None, path=None):
"""Requests uses this method internally to get cookie values. Takes as
args name and optional domain and path. Returns a cookie.value. If
there are conflicting cookies, _find arbitrarily chooses one. See
_find_no_duplicates if you want an exception thrown if there are
conflicting cookies."""
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
return cookie.value
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def _find_no_duplicates(self, name, domain=None, path=None):
"""Both ``__get_item__`` and ``get`` call this function: it's never
used elsewhere in Requests. Takes as args name and optional domain and
path. Returns a cookie.value. Throws KeyError if cookie is not found
and CookieConflictError if there are multiple cookies that match name
and optionally domain and path."""
toReturn = None
for cookie in iter(self):
if cookie.name == name:
if domain is None or cookie.domain == domain:
if path is None or cookie.path == path:
if toReturn is not None: # if there are multiple cookies that meet passed in criteria
raise CookieConflictError('There are multiple cookies with name, %r' % (name))
toReturn = cookie.value # we will eventually return this as long as no cookie conflict
if toReturn:
return toReturn
raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))
def __getstate__(self):
"""Unlike a normal CookieJar, this class is pickleable."""
state = self.__dict__.copy()
# remove the unpickleable RLock object
state.pop('_cookies_lock')
return state
def __setstate__(self, state):
"""Unlike a normal CookieJar, this class is pickleable."""
self.__dict__.update(state)
if '_cookies_lock' not in self.__dict__:
self._cookies_lock = threading.RLock()
def copy(self):
"""Return a copy of this RequestsCookieJar."""
new_cj = RequestsCookieJar()
new_cj.update(self)
return new_cj
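# Illustrative sketch (not part of requests): the dict-like surface described
# in the RequestsCookieJar docstring above; names and values are made up.
def _example_requests_cookie_jar_usage():  # never executed; documentation only
    jar = RequestsCookieJar()
    jar.set('token', 'abc', domain='example.com', path='/api')
    jar['session'] = 'xyz'  # plain __setitem__: empty domain, path '/'
    assert jar.get('token', domain='example.com', path='/api') == 'abc'
    assert sorted(jar.keys()) == ['session', 'token']
    return jar.get_dict(domain='example.com')  # {'token': 'abc'}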
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
        # We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
"""Make a cookie from underspecified parameters.
By default, the pair of `name` and `value` will be set for the domain ''
and sent on every request (this is sometimes called a "supercookie").
"""
result = dict(
version=0,
name=name,
value=value,
port=None,
domain='',
path='/',
secure=False,
expires=None,
discard=True,
comment=None,
comment_url=None,
rest={'HttpOnly': None},
rfc2109=False,)
badargs = set(kwargs) - set(result)
if badargs:
err = 'create_cookie() got unexpected keyword arguments: %s'
raise TypeError(err % list(badargs))
result.update(kwargs)
result['port_specified'] = bool(result['port'])
result['domain_specified'] = bool(result['domain'])
result['domain_initial_dot'] = result['domain'].startswith('.')
result['path_specified'] = bool(result['path'])
return cookielib.Cookie(**result)
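# Illustrative sketch (not part of requests): create_cookie() fills in every
# cookielib.Cookie attribute not given explicitly; the values are made up.
def _example_create_cookie_usage():  # never executed; documentation only
    cookie = create_cookie('csrftoken', 'deadbeef',
                           domain='.example.com', secure=True)
    try:
        create_cookie('x', 'y', flavour='chocolate')  # unknown kwarg
    except TypeError:
        pass  # rejected rather than silently ignored
    return cookie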
def morsel_to_cookie(morsel):
"""Convert a Morsel object into a Cookie containing the one k/v pair."""
expires = None
if morsel['max-age']:
expires = time.time() + morsel['max-age']
elif morsel['expires']:
time_template = '%a, %d-%b-%Y %H:%M:%S GMT'
expires = time.mktime(
time.strptime(morsel['expires'], time_template)) - time.timezone
return create_cookie(
comment=morsel['comment'],
comment_url=bool(morsel['comment']),
discard=False,
domain=morsel['domain'],
expires=expires,
name=morsel.key,
path=morsel['path'],
port=None,
rest={'HttpOnly': morsel['httponly']},
rfc2109=False,
secure=bool(morsel['secure']),
value=morsel.value,
version=morsel['version'] or 0,
)
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
"""Returns a CookieJar from a key/value dictionary.
:param cookie_dict: Dict of key/values to insert into CookieJar.
:param cookiejar: (optional) A cookiejar to add the cookies to.
:param overwrite: (optional) If False, will not replace cookies
already in the jar with new ones.
"""
if cookiejar is None:
cookiejar = RequestsCookieJar()
if cookie_dict is not None:
names_from_jar = [cookie.name for cookie in cookiejar]
for name in cookie_dict:
if overwrite or (name not in names_from_jar):
cookiejar.set_cookie(create_cookie(name, cookie_dict[name]))
return cookiejar
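# Illustrative sketch (not part of requests): seeding a jar from a plain dict
# and then topping it up without overwriting existing cookies.
def _example_cookiejar_from_dict_usage():  # never executed; documentation only
    jar = cookiejar_from_dict({'a': '1', 'b': '2'})
    jar = cookiejar_from_dict({'b': 'ignored', 'c': '3'},
                              cookiejar=jar, overwrite=False)
    return sorted((c.name, c.value) for c in jar)  # a/1, b/2, c/3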
def merge_cookies(cookiejar, cookies):
"""Add cookies to cookiejar and returns a merged CookieJar.
:param cookiejar: CookieJar object to add the cookies to.
:param cookies: Dictionary or CookieJar object to be added.
"""
if not isinstance(cookiejar, cookielib.CookieJar):
raise ValueError('You can only merge into CookieJar')
if isinstance(cookies, dict):
cookiejar = cookiejar_from_dict(
cookies, cookiejar=cookiejar, overwrite=False)
elif isinstance(cookies, cookielib.CookieJar):
try:
cookiejar.update(cookies)
except AttributeError:
for cookie_in_jar in cookies:
cookiejar.set_cookie(cookie_in_jar)
return cookiejar
| bsd-3-clause | -4,791,816,411,596,893,000 | 34.889353 | 111 | 0.629457 | false |
adw0rd/lettuce | tests/integration/lib/Django-1.2.5/django/utils/functional.py | 307 | 14218 | # License for code in this file that was taken from Python 2.5.
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF
# hereby grants Licensee a nonexclusive, royalty-free, world-wide
# license to reproduce, analyze, test, perform and/or display publicly,
# prepare derivative works, distribute, and otherwise use Python
# alone or in any derivative version, provided, however, that PSF's
# License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
# 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation;
# All Rights Reserved" are retained in Python alone or in any derivative
# version prepared by Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee. This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
def curry(_curried_func, *args, **kwargs):
def _curried(*moreargs, **morekwargs):
return _curried_func(*(args+moreargs), **dict(kwargs, **morekwargs))
return _curried
### Begin from Python 2.5 functools.py ########################################
# Summary of changes made to the Python 2.5 code below:
# * swapped ``partial`` for ``curry`` to maintain backwards-compatibility
# in Django.
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007 Python Software Foundation.
# All Rights Reserved.
###############################################################################
# update_wrapper() and wraps() are tools to help write
# wrapper functions that can handle naive introspection
WRAPPER_ASSIGNMENTS = ('__module__', '__name__', '__doc__')
WRAPPER_UPDATES = ('__dict__',)
def update_wrapper(wrapper,
wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Update a wrapper function to look like the wrapped function
wrapper is the function to be updated
wrapped is the original function
assigned is a tuple naming the attributes assigned directly
from the wrapped function to the wrapper function (defaults to
functools.WRAPPER_ASSIGNMENTS)
updated is a tuple naming the attributes off the wrapper that
are updated with the corresponding attribute from the wrapped
function (defaults to functools.WRAPPER_UPDATES)
"""
for attr in assigned:
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in updated:
getattr(wrapper, attr).update(getattr(wrapped, attr))
# Return the wrapper so this can be used as a decorator via curry()
return wrapper
def wraps(wrapped,
assigned = WRAPPER_ASSIGNMENTS,
updated = WRAPPER_UPDATES):
"""Decorator factory to apply update_wrapper() to a wrapper function
Returns a decorator that invokes update_wrapper() with the decorated
function as the wrapper argument and the arguments to wraps() as the
remaining arguments. Default arguments are as for update_wrapper().
This is a convenience function to simplify applying curry() to
update_wrapper().
"""
return curry(update_wrapper, wrapped=wrapped,
assigned=assigned, updated=updated)
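# Illustrative sketch (not part of Django): the usual consumer of wraps() --
# a decorator that keeps the wrapped function's name and docstring. The
# identity_decorator/greet names are made up for the example.
def _example_wraps_usage():  # never executed; documentation only
    def identity_decorator(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wraps(func)(wrapper)
    @identity_decorator
    def greet(name):
        "Return a greeting."
        return 'hello %s' % name
    assert greet.__name__ == 'greet'  # metadata copied from the original
    assert greet.__doc__ == 'Return a greeting.'
    return greet('world')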
### End from Python 2.5 functools.py ##########################################
def memoize(func, cache, num_args):
"""
Wrap a function so that results for any argument tuple are stored in
'cache'. Note that the args to the function must be usable as dictionary
keys.
Only the first num_args are considered when creating the key.
"""
def wrapper(*args):
mem_args = args[:num_args]
if mem_args in cache:
return cache[mem_args]
result = func(*args)
cache[mem_args] = result
return result
return wraps(func)(wrapper)
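# Illustrative sketch (not part of Django): caching a pure function with
# memoize(); the cache dict and the lookup function are made up.
def _example_memoize_usage():  # never executed; documentation only
    _cache = {}
    def expensive_lookup(key):
        return key.upper()  # stand-in for real work
    expensive_lookup = memoize(expensive_lookup, _cache, 1)
    expensive_lookup('spam')  # computed, stored under the key ('spam',)
    expensive_lookup('spam')  # answered from _cache without calling again
    return _cache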
class Promise(object):
"""
This is just a base class for the proxy class created in
the closure of the lazy function. It can be used to recognize
promises in code.
"""
pass
def lazy(func, *resultclasses):
"""
Turns any callable into a lazy evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__dispatch = None
def __init__(self, args, kw):
self.__func = func
self.__args = args
self.__kw = kw
if self.__dispatch is None:
self.__prepare_class__()
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(self.__func, self.__args, self.__kw) + resultclasses
)
def __prepare_class__(cls):
cls.__dispatch = {}
for resultclass in resultclasses:
cls.__dispatch[resultclass] = {}
for (k, v) in resultclass.__dict__.items():
# All __promise__ return the same wrapper method, but they
# also do setup, inserting the method into the dispatch
# dict.
meth = cls.__promise__(resultclass, k, v)
if hasattr(cls, k):
continue
setattr(cls, k, meth)
cls._delegate_str = str in resultclasses
cls._delegate_unicode = unicode in resultclasses
assert not (cls._delegate_str and cls._delegate_unicode), "Cannot call lazy() with both str and unicode return types."
if cls._delegate_unicode:
cls.__unicode__ = cls.__unicode_cast
elif cls._delegate_str:
cls.__str__ = cls.__str_cast
__prepare_class__ = classmethod(__prepare_class__)
def __promise__(cls, klass, funcname, func):
# Builds a wrapper around some magic method and registers that magic
# method for the given type and method name.
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = self.__func(*self.__args, **self.__kw)
for t in type(res).mro():
if t in self.__dispatch:
return self.__dispatch[t][funcname](res, *args, **kw)
raise TypeError("Lazy object returned unexpected type.")
if klass not in cls.__dispatch:
cls.__dispatch[klass] = {}
cls.__dispatch[klass][funcname] = func
return __wrapper__
__promise__ = classmethod(__promise__)
def __unicode_cast(self):
return self.__func(*self.__args, **self.__kw)
def __str_cast(self):
return str(self.__func(*self.__args, **self.__kw))
def __cmp__(self, rhs):
if self._delegate_str:
s = str(self.__func(*self.__args, **self.__kw))
elif self._delegate_unicode:
s = unicode(self.__func(*self.__args, **self.__kw))
else:
s = self.__func(*self.__args, **self.__kw)
if isinstance(rhs, Promise):
return -cmp(rhs, s)
else:
return cmp(s, rhs)
def __mod__(self, rhs):
if self._delegate_str:
return str(self) % rhs
elif self._delegate_unicode:
return unicode(self) % rhs
else:
raise AssertionError('__mod__ not supported for non-string types')
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return wraps(func)(__wrapper__)
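# Illustrative sketch (not part of Django): deferring a callable until its
# result is used as text, the same pattern ugettext_lazy is built on. The
# greeting function is made up for the example.
def _example_lazy_usage():  # never executed; documentation only
    def expensive_greeting(name):
        return u'hello %s' % name
    lazy_greeting = lazy(expensive_greeting, unicode)
    promise = lazy_greeting(u'world')  # nothing evaluated yet
    return unicode(promise)  # forces evaluation -> u'hello world'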
def _lazy_proxy_unpickle(func, args, kwargs, *resultclasses):
return lazy(func, *resultclasses)(*args, **kwargs)
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
def wrapper(*args, **kwargs):
for arg in list(args) + kwargs.values():
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy(func, *resultclasses)(*args, **kwargs)
return wraps(func)(wrapper)
class LazyObject(object):
"""
A wrapper for another class that can be used to delay instantiation of the
wrapped class.
By subclassing, you have the opportunity to intercept and alter the
instantiation. If you don't need to do that, use SimpleLazyObject.
"""
def __init__(self):
self._wrapped = None
def __getattr__(self, name):
if self._wrapped is None:
self._setup()
return getattr(self._wrapped, name)
def __setattr__(self, name, value):
if name == "_wrapped":
# Assign to __dict__ to avoid infinite __setattr__ loops.
self.__dict__["_wrapped"] = value
else:
if self._wrapped is None:
self._setup()
setattr(self._wrapped, name, value)
def __delattr__(self, name):
if name == "_wrapped":
raise TypeError("can't delete _wrapped.")
if self._wrapped is None:
self._setup()
delattr(self._wrapped, name)
def _setup(self):
"""
Must be implemented by subclasses to initialise the wrapped object.
"""
raise NotImplementedError
# introspection support:
__members__ = property(lambda self: self.__dir__())
def __dir__(self):
if self._wrapped is None:
self._setup()
return dir(self._wrapped)
class SimpleLazyObject(LazyObject):
"""
A lazy object initialised from any function.
Designed for compound objects of unknown type. For builtins or objects of
known type, use django.utils.functional.lazy.
"""
def __init__(self, func):
"""
Pass in a callable that returns the object to be wrapped.
If copies are made of the resulting SimpleLazyObject, which can happen
in various circumstances within Django, then you must ensure that the
callable can be safely run more than once and will return the same
value.
"""
self.__dict__['_setupfunc'] = func
# For some reason, we have to inline LazyObject.__init__ here to avoid
# recursion
self._wrapped = None
def __str__(self):
if self._wrapped is None: self._setup()
return str(self._wrapped)
def __unicode__(self):
if self._wrapped is None: self._setup()
return unicode(self._wrapped)
def __deepcopy__(self, memo):
if self._wrapped is None:
# We have to use SimpleLazyObject, not self.__class__, because the
# latter is proxied.
result = SimpleLazyObject(self._setupfunc)
memo[id(self)] = result
return result
else:
# Changed to use deepcopy from copycompat, instead of copy
# For Python 2.4.
from django.utils.copycompat import deepcopy
return deepcopy(self._wrapped, memo)
# Need to pretend to be the wrapped class, for the sake of objects that care
# about this (especially in equality tests)
def __get_class(self):
if self._wrapped is None: self._setup()
return self._wrapped.__class__
__class__ = property(__get_class)
def __eq__(self, other):
if self._wrapped is None: self._setup()
return self._wrapped == other
def __hash__(self):
if self._wrapped is None: self._setup()
return hash(self._wrapped)
def _setup(self):
self._wrapped = self._setupfunc()
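# Illustrative sketch (not part of Django): deferring construction of an
# expensive object until an attribute is first touched; _Profile is made up.
def _example_simple_lazy_object_usage():  # never executed; documentation only
    class _Profile(object):
        def __init__(self):
            self.loaded = True  # pretend this hit the database
    profile = SimpleLazyObject(_Profile)
    # no _Profile instance exists yet; attribute access triggers _setup()
    return profile.loaded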
| gpl-3.0 | 1,965,849,032,980,865,300 | 37.741144 | 130 | 0.610705 | false |
danilobellini/audiolazy | examples/lpc_plot.py | 1 | 1396 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of AudioLazy, the signal processing Python package.
# Copyright (C) 2012-2016 Danilo de Jesus da Silva Bellini
#
# AudioLazy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
LPC plot with DFT, showing two formants (magnitude peaks)
"""
from audiolazy import sHz, sin_table, str2freq, lpc
import pylab
rate = 22050
s, Hz = sHz(rate)
size = 512
table = sin_table.harmonize({1: 1, 2: 5, 3: 3, 4: 2, 6: 9, 8: 1}).normalize()
data = table(str2freq("Bb3") * Hz).take(size)
filt = lpc(data, order=14) # Analysis filter
gain = 1e-2 # Gain just for alignment with DFT
# Plots the synthesis filter
# - If blk is given, plots the block DFT together with the filter
# - If rate is given, shows the frequency range in Hz
(gain / filt).plot(blk=data, rate=rate, samples=1024, unwrap=False)
pylab.ioff()
pylab.show()
| gpl-3.0 | 3,357,333,521,711,741,000 | 35.736842 | 77 | 0.72851 | false |
lukasjuhrich/pycroft | hades_logs/__init__.py | 1 | 5307 | """
hades_logs
----------
This module provides access to Hades' radius logs utilizing its celery
RPC api.
"""
import logging
from celery.exceptions import TimeoutError as CeleryTimeoutError
from flask.globals import current_app
from werkzeug import LocalProxy
from .app import HadesCelery
from .exc import HadesConfigError, HadesOperationalError, HadesTimeout
from .parsing import RadiusLogEntry, reduce_radius_logs
_CONFIGURATION_DOCS = """\
This Flask application utilizes the `HadesLogs` extension, \
which needs certain config variables.
A minimal example configuration would look like this:
> app.config['HADES_CELERY_APP_NAME'] = 'hades'
> app.config['HADES_BROKER_URI'] = 'pyamqp://user:password@rabbitmq_host:5762/vhost'
> app.config['HADES_RESULT_BACKEND_URI'] = 'pyamqp://user:password@rabbitmq_host:5762/vhost'\
"""
class HadesLogs:
"""The ``HadesLogs`` Flask extension
This extension provides access to the Hades RPC. The core
functionality is provided by :py:meth:`fetch_logs`.
You need to provide the following configuration to
:py:obj:`app.config`:
- 'HADES_CELERY_APP_NAME': The Name of the celery app
- 'HADES_BROKER_URI': The broker URI
- 'HADES_RESULT_BACKEND_URI': The URI of the Result backend
- 'HADES_TIMEOUT' (Optional, default=5): The Timeout to wait
with each task in seconds.
    - 'HADES_ROUTING_KEY' (Optional, default='hades-ng'): The routing
key to use for the celery messages
Usage:
>>> from flask import Flask
>>> from hades_logs import HadesLogs
>>> app = Flask('test')
>>> logs = HadesLogs(app)
>>> logs.fetch_logs(<nasip>, <portid>)
"""
def __init__(self, app=None):
self.app = app
self.logger = logging.getLogger('hades_logs')
if app is not None:
self.init_app(app)
def init_app(self, app):
try:
app_name = app.config['HADES_CELERY_APP_NAME']
broker_uri = app.config['HADES_BROKER_URI']
backend_uri = app.config['HADES_RESULT_BACKEND_URI']
            routing_key = app.config.get('HADES_ROUTING_KEY', 'hades-ng')
except KeyError as e:
self.logger.warning("Missing config key: %s\n%s", e, _CONFIGURATION_DOCS)
raise KeyError("Missing config key: {}".format(e)) from e
self.timeout = app.config.get('HADES_TIMEOUT', 5)
self.celery = HadesCelery(app_name, broker=broker_uri, backend=backend_uri,
routing_key=routing_key)
# Gets run only on success
self.logger.info("Initialization complete, registering 'hades_logs' extension")
app.extensions['hades_logs'] = self
def create_task(self, name, *args, **kwargs):
"""Create a Celery task object by name, args and kwargs
``*args`` and ``**kwargs`` are passed to the corresponding
parameters of :py:func:`Celery.signature(name, args, kwargs)`
:param name: The name of the task without the celery app name.
Assembling is done using :py:attr:`self.celery.main`.
:returns: the signature of the task
:rtype: :py:obj:`celery.Signature`
"""
full_task_name = '{}.{}'.format(self.celery.main, name)
return self.celery.signature(full_task_name, args=args, kwargs=kwargs)
def fetch_logs(self, nasipaddress, nasportid, limit=100, reduced=True):
"""Fetch the auth logs of the given port
:param ipaddr nasipaddress: The IP address of the NAS
:param str nasportid: The port identifier (e.g. `C12`) of the
NAS port
:returns: the result of the task (see
``get_port_auth_attempts`` in hades)
:rtype: iterable (generator if :param:`reduced`)
:raises HadesTimeout: on timeouts, e.g. when the task takes
too long to be executed by a worker or when the broker is
down.
"""
if reduced:
reductor = reduce_radius_logs
else:
def reductor(x):
return x
task = self.create_task(name='get_auth_attempts_at_port',
nas_ip_address=nasipaddress, nas_port_id=nasportid,
limit=limit)
return reductor(RadiusLogEntry(*e) for e in self.wait_for_task(task))
def wait_for_task(self, task):
self.logger.info("Waiting for task: %s", task)
try:
return task.apply_async().wait(timeout=self.timeout)
except CeleryTimeoutError as e:
raise HadesTimeout("The Hades lookup task has timed out") from e
except OSError as e:
# In newer versions of celery, this is encapsuled by
# `kombu.exc.OperationalError`. It is thrown when e.g. the
# broker is down
if "timeout" in str(e).lower():
raise HadesTimeout("The Hades lookup task has timed out") from e
else:
raise HadesOperationalError("OSError when fetching hades logs") from e
def _get_extension():
try:
return current_app.extensions['hades_logs']
except KeyError:
raise HadesConfigError("No HadesLogs instance registered to current Flask app")
hades_logs = LocalProxy(_get_extension)
| apache-2.0 | 2,022,353,213,843,764,200 | 34.145695 | 93 | 0.628227 | false |
rsteca/python-social-auth | social/backends/goclio.py | 77 | 1247 | from social.backends.oauth import BaseOAuth2
class GoClioOAuth2(BaseOAuth2):
name = 'goclio'
AUTHORIZATION_URL = 'https://app.goclio.com/oauth/authorize/'
ACCESS_TOKEN_METHOD = 'POST'
ACCESS_TOKEN_URL = 'https://app.goclio.com/oauth/token/'
REDIRECT_STATE = False
STATE_PARAMETER = False
def get_user_details(self, response):
"""Return user details from GoClio account"""
user = response.get('user', {})
username = user.get('id', None)
email = user.get('email', None)
first_name, last_name = (user.get('first_name', None),
user.get('last_name', None))
fullname = '%s %s' % (first_name, last_name)
return {'username': username,
'fullname': fullname,
'first_name': first_name,
'last_name': last_name,
'email': email}
def user_data(self, access_token, *args, **kwargs):
"""Loads user data from service"""
return self.get_json(
'https://app.goclio.com/api/v2/users/who_am_i',
params={'access_token': access_token}
)
def get_user_id(self, details, response):
return response.get('user', {}).get('id')
| bsd-3-clause | 5,619,559,984,779,561,000 | 34.628571 | 65 | 0.566159 | false |
BFriedland/UserDataBase-Heroku | venv/Lib/site-packages/pip/_vendor/requests/compat.py | 571 | 2556 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
| mit | -663,752,567,564,605,800 | 21.226087 | 132 | 0.641628 | false |
yazug/shade_janitor | shade_janitor/tests/unit/cleanup/test_cleanup_subnet.py | 1 | 2092 | import mock
from shade_janitor import cleanup
from shade_janitor.tests.unit import base
class TestCleanupSubnet(base.BaseTestCase):
def setUp(self):
super(TestCleanupSubnet, self).setUp()
self.cloud.delete_subnet = mock.Mock()
self.subnet = mock.Mock()
def add_single(self):
self.resources._add('subnets', self.subnet.id, self.subnet.name)
@mock.patch('shade_janitor.cleanup.show_cleanup')
def test_dry_cleanup_subnet(self, mock_subnets_cleanup):
self.add_single()
cleanup.cleanup_resources(self.cloud, self.resources.get_selection())
self.assertTrue(mock_subnets_cleanup.called)
def test_cleanup_subnet(self):
dry_cleanup = False
self.add_single()
cleanup.cleanup_resources(
self.cloud, self.resources.get_selection(), dry_cleanup)
self.assertTrue(self.cloud.delete_subnet.called)
@mock.patch('shade_janitor.cleanup.show_cleanup')
def test_cleanup_no_subnet(self, mock_subnets_cleanup):
cleanup.cleanup_resources(self.cloud, self.resources.get_selection())
self.assertFalse(mock_subnets_cleanup.called)
self.assertFalse(self.cloud.delete_subnet.called)
@mock.patch('shade_janitor.cleanup.dry_cleanup_subnets')
def test_dry_cleanup_subnet_micro(self, mock_subnets_cleanup):
self.add_single()
cleanup.cleanup_resources(self.cloud, self.resources.get_selection())
self.assertTrue(mock_subnets_cleanup.called)
@mock.patch('shade_janitor.cleanup.cleanup_subnets')
def test_cleanup_subnet_micro(self, mock_subnets_cleanup):
dry_cleanup = False
self.add_single()
cleanup.cleanup_resources(
self.cloud, self.resources.get_selection(), dry_cleanup)
self.assertTrue(mock_subnets_cleanup.called)
@mock.patch('shade_janitor.cleanup.dry_cleanup_subnets')
def test_cleanup_no_subnet_micro(self, mock_subnets_cleanup):
cleanup.cleanup_resources(self.cloud, self.resources.get_selection())
self.assertFalse(mock_subnets_cleanup.called)
| gpl-3.0 | 6,612,498,921,566,346,000 | 38.471698 | 77 | 0.695507 | false |
leighpauls/k2cro4 | third_party/webdriver/pylib/test/selenium/webdriver/common/alerts_tests.py | 1 | 5602 | #Copyright 2007-2009 WebDriver committers
#Copyright 2007-2009 Google Inc.
#
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import pytest
from selenium.webdriver.common.by import By
from selenium.common.exceptions import ElementNotVisibleException
from selenium.common.exceptions import NoAlertPresentException
import unittest
@pytest.mark.ignore_opera
class AlertsTest(unittest.TestCase):
def testShouldBeAbleToOverrideTheWindowAlertMethod(self):
self._loadPage("alerts")
self.driver.execute_script(
"window.alert = function(msg) { document.getElementById('text').innerHTML = msg; }")
self.driver.find_element(by=By.ID, value="alert").click()
def testShouldAllowUsersToAcceptAnAlertManually(self):
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self.driver.switch_to_alert()
alert.accept()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowUsersToAcceptAnAlertWithNoTextManually(self):
self._loadPage("alerts")
self.driver.find_element(By.ID,"empty-alert").click();
alert = self.driver.switch_to_alert()
alert.accept()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
@pytest.mark.ignore_chrome
def testShouldAllowUsersToDismissAnAlertManually(self):
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self.driver.switch_to_alert()
alert.dismiss()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowAUserToAcceptAPrompt(self):
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="prompt").click()
alert = self.driver.switch_to_alert()
alert.accept()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowAUserToDismissAPrompt(self):
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="prompt").click()
alert = self.driver.switch_to_alert()
alert.dismiss()
# If we can perform any action, we're good to go
self.assertEqual("Testing Alerts", self.driver.title)
def testShouldAllowAUserToSetTheValueOfAPrompt(self):
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="prompt").click()
alert = self.driver.switch_to_alert()
alert.send_keys("cheese")
alert.accept()
result = self.driver.find_element(by=By.ID, value="text").text
self.assertEqual("cheese", result)
@pytest.mark.ignore_chrome
def testSettingTheValueOfAnAlertThrows(self):
self._loadPage("alerts")
self.driver.find_element(By.ID,"alert").click();
alert = self.driver.switch_to_alert()
try:
alert.send_keys("cheese");
self.fail("Expected exception");
except ElementNotVisibleException:
pass
finally:
alert.accept()
@pytest.mark.ignore_chrome
def testAlertShouldNotAllowAdditionalCommandsIfDimissed(self):
self._loadPage("alerts");
self.driver.find_element(By.ID, "alert").click()
alert = self.driver.switch_to_alert()
alert.dismiss()
try:
alert.text
self.fail("Expected NoAlertPresentException")
except NoAlertPresentException:
pass
def testPromptShouldUseDefaultValueIfNoKeysSent(self):
self._loadPage("alerts")
self.driver.find_element(By.ID, "prompt-with-default").click()
alert = self.driver.switch_to_alert()
alert.accept()
txt = self.driver.find_element(By.ID, "text").text
self.assertEqual("This is a default value", txt)
def testHandlesTwoAlertsFromOneInteraction(self):
self._loadPage("alerts")
self.driver.find_element(By.ID, "double-prompt").click()
alert1 = self.driver.switch_to_alert()
alert1.send_keys("brie")
alert1.accept()
alert2 = self.driver.switch_to_alert()
alert2.send_keys("cheddar")
        alert2.accept()
self.assertEqual(self.driver.find_element(By.ID, "text1").text, "brie")
self.assertEqual(self.driver.find_element(By.ID, "text2").text, "cheddar")
def testShouldAllowTheUserToGetTheTextOfAnAlert(self):
self._loadPage("alerts")
self.driver.find_element(by=By.ID, value="alert").click()
alert = self.driver.switch_to_alert()
value = alert.text
alert.accept()
self.assertEqual("cheese", value)
def _pageURL(self, name):
return "http://localhost:%d/%s.html" % (self.webserver.port, name)
def _loadSimplePage(self):
self._loadPage("simpleTest")
def _loadPage(self, name):
self.driver.get(self._pageURL(name))
| bsd-3-clause | -8,435,785,012,467,050,000 | 35.141935 | 96 | 0.662085 | false |
demon-ru/iml-crm | addons/l10n_fr/report/bilan_report.py | 374 | 6196 | # -*- coding: utf-8 -*-
#
#
# Copyright (c) 2008 JAILLET Simon - CrysaLEAD - www.crysalead.fr
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
#
import base_report
from openerp.osv import osv
class bilan(base_report.base_report):
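    # Aggregates the "bilan" (French balance sheet) variables loaded into
    # localcontext by base_report into asset (at*/actif) and liability
    # (pt*/passif) totals used by the report template.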
def __init__(self, cr, uid, name, context):
super(bilan, self).__init__(cr, uid, name, context)
def set_context(self, objects, data, ids):
super(bilan, self).set_context(objects, data, ids)
self._load('bilan', self.localcontext['data']['form'])
self._set_variable(
'at1a',
self.localcontext['bavar1'] + self.localcontext['bavar2'] + self.localcontext['bavar3']
+ self.localcontext['bavar4'] + self.localcontext[
'bavar5'] + self.localcontext['bavar6']
+ self.localcontext['bavar7'] + self.localcontext[
'bavar8'] + self.localcontext['bavar9']
+ self.localcontext['bavar10'] + self.localcontext[
'bavar11'] + self.localcontext['bavar12']
+ self.localcontext['bavar13'] + self.localcontext[
'bavar14'] + self.localcontext['bavar15']
+ self.localcontext['bavar16'] + self.localcontext[
'bavar17'] + self.localcontext['bavar18']
+ self.localcontext['bavar19'] + self.localcontext['bavar20']
)
self._set_variable(
'at1b',
self.localcontext['bavar2b'] + self.localcontext[
'bavar3b'] + self.localcontext['bavar4b']
+ self.localcontext['bavar5b'] + self.localcontext[
'bavar6b'] + self.localcontext['bavar7b']
+ self.localcontext['bavar9b'] + self.localcontext[
'bavar10b'] + self.localcontext['bavar11b']
+ self.localcontext['bavar12b'] + self.localcontext[
'bavar13b'] + self.localcontext['bavar15b']
+ self.localcontext['bavar16b'] + self.localcontext[
'bavar17b'] + self.localcontext['bavar18b']
+ self.localcontext['bavar19b'] + self.localcontext['bavar20b']
)
self._set_variable(
'at1',
self.localcontext['at1a'] + self.localcontext['at1b']
)
self._set_variable(
'at2a',
self.localcontext['bavar21'] + self.localcontext[
'bavar22'] + self.localcontext['bavar23']
+ self.localcontext['bavar24'] + self.localcontext[
'bavar25'] + self.localcontext['bavar26']
+ self.localcontext['bavar27'] + self.localcontext[
'bavar28'] + self.localcontext['bavar29']
+ self.localcontext['bavar30'] + self.localcontext[
'bavar31'] + self.localcontext['bavar32']
+ self.localcontext['bavar33']
)
self._set_variable(
'at2b',
self.localcontext['bavar21b'] + self.localcontext[
'bavar22b'] + self.localcontext['bavar23b']
+ self.localcontext['bavar24b'] + self.localcontext[
'bavar26b'] + self.localcontext['bavar27b']
+ self.localcontext['bavar29b'] + self.localcontext['bavar30b']
)
self._set_variable(
'at2',
self.localcontext['at2a'] + self.localcontext['at2b']
)
self._set_variable(
'actif',
self.localcontext['at1'] + self.localcontext['at2'] + self.localcontext['bavar34']
+ self.localcontext['bavar35'] + self.localcontext['bavar36']
)
self._set_variable(
'pt1',
self.localcontext['bpvar1'] + self.localcontext['bpvar2'] + self.localcontext['bpvar3']
+ self.localcontext['bpvar4'] + self.localcontext[
'bpvar5'] + self.localcontext['bpvar6']
+ self.localcontext['bpvar7'] + self.localcontext[
'bpvar8'] + self.localcontext['bpvar9']
+ self.localcontext['bpvar10'] + self.localcontext[
'bpvar11'] + self.localcontext['bpvar12']
)
self._set_variable(
'pt2',
self.localcontext['bpvar13'] + self.localcontext['bpvar14']
)
self._set_variable(
'pt3',
self.localcontext['bpvar15'] + self.localcontext[
'bpvar16'] + self.localcontext['bpvar17']
+ self.localcontext['bpvar18'] + self.localcontext[
'bpvar19'] + self.localcontext['bpvar20']
+ self.localcontext['bpvar21'] + self.localcontext[
'bpvar22'] + self.localcontext['bpvar23']
+ self.localcontext['bpvar24'] + self.localcontext['bpvar25']
)
self._set_variable(
'passif',
self.localcontext['pt1'] + self.localcontext['pt2'] + self.localcontext['pt3']
+ self.localcontext['bpvar26']
)
class wrapped_report_bilan(osv.AbstractModel):
_name = 'report.l10n_fr.report_l10nfrbilan'
_inherit = 'report.abstract_report'
_template = 'l10n_fr.report_l10nfrbilan'
_wrapped_report_class = bilan
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 6,254,801,947,465,168,000 | 42.943262 | 99 | 0.602485 | false |
fevxie/odoo | addons/account_analytic_default/__init__.py | 445 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import account_analytic_default
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -6,987,626,279,441,079,000 | 44.291667 | 79 | 0.613615 | false |
bartvm/pylearn2 | pylearn2/scripts/icml_2013_wrepl/emotions/make_submission.py | 21 | 2184 | from __future__ import print_function
import sys
from theano.compat.six.moves import xrange
def usage():
    print("""usage: python make_submission.py model.pkl submission.csv
Where model.pkl contains a trained pylearn2.models.mlp.MLP object.
The script will make submission.csv, which you may then upload to the
kaggle site.""")
if len(sys.argv) != 3:
usage()
print("(You used the wrong # of arguments)")
quit(-1)
_, model_path, out_path = sys.argv
import os
if os.path.exists(out_path):
usage()
print(out_path+" already exists, and I don't want to overwrite anything just to be safe.")
quit(-1)
from pylearn2.utils import serial
try:
model = serial.load(model_path)
except Exception as e:
usage()
    print(model_path + " doesn't seem to be a valid model path, I got this error when trying to load it: ")
    print(e)
    quit(-1)
from pylearn2.config import yaml_parse
dataset = yaml_parse.load(model.dataset_yaml_src)
dataset = dataset.get_test_set()
# use smallish batches to avoid running out of memory
batch_size = 100
model.set_batch_size(batch_size)
# dataset must be multiple of batch size of some batches will have
# different sizes. theano convolution requires a hard-coded batch size
m = dataset.X.shape[0]
extra = batch_size - m % batch_size
assert (m + extra) % batch_size == 0
import numpy as np
if extra > 0:
dataset.X = np.concatenate((dataset.X, np.zeros((extra, dataset.X.shape[1]),
dtype=dataset.X.dtype)), axis=0)
assert dataset.X.shape[0] % batch_size == 0
X = model.get_input_space().make_batch_theano()
Y = model.fprop(X)
from theano import tensor as T
y = T.argmax(Y, axis=1)
from theano import function
f = function([X], y)
y = []
for i in xrange(dataset.X.shape[0] / batch_size):
x_arg = dataset.X[i*batch_size:(i+1)*batch_size,:]
if X.ndim > 2:
x_arg = dataset.get_topological_view(x_arg)
y.append(f(x_arg.astype(X.dtype)))
y = np.concatenate(y)
assert y.ndim == 1
assert y.shape[0] == dataset.X.shape[0]
# discard any zero-padding that was used to give the batches uniform size
y = y[:m]
out = open(out_path, 'w')
for i in xrange(y.shape[0]):
out.write('%d\n' % y[i])
out.close()
| bsd-3-clause | -8,061,702,330,874,752,000 | 24.694118 | 106 | 0.688187 | false |
dsanno/chainer-cifar | src/dataset.py | 1 | 4361 | import cPickle as pickle
import numpy as np
import os
from PIL import Image
import six
train_files = ['data_batch_{}'.format(i + 1) for i in six.moves.range(5)]
test_files = ['test_batch']
def load_file(file_path):
with open(file_path, 'rb') as f:
data = pickle.load(f)
return data['data'].astype(np.float32), np.asarray(data['labels'], dtype=np.int32)
def load(data_dir):
train_data = [load_file(os.path.join(data_dir, file_name)) for file_name in train_files]
images, labels = zip(*train_data)
train_images = np.concatenate(images)
train_labels = np.concatenate(labels)
test_data = [load_file(os.path.join(data_dir, file_name)) for file_name in test_files]
images, labels = zip(*test_data)
test_images = np.concatenate(images)
test_labels = np.concatenate(labels)
return train_images, train_labels, test_images, test_labels
def calc_mean(x):
return x.reshape((-1, 3, 32 * 32)).mean(axis=(0, 2))
def calc_std(x):
return x.reshape((-1, 3, 32 * 32)).std(axis=(0, 2))
def normalize_dataset(x, mean, std=None):
shape = x.shape
x = x.reshape((-1, 3)) - mean
if std is not None:
x /= std
return x.reshape(shape)
def calc_zca(x):
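    # ZCA whitening: center the data, take the SVD of its covariance matrix,
    # and build the whitening matrix U * diag(1/sqrt(lam + eps)) * U^T.
    # The data mean is returned as well so new samples can be centered first.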
n = x.shape[0]
mean = np.mean(x, axis=0)
x = x - mean
c = np.dot(x.T, x)
u, lam, v = np.linalg.svd(c)
eps = 0
sqlam = np.sqrt(lam + eps)
uzca = np.dot(u / sqlam[np.newaxis, :], u.T)
return uzca, mean
def save_image(x, path, normalize=True):
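    # Tile the first 100 rows of x as a 10x10 grid of 32x32 RGB thumbnails
    # (with a 2-pixel border around each) and save the result as an image.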
    image = x[:100, :]
if normalize:
max_value = np.max(np.abs(image), axis=1).reshape((100, 1))
image = image / max_value * 127
image = (image + 128).clip(0, 255).astype(np.uint8)
image = image.reshape((10, 10, 3, 32, 32))
image = np.pad(image, ((0, 0), (0, 0), (0, 0), (2, 2), (2, 2)), mode='constant', constant_values=0)
image = image.transpose((0, 3, 1, 4, 2)).reshape((360, 360, 3))
Image.fromarray(image).save(path)
if __name__ == '__main__':
dataset_path = 'dataset/cifar-10-batches-py'
output_path = 'dataset'
raw_train_x, raw_train_y, raw_test_x, raw_test_y = load(dataset_path)
# save labels
labels = {'train': raw_train_y, 'test': raw_test_y}
with open(os.path.join(output_path, 'label.pkl'), 'wb') as f:
pickle.dump(labels, f, pickle.HIGHEST_PROTOCOL)
mean = calc_mean(raw_train_x)
std = calc_std(raw_train_x)
# subtract mean
train_x = normalize_dataset(raw_train_x, mean)
test_x = normalize_dataset(raw_test_x, mean)
images = {'train': train_x, 'test': test_x}
with open(os.path.join(output_path, 'image.pkl'), 'wb') as f:
pickle.dump(images, f, pickle.HIGHEST_PROTOCOL)
with open(os.path.join(output_path, 'mean.txt'), 'w') as f:
f.write(np.array_str(mean))
save_image(train_x, os.path.join(output_path, 'sample.png'))
# contrast normalization
train_x = normalize_dataset(raw_train_x, mean, std)
test_x = normalize_dataset(raw_test_x, mean, std)
images = {'train': train_x, 'test': test_x}
with open(os.path.join(output_path, 'image_norm.pkl'), 'wb') as f:
pickle.dump(images, f, pickle.HIGHEST_PROTOCOL)
save_image(train_x, os.path.join(output_path, 'sample_norm.png'), normalize=True)
# ZCA whitening
zca, zca_mean = calc_zca(raw_train_x)
train_x = np.dot(raw_train_x - zca_mean, zca.T)
test_x = np.dot(raw_test_x - zca_mean, zca.T)
images = {'train': train_x, 'test': test_x}
with open(os.path.join(output_path, 'image_zca.pkl'), 'wb') as f:
pickle.dump(images, f, pickle.HIGHEST_PROTOCOL)
save_image(train_x, os.path.join(output_path, 'sample_zca.png'), normalize=True)
# contrast normalization and ZCA whitening
train_x = normalize_dataset(raw_train_x, mean, std)
test_x = normalize_dataset(raw_test_x, mean, std)
zca, zca_mean = calc_zca(train_x)
train_x = np.dot(train_x - zca_mean, zca.T)
test_x = np.dot(test_x - zca_mean, zca.T)
images = {'train': train_x, 'test': test_x}
with open(os.path.join(output_path, 'image_norm_zca.pkl'), 'wb') as f:
pickle.dump(images, f, pickle.HIGHEST_PROTOCOL)
save_image(train_x, os.path.join(output_path, 'sample_norm_zca.png'), normalize=True)
| mit | 3,803,710,116,087,965,000 | 36.254386 | 103 | 0.608805 | false |
maackle/ILC-app | make.py | 1 | 27624 | import csv
import os
import re
import sys
import argparse
import tempfile
from subprocess import call
import json
from pyspatialite import dbapi2 as sqlite3
import geojson
from shapely import wkt, wkb
from conf import settings
from conf.settings import global_datasets
from conf.projects import projects
from util import *
def interpolate_sql(script, **kwargs):
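    # Replace every ":name" placeholder in the SQL script with str(value)
    # for each keyword argument passed in.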
for k, v in kwargs.items():
script = script.replace(":%s" % str(k), str(v))
return script
def run_file_based_spatialite_script(conn, script_template, driver='sqlite', print_script=False, **kwargs):
assert driver == 'sqlite', "Unsupported driver: %s" % driver
script = interpolate_sql(script_template, **kwargs)
if print_script:
print script
fd, path = tempfile.mkstemp()
with open(path, 'w') as f:
f.write(script)
os.close(fd)
return call("spatialite {db} < {script}".format(script=path, db=settings.SPATIALITE_DB_FILE), shell=True)
def run_spatialite_script(conn, script_template, driver='sqlite', print_script=False, **kwargs):
assert driver == 'sqlite', "Unsupported driver: %s" % driver
script = interpolate_sql(script_template, **kwargs)
with db_connect() as conn:
cur = conn.executescript(script)
conn.commit()
return cur
class db_connect:
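    # Context manager that opens the SpatiaLite database with dict-style row
    # access and makes sure the connection is closed again on exit.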
def __enter__(self):
self.conn = sqlite3.connect(settings.SPATIALITE_DB_FILE)
self.conn.row_factory = sqlite3.Row
return self.conn
def __exit__(self, type, value, traceback):
self.conn.close()
def get_project(name):
assert name in map(lambda p: p.name, projects), "Invalid project name"
return filter(lambda p: p.name == name, projects)[0]
def task_load_global_shapefiles():
filename = settings.SPATIALITE_DB_FILE
for name, data in global_datasets.items():
load_shapefile(data['table'], data['path'], data['srid'])
def task_localize_demography(*names):
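    # Copy the census demography rows for each project's counties (plus every
    # county that touches them) from the global table into a per-project table.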
if not names:
print "Please specify the names of projects you want to load."
print "Did nothing..."
for proj_name in names:
project = get_project(proj_name)
fips_list = project.fips_list
if len(fips_list) > 1:
fips_list = str(fips_list)
elif len(fips_list) == 1:
fips_list = "('{0}')".format(fips_list[0])
else:
            raise ValueError("no fips_list specified")
with db_connect() as conn:
conn.execute("DROP TABLE IF EXISTS {local_demography_table}".format(local_demography_table=project.raw_demography_table))
print '[{0}] Dropped local demography table'.format(proj_name)
conn.execute("""
CREATE TABLE {local_demography_table} AS
SELECT * FROM {global_demography_table}
LIMIT 0
;
""".format(
global_demography_table=settings.DEMOGRAPHY_TABLE,
local_demography_table=project.raw_demography_table,
))
print '[{0}] Recreated local demography table'.format(proj_name)
fips_query = """
select FIPS from _G_counties where intersects(geom, (select collect(geom) from _G_counties where FIPS in {fips_list}))
""".format(
fips_list=fips_list
)
extended_fips_list = "(" + ",".join("'{0}'".format(row[0]) for row in conn.execute(fips_query).fetchall()) + ")"
cur = None
# while not cur or cur.rowcount > 0:
query = """
INSERT INTO {local_demography_table}
SELECT * FROM {global_demography_table}
where substr(GEOID10, 1, 5) IN {extended_fips_list}
""".format(
global_demography_table=settings.DEMOGRAPHY_TABLE,
local_demography_table=project.raw_demography_table,
buffer_radius=1609*10,
extended_fips_list=extended_fips_list,
)
cur = conn.execute(query)
print "rows added:", cur.rowcount
conn.commit()
def task_localize_brownfields(*names):
if not names:
print "Please specify the names of projects you want to load."
print "Did nothing..."
for proj_name in names:
project = get_project(proj_name)
fips_list = project.fips_list
if len(fips_list) > 1:
fips_list = str(fips_list)
elif len(fips_list) == 1:
fips_list = "('{0}')".format(fips_list[0])
else:
            raise ValueError("no fips_list specified")
with db_connect() as conn:
conn.execute("DROP TABLE IF EXISTS {local_brownfields_table}".format(local_brownfields_table=project.raw_brownfields_table))
print '[{0}] Dropped local brownfield table'.format(proj_name)
conn.execute("""
CREATE TABLE {local_brownfields_table} AS
SELECT * FROM {global_brownfields_table}
LIMIT 0
;
""".format(
global_brownfields_table=settings.BROWNFIELDS_TABLE,
local_brownfields_table=project.raw_brownfields_table,
))
print '[{0}] Recreated local brownfield table'.format(proj_name)
fips_query = """
select FIPS from _G_counties where intersects(geom, (select collect(geom) from _G_counties where FIPS in {fips_list}))
""".format(
fips_list=fips_list
)
extended_fips_list = "(" + ",".join("'{0}'".format(row[0]) for row in conn.execute(fips_query).fetchall()) + ")"
cur = None
# need to filter by Y > 0 in this case because of some bad data...
query = """
INSERT INTO {local_brownfields_table}
SELECT * FROM {global_brownfields_table}
where substr(FIPS_CODE, 1, 5) IN {extended_fips_list}
and Y(geom) > 0
""".format(
global_brownfields_table=settings.BROWNFIELDS_TABLE,
local_brownfields_table=project.raw_brownfields_table,
buffer_radius=1609*10,
extended_fips_list=extended_fips_list,
)
cur = conn.execute(query)
print "rows added:", cur.rowcount
conn.commit()
# def db2geojson(outfilename, cur):
# with open(outfilename, 'w') as f:
# features = []
# for row in cur:
# properties = {}
# for key in row.keys():
# k = key.lower()
# if k not in ('geom', 'geom_wkt'):
# properties[k] = row[key]
# geometry = wkt.loads(row['geom_wkt'])
# geometry = geojson.loads(geojson.dumps(geometry))
# features.append({
# 'type': 'Feature',
# 'geometry': geometry,
# 'properties': properties,
# })
# f.write(json.dumps({
# 'type': 'FeatureCollection',
# 'features': features,
# }))
def task_generate_brownfields(*names):
if not names:
print "Please specify the names of projects you want to load."
print "Did nothing..."
for proj_name in names:
project = get_project(proj_name)
json_dir = project.app_data_dir('json')
lazy_mkdir(json_dir)
dump_shapefile(project.raw_brownfields_table, os.path.join(json_dir, 'brownfields'), 'POINT')
# print "*** THIS STEP IS NOT COMPLETELY AUTOMATED YET! Please follow instructions... ***"
# print "run the following command:"
outfile = os.path.join(json_dir, 'brownfields.geojson')
        if os.path.exists(outfile):
            os.remove(outfile)
cmd = """ogr2ogr -f GeoJSON -select "PK_UID" {outfile} {infile}""".format(
outfile=outfile,
infile=os.path.join(json_dir, 'brownfields.shp'),
)
call(cmd, shell=True)
# def task_generate_brownfields(*names):
# if not names:
# print "Please specify the names of projects you want to load."
# print "Did nothing..."
# for proj_name in names:
# project = get_project(proj_name)
# fips_list = project.fips_list
# if len(fips_list) > 1:
# fips_list = str(fips_list)
# elif len(fips_list) == 1:
# fips_list = "('{0}')".format(fips_list[0])
# else:
# raise "no fips_list specified"
# fips_subquery = """
# select FIPS from _G_counties
# where intersects(
# geom, (
# select collect(geom)
# from _G_counties where FIPS in {fips_list}
# )
# )
# """.format(fips_list=fips_list)
# with db_connect() as conn:
# extended_fips_list = "(" + ",".join("'{0}'".format(row[0]) for row in conn.execute(fips_subquery).fetchall()) + ")"
# # fips_subquery = re.sub(r"\(|\)", "", fips_list)
# output_path = project.app_data_dir('brownfields.geojson')
# try:
# os.remove(output_path)
# except OSError:
# pass
# sql = """SELECT * from '{table}' where substr(`FIPS_CODE`,1,5) IN {extended_fips_list}""".format(
# extended_fips_list=extended_fips_list,
# table=global_datasets['brownfields']['filename']
# )
# # sql = """SELECT * FROM 'brownfields'"""
# print sql
# cmd = """ogr2ogr -f "GeoJSON" -sql "{sql}" -overwrite {output_path} {input_path}.shp""".format(
# input_path=os.path.join(settings.RAW_DATA_DIR, global_datasets['brownfields']['path']),
# output_path=output_path,
# sql=re.sub(r"\s+|\n+", " ", sql),
# # sql=sql,
# )
# call(cmd, shell=True)
def task_load_project_shapefiles(*names):
if not names:
print "Please specify the names of projects you want to load."
print "Did nothing..."
for name in names:
project = get_project(name)
project.load_shapefiles()
def task_load_all():
task_load_global_shapefiles()
project_names = map(lambda p: p.name, projects)
task_load_project_shapefiles(*project_names)
def rebuild_demography_tables(*project_names):
for proj_name in project_names:
print '[{0}] Rebuilding demography table'.format(proj_name)
project = get_project(proj_name)
race_definitions = ",".join("{name} FLOAT".format(name=cat['name']) for cat in settings.demography_categories['race_categories'])
occupation_definitions = ",".join("{name} FLOAT".format(name=cat['name']) for cat in settings.demography_categories['occupation_categories'])
race_script = """
BEGIN;
DROP TABLE IF EXISTS {table};
CREATE TABLE {table} (
gid INT PRIMARY KEY,
{field_definitions}
);
END;
""".format(
table=project.race_table,
field_definitions=race_definitions,
)
occupation_script = """
BEGIN;
DROP TABLE IF EXISTS {table};
CREATE TABLE {table} (
gid INT PRIMARY KEY,
{field_definitions}
);
END;
""".format(
table=project.occupation_table,
field_definitions=occupation_definitions,
)
with db_connect() as conn:
conn.executescript(race_script)
conn.executescript(occupation_script)
conn.commit()
def task_process_demography(*project_names):
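    # For each industrial parcel, buffer its centroid (1 mile for race,
    # 5 miles for occupation) and estimate demographics as an area-weighted
    # sum over the census tracts that the buffer intersects.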
rebuild_demography_tables(*project_names)
for proj_name in project_names:
project = get_project(proj_name)
for what in ('race', 'occupation'):
if what == 'race':
target_table = project.race_table
buffer_mi = 1.0
elif what == 'occupation':
target_table = project.occupation_table
buffer_mi = 5.0
category_key = what + '_categories'
categories = settings.demography_categories[category_key]
rawname_list = ",".join(cat['rawname'] for cat in categories)
name_list = ",".join(cat['name'] for cat in categories)
assignment_list = ",".join(
("SUM( density * {rawname} ) AS {name}".format(rawname=cat['rawname'], name=cat['name']) for cat in categories)
)
with db_connect() as conn:
print 'processing ', proj_name, ':', what
cur = None
limit = settings.BACKEND_CHUNK_SIZE
offset = 0
while not cur or cur.rowcount > 0:
query = """
INSERT INTO {target_table} (gid, {name_list})
SELECT
{pk},
{assignment_list}
FROM (
SELECT
raw.{pk},
census.*,
ST_Area( ST_Intersection( circle_buffer, census.tract ) ) / census.tract_area AS density
FROM (
SELECT
{pk},
ST_Buffer( ST_Transform( ST_Centroid(geom), {equal_area_srid} ), 1609*{buffer_mi} ) as circle_buffer
FROM {raw_industrial_table}
LIMIT {limit} OFFSET {offset}
) as raw
JOIN (
SELECT
{rawname_list},
geom as tract,
ST_Area( geom ) as tract_area
FROM {local_demography_table}
) as census
ON ST_Intersects( raw.circle_buffer, census.tract )
)
GROUP BY {pk};
""".format(
pk=settings.SPATIALITE_PK_NAME,
target_table=target_table,
local_demography_table=project.raw_demography_table,
raw_industrial_table=project.raw_industrial_table,
equal_area_srid=settings.EQUAL_AREA_SRID,
name_list=name_list,
rawname_list=rawname_list,
assignment_list=assignment_list,
buffer_mi=buffer_mi,
limit=limit,
offset=offset,
)
cur = conn.execute(query)
print offset, '... '
offset += limit
conn.commit()
def create_industrial_table(*project_names):
with db_connect() as conn:
with open("sql/generate-industrial.sql") as f:
script_template = " ".join(f.readlines())
for proj_name in project_names:
project = get_project(proj_name)
probs = project.industrial_parcels['probability_categories']
probability_names = [p['name'] for p in probs]
raw_probability_names = [p['rawname'] for p in probs]
name_list = " ".join((name + ', ' for name in probability_names))
rawname_list = " ".join((rawname + ', ' for rawname in raw_probability_names))
name_with_type_list = " ".join((name + ' FLOAT, ' for name in probability_names))
run_spatialite_script(conn, script_template,
table=project.industrial_table,
rawtable=project.raw_industrial_table,
probability_name_list=name_list,
probability_rawname_list=rawname_list,
probability_name_with_type_list=name_with_type_list,
geog_srid=settings.GEOGRAPHIC_SRID,
)
def task_generate_converted(*project_names):
set_name = 'converted'
with db_connect() as conn:
for proj_name in project_names:
project = get_project(proj_name)
query_template = """
SELECT *, AsText(geom) as geom_wkt from {table}
"""
query = query_template.format(
table=project.raw_converted_table,
chunk_size=settings.FEATURE_CHUNK_SIZE,
)
cur = conn.execute(query)
chunk_num = 0
chunk = cur.fetchmany(settings.FEATURE_CHUNK_SIZE)
json_dir = project.app_data_dir('json')
lazy_mkdir(json_dir)
for f in os.listdir(json_dir):
if f.startswith(set_name+'-') and f.endswith('.geojson'):
# print f
os.remove(os.path.join(json_dir, f))
while chunk:
with open(os.path.join(json_dir, set_name+'-{0}.geojson'.format(chunk_num)), 'w') as f:
features = []
for row in chunk:
properties = {}
for key in row.keys():
k = key.lower()
if k not in ('geom', 'geom_wkt'):
properties[k] = row[key]
geometry = wkt.loads(row['geom_wkt'])
geometry = geojson.loads(geojson.dumps(geometry))
features.append({
'type': 'Feature',
'geometry': geometry,
'properties': properties,
})
f.write(json.dumps({
'type': 'FeatureCollection',
'features': features,
}))
chunk = cur.fetchmany(settings.FEATURE_CHUNK_SIZE)
chunk_num += 1
def task_generate_industrial(*project_names):
create_industrial_table(*project_names)
setup_project_directories()
with db_connect() as conn:
for proj_name in project_names:
project = get_project(proj_name)
probability_names = [p['name'] for p in project.industrial_parcels['probability_categories']]
race_names = [r['name'] for r in project.demography['race_categories']]
occupation_names = [o['name'] for o in project.demography['occupation_categories']]
query_template = """
SELECT *, AsText(CastToMultiPolygon(geom)) as geom_wkt from {industrial} i
"""
if settings.USE_DEMOGRAPHY:
query_template += " LEFT JOIN {race} r ON r.gid = i.gid LEFT JOIN {occupation} o ON o.gid = i.gid "
query_template += " ORDER BY size_metric DESC "
query = query_template.format(
# pk=settings.SPATIALITE_PK_NAME,
industrial=project.industrial_table,
race=project.race_table,
occupation=project.occupation_table,
chunk_size=settings.FEATURE_CHUNK_SIZE,
)
cur = conn.execute(query)
chunk_num = 0
chunk = cur.fetchmany(settings.FEATURE_CHUNK_SIZE)
json_dir = project.app_data_dir('json')
lazy_mkdir(json_dir)
for f in os.listdir(json_dir):
if f.startswith('industrial-') and f.endswith('.geojson'):
# print f
os.remove(os.path.join(json_dir, f))
while chunk:
with open(os.path.join(json_dir, 'industrial-{0}.geojson'.format(chunk_num)), 'w') as f:
industrial_features = []
for row in chunk:
properties = {
'gid': row['gid'],
'naics': row['naics'],
'size_metric': row['size_metric'],
'probability': {},
'demography': {
'race': {},
'occupation': {},
},
}
for name in probability_names:
properties['probability'][name] = row[name]
if settings.USE_DEMOGRAPHY:
for name in race_names:
properties['demography']['race'][name] = row[name]
for name in occupation_names:
properties['demography']['occupation'][name] = row[name]
geom = wkt.loads(row['geom_wkt'])
industrial_features.append({
'type': 'Feature',
'geometry': geojson.dumps(geom),
'properties': properties,
})
f.write(json.dumps({
'type': 'FeatureCollection',
'features': industrial_features,
}))
chunk = cur.fetchmany(settings.FEATURE_CHUNK_SIZE)
chunk_num += 1
def task_generate_naics(*project_names):
# Generate industry employment data from 3 files
# countywide and nationwide data are per-NAICS
# statewide data is only the total across ALL NAICS codes
YEAR_START = 1990
NAICS_COLUMN = 'industry_code'
FIPS_COLUMN = 'area_fips'
FIRST_YEAR_COLUMN = 'fyear'
NA_VALUE = 'NA'
for proj_name in project_names:
project = get_project(proj_name)
def parseCSV(name, path):
data = {}
columns = {}
years = []
is_statewide = name == 'statewide'
with open(path, 'rU') as csvfile:
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
head = reader.next()
# get indices of columns
for i, v in enumerate(head):
columns[v] = i
# get consecutive years
for y in xrange(YEAR_START, sys.maxint):
if str(y) in columns:
years.append(y)
else:
break
for row in reader:
def get(column_name):
return row[columns[column_name]]
if is_statewide:
use_row = project.fips_list[0][0:2] == get(FIPS_COLUMN)[0:2]
elif name == 'nationwide':
use_row = True
else:
use_row = get(FIPS_COLUMN) in project.fips_list
if use_row:
code = get(NAICS_COLUMN).strip()
base_year = get(FIRST_YEAR_COLUMN)
first_nonnull_year = None
if is_statewide or len(code) == 4:
values = []
for year in years:
value = get(str(year))
if value != NA_VALUE:
value = float(value)
if first_nonnull_year is None:
first_nonnull_year = int(year)
else:
value = None
values.append(value)
if first_nonnull_year is not None:
assert(first_nonnull_year == int(base_year)) # sanity
base_year_value = float(get(base_year))
emp_growth = []
for year, value in zip(years, values):
ratio = None if value is None else value / base_year_value
emp_growth.append({'year': year, 'value': ratio})
data[code] = {
'base_year': base_year,
'emp_initial': base_year_value,
'emp_growth': emp_growth,
}
return data
data = {}
for name, path in settings.naics_csv.items():
data[name] = parseCSV(name, os.path.join(settings.RAW_DATA_DIR, path))
with open(os.path.join(project.app_data_dir(), 'naics-trends.json'), 'w') as f:
print 'writing NAICS data to', f
f.write(json.dumps({'naics_trends': data}))
def setup_project_directories():
lazy_mkdir(settings.APP_DATA_DIR) # main project dir
for project in projects:
lazy_mkdir(project.app_data_dir())
def task_end2end(*project_names):
'''
Do everything!
'''
task_load_global_shapefiles()
task_load_project_shapefiles(*project_names)
task_localize_brownfields(*project_names)
task_localize_demography(*project_names)
task_process_demography(*project_names)
task_generate_brownfields(*project_names)
task_generate_converted(*project_names)
task_generate_industrial(*project_names)
task_generate_naics(*project_names)
def task_kokoromi(*project_names):
for proj_name in project_names:
project = get_project(proj_name)
with db_connect() as conn:
query_transform = """select AsText( Transform( Centroid( GUnion( Centroid(
( select geom from {table} limit 100 )
)) ), {srid} ) ) from {table}"""
query_plain = """select AsText( Centroid( GUnion( Centroid(
(select geom from {table} limit 100)
)) ) ) from {table}"""
q1 = query_transform.format(
table=project.raw_demography_table,
srid=2163,
)
q2 = query_transform.format(
table=project.raw_industrial_table,
srid=2163,
)
print conn.execute(q1).fetchall()
print conn.execute(q2).fetchone()
def main():
tasks = {
# 'build': task_build,
'load': task_load_project_shapefiles,
'load-global': task_load_global_shapefiles,
'load-all': task_load_all,
'localize-demography': task_localize_demography,
'localize-brownfields': task_localize_brownfields,
'process-demography': task_process_demography,
'generate-brownfields': task_generate_brownfields,
'generate-industrial': task_generate_industrial,
'generate-converted': task_generate_converted,
'generate-naics': task_generate_naics,
'kokoromi': task_kokoromi,
'end2end': task_end2end,
}
parser = argparse.ArgumentParser(
description='Manage the backend.'
)
parser.add_argument(
'task',
type=str,
# nargs=1,
help='see below for list of tasks',
choices=tasks.keys()
)
parser.add_argument(
'param',
type=str,
nargs='*',
)
args = parser.parse_args()
method = tasks.get(args.task)
method(*args.param)
if __name__ == '__main__':
main()
| gpl-3.0 | -2,435,972,187,406,067,000 | 35.685259 | 149 | 0.503077 | false |
jschuecker/nest-simulator | pynest/examples/plot_weight_matrices.py | 17 | 6243 | # -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Plot weight matrices example
----------------------------
This example demonstrates how to extract the connection strength
for all the synapses among two populations of neurons and gather
these values in weight matrices for further analysis and visualization.
All connection types between these populations are considered, i.e.,
four weight matrices are created and plotted.
'''
'''
First, we import all necessary modules to extract, handle and plot
the connectivity matrices
'''
import numpy as np
import pylab
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
'''
We now specify a function which takes as arguments lists of neuron gids
corresponding to each population
'''
def plot_weight_matrices(E_neurons, I_neurons):
'''
Function to extract and plot weight matrices for all connections
among E_neurons and I_neurons
'''
'''
First, we initialize all the matrices, whose dimensionality is
determined by the number of elements in each population
Since in this example, we have 2 populations (E/I), 2^2 possible
synaptic connections exist (EE, EI, IE, II)
'''
W_EE = np.zeros([len(E_neurons), len(E_neurons)])
W_EI = np.zeros([len(I_neurons), len(E_neurons)])
W_IE = np.zeros([len(E_neurons), len(I_neurons)])
W_II = np.zeros([len(I_neurons), len(I_neurons)])
'''
Using `GetConnections`, we extract the information about all the
connections involving the populations of interest. `GetConnections`
returns a list of arrays (connection objects), one per connection.
Each array has the following elements:
[source-gid target-gid target-thread synapse-model-id port]
'''
a_EE = nest.GetConnections(E_neurons, E_neurons)
'''
Using `GetStatus`, we can extract the value of the connection weight,
for all the connections between these populations
'''
c_EE = nest.GetStatus(a_EE, keys='weight')
'''
Repeat the two previous steps for all other connection types
'''
a_EI = nest.GetConnections(I_neurons, E_neurons)
c_EI = nest.GetStatus(a_EI, keys='weight')
a_IE = nest.GetConnections(E_neurons, I_neurons)
c_IE = nest.GetStatus(a_IE, keys='weight')
a_II = nest.GetConnections(I_neurons, I_neurons)
c_II = nest.GetStatus(a_II, keys='weight')
'''
We now iterate through the list of all connections of each type.
To populate the corresponding weight matrix, we begin by identifying
the source-gid (first element of each connection object, n[0])
and the target-gid (second element of each connection object, n[1]).
For each gid, we subtract the minimum gid within the corresponding
population, to assure the matrix indices range from 0 to the size of
the population.
After determining the matrix indices [i, j], for each connection
object, the corresponding weight is added to the entry W[i,j].
The procedure is then repeated for all the different connection types.
'''
for idx, n in enumerate(a_EE):
W_EE[n[0] - min(E_neurons), n[1] - min(E_neurons)] += c_EE[idx]
for idx, n in enumerate(a_EI):
W_EI[n[0] - min(I_neurons), n[1] - min(E_neurons)] += c_EI[idx]
for idx, n in enumerate(a_IE):
W_IE[n[0] - min(E_neurons), n[1] - min(I_neurons)] += c_IE[idx]
for idx, n in enumerate(a_II):
W_II[n[0] - min(I_neurons), n[1] - min(I_neurons)] += c_II[idx]
'''
We can now specify the figure and axes properties. For this specific
example, we wish to display all the weight matrices in a single
figure, which requires us to use ``GridSpec`` (for example)
to specify the spatial arrangement of the axes.
A subplot is subsequently created for each connection type.
'''
fig = pylab.figure()
fig.suptitle('Weight matrices', fontsize=14)
gs = gridspec.GridSpec(4, 4)
ax1 = pylab.subplot(gs[:-1, :-1])
ax2 = pylab.subplot(gs[:-1, -1])
ax3 = pylab.subplot(gs[-1, :-1])
ax4 = pylab.subplot(gs[-1, -1])
'''
Using ``imshow``, we can visualize the weight matrix in the corresponding
axis. We can also specify the colormap for this image.
'''
plt1 = ax1.imshow(W_EE, cmap='jet')
'''
Using the ``axis_divider`` module from ``mpl_toolkits``, we can
allocate a small extra space on the right of the current axis,
which we reserve for a colorbar.
'''
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt1, cax=cax)
'''
We now set the title of each axis and adjust the axis subplot parameters
'''
ax1.set_title('W_{EE}')
pylab.tight_layout()
'''
Finally, the last three steps are repeated for each synapse type
'''
plt2 = ax2.imshow(W_IE)
plt2.set_cmap('jet')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt2, cax=cax)
ax2.set_title('W_{EI}')
pylab.tight_layout()
plt3 = ax3.imshow(W_EI)
plt3.set_cmap('jet')
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt3, cax=cax)
ax3.set_title('W_{IE}')
pylab.tight_layout()
plt4 = ax4.imshow(W_II)
plt4.set_cmap('jet')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt4, cax=cax)
ax4.set_title('W_{II}')
pylab.tight_layout()
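'''
Illustrative usage sketch (not part of the original example). The population
sizes, neuron model and synaptic weights below are assumptions chosen only to
show how `plot_weight_matrices` could be called once a network exists:

    import nest
    nest.ResetKernel()
    E_neurons = nest.Create('iaf_psc_alpha', 80)
    I_neurons = nest.Create('iaf_psc_alpha', 20)
    nest.Connect(E_neurons, I_neurons, syn_spec={'weight': 20.0})
    nest.Connect(I_neurons, E_neurons, syn_spec={'weight': -50.0})
    plot_weight_matrices(E_neurons, I_neurons)
'''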
| gpl-2.0 | 4,416,392,864,332,955,000 | 32.564516 | 77 | 0.670351 | false |
yland/coala | coalib/output/printers/HTMLWriter.py | 14 | 3253 | from pyprint.ClosableObject import ClosableObject
class HTMLWriter(ClosableObject):
"""
Printer for outputting HTML Log files.
:param filename: the name of the file to put the data into
(string).
:param indentation_per_tag: spaces used to indent every subsequent HTML
tag.
:raises TypeError: if directory of given file doesn't exist or in
case of access problems.
"""
def __init__(self, filename, indentation_per_tag=2, indentation=0):
ClosableObject.__init__(self)
self.indentation_per_tag = indentation_per_tag
self.indentation = indentation
self.file = None
self.filename = filename
if not isinstance(filename, str):
raise TypeError("filename must be a string")
self.file = open(filename, 'w+')
self.__write_header()
def _close(self):
# Check if the file object is NoneType, trying to close a None object
# does not make sense
if self.file is not None:
self.__write_footer()
self.file.close()
def __write_header(self):
self.write("<!DOCTYPE html>")
self.open_tag("html")
def __write_footer(self):
self.close_tag("html")
def write_comment(self, *comments):
"""
Function for writing HTML comments in the output HTML log files.
:param comments: an arbitrary number of comments to add to the HTML
log file
"""
for comment in comments:
self.write("<!-- " + comment + " -->")
def write_tag(self, tag, content="", **tagargs):
"""
Function for writing an HTML tag, along with the required tag
attributes and content.
:param tag: HTML Tag for formatting the content.
:param content: content to output into the HTML Log file.
:param tagargs: arbitrary HTML tag attributes mapped to their
respective values. Ordering of the tags is
not preserved.
"""
name = tag
for arg in tagargs:
name += " " + arg + "=\"" + tagargs[arg] + "\""
if content == "":
self.write("<"+name+"/>")
return
self.open_tag(name)
self.write(content)
self.close_tag(tag)
def open_tag(self, tag_name):
"""
Function to open HTML tag. e.g. <p>
:param tag_name: the name of HTML Tag to written in the output logfile.
"""
self.write("<"+tag_name+">")
self.indentation += 4
def close_tag(self, tag_name):
"""
Function to close an open HTML tag. e.g. </p>
:param tag_name: the name of HTML Tag to be written to output logfile.
"""
self.indentation -= 4
self.write("</"+tag_name+">")
def write(self, *args):
"""
Function to write in the given output HTML log file.
:param args: arbitrary number of arguments to be written to output
logfile.
"""
for line in args:
self.file.write(" "*self.indentation + line + "\n")
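# Minimal usage sketch (illustrative; the file name and content are assumptions):
#
#     writer = HTMLWriter("log.html")
#     writer.write_comment("coala HTML log")
#     writer.write_tag("p", "Analysis finished.", id="status")
#     writer.close()
#
# close() writes the closing </html> tag and closes the underlying file.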
| agpl-3.0 | -6,998,465,960,163,367,000 | 30.582524 | 79 | 0.547187 | false |
lepistone/stock-logistics-workflow | __unported__/stock_split_picking/__openerp__.py | 4 | 1686 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright 2013 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{'name': 'Stock picking no confirm split',
'version': 'version',
'author': 'Camptocamp',
'maintainer': 'Camptocamp',
'category': 'stock',
'complexity': "normal", # easy, normal, expert
'depends': ['stock'],
'description': """
Split picking without delivery
------------------------------
This addon adds a "Split" button on the out picking form header.
It works like the classical picking split (when you deliver) but does not pass the backorder
and backorder lines to state "done".
""",
'website': 'http://www.camptocamp.com',
'data': ['view/stock_partial_picking.xml'],
'demo': [],
'test': ['test/test_picking_split.yml'],
'installable': False,
'auto_install': False,
'license': 'AGPL-3',
'application': False,
}
| agpl-3.0 | 590,598,808,028,302,800 | 36.466667 | 92 | 0.62159 | false |
Regner/will | will/settings.py | 11 | 7860 | import os
from utils import show_valid, warn, note
from clint.textui import puts, indent
from urlparse import urlparse
def import_settings(quiet=True):
"""This method takes care of importing settings from the environment, and config.py file.
Order of operations:
1. Imports all WILL_ settings from the environment, and strips off the WILL_
2. Imports settings from config.py
3. Sets defaults for any missing, required settings.
This method takes a quiet kwarg, that when False, prints helpful output. Called that way during bootstrapping.
"""
settings = {}
# Import from environment, handle environment-specific parsing.
for k, v in os.environ.items():
if k[:5] == "WILL_":
k = k[5:]
settings[k] = v
if "ROOMS" in settings:
settings["ROOMS"] = settings["ROOMS"].split(";")
# If HIPCHAT_SERVER is set, we need to change the USERNAME slightly
# for XMPP to work.
if "HIPCHAT_SERVER" in settings:
settings["USERNAME"] = "{user}@{host}".\
format(user=settings["USERNAME"].split("@")[0],
host=settings["HIPCHAT_SERVER"])
else:
settings["HIPCHAT_SERVER"] = "api.hipchat.com"
# Import from config
if not quiet:
puts("Importing config.py... ")
with indent(2):
try:
had_warning = False
import config
for k, v in config.__dict__.items():
# Ignore private variables
if "__" not in k:
if k in os.environ and v != os.environ[k] and not quiet:
warn("%s is set in the environment as '%s', but overridden in"
" config.py as '%s'." % (k, os.environ[k], v))
had_warning = True
settings[k] = v
if not had_warning and not quiet:
show_valid("Valid.")
except:
# TODO: Check to see if there's a config.py.dist
if not quiet:
warn("no config.py found. This might be ok, but more likely, "
"you haven't copied config.py.dist over to config.py")
if not quiet:
puts("Verifying settings... ")
with indent(2):
# Set defaults
if "ROOMS" not in settings:
if not quiet:
warn("no ROOMS list found in the environment or config. "
"This is ok - Will will just join all available rooms.")
settings["ROOMS"] = None
if "DEFAULT_ROOM" not in settings and "ROOMS" in settings and settings["ROOMS"] and len(settings["ROOMS"]) > 0:
if not quiet:
warn("no DEFAULT_ROOM found in the environment or config. "
"Defaulting to '%s', the first one." % settings["ROOMS"][0])
settings["DEFAULT_ROOM"] = settings["ROOMS"][0]
if "HTTPSERVER_PORT" not in settings:
# For heroku
if "PORT" in os.environ:
settings["HTTPSERVER_PORT"] = os.environ["PORT"]
else:
if not quiet:
warn("no HTTPSERVER_PORT found in the environment or config. Defaulting to ':80'.")
settings["HTTPSERVER_PORT"] = "80"
if "STORAGE_BACKEND" not in settings:
settings["STORAGE_BACKEND"] = "redis"
if settings["STORAGE_BACKEND"] == "redis":
if "REDIS_URL" not in settings:
# For heroku
if "REDISCLOUD_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDISCLOUD_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using RedisCloud. If so, all good.")
elif "REDISTOGO_URL" in os.environ:
settings["REDIS_URL"] = os.environ["REDISTOGO_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using RedisToGo. If so, all good.")
elif "OPENREDIS_URL" in os.environ:
settings["REDIS_URL"] = os.environ["OPENREDIS_URL"]
if not quiet:
note("WILL_REDIS_URL not set, but it appears you're using OpenRedis. If so, all good.")
else:
settings["REDIS_URL"] = "redis://localhost:6379/7"
if not quiet:
note("WILL_REDIS_URL not set. Defaulting to redis://localhost:6379/7.")
if not settings["REDIS_URL"].startswith("redis://"):
settings["REDIS_URL"] = "redis://%s" % settings["REDIS_URL"]
if "REDIS_MAX_CONNECTIONS" not in settings:
settings["REDIS_MAX_CONNECTIONS"] = 4
if not quiet:
note("REDIS_MAX_CONNECTIONS not set. Defaulting to 4.")
if settings["STORAGE_BACKEND"] == "file":
if "FILE_DIR" not in settings:
settings["FILE_DIR"] = "~/.will/"
if not quiet:
note("FILE_DIR not set. Defaulting to ~/.will/")
if settings["STORAGE_BACKEND"] == "couchbase":
if "COUCHBASE_URL" not in settings:
settings["COUCHBASE_URL"] = "couchbase:///will"
if not quiet:
note("COUCHBASE_URL not set. Defaulting to couchbase:///will")
if "PUBLIC_URL" not in settings:
default_public = "http://localhost:%s" % settings["HTTPSERVER_PORT"]
settings["PUBLIC_URL"] = default_public
if not quiet:
warn("no PUBLIC_URL found in the environment or config. Defaulting to '%s'." % default_public)
if "V1_TOKEN" not in settings:
if not quiet:
                warn(
                    "no V1_TOKEN found in the environment or config. "
                    "This is generally ok, but if you have more than 30 rooms, "
                    "you may receive rate-limit errors without one."
                )
if "TEMPLATE_DIRS" not in settings:
if "WILL_TEMPLATE_DIRS_PICKLED" in os.environ:
# All good
pass
else:
settings["TEMPLATE_DIRS"] = []
if "ALLOW_INSECURE_HIPCHAT_SERVER" in settings and\
(settings["ALLOW_INSECURE_HIPCHAT_SERVER"] is True or
settings["ALLOW_INSECURE_HIPCHAT_SERVER"].lower() == "true"):
warn("You are choosing to run will with SSL disabled. "
"This is INSECURE and should NEVER be deployed outside a development environment.")
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = True
settings["REQUESTS_OPTIONS"] = {
"verify": False,
}
else:
settings["ALLOW_INSECURE_HIPCHAT_SERVER"] = False
settings["REQUESTS_OPTIONS"] = {}
if "ADMINS" not in settings:
settings["ADMINS"] = "*"
else:
if "WILL_ADMINS" in os.environ:
settings["ADMINS"] = [a.strip().lower() for a in settings.get('ADMINS', '').split(';') if a.strip()]
if "PROXY_URL" in settings:
parsed_proxy_url = urlparse(settings["PROXY_URL"])
settings["USE_PROXY"] = True
settings["PROXY_HOSTNAME"] = parsed_proxy_url.hostname
settings["PROXY_USERNAME"] = parsed_proxy_url.username
settings["PROXY_PASSWORD"] = parsed_proxy_url.password
settings["PROXY_PORT"] = parsed_proxy_url.port
else:
settings["USE_PROXY"] = False
# Set them in the module namespace
for k in sorted(settings, key=lambda x: x[0]):
if not quiet:
show_valid(k)
globals()[k] = settings[k]
import_settings()
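# Note: after import_settings() runs, every discovered setting is also exposed
# as a module-level attribute, e.g. (illustrative):
#
#     from will import settings
#     settings.HIPCHAT_SERVER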
| mit | 8,993,271,245,932,201,000 | 41.486486 | 119 | 0.536514 | false |
mr-ping/WebTesting | test/test.py | 1 | 3234 | import unittest
from log import Log
class LogTest(unittest.TestCase):
def test_get_last_logs_one(self):
file = '/var/log/siege.log'
content = Log.get_last_logs(file, 121)
content_list = content.split(",")
self.assertEqual(len(content_list), 10)
def test_get_last_logs_three(self):
file = '/var/log/siege.log'
content = Log.get_last_logs(file, 121, 3)
content_list = content.split(",")
self.assertEqual(len(content_list), 28)
def test_add_new_log(self):
import os
#from StringIO import StringIO
#file = StringIO()
file = 'unittest_new_log'
text = 'test content which will be writed to the file'
for i in range(2):
Log.add_new_log(file, text)
try:
with open(file, 'r') as f:
self.assertEqual(f.read(), text*2)
finally:
os.remove(file)
def test_get_last_fails_rate(self):
log = Log('/var/log/siege.log')
rate = log.get_last_fails_rate(3)
self.assertIsInstance(rate, float)
self.assertTrue(rate>=0 and rate<=1)
def test_get_groups(self):
log = Log('sync.log')
groups = log._get_groups()
self.assertIsInstance(groups, dict)
def test_get_steps_fails_rate(self):
log = Log('sync.log')
groups = log.get_steps_fails_rate()
self.assertIsInstance(groups, dict)
def test_get_steps_trans_rate(self):
log = Log('sync.log')
groups = log.get_steps_trans_rate()
self.assertIsInstance(groups, dict)
from chat import Trend
class TrendTest(unittest.TestCase):
def test_get_points(self):
log = Log('sync.log')
fails_rate_dict = log.get_steps_fails_rate()
trend = Trend('test title',
'xlabel name',
'ylabel name',
'r',
2,
'line')
trend.get_points(fails_rate_dict)
self.assertIsNotNone(trend.xcoordinates)
self.assertIsNotNone(trend.ycoordinates)
import main
class UrlSourceTest(unittest.TestCase):
def test_check_url_source_neither(self):
main.url = None
main.url_file = None
res = main.check_url_source()
self.assertIsNone(res)
self.assertFalse(main.plotting)
def test_check_url_source_both(self):
main.url = True
main.url_file = True
res = main.check_url_source()
self.assertIsNone(res)
self.assertFalse(main.plotting)
def test_check_url_source_url(self):
main.url = 'url_command'
main.url_file = None
main.plotting = True
res = main.check_url_source()
self.assertEqual(res, 'address')
self.assertTrue(main.plotting)
def test_check_url_source_file(self):
main.url = None
main.url_file = 'test.py'
main.plotting = True
res = main.check_url_source()
self.assertEqual(res, 'file')
self.assertTrue(main.plotting)
#class OutlierTest(unittest.TestCase):
# def test_remove_outlier(self):
# results = (10, 11, 9, 9, 23)
if __name__ == '__main__':
unittest.main()
| mit | 5,447,924,264,857,259,000 | 28.669725 | 62 | 0.576994 | false |
sunny94/temp | sympy/polys/agca/tests/test_modules.py | 121 | 13526 | """Test modules.py code."""
from sympy.polys.agca.modules import FreeModule, ModuleOrder, FreeModulePolyRing
from sympy.polys import CoercionFailed, QQ, lex, grlex, ilex, ZZ
from sympy.abc import x, y, z
from sympy.utilities.pytest import raises
from sympy import S
def test_FreeModuleElement():
M = QQ.old_poly_ring(x).free_module(3)
e = M.convert([1, x, x**2])
f = [QQ.old_poly_ring(x).convert(1), QQ.old_poly_ring(x).convert(x), QQ.old_poly_ring(x).convert(x**2)]
assert list(e) == f
assert f[0] == e[0]
assert f[1] == e[1]
assert f[2] == e[2]
raises(IndexError, lambda: e[3])
g = M.convert([x, 0, 0])
assert e + g == M.convert([x + 1, x, x**2])
assert f + g == M.convert([x + 1, x, x**2])
assert -e == M.convert([-1, -x, -x**2])
assert e - g == M.convert([1 - x, x, x**2])
assert e != g
assert M.convert([x, x, x]) / QQ.old_poly_ring(x).convert(x) == [1, 1, 1]
R = QQ.old_poly_ring(x, order="ilex")
assert R.free_module(1).convert([x]) / R.convert(x) == [1]
def test_FreeModule():
M1 = FreeModule(QQ.old_poly_ring(x), 2)
assert M1 == FreeModule(QQ.old_poly_ring(x), 2)
assert M1 != FreeModule(QQ.old_poly_ring(y), 2)
assert M1 != FreeModule(QQ.old_poly_ring(x), 3)
M2 = FreeModule(QQ.old_poly_ring(x, order="ilex"), 2)
assert [x, 1] in M1
assert [x] not in M1
assert [2, y] not in M1
assert [1/(x + 1), 2] not in M1
e = M1.convert([x, x**2 + 1])
X = QQ.old_poly_ring(x).convert(x)
assert e == [X, X**2 + 1]
assert e == [x, x**2 + 1]
assert 2*e == [2*x, 2*x**2 + 2]
assert e*2 == [2*x, 2*x**2 + 2]
assert e/2 == [x/2, (x**2 + 1)/2]
assert x*e == [x**2, x**3 + x]
assert e*x == [x**2, x**3 + x]
assert X*e == [x**2, x**3 + x]
assert e*X == [x**2, x**3 + x]
assert [x, 1] in M2
assert [x] not in M2
assert [2, y] not in M2
assert [1/(x + 1), 2] in M2
e = M2.convert([x, x**2 + 1])
X = QQ.old_poly_ring(x, order="ilex").convert(x)
assert e == [X, X**2 + 1]
assert e == [x, x**2 + 1]
assert 2*e == [2*x, 2*x**2 + 2]
assert e*2 == [2*x, 2*x**2 + 2]
assert e/2 == [x/2, (x**2 + 1)/2]
assert x*e == [x**2, x**3 + x]
assert e*x == [x**2, x**3 + x]
assert e/(1 + x) == [x/(1 + x), (x**2 + 1)/(1 + x)]
assert X*e == [x**2, x**3 + x]
assert e*X == [x**2, x**3 + x]
M3 = FreeModule(QQ.old_poly_ring(x, y), 2)
assert M3.convert(e) == M3.convert([x, x**2 + 1])
assert not M3.is_submodule(0)
assert not M3.is_zero()
raises(NotImplementedError, lambda: ZZ.old_poly_ring(x).free_module(2))
raises(NotImplementedError, lambda: FreeModulePolyRing(ZZ, 2))
raises(CoercionFailed, lambda: M1.convert(QQ.old_poly_ring(x).free_module(3)
.convert([1, 2, 3])))
raises(CoercionFailed, lambda: M3.convert(1))
def test_ModuleOrder():
o1 = ModuleOrder(lex, grlex, False)
o2 = ModuleOrder(ilex, lex, False)
assert o1 == ModuleOrder(lex, grlex, False)
assert (o1 != ModuleOrder(lex, grlex, False)) is False
assert o1 != o2
assert o1((1, 2, 3)) == (1, (5, (2, 3)))
assert o2((1, 2, 3)) == (-1, (2, 3))
def test_SubModulePolyRing_global():
R = QQ.old_poly_ring(x, y)
F = R.free_module(3)
Fd = F.submodule([1, 0, 0], [1, 2, 0], [1, 2, 3])
M = F.submodule([x**2 + y**2, 1, 0], [x, y, 1])
assert F == Fd
assert Fd == F
assert F != M
assert M != F
assert Fd != M
assert M != Fd
assert Fd == F.submodule(*F.basis())
assert Fd.is_full_module()
assert not M.is_full_module()
assert not Fd.is_zero()
assert not M.is_zero()
assert Fd.submodule().is_zero()
assert M.contains([x**2 + y**2 + x, 1 + y, 1])
assert not M.contains([x**2 + y**2 + x, 1 + y, 2])
assert M.contains([y**2, 1 - x*y, -x])
assert not F.submodule([1 + x, 0, 0]) == F.submodule([1, 0, 0])
assert F.submodule([1, 0, 0], [0, 1, 0]).union(F.submodule([0, 0, 1])) == F
assert not M.is_submodule(0)
m = F.convert([x**2 + y**2, 1, 0])
n = M.convert(m)
assert m.module is F
assert n.module is M
raises(ValueError, lambda: M.submodule([1, 0, 0]))
raises(TypeError, lambda: M.union(1))
raises(ValueError, lambda: M.union(R.free_module(1).submodule([x])))
assert F.submodule([x, x, x]) != F.submodule([x, x, x], order="ilex")
def test_SubModulePolyRing_local():
R = QQ.old_poly_ring(x, y, order=ilex)
F = R.free_module(3)
Fd = F.submodule([1 + x, 0, 0], [1 + y, 2 + 2*y, 0], [1, 2, 3])
M = F.submodule([x**2 + y**2, 1, 0], [x, y, 1])
assert F == Fd
assert Fd == F
assert F != M
assert M != F
assert Fd != M
assert M != Fd
assert Fd == F.submodule(*F.basis())
assert Fd.is_full_module()
assert not M.is_full_module()
assert not Fd.is_zero()
assert not M.is_zero()
assert Fd.submodule().is_zero()
assert M.contains([x**2 + y**2 + x, 1 + y, 1])
assert not M.contains([x**2 + y**2 + x, 1 + y, 2])
assert M.contains([y**2, 1 - x*y, -x])
assert F.submodule([1 + x, 0, 0]) == F.submodule([1, 0, 0])
assert F.submodule(
[1, 0, 0], [0, 1, 0]).union(F.submodule([0, 0, 1 + x*y])) == F
raises(ValueError, lambda: M.submodule([1, 0, 0]))
def test_SubModulePolyRing_nontriv_global():
R = QQ.old_poly_ring(x, y, z)
F = R.free_module(1)
def contains(I, f):
return F.submodule(*[[g] for g in I]).contains([f])
assert contains([x, y], x)
assert contains([x, y], x + y)
assert not contains([x, y], 1)
assert not contains([x, y], z)
assert contains([x**2 + y, x**2 + x], x - y)
assert not contains([x + y + z, x*y + x*z + y*z, x*y*z], x**2)
assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x**3)
assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x**4)
assert not contains([x + y + z, x*y + x*z + y*z, x*y*z], x*y**2)
assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x**4 + y**3 + 2*z*y*x)
assert contains([x + y + z, x*y + x*z + y*z, x*y*z], x*y*z)
assert contains([x, 1 + x + y, 5 - 7*y], 1)
assert contains(
[x**3 + y**3, y**3 + z**3, z**3 + x**3, x**2*y + x**2*z + y**2*z],
x**3)
assert not contains(
[x**3 + y**3, y**3 + z**3, z**3 + x**3, x**2*y + x**2*z + y**2*z],
x**2 + y**2)
# compare local order
assert not contains([x*(1 + x + y), y*(1 + z)], x)
assert not contains([x*(1 + x + y), y*(1 + z)], x + y)
def test_SubModulePolyRing_nontriv_local():
R = QQ.old_poly_ring(x, y, z, order=ilex)
F = R.free_module(1)
def contains(I, f):
return F.submodule(*[[g] for g in I]).contains([f])
assert contains([x, y], x)
assert contains([x, y], x + y)
assert not contains([x, y], 1)
assert not contains([x, y], z)
assert contains([x**2 + y, x**2 + x], x - y)
assert not contains([x + y + z, x*y + x*z + y*z, x*y*z], x**2)
assert contains([x*(1 + x + y), y*(1 + z)], x)
assert contains([x*(1 + x + y), y*(1 + z)], x + y)
def test_syzygy():
R = QQ.old_poly_ring(x, y, z)
M = R.free_module(1).submodule([x*y], [y*z], [x*z])
S = R.free_module(3).submodule([0, x, -y], [z, -x, 0])
assert M.syzygy_module() == S
M2 = M / ([x*y*z],)
S2 = R.free_module(3).submodule([z, 0, 0], [0, x, 0], [0, 0, y])
assert M2.syzygy_module() == S2
F = R.free_module(3)
assert F.submodule(*F.basis()).syzygy_module() == F.submodule()
R2 = QQ.old_poly_ring(x, y, z) / [x*y*z]
M3 = R2.free_module(1).submodule([x*y], [y*z], [x*z])
S3 = R2.free_module(3).submodule([z, 0, 0], [0, x, 0], [0, 0, y])
assert M3.syzygy_module() == S3
def test_in_terms_of_generators():
R = QQ.old_poly_ring(x, order="ilex")
M = R.free_module(2).submodule([2*x, 0], [1, 2])
assert M.in_terms_of_generators(
[x, x]) == [R.convert(S(1)/4), R.convert(x/2)]
raises(ValueError, lambda: M.in_terms_of_generators([1, 0]))
M = R.free_module(2) / ([x, 0], [1, 1])
SM = M.submodule([1, x])
assert SM.in_terms_of_generators([2, 0]) == [R.convert(-2/(x - 1))]
R = QQ.old_poly_ring(x, y) / [x**2 - y**2]
M = R.free_module(2)
SM = M.submodule([x, 0], [0, y])
assert SM.in_terms_of_generators(
[x**2, x**2]) == [R.convert(x), R.convert(y)]
def test_QuotientModuleElement():
R = QQ.old_poly_ring(x)
F = R.free_module(3)
N = F.submodule([1, x, x**2])
M = F/N
e = M.convert([x**2, 2, 0])
assert M.convert([x + 1, x**2 + x, x**3 + x**2]) == 0
assert e == [x**2, 2, 0] + N == F.convert([x**2, 2, 0]) + N == \
M.convert(F.convert([x**2, 2, 0]))
assert M.convert([x**2 + 1, 2*x + 2, x**2]) == e + [0, x, 0] == \
e + M.convert([0, x, 0]) == e + F.convert([0, x, 0])
assert M.convert([x**2 + 1, 2, x**2]) == e - [0, x, 0] == \
e - M.convert([0, x, 0]) == e - F.convert([0, x, 0])
assert M.convert([0, 2, 0]) == M.convert([x**2, 4, 0]) - e == \
[x**2, 4, 0] - e == F.convert([x**2, 4, 0]) - e
assert M.convert([x**3 + x**2, 2*x + 2, 0]) == (1 + x)*e == \
R.convert(1 + x)*e == e*(1 + x) == e*R.convert(1 + x)
assert -e == [-x**2, -2, 0]
f = [x, x, 0] + N
assert M.convert([1, 1, 0]) == f / x == f / R.convert(x)
M2 = F/[(2, 2*x, 2*x**2), (0, 0, 1)]
G = R.free_module(2)
M3 = G/[[1, x]]
M4 = F.submodule([1, x, x**2], [1, 0, 0]) / N
raises(CoercionFailed, lambda: M.convert(G.convert([1, x])))
raises(CoercionFailed, lambda: M.convert(M3.convert([1, x])))
raises(CoercionFailed, lambda: M.convert(M2.convert([1, x, x])))
assert M2.convert(M.convert([2, x, x**2])) == [2, x, 0]
assert M.convert(M4.convert([2, 0, 0])) == [2, 0, 0]
def test_QuotientModule():
R = QQ.old_poly_ring(x)
F = R.free_module(3)
N = F.submodule([1, x, x**2])
M = F/N
assert M != F
assert M != N
assert M == F / [(1, x, x**2)]
assert not M.is_zero()
assert (F / F.basis()).is_zero()
SQ = F.submodule([1, x, x**2], [2, 0, 0]) / N
assert SQ == M.submodule([2, x, x**2])
assert SQ != M.submodule([2, 1, 0])
assert SQ != M
assert M.is_submodule(SQ)
assert not SQ.is_full_module()
raises(ValueError, lambda: N/F)
raises(ValueError, lambda: F.submodule([2, 0, 0]) / N)
raises(ValueError, lambda: R.free_module(2)/F)
raises(CoercionFailed, lambda: F.convert(M.convert([1, x, x**2])))
M1 = F / [[1, 1, 1]]
M2 = M1.submodule([1, 0, 0], [0, 1, 0])
assert M1 == M2
def test_ModulesQuotientRing():
R = QQ.old_poly_ring(x, y, order=(("lex", x), ("ilex", y))) / [x**2 + 1]
M1 = R.free_module(2)
assert M1 == R.free_module(2)
assert M1 != QQ.old_poly_ring(x).free_module(2)
assert M1 != R.free_module(3)
assert [x, 1] in M1
assert [x] not in M1
assert [1/(R.convert(x) + 1), 2] in M1
assert [1, 2/(1 + y)] in M1
assert [1, 2/y] not in M1
assert M1.convert([x**2, y]) == [-1, y]
F = R.free_module(3)
Fd = F.submodule([x**2, 0, 0], [1, 2, 0], [1, 2, 3])
M = F.submodule([x**2 + y**2, 1, 0], [x, y, 1])
assert F == Fd
assert Fd == F
assert F != M
assert M != F
assert Fd != M
assert M != Fd
assert Fd == F.submodule(*F.basis())
assert Fd.is_full_module()
assert not M.is_full_module()
assert not Fd.is_zero()
assert not M.is_zero()
assert Fd.submodule().is_zero()
assert M.contains([x**2 + y**2 + x, -x**2 + y, 1])
assert not M.contains([x**2 + y**2 + x, 1 + y, 2])
assert M.contains([y**2, 1 - x*y, -x])
assert F.submodule([x, 0, 0]) == F.submodule([1, 0, 0])
assert not F.submodule([y, 0, 0]) == F.submodule([1, 0, 0])
assert F.submodule([1, 0, 0], [0, 1, 0]).union(F.submodule([0, 0, 1])) == F
assert not M.is_submodule(0)
def test_module_mul():
R = QQ.old_poly_ring(x)
M = R.free_module(2)
S1 = M.submodule([x, 0], [0, x])
S2 = M.submodule([x**2, 0], [0, x**2])
I = R.ideal(x)
assert I*M == M*I == S1 == x*M == M*x
assert I*S1 == S2 == x*S1
def test_intersection():
# SCA, example 2.8.5
F = QQ.old_poly_ring(x, y).free_module(2)
M1 = F.submodule([x, y], [y, 1])
M2 = F.submodule([0, y - 1], [x, 1], [y, x])
I = F.submodule([x, y], [y**2 - y, y - 1], [x*y + y, x + 1])
I1, rel1, rel2 = M1.intersect(M2, relations=True)
assert I1 == M2.intersect(M1) == I
for i, g in enumerate(I1.gens):
assert g == sum(c*x for c, x in zip(rel1[i], M1.gens)) \
== sum(d*y for d, y in zip(rel2[i], M2.gens))
assert F.submodule([x, y]).intersect(F.submodule([y, x])).is_zero()
def test_quotient():
# SCA, example 2.8.6
R = QQ.old_poly_ring(x, y, z)
F = R.free_module(2)
assert F.submodule([x*y, x*z], [y*z, x*y]).module_quotient(
F.submodule([y, z], [z, y])) == QQ.old_poly_ring(x, y, z).ideal(x**2*y**2 - x*y*z**2)
assert F.submodule([x, y]).module_quotient(F.submodule()).is_whole_ring()
M = F.submodule([x**2, x**2], [y**2, y**2])
N = F.submodule([x + y, x + y])
q, rel = M.module_quotient(N, relations=True)
assert q == R.ideal(y**2, x - y)
for i, g in enumerate(q.gens):
assert g*N.gens[0] == sum(c*x for c, x in zip(rel[i], M.gens))
def test_groebner_extended():
M = QQ.old_poly_ring(x, y, z).free_module(3).submodule([x + 1, y, 1], [x*y, z, z**2])
G, R = M._groebner_vec(extended=True)
for i, g in enumerate(G):
assert g == sum(c*gen for c, gen in zip(R[i], M.gens))
| bsd-3-clause | -2,570,000,883,159,519,000 | 32.151961 | 107 | 0.51767 | false |
darcyfdu/findlicense | etc/scripts/json2csv.py | 4 | 9466 | #!/usr/bin/python2
#
# Copyright (c) 2017 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import codecs
from collections import OrderedDict
import json
import os
import click
import unicodecsv
"""
Convert a ScanCode JSON scan file to a nexb-toolkit-like CSV.
Ensure you are in the scancode virtualenv and call: etc/scripts/json2csv -h
"""
def load_scan(json_input):
"""
Return a list of scan results loaded from a json_input, either in ScanCode
standard JSON format or the data.json html-app format.
"""
with codecs.open(json_input, 'rb', encoding='utf-8') as jsonf:
scan = jsonf.read()
# strip the leading data padding if any (used in the html-app JSON)
html_app_lead = 'data='
is_html_app_json = scan.startswith(html_app_lead)
if is_html_app_json:
scan = scan[len(html_app_lead):]
scan_results = json.loads(scan, object_pairs_hook=OrderedDict)
if not is_html_app_json:
scan_results = scan_results['files']
return scan_results
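# Quick sketch of the two supported inputs (illustrative, based on the code
# above): a standard ScanCode scan looks like '{"files": [...]}' and the list
# under "files" is returned; an html-app data.json has a leading 'data='
# prefix, which is stripped before parsing, and its JSON is used as-is.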
def json_scan_to_csv(json_input, csv_output):
"""
Convert a scancode JSON output file to a nexb-toolkit-like CSV.
csv_output is an open file descriptor.
"""
scan_results = load_scan(json_input)
headers = OrderedDict([
('info', []),
('license', []),
('copyright', []),
('email', []),
('url', []),
('package', []),
])
rows = list(flatten_scan(scan_results, headers))
ordered_headers = []
for key_group in headers.values():
ordered_headers.extend(key_group)
w = unicodecsv.DictWriter(csv_output, ordered_headers)
w.writeheader()
for r in rows:
w.writerow(r)
def flatten_scan(scan, headers):
"""
Yield ordered dictionaries of key/values flattening the data and
keying always by path, given a ScanCode scan results list.
Update the headers mapping list with seen keys as a side effect.
"""
seen = set()
def collect_keys(mapping, key_group):
"""Update the headers with new keys."""
keys = mapping.keys()
headers[key_group].extend(k for k in keys if k not in seen)
seen.update(keys)
for scanned_file in scan:
path = scanned_file.pop('path')
        # always use a root slash
path = path if path.startswith('/') else '/' + path
        # always use a trailing slash for directories
if scanned_file.get('type', '') == 'directory':
if not path.endswith('/'):
path = path + '/'
        # always create a root directory
path = '/code' + path
errors = scanned_file.pop('scan_errors', [])
file_info = OrderedDict()
file_info['Resource'] = path
        # info values are NOT lists: the lists are the actual scan results
file_info.update(((k, v) for k, v in scanned_file.items() if not isinstance(v, list)))
# Scan errors are joined in a single multi-line value
file_info['scan_errors'] = '\n'.join(errors)
collect_keys(file_info, 'info')
yield file_info
for licensing in scanned_file.get('licenses', []):
lic = OrderedDict()
lic['Resource'] = path
for k, val in licensing.items():
# do not include matched rule details for now.
if k == 'matched_rule':
continue
if k == 'score':
# normalize the string representation of this number
val = '{:.2f}'.format(val)
                # start_line/end_line appear in multiple scan types: keep those column names scan-neutral
                # Prefix the other columns with license__
if k not in ('start_line', 'end_line',):
k = 'license__' + k
lic[k] = val
collect_keys(lic, 'license')
yield lic
key_to_header_mapping = [
('statements', 'copyright'),
('holders', 'copyright_holder'),
('authors', 'author')
]
for copy_info in scanned_file.get('copyrights', []):
start_line = copy_info['start_line']
end_line = copy_info['end_line']
# rename some keys to a different column header
for key, header in key_to_header_mapping:
for cop in copy_info.get(key, []):
inf = OrderedDict()
inf['Resource'] = path
inf[header] = cop
inf['start_line'] = start_line
inf['end_line'] = end_line
collect_keys(inf, 'copyright')
yield inf
for email in scanned_file.get('emails', []):
email_info = OrderedDict()
email_info['Resource'] = path
email_info.update(email)
collect_keys(email_info, 'email')
yield email_info
for url in scanned_file.get('urls', []):
url_info = OrderedDict()
url_info['Resource'] = path
url_info.update(url)
collect_keys(url_info, 'url')
yield url_info
# exclude some columns from the packages for now
excluded_package_columns = {
'packaging',
'payload_type',
'keywords_doc_url',
'download_sha1',
'download_sha256',
'download_md5',
'code_view_url',
'vcs_tool',
'vcs_revision',
'license_expression'
}
for package in scanned_file.get('packages', []):
pack = OrderedDict()
pack['Resource'] = path
for k, val in package.items():
# prefix columns with "package__"
nk = 'package__' + k
# keep all non-excluded plain string values
if k not in excluded_package_columns and not isinstance(val, (list, dict, OrderedDict)):
                    # prefix versions with a v to keep spreadsheet tools from mistaking
                    # a version for a number or date.
if k == 'version' and val:
val = 'v ' + val
pack[nk] = val
                # FIXME: for now we only keep some of the value lists
elif k in ('authors', 'download_urls', 'copyrights', 'asserted_licenses'):
pack[nk] = ''
if val and len(val):
if k == 'authors':
# FIXME: we only keep the first author name for now
pack[nk] = val[0]['name']
if k == 'download_urls':
# FIXME: we only keep the first URL for now
pack[nk] = val[0]
if k == 'copyrights':
# All copyright statements are joined in a single multiline value
pack[nk] = '\n'.join(val)
if k == 'asserted_licenses':
# All licenses are joined in a single multi-line value
licenses = [license_info.get('license') for license_info in val]
licenses = [lic for lic in licenses if lic]
pack[nk] = '\n'.join(licenses)
collect_keys(pack, 'package')
yield pack
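# Illustrative shape of a flattened row (the field names in this hypothetical
# example are assumptions, not taken from a real scan): each yielded mapping
# is keyed by 'Resource' plus the columns collected above, e.g.
#   {'Resource': '/code/project/setup.py',
#    'license__key': 'apache-2.0', 'start_line': 1, 'end_line': 2}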
@click.command()
@click.argument('json_input', type=click.Path(exists=True, readable=True))
@click.argument('csv_output', type=click.File('wb', lazy=False))
@click.help_option('-h', '--help')
def cli(json_input, csv_output):
"""
Convert a ScanCode JSON scan file to a nexb-toolkit-like CSV.
JSON_INPUT is either a ScanCode json format scan or the data.json file from a ScanCode html-app format scan.
Paths will be prefixed with '/code/' to provide a common base directory for scanned resources.
"""
json_input = os.path.abspath(os.path.expanduser(json_input))
json_scan_to_csv(json_input, csv_output)
if __name__ == '__main__':
cli()
| apache-2.0 | -562,356,641,273,342,300 | 35.976563 | 112 | 0.573315 | false |
muraliselva10/cloudkitty-dashboard | cloudkittydashboard/dashboards/admin/pyscripts/tables.py | 1 | 2361 | # Copyright 2015 Objectif Libre
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from cloudkittydashboard.api import cloudkitty as api
def get_detail_link(datum):
if datum.script_id:
url = "horizon:admin:pyscripts:script_details"
return reverse(url, kwargs={'script_id': datum.script_id})
class CreatePyScript(tables.LinkAction):
name = "createpyscript"
verbose_name = _("Create Script")
url = "horizon:admin:pyscripts:script_create"
icon = "create"
ajax = True
classes = ("ajax-modal",)
class UpdateScript(tables.LinkAction):
name = "updatepyscript"
verbose_name = _("Edit Script")
classes = ("ajax-modal",)
icon = "pencil"
def get_link_url(self, datum=None):
url = "horizon:admin:pyscripts:script_update"
return reverse(url, kwargs={'script_id': datum.script_id})
class DeletePyScript(tables.DeleteAction):
name = "deletepyscript"
verbose_name = _("Delete Script")
action_present = _("Delete")
action_past = _("Deleted")
data_type_singular = _("PyScript")
data_type_plural = _("PyScripts")
icon = "remove"
def action(self, request, script_id):
api.cloudkittyclient(request).pyscripts.scripts.delete(
script_id=script_id)
class PyScriptsTable(tables.DataTable):
id = tables.Column("id", verbose_name=_("id"), link=get_detail_link)
name = tables.Column("name", verbose_name=_("Name"))
checksum = tables.Column("checksum", verbose_name=_("Checksum"))
class Meta(object):
name = "pyscripts"
verbose_name = _("pyscripts")
table_actions = (CreatePyScript, DeletePyScript)
row_actions = (UpdateScript, DeletePyScript)
| apache-2.0 | -3,579,277,093,303,672,300 | 32.253521 | 78 | 0.682338 | false |
maxamillion/ansible-modules-extras | cloud/cloudstack/cs_cluster.py | 44 | 12964 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, René Moser <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_cluster
short_description: Manages host clusters on Apache CloudStack based clouds.
description:
- Create, update and remove clusters.
version_added: "2.1"
author: "René Moser (@resmo)"
options:
name:
description:
- name of the cluster.
required: true
zone:
description:
- Name of the zone in which the cluster belongs to.
- If not set, default zone is used.
required: false
default: null
pod:
description:
- Name of the pod in which the cluster belongs to.
required: false
default: null
cluster_type:
description:
- Type of the cluster.
- Required if C(state=present)
required: false
default: null
choices: [ 'CloudManaged', 'ExternalManaged' ]
hypervisor:
description:
- Name the hypervisor to be used.
- Required if C(state=present).
required: false
default: none
choices: [ 'KVM', 'VMware', 'BareMetal', 'XenServer', 'LXC', 'HyperV', 'UCS', 'OVM' ]
url:
description:
- URL for the cluster
required: false
default: null
username:
description:
- Username for the cluster.
required: false
default: null
password:
description:
- Password for the cluster.
required: false
default: null
guest_vswitch_name:
description:
- Name of virtual switch used for guest traffic in the cluster.
- This would override zone wide traffic label setting.
required: false
default: null
guest_vswitch_type:
description:
- Type of virtual switch used for guest traffic in the cluster.
        - Allowed values are vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
required: false
default: null
choices: [ 'vmwaresvs', 'vmwaredvs' ]
public_vswitch_name:
description:
- Name of virtual switch used for public traffic in the cluster.
- This would override zone wide traffic label setting.
required: false
default: null
public_vswitch_type:
description:
- Type of virtual switch used for public traffic in the cluster.
        - Allowed values are vmwaresvs (for VMware standard vSwitch) and vmwaredvs (for VMware distributed vSwitch)
required: false
default: null
choices: [ 'vmwaresvs', 'vmwaredvs' ]
vms_ip_address:
description:
- IP address of the VSM associated with this cluster.
required: false
default: null
vms_username:
description:
- Username for the VSM associated with this cluster.
required: false
default: null
vms_password:
description:
- Password for the VSM associated with this cluster.
required: false
default: null
ovm3_cluster:
description:
- Ovm3 native OCFS2 clustering enabled for cluster.
required: false
default: null
ovm3_pool:
description:
- Ovm3 native pooling enabled for cluster.
required: false
default: null
ovm3_vip:
description:
- Ovm3 vip to use for pool (and cluster).
required: false
default: null
state:
description:
- State of the cluster.
required: false
default: 'present'
choices: [ 'present', 'absent', 'disabled', 'enabled' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Ensure a cluster is present
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
hypervisor: KVM
cluster_type: CloudManaged
# Ensure a cluster is disabled
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: disabled
# Ensure a cluster is enabled
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: enabled
# Ensure a cluster is absent
- local_action:
module: cs_cluster
name: kvm-cluster-01
zone: ch-zrh-ix-01
state: absent
'''
RETURN = '''
---
id:
description: UUID of the cluster.
returned: success
type: string
sample: 04589590-ac63-4ffc-93f5-b698b8ac38b6
name:
description: Name of the cluster.
returned: success
type: string
sample: cluster01
allocation_state:
description: State of the cluster.
returned: success
type: string
sample: Enabled
cluster_type:
description: Type of the cluster.
returned: success
type: string
sample: ExternalManaged
cpu_overcommit_ratio:
description: The CPU overcommit ratio of the cluster.
returned: success
type: string
sample: 1.0
memory_overcommit_ratio:
description: The memory overcommit ratio of the cluster.
returned: success
type: string
sample: 1.0
managed_state:
description: Whether this cluster is managed by CloudStack.
returned: success
type: string
sample: Managed
ovm3_vip:
description: Ovm3 VIP to use for pooling and/or clustering
returned: success
type: string
sample: 10.10.10.101
hypervisor:
description: Hypervisor of the cluster
returned: success
type: string
sample: VMware
zone:
description: Name of zone the cluster is in.
returned: success
type: string
sample: ch-gva-2
pod:
description: Name of pod the cluster is in.
returned: success
type: string
sample: pod01
'''
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackCluster(AnsibleCloudStack):
def __init__(self, module):
super(AnsibleCloudStackCluster, self).__init__(module)
self.returns = {
'allocationstate': 'allocation_state',
'hypervisortype': 'hypervisor',
'clustertype': 'cluster_type',
'podname': 'pod',
'managedstate': 'managed_state',
'memoryovercommitratio': 'memory_overcommit_ratio',
'cpuovercommitratio': 'cpu_overcommit_ratio',
'ovm3vip': 'ovm3_vip',
}
self.cluster = None
def _get_common_cluster_args(self):
args = {
'clustername': self.module.params.get('name'),
'hypervisor': self.module.params.get('hypervisor'),
'clustertype': self.module.params.get('cluster_type'),
}
state = self.module.params.get('state')
if state in ['enabled', 'disabled']:
args['allocationstate'] = state.capitalize()
return args
def get_pod(self, key=None):
args = {
'name': self.module.params.get('pod'),
'zoneid': self.get_zone(key='id'),
}
pods = self.cs.listPods(**args)
if pods:
return self._get_by_key(key, pods['pod'][0])
self.module.fail_json(msg="Pod %s not found in zone %s." % (self.module.params.get('pod'), self.get_zone(key='name')))
def get_cluster(self):
if not self.cluster:
args = {}
uuid = self.module.params.get('id')
if uuid:
args['id'] = uuid
clusters = self.cs.listClusters(**args)
if clusters:
self.cluster = clusters['cluster'][0]
return self.cluster
args['name'] = self.module.params.get('name')
clusters = self.cs.listClusters(**args)
if clusters:
self.cluster = clusters['cluster'][0]
                # fix different return from API than the request argument given
self.cluster['hypervisor'] = self.cluster['hypervisortype']
self.cluster['clustername'] = self.cluster['name']
return self.cluster
def present_cluster(self):
cluster = self.get_cluster()
if cluster:
cluster = self._update_cluster()
else:
cluster = self._create_cluster()
return cluster
def _create_cluster(self):
required_params = [
'cluster_type',
'hypervisor',
]
self.module.fail_on_missing_params(required_params=required_params)
args = self._get_common_cluster_args()
args['zoneid'] = self.get_zone(key='id')
args['podid'] = self.get_pod(key='id')
args['url'] = self.module.params.get('url')
args['username'] = self.module.params.get('username')
args['password'] = self.module.params.get('password')
args['guestvswitchname'] = self.module.params.get('guest_vswitch_name')
args['guestvswitchtype'] = self.module.params.get('guest_vswitch_type')
        args['publicvswitchname'] = self.module.params.get('public_vswitch_name')
args['publicvswitchtype'] = self.module.params.get('public_vswitch_type')
args['vsmipaddress'] = self.module.params.get('vms_ip_address')
args['vsmusername'] = self.module.params.get('vms_username')
        args['vsmpassword'] = self.module.params.get('vms_password')
args['ovm3cluster'] = self.module.params.get('ovm3_cluster')
args['ovm3pool'] = self.module.params.get('ovm3_pool')
args['ovm3vip'] = self.module.params.get('ovm3_vip')
self.result['changed'] = True
cluster = None
if not self.module.check_mode:
res = self.cs.addCluster(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
# API returns a list as result CLOUDSTACK-9205
if isinstance(res['cluster'], list):
cluster = res['cluster'][0]
else:
cluster = res['cluster']
return cluster
def _update_cluster(self):
cluster = self.get_cluster()
args = self._get_common_cluster_args()
args['id'] = cluster['id']
if self.has_changed(args, cluster):
self.result['changed'] = True
if not self.module.check_mode:
res = self.cs.updateCluster(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
cluster = res['cluster']
return cluster
def absent_cluster(self):
cluster = self.get_cluster()
if cluster:
self.result['changed'] = True
args = {
'id': cluster['id'],
}
if not self.module.check_mode:
res = self.cs.deleteCluster(**args)
if 'errortext' in res:
self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
return cluster
def main():
argument_spec = cs_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
zone=dict(default=None),
pod=dict(default=None),
cluster_type=dict(choices=['CloudManaged', 'ExternalManaged'], default=None),
hypervisor=dict(choices=CS_HYPERVISORS, default=None),
state=dict(choices=['present', 'enabled', 'disabled', 'absent'], default='present'),
url=dict(default=None),
username=dict(default=None),
password=dict(default=None, no_log=True),
guest_vswitch_name=dict(default=None),
guest_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
public_vswitch_name=dict(default=None),
public_vswitch_type=dict(choices=['vmwaresvs', 'vmwaredvs'], default=None),
vms_ip_address=dict(default=None),
vms_username=dict(default=None),
vms_password=dict(default=None, no_log=True),
ovm3_cluster=dict(default=None),
ovm3_pool=dict(default=None),
ovm3_vip=dict(default=None),
))
module = AnsibleModule(
argument_spec=argument_spec,
required_together=cs_required_together(),
supports_check_mode=True
)
try:
acs_cluster = AnsibleCloudStackCluster(module)
state = module.params.get('state')
if state in ['absent']:
cluster = acs_cluster.absent_cluster()
else:
cluster = acs_cluster.present_cluster()
result = acs_cluster.get_result(cluster)
except CloudStackException as e:
module.fail_json(msg='CloudStackException: %s' % str(e))
module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 | 3,232,549,471,124,773,000 | 30.083933 | 126 | 0.623129 | false |
tamihiro/grpc | src/python/grpcio/tests/unit/framework/face/testing/__init__.py | 1496 | 1530 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause | -1,241,403,873,276,167,400 | 50 | 72 | 0.781699 | false |
Unow/edx-platform | common/djangoapps/course_groups/views.py | 8 | 7573 | from django_future.csrf import ensure_csrf_cookie
from django.views.decorators.http import require_POST
from django.contrib.auth.models import User
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.core.urlresolvers import reverse
from django.http import HttpResponse
import json
import logging
import re
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from courseware.courses import get_course_with_access
from edxmako.shortcuts import render_to_response
from . import cohorts
log = logging.getLogger(__name__)
def json_http_response(data):
"""
Return an HttpResponse with the data json-serialized and the right content
type header.
"""
return HttpResponse(json.dumps(data), content_type="application/json")
def split_by_comma_and_whitespace(s):
"""
    Split a string both by commas and whitespace. Returns a list.
"""
return re.split(r'[\s,]+', s)
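# Example (illustrative): split_by_comma_and_whitespace('a,b  c, d') returns
# ['a', 'b', 'c', 'd']; a leading or trailing separator yields an empty string
# entry at that end.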
@ensure_csrf_cookie
def list_cohorts(request, course_key):
"""
Return json dump of dict:
{'success': True,
'cohorts': [{'name': name, 'id': id}, ...]}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)
get_course_with_access(request.user, 'staff', course_key)
all_cohorts = [{'name': c.name, 'id': c.id}
for c in cohorts.get_course_cohorts(course_key)]
return json_http_response({'success': True,
'cohorts': all_cohorts})
@ensure_csrf_cookie
@require_POST
def add_cohort(request, course_key):
"""
Return json of dict:
{'success': True,
'cohort': {'id': id,
'name': name}}
or
{'success': False,
'msg': error_msg} if there's an error
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)
get_course_with_access(request.user, 'staff', course_key)
name = request.POST.get("name")
if not name:
return json_http_response({'success': False,
'msg': "No name specified"})
try:
cohort = cohorts.add_cohort(course_key, name)
except ValueError as err:
return json_http_response({'success': False,
'msg': str(err)})
return json_http_response({'success': 'True',
'cohort': {
'id': cohort.id,
'name': cohort.name
}})
@ensure_csrf_cookie
def users_in_cohort(request, course_key, cohort_id):
"""
Return users in the cohort. Show up to 100 per page, and page
using the 'page' GET attribute in the call. Format:
Returns:
Json dump of dictionary in the following format:
{'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': [{'username': ..., 'email': ..., 'name': ...}]
}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)
get_course_with_access(request.user, 'staff', course_key)
# this will error if called with a non-int cohort_id. That's ok--it
    # shouldn't happen for valid clients.
cohort = cohorts.get_cohort_by_id(course_key, int(cohort_id))
paginator = Paginator(cohort.users.all(), 100)
page = request.GET.get('page')
try:
users = paginator.page(page)
except PageNotAnInteger:
# return the first page
page = 1
users = paginator.page(page)
except EmptyPage:
# Page is out of range. Return last page
page = paginator.num_pages
        users = paginator.page(page)
user_info = [{'username': u.username,
'email': u.email,
'name': '{0} {1}'.format(u.first_name, u.last_name)}
for u in users]
return json_http_response({'success': True,
'page': page,
'num_pages': paginator.num_pages,
'users': user_info})
@ensure_csrf_cookie
@require_POST
def add_users_to_cohort(request, course_key, cohort_id):
"""
Return json dict of:
{'success': True,
'added': [{'username': ...,
'name': ...,
'email': ...}, ...],
'changed': [{'username': ...,
'name': ...,
'email': ...,
'previous_cohort': ...}, ...],
'present': [str1, str2, ...], # already there
'unknown': [str1, str2, ...]}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)
get_course_with_access(request.user, 'staff', course_key)
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
users = request.POST.get('users', '')
added = []
changed = []
present = []
unknown = []
for username_or_email in split_by_comma_and_whitespace(users):
if not username_or_email:
continue
try:
(user, previous_cohort) = cohorts.add_user_to_cohort(cohort, username_or_email)
info = {
'username': user.username,
'name': user.profile.name,
'email': user.email,
}
if previous_cohort:
info['previous_cohort'] = previous_cohort
changed.append(info)
else:
added.append(info)
except ValueError:
present.append(username_or_email)
except User.DoesNotExist:
unknown.append(username_or_email)
return json_http_response({'success': True,
'added': added,
'changed': changed,
'present': present,
'unknown': unknown})
@ensure_csrf_cookie
@require_POST
def remove_user_from_cohort(request, course_key, cohort_id):
"""
Expects 'username': username in POST data.
Return json dict of:
{'success': True} or
{'success': False,
'msg': error_msg}
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)
get_course_with_access(request.user, 'staff', course_key)
username = request.POST.get('username')
if username is None:
return json_http_response({'success': False,
'msg': 'No username specified'})
cohort = cohorts.get_cohort_by_id(course_key, cohort_id)
try:
user = User.objects.get(username=username)
cohort.users.remove(user)
return json_http_response({'success': True})
except User.DoesNotExist:
log.debug('no user')
return json_http_response({'success': False,
'msg': "No user '{0}'".format(username)})
def debug_cohort_mgmt(request, course_key):
"""
Debugging view for dev.
"""
# this is a string when we get it here
course_key = SlashSeparatedCourseKey.from_deprecated_string(course_key)
# add staff check to make sure it's safe if it's accidentally deployed.
get_course_with_access(request.user, 'staff', course_key)
context = {'cohorts_ajax_url': reverse(
'cohorts',
kwargs={'course_key': course_key.to_deprecated_string()}
)}
return render_to_response('/course_groups/debug.html', context)
| agpl-3.0 | -5,039,435,894,278,112,000 | 30.293388 | 91 | 0.576522 | false |
daimajia/duktape | src/dukutil.py | 14 | 3679 | #!/usr/bin/python
#
# Python utilities shared by the build scripts.
#
import datetime
import json
class BitEncoder:
"Bitstream encoder."
_bits = None
def __init__(self):
self._bits = []
def bits(self, x, nbits):
if (x >> nbits) != 0:
raise Exception('input value has too many bits (value: %d, bits: %d)' % (x, nbits))
for i in xrange(nbits):
t = (x >> (nbits - i - 1)) & 0x01
self._bits.append(t)
def string(self, x):
nbits = len(x) * 8
for i in xrange(nbits):
byteidx = i / 8
bitidx = i % 8
if byteidx < 0 or byteidx >= len(x):
self._bits.append(0)
else:
t = (ord(x[byteidx]) >> (7 - bitidx)) & 0x01
self._bits.append(t)
def getNumBits(self):
"Get current number of encoded bits."
return len(self._bits)
def getNumBytes(self):
"Get current number of encoded bytes, rounded up."
nbits = len(self._bits)
while (nbits % 8) != 0:
nbits += 1
return nbits / 8
def getBytes(self):
"Get current bitstream as a byte sequence, padded with zero bits."
bytes = []
for i in xrange(self.getNumBytes()):
t = 0
for j in xrange(8):
off = i*8 + j
if off >= len(self._bits):
t = (t << 1)
else:
t = (t << 1) + self._bits[off]
bytes.append(t)
return bytes
def getByteString(self):
"Get current bitstream as a string."
return ''.join([chr(i) for i in self.getBytes()])
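# The helper below is an added usage sketch (not part of the original duktape
# build utilities): it shows how BitEncoder packs values MSB-first and pads
# the final byte with zero bits.
def _bitencoder_usage_sketch():
    "Illustrative BitEncoder usage sketch."
    be = BitEncoder()
    be.bits(0x5, 3)                 # appends bits 1, 0, 1
    be.string('A')                  # appends the 8 bits of 'A' (0x41)
    assert be.getNumBits() == 11
    assert be.getNumBytes() == 2    # rounded up to whole bytes
    return be.getByteString()       # 11 bits padded with zero bits into 2 bytes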
class GenerateC:
"Helper for generating C source and header files."
_data = None
wrap_col = 76
def __init__(self):
self._data = []
def emitRaw(self, text):
"Emit raw text (without automatic newline)."
self._data.append(text)
def emitLine(self, text):
"Emit a raw line (with automatic newline)."
self._data.append(text + '\n')
def emitHeader(self, autogen_by):
"Emit file header comments."
# Note: a timestamp would be nice but it breaks incremental building
self.emitLine('/*')
self.emitLine(' * Automatically generated by %s, do not edit!' % autogen_by)
self.emitLine(' */')
self.emitLine('')
def emitArray(self, data, tablename, visibility=None, typename='char', size=None, intvalues=False, const=True):
"Emit an array as a C array."
# lenient input
if isinstance(data, unicode):
data = data.encode('utf-8')
if isinstance(data, str):
tmp = []
for i in xrange(len(data)):
tmp.append(ord(data[i]))
data = tmp
size_spec = ''
if size is not None:
size_spec = '%d' % size
visib_qual = ''
if visibility is not None:
visib_qual = visibility + ' '
const_qual = ''
if const:
const_qual = 'const '
self.emitLine('%s%s%s %s[%s] = {' % (visib_qual, const_qual, typename, tablename, size_spec))
line = ''
for i in xrange(len(data)):
if intvalues:
t = "%d," % data[i]
else:
t = "(%s)'\\x%02x', " % (typename, data[i])
if len(line) + len(t) >= self.wrap_col:
self.emitLine(line)
line = t
else:
line += t
if line != '':
self.emitLine(line)
self.emitLine('};')
def emitDefine(self, name, value, comment=None):
"Emit a C define with an optional comment."
# XXX: there is no escaping right now (for comment or value)
if comment is not None:
self.emitLine('#define %-60s %-30s /* %s */' % (name, value, comment))
else:
self.emitLine('#define %-60s %s' % (name, value))
def getString(self):
"Get the entire file as a string."
return ''.join(self._data)
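# The helper below is an added usage sketch (not part of the original duktape
# build utilities): it shows a typical emit sequence for generating a C source
# snippet; the define and table names used here are hypothetical.
def _generatec_usage_sketch():
    "Illustrative GenerateC usage sketch."
    gen = GenerateC()
    gen.emitHeader('example_generator.py')
    gen.emitDefine('DUK_EXAMPLE_FLAG', '0x01', comment='illustrative define')
    gen.emitArray([1, 2, 3], 'duk_example_table', typename='int', intvalues=True)
    return gen.getString()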
def json_encode(x):
"JSON encode a value."
try:
return json.dumps(x)
except AttributeError:
pass
# for older library versions
return json.write(x)
def json_decode(x):
"JSON decode a value."
try:
return json.loads(x)
except AttributeError:
pass
# for older library versions
return json.read(x)
| mit | -2,131,732,980,080,553,200 | 21.570552 | 112 | 0.621908 | false |
PennyQ/astro-vispy | glue_vispy_viewers/common/tools.py | 3 | 3653 | import os
from qtpy import QtGui, compat
from glue.viewers.common.tool import Tool, CheckableTool
from glue.config import viewer_tool
from vispy import app, io
RECORD_START_ICON = os.path.join(os.path.dirname(__file__), 'glue_record_start.png')
RECORD_STOP_ICON = os.path.join(os.path.dirname(__file__), 'glue_record_stop.png')
ROTATE_ICON = os.path.join(os.path.dirname(__file__), 'glue_rotate.png')
@viewer_tool
class ResetTool(Tool):
icon = 'glue_home'
tool_id = 'vispy:reset'
action_text = 'Reset the view'
tool_tip = 'Reset the view'
def activate(self):
self.viewer._vispy_widget.view.camera.reset()
self.viewer._vispy_widget._toggle_perspective()
self.viewer.state.reset_limits()
@viewer_tool
class SaveTool(Tool):
icon = 'glue_filesave'
tool_id = 'vispy:save'
action_text = 'Save the figure to a file'
tool_tip = 'Save the figure to a file'
def activate(self):
outfile, file_filter = compat.getsavefilename(caption='Save File',
filters='PNG Files (*.png);;'
'JPEG Files (*.jpeg);;'
'TIFF Files (*.tiff);;',
selectedfilter='PNG Files (*.png);;')
# This indicates that the user cancelled
if not outfile:
return
img = self.viewer._vispy_widget.canvas.render()
try:
file_filter = str(file_filter).split()[0]
io.imsave(outfile, img, format=file_filter)
except ImportError:
            # TODO: pop up a window to notify that only the .png file format is supported
if '.' not in outfile:
outfile += '.png'
io.write_png(outfile, img)
@viewer_tool
class RecordTool(CheckableTool):
icon = RECORD_START_ICON
tool_id = 'vispy:record'
action_text = 'Record an animation'
tool_tip = 'Start/Stop the recording'
def __init__(self, viewer):
super(RecordTool, self).__init__(viewer=viewer)
self.record_timer = app.Timer(connect=self.record)
self.writer = None
self.next_action = 'start'
def activate(self):
# pop up a window for file saving
outfile, file_filter = compat.getsavefilename(caption='Save Animation',
filters='GIF Files (*.gif);;')
# if outfile is not set, the user cancelled
if outfile:
import imageio
self.set_icon(RECORD_STOP_ICON)
self.writer = imageio.get_writer(outfile)
self.record_timer.start(0.1)
def deactivate(self):
self.record_timer.stop()
if self.writer is not None:
self.writer.close()
self.set_icon(RECORD_START_ICON)
def set_icon(self, icon):
self.viewer.toolbar.actions[self.tool_id].setIcon(QtGui.QIcon(icon))
def record(self, event):
im = self.viewer._vispy_widget.canvas.render()
self.writer.append_data(im)
@viewer_tool
class RotateTool(CheckableTool):
icon = ROTATE_ICON
tool_id = 'vispy:rotate'
action_text = 'Continuously rotate view'
tool_tip = 'Start/Stop rotation'
timer = None
def activate(self):
if self.timer is None:
self.timer = app.Timer(connect=self.rotate)
self.timer.start(0.1)
def deactivate(self):
self.timer.stop()
def rotate(self, event):
self.viewer._vispy_widget.view.camera.azimuth -= 1. # set speed as constant first
| bsd-2-clause | -4,131,193,739,916,652,000 | 29.190083 | 91 | 0.578155 | false |
MobinRanjbar/hue | desktop/core/ext-py/boto-2.38.0/boto/opsworks/layer1.py | 132 | 129869 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.compat import json
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.opsworks import exceptions
class OpsWorksConnection(AWSQueryConnection):
"""
AWS OpsWorks
Welcome to the AWS OpsWorks API Reference . This guide provides
descriptions, syntax, and usage examples about AWS OpsWorks
actions and data types, including common parameters and error
codes.
AWS OpsWorks is an application management service that provides an
integrated experience for overseeing the complete application
lifecycle. For information about this product, go to the `AWS
OpsWorks`_ details page.
**SDKs and CLI**
The most common way to use the AWS OpsWorks API is by using the
AWS Command Line Interface (CLI) or by using one of the AWS SDKs
to implement applications in your preferred language. For more
information, see:
+ `AWS CLI`_
+ `AWS SDK for Java`_
+ `AWS SDK for .NET`_
+ `AWS SDK for PHP 2`_
+ `AWS SDK for Ruby`_
+ `AWS SDK for Node.js`_
+ `AWS SDK for Python(Boto)`_
**Endpoints**
AWS OpsWorks supports only one endpoint, opsworks.us-
east-1.amazonaws.com (HTTPS), so you must connect to that
endpoint. You can then use the API to direct AWS OpsWorks to
create stacks in any AWS Region.
**Chef Versions**
When you call CreateStack, CloneStack, or UpdateStack we recommend
you use the `ConfigurationManager` parameter to specify the Chef
version, 0.9, 11.4, or 11.10. The default value is currently
11.10. For more information, see `Chef Versions`_.
You can still specify Chef 0.9 for your stack, but new features
are not available for Chef 0.9 stacks, and support is scheduled to
end on July 24, 2014. We do not recommend using Chef 0.9 for new
stacks, and we recommend migrating your existing Chef 0.9 stacks
to Chef 11.10 as soon as possible.
"""
APIVersion = "2013-02-18"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "opsworks.us-east-1.amazonaws.com"
ServiceName = "OpsWorks"
TargetPrefix = "OpsWorks_20130218"
ResponseError = JSONResponseError
_faults = {
"ResourceNotFoundException": exceptions.ResourceNotFoundException,
"ValidationException": exceptions.ValidationException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs or kwargs['host'] is None:
kwargs['host'] = region.endpoint
super(OpsWorksConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def assign_instance(self, instance_id, layer_ids):
"""
Assign a registered instance to a custom layer. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The layer ID, which must correspond to a custom
layer. You cannot assign a registered instance to a built-in layer.
"""
params = {
'InstanceId': instance_id,
'LayerIds': layer_ids,
}
return self.make_request(action='AssignInstance',
body=json.dumps(params))
def assign_volume(self, volume_id, instance_id=None):
"""
Assigns one of the stack's registered Amazon EBS volumes to a
specified instance. The volume must first be registered with
the stack by calling RegisterVolume. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'VolumeId': volume_id, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssignVolume',
body=json.dumps(params))
def associate_elastic_ip(self, elastic_ip, instance_id=None):
"""
Associates one of the stack's registered Elastic IP addresses
with a specified instance. The address must first be
registered with the stack by calling RegisterElasticIp. For
more information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'ElasticIp': elastic_ip, }
if instance_id is not None:
params['InstanceId'] = instance_id
return self.make_request(action='AssociateElasticIp',
body=json.dumps(params))
def attach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Attaches an Elastic Load Balancing load balancer to a
specified layer. For more information, see `Elastic Load
Balancing`_.
You must create the Elastic Load Balancing instance
separately, by using the Elastic Load Balancing console, API,
or CLI. For more information, see ` Elastic Load Balancing
Developer Guide`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is to be attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='AttachElasticLoadBalancer',
body=json.dumps(params))
def clone_stack(self, source_stack_id, service_role_arn, name=None,
region=None, vpc_id=None, attributes=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
clone_permissions=None, clone_app_ids=None,
default_root_device_type=None):
"""
Creates a clone of a specified stack. For more information,
see `Clone a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type source_stack_id: string
:param source_stack_id: The source stack ID.
:type name: string
:param name: The cloned stack name.
:type region: string
:param region: The cloned stack AWS region, such as "us-east-1". For
more information about AWS regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the cloned stack is to be
launched into. It must be in the specified region. All instances
are launched into this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: A list of stack attributes and values as key/value
pairs to be added to the cloned stack.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. If you create a stack by using the AWS OpsWorks
console, it creates the role for you. You can obtain an existing
stack's IAM ARN programmatically by calling DescribePermissions.
For more information about IAM ARNs, see `Using Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the source
stack's service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stacks's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces are
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The cloned stack's default
Availability Zone, which must be in the specified region. For more
information, see `Regions and Endpoints`_. If you also specify a
value for `DefaultSubnetId`, the subnet must be in the same zone.
For more information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you clone
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether to use custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type clone_permissions: boolean
:param clone_permissions: Whether to clone the source stack's
permissions.
:type clone_app_ids: list
:param clone_app_ids: A list of source stack app IDs to be included in
the cloned stack.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the cloned stack, but
you can override it when you create an instance. For more
information, see `Storage for the Root Device`_.
"""
params = {
'SourceStackId': source_stack_id,
'ServiceRoleArn': service_role_arn,
}
if name is not None:
params['Name'] = name
if region is not None:
params['Region'] = region
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if clone_permissions is not None:
params['ClonePermissions'] = clone_permissions
if clone_app_ids is not None:
params['CloneAppIds'] = clone_app_ids
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CloneStack',
body=json.dumps(params))
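# ---------------------------------------------------------------------
# Usage sketch (not part of the original module): how a caller might
# clone a stack and override part of the stack configuration JSON.  The
# connection class, region handling, and every ID/ARN below are
# hypothetical placeholders; this assumes the surrounding class is the
# OpsWorks Layer1 connection exposed by ``boto.opsworks.layer1``.
#
#     from boto.opsworks.layer1 import OpsWorksConnection
#
#     conn = OpsWorksConnection()  # credentials from the usual boto config
#     result = conn.clone_stack(
#         source_stack_id='00000000-source-stack-id',
#         service_role_arn='arn:aws:iam::123456789012:role/aws-opsworks-service-role',
#         name='staging-copy',
#         custom_json='{"key1": "value1", "key2": "value2"}',
#         clone_permissions=True)
#     new_stack_id = result['StackId']
# ---------------------------------------------------------------------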
def create_app(self, stack_id, name, type, shortname=None,
description=None, data_sources=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Creates an app for a specified stack. For more information,
see `Creating Apps`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type shortname: string
:param shortname: The app's short name.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data source.
:type type: string
:param type: The app type. Each supported type is associated with a
particular layer. For example, PHP applications are associated with
a PHP layer. AWS OpsWorks deploys an application to those instances
that are members of the corresponding layer.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether to enable SSL for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instance.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'StackId': stack_id, 'Name': name, 'Type': type, }
if shortname is not None:
params['Shortname'] = shortname
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='CreateApp',
body=json.dumps(params))
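# Usage sketch (illustrative only; IDs, URLs, and domain names are
# hypothetical): creating a PHP app from a public Git repository, given a
# connection object ``conn`` as in the earlier clone_stack sketch.
#
#     result = conn.create_app(
#         stack_id='00000000-example-stack-id',
#         name='simplephpapp',
#         type='php',
#         app_source={'Type': 'git',
#                     'Url': 'git://github.com/example/simplephpapp.git'},
#         domains=['www.example.com', 'example.com'],
#         enable_ssl=False,
#         environment=[{'Key': 'APP_ENV', 'Value': 'production'}])
#     app_id = result['AppId']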
def create_deployment(self, stack_id, command, app_id=None,
instance_ids=None, comment=None, custom_json=None):
"""
Runs deployment or stack commands. For more information, see
`Deploying Apps`_ and `Run Stack Commands`_.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type app_id: string
:param app_id: The app ID. This parameter is required for app
deployments, but not for other deployment commands.
:type instance_ids: list
:param instance_ids: The instance IDs for the deployment targets.
:type command: dict
:param command: A `DeploymentCommand` object that specifies the
deployment command and any associated arguments.
:type comment: string
:param comment: A user-defined comment.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
"""
params = {'StackId': stack_id, 'Command': command, }
if app_id is not None:
params['AppId'] = app_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
if comment is not None:
params['Comment'] = comment
if custom_json is not None:
params['CustomJson'] = custom_json
return self.make_request(action='CreateDeployment',
body=json.dumps(params))
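# Usage sketch (hypothetical IDs): deploying an app to two instances and,
# separately, running the ``update_dependencies`` stack command.  The
# ``command`` argument is the ``DeploymentCommand`` dict described above.
#
#     conn.create_deployment(
#         stack_id='00000000-example-stack-id',
#         command={'Name': 'deploy'},
#         app_id='00000000-example-app-id',
#         instance_ids=['00000000-instance-1', '00000000-instance-2'],
#         comment='deploy latest revision')
#
#     conn.create_deployment(
#         stack_id='00000000-example-stack-id',
#         command={'Name': 'update_dependencies'})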
def create_instance(self, stack_id, layer_ids, instance_type,
auto_scaling_type=None, hostname=None, os=None,
ami_id=None, ssh_key_name=None,
availability_zone=None, virtualization_type=None,
subnet_id=None, architecture=None,
root_device_type=None, install_updates_on_boot=None,
ebs_optimized=None):
"""
Creates an instance in a specified stack. For more
information, see `Adding an Instance to a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array that contains the instance layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version. If you set this
parameter to `Custom`, you must use the CreateInstance action's
AmiId parameter to specify the custom AMI that you want to use. For
more information on the standard operating systems, see `Operating
Systems`_. For more information on how to use custom AMIs with AWS
OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_.
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type availability_zone: string
:param availability_zone: The instance Availability Zone. For more
information, see `Regions and Endpoints`_.
:type virtualization_type: string
:param virtualization_type: The instance's virtualization type,
`paravirtual` or `hvm`.
:type subnet_id: string
:param subnet_id: The ID of the instance's subnet. If the stack is
running in a VPC, you can use this parameter to override the
stack's default subnet ID value and direct AWS OpsWorks to launch
the instance in a different subnet.
:type architecture: string
:param architecture: The instance architecture. The default option is
`x86_64`. Instance types do not necessarily support both
architectures. For a list of the architectures that are supported
by the different instance types, see `Instance Families and
Types`_.
:type root_device_type: string
:param root_device_type: The instance root device type. For more
information, see `Storage for the Root Device`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True` to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether to create an Amazon EBS-optimized
instance.
"""
params = {
'StackId': stack_id,
'LayerIds': layer_ids,
'InstanceType': instance_type,
}
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if availability_zone is not None:
params['AvailabilityZone'] = availability_zone
if virtualization_type is not None:
params['VirtualizationType'] = virtualization_type
if subnet_id is not None:
params['SubnetId'] = subnet_id
if architecture is not None:
params['Architecture'] = architecture
if root_device_type is not None:
params['RootDeviceType'] = root_device_type
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='CreateInstance',
body=json.dumps(params))
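# Usage sketch (hypothetical IDs): creating an instance from a custom AMI.
# As the docstring notes, `Os` must be set to `Custom` when `AmiId` is
# supplied.
#
#     result = conn.create_instance(
#         stack_id='00000000-example-stack-id',
#         layer_ids=['00000000-example-layer-id'],
#         instance_type='m3.medium',
#         os='Custom',
#         ami_id='ami-12345678',
#         availability_zone='us-east-1a',
#         install_updates_on_boot=True)
#     instance_id = result['InstanceId']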
def create_layer(self, stack_id, type, name, shortname, attributes=None,
custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Creates a layer. For more information, see `How to Create a
Layer`_.
You should use **CreateLayer** for noncustom layer types such
as PHP App Server only if the stack does not have an existing
layer of that type. A stack can have at most one instance of
each noncustom layer; if you attempt to create a second
instance, **CreateLayer** fails. A stack can have an arbitrary
number of custom layers, so you can call **CreateLayer** as
many times as you like for that layer type.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The layer stack ID.
:type type: string
:param type: The layer type. A stack cannot have more than one built-in
layer of the same type. It can have any number of custom layers.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
OpsWorks and by Chef recipes. The short name is also used as the
name for the directory where your app files are installed. It can
have a maximum of 200 characters, which are limited to the
alphanumeric characters, '-', '_', and '.'.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to
be used for the layer's EC2 instances. For more information about
IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer custom
security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the layer
packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
:param enable_auto_healing: Whether to disable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True` to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration: A LifeCycleEventConfiguration
object that you can use to configure the Shutdown event to specify
an execution timeout and enable or disable Elastic Load Balancer
connection draining.
"""
params = {
'StackId': stack_id,
'Type': type,
'Name': name,
'Shortname': shortname,
}
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='CreateLayer',
body=json.dumps(params))
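# Usage sketch (hypothetical IDs and recipe names): creating a custom
# layer with extra OS packages and a custom "deploy" recipe.
#
#     result = conn.create_layer(
#         stack_id='00000000-example-stack-id',
#         type='custom',
#         name='Queue Workers',
#         shortname='workers',
#         packages=['git', 'htop'],
#         custom_recipes={'Deploy': ['workers::deploy']},
#         enable_auto_healing=True,
#         auto_assign_public_ips=True)
#     layer_id = result['LayerId']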
def create_stack(self, name, region, service_role_arn,
default_instance_profile_arn, vpc_id=None,
attributes=None, default_os=None, hostname_theme=None,
default_availability_zone=None, default_subnet_id=None,
custom_json=None, configuration_manager=None,
chef_configuration=None, use_custom_cookbooks=None,
use_opsworks_security_groups=None,
custom_cookbooks_source=None, default_ssh_key_name=None,
default_root_device_type=None):
"""
Creates a new stack. For more information, see `Create a New
Stack`_.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type name: string
:param name: The stack name.
:type region: string
:param region: The stack AWS region, such as "us-east-1". For more
information about Amazon regions, see `Regions and Endpoints`_.
:type vpc_id: string
:param vpc_id: The ID of the VPC that the stack is to be launched into.
It must be in the specified region. All instances are launched into
this VPC, and you cannot change the ID later.
+ If your account supports EC2 Classic, the default value is no VPC.
+ If your account does not support EC2 Classic, the default value is
the default VPC for the specified region.
If the VPC ID corresponds to a default VPC and you have specified
either the `DefaultAvailabilityZone` or the `DefaultSubnetId`
parameter only, AWS OpsWorks infers the value of the other
parameter. If you specify neither parameter, AWS OpsWorks sets
these parameters to the first valid Availability Zone for the
specified region and the corresponding default VPC subnet ID,
respectively.
If you specify a nondefault VPC ID, note the following:
+ It must belong to a VPC in your account that is in the specified
region.
+ You must specify a value for `DefaultSubnetId`.
For more information on how to use AWS OpsWorks with a VPC, see
`Running a Stack in a VPC`_. For more information on default VPC
and EC2 Classic, see `Supported Platforms`_.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn: The stack AWS Identity and Access Management
(IAM) role, which allows AWS OpsWorks to work with AWS resources on
your behalf. You must set this parameter to the Amazon Resource
Name (ARN) for an existing IAM role. For more information about IAM
ARNs, see `Using Identifiers`_.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
:param hostname_theme: The stack's host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
To obtain a generated host name, call `GetHostNameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see the `VpcId` parameter description.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
format and must escape characters such as '"'.:
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
:param configuration_manager: The configuration manager. When you create
a stack we recommend that you use the configuration manager to
specify the Chef version, 0.9, 11.4, or 11.10. The default value is
currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default. With
`UseOpsworksSecurityGroups` you can instead provide your own custom
security groups. `UseOpsworksSecurityGroups` has the following
settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. The default option is
`instance-store`. For more information, see `Storage for the Root
Device`_.
"""
params = {
'Name': name,
'Region': region,
'ServiceRoleArn': service_role_arn,
'DefaultInstanceProfileArn': default_instance_profile_arn,
}
if vpc_id is not None:
params['VpcId'] = vpc_id
if attributes is not None:
params['Attributes'] = attributes
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
return self.make_request(action='CreateStack',
body=json.dumps(params))
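# Usage sketch (hypothetical ARNs/IDs): creating a stack inside a VPC.  As
# the docstring notes, a nondefault `vpc_id` requires `default_subnet_id`.
#
#     result = conn.create_stack(
#         name='my-vpc-stack',
#         region='us-east-1',
#         service_role_arn='arn:aws:iam::123456789012:role/aws-opsworks-service-role',
#         default_instance_profile_arn='arn:aws:iam::123456789012:instance-profile/aws-opsworks-ec2-role',
#         vpc_id='vpc-12345678',
#         default_subnet_id='subnet-12345678',
#         configuration_manager={'Name': 'Chef', 'Version': '11.10'},
#         use_opsworks_security_groups=True)
#     stack_id = result['StackId']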
def create_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Creates a new user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's public SSH key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Setting an IAM User's Public SSH Key`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='CreateUserProfile',
body=json.dumps(params))
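# Usage sketch (hypothetical ARN and key): creating a profile that lets
# the user manage their own SSH key from the My Settings page.
#
#     conn.create_user_profile(
#         iam_user_arn='arn:aws:iam::123456789012:user/alice',
#         ssh_username='alice',
#         ssh_public_key='ssh-rsa AAAA... alice@example.com',
#         allow_self_management=True)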
def delete_app(self, app_id):
"""
Deletes a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
"""
params = {'AppId': app_id, }
return self.make_request(action='DeleteApp',
body=json.dumps(params))
def delete_instance(self, instance_id, delete_elastic_ip=None,
delete_volumes=None):
"""
Deletes a specified instance, which terminates the associated
Amazon EC2 instance. You must stop an instance before you can
delete it.
For more information, see `Deleting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type delete_elastic_ip: boolean
:param delete_elastic_ip: Whether to delete the instance Elastic IP
address.
:type delete_volumes: boolean
:param delete_volumes: Whether to delete the instance's Amazon EBS
volumes.
"""
params = {'InstanceId': instance_id, }
if delete_elastic_ip is not None:
params['DeleteElasticIp'] = delete_elastic_ip
if delete_volumes is not None:
params['DeleteVolumes'] = delete_volumes
return self.make_request(action='DeleteInstance',
body=json.dumps(params))
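# Usage sketch (hypothetical ID): the instance must already be stopped;
# here its Elastic IP address and EBS volumes are deleted along with it.
#
#     conn.delete_instance(
#         instance_id='00000000-example-instance-id',
#         delete_elastic_ip=True,
#         delete_volumes=True)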
def delete_layer(self, layer_id):
"""
Deletes a specified layer. You must first stop and then delete
all associated instances or unassign registered instances. For
more information, see `How to Delete a Layer`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='DeleteLayer',
body=json.dumps(params))
def delete_stack(self, stack_id):
"""
Deletes a specified stack. You must first delete all
instances, layers, and apps or deregister registered
instances. For more information, see `Shut Down a Stack`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DeleteStack',
body=json.dumps(params))
def delete_user_profile(self, iam_user_arn):
"""
Deletes a user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
"""
params = {'IamUserArn': iam_user_arn, }
return self.make_request(action='DeleteUserProfile',
body=json.dumps(params))
def deregister_elastic_ip(self, elastic_ip):
"""
Deregisters a specified Elastic IP address. The address can
then be registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DeregisterElasticIp',
body=json.dumps(params))
def deregister_instance(self, instance_id):
"""
Deregister a registered Amazon EC2 or on-premises instance.
This action removes the instance from the stack and returns it
to your control. This action cannot be used with instances
that were created with AWS OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='DeregisterInstance',
body=json.dumps(params))
def deregister_rds_db_instance(self, rds_db_instance_arn):
"""
Deregisters an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
return self.make_request(action='DeregisterRdsDbInstance',
body=json.dumps(params))
def deregister_volume(self, volume_id):
"""
Deregisters an Amazon EBS volume. The volume can then be
registered by another stack. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='DeregisterVolume',
body=json.dumps(params))
def describe_apps(self, stack_id=None, app_ids=None):
"""
Requests a description of a specified set of apps.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The app stack ID. If you use this parameter,
`DescribeApps` returns a description of the apps in the specified
stack.
:type app_ids: list
:param app_ids: An array of app IDs for the apps to be described. If
you use this parameter, `DescribeApps` returns a description of the
specified apps. Otherwise, it returns a description of every app.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_ids is not None:
params['AppIds'] = app_ids
return self.make_request(action='DescribeApps',
body=json.dumps(params))
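# Usage sketch (hypothetical ID): listing every app in a stack and pulling
# the app names out of the response.
#
#     result = conn.describe_apps(stack_id='00000000-example-stack-id')
#     names = [app['Name'] for app in result['Apps']]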
def describe_commands(self, deployment_id=None, instance_id=None,
command_ids=None):
"""
Describes the results of specified commands.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type deployment_id: string
:param deployment_id: The deployment ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified deployment.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeCommands` returns a description of the commands associated
with the specified instance.
:type command_ids: list
:param command_ids: An array of command IDs. If you include this
parameter, `DescribeCommands` returns a description of the
specified commands. Otherwise, it returns a description of every
command.
"""
params = {}
if deployment_id is not None:
params['DeploymentId'] = deployment_id
if instance_id is not None:
params['InstanceId'] = instance_id
if command_ids is not None:
params['CommandIds'] = command_ids
return self.make_request(action='DescribeCommands',
body=json.dumps(params))
def describe_deployments(self, stack_id=None, app_id=None,
deployment_ids=None):
"""
Requests a description of a specified set of deployments.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
associated with the specified stack.
:type app_id: string
:param app_id: The app ID. If you include this parameter,
`DescribeDeployments` returns a description of the commands
associated with the specified app.
:type deployment_ids: list
:param deployment_ids: An array of deployment IDs to be described. If
you include this parameter, `DescribeDeployments` returns a
description of the specified deployments. Otherwise, it returns a
description of every deployment.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if app_id is not None:
params['AppId'] = app_id
if deployment_ids is not None:
params['DeploymentIds'] = deployment_ids
return self.make_request(action='DescribeDeployments',
body=json.dumps(params))
def describe_elastic_ips(self, instance_id=None, stack_id=None, ips=None):
"""
Describes `Elastic IP addresses`_.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses associated with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. If you include this parameter,
`DescribeElasticIps` returns a description of the Elastic IP
addresses that are registered with the specified stack.
:type ips: list
:param ips: An array of Elastic IP addresses to be described. If you
include this parameter, `DescribeElasticIps` returns a description
of the specified Elastic IP addresses. Otherwise, it returns a
description of every Elastic IP address.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if ips is not None:
params['Ips'] = ips
return self.make_request(action='DescribeElasticIps',
body=json.dumps(params))
def describe_elastic_load_balancers(self, stack_id=None, layer_ids=None):
"""
Describes a stack's Elastic Load Balancing instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's Elastic
Load Balancing instances.
:type layer_ids: list
:param layer_ids: A list of layer IDs. The action describes the Elastic
Load Balancing instances for the specified layers.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeElasticLoadBalancers',
body=json.dumps(params))
def describe_instances(self, stack_id=None, layer_id=None,
instance_ids=None):
"""
Requests a description of a set of instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: A stack ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified stack.
:type layer_id: string
:param layer_id: A layer ID. If you use this parameter,
`DescribeInstances` returns descriptions of the instances
associated with the specified layer.
:type instance_ids: list
:param instance_ids: An array of instance IDs to be described. If you
use this parameter, `DescribeInstances` returns a description of
the specified instances. Otherwise, it returns a description of
every instance.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_id is not None:
params['LayerId'] = layer_id
if instance_ids is not None:
params['InstanceIds'] = instance_ids
return self.make_request(action='DescribeInstances',
body=json.dumps(params))
def describe_layers(self, stack_id=None, layer_ids=None):
"""
Requests a description of one or more layers in a specified
stack.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type layer_ids: list
:param layer_ids: An array of layer IDs that specify the layers to be
described. If you omit this parameter, `DescribeLayers` returns a
description of every layer in the specified stack.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if layer_ids is not None:
params['LayerIds'] = layer_ids
return self.make_request(action='DescribeLayers',
body=json.dumps(params))
def describe_load_based_auto_scaling(self, layer_ids):
"""
Describes load-based auto scaling configurations for specified
layers.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type layer_ids: list
:param layer_ids: An array of layer IDs.
"""
params = {'LayerIds': layer_ids, }
return self.make_request(action='DescribeLoadBasedAutoScaling',
body=json.dumps(params))
def describe_my_user_profile(self):
"""
Describes a user's SSH information.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
"""
params = {}
return self.make_request(action='DescribeMyUserProfile',
body=json.dumps(params))
def describe_permissions(self, iam_user_arn=None, stack_id=None):
"""
Describes the permissions for a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN. For more information about IAM
ARNs, see `Using Identifiers`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {}
if iam_user_arn is not None:
params['IamUserArn'] = iam_user_arn
if stack_id is not None:
params['StackId'] = stack_id
return self.make_request(action='DescribePermissions',
body=json.dumps(params))
def describe_raid_arrays(self, instance_id=None, stack_id=None,
raid_array_ids=None):
"""
Describe an instance's RAID arrays.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeRaidArrays` returns descriptions of the RAID arrays
associated with the specified instance.
:type stack_id: string
:param stack_id: The stack ID.
:type raid_array_ids: list
:param raid_array_ids: An array of RAID array IDs. If you use this
parameter, `DescribeRaidArrays` returns descriptions of the
specified arrays. Otherwise, it returns a description of every
array.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_ids is not None:
params['RaidArrayIds'] = raid_array_ids
return self.make_request(action='DescribeRaidArrays',
body=json.dumps(params))
def describe_rds_db_instances(self, stack_id, rds_db_instance_arns=None):
"""
Describes Amazon RDS instances.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID that the instances are registered with.
The operation returns descriptions of all registered Amazon RDS
instances.
:type rds_db_instance_arns: list
:param rds_db_instance_arns: An array containing the ARNs of the
instances to be described.
"""
params = {'StackId': stack_id, }
if rds_db_instance_arns is not None:
params['RdsDbInstanceArns'] = rds_db_instance_arns
return self.make_request(action='DescribeRdsDbInstances',
body=json.dumps(params))
def describe_service_errors(self, stack_id=None, instance_id=None,
service_error_ids=None):
"""
Describes AWS OpsWorks service errors.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified stack.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeServiceErrors` returns descriptions of the errors
associated with the specified instance.
:type service_error_ids: list
:param service_error_ids: An array of service error IDs. If you use
this parameter, `DescribeServiceErrors` returns descriptions of the
specified errors. Otherwise, it returns a description of every
error.
"""
params = {}
if stack_id is not None:
params['StackId'] = stack_id
if instance_id is not None:
params['InstanceId'] = instance_id
if service_error_ids is not None:
params['ServiceErrorIds'] = service_error_ids
return self.make_request(action='DescribeServiceErrors',
body=json.dumps(params))
def describe_stack_provisioning_parameters(self, stack_id):
"""
Requests a description of a stack's provisioning parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the stack
or an attached policy that explicitly grants permissions. For
more information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackProvisioningParameters',
body=json.dumps(params))
def describe_stack_summary(self, stack_id):
"""
Describes the number of layers and apps in a specified stack,
and the number of instances in each state, such as
`running_setup` or `online`.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='DescribeStackSummary',
body=json.dumps(params))
def describe_stacks(self, stack_ids=None):
"""
Requests a description of one or more stacks.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type stack_ids: list
:param stack_ids: An array of stack IDs that specify the stacks to be
described. If you omit this parameter, `DescribeStacks` returns a
description of every stack.
"""
params = {}
if stack_ids is not None:
params['StackIds'] = stack_ids
return self.make_request(action='DescribeStacks',
body=json.dumps(params))
def describe_time_based_auto_scaling(self, instance_ids):
"""
Describes time-based auto scaling configurations for specified
instances.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_ids: list
:param instance_ids: An array of instance IDs.
"""
params = {'InstanceIds': instance_ids, }
return self.make_request(action='DescribeTimeBasedAutoScaling',
body=json.dumps(params))
def describe_user_profiles(self, iam_user_arns=None):
"""
Describe specified users.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arns: list
:param iam_user_arns: An array of IAM user ARNs that identify the users
to be described.
"""
params = {}
if iam_user_arns is not None:
params['IamUserArns'] = iam_user_arns
return self.make_request(action='DescribeUserProfiles',
body=json.dumps(params))
def describe_volumes(self, instance_id=None, stack_id=None,
raid_array_id=None, volume_ids=None):
"""
Describes an instance's Amazon EBS volumes.
You must specify at least one of the parameters.
**Required Permissions**: To use this action, an IAM user must
have a Show, Deploy, or Manage permissions level for the
stack, or an attached policy that explicitly grants
permissions. For more information on user permissions, see
`Managing User Permissions`_.
:type instance_id: string
:param instance_id: The instance ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified instance.
:type stack_id: string
:param stack_id: A stack ID. The action describes the stack's
registered Amazon EBS volumes.
:type raid_array_id: string
:param raid_array_id: The RAID array ID. If you use this parameter,
`DescribeVolumes` returns descriptions of the volumes associated
with the specified RAID array.
:type volume_ids: list
:param volume_ids: An array of volume IDs. If you use this parameter,
`DescribeVolumes` returns descriptions of the specified volumes.
Otherwise, it returns a description of every volume.
"""
params = {}
if instance_id is not None:
params['InstanceId'] = instance_id
if stack_id is not None:
params['StackId'] = stack_id
if raid_array_id is not None:
params['RaidArrayId'] = raid_array_id
if volume_ids is not None:
params['VolumeIds'] = volume_ids
return self.make_request(action='DescribeVolumes',
body=json.dumps(params))
def detach_elastic_load_balancer(self, elastic_load_balancer_name,
layer_id):
"""
Detaches a specified Elastic Load Balancing instance from its
layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_load_balancer_name: string
:param elastic_load_balancer_name: The Elastic Load Balancing
instance's name.
:type layer_id: string
:param layer_id: The ID of the layer that the Elastic Load Balancing
instance is attached to.
"""
params = {
'ElasticLoadBalancerName': elastic_load_balancer_name,
'LayerId': layer_id,
}
return self.make_request(action='DetachElasticLoadBalancer',
body=json.dumps(params))
def disassociate_elastic_ip(self, elastic_ip):
"""
Disassociates an Elastic IP address from its instance. The
address remains registered with the stack. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
"""
params = {'ElasticIp': elastic_ip, }
return self.make_request(action='DisassociateElasticIp',
body=json.dumps(params))
def get_hostname_suggestion(self, layer_id):
"""
Gets a generated host name for the specified layer, based on
the current host name theme.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
"""
params = {'LayerId': layer_id, }
return self.make_request(action='GetHostnameSuggestion',
body=json.dumps(params))
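# Usage sketch (hypothetical ID): asking for the next generated host name
# under the stack's current hostname theme (see the `HostnameTheme` notes
# in clone_stack and create_stack above).
#
#     result = conn.get_hostname_suggestion(
#         layer_id='00000000-example-layer-id')
#     hostname = result['Hostname']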
def reboot_instance(self, instance_id):
"""
Reboots a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='RebootInstance',
body=json.dumps(params))
def register_elastic_ip(self, elastic_ip, stack_id):
"""
Registers an Elastic IP address with a specified stack. An
address can be registered with only one stack at a time. If
the address is already registered, you must first deregister
it by calling DeregisterElasticIp. For more information, see
`Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The Elastic IP address.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'ElasticIp': elastic_ip, 'StackId': stack_id, }
return self.make_request(action='RegisterElasticIp',
body=json.dumps(params))
def register_instance(self, stack_id, hostname=None, public_ip=None,
private_ip=None, rsa_public_key=None,
rsa_public_key_fingerprint=None,
instance_identity=None):
"""
Registers instances with a specified stack that were created
outside of AWS OpsWorks.
We do not recommend using this action to register instances.
The complete registration operation has two primary steps,
installing the AWS OpsWorks agent on the instance and
registering the instance with the stack. `RegisterInstance`
handles only the second step. You should instead use the AWS
CLI `register` command, which performs the entire registration
operation.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The ID of the stack that the instance is to be
registered with.
:type hostname: string
:param hostname: The instance's hostname.
:type public_ip: string
:param public_ip: The instance's public IP address.
:type private_ip: string
:param private_ip: The instance's private IP address.
:type rsa_public_key: string
:param rsa_public_key: The instance's public RSA key. This key is used
to encrypt communication between the instance and the service.
:type rsa_public_key_fingerprint: string
:param rsa_public_key_fingerprint: The instance's public RSA key
fingerprint.
:type instance_identity: dict
:param instance_identity: An InstanceIdentity object that contains the
instance's identity.
"""
params = {'StackId': stack_id, }
if hostname is not None:
params['Hostname'] = hostname
if public_ip is not None:
params['PublicIp'] = public_ip
if private_ip is not None:
params['PrivateIp'] = private_ip
if rsa_public_key is not None:
params['RsaPublicKey'] = rsa_public_key
if rsa_public_key_fingerprint is not None:
params['RsaPublicKeyFingerprint'] = rsa_public_key_fingerprint
if instance_identity is not None:
params['InstanceIdentity'] = instance_identity
return self.make_request(action='RegisterInstance',
body=json.dumps(params))
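    # Added illustration (not part of the original boto source): a minimal,
    # hedged sketch of calling register_instance on an existing layer1
    # connection. The identifiers below are assumed placeholder values.
    #
    #   conn.register_instance(
    #       stack_id='2f18019f-example-stack-id',   # assumed stack ID
    #       hostname='external-host-1',
    #       public_ip='203.0.113.10')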
def register_rds_db_instance(self, stack_id, rds_db_instance_arn,
db_user, db_password):
"""
Registers an Amazon RDS instance with a stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The database's master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {
'StackId': stack_id,
'RdsDbInstanceArn': rds_db_instance_arn,
'DbUser': db_user,
'DbPassword': db_password,
}
return self.make_request(action='RegisterRdsDbInstance',
body=json.dumps(params))
def register_volume(self, stack_id, ec_2_volume_id=None):
"""
Registers an Amazon EBS volume with a specified stack. A
volume can be registered with only one stack at a time. If the
volume is already registered, you must first deregister it by
calling DeregisterVolume. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type ec_2_volume_id: string
:param ec_2_volume_id: The Amazon EBS volume ID.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
if ec_2_volume_id is not None:
params['Ec2VolumeId'] = ec_2_volume_id
return self.make_request(action='RegisterVolume',
body=json.dumps(params))
def set_load_based_auto_scaling(self, layer_id, enable=None,
up_scaling=None, down_scaling=None):
"""
Specify the load-based auto scaling configuration for a
specified layer. For more information, see `Managing Load with
Time-based and Load-based Instances`_.
To use load-based auto scaling, you must create a set of load-
based auto scaling instances. Load-based auto scaling operates
only on the instances from that set, so you must ensure that
you have created enough instances to handle the maximum
anticipated load.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type enable: boolean
:param enable: Enables load-based auto scaling for the layer.
:type up_scaling: dict
:param up_scaling: An `AutoScalingThresholds` object with the upscaling
threshold configuration. If the load exceeds these thresholds for a
specified amount of time, AWS OpsWorks starts a specified number of
instances.
:type down_scaling: dict
:param down_scaling: An `AutoScalingThresholds` object with the
downscaling threshold configuration. If the load falls below these
thresholds for a specified amount of time, AWS OpsWorks stops a
specified number of instances.
"""
params = {'LayerId': layer_id, }
if enable is not None:
params['Enable'] = enable
if up_scaling is not None:
params['UpScaling'] = up_scaling
if down_scaling is not None:
params['DownScaling'] = down_scaling
return self.make_request(action='SetLoadBasedAutoScaling',
body=json.dumps(params))
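    # Added illustration (not original code): enabling load-based scaling for
    # a layer. The threshold dictionaries follow the OpsWorks
    # `AutoScalingThresholds` shape; treat the exact keys and values here as
    # assumptions to verify against the API reference.
    #
    #   conn.set_load_based_auto_scaling(
    #       layer_id='layer-id-example',
    #       enable=True,
    #       up_scaling={'InstanceCount': 2, 'CpuThreshold': 80.0},
    #       down_scaling={'InstanceCount': 2, 'CpuThreshold': 20.0})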
def set_permission(self, stack_id, iam_user_arn, allow_ssh=None,
allow_sudo=None, level=None):
"""
Specifies a user's permissions. For more information, see
`Security and Permissions`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type iam_user_arn: string
:param iam_user_arn: The user's IAM ARN.
:type allow_ssh: boolean
:param allow_ssh: The user is allowed to use SSH to communicate with
the instance.
:type allow_sudo: boolean
:param allow_sudo: The user is allowed to use **sudo** to elevate
privileges.
:type level: string
:param level: The user's permission level, which must be set to one of
the following strings. You cannot set your own permissions level.
+ `deny`
+ `show`
+ `deploy`
+ `manage`
+ `iam_only`
For more information on the permissions associated with these levels,
see `Managing User Permissions`_
"""
params = {'StackId': stack_id, 'IamUserArn': iam_user_arn, }
if allow_ssh is not None:
params['AllowSsh'] = allow_ssh
if allow_sudo is not None:
params['AllowSudo'] = allow_sudo
if level is not None:
params['Level'] = level
return self.make_request(action='SetPermission',
body=json.dumps(params))
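    # Added illustration (assumed identifiers): granting a user the `deploy`
    # level, one of the permission strings listed in the docstring above.
    #
    #   conn.set_permission(
    #       stack_id='stack-id-example',
    #       iam_user_arn='arn:aws:iam::123456789012:user/example',
    #       allow_ssh=True, allow_sudo=False, level='deploy')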
def set_time_based_auto_scaling(self, instance_id,
auto_scaling_schedule=None):
"""
Specify the time-based auto scaling configuration for a
specified instance. For more information, see `Managing Load
with Time-based and Load-based Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type auto_scaling_schedule: dict
:param auto_scaling_schedule: An `AutoScalingSchedule` with the
instance schedule.
"""
params = {'InstanceId': instance_id, }
if auto_scaling_schedule is not None:
params['AutoScalingSchedule'] = auto_scaling_schedule
return self.make_request(action='SetTimeBasedAutoScaling',
body=json.dumps(params))
def start_instance(self, instance_id):
"""
Starts a specified instance. For more information, see
`Starting, Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StartInstance',
body=json.dumps(params))
def start_stack(self, stack_id):
"""
Starts a stack's instances.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StartStack',
body=json.dumps(params))
def stop_instance(self, instance_id):
"""
Stops a specified instance. When you stop a standard instance,
the data disappears and must be reinstalled when you restart
the instance. You can stop an Amazon EBS-backed instance
without losing data. For more information, see `Starting,
Stopping, and Rebooting Instances`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='StopInstance',
body=json.dumps(params))
def stop_stack(self, stack_id):
"""
Stops a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
"""
params = {'StackId': stack_id, }
return self.make_request(action='StopStack',
body=json.dumps(params))
def unassign_instance(self, instance_id):
"""
        Unassigns a registered instance from all of its layers. The
instance remains in the stack as an unassigned instance and
can be assigned to another layer, as needed. You cannot use
this action with instances that were created with AWS
OpsWorks.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
"""
params = {'InstanceId': instance_id, }
return self.make_request(action='UnassignInstance',
body=json.dumps(params))
def unassign_volume(self, volume_id):
"""
Unassigns an assigned Amazon EBS volume. The volume remains
registered with the stack. For more information, see `Resource
Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
"""
params = {'VolumeId': volume_id, }
return self.make_request(action='UnassignVolume',
body=json.dumps(params))
def update_app(self, app_id, name=None, description=None,
data_sources=None, type=None, app_source=None,
domains=None, enable_ssl=None, ssl_configuration=None,
attributes=None, environment=None):
"""
Updates a specified app.
**Required Permissions**: To use this action, an IAM user must
have a Deploy or Manage permissions level for the stack, or an
attached policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type app_id: string
:param app_id: The app ID.
:type name: string
:param name: The app name.
:type description: string
:param description: A description of the app.
:type data_sources: list
:param data_sources: The app's data sources.
:type type: string
:param type: The app type.
:type app_source: dict
:param app_source: A `Source` object that specifies the app repository.
:type domains: list
:param domains: The app's virtual host settings, with multiple domains
separated by commas. For example: `'www.example.com, example.com'`
:type enable_ssl: boolean
:param enable_ssl: Whether SSL is enabled for the app.
:type ssl_configuration: dict
:param ssl_configuration: An `SslConfiguration` object with the SSL
configuration.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type environment: list
:param environment:
An array of `EnvironmentVariable` objects that specify environment
variables to be associated with the app. You can specify up to ten
environment variables. After you deploy the app, these variables
are defined on the associated app server instances.
This parameter is supported only by Chef 11.10 stacks. If you have
specified one or more environment variables, you cannot modify the
stack's Chef version.
"""
params = {'AppId': app_id, }
if name is not None:
params['Name'] = name
if description is not None:
params['Description'] = description
if data_sources is not None:
params['DataSources'] = data_sources
if type is not None:
params['Type'] = type
if app_source is not None:
params['AppSource'] = app_source
if domains is not None:
params['Domains'] = domains
if enable_ssl is not None:
params['EnableSsl'] = enable_ssl
if ssl_configuration is not None:
params['SslConfiguration'] = ssl_configuration
if attributes is not None:
params['Attributes'] = attributes
if environment is not None:
params['Environment'] = environment
return self.make_request(action='UpdateApp',
body=json.dumps(params))
def update_elastic_ip(self, elastic_ip, name=None):
"""
Updates a registered Elastic IP address's name. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type elastic_ip: string
:param elastic_ip: The address.
:type name: string
:param name: The new name.
"""
params = {'ElasticIp': elastic_ip, }
if name is not None:
params['Name'] = name
return self.make_request(action='UpdateElasticIp',
body=json.dumps(params))
def update_instance(self, instance_id, layer_ids=None,
instance_type=None, auto_scaling_type=None,
hostname=None, os=None, ami_id=None,
ssh_key_name=None, architecture=None,
install_updates_on_boot=None, ebs_optimized=None):
"""
Updates a specified instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type instance_id: string
:param instance_id: The instance ID.
:type layer_ids: list
:param layer_ids: The instance's layer IDs.
:type instance_type: string
:param instance_type: The instance type. AWS OpsWorks supports all
instance types except Cluster Compute, Cluster GPU, and High Memory
Cluster. For more information, see `Instance Families and Types`_.
The parameter values that you use to specify the various types are
in the API Name column of the Available Instance Types table.
:type auto_scaling_type: string
:param auto_scaling_type: For load-based or time-based instances, the
type.
:type hostname: string
:param hostname: The instance host name.
:type os: string
:param os: The instance's operating system, which must be set to one of
the following.
+ Standard operating systems: An Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`
The default option is the current Amazon Linux version, such as `Amazon
Linux 2014.09`. If you set this parameter to `Custom`, you must use
the CreateInstance action's AmiId parameter to specify the custom
AMI that you want to use. For more information on the standard
        operating systems, see `Operating Systems`_. For more information on
how to use custom AMIs with OpsWorks, see `Using Custom AMIs`_.
:type ami_id: string
:param ami_id:
A custom AMI ID to be used to create the instance. The AMI should be
based on one of the standard AWS OpsWorks AMIs: Amazon Linux,
Ubuntu 12.04 LTS, or Ubuntu 14.04 LTS. For more information, see
`Instances`_
If you specify a custom AMI, you must set `Os` to `Custom`.
:type ssh_key_name: string
:param ssh_key_name: The instance SSH key name.
:type architecture: string
:param architecture: The instance architecture. Instance types do not
necessarily support both architectures. For a list of the
architectures that are supported by the different instance types,
see `Instance Families and Types`_.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type ebs_optimized: boolean
:param ebs_optimized: Whether this is an Amazon EBS-optimized instance.
"""
params = {'InstanceId': instance_id, }
if layer_ids is not None:
params['LayerIds'] = layer_ids
if instance_type is not None:
params['InstanceType'] = instance_type
if auto_scaling_type is not None:
params['AutoScalingType'] = auto_scaling_type
if hostname is not None:
params['Hostname'] = hostname
if os is not None:
params['Os'] = os
if ami_id is not None:
params['AmiId'] = ami_id
if ssh_key_name is not None:
params['SshKeyName'] = ssh_key_name
if architecture is not None:
params['Architecture'] = architecture
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if ebs_optimized is not None:
params['EbsOptimized'] = ebs_optimized
return self.make_request(action='UpdateInstance',
body=json.dumps(params))
def update_layer(self, layer_id, name=None, shortname=None,
attributes=None, custom_instance_profile_arn=None,
custom_security_group_ids=None, packages=None,
volume_configurations=None, enable_auto_healing=None,
auto_assign_elastic_ips=None,
auto_assign_public_ips=None, custom_recipes=None,
install_updates_on_boot=None,
use_ebs_optimized_instances=None,
lifecycle_event_configuration=None):
"""
Updates a specified layer.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type layer_id: string
:param layer_id: The layer ID.
:type name: string
:param name: The layer name, which is used by the console.
:type shortname: string
:param shortname: The layer short name, which is used internally by AWS
            OpsWorks and by Chef. The short name is also used as the name for
the directory where your app files are installed. It can have a
maximum of 200 characters and must be in the following format:
/\A[a-z0-9\-\_\.]+\Z/.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type custom_instance_profile_arn: string
:param custom_instance_profile_arn: The ARN of an IAM profile to be
used for all of the layer's EC2 instances. For more information
about IAM ARNs, see `Using Identifiers`_.
:type custom_security_group_ids: list
:param custom_security_group_ids: An array containing the layer's
custom security group IDs.
:type packages: list
:param packages: An array of `Package` objects that describe the
layer's packages.
:type volume_configurations: list
:param volume_configurations: A `VolumeConfigurations` object that
describes the layer's Amazon EBS volumes.
:type enable_auto_healing: boolean
        :param enable_auto_healing: Whether to enable auto healing for the
layer.
:type auto_assign_elastic_ips: boolean
:param auto_assign_elastic_ips: Whether to automatically assign an
`Elastic IP address`_ to the layer's instances. For more
information, see `How to Edit a Layer`_.
:type auto_assign_public_ips: boolean
:param auto_assign_public_ips: For stacks that are running in a VPC,
whether to automatically assign a public IP address to the layer's
instances. For more information, see `How to Edit a Layer`_.
:type custom_recipes: dict
:param custom_recipes: A `LayerCustomRecipes` object that specifies the
layer's custom recipes.
:type install_updates_on_boot: boolean
:param install_updates_on_boot:
Whether to install operating system and package updates when the
instance boots. The default value is `True`. To control when
updates are installed, set this value to `False`. You must then
update your instances manually by using CreateDeployment to run the
`update_dependencies` stack command or manually running `yum`
(Amazon Linux) or `apt-get` (Ubuntu) on the instances.
We strongly recommend using the default value of `True`, to ensure that
your instances have the latest security updates.
:type use_ebs_optimized_instances: boolean
:param use_ebs_optimized_instances: Whether to use Amazon EBS-optimized
instances.
:type lifecycle_event_configuration: dict
:param lifecycle_event_configuration:
"""
params = {'LayerId': layer_id, }
if name is not None:
params['Name'] = name
if shortname is not None:
params['Shortname'] = shortname
if attributes is not None:
params['Attributes'] = attributes
if custom_instance_profile_arn is not None:
params['CustomInstanceProfileArn'] = custom_instance_profile_arn
if custom_security_group_ids is not None:
params['CustomSecurityGroupIds'] = custom_security_group_ids
if packages is not None:
params['Packages'] = packages
if volume_configurations is not None:
params['VolumeConfigurations'] = volume_configurations
if enable_auto_healing is not None:
params['EnableAutoHealing'] = enable_auto_healing
if auto_assign_elastic_ips is not None:
params['AutoAssignElasticIps'] = auto_assign_elastic_ips
if auto_assign_public_ips is not None:
params['AutoAssignPublicIps'] = auto_assign_public_ips
if custom_recipes is not None:
params['CustomRecipes'] = custom_recipes
if install_updates_on_boot is not None:
params['InstallUpdatesOnBoot'] = install_updates_on_boot
if use_ebs_optimized_instances is not None:
params['UseEbsOptimizedInstances'] = use_ebs_optimized_instances
if lifecycle_event_configuration is not None:
params['LifecycleEventConfiguration'] = lifecycle_event_configuration
return self.make_request(action='UpdateLayer',
body=json.dumps(params))
def update_my_user_profile(self, ssh_public_key=None):
"""
Updates a user's SSH public key.
**Required Permissions**: To use this action, an IAM user must
have self-management enabled or an attached policy that
explicitly grants permissions. For more information on user
permissions, see `Managing User Permissions`_.
:type ssh_public_key: string
:param ssh_public_key: The user's SSH public key.
"""
params = {}
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
return self.make_request(action='UpdateMyUserProfile',
body=json.dumps(params))
def update_rds_db_instance(self, rds_db_instance_arn, db_user=None,
db_password=None):
"""
Updates an Amazon RDS instance.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type rds_db_instance_arn: string
:param rds_db_instance_arn: The Amazon RDS instance's ARN.
:type db_user: string
:param db_user: The master user name.
:type db_password: string
:param db_password: The database password.
"""
params = {'RdsDbInstanceArn': rds_db_instance_arn, }
if db_user is not None:
params['DbUser'] = db_user
if db_password is not None:
params['DbPassword'] = db_password
return self.make_request(action='UpdateRdsDbInstance',
body=json.dumps(params))
def update_stack(self, stack_id, name=None, attributes=None,
service_role_arn=None,
default_instance_profile_arn=None, default_os=None,
hostname_theme=None, default_availability_zone=None,
default_subnet_id=None, custom_json=None,
configuration_manager=None, chef_configuration=None,
use_custom_cookbooks=None, custom_cookbooks_source=None,
default_ssh_key_name=None,
default_root_device_type=None,
use_opsworks_security_groups=None):
"""
Updates a specified stack.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type stack_id: string
:param stack_id: The stack ID.
:type name: string
:param name: The stack's new name.
:type attributes: map
:param attributes: One or more user-defined key/value pairs to be added
to the stack attributes.
:type service_role_arn: string
:param service_role_arn:
The stack AWS Identity and Access Management (IAM) role, which allows
AWS OpsWorks to work with AWS resources on your behalf. You must
set this parameter to the Amazon Resource Name (ARN) for an
existing IAM role. For more information about IAM ARNs, see `Using
Identifiers`_.
You must set this parameter to a valid service role ARN or the action
will fail; there is no default value. You can specify the stack's
current service role ARN, if you prefer, but you must do so
explicitly.
:type default_instance_profile_arn: string
:param default_instance_profile_arn: The ARN of an IAM profile that is
the default profile for all of the stack's EC2 instances. For more
information about IAM ARNs, see `Using Identifiers`_.
:type default_os: string
:param default_os: The stack's operating system, which must be set to
one of the following.
+ Standard operating systems: an Amazon Linux version such as `Amazon
Linux 2014.09`, `Ubuntu 12.04 LTS`, or `Ubuntu 14.04 LTS`.
+ Custom AMIs: `Custom`. You specify the custom AMI you want to use
when you create instances.
The default option is the current Amazon Linux version.
:type hostname_theme: string
        :param hostname_theme: The stack's new host name theme, with spaces
replaced by underscores. The theme is used to generate host names
for the stack's instances. By default, `HostnameTheme` is set to
`Layer_Dependent`, which creates host names by appending integers
to the layer's short name. The other themes are:
+ `Baked_Goods`
+ `Clouds`
+ `European_Cities`
+ `Fruits`
+ `Greek_Deities`
+ `Legendary_Creatures_from_Japan`
+ `Planets_and_Moons`
+ `Roman_Deities`
+ `Scottish_Islands`
+ `US_Cities`
+ `Wild_Cats`
        To obtain a generated host name, call `GetHostnameSuggestion`, which
returns a host name based on the current theme.
:type default_availability_zone: string
:param default_availability_zone: The stack's default Availability
Zone, which must be in the specified region. For more information,
see `Regions and Endpoints`_. If you also specify a value for
`DefaultSubnetId`, the subnet must be in the same zone. For more
information, see CreateStack.
:type default_subnet_id: string
:param default_subnet_id: The stack's default VPC subnet ID. This
parameter is required if you specify a value for the `VpcId`
parameter. All instances are launched into this subnet unless you
specify otherwise when you create the instance. If you also specify
a value for `DefaultAvailabilityZone`, the subnet must be in that
zone. For information on default values and when this parameter is
required, see the `VpcId` parameter description.
:type custom_json: string
:param custom_json: A string that contains user-defined, custom JSON.
It is used to override the corresponding default stack
configuration JSON values. The string should be in the following
            format and must escape characters such as '"':
`"{\"key1\": \"value1\", \"key2\": \"value2\",...}"`
For more information on custom JSON, see `Use Custom JSON to Modify the
Stack Configuration JSON`_.
:type configuration_manager: dict
        :param configuration_manager: The configuration manager. When you
            update a stack, we recommend that you use the configuration
            manager to specify the Chef version, 0.9, 11.4, or 11.10. The
            default value is currently 11.4.
:type chef_configuration: dict
:param chef_configuration: A `ChefConfiguration` object that specifies
whether to enable Berkshelf and the Berkshelf version on Chef 11.10
stacks. For more information, see `Create a New Stack`_.
:type use_custom_cookbooks: boolean
:param use_custom_cookbooks: Whether the stack uses custom cookbooks.
:type custom_cookbooks_source: dict
:param custom_cookbooks_source: Contains the information required to
retrieve an app or cookbook from a repository. For more
information, see `Creating Apps`_ or `Custom Recipes and
Cookbooks`_.
:type default_ssh_key_name: string
:param default_ssh_key_name: A default SSH key for the stack instances.
You can override this value when you create or update an instance.
:type default_root_device_type: string
:param default_root_device_type: The default root device type. This
value is used by default for all instances in the stack, but you
can override it when you create an instance. For more information,
see `Storage for the Root Device`_.
:type use_opsworks_security_groups: boolean
:param use_opsworks_security_groups: Whether to associate the AWS
OpsWorks built-in security groups with the stack's layers.
AWS OpsWorks provides a standard set of built-in security groups, one
for each layer, which are associated with layers by default.
`UseOpsworksSecurityGroups` allows you to instead provide your own
custom security groups. `UseOpsworksSecurityGroups` has the
following settings:
+ True - AWS OpsWorks automatically associates the appropriate built-in
security group with each layer (default setting). You can associate
additional security groups with a layer after you create it but you
cannot delete the built-in security group.
+ False - AWS OpsWorks does not associate built-in security groups with
layers. You must create appropriate EC2 security groups and
associate a security group with each layer that you create.
However, you can still manually associate a built-in security group
with a layer on creation; custom security groups are required only
for those layers that need custom settings.
For more information, see `Create a New Stack`_.
"""
params = {'StackId': stack_id, }
if name is not None:
params['Name'] = name
if attributes is not None:
params['Attributes'] = attributes
if service_role_arn is not None:
params['ServiceRoleArn'] = service_role_arn
if default_instance_profile_arn is not None:
params['DefaultInstanceProfileArn'] = default_instance_profile_arn
if default_os is not None:
params['DefaultOs'] = default_os
if hostname_theme is not None:
params['HostnameTheme'] = hostname_theme
if default_availability_zone is not None:
params['DefaultAvailabilityZone'] = default_availability_zone
if default_subnet_id is not None:
params['DefaultSubnetId'] = default_subnet_id
if custom_json is not None:
params['CustomJson'] = custom_json
if configuration_manager is not None:
params['ConfigurationManager'] = configuration_manager
if chef_configuration is not None:
params['ChefConfiguration'] = chef_configuration
if use_custom_cookbooks is not None:
params['UseCustomCookbooks'] = use_custom_cookbooks
if custom_cookbooks_source is not None:
params['CustomCookbooksSource'] = custom_cookbooks_source
if default_ssh_key_name is not None:
params['DefaultSshKeyName'] = default_ssh_key_name
if default_root_device_type is not None:
params['DefaultRootDeviceType'] = default_root_device_type
if use_opsworks_security_groups is not None:
params['UseOpsworksSecurityGroups'] = use_opsworks_security_groups
return self.make_request(action='UpdateStack',
body=json.dumps(params))
def update_user_profile(self, iam_user_arn, ssh_username=None,
ssh_public_key=None, allow_self_management=None):
"""
Updates a specified user profile.
**Required Permissions**: To use this action, an IAM user must
have an attached policy that explicitly grants permissions.
For more information on user permissions, see `Managing User
Permissions`_.
:type iam_user_arn: string
:param iam_user_arn: The user IAM ARN.
:type ssh_username: string
:param ssh_username: The user's SSH user name. The allowable characters
are [a-z], [A-Z], [0-9], '-', and '_'. If the specified name
includes other punctuation marks, AWS OpsWorks removes them. For
example, `my.name` will be changed to `myname`. If you do not
specify an SSH user name, AWS OpsWorks generates one from the IAM
user name.
:type ssh_public_key: string
:param ssh_public_key: The user's new SSH public key.
:type allow_self_management: boolean
:param allow_self_management: Whether users can specify their own SSH
public key through the My Settings page. For more information, see
`Managing User Permissions`_.
"""
params = {'IamUserArn': iam_user_arn, }
if ssh_username is not None:
params['SshUsername'] = ssh_username
if ssh_public_key is not None:
params['SshPublicKey'] = ssh_public_key
if allow_self_management is not None:
params['AllowSelfManagement'] = allow_self_management
return self.make_request(action='UpdateUserProfile',
body=json.dumps(params))
def update_volume(self, volume_id, name=None, mount_point=None):
"""
Updates an Amazon EBS volume's name or mount point. For more
information, see `Resource Management`_.
**Required Permissions**: To use this action, an IAM user must
have a Manage permissions level for the stack, or an attached
policy that explicitly grants permissions. For more
information on user permissions, see `Managing User
Permissions`_.
:type volume_id: string
:param volume_id: The volume ID.
:type name: string
:param name: The new name.
:type mount_point: string
:param mount_point: The new mount point.
"""
params = {'VolumeId': volume_id, }
if name is not None:
params['Name'] = name
if mount_point is not None:
params['MountPoint'] = mount_point
return self.make_request(action='UpdateVolume',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
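    # Added usage sketch (not part of the original module). boto typically
    # exposes a `connect_to_region` helper for building this layer1
    # connection; if your boto version differs, treat the helper name below
    # as an assumption.
    #
    #   import boto.opsworks
    #   conn = boto.opsworks.connect_to_region('us-east-1')
    #   conn.start_instance('instance-id-example')   # assumed instance ID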
| apache-2.0 | 8,575,868,229,364,841,000 | 40.974467 | 81 | 0.626046 | false |
mmezzavilla/ns3-mmwave | .waf-1.8.19-b1fc8f7baef51bd2db4c2971909a568d/waflib/Tools/suncc.py | 11 | 1168 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! https://waf.io/book/index.html#_obtaining_the_waf_file
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_scc(conf):
v=conf.env
cc=conf.find_program('cc',var='CC')
try:
conf.cmd_and_log(cc+['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler'%cc)
v.CC_NAME='sun'
conf.get_suncc_version(cc)
@conf
def scc_common_flags(conf):
v=conf.env
v['CC_SRC_F']=[]
v['CC_TGT_F']=['-c','-o']
if not v['LINK_CC']:v['LINK_CC']=v['CC']
v['CCLNK_SRC_F']=''
v['CCLNK_TGT_F']=['-o']
v['CPPPATH_ST']='-I%s'
v['DEFINES_ST']='-D%s'
v['LIB_ST']='-l%s'
v['LIBPATH_ST']='-L%s'
v['STLIB_ST']='-l%s'
v['STLIBPATH_ST']='-L%s'
v['SONAME_ST']='-Wl,-h,%s'
v['SHLIB_MARKER']='-Bdynamic'
v['STLIB_MARKER']='-Bstatic'
v['cprogram_PATTERN']='%s'
v['CFLAGS_cshlib']=['-xcode=pic32','-DPIC']
v['LINKFLAGS_cshlib']=['-G']
v['cshlib_PATTERN']='lib%s.so'
v['LINKFLAGS_cstlib']=['-Bstatic']
v['cstlib_PATTERN']='lib%s.a'
def configure(conf):
conf.find_scc()
conf.find_ar()
conf.scc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
| gpl-2.0 | -2,771,855,335,719,540,700 | 24.391304 | 78 | 0.621575 | false |
JaDogg/__py_playground | reference/pages/sum1/_stops.py | 1 | 9394 |
nltkStops = {
'.': True,
',': True,
':': True,
'a': True,
"a's": True,
'able': True,
'about': True,
'above': True,
'according': True,
'accordingly': True,
'across': True,
'actually': True,
'after': True,
'afterwards': True,
'again': True,
'against': True,
"ain't": True,
'all': True,
'allow': True,
'allows': True,
'almost': True,
'alone': True,
'along': True,
'already': True,
'also': True,
'although': True,
'always': True,
'am': True,
'among': True,
'amongst': True,
'an': True,
'and': True,
'another': True,
'any': True,
'anybody': True,
'anyhow': True,
'anyone': True,
'anything': True,
'anyway': True,
'anyways': True,
'anywhere': True,
'apart': True,
'appear': True,
'appreciate': True,
'appropriate': True,
'are': True,
"aren't": True,
'around': True,
'as': True,
'aside': True,
'ask': True,
'asking': True,
'associated': True,
'at': True,
'available': True,
'away': True,
'awfully': True,
'b': True,
'be': True,
'became': True,
'because': True,
'become': True,
'becomes': True,
'becoming': True,
'been': True,
'before': True,
'beforehand': True,
'behind': True,
'being': True,
'believe': True,
'below': True,
'beside': True,
'besides': True,
'best': True,
'better': True,
'between': True,
'beyond': True,
'both': True,
'brief': True,
'but': True,
'by': True,
'c': True,
"c'mon": True,
"c's": True,
'came': True,
'can': True,
"can't": True,
'cannot': True,
'cant': True,
'cause': True,
'causes': True,
'certain': True,
'certainly': True,
'changes': True,
'clearly': True,
'co': True,
'com': True,
'come': True,
'comes': True,
'concerning': True,
'consequently': True,
'consider': True,
'considering': True,
'contain': True,
'containing': True,
'contains': True,
'corresponding': True,
'could': True,
"couldn't": True,
'course': True,
'currently': True,
'd': True,
'definitely': True,
'described': True,
'despite': True,
'did': True,
"didn't": True,
'different': True,
'do': True,
'does': True,
"doesn't": True,
'doing': True,
"don't": True,
'done': True,
'down': True,
'downwards': True,
'during': True,
'e': True,
'each': True,
'edu': True,
'eg': True,
'eight': True,
'either': True,
'else': True,
'elsewhere': True,
'enough': True,
'entirely': True,
'especially': True,
'et': True,
'etc': True,
'even': True,
'ever': True,
'every': True,
'everybody': True,
'everyone': True,
'everything': True,
'everywhere': True,
'ex': True,
'exactly': True,
'example': True,
'except': True,
'f': True,
'far': True,
'few': True,
'fifth': True,
'first': True,
'five': True,
'followed': True,
'following': True,
'follows': True,
'for': True,
'former': True,
'formerly': True,
'forth': True,
'four': True,
'from': True,
'further': True,
'furthermore': True,
'g': True,
'get': True,
'gets': True,
'getting': True,
'given': True,
'gives': True,
'go': True,
'goes': True,
'going': True,
'gone': True,
'got': True,
'gotten': True,
'greetings': True,
'h': True,
'had': True,
"hadn't": True,
'happens': True,
'hardly': True,
'has': True,
"hasn't": True,
'have': True,
"haven't": True,
'having': True,
'he': True,
"he's": True,
'hello': True,
'help': True,
'hence': True,
'her': True,
'here': True,
"here's": True,
'hereafter': True,
'hereby': True,
'herein': True,
'hereupon': True,
'hers': True,
'herself': True,
'hi': True,
'him': True,
'himself': True,
'his': True,
'hither': True,
'hopefully': True,
'how': True,
'howbeit': True,
'however': True,
'i': True,
"i'd": True,
"i'll": True,
"i'm": True,
"i've": True,
'ie': True,
'if': True,
'ignored': True,
'immediate': True,
'in': True,
'inasmuch': True,
'inc': True,
'indeed': True,
'indicate': True,
'indicated': True,
'indicates': True,
'inner': True,
'insofar': True,
'instead': True,
'into': True,
'inward': True,
'is': True,
"isn't": True,
'it': True,
"it'd": True,
"it'll": True,
"it's": True,
'its': True,
'itself': True,
'j': True,
'just': True,
'k': True,
'keep': True,
'keeps': True,
'kept': True,
'know': True,
'knows': True,
'known': True,
'l': True,
'last': True,
'lately': True,
'later': True,
'latter': True,
'latterly': True,
'least': True,
'less': True,
'lest': True,
'let': True,
"let's": True,
'like': True,
'liked': True,
'likely': True,
'little': True,
'look': True,
'looking': True,
'looks': True,
'ltd': True,
'm': True,
'mainly': True,
'many': True,
'may': True,
'maybe': True,
'me': True,
'mean': True,
'meanwhile': True,
'merely': True,
'might': True,
'more': True,
'moreover': True,
'most': True,
'mostly': True,
'much': True,
'must': True,
'my': True,
'myself': True,
'n': True,
'name': True,
'namely': True,
'nd': True,
'near': True,
'nearly': True,
'necessary': True,
'need': True,
'needs': True,
'neither': True,
'never': True,
'nevertheless': True,
'new': True,
'next': True,
'nine': True,
'no': True,
'nobody': True,
'non': True,
'none': True,
'noone': True,
'nor': True,
'normally': True,
'not': True,
'nothing': True,
'novel': True,
'now': True,
'nowhere': True,
'o': True,
'obviously': True,
'of': True,
'off': True,
'often': True,
'oh': True,
'ok': True,
'okay': True,
'old': True,
'on': True,
'once': True,
'one': True,
'ones': True,
'only': True,
'onto': True,
'or': True,
'other': True,
'others': True,
'otherwise': True,
'ought': True,
'our': True,
'ours': True,
'ourselves': True,
'out': True,
'outside': True,
'over': True,
'overall': True,
'own': True,
'p': True,
'particular': True,
'particularly': True,
'per': True,
'perhaps': True,
'placed': True,
'please': True,
'plus': True,
'possible': True,
'presumably': True,
'probably': True,
'provides': True,
'q': True,
'que': True,
'quite': True,
'qv': True,
'r': True,
'rather': True,
'rd': True,
're': True,
'really': True,
'reasonably': True,
'regarding': True,
'regardless': True,
'regards': True,
'relatively': True,
'respectively': True,
'right': True,
's': True,
'said': True,
'same': True,
'saw': True,
'say': True,
'saying': True,
'says': True,
'second': True,
'secondly': True,
'see': True,
'seeing': True,
'seem': True,
'seemed': True,
'seeming': True,
'seems': True,
'seen': True,
'self': True,
'selves': True,
'sensible': True,
'sent': True,
'serious': True,
'seriously': True,
'seven': True,
'several': True,
'shall': True,
'she': True,
'should': True,
"shouldn't": True,
'since': True,
'six': True,
'so': True,
'some': True,
'somebody': True,
'somehow': True,
'someone': True,
'something': True,
'sometime': True,
'sometimes': True,
'somewhat': True,
'somewhere': True,
'soon': True,
'sorry': True,
'specified': True,
'specify': True,
'specifying': True,
'still': True,
'sub': True,
'such': True,
'sup': True,
'sure': True,
't': True,
"t's": True,
'take': True,
'taken': True,
'tell': True,
'tends': True,
'th': True,
'than': True,
'thank': True,
'thanks': True,
'thanx': True,
'that': True,
"that's": True,
'thats': True,
'the': True,
'their': True,
'theirs': True,
'them': True,
'themselves': True,
'then': True,
'thence': True,
'there': True,
"there's": True,
'thereafter': True,
'thereby': True,
'therefore': True,
'therein': True,
'theres': True,
'thereupon': True,
'these': True,
'they': True,
"they'd": True,
"they'll": True,
"they're": True,
"they've": True,
'think': True,
'third': True,
'this': True,
'thorough': True,
'thoroughly': True,
'those': True,
'though': True,
'three': True,
'through': True,
'throughout': True,
'thru': True,
'thus': True,
'to': True,
'together': True,
'too': True,
'took': True,
'toward': True,
'towards': True,
'tried': True,
'tries': True,
'truly': True,
'try': True,
'trying': True,
'twice': True,
'two': True,
'u': True,
'un': True,
'under': True,
'unfortunately': True,
'unless': True,
'unlikely': True,
'until': True,
'unto': True,
'up': True,
'upon': True,
'us': True,
'use': True,
'used': True,
'useful': True,
'uses': True,
'using': True,
'usually': True,
'uucp': True,
'v': True,
'value': True,
'various': True,
'very': True,
'via': True,
'viz': True,
'vs': True,
'w': True,
'want': True,
'wants': True,
'was': True,
"wasn't": True,
'way': True,
'we': True,
"we'd": True,
"we'll": True,
"we're": True,
"we've": True,
'welcome': True,
'well': True,
'went': True,
'were': True,
"weren't": True,
'what': True,
"what's": True,
'whatever': True,
'when': True,
'whence': True,
'whenever': True,
'where': True,
"where's": True,
'whereafter': True,
'whereas': True,
'whereby': True,
'wherein': True,
'whereupon': True,
'wherever': True,
'whether': True,
'which': True,
'while': True,
'whither': True,
'who': True,
"who's": True,
'whoever': True,
'whole': True,
'whom': True,
'whose': True,
'why': True,
'will': True,
'willing': True,
'wish': True,
'with': True,
'within': True,
'without': True,
"won't": True,
'wonder': True,
    'would': True,
"wouldn't": True,
'x': True,
'y': True,
'yes': True,
'yet': True,
'you': True,
"you'd": True,
"you'll": True,
"you're": True,
"you've": True,
'your': True,
'yours': True,
'yourself': True,
'yourselves': True,
'z': True,
'zero': True,
"'s": True,
"'ll": True,
"'re": True}
| mit | 532,217,711,085,866,940 | 15.224525 | 23 | 0.563445 | false |
kxxoling/support-tools | googlecode-issues-exporter/github_issue_converter.py | 4 | 16928 | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tool for uploading Google Code issues to GitHub.
Issue migration from Google Code to GitHub.
This tool allows you to easily move your downloaded Google Code issues to
GitHub.
To use this tool:
1. Follow the instructions at https://code.google.com/p/support-tools/ to
download your issues from Google.
2. Go to https://github.com/settings/applications and create a new "Personal
Access Token".
 3. Get the GitHub username of the owner of the repository and the repository's
name you wish to add the issues to. For example username: TheDoctor and
repository: SonicScrewdriver
 4. (Optional) If this option is skipped, all issues will be assigned to the
    owner of the repo. Make a file that contains a mapping from the Google
    Code email address to the GitHub username for each user you wish to assign
    issues to. The file should be newline separated with one user per line.
    The email address and username should be colon (':') separated. For example
a file may look like this:
<Google Code Email>:<GitHub Username>
[email protected]:coolperson
[email protected]:userother
5. Then run the command:
python ./issue_migration.py \
--github_oauth_token=<oauth-token> \
--github_owner_username=<your-github-username> \
--github_repo_name=<repository-name> \
--issue_file_path=<path-to-issue-file> \
--user_file_path="<optional-path-to-user-mapping-file>"
"""
import argparse
import json
import sys
import time
import urllib
import httplib2
import issues
# The URL used for calls to GitHub.
GITHUB_API_URL = "https://api.github.com"
# The maximum number of retries to make for an HTTP request that has failed.
MAX_HTTP_REQUESTS = 3
# The time (in seconds) to wait before trying to see if more requests are
# available.
REQUEST_CHECK_TIME = 60 * 5
# A real kludge. GitHub orders the comments based on time alone, and because
# we upload ours relatively quickly we need at least a second in between
# comments to keep them in chronological order.
COMMENT_DELAY = 2
def _CheckSuccessful(response):
"""Checks if the request was successful.
Args:
response: An HTTP response that contains a mapping from 'status' to an
HTTP response code integer.
Returns:
    True if the request was successful.
"""
return "status" in response and 200 <= int(response["status"]) < 300
class GitHubService(object):
"""A connection to GitHub.
Handles basic HTTP operations to the GitHub API.
Attributes:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
rate_limit: Whether or not to rate limit API calls.
"""
def __init__(self, github_owner_username, github_repo_name,
github_oauth_token, rate_limit, http_instance=None):
"""Initialize the GitHubService.
Args:
github_owner_username: The username of the owner of the repository.
github_repo_name: The GitHub repository name.
      github_oauth_token: The oauth token to use for the requests.
      rate_limit: Whether or not to rate limit API calls.
http_instance: The HTTP instance to use, if not set a default will be
used.
"""
self.github_owner_username = github_owner_username
self.github_repo_name = github_repo_name
self._github_oauth_token = github_oauth_token
self._rate_limit = rate_limit
self._http = http_instance if http_instance else httplib2.Http()
def _PerformHttpRequest(self, method, url, body="{}", params=None):
"""Attemps to make an HTTP request for given method, url, body and params.
If the request fails try again 'MAX_HTTP_REQUESTS' number of times. If the
request fails due to the the request limit being hit, wait until more
requests can be made.
Args:
method: The HTTP request method as a string ('GET', 'POST', etc.).
url: The URL to make the call to.
body: The body of the request.
params: A dictionary of parameters to be used in the http call.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
headers = { "User-Agent": "GoogleCodeIssueExporter/1.0" }
query = params.copy() if params else {}
query["access_token"] = self._github_oauth_token
request_url = "%s%s?%s" % (GITHUB_API_URL, url, urllib.urlencode(query))
requests = 0
while requests < MAX_HTTP_REQUESTS:
requests += 1
response, content = self._http.request(request_url, method,
headers=headers, body=body)
if _CheckSuccessful(response):
return response, json.loads(content)
elif self._RequestLimitReached():
requests -= 1
self._WaitForApiThrottlingToEnd()
return response, json.loads(content)
def PerformGetRequest(self, url, params=None):
"""Makes a GET request.
Args:
url: The URL to make the call to.
params: A dictionary of parameters to be used in the http call.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
return self._PerformHttpRequest("GET", url, params=params)
def PerformPostRequest(self, url, body):
"""Makes a POST request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
if self._rate_limit and self._rate_limit in ["True", "true"]:
      # Add a delay to all outgoing requests to GitHub, so as not to trigger their
# anti-abuse mechanism. This is separate from your typical rate limit, and
# only applies to certain API calls (like creating issues). And, alas, the
# exact quota is undocumented. So the value below is simply a guess. See:
# https://developer.github.com/v3/#abuse-rate-limits
req_min = 15
time.sleep(60 / req_min)
return self._PerformHttpRequest("POST", url, body)
def PerformPatchRequest(self, url, body):
"""Makes a PATCH request.
Args:
url: The URL to make the call to.
body: The body of the request.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
return self._PerformHttpRequest("PATCH", url, body)
def _GetRemainingRequests(self):
"""Gets the number of remaining requests the user has this hour.
Makes GET request to GitHub to get the number of remaining requests before
the hourly request limit is reached.
Returns:
The number of remaining requests.
"""
url = ("%s/rate_limit?access_token=%s" %
(GITHUB_API_URL, self._github_oauth_token))
_, content = self._http.request(url, "GET")
content = json.loads(content)
if "rate" in content and "remaining" in content["rate"]:
return int(content["rate"]["remaining"])
return 0
def _RequestLimitReached(self):
"""Returns true if the request limit has been reached."""
return self._GetRemainingRequests() == 0
def _WaitForApiThrottlingToEnd(self):
"""Waits until the user is allowed to make more requests."""
sys.stdout.write("Hourly request limit reached. Waiting for new limit, "
"checking every %d minutes" % (REQUEST_CHECK_TIME/60))
while True:
sys.stdout.write(".")
sys.stdout.flush()
time.sleep(REQUEST_CHECK_TIME)
if not self._RequestLimitReached():
return
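# Added usage sketch (not part of the original script): a GitHubService can be
# exercised directly for ad-hoc calls. The username, repository, and token
# below are placeholders/assumptions.
#
#   service = GitHubService('TheDoctor', 'SonicScrewdriver',
#                           '<oauth-token>', rate_limit='True')
#   response, content = service.PerformGetRequest('/rate_limit')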
class UserService(issues.UserService):
"""GitHub user operations.
Handles user operations on the GitHub API.
"""
GITHUB_USERS_URL = "/users"
def __init__(self, github_service):
"""Initialize the UserService.
Args:
github_service: The GitHub service.
"""
self._github_service = github_service
def _GetUser(self, username):
"""Gets a GitHub user.
Args:
username: The GitHub username to get.
Returns:
A tuple of an HTTP response (https://developer.github.com/v3/#schema) and
its content from the server which is decoded JSON.
"""
user_url = "%s/%s" % (self.GITHUB_USERS_URL, username)
return self._github_service.PerformGetRequest(user_url)
def IsUser(self, username):
"""Checks if the GitHub user exists.
Args:
username: The GitHub username to check.
Returns:
True if the username exists.
"""
response, _ = self._GetUser(username)
return _CheckSuccessful(response)
class IssueService(issues.IssueService):
"""GitHub issue operations.
Handles creating and updating issues and comments on the GitHub API.
"""
def __init__(self, github_service, comment_delay=COMMENT_DELAY):
"""Initialize the IssueService.
Args:
github_service: The GitHub service.
"""
self._github_service = github_service
self._comment_delay = comment_delay
# If the repo is of the form "login/reponame" then don't inject the
# username as it (or the organization) is already embedded.
if '/' in self._github_service.github_repo_name:
self._github_issues_url = "/repos/%s/issues" % \
self._github_service.github_repo_name
else:
self._github_issues_url = ("/repos/%s/%s/issues" %
(self._github_service.github_owner_username,
self._github_service.github_repo_name))
def GetIssues(self, state="open"):
"""Gets all of the issue for the GitHub repository.
Args:
state: The state of the repository can be either 'open' or 'closed'.
Returns:
The list of all of the issues for the given repository.
Raises:
IOError: An error occurred accessing previously created issues.
"""
github_issues = []
params = {"state": state, "per_page": 100, "page": 0}
while True:
params["page"] += 1
response, content = self._github_service.PerformGetRequest(
self._github_issues_url, params=params)
if not _CheckSuccessful(response):
raise IOError("Failed to retrieve previous issues.\n\n%s" % content)
if not content:
return github_issues
else:
github_issues += content
return github_issues
def CreateIssue(self, googlecode_issue):
"""Creates a GitHub issue.
Args:
googlecode_issue: An instance of GoogleCodeIssue
Returns:
The issue number of the new issue.
Raises:
issues.ServiceError: An error occurred creating the issue.
"""
issue_title = googlecode_issue.GetTitle()
# It is not possible to create a Google Code issue without a title, but you
# can edit an issue to remove its title afterwards.
if issue_title.isspace():
issue_title = "<empty title>"
issue = {
"title": issue_title,
"body": googlecode_issue.GetDescription(),
"assignee": googlecode_issue.GetOwner(),
"labels": googlecode_issue.GetLabels(),
}
response, content = self._github_service.PerformPostRequest(
self._github_issues_url, json.dumps(issue))
if not _CheckSuccessful(response):
# Newline character at the beginning of the line to allows for in-place
# updating of the counts of the issues and comments.
raise issues.ServiceError(
"\nFailed to create issue #%d '%s'.\n\n\n"
"Response:\n%s\n\n\nContent:\n%s" % (
googlecode_issue.GetId(), issue_title, response, content))
return self._GetIssueNumber(content)
def CloseIssue(self, issue_number):
"""Closes a GitHub issue.
Args:
issue_number: The issue number.
Raises:
issues.ServiceError: An error occurred closing the issue.
"""
issue_url = "%s/%d" % (self._github_issues_url, issue_number)
json_state = json.dumps({"state": "closed"})
response, content = self._github_service.PerformPatchRequest(
issue_url, json_state)
if not _CheckSuccessful(response):
raise issues.ServiceError("\nFailed to close issue #%s.\n%s" % (
issue_number, content))
def CreateComment(self, issue_number, source_issue_id,
googlecode_comment, project_name):
"""Creates a comment on a GitHub issue.
Args:
issue_number: The issue number.
source_issue_id: The Google Code issue id.
googlecode_comment: A GoogleCodeComment instance.
project_name: The Google Code project name.
Raises:
issues.ServiceError: An error occurred creating the comment.
"""
comment_url = "%s/%d/comments" % (self._github_issues_url, issue_number)
comment = googlecode_comment.GetDescription()
json_body = json.dumps({"body": comment})
response, content = self._github_service.PerformPostRequest(
comment_url, json_body)
if not _CheckSuccessful(response):
raise issues.ServiceError(
"\nFailed to create issue comment for issue #%d\n\n"
"Response:\n%s\n\nContent:\n%s\n\n" %
(issue_number, response, content))
time.sleep(self._comment_delay)
def _GetIssueNumber(self, content):
"""Get the issue number from a newly created GitHub issue.
Args:
content: The content from an HTTP response.
Returns:
The GitHub issue number.
"""
assert "number" in content, "Getting issue number from: %s" % content
return content["number"]
def ExportIssues(github_owner_username, github_repo_name, github_oauth_token,
issue_file_path, project_name, user_file_path, rate_limit):
"""Exports all issues for a given project.
"""
github_service = GitHubService(
github_owner_username, github_repo_name, github_oauth_token,
rate_limit)
issue_service = IssueService(github_service)
user_service = UserService(github_service)
issue_data = issues.LoadIssueData(issue_file_path, project_name)
user_map = issues.LoadUserData(user_file_path, user_service)
# Add a special "user_requesting_export" user, which comes in handy.
user_map["user_requesting_export"] = github_owner_username
issue_exporter = issues.IssueExporter(
issue_service, user_service, issue_data, project_name, user_map)
try:
issue_exporter.Init()
issue_exporter.Start()
print "\nDone!\n"
except IOError, e:
print "[IOError] ERROR: %s" % e
except issues.InvalidUserError, e:
print "[InvalidUserError] ERROR: %s" % e
def main(args):
"""The main function.
Args:
args: The command line arguments.
Raises:
ProjectNotFoundError: The user passed in an invalid project name.
"""
parser = argparse.ArgumentParser()
parser.add_argument("--github_oauth_token", required=True,
help="You can generate an oauth token here: "
"https://github.com/settings/applications")
parser.add_argument("--github_owner_username", required=True,
help="The project ownsers GitHub username")
parser.add_argument("--github_repo_name", required=True,
help="The GitHub repository you wish to add the issues"
"to.")
parser.add_argument("--issue_file_path", required=True,
help="The path to the file containing the issues from"
"Google Code.")
parser.add_argument("--project_name", required=True,
help="The name of the Google Code project you wish to"
"export")
parser.add_argument("--user_file_path", required=False,
help="The path to the file containing a mapping from"
"email address to github username.")
parser.add_argument("--rate_limit", required=False, default="True",
help="Rate limit GitHub requests to not run into"
"anti-abuse limits.")
parsed_args, _ = parser.parse_known_args(args)
ExportIssues(
parsed_args.github_owner_username, parsed_args.github_repo_name,
parsed_args.github_oauth_token, parsed_args.issue_file_path,
parsed_args.project_name, parsed_args.user_file_path,
parsed_args.rate_limit)
if __name__ == "__main__":
main(sys.argv)
| apache-2.0 | 5,167,044,544,049,124,000 | 34.340292 | 80 | 0.663339 | false |
aakashsinha19/Aspectus | Image Classification/models/differential_privacy/dp_sgd/dp_optimizer/sanitizer.py | 19 | 4433 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines Sanitizer class for sanitizing tensors.
A sanitizer first limits the sensitivity of a tensor and then adds noise
to the tensor. The parameters are determined by the privacy_spending and the
other parameters. It also uses an accountant to keep track of the privacy
spending.
"""
from __future__ import division
import collections
import tensorflow as tf
from differential_privacy.dp_sgd.dp_optimizer import utils
ClipOption = collections.namedtuple("ClipOption",
["l2norm_bound", "clip"])
class AmortizedGaussianSanitizer(object):
"""Sanitizer with Gaussian noise and amoritzed privacy spending accounting.
This sanitizes a tensor by first clipping the tensor, summing the tensor
and then adding appropriate amount of noise. It also uses an amortized
accountant to keep track of privacy spending.
"""
def __init__(self, accountant, default_option):
"""Construct an AmortizedGaussianSanitizer.
Args:
accountant: the privacy accountant. Expect an amortized one.
      default_option: the default ClipOption.
"""
self._accountant = accountant
self._default_option = default_option
self._options = {}
def set_option(self, tensor_name, option):
"""Set options for an individual tensor.
Args:
tensor_name: the name of the tensor.
option: clip option.
"""
self._options[tensor_name] = option
def sanitize(self, x, eps_delta, sigma=None,
option=ClipOption(None, None), tensor_name=None,
num_examples=None, add_noise=True):
"""Sanitize the given tensor.
    This sanitizes a given tensor by first applying l2 norm clipping and then
adding Gaussian noise. It calls the privacy accountant for updating the
privacy spending.
Args:
x: the tensor to sanitize.
eps_delta: a pair of eps, delta for (eps,delta)-DP. Use it to
compute sigma if sigma is None.
sigma: if sigma is not None, use sigma.
      option: a ClipOption which, if supplied, is used for
clipping and adding noise.
tensor_name: the name of the tensor.
num_examples: if None, use the number of "rows" of x.
add_noise: if True, then add noise, else just clip.
Returns:
      the sanitized tensor; when noise is added, the op that accumulates
      privacy spending is attached as a control dependency of the noise add.
"""
if sigma is None:
# pylint: disable=unpacking-non-sequence
eps, delta = eps_delta
with tf.control_dependencies(
[tf.Assert(tf.greater(eps, 0),
["eps needs to be greater than 0"]),
tf.Assert(tf.greater(delta, 0),
["delta needs to be greater than 0"])]):
# The following formula is taken from
# Dwork and Roth, The Algorithmic Foundations of Differential
# Privacy, Appendix A.
# http://www.cis.upenn.edu/~aaroth/Papers/privacybook.pdf
sigma = tf.sqrt(2.0 * tf.log(1.25 / delta)) / eps
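        # For example, eps = 1.0 and delta = 1e-5 give
        # sigma = sqrt(2 * ln(1.25e5)) / 1.0 ~= 4.84.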
l2norm_bound, clip = option
if l2norm_bound is None:
l2norm_bound, clip = self._default_option
if ((tensor_name is not None) and
(tensor_name in self._options)):
l2norm_bound, clip = self._options[tensor_name]
if clip:
x = utils.BatchClipByL2norm(x, l2norm_bound)
if add_noise:
if num_examples is None:
num_examples = tf.slice(tf.shape(x), [0], [1])
privacy_accum_op = self._accountant.accumulate_privacy_spending(
eps_delta, sigma, num_examples)
with tf.control_dependencies([privacy_accum_op]):
saned_x = utils.AddGaussianNoise(tf.reduce_sum(x, 0),
sigma * l2norm_bound)
else:
saned_x = tf.reduce_sum(x, 0)
return saned_x
| apache-2.0 | -6,854,037,978,217,485,000 | 35.04065 | 80 | 0.655087 | false |
spacewalkproject/spacewalk | client/tools/rhn-virtualization/virtualization/init_action.py | 7 | 1286 | #
# Copyright (c) 2008--2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import sys
from virtualization import support
actions = {
'shutdown' : support.shutdown,
'start' : support.start,
'suspend' : support.suspend,
'resume' : support.resume,
'reboot' : support.reboot,
'destroy' : support.destroy,
}
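# The script expects an action name and a virtual machine UUID on the command
# line, e.g. "init_action.py start <uuid>" (the uuid here is a placeholder);
# unknown actions exit with an error below.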
action_type = sys.argv[1]
uuid = sys.argv[2]
if action_type not in actions:
sys.stderr.write("Unknown action: %s \n" % action_type)
sys.exit(1)
try:
actions[action_type](uuid)
except Exception:
e = sys.exc_info()[1]
sys.stderr.write(str(e))
sys.exit(1)
sys.exit(0)
| gpl-2.0 | -6,343,433,644,573,975,000 | 29.619048 | 73 | 0.644635 | false |
denis-pitul/django | django/core/management/commands/dumpdata.py | 51 | 7861 | from collections import OrderedDict
from django.apps import apps
from django.core import serializers
from django.core.management.base import BaseCommand, CommandError
from django.db import DEFAULT_DB_ALIAS, router
class Command(BaseCommand):
help = ("Output the contents of the database as a fixture of the given "
"format (using each model's default manager unless --all is "
"specified).")
def add_arguments(self, parser):
parser.add_argument('args', metavar='app_label[.ModelName]', nargs='*',
help='Restricts dumped data to the specified app_label or app_label.ModelName.')
parser.add_argument('--format', default='json', dest='format',
help='Specifies the output serialization format for fixtures.')
parser.add_argument('--indent', default=None, dest='indent', type=int,
help='Specifies the indent level to use when pretty-printing output.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS,
help='Nominates a specific database to dump fixtures from. '
'Defaults to the "default" database.')
parser.add_argument('-e', '--exclude', dest='exclude', action='append', default=[],
help='An app_label or app_label.ModelName to exclude '
'(use multiple --exclude to exclude multiple apps/models).')
parser.add_argument('--natural-foreign', action='store_true', dest='use_natural_foreign_keys', default=False,
help='Use natural foreign keys if they are available.')
parser.add_argument('--natural-primary', action='store_true', dest='use_natural_primary_keys', default=False,
help='Use natural primary keys if they are available.')
parser.add_argument('-a', '--all', action='store_true', dest='use_base_manager', default=False,
help="Use Django's base manager to dump all models stored in the database, "
"including those that would otherwise be filtered or modified by a custom manager.")
parser.add_argument('--pks', dest='primary_keys',
help="Only dump objects with given primary keys. "
"Accepts a comma separated list of keys. "
"This option will only work when you specify one model.")
parser.add_argument('-o', '--output', default=None, dest='output',
help='Specifies file to which the output is written.')
def handle(self, *app_labels, **options):
format = options.get('format')
indent = options.get('indent')
using = options.get('database')
excludes = options.get('exclude')
output = options.get('output')
show_traceback = options.get('traceback')
use_natural_foreign_keys = options.get('use_natural_foreign_keys')
use_natural_primary_keys = options.get('use_natural_primary_keys')
use_base_manager = options.get('use_base_manager')
pks = options.get('primary_keys')
if pks:
primary_keys = pks.split(',')
else:
primary_keys = []
excluded_apps = set()
excluded_models = set()
for exclude in excludes:
if '.' in exclude:
try:
model = apps.get_model(exclude)
except LookupError:
raise CommandError('Unknown model in excludes: %s' % exclude)
excluded_models.add(model)
else:
try:
app_config = apps.get_app_config(exclude)
except LookupError as e:
raise CommandError(str(e))
excluded_apps.add(app_config)
if len(app_labels) == 0:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict((app_config, None)
for app_config in apps.get_app_configs()
if app_config.models_module is not None and app_config not in excluded_apps)
else:
if len(app_labels) > 1 and primary_keys:
raise CommandError("You can only use --pks option with one model")
app_list = OrderedDict()
for label in app_labels:
try:
app_label, model_label = label.split('.')
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
try:
model = app_config.get_model(model_label)
except LookupError:
raise CommandError("Unknown model: %s.%s" % (app_label, model_label))
app_list_value = app_list.setdefault(app_config, [])
# We may have previously seen a "all-models" request for
# this app (no model qualifier was given). In this case
# there is no need adding specific models to the list.
if app_list_value is not None:
if model not in app_list_value:
app_list_value.append(model)
except ValueError:
if primary_keys:
raise CommandError("You can only use --pks option with one model")
# This is just an app - no model qualifier
app_label = label
try:
app_config = apps.get_app_config(app_label)
except LookupError as e:
raise CommandError(str(e))
if app_config.models_module is None or app_config in excluded_apps:
continue
app_list[app_config] = None
# Check that the serialization format exists; this is a shortcut to
# avoid collating all the objects and _then_ failing.
if format not in serializers.get_public_serializer_formats():
try:
serializers.get_serializer(format)
except serializers.SerializerDoesNotExist:
pass
raise CommandError("Unknown serialization format: %s" % format)
def get_objects():
# Collate the objects to be serialized.
for model in serializers.sort_dependencies(app_list.items()):
if model in excluded_models:
continue
if not model._meta.proxy and router.allow_migrate_model(using, model):
if use_base_manager:
objects = model._base_manager
else:
objects = model._default_manager
queryset = objects.using(using).order_by(model._meta.pk.name)
if primary_keys:
queryset = queryset.filter(pk__in=primary_keys)
for obj in queryset.iterator():
yield obj
try:
self.stdout.ending = None
stream = open(output, 'w') if output else None
try:
serializers.serialize(format, get_objects(), indent=indent,
use_natural_foreign_keys=use_natural_foreign_keys,
use_natural_primary_keys=use_natural_primary_keys,
stream=stream or self.stdout)
finally:
if stream:
stream.close()
except Exception as e:
if show_traceback:
raise
raise CommandError("Unable to serialize database: %s" % e)
| bsd-3-clause | -4,149,776,207,703,098,400 | 47.826087 | 117 | 0.555527 | false |
TRox1972/youtube-dl | youtube_dl/extractor/drtuber.py | 4 | 3040 | from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
NO_DEFAULT,
str_to_int,
)
class DrTuberIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?drtuber\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[\w-]+))?'
_TESTS = [{
'url': 'http://www.drtuber.com/video/1740434/hot-perky-blonde-naked-golf',
'md5': '93e680cf2536ad0dfb7e74d94a89facd',
'info_dict': {
'id': '1740434',
'display_id': 'hot-perky-blonde-naked-golf',
'ext': 'mp4',
'title': 'hot perky blonde naked golf',
'like_count': int,
'comment_count': int,
'categories': ['Babe', 'Blonde', 'Erotic', 'Outdoor', 'Softcore', 'Solo'],
'thumbnail': 're:https?://.*\.jpg$',
'age_limit': 18,
}
}, {
'url': 'http://www.drtuber.com/embed/489939',
'only_matching': True,
}]
@staticmethod
def _extract_urls(webpage):
return re.findall(
r'<iframe[^>]+?src=["\'](?P<url>(?:https?:)?//(?:www\.)?drtuber\.com/embed/\d+)',
webpage)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id') or video_id
webpage = self._download_webpage(
'http://www.drtuber.com/video/%s' % video_id, display_id)
video_url = self._html_search_regex(
r'<source src="([^"]+)"', webpage, 'video URL')
title = self._html_search_regex(
(r'class="title_watch"[^>]*><(?:p|h\d+)[^>]*>([^<]+)<',
r'<p[^>]+class="title_substrate">([^<]+)</p>',
r'<title>([^<]+) - \d+'),
webpage, 'title')
thumbnail = self._html_search_regex(
r'poster="([^"]+)"',
webpage, 'thumbnail', fatal=False)
def extract_count(id_, name, default=NO_DEFAULT):
return str_to_int(self._html_search_regex(
r'<span[^>]+(?:class|id)="%s"[^>]*>([\d,\.]+)</span>' % id_,
webpage, '%s count' % name, default=default, fatal=False))
like_count = extract_count('rate_likes', 'like')
dislike_count = extract_count('rate_dislikes', 'dislike', default=None)
comment_count = extract_count('comments_count', 'comment')
cats_str = self._search_regex(
r'<div[^>]+class="categories_list">(.+?)</div>',
webpage, 'categories', fatal=False)
categories = [] if not cats_str else re.findall(
r'<a title="([^"]+)"', cats_str)
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'thumbnail': thumbnail,
'like_count': like_count,
'dislike_count': dislike_count,
'comment_count': comment_count,
'categories': categories,
'age_limit': self._rta_search(webpage),
}
| unlicense | -4,283,156,187,513,633,000 | 34.348837 | 107 | 0.504605 | false |
elaginm/python_training | fixture/contact.py | 1 | 11101 | from selenium.webdriver.support.ui import Select
from model.contact import Contact
import re
class ContactHelper:
def __init__(self, app):
self.app = app
def open_new_address(self):
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
def create(self, contact):
wd = self.app.wd
self.open_new_address()
self.fill_contact_form(contact)
        # Click the Enter button
wd.find_element_by_xpath('//input[@value="Enter"][2]').click()
self.return_to_homepage()
self.contact_cache = None
def add_contact_to_group_by_name(self, contact_id, group_id):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(contact_id)
wd.find_element_by_name("to_group").click()
select = Select(wd.find_element_by_name("to_group"))
select.select_by_value(group_id)
wd.find_element_by_xpath('//input[@value="Add to"]').click()
wd.find_element_by_xpath('//i//*[contains(text(),"group page")]').click()
def sort_by_group_by_id(self, group_id):
wd = self.app.wd
select = Select(wd.find_element_by_xpath('//select[@name="group"]'))
select.select_by_value(group_id)
def edit_empty_name(self, index):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_index(index)
wd.find_elements_by_xpath('//img[@title="Edit"]')[index].click()
return wd.find_element_by_xpath('//input[@name="firstname"]').get_attribute("value")
def edit_empty_name_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(id)
wd.find_element_by_xpath("//input[@id=" + id + "]//../following-sibling::td//img[@title='Edit']").click()
return wd.find_element_by_xpath('//input[@name="firstname"]').get_attribute("value")
def edit_first_contact(self):
self.edit_contact_by_index(0)
def edit_contact_by_index(self, index, new_contact_data):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_index(index)
        # Click the Edit button
wd.find_elements_by_xpath('//img[@title="Edit"]')[index].click()
self.fill_contact_form(new_contact_data)
        # Click the Update button
wd.find_element_by_xpath('//input[@value="Update"][2]').click()
self.return_to_homepage()
self.contact_cache = None
def edit_contact_by_id(self, id, new_contact_data):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(id)
        # Click the Edit button
wd.find_element_by_xpath("//input[@id="+id+"]//../following-sibling::td//img[@title='Edit']").click()
self.fill_contact_form(new_contact_data)
        # Click the Update button
wd.find_element_by_xpath('//input[@value="Update"][2]').click()
self.return_to_homepage()
self.contact_cache = None
def change_field_value(self, field_name, text):
wd = self.app.wd
if text is not None:
wd.find_element_by_name(field_name).click()
wd.find_element_by_name(field_name).clear()
wd.find_element_by_name(field_name).send_keys(text)
def fill_contact_form(self, contact):
wd = self.app.wd
self.change_field_value("firstname", contact.firstname)
self.change_field_value("lastname", contact.lastname)
self.change_field_value("nickname", contact.nickname)
self.change_field_value("title", contact.title)
self.change_field_value("company", contact.company)
self.change_field_value("address", contact.address)
self.change_field_value("home", contact.homephone)
self.change_field_value("mobile", contact.mobilephone)
self.change_field_value("email", contact.email)
self.change_field_value("homepage", contact.homepage)
select = Select(wd.find_element_by_xpath('//select[@name="bday"]'))
select.select_by_value(contact.birthdayday)
select = Select(wd.find_element_by_xpath('//select[@name="bmonth"]'))
select.select_by_value(contact.birthdaymonth)
self.change_field_value("byear", contact.birthdayyear)
self.change_field_value("address2", contact.address2)
def select_first_contact(self):
wd = self.app.wd
wd.find_element_by_name("selected[]").click()
def select_contact_by_index(self, index):
wd = self.app.wd
wd.find_elements_by_name("selected[]")[index].click()
def select_contact_by_id(self, id):
wd = self.app.wd
wd.find_element_by_id(id).click()
def delete_first_contact(self):
self.delete_contact_by_index(0)
def delete_contact_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_index(index)
        # Click the Delete button
wd.find_element_by_xpath('//input[@value="Delete"]').click()
        # Confirm the deletion in the alert
wd.switch_to_alert().accept()
self.app.open_home_page()
self.contact_cache = None
def delete_contact_by_id(self, id):
wd = self.app.wd
self.app.open_home_page()
self.select_contact_by_id(id)
        # Click the Delete button
wd.find_element_by_xpath('//input[@value="Delete"]').click()
        # Confirm the deletion in the alert
wd.switch_to_alert().accept()
self.app.open_home_page()
self.contact_cache = None
def return_to_homepage(self):
wd = self.app.wd
wd.find_element_by_link_text("home page").click()
def count(self):
wd = self.app.wd
self.app.open_home_page()
return len(wd.find_elements_by_name("selected[]"))
contact_cache = None
def get_contact_list(self):
if self.contact_cache is None:
wd = self.app.wd
self.app.open_home_page()
self.contact_cache = []
for row in wd.find_elements_by_name("entry"):
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
firstname = cells[2].text
lastname = cells[1].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
self.contact_cache.append(
Contact(id=id, firstname=firstname, lastname=lastname, address=address,
all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones))
return list(self.contact_cache)
def open_contact_to_edit_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[7]
cell.find_element_by_tag_name("a").click()
def open_contact_details_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cell = row.find_elements_by_tag_name("td")[6]
cell.find_element_by_tag_name("a").click()
def get_contact_info_from_edit_page(self, index):
wd = self.app.wd
self.open_contact_to_edit_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
firstname = wd.find_element_by_name("firstname").get_attribute("value")
lastname = wd.find_element_by_name("lastname").get_attribute("value")
address = wd.find_element_by_name("address").text
homephone = wd.find_element_by_name("home").get_attribute("value")
workphone = wd.find_element_by_name("work").get_attribute("value")
mobilephone = wd.find_element_by_name("mobile").get_attribute("value")
secondaryphone = wd.find_element_by_name("phone2").get_attribute("value")
email = wd.find_element_by_name("email").get_attribute("value")
email2 = wd.find_element_by_name("email2").get_attribute("value")
email3 = wd.find_element_by_name("email3").get_attribute("value")
return Contact(id=id, firstname=firstname, lastname=lastname, address=address, homephone=homephone,
mobilephone=mobilephone, workphone=workphone, secondaryphone=secondaryphone, email=email,
email2=email2, email3=email3)
def get_contact_from_view_page(self, index):
wd = self.app.wd
self.open_contact_details_by_index(index)
id = wd.find_element_by_name("id").get_attribute("value")
text = wd.find_element_by_id("content").text
homephone = re.search("H: (.*)", text)
workphone = re.search("W: (.*)", text)
mobilephone = re.search("M: (.*)", text)
secondaryphone = re.search("P: (.*)", text)
return Contact(id=id, homephone=homephone, mobilephone=mobilephone, workphone=workphone,
secondaryphone=secondaryphone)
def get_contact_info_by_index(self, index):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_elements_by_name("entry")[index]
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
firstname = cells[2].text
lastname = cells[1].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
return Contact(id=id, firstname=firstname, lastname=lastname, address=address,
all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones)
def get_contact_info_by_id(self, contact_id):
wd = self.app.wd
self.app.open_home_page()
row = wd.find_element_by_xpath('//td/*[@id="'+contact_id+'"]/../..')
cells = row.find_elements_by_tag_name("td")
id = cells[0].find_element_by_tag_name("input").get_attribute("value")
firstname = cells[2].text
lastname = cells[1].text
address = cells[3].text
all_emails = cells[4].text
all_phones = cells[5].text
return Contact(id=id, firstname=firstname, lastname=lastname, address=address,
all_emails_from_home_page=all_emails, all_phones_from_home_page=all_phones)
def select_group_in_filter_by_id(self, group_id):
wd = self.app.wd
self.app.open_home_page()
# wd.find_element_by_name("group").click()
select = Select(wd.find_element_by_xpath('//select[@name="group"]'))
select.select_by_value(group_id)
def delete_contact_from_group(self, id):
wd = self.app.wd
self.select_contact_by_id(id)
wd.find_element_by_xpath('//input[@name="remove"]').click()
wd.find_element_by_xpath('//i//*[contains(text(),"group page")]').click()
self.contact_cache = None
| apache-2.0 | -7,223,912,260,295,392,000 | 41.937255 | 117 | 0.605717 | false |
Neamar/django | django/contrib/sessions/backends/file.py | 336 | 7715 | import datetime
import errno
import logging
import os
import shutil
import tempfile
from django.conf import settings
from django.contrib.sessions.backends.base import (
VALID_KEY_CHARS, CreateError, SessionBase,
)
from django.contrib.sessions.exceptions import InvalidSessionKey
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.utils import timezone
from django.utils.encoding import force_text
class SessionStore(SessionBase):
"""
Implements a file based session store.
"""
def __init__(self, session_key=None):
self.storage_path = type(self)._get_storage_path()
self.file_prefix = settings.SESSION_COOKIE_NAME
super(SessionStore, self).__init__(session_key)
@classmethod
def _get_storage_path(cls):
try:
return cls._storage_path
except AttributeError:
storage_path = getattr(settings, "SESSION_FILE_PATH", None)
if not storage_path:
storage_path = tempfile.gettempdir()
# Make sure the storage path is valid.
if not os.path.isdir(storage_path):
raise ImproperlyConfigured(
"The session storage path %r doesn't exist. Please set your"
" SESSION_FILE_PATH setting to an existing directory in which"
" Django can store session data." % storage_path)
cls._storage_path = storage_path
return storage_path
def _key_to_file(self, session_key=None):
"""
Get the file associated with this session key.
"""
if session_key is None:
session_key = self._get_or_create_session_key()
# Make sure we're not vulnerable to directory traversal. Session keys
# should always be md5s, so they should never contain directory
# components.
if not set(session_key).issubset(set(VALID_KEY_CHARS)):
raise InvalidSessionKey(
"Invalid characters in session key")
return os.path.join(self.storage_path, self.file_prefix + session_key)
def _last_modification(self):
"""
Return the modification time of the file storing the session's content.
"""
modification = os.stat(self._key_to_file()).st_mtime
if settings.USE_TZ:
modification = datetime.datetime.utcfromtimestamp(modification)
modification = modification.replace(tzinfo=timezone.utc)
else:
modification = datetime.datetime.fromtimestamp(modification)
return modification
def load(self):
session_data = {}
try:
with open(self._key_to_file(), "rb") as session_file:
file_data = session_file.read()
# Don't fail if there is no data in the session file.
# We may have opened the empty placeholder file.
if file_data:
try:
session_data = self.decode(file_data)
except (EOFError, SuspiciousOperation) as e:
if isinstance(e, SuspiciousOperation):
logger = logging.getLogger('django.security.%s' %
e.__class__.__name__)
logger.warning(force_text(e))
self.create()
# Remove expired sessions.
expiry_age = self.get_expiry_age(
modification=self._last_modification(),
expiry=session_data.get('_session_expiry'))
if expiry_age < 0:
session_data = {}
self.delete()
self.create()
except (IOError, SuspiciousOperation):
self._session_key = None
return session_data
def create(self):
while True:
self._session_key = self._get_new_session_key()
try:
self.save(must_create=True)
except CreateError:
continue
self.modified = True
return
def save(self, must_create=False):
if self.session_key is None:
return self.create()
# Get the session data now, before we start messing
# with the file it is stored within.
session_data = self._get_session(no_load=must_create)
session_file_name = self._key_to_file()
try:
# Make sure the file exists. If it does not already exist, an
# empty placeholder file is created.
flags = os.O_WRONLY | os.O_CREAT | getattr(os, 'O_BINARY', 0)
if must_create:
flags |= os.O_EXCL
fd = os.open(session_file_name, flags)
os.close(fd)
except OSError as e:
if must_create and e.errno == errno.EEXIST:
raise CreateError
raise
# Write the session file without interfering with other threads
# or processes. By writing to an atomically generated temporary
# file and then using the atomic os.rename() to make the complete
# file visible, we avoid having to lock the session file, while
# still maintaining its integrity.
#
# Note: Locking the session file was explored, but rejected in part
# because in order to be atomic and cross-platform, it required a
# long-lived lock file for each session, doubling the number of
# files in the session storage directory at any given time. This
# rename solution is cleaner and avoids any additional overhead
# when reading the session data, which is the more common case
# unless SESSION_SAVE_EVERY_REQUEST = True.
#
# See ticket #8616.
dir, prefix = os.path.split(session_file_name)
try:
output_file_fd, output_file_name = tempfile.mkstemp(dir=dir,
prefix=prefix + '_out_')
renamed = False
try:
try:
os.write(output_file_fd, self.encode(session_data).encode())
finally:
os.close(output_file_fd)
# This will atomically rename the file (os.rename) if the OS
# supports it. Otherwise this will result in a shutil.copy2
# and os.unlink (for example on Windows). See #9084.
shutil.move(output_file_name, session_file_name)
renamed = True
finally:
if not renamed:
os.unlink(output_file_name)
except (OSError, IOError, EOFError):
pass
def exists(self, session_key):
return os.path.exists(self._key_to_file(session_key))
def delete(self, session_key=None):
if session_key is None:
if self.session_key is None:
return
session_key = self.session_key
try:
os.unlink(self._key_to_file(session_key))
except OSError:
pass
def clean(self):
pass
@classmethod
def clear_expired(cls):
storage_path = cls._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
for session_file in os.listdir(storage_path):
if not session_file.startswith(file_prefix):
continue
session_key = session_file[len(file_prefix):]
session = cls(session_key)
# When an expired session is loaded, its file is removed, and a
# new file is immediately created. Prevent this by disabling
# the create() method.
session.create = lambda: None
session.load()
| bsd-3-clause | 7,620,896,581,612,116,000 | 36.634146 | 82 | 0.578354 | false |
boundarydevices/android_external_chromium_org | build/android/pylib/valgrind_tools.py | 8 | 8691 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Classes in this file define additional actions that need to be taken to run a
test under some kind of runtime error detection tool.
The interface is intended to be used as follows.
1. For tests that simply run a native process (i.e. no activity is spawned):
Call tool.CopyFiles().
Prepend test command line with tool.GetTestWrapper().
2. For tests that spawn an activity:
Call tool.CopyFiles().
Call tool.SetupEnvironment().
Run the test as usual.
Call tool.CleanUpEnvironment().
"""
# pylint: disable=R0201
import glob
import logging
import os.path
import subprocess
import sys
from pylib.constants import DIR_SOURCE_ROOT
from pylib.device import device_errors
def SetChromeTimeoutScale(device, scale):
"""Sets the timeout scale in /data/local/tmp/chrome_timeout_scale to scale."""
path = '/data/local/tmp/chrome_timeout_scale'
if not scale or scale == 1.0:
# Delete if scale is None/0.0/1.0 since the default timeout scale is 1.0
device.RunShellCommand('rm %s' % path)
else:
device.old_interface.SetProtectedFileContents(path, '%f' % scale)
class BaseTool(object):
"""A tool that does nothing."""
def __init__(self):
"""Does nothing."""
pass
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ''
def GetUtilWrapper(self):
"""Returns the wrapper name for the utilities.
Returns:
A string that is to be prepended to the command line of utility
processes (forwarder, etc.).
"""
return ''
def CopyFiles(self):
"""Copies tool-specific files to the device, create directories, etc."""
pass
def SetupEnvironment(self):
"""Sets up the system environment for a test.
This is a good place to set system properties.
"""
pass
def CleanUpEnvironment(self):
"""Cleans up environment."""
pass
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 1.0
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return False
class AddressSanitizerTool(BaseTool):
"""AddressSanitizer tool."""
WRAPPER_NAME = '/system/bin/asanwrapper'
  # Disable memcmp overlap check. There are blobs (gl drivers)
# on some android devices that use memcmp on overlapping regions,
# nothing we can do about that.
EXTRA_OPTIONS = 'strict_memcmp=0,use_sigaltstack=1'
def __init__(self, device):
super(AddressSanitizerTool, self).__init__()
self._device = device
# Configure AndroidCommands to run utils (such as md5sum_bin) under ASan.
# This is required because ASan is a compiler-based tool, and md5sum
# includes instrumented code from base.
device.old_interface.SetUtilWrapper(self.GetUtilWrapper())
libs = glob.glob(os.path.join(DIR_SOURCE_ROOT,
'third_party/llvm-build/Release+Asserts/',
'lib/clang/*/lib/linux/',
'libclang_rt.asan-arm-android.so'))
assert len(libs) == 1
self._lib = libs[0]
def CopyFiles(self):
"""Copies ASan tools to the device."""
subprocess.call([os.path.join(DIR_SOURCE_ROOT,
'tools/android/asan/asan_device_setup.sh'),
'--device', self._device.old_interface.GetDevice(),
'--lib', self._lib,
'--extra-options', AddressSanitizerTool.EXTRA_OPTIONS])
self._device.WaitUntilFullyBooted()
def GetTestWrapper(self):
return AddressSanitizerTool.WRAPPER_NAME
def GetUtilWrapper(self):
"""Returns the wrapper for utilities, such as forwarder.
AddressSanitizer wrapper must be added to all instrumented binaries,
including forwarder and the like. This can be removed if such binaries
were built without instrumentation. """
return self.GetTestWrapper()
def SetupEnvironment(self):
try:
self._device.EnableRoot()
except device_errors.CommandFailedError as e:
# Try to set the timeout scale anyway.
# TODO(jbudorick) Handle this exception appropriately after interface
# conversions are finished.
logging.error(str(e))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
SetChromeTimeoutScale(self._device, None)
def GetTimeoutScale(self):
# Very slow startup.
return 20.0
class ValgrindTool(BaseTool):
"""Base abstract class for Valgrind tools."""
VG_DIR = '/data/local/tmp/valgrind'
VGLOGS_DIR = '/data/local/tmp/vglogs'
def __init__(self, device):
super(ValgrindTool, self).__init__()
self._device = device
# exactly 31 chars, SystemProperties::PROP_NAME_MAX
self._wrap_properties = ['wrap.com.google.android.apps.ch',
'wrap.org.chromium.native_test']
def CopyFiles(self):
"""Copies Valgrind tools to the device."""
self._device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VG_DIR, ValgrindTool.VG_DIR))
self._device.RunShellCommand(
'rm -r %s; mkdir %s' % (ValgrindTool.VGLOGS_DIR,
ValgrindTool.VGLOGS_DIR))
files = self.GetFilesForTool()
for f in files:
self._device.old_interface.PushIfNeeded(
os.path.join(DIR_SOURCE_ROOT, f),
os.path.join(ValgrindTool.VG_DIR, os.path.basename(f)))
def SetupEnvironment(self):
"""Sets up device environment."""
self._device.RunShellCommand('chmod 777 /data/local/tmp')
self._device.RunShellCommand('setenforce 0')
for prop in self._wrap_properties:
self._device.RunShellCommand(
'setprop %s "logwrapper %s"' % (prop, self.GetTestWrapper()))
SetChromeTimeoutScale(self._device, self.GetTimeoutScale())
def CleanUpEnvironment(self):
"""Cleans up device environment."""
for prop in self._wrap_properties:
self._device.RunShellCommand('setprop %s ""' % (prop,))
SetChromeTimeoutScale(self._device, None)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
raise NotImplementedError()
def NeedsDebugInfo(self):
"""Whether this tool requires debug info.
Returns:
True if this tool can not work with stripped binaries.
"""
return True
class MemcheckTool(ValgrindTool):
"""Memcheck tool."""
def __init__(self, device):
super(MemcheckTool, self).__init__(device)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper.sh',
'tools/valgrind/memcheck/suppressions.txt',
'tools/valgrind/memcheck/suppressions_android.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30
class TSanTool(ValgrindTool):
"""ThreadSanitizer tool. See http://code.google.com/p/data-race-test ."""
def __init__(self, device):
super(TSanTool, self).__init__(device)
def GetFilesForTool(self):
"""Returns a list of file names for the tool."""
return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',
'tools/valgrind/tsan/suppressions.txt',
'tools/valgrind/tsan/suppressions_android.txt',
'tools/valgrind/tsan/ignores.txt']
def GetTestWrapper(self):
"""Returns a string that is to be prepended to the test command line."""
return ValgrindTool.VG_DIR + '/' + 'vg-chrome-wrapper-tsan.sh'
def GetTimeoutScale(self):
"""Returns a multiplier that should be applied to timeout values."""
return 30.0
TOOL_REGISTRY = {
'memcheck': MemcheckTool,
'memcheck-renderer': MemcheckTool,
'tsan': TSanTool,
'tsan-renderer': TSanTool,
'asan': AddressSanitizerTool,
}
def CreateTool(tool_name, device):
"""Creates a tool with the specified tool name.
Args:
tool_name: Name of the tool to create.
device: A DeviceUtils instance.
Returns:
A tool for the specified tool_name.
"""
if not tool_name:
return BaseTool()
ctor = TOOL_REGISTRY.get(tool_name)
if ctor:
return ctor(device)
else:
print 'Unknown tool %s, available tools: %s' % (
tool_name, ', '.join(sorted(TOOL_REGISTRY.keys())))
sys.exit(1)
| bsd-3-clause | 5,358,655,682,575,429,000 | 30.26259 | 80 | 0.667817 | false |
LeartS/odoo | addons/website_blog/wizard/document_page_show_diff.py | 372 | 2184 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class showdiff(osv.osv_memory):
""" Disp[ay Difference for History """
_name = 'blog.post.history.show_diff'
def get_diff(self, cr, uid, context=None):
if context is None:
context = {}
history = self.pool.get('blog.post.history')
ids = context.get('active_ids', [])
diff = ""
if len(ids) == 2:
if ids[0] > ids[1]:
diff = history.getDiff(cr, uid, ids[1], ids[0])
else:
diff = history.getDiff(cr, uid, ids[0], ids[1])
elif len(ids) == 1:
old = history.browse(cr, uid, ids[0])
nids = history.search(cr, uid, [('post_id', '=', old.post_id.id)])
nids.sort()
diff = history.getDiff(cr, uid, ids[0], nids[-1])
else:
            raise osv.except_osv(_('Warning!'), _('You need to select at least one and at most two history revisions!'))
return diff
_columns = {
'diff': fields.text('Diff', readonly=True),
}
_defaults = {
'diff': get_diff
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | -1,258,101,384,202,335,000 | 34.803279 | 118 | 0.565476 | false |
TheWardoctor/Wardoctors-repo | script.module.uncoded/lib/resources/lib/sources/en/movie4uch.py | 6 | 4375 | # -*- coding: utf-8 -*-
'''
Covenant Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re, urlparse, urllib
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import dom_parser2
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['movie4u.ch']
self.base_link = 'http://movie4u.ch'
self.search_link = '/?s=%s'
def movie(self, imdb, title, localtitle, aliases, year):
try:
clean_title = cleantitle.geturl(title)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = client.request(search_url)
r = client.parseDOM(r, 'div', {'class': 'result-item'})
r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
client.parseDOM(i, 'img', ret='alt')[0],
dom_parser2.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
r = [(i[0].attrs['href'], i[1], i[2][0].content) for i in r if
(cleantitle.get(i[1]) == cleantitle.get(title) and i[2][0].content == year)]
url = r[0][0]
return url
except Exception:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
clean_title = cleantitle.geturl(tvshowtitle)
search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
r = client.request(search_url)
r = client.parseDOM(r, 'div', {'class': 'result-item'})
r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
client.parseDOM(i, 'img', ret='alt')[0],
dom_parser2.parse_dom(i, 'span', attrs={'class': 'year'})) for i in r]
r = [(i[0].attrs['href'], i[1], i[2][0].content) for i in r if
(cleantitle.get(i[1]) == cleantitle.get(tvshowtitle) and i[2][0].content == year)]
url = source_utils.strip_domain(r[0][0])
return url
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
t = url.split('/')[2]
url = self.base_link + '/episodes/%s-%dx%d' % (t, int(season), int(episode))
return url
except:
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
r = client.request(url)
try:
data = client.parseDOM(r, 'div', attrs={'class': 'playex'})
data = [client.parseDOM(i, 'iframe', ret='src') for i in data if i]
try:
for url in data[0]:
quality, info = source_utils.get_release_quality(url, None)
valid, host = source_utils.is_host_valid(url,hostDict)
if not valid: continue
host = host.encode('utf-8')
sources.append({
'source': host,
'quality': quality,
'language': 'en',
'url': url.replace('\/', '/'),
'direct': False,
'debridonly': False
})
except:
pass
except:
pass
return sources
except Exception:
return
def resolve(self, url):
return url | apache-2.0 | -7,096,783,112,712,064,000 | 36.724138 | 107 | 0.526629 | false |
pam-bot/SMSQuery | lib/flask/testsuite/helpers.py | 405 | 21973 | # -*- coding: utf-8 -*-
"""
flask.testsuite.helpers
~~~~~~~~~~~~~~~~~~~~~~~
Various helpers.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
import flask
import unittest
from logging import StreamHandler
from flask.testsuite import FlaskTestCase, catch_warnings, catch_stderr
from werkzeug.http import parse_cache_control_header, parse_options_header
from flask._compat import StringIO, text_type
def has_encoding(name):
try:
import codecs
codecs.lookup(name)
return True
except LookupError:
return False
class JSONTestCase(FlaskTestCase):
def test_json_bad_requests(self):
app = flask.Flask(__name__)
@app.route('/json', methods=['POST'])
def return_json():
return flask.jsonify(foo=text_type(flask.request.get_json()))
c = app.test_client()
rv = c.post('/json', data='malformed', content_type='application/json')
self.assert_equal(rv.status_code, 400)
def test_json_body_encoding(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
return flask.request.get_json()
c = app.test_client()
resp = c.get('/', data=u'"Hällo Wörld"'.encode('iso-8859-15'),
content_type='application/json; charset=iso-8859-15')
self.assert_equal(resp.data, u'Hällo Wörld'.encode('utf-8'))
def test_jsonify(self):
d = dict(a=23, b=42, c=[1, 2, 3])
app = flask.Flask(__name__)
@app.route('/kw')
def return_kwargs():
return flask.jsonify(**d)
@app.route('/dict')
def return_dict():
return flask.jsonify(d)
c = app.test_client()
for url in '/kw', '/dict':
rv = c.get(url)
self.assert_equal(rv.mimetype, 'application/json')
self.assert_equal(flask.json.loads(rv.data), d)
def test_json_as_unicode(self):
app = flask.Flask(__name__)
app.config['JSON_AS_ASCII'] = True
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
self.assert_equal(rv, '"\\u2603"')
app.config['JSON_AS_ASCII'] = False
with app.app_context():
rv = flask.json.dumps(u'\N{SNOWMAN}')
self.assert_equal(rv, u'"\u2603"')
def test_json_attr(self):
app = flask.Flask(__name__)
@app.route('/add', methods=['POST'])
def add():
json = flask.request.get_json()
return text_type(json['a'] + json['b'])
c = app.test_client()
rv = c.post('/add', data=flask.json.dumps({'a': 1, 'b': 2}),
content_type='application/json')
self.assert_equal(rv.data, b'3')
def test_template_escaping(self):
app = flask.Flask(__name__)
render = flask.render_template_string
with app.test_request_context():
rv = flask.json.htmlsafe_dumps('</script>')
self.assert_equal(rv, u'"\\u003c/script\\u003e"')
self.assert_equal(type(rv), text_type)
rv = render('{{ "</script>"|tojson }}')
self.assert_equal(rv, '"\\u003c/script\\u003e"')
rv = render('{{ "<\0/script>"|tojson }}')
self.assert_equal(rv, '"\\u003c\\u0000/script\\u003e"')
rv = render('{{ "<!--<script>"|tojson }}')
self.assert_equal(rv, '"\\u003c!--\\u003cscript\\u003e"')
rv = render('{{ "&"|tojson }}')
self.assert_equal(rv, '"\\u0026"')
rv = render('{{ "\'"|tojson }}')
self.assert_equal(rv, '"\\u0027"')
rv = render("<a ng-data='{{ data|tojson }}'></a>",
data={'x': ["foo", "bar", "baz'"]})
self.assert_equal(rv,
'<a ng-data=\'{"x": ["foo", "bar", "baz\\u0027"]}\'></a>')
def test_json_customization(self):
class X(object):
def __init__(self, val):
self.val = val
class MyEncoder(flask.json.JSONEncoder):
def default(self, o):
if isinstance(o, X):
return '<%d>' % o.val
return flask.json.JSONEncoder.default(self, o)
class MyDecoder(flask.json.JSONDecoder):
def __init__(self, *args, **kwargs):
kwargs.setdefault('object_hook', self.object_hook)
flask.json.JSONDecoder.__init__(self, *args, **kwargs)
def object_hook(self, obj):
if len(obj) == 1 and '_foo' in obj:
return X(obj['_foo'])
return obj
app = flask.Flask(__name__)
app.testing = True
app.json_encoder = MyEncoder
app.json_decoder = MyDecoder
@app.route('/', methods=['POST'])
def index():
return flask.json.dumps(flask.request.get_json()['x'])
c = app.test_client()
rv = c.post('/', data=flask.json.dumps({
'x': {'_foo': 42}
}), content_type='application/json')
self.assertEqual(rv.data, b'"<42>"')
def test_modified_url_encoding(self):
class ModifiedRequest(flask.Request):
url_charset = 'euc-kr'
app = flask.Flask(__name__)
app.testing = True
app.request_class = ModifiedRequest
app.url_map.charset = 'euc-kr'
@app.route('/')
def index():
return flask.request.args['foo']
rv = app.test_client().get(u'/?foo=정상처리'.encode('euc-kr'))
self.assert_equal(rv.status_code, 200)
self.assert_equal(rv.data, u'정상처리'.encode('utf-8'))
if not has_encoding('euc-kr'):
test_modified_url_encoding = None
def test_json_key_sorting(self):
app = flask.Flask(__name__)
app.testing = True
self.assert_equal(app.config['JSON_SORT_KEYS'], True)
d = dict.fromkeys(range(20), 'foo')
@app.route('/')
def index():
return flask.jsonify(values=d)
c = app.test_client()
rv = c.get('/')
lines = [x.strip() for x in rv.data.strip().decode('utf-8').splitlines()]
self.assert_equal(lines, [
'{',
'"values": {',
'"0": "foo",',
'"1": "foo",',
'"2": "foo",',
'"3": "foo",',
'"4": "foo",',
'"5": "foo",',
'"6": "foo",',
'"7": "foo",',
'"8": "foo",',
'"9": "foo",',
'"10": "foo",',
'"11": "foo",',
'"12": "foo",',
'"13": "foo",',
'"14": "foo",',
'"15": "foo",',
'"16": "foo",',
'"17": "foo",',
'"18": "foo",',
'"19": "foo"',
'}',
'}'
])
class SendfileTestCase(FlaskTestCase):
def test_send_file_regular(self):
app = flask.Flask(__name__)
with app.test_request_context():
rv = flask.send_file('static/index.html')
self.assert_true(rv.direct_passthrough)
self.assert_equal(rv.mimetype, 'text/html')
with app.open_resource('static/index.html') as f:
rv.direct_passthrough = False
self.assert_equal(rv.data, f.read())
rv.close()
def test_send_file_xsendfile(self):
app = flask.Flask(__name__)
app.use_x_sendfile = True
with app.test_request_context():
rv = flask.send_file('static/index.html')
self.assert_true(rv.direct_passthrough)
self.assert_in('x-sendfile', rv.headers)
self.assert_equal(rv.headers['x-sendfile'],
os.path.join(app.root_path, 'static/index.html'))
self.assert_equal(rv.mimetype, 'text/html')
rv.close()
def test_send_file_object(self):
app = flask.Flask(__name__)
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
rv.direct_passthrough = False
with app.open_resource('static/index.html') as f:
self.assert_equal(rv.data, f.read())
self.assert_equal(rv.mimetype, 'text/html')
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
app.use_x_sendfile = True
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f)
self.assert_equal(rv.mimetype, 'text/html')
self.assert_in('x-sendfile', rv.headers)
self.assert_equal(rv.headers['x-sendfile'],
os.path.join(app.root_path, 'static/index.html'))
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
app.use_x_sendfile = False
with app.test_request_context():
with catch_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f)
rv.direct_passthrough = False
self.assert_equal(rv.data, b'Test')
self.assert_equal(rv.mimetype, 'application/octet-stream')
rv.close()
# etags
self.assert_equal(len(captured), 1)
with catch_warnings() as captured:
f = StringIO('Test')
rv = flask.send_file(f, mimetype='text/plain')
rv.direct_passthrough = False
self.assert_equal(rv.data, b'Test')
self.assert_equal(rv.mimetype, 'text/plain')
rv.close()
# etags
self.assert_equal(len(captured), 1)
app.use_x_sendfile = True
with catch_warnings() as captured:
with app.test_request_context():
f = StringIO('Test')
rv = flask.send_file(f)
self.assert_not_in('x-sendfile', rv.headers)
rv.close()
# etags
self.assert_equal(len(captured), 1)
def test_attachment(self):
app = flask.Flask(__name__)
with catch_warnings() as captured:
with app.test_request_context():
f = open(os.path.join(app.root_path, 'static/index.html'))
rv = flask.send_file(f, as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
rv.close()
# mimetypes + etag
self.assert_equal(len(captured), 2)
with app.test_request_context():
self.assert_equal(options['filename'], 'index.html')
rv = flask.send_file('static/index.html', as_attachment=True)
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
self.assert_equal(options['filename'], 'index.html')
rv.close()
with app.test_request_context():
rv = flask.send_file(StringIO('Test'), as_attachment=True,
attachment_filename='index.txt',
add_etags=False)
self.assert_equal(rv.mimetype, 'text/plain')
value, options = parse_options_header(rv.headers['Content-Disposition'])
self.assert_equal(value, 'attachment')
self.assert_equal(options['filename'], 'index.txt')
rv.close()
def test_static_file(self):
app = flask.Flask(__name__)
# default cache timeout is 12 hours
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 12 * 60 * 60)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 12 * 60 * 60)
rv.close()
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 3600
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 3600)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 3600)
rv.close()
class StaticFileApp(flask.Flask):
def get_send_file_max_age(self, filename):
return 10
app = StaticFileApp(__name__)
with app.test_request_context():
# Test with static file handler.
rv = app.send_static_file('index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 10)
rv.close()
# Test again with direct use of send_file utility.
rv = flask.send_file('static/index.html')
cc = parse_cache_control_header(rv.headers['Cache-Control'])
self.assert_equal(cc.max_age, 10)
rv.close()
class LoggingTestCase(FlaskTestCase):
def test_logger_cache(self):
app = flask.Flask(__name__)
logger1 = app.logger
self.assert_true(app.logger is logger1)
self.assert_equal(logger1.name, __name__)
app.logger_name = __name__ + '/test_logger_cache'
self.assert_true(app.logger is not logger1)
def test_debug_log(self):
app = flask.Flask(__name__)
app.debug = True
@app.route('/')
def index():
app.logger.warning('the standard library is dead')
app.logger.debug('this is a debug statement')
return ''
@app.route('/exc')
def exc():
1 // 0
with app.test_client() as c:
with catch_stderr() as err:
c.get('/')
out = err.getvalue()
self.assert_in('WARNING in helpers [', out)
self.assert_in(os.path.basename(__file__.rsplit('.', 1)[0] + '.py'), out)
self.assert_in('the standard library is dead', out)
self.assert_in('this is a debug statement', out)
with catch_stderr() as err:
try:
c.get('/exc')
except ZeroDivisionError:
pass
else:
self.assert_true(False, 'debug log ate the exception')
def test_debug_log_override(self):
app = flask.Flask(__name__)
app.debug = True
app.logger_name = 'flask_tests/test_debug_log_override'
app.logger.level = 10
self.assert_equal(app.logger.level, 10)
def test_exception_logging(self):
out = StringIO()
app = flask.Flask(__name__)
app.logger_name = 'flask_tests/test_exception_logging'
app.logger.addHandler(StreamHandler(out))
@app.route('/')
def index():
1 // 0
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_in(b'Internal Server Error', rv.data)
err = out.getvalue()
self.assert_in('Exception on / [GET]', err)
self.assert_in('Traceback (most recent call last):', err)
self.assert_in('1 // 0', err)
self.assert_in('ZeroDivisionError:', err)
def test_processor_exceptions(self):
app = flask.Flask(__name__)
@app.before_request
def before_request():
if trigger == 'before':
1 // 0
@app.after_request
def after_request(response):
if trigger == 'after':
1 // 0
return response
@app.route('/')
def index():
return 'Foo'
@app.errorhandler(500)
def internal_server_error(e):
return 'Hello Server Error', 500
for trigger in 'before', 'after':
rv = app.test_client().get('/')
self.assert_equal(rv.status_code, 500)
self.assert_equal(rv.data, b'Hello Server Error')
def test_url_for_with_anchor(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index', _anchor='x y'),
'/#x%20y')
def test_url_for_with_scheme(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_equal(flask.url_for('index',
_external=True,
_scheme='https'),
'https://localhost/')
def test_url_for_with_scheme_not_external(self):
app = flask.Flask(__name__)
@app.route('/')
def index():
return '42'
with app.test_request_context():
self.assert_raises(ValueError,
flask.url_for,
'index',
_scheme='https')
def test_url_with_method(self):
from flask.views import MethodView
app = flask.Flask(__name__)
class MyView(MethodView):
def get(self, id=None):
if id is None:
return 'List'
return 'Get %d' % id
def post(self):
return 'Create'
myview = MyView.as_view('myview')
app.add_url_rule('/myview/', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/<int:id>', methods=['GET'],
view_func=myview)
app.add_url_rule('/myview/create', methods=['POST'],
view_func=myview)
with app.test_request_context():
self.assert_equal(flask.url_for('myview', _method='GET'),
'/myview/')
self.assert_equal(flask.url_for('myview', id=42, _method='GET'),
'/myview/42')
self.assert_equal(flask.url_for('myview', _method='POST'),
'/myview/create')
class NoImportsTestCase(FlaskTestCase):
"""Test Flasks are created without import.
Avoiding ``__import__`` helps create Flask instances where there are errors
at import time. Those runtime errors will be apparent to the user soon
enough, but tools which build Flask instances meta-programmatically benefit
from a Flask which does not ``__import__``. Instead of importing to
retrieve file paths or metadata on a module or package, use the pkgutil and
imp modules in the Python standard library.
"""
def test_name_with_import_error(self):
try:
flask.Flask('importerror')
except NotImplementedError:
self.fail('Flask(import_name) is importing import_name.')
class StreamingTestCase(FlaskTestCase):
def test_streaming_with_context(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(generate()))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_as_decorator(self):
app = flask.Flask(__name__)
app.testing = True
@app.route('/')
def index():
@flask.stream_with_context
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(generate())
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
def test_streaming_with_context_and_custom_close(self):
app = flask.Flask(__name__)
app.testing = True
called = []
class Wrapper(object):
def __init__(self, gen):
self._gen = gen
def __iter__(self):
return self
def close(self):
called.append(42)
def __next__(self):
return next(self._gen)
next = __next__
@app.route('/')
def index():
def generate():
yield 'Hello '
yield flask.request.args['name']
yield '!'
return flask.Response(flask.stream_with_context(
Wrapper(generate())))
c = app.test_client()
rv = c.get('/?name=World')
self.assertEqual(rv.data, b'Hello World!')
self.assertEqual(called, [42])
def suite():
suite = unittest.TestSuite()
if flask.json_available:
suite.addTest(unittest.makeSuite(JSONTestCase))
suite.addTest(unittest.makeSuite(SendfileTestCase))
suite.addTest(unittest.makeSuite(LoggingTestCase))
suite.addTest(unittest.makeSuite(NoImportsTestCase))
suite.addTest(unittest.makeSuite(StreamingTestCase))
return suite
| gpl-2.0 | 8,825,034,492,708,832,000 | 36.020236 | 89 | 0.519656 | false |
NeCTAR-RC/horizon | openstack_dashboard/test/unit/api/test_network.py | 3 | 6498 | # Copyright 2013 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import mock
import netaddr
from django.test.utils import override_settings
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
class NetworkApiNeutronTests(test.APIMockTestCase):
def setUp(self):
super(NetworkApiNeutronTests, self).setUp()
neutronclient = mock.patch.object(api.neutron, 'neutronclient').start()
self.qclient = neutronclient.return_value
def _get_expected_addresses(self, server, no_fip_expected=True):
server_ports = self.ports.filter(device_id=server.id)
addresses = collections.defaultdict(list)
for p in server_ports:
net_name = self.networks.get(id=p['network_id']).name
for ip in p.fixed_ips:
version = netaddr.IPAddress(ip['ip_address']).version
addresses[net_name].append(
{'version': version,
'addr': ip['ip_address'],
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'fixed'})
if no_fip_expected:
continue
fips = self.floating_ips.filter(port_id=p['id'])
if not fips:
continue
# Only one FIP should match.
fip = fips[0]
addresses[net_name].append(
{'version': 4,
'addr': fip.floating_ip_address,
'OS-EXT-IPS-MAC:mac_addr': p.mac_address,
'OS-EXT-IPS:type': 'floating'})
return addresses
def _check_server_address(self, res_server_data, no_fip_expected=False):
expected_addresses = self._get_expected_addresses(res_server_data,
no_fip_expected)
self.assertEqual(len(expected_addresses),
len(res_server_data.addresses))
for net, addresses in expected_addresses.items():
self.assertIn(net, res_server_data.addresses)
self.assertEqual(addresses, res_server_data.addresses[net])
def _test_servers_update_addresses(self, router_enabled=True):
tenant_id = self.request.user.tenant_id
servers = self.servers.list()
server_ids = tuple([server.id for server in servers])
server_ports = [p for p in self.api_ports.list()
if p['device_id'] in server_ids]
server_port_ids = tuple([p['id'] for p in server_ports])
if router_enabled:
assoc_fips = [fip for fip in self.api_floating_ips.list()
if fip['port_id'] in server_port_ids]
server_network_ids = [p['network_id'] for p in server_ports]
server_networks = [net for net in self.api_networks.list()
if net['id'] in server_network_ids]
list_ports_retvals = [{'ports': server_ports}]
self.qclient.list_ports.side_effect = list_ports_retvals
if router_enabled:
self.qclient.list_floatingips.return_value = {'floatingips':
assoc_fips}
list_ports_retvals.append({'ports': self.api_ports.list()})
self.qclient.list_networks.return_value = {'networks': server_networks}
self.qclient.list_subnets.return_value = {'subnets':
self.api_subnets.list()}
api.network.servers_update_addresses(self.request, servers)
self.assertEqual(self.servers.count(), len(servers))
self.assertEqual([server.id for server in self.servers.list()],
[server.id for server in servers])
no_fip_expected = not router_enabled
# server[0] has one fixed IP and one floating IP
        # if router ext is enabled.
self._check_server_address(servers[0], no_fip_expected)
        # The expected addresses are calculated above; here we also examine the result manually once.
addrs = servers[0].addresses['net1']
if router_enabled:
self.assertEqual(3, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
self.assertEqual('fixed', addrs[1]['OS-EXT-IPS:type'])
self.assertEqual('floating', addrs[2]['OS-EXT-IPS:type'])
else:
self.assertEqual(2, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
self.assertEqual('fixed', addrs[1]['OS-EXT-IPS:type'])
# server[1] has one fixed IP.
self._check_server_address(servers[1], no_fip_expected)
# manual check.
addrs = servers[1].addresses['net2']
self.assertEqual(1, len(addrs))
self.assertEqual('fixed', addrs[0]['OS-EXT-IPS:type'])
# server[2] has no corresponding ports in neutron_data,
# so it should be an empty dict.
self.assertFalse(servers[2].addresses)
expected_list_ports = [mock.call(device_id=server_ids)]
if router_enabled:
self.qclient.list_floatingips.assert_called_once_with(
tenant_id=tenant_id, port_id=server_port_ids)
expected_list_ports.append(mock.call(tenant_id=tenant_id))
else:
self.assertEqual(0, self.qclient.list_floatingips.call_count)
self.qclient.list_ports.assert_has_calls(expected_list_ports)
self.qclient.list_networks.assert_called_once_with(
id=frozenset(server_network_ids))
self.qclient.list_subnets.assert_called_once_with()
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': True})
def test_servers_update_addresses(self):
self._test_servers_update_addresses()
@override_settings(OPENSTACK_NEUTRON_NETWORK={'enable_router': False})
def test_servers_update_addresses_router_disabled(self):
self._test_servers_update_addresses(router_enabled=False)
| apache-2.0 | 5,954,110,665,244,904,000 | 44.125 | 79 | 0.60911 | false |
shakamunyi/tensorflow | tensorflow/python/ops/sets.py | 109 | 1167 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tensorflow set operations.
@@set_size
@@set_intersection
@@set_union
@@set_difference
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.sets_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
_allowed_symbols = []
remove_undocumented(__name__, _allowed_symbols)
| apache-2.0 | 9,110,681,560,006,499,000 | 32.342857 | 80 | 0.709512 | false |
381426068/MissionPlanner | Lib/ihooks.py | 59 | 19540 | """Import hook support.
Consistent use of this module will make it possible to change the
different mechanisms involved in loading modules independently.
While the built-in module imp exports interfaces to the built-in
module searching and loading algorithm, and it is possible to replace
the built-in function __import__ in order to change the semantics of
the import statement, until now it has been difficult to combine the
effect of different __import__ hacks, like loading modules from URLs
by rimport.py, or restricted execution by rexec.py.
This module defines three new concepts:
1) A "file system hooks" class provides an interface to a filesystem.
One hooks class is defined (Hooks), which uses the interface provided
by standard modules os and os.path. It should be used as the base
class for other hooks classes.
2) A "module loader" class provides an interface to search for a
module in a search path and to load it. It defines a method which
searches for a module in a single directory; by overriding this method
one can redefine the details of the search. If the directory is None,
built-in and frozen modules are searched instead.
Two module loader class are defined, both implementing the search
strategy used by the built-in __import__ function: ModuleLoader uses
the imp module's find_module interface, while HookableModuleLoader
uses a file system hooks class to interact with the file system. Both
use the imp module's load_* interfaces to actually load the module.
3) A "module importer" class provides an interface to import a
module, as well as interfaces to reload and unload a module. It also
provides interfaces to install and uninstall itself instead of the
default __import__ and reload (and unload) functions.
One module importer class is defined (ModuleImporter), which uses a
module loader instance passed in (by default HookableModuleLoader is
instantiated).
The classes defined here should be used as base classes for extended
functionality along those lines.
If a module importer class supports dotted names, its import_module()
must return a different value depending on whether it is called on
behalf of a "from ... import ..." statement or not. (This is caused
by the way the __import__ hook is used by the Python interpreter.) It
would also be wise to install a different version of reload().
"""
from warnings import warnpy3k, warn
warnpy3k("the ihooks module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
import __builtin__
import imp
import os
import sys
__all__ = ["BasicModuleLoader","Hooks","ModuleLoader","FancyModuleLoader",
"BasicModuleImporter","ModuleImporter","install","uninstall"]
VERBOSE = 0
from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
BUILTIN_MODULE = C_BUILTIN
FROZEN_MODULE = PY_FROZEN
class _Verbose:
def __init__(self, verbose = VERBOSE):
self.verbose = verbose
def get_verbose(self):
return self.verbose
def set_verbose(self, verbose):
self.verbose = verbose
# XXX The following is an experimental interface
def note(self, *args):
if self.verbose:
self.message(*args)
def message(self, format, *args):
if args:
print format%args
else:
print format
class BasicModuleLoader(_Verbose):
"""Basic module loader.
This provides the same functionality as built-in import. It
doesn't deal with checking sys.modules -- all it provides is
find_module() and a load_module(), as well as find_module_in_dir()
which searches just one directory, and can be overridden by a
derived class to change the module search algorithm when the basic
dependency on sys.path is unchanged.
The interface is a little more convenient than imp's:
find_module(name, [path]) returns None or 'stuff', and
load_module(name, stuff) loads the module.
"""
def find_module(self, name, path = None):
if path is None:
path = [None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
if stuff: return stuff
return None
def default_path(self):
return sys.path
def find_module_in_dir(self, name, dir):
if dir is None:
return self.find_builtin_module(name)
else:
try:
return imp.find_module(name, [dir])
except ImportError:
return None
def find_builtin_module(self, name):
# XXX frozen packages?
if imp.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if imp.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def load_module(self, name, stuff):
file, filename, info = stuff
try:
return imp.load_module(name, file, filename, info)
finally:
if file: file.close()
class Hooks(_Verbose):
"""Hooks into the filesystem and interpreter.
By deriving a subclass you can redefine your filesystem interface,
e.g. to merge it with the URL space.
This base class behaves just like the native filesystem.
"""
# imp interface
def get_suffixes(self): return imp.get_suffixes()
def new_module(self, name): return imp.new_module(name)
def is_builtin(self, name): return imp.is_builtin(name)
def init_builtin(self, name): return imp.init_builtin(name)
def is_frozen(self, name): return imp.is_frozen(name)
def init_frozen(self, name): return imp.init_frozen(name)
def get_frozen_object(self, name): return imp.get_frozen_object(name)
def load_source(self, name, filename, file=None):
return imp.load_source(name, filename, file)
def load_compiled(self, name, filename, file=None):
return imp.load_compiled(name, filename, file)
def load_dynamic(self, name, filename, file=None):
return imp.load_dynamic(name, filename, file)
def load_package(self, name, filename, file=None):
return imp.load_module(name, file, filename, ("", "", PKG_DIRECTORY))
def add_module(self, name):
d = self.modules_dict()
if name in d: return d[name]
d[name] = m = self.new_module(name)
return m
# sys interface
def modules_dict(self): return sys.modules
def default_path(self): return sys.path
def path_split(self, x): return os.path.split(x)
def path_join(self, x, y): return os.path.join(x, y)
def path_isabs(self, x): return os.path.isabs(x)
# etc.
def path_exists(self, x): return os.path.exists(x)
def path_isdir(self, x): return os.path.isdir(x)
def path_isfile(self, x): return os.path.isfile(x)
def path_islink(self, x): return os.path.islink(x)
# etc.
def openfile(self, *x): return open(*x)
openfile_error = IOError
def listdir(self, x): return os.listdir(x)
listdir_error = os.error
# etc.
class ModuleLoader(BasicModuleLoader):
"""Default module loader; uses file system hooks.
By defining suitable hooks, you might be able to load modules from
other sources than the file system, e.g. from compressed or
encrypted files, tar files or (if you're brave!) URLs.
"""
def __init__(self, hooks = None, verbose = VERBOSE):
BasicModuleLoader.__init__(self, verbose)
self.hooks = hooks or Hooks(verbose)
def default_path(self):
return self.hooks.default_path()
def modules_dict(self):
return self.hooks.modules_dict()
def get_hooks(self):
return self.hooks
def set_hooks(self, hooks):
self.hooks = hooks
def find_builtin_module(self, name):
# XXX frozen packages?
if self.hooks.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if self.hooks.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def find_module_in_dir(self, name, dir, allow_packages=1):
if dir is None:
return self.find_builtin_module(name)
if allow_packages:
fullname = self.hooks.path_join(dir, name)
if self.hooks.path_isdir(fullname):
stuff = self.find_module_in_dir("__init__", fullname, 0)
if stuff:
file = stuff[0]
if file: file.close()
return None, fullname, ('', '', PKG_DIRECTORY)
for info in self.hooks.get_suffixes():
suff, mode, type = info
fullname = self.hooks.path_join(dir, name+suff)
try:
fp = self.hooks.openfile(fullname, mode)
return fp, fullname, info
except self.hooks.openfile_error:
pass
return None
def load_module(self, name, stuff):
file, filename, info = stuff
(suff, mode, type) = info
try:
if type == BUILTIN_MODULE:
return self.hooks.init_builtin(name)
if type == FROZEN_MODULE:
return self.hooks.init_frozen(name)
if type == C_EXTENSION:
m = self.hooks.load_dynamic(name, filename, file)
elif type == PY_SOURCE:
m = self.hooks.load_source(name, filename, file)
elif type == PY_COMPILED:
m = self.hooks.load_compiled(name, filename, file)
elif type == PKG_DIRECTORY:
m = self.hooks.load_package(name, filename, file)
else:
raise ImportError, "Unrecognized module type (%r) for %s" % \
(type, name)
finally:
if file: file.close()
m.__file__ = filename
return m
class FancyModuleLoader(ModuleLoader):
"""Fancy module loader -- parses and execs the code itself."""
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
realfilename = filename
path = None
if type == PKG_DIRECTORY:
initstuff = self.find_module_in_dir("__init__", filename, 0)
if not initstuff:
raise ImportError, "No __init__ module in package %s" % name
initfile, initfilename, initinfo = initstuff
initsuff, initmode, inittype = initinfo
if inittype not in (PY_COMPILED, PY_SOURCE):
if initfile: initfile.close()
raise ImportError, \
"Bad type (%r) for __init__ module in package %s" % (
inittype, name)
path = [filename]
file = initfile
realfilename = initfilename
type = inittype
if type == FROZEN_MODULE:
code = self.hooks.get_frozen_object(name)
elif type == PY_COMPILED:
import marshal
file.seek(8)
code = marshal.load(file)
elif type == PY_SOURCE:
data = file.read()
code = compile(data, realfilename, 'exec')
else:
return ModuleLoader.load_module(self, name, stuff)
m = self.hooks.add_module(name)
if path:
m.__path__ = path
m.__file__ = filename
try:
exec code in m.__dict__
except:
d = self.hooks.modules_dict()
if name in d:
del d[name]
raise
return m
class BasicModuleImporter(_Verbose):
"""Basic module importer; uses module loader.
This provides basic import facilities but no package imports.
"""
def __init__(self, loader = None, verbose = VERBOSE):
_Verbose.__init__(self, verbose)
self.loader = loader or ModuleLoader(None, verbose)
self.modules = self.loader.modules_dict()
def get_loader(self):
return self.loader
def set_loader(self, loader):
self.loader = loader
def get_hooks(self):
return self.loader.get_hooks()
def set_hooks(self, hooks):
return self.loader.set_hooks(hooks)
def import_module(self, name, globals={}, locals={}, fromlist=[]):
name = str(name)
if name in self.modules:
return self.modules[name] # Fast path
stuff = self.loader.find_module(name)
if not stuff:
raise ImportError, "No module named %s" % name
return self.loader.load_module(name, stuff)
def reload(self, module, path = None):
name = str(module.__name__)
stuff = self.loader.find_module(name, path)
if not stuff:
raise ImportError, "Module %s not found for reload" % name
return self.loader.load_module(name, stuff)
def unload(self, module):
del self.modules[str(module.__name__)]
# XXX Should this try to clear the module's namespace?
def install(self):
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = self.import_module
__builtin__.reload = self.reload
__builtin__.unload = self.unload
def uninstall(self):
__builtin__.__import__ = self.save_import_module
__builtin__.reload = self.save_reload
__builtin__.unload = self.save_unload
if not __builtin__.unload:
del __builtin__.unload
class ModuleImporter(BasicModuleImporter):
"""A module importer that supports packages."""
def import_module(self, name, globals=None, locals=None, fromlist=None,
level=-1):
parent = self.determine_parent(globals, level)
q, tail = self.find_head_package(parent, str(name))
m = self.load_tail(q, tail)
if not fromlist:
return q
if hasattr(m, "__path__"):
self.ensure_fromlist(m, fromlist)
return m
def determine_parent(self, globals, level=-1):
if not globals or not level:
return None
pkgname = globals.get('__package__')
if pkgname is not None:
if not pkgname and level > 0:
raise ValueError, 'Attempted relative import in non-package'
else:
# __package__ not set, figure it out and set it
modname = globals.get('__name__')
if modname is None:
return None
if "__path__" in globals:
# __path__ is set so modname is already the package name
pkgname = modname
else:
# normal module, work out package name if any
if '.' not in modname:
if level > 0:
raise ValueError, ('Attempted relative import in '
'non-package')
globals['__package__'] = None
return None
pkgname = modname.rpartition('.')[0]
globals['__package__'] = pkgname
if level > 0:
dot = len(pkgname)
for x in range(level, 1, -1):
try:
dot = pkgname.rindex('.', 0, dot)
except ValueError:
raise ValueError('attempted relative import beyond '
'top-level package')
pkgname = pkgname[:dot]
try:
return sys.modules[pkgname]
except KeyError:
if level < 1:
warn("Parent module '%s' not found while handling "
"absolute import" % pkgname, RuntimeWarning, 1)
return None
else:
raise SystemError, ("Parent module '%s' not loaded, cannot "
"perform relative import" % pkgname)
def find_head_package(self, parent, name):
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_it(head, qname, parent)
if q: return q, tail
if parent:
qname = head
parent = None
q = self.import_it(head, qname, parent)
if q: return q, tail
raise ImportError, "No module named '%s'" % qname
def load_tail(self, q, tail):
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_it(head, mname, m)
if not m:
raise ImportError, "No module named '%s'" % mname
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
for sub in fromlist:
if sub == "*":
if not recursive:
try:
all = m.__all__
except AttributeError:
pass
else:
self.ensure_fromlist(m, all, 1)
continue
if sub != "*" and not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_it(sub, subname, m)
if not submod:
raise ImportError, "No module named '%s'" % subname
def import_it(self, partname, fqname, parent, force_load=0):
if not partname:
# completely empty module name should only happen in
# 'from . import' or __import__("")
return parent
if not force_load:
try:
return self.modules[fqname]
except KeyError:
pass
try:
path = parent and parent.__path__
except AttributeError:
return None
partname = str(partname)
stuff = self.loader.find_module(partname, path)
if not stuff:
return None
fqname = str(fqname)
m = self.loader.load_module(fqname, stuff)
if parent:
setattr(parent, partname, m)
return m
def reload(self, module):
name = str(module.__name__)
if '.' not in name:
return self.import_it(name, name, None, force_load=1)
i = name.rfind('.')
pname = name[:i]
parent = self.modules[pname]
return self.import_it(name[i+1:], name, parent, force_load=1)
default_importer = None
current_importer = None
def install(importer = None):
global current_importer
current_importer = importer or default_importer or ModuleImporter()
current_importer.install()
def uninstall():
global current_importer
current_importer.uninstall()
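# ---------------------------------------------------------------------------
# Illustrative usage sketch (added example; not part of the original module).
# It only uses the classes defined above: a ModuleImporter wired to the
# default Hooks/FancyModuleLoader, installed for the duration of one import.
def _example_hooked_import(name='string'):
    importer = ModuleImporter(FancyModuleLoader(Hooks()))
    importer.install()            # __import__ now goes through the hook machinery
    try:
        return importer.import_module(name)
    finally:
        importer.uninstall()      # always restore the original __import__/reload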
| gpl-3.0 | -1,229,957,319,604,256,800 | 33.270758 | 77 | 0.566786 | false |
Batterfii/django | django/dispatch/weakref_backports.py | 414 | 2151 | """
weakref_backports is a partial backport of the weakref module for python
versions below 3.4.
Copyright (C) 2013 Python Software Foundation, see license.python.txt for
details.
The following changes were made to the original sources during backporting:
* Added `self` to `super` calls.
* Removed `from None` when raising exceptions.
"""
from weakref import ref
class WeakMethod(ref):
"""
A custom `weakref.ref` subclass which simulates a weak reference to
a bound method, working around the lifetime problem of bound methods.
"""
__slots__ = "_func_ref", "_meth_type", "_alive", "__weakref__"
def __new__(cls, meth, callback=None):
try:
obj = meth.__self__
func = meth.__func__
except AttributeError:
raise TypeError("argument should be a bound method, not {}"
.format(type(meth)))
def _cb(arg):
# The self-weakref trick is needed to avoid creating a reference
# cycle.
self = self_wr()
if self._alive:
self._alive = False
if callback is not None:
callback(self)
self = ref.__new__(cls, obj, _cb)
self._func_ref = ref(func, _cb)
self._meth_type = type(meth)
self._alive = True
self_wr = ref(self)
return self
def __call__(self):
obj = super(WeakMethod, self).__call__()
func = self._func_ref()
if obj is None or func is None:
return None
return self._meth_type(func, obj)
def __eq__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is other
return ref.__eq__(self, other) and self._func_ref == other._func_ref
return False
def __ne__(self, other):
if isinstance(other, WeakMethod):
if not self._alive or not other._alive:
return self is not other
return ref.__ne__(self, other) or self._func_ref != other._func_ref
return True
__hash__ = ref.__hash__
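# ---------------------------------------------------------------------------
# Illustrative usage sketch (added example; not part of the original backport).
def _example_weak_method():
    class Target(object):
        def ping(self):
            return 'pong'
    obj = Target()
    wm = WeakMethod(obj.ping)
    # While the instance is alive, dereferencing rebuilds an equivalent bound
    # method that can be called normally.
    assert wm()() == 'pong'
    del obj
    # Once the instance is collected the reference goes dead; on CPython this
    # is usually immediate, but the exact timing is an implementation detail.
    return wm()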
| bsd-3-clause | -2,260,852,891,896,577,300 | 30.632353 | 80 | 0.562994 | false |
efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/python2.7/json/tests/test_fail.py | 108 | 3915 | from json.tests import PyTest, CTest
# 2007-10-05
JSONDOCS = [
# http://json.org/JSON_checker/test/fail1.json
'"A JSON payload should be an object or array, not a string."',
# http://json.org/JSON_checker/test/fail2.json
'["Unclosed array"',
# http://json.org/JSON_checker/test/fail3.json
'{unquoted_key: "keys must be quoted"}',
# http://json.org/JSON_checker/test/fail4.json
'["extra comma",]',
# http://json.org/JSON_checker/test/fail5.json
'["double extra comma",,]',
# http://json.org/JSON_checker/test/fail6.json
'[ , "<-- missing value"]',
# http://json.org/JSON_checker/test/fail7.json
'["Comma after the close"],',
# http://json.org/JSON_checker/test/fail8.json
'["Extra close"]]',
# http://json.org/JSON_checker/test/fail9.json
'{"Extra comma": true,}',
# http://json.org/JSON_checker/test/fail10.json
'{"Extra value after close": true} "misplaced quoted value"',
# http://json.org/JSON_checker/test/fail11.json
'{"Illegal expression": 1 + 2}',
# http://json.org/JSON_checker/test/fail12.json
'{"Illegal invocation": alert()}',
# http://json.org/JSON_checker/test/fail13.json
'{"Numbers cannot have leading zeroes": 013}',
# http://json.org/JSON_checker/test/fail14.json
'{"Numbers cannot be hex": 0x14}',
# http://json.org/JSON_checker/test/fail15.json
'["Illegal backslash escape: \\x15"]',
# http://json.org/JSON_checker/test/fail16.json
'[\\naked]',
# http://json.org/JSON_checker/test/fail17.json
'["Illegal backslash escape: \\017"]',
# http://json.org/JSON_checker/test/fail18.json
'[[[[[[[[[[[[[[[[[[[["Too deep"]]]]]]]]]]]]]]]]]]]]',
# http://json.org/JSON_checker/test/fail19.json
'{"Missing colon" null}',
# http://json.org/JSON_checker/test/fail20.json
'{"Double colon":: null}',
# http://json.org/JSON_checker/test/fail21.json
'{"Comma instead of colon", null}',
# http://json.org/JSON_checker/test/fail22.json
'["Colon instead of comma": false]',
# http://json.org/JSON_checker/test/fail23.json
'["Bad value", truth]',
# http://json.org/JSON_checker/test/fail24.json
"['single quote']",
# http://json.org/JSON_checker/test/fail25.json
'["\ttab\tcharacter\tin\tstring\t"]',
# http://json.org/JSON_checker/test/fail26.json
'["tab\\ character\\ in\\ string\\ "]',
# http://json.org/JSON_checker/test/fail27.json
'["line\nbreak"]',
# http://json.org/JSON_checker/test/fail28.json
'["line\\\nbreak"]',
# http://json.org/JSON_checker/test/fail29.json
'[0e]',
# http://json.org/JSON_checker/test/fail30.json
'[0e+]',
# http://json.org/JSON_checker/test/fail31.json
'[0e+-1]',
# http://json.org/JSON_checker/test/fail32.json
'{"Comma instead if closing brace": true,',
# http://json.org/JSON_checker/test/fail33.json
'["mismatch"}',
# http://code.google.com/p/simplejson/issues/detail?id=3
u'["A\u001FZ control characters in string"]',
]
SKIPS = {
1: "why not have a string payload?",
18: "spec doesn't specify any nesting limitations",
}
class TestFail(object):
def test_failures(self):
for idx, doc in enumerate(JSONDOCS):
idx = idx + 1
if idx in SKIPS:
self.loads(doc)
continue
try:
self.loads(doc)
except ValueError:
pass
else:
self.fail("Expected failure for fail{0}.json: {1!r}".format(idx, doc))
def test_non_string_keys_dict(self):
data = {'a' : 1, (1, 2) : 2}
#This is for c encoder
self.assertRaises(TypeError, self.dumps, data)
#This is for python encoder
self.assertRaises(TypeError, self.dumps, data, indent=True)
class TestPyFail(TestFail, PyTest): pass
class TestCFail(TestFail, CTest): pass
| apache-2.0 | 800,683,366,616,176,400 | 36.285714 | 86 | 0.605109 | false |
lanselin/pysal | pysal/esda/tests/test_join_counts.py | 6 | 2224 | import unittest
import numpy as np
from ..join_counts import Join_Counts
from ...weights import lat2W
from ...common import pandas
PANDAS_EXTINCT = pandas is None
class Join_Counts_Tester(unittest.TestCase):
"""Unit test for Join Counts"""
def setUp(self):
self.w = lat2W(4, 4)
self.y = np.ones(16)
self.y[0:8] = 0
def test_Join_Counts(self):
"""Test method"""
np.random.seed(12345)
jc = Join_Counts(self.y, self.w)
self.assertAlmostEquals(jc.bb, 10.0)
self.assertAlmostEquals(jc.bw, 4.0)
self.assertAlmostEquals(jc.ww, 10.0)
self.assertAlmostEquals(jc.J, 24.0)
self.assertAlmostEquals(len(jc.sim_bb), 999)
self.assertAlmostEquals(jc.p_sim_bb, 0.0030000000000000001)
self.assertAlmostEquals(np.mean(jc.sim_bb), 5.5465465465465469)
self.assertAlmostEquals(np.max(jc.sim_bb), 10.0)
self.assertAlmostEquals(np.min(jc.sim_bb), 0.0)
self.assertAlmostEquals(len(jc.sim_bw), 999)
self.assertAlmostEquals(jc.p_sim_bw, 1.0)
self.assertAlmostEquals(np.mean(jc.sim_bw), 12.811811811811811)
self.assertAlmostEquals(np.max(jc.sim_bw), 24.0)
self.assertAlmostEquals(np.min(jc.sim_bw), 7.0)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
np.random.seed(12345)
r1 = Join_Counts.by_col(df, ['y'], w=self.w, permutations=999)
bb = np.unique(r1.y_bb.values)
bw = np.unique(r1.y_bw.values)
bb_p = np.unique(r1.y_p_sim_bb.values)
bw_p = np.unique(r1.y_p_sim_bw.values)
np.random.seed(12345)
c = Join_Counts(self.y, self.w, permutations=999)
self.assertAlmostEquals(bb, c.bb)
self.assertAlmostEquals(bw, c.bw)
self.assertAlmostEquals(bb_p, c.p_sim_bb)
self.assertAlmostEquals(bw_p, c.p_sim_bw)
suite = unittest.TestSuite()
test_classes = [Join_Counts_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause | 8,560,935,902,251,163,000 | 34.870968 | 71 | 0.633993 | false |
alvations/Sensible-SemEval | CWI-data/evaluate_system_original.py | 2 | 1946 | import sys
import argparse
def evaluateIdentifier(gold, pred):
"""
Performs an intrinsic evaluation of a Complex Word Identification approach.
@param gold: A vector containing gold-standard labels.
@param pred: A vector containing predicted labels.
@return: Precision, Recall and F-1.
"""
#Initialize variables:
precisionc = 0
precisiont = 0
recallc = 0
recallt = 0
#Calculate measures:
for i in range(0, len(gold)):
gold_label = gold[i]
predicted_label = pred[i]
if gold_label==predicted_label:
precisionc += 1
if gold_label==1:
recallc += 1
if gold_label==1:
recallt += 1
precisiont += 1
precision = float(precisionc)/float(precisiont)
recall = float(recallc)/float(recallt)
fmean = 0.0
if precision==0.0 and recall==0.0:
fmean = 0.0
else:
fmean = 2*(precision*recall)/(precision+recall)
#Return measures:
return precision, recall, fmean
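# Illustrative example (added; not part of the original script): with the
# definitions above, evaluateIdentifier([1, 0, 1, 1], [1, 1, 0, 1]) returns
# precision 0.5, recall ~0.667 and F1 ~0.571.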
if __name__=='__main__':
#Parse arguments:
description = 'Evaluation script for Task 11: Complex Word Identification.'
description += ' The gold-standard file is a dataset with labels in the format provided by the task organizers.'
description += ' The predicted labels file must contain one label 0 or 1 per line, and must have the same number of lines as the gold-standard.'
epilog = 'Returns: Precision, Recall and F1.'
parser=argparse.ArgumentParser(description=description, epilog=epilog)
parser.add_argument('--gold', required=True, help='File containing dataset with gold-standard labels.')
parser.add_argument('--pred', required=True, help='File containing predicted labels.')
args = vars(parser.parse_args())
#Retrieve labels:
gold = [int(line.strip().split('\t')[3]) for line in open(args['gold'])]
pred = [int(line.strip()) for line in open(args['pred'])]
#Calculate scores:
p, r, f = evaluateIdentifier(gold, pred)
#Present scores:
print('Precision: ' + str(p))
print('Recall: ' + str(r))
print('F1: ' + str(f))
| mit | -3,889,798,525,430,688,300 | 28.938462 | 145 | 0.705036 | false |
lanyuwen/openthread | tools/harness-automation/cases_R140/router_5_3_4.py | 18 | 1876 | #!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Router_5_3_4(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '5 3 4'
golden_devices_required = 6
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | -8,938,273,940,463,214,000 | 41.636364 | 77 | 0.76226 | false |
chuan9/chromium-crosswalk | tools/perf/page_sets/key_idle_power_cases.py | 6 | 2704 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from page_sets import android_screen_restoration_shared_state
from telemetry.page import page as page_module
from telemetry.page import shared_page_state
from telemetry import story
class KeyIdlePowerPage(page_module.Page):
def __init__(self, url, page_set, turn_screen_off,
shared_page_state_class=shared_page_state.SharedMobilePageState):
super(KeyIdlePowerPage, self).__init__(
url=url,
page_set=page_set,
shared_page_state_class=(android_screen_restoration_shared_state
.AndroidScreenRestorationSharedState))
self._turn_screen_off = turn_screen_off
def RunNavigateSteps(self, action_runner):
super(KeyIdlePowerPage, self).RunNavigateSteps(action_runner)
action_runner.Wait(2)
if self._turn_screen_off:
# TODO(jdduke): Remove this API violation after the shared page state is
# exposed here, crbug.com/470147.
# pylint: disable=protected-access
action_runner._tab.browser.platform.android_action_runner.TurnScreenOff()
# We're not interested in tracking activity that occurs immediately after
# the screen is turned off. Several seconds should be enough time for the
# browser to "settle down" into an idle state.
action_runner.Wait(2)
def RunPageInteractions(self, action_runner):
# The page interaction is simply waiting in an idle state.
with action_runner.CreateInteraction('IdleWaiting'):
action_runner.Wait(20)
class KeyIdlePowerCasesPageSet(story.StorySet):
""" Key idle power cases """
def __init__(self):
super(KeyIdlePowerCasesPageSet, self).__init__()
foreground_urls_list = [
# Why: Ensure minimal activity for static, empty pages in the foreground.
'file://key_idle_power_cases/blank.html',
]
for url in foreground_urls_list:
self.AddStory(KeyIdlePowerPage(url, self, False))
background_urls_list = [
# Why: Ensure animated GIFs aren't processed when Chrome is backgrounded.
'file://key_idle_power_cases/animated-gif.html',
# Why: Ensure CSS animations aren't processed when Chrome is backgrounded.
'file://key_idle_power_cases/css-animation.html',
# Why: Ensure rAF is suppressed when Chrome is backgrounded.
'file://key_idle_power_cases/request-animation-frame.html',
# Why: Ensure setTimeout is throttled when Chrome is backgrounded.
'file://key_idle_power_cases/set-timeout.html',
]
for url in background_urls_list:
self.AddStory(KeyIdlePowerPage(url, self, True))
| bsd-3-clause | -3,171,268,725,044,818,400 | 38.764706 | 80 | 0.710799 | false |
Thraxis/pymedusa | lib/unidecode/x00b.py | 252 | 4132 | data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'e', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'e', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'+', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'rr', # 0x5c
'rh', # 0x5d
'[?]', # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'ng', # 0x99
'c', # 0x9a
'[?]', # 0x9b
'j', # 0x9c
'[?]', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'nn', # 0xa3
't', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'n', # 0xa8
'nnn', # 0xa9
'p', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'lll', # 0xb4
'v', # 0xb5
'[?]', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'+', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'+10+', # 0xf0
'+100+', # 0xf1
'+1000+', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-3.0 | -987,273,519,638,611,000 | 15.077821 | 19 | 0.295983 | false |
xuru/pyvisdk | pyvisdk/do/vmfs_datastore_base_option.py | 1 | 1045 |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def VmfsDatastoreBaseOption(vim, *args, **kwargs):
'''Base class that describes a VMFS datastore provisioning option.'''
obj = vim.client.factory.create('ns0:VmfsDatastoreBaseOption')
# do some validation checking...
if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % len(args))
required = [ 'layout' ]
optional = [ 'partitionFormatChange', 'dynamicProperty', 'dynamicType' ]
for name, arg in zip(required+optional, args):
setattr(obj, name, arg)
for name, value in kwargs.items():
if name in required + optional:
setattr(obj, name, value)
else:
raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))
return obj
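# Illustrative call (added example): 'si' stands for an already-established
# pyvisdk service connection and 'my_layout' for the disk-partition layout
# object this factory requires -- both names are hypothetical here.
#
#     option = VmfsDatastoreBaseOption(si, my_layout)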
| mit | 3,912,797,045,908,335,000 | 30.69697 | 124 | 0.605742 | false |
stendarr/searchengine-imagescraper | bingscraperv3.py | 1 | 2359 | from bs4 import BeautifulSoup, SoupStrainer
import os
import http.client
import urllib.request
from urllib.request import urlopen, Request
#99 questions
yes = ['y','ye','yes']
search_term = str(input('Bing Image Search: ')).replace(" ", "+")
link_limit = int(input("Enter link limit (1-100): "))
save_links_yn = str(input("Write links to a file? (y/n) ")).lower()
if save_links_yn in yes:
filename_links = str(input("How should the file be named? "))
download_pictures_yn = str(input("Download pictures? (y/n) ")).lower()
if download_pictures_yn in yes:
filename_pictures = str(input("How should the image files be named? "))
    filepath_pictures = filename_pictures+'/'+filename_pictures
    # create the folder that the downloads below are saved into
    os.makedirs(filename_pictures, exist_ok=True)
#sets bing url according to input
bing_url = 'http://www.bing.com/images/search?q='+search_term
#just checking the search url for mistakes
print("Checking following URL:\n"+bing_url+"\n")
#adding headers to fool bing
req = Request(bing_url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36'})
soup = BeautifulSoup(urllib.request.urlopen(req), 'html.parser')
# for debugging and reverse engineering purposes
#open('souptest1.html', 'w').write(str(soup.encode("utf-8")))
#open('souptest2.txt', 'w').write(str(soup))
#find all a tags with attribute m because that's where the links are
divs = soup.findAll("a", attrs={"m": True})
link_counter = 0
exception_counter = 0
for div in divs:
try:
#stripping elements of unnecessary characters
div = str(div).partition('",imgurl:"')[-1]
div = div.rpartition('",tid:"')[0]
div = str(div)
#writing links to a file
if save_links_yn in yes:
open(filename_links+'.txt', 'a').write(div+"\n")
#downloading the images
if download_pictures_yn in yes:
            urllib.request.urlretrieve(div, filepath_pictures+str(link_counter+1)+".jpg")
#if counter's limit reached, stop
link_counter += 1
if link_counter == link_limit:
break
except IOError:
print("Error with:",div)
        exception_counter += 1
print("\nlinks found:", link_counter)
print("\nexceptions thrown:", exception_counter)
input("\n\n-----------------------\n EOP")
| mit | -8,017,445,782,539,123,000 | 27.421687 | 161 | 0.650699 | false |
Whisper-Cao/802.15.4-revision | docs/doxygen/swig_doc.py | 6 | 8675 | #
# Copyright 2010,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
Creates the swig_doc.i SWIG interface file.
Execute using: python swig_doc.py xml_path outputfilename
The file instructs SWIG to transfer the doxygen comments into the
python docstrings.
"""
import sys
try:
from doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
except ImportError:
from gnuradio.doxyxml import DoxyIndex, DoxyClass, DoxyFriend, DoxyFunction, DoxyFile, base
def py_name(name):
bits = name.split('_')
return '_'.join(bits[1:])
def make_name(name):
bits = name.split('_')
return bits[0] + '_make_' + '_'.join(bits[1:])
class Block(object):
"""
Checks if doxyxml produced objects correspond to a gnuradio block.
"""
@classmethod
def includes(cls, item):
if not isinstance(item, DoxyClass):
return False
# Check for a parsing error.
if item.error():
return False
return item.has_member(make_name(item.name()), DoxyFriend)
def utoascii(text):
"""
Convert unicode text into ascii and escape quotes.
"""
if text is None:
return ''
out = text.encode('ascii', 'replace')
out = out.replace('"', '\\"')
return out
def combine_descriptions(obj):
"""
Combines the brief and detailed descriptions of an object together.
"""
description = []
bd = obj.brief_description.strip()
dd = obj.detailed_description.strip()
if bd:
description.append(bd)
if dd:
description.append(dd)
return utoascii('\n\n'.join(description)).strip()
entry_templ = '%feature("docstring") {name} "{docstring}"'
def make_entry(obj, name=None, templ="{description}", description=None):
"""
Create a docstring entry for a swig interface file.
obj - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to obj.name())
templ - an optional template for the docstring containing only one
variable named 'description'.
description - if this optional variable is set then it's value is
used as the description instead of extracting it from obj.
"""
if name is None:
name=obj.name()
if "operator " in name:
return ''
if description is None:
description = combine_descriptions(obj)
docstring = templ.format(description=description)
if not docstring:
return ''
return entry_templ.format(
name=name,
docstring=docstring,
)
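# Illustrative output (added example; not part of the original script): for an
# object whose name() is 'gr_head' and whose combined description is
# 'copy N items', make_entry(obj) produces the line:
#     %feature("docstring") gr_head "copy N items"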
def make_func_entry(func, name=None, description=None, params=None):
"""
Create a function docstring entry for a swig interface file.
func - a doxyxml object from which documentation will be extracted.
name - the name of the C object (defaults to func.name())
description - if this optional variable is set then it's value is
used as the description instead of extracting it from func.
params - a parameter list that overrides using func.params.
"""
if params is None:
params = func.params
params = [prm.declname for prm in params]
if params:
sig = "Params: (%s)" % ", ".join(params)
else:
sig = "Params: (NONE)"
templ = "{description}\n\n" + sig
return make_entry(func, name=name, templ=utoascii(templ),
description=description)
def make_class_entry(klass, description=None):
"""
Create a class docstring for a swig interface file.
"""
output = []
output.append(make_entry(klass, description=description))
for func in klass.in_category(DoxyFunction):
name = klass.name() + '::' + func.name()
output.append(make_func_entry(func, name=name))
return "\n\n".join(output)
def make_block_entry(di, block):
"""
Create class and function docstrings of a gnuradio block for a
swig interface file.
"""
descriptions = []
# Get the documentation associated with the class.
class_desc = combine_descriptions(block)
if class_desc:
descriptions.append(class_desc)
# Get the documentation associated with the make function
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_func_desc = combine_descriptions(make_func)
if make_func_desc:
descriptions.append(make_func_desc)
# Get the documentation associated with the file
try:
block_file = di.get_member(block.name() + ".h", DoxyFile)
file_desc = combine_descriptions(block_file)
if file_desc:
descriptions.append(file_desc)
except base.Base.NoSuchMember:
# Don't worry if we can't find a matching file.
pass
# And join them all together to make a super duper description.
super_description = "\n\n".join(descriptions)
# Associate the combined description with the class and
# the make function.
output = []
output.append(make_class_entry(block, description=super_description))
creator = block.get_member(block.name(), DoxyFunction)
output.append(make_func_entry(make_func, description=super_description,
params=creator.params))
return "\n\n".join(output)
def make_swig_interface_file(di, swigdocfilename, custom_output=None):
output = ["""
/*
* This file was automatically generated using swig_doc.py.
*
* Any changes to it will be lost next time it is regenerated.
*/
"""]
if custom_output is not None:
output.append(custom_output)
# Create docstrings for the blocks.
blocks = di.in_category(Block)
make_funcs = set([])
for block in blocks:
try:
make_func = di.get_member(make_name(block.name()), DoxyFunction)
make_funcs.add(make_func.name())
output.append(make_block_entry(di, block))
except block.ParsingError:
print('Parsing error for block %s' % block.name())
# Create docstrings for functions
# Don't include the make functions since they have already been dealt with.
funcs = [f for f in di.in_category(DoxyFunction) if f.name() not in make_funcs]
for f in funcs:
try:
output.append(make_func_entry(f))
except f.ParsingError:
print('Parsing error for function %s' % f.name())
# Create docstrings for classes
block_names = [block.name() for block in blocks]
klasses = [k for k in di.in_category(DoxyClass) if k.name() not in block_names]
for k in klasses:
try:
output.append(make_class_entry(k))
except k.ParsingError:
print('Parsing error for class %s' % k.name())
# Docstrings are not created for anything that is not a function or a class.
# If this excludes anything important please add it here.
output = "\n\n".join(output)
swig_doc = file(swigdocfilename, 'w')
swig_doc.write(output)
swig_doc.close()
if __name__ == "__main__":
# Parse command line options and set up doxyxml.
err_msg = "Execute using: python swig_doc.py xml_path outputfilename"
if len(sys.argv) != 3:
raise StandardError(err_msg)
xml_path = sys.argv[1]
swigdocfilename = sys.argv[2]
di = DoxyIndex(xml_path)
# gnuradio.gr.msq_queue.insert_tail and delete_head create errors unless docstrings are defined!
# This is presumably a bug in SWIG.
#msg_q = di.get_member(u'gr_msg_queue', DoxyClass)
#insert_tail = msg_q.get_member(u'insert_tail', DoxyFunction)
#delete_head = msg_q.get_member(u'delete_head', DoxyFunction)
output = []
#output.append(make_func_entry(insert_tail, name='gr_py_msg_queue__insert_tail'))
#output.append(make_func_entry(delete_head, name='gr_py_msg_queue__delete_head'))
custom_output = "\n\n".join(output)
# Generate the docstrings interface file.
make_swig_interface_file(di, swigdocfilename, custom_output=custom_output)
| gpl-3.0 | -3,040,831,430,250,062,000 | 33.019608 | 100 | 0.658674 | false |
igordejanovic/textX | tests/functional/test_scoping/test_model_repository.py | 1 | 4915 | from __future__ import unicode_literals
from os.path import dirname, abspath, join
import textx.scoping.providers as scoping_providers
from textx import metamodel_from_file
from textx.scoping import is_file_included
def test_inclusion_check_1():
"""
Test to demonstrate how to check if a file is used by a model.
    This can be used by an IDE to determine if a model has to be
updated/reloaded.
"""
#################################
# META MODEL DEF
#################################
my_meta_model = metamodel_from_file(
join(abspath(dirname(__file__)), 'issue66', 'task_specification.tx'))
search_path = [
join(abspath(dirname(__file__)), 'issue66', 'somewhere1'), # assembly
join(abspath(dirname(__file__)), 'issue66', 'somewhere2') # position
]
my_meta_model.register_scope_providers(
{"*.*": scoping_providers.PlainNameImportURI(search_path=search_path)})
#################################
# MODEL PARSING
#################################
    # This model loads two files
# * one file exists locally and in a search path --> the local one should
# be preferred.
# * one only exists locally.
m = my_meta_model.model_from_file(
join(abspath(dirname(__file__)),
'issue66', 'assembly_car1.prog'))
# the model file itself is "included" (special case)
assert is_file_included(
join(abspath(dirname(__file__)), 'issue66',
'assembly_car1.prog'),
m
)
# another model file
assert not is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'assembly_car3.prog'),
m
)
# file in folder "local"
assert not is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'mylib', 'local.tasks'),
m
)
# file in folder "local"
assert not is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'mylib', 'position.tasks'),
m
)
# distant file (part of search path)
assert is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'somewhere1',
'mylib', 'assembly.tasks'),
m
)
# distant file (part of search path)
assert is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'somewhere2',
'mylib', 'position.tasks'),
m
)
#################################
# END
#################################
def test_inclusion_check_2():
"""
Test to demonstrate how to check if a file is used by a model.
    This can be used by an IDE to determine if a model has to be
updated/reloaded.
"""
#################################
# META MODEL DEF
#################################
my_meta_model = metamodel_from_file(
join(abspath(dirname(__file__)), 'issue66', 'task_specification.tx'))
search_path = [
join(abspath(dirname(__file__)), 'issue66', 'somewhere1'), # assembly
join(abspath(dirname(__file__)), 'issue66', 'somewhere2') # position
]
my_meta_model.register_scope_providers(
{"*.*": scoping_providers.PlainNameImportURI(search_path=search_path)})
#################################
# MODEL PARSING
#################################
    # This model loads two files
# * one file exists locally and in a search path --> the local one should
# be preferred.
# * one only exists locally.
m = my_meta_model.model_from_file(
join(abspath(dirname(__file__)),
'issue66', 'local', 'assembly_car3.prog'))
# the model file itself is "included" (special case)
assert is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'assembly_car3.prog'),
m
)
# local file
assert is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'mylib', 'local.tasks'),
m
)
# local file
assert is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'mylib', 'position.tasks'),
m
)
# distant file
assert not is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'somewhere1',
'mylib', 'assembly.tasks'),
m
)
# distant file
assert not is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'somewhere2',
'mylib', 'position.tasks'),
m
)
#################################
# END
#################################
def test_no_tx_model_repos():
from textx import metamodel_from_str
mm = metamodel_from_str("Model: 'A';")
m = mm.model_from_str("A")
assert not is_file_included(
join(abspath(dirname(__file__)), 'issue66', 'local',
'mylib', 'position.tasks'),
m
)
| mit | -316,587,254,507,203,500 | 30.107595 | 79 | 0.527772 | false |
medspx/QGIS | python/plugins/processing/gui/wrappers_map_theme.py | 30 | 1829 | # -*- coding: utf-8 -*-
"""
***************************************************************************
wrappers_map_theme.py - Map theme widget wrappers
---------------------
Date : August 2017
Copyright : (C) 2017 by OPENGIS.ch
Email : [email protected]
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from qgis.core import QgsProject
from qgis.PyQt.QtWidgets import QComboBox
from processing.gui.wrappers import (
BasicWidgetWrapper
)
class MapThemeWrapper(BasicWidgetWrapper):
"""
    WidgetWrapper for ParameterString that creates a combobox widget
with the existing map themes.
"""
def createWidget(self):
self._combo = QComboBox()
self._combo.addItem('', '')
for item in self.items():
self._combo.addItem(item, item)
self._combo.currentIndexChanged.connect(lambda:
self.widgetValueHasChanged.emit(self))
return self._combo
def items(self):
return QgsProject.instance().mapThemeCollection().mapThemes()
def setValue(self, value):
self.setComboValue(value, self._combo)
def value(self):
return self.comboValue(combobox=self._combo)
| gpl-2.0 | 1,001,300,684,197,698,400 | 34.173077 | 86 | 0.48059 | false |
appsembler/edx-platform | cms/djangoapps/contentstore/management/commands/delete_course.py | 16 | 3568 | from __future__ import print_function
from six import text_type
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from contentstore.utils import delete_course
from xmodule.contentstore.django import contentstore
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from .prompt import query_yes_no
class Command(BaseCommand):
"""
Delete a MongoDB backed course
Example usage:
$ ./manage.py cms delete_course 'course-v1:edX+DemoX+Demo_Course' --settings=devstack
$ ./manage.py cms delete_course 'course-v1:edX+DemoX+Demo_Course' --keep-instructors --settings=devstack
$ ./manage.py cms delete_course 'course-v1:edX+DemoX+Demo_Course' --remove-assets --settings=devstack
Note:
The keep-instructors option is useful for resolving issues that arise when a course run's ID is duplicated
in a case-insensitive manner. MongoDB is case-sensitive, but MySQL is case-insensitive. This results in
course-v1:edX+DemoX+1t2017 being treated differently in MongoDB from course-v1:edX+DemoX+1T2017 (capital 'T').
If you need to remove a duplicate that has resulted from casing issues, use the --keep-instructors flag
to ensure that permissions for the remaining course run are not deleted.
Use the remove-assets option to ensure all assets are deleted. This is especially relevant to users of the
split Mongo modulestore.
"""
help = 'Delete a MongoDB backed course'
def add_arguments(self, parser):
parser.add_argument(
'course_key',
help='ID of the course to delete.',
)
parser.add_argument(
'--keep-instructors',
action='store_true',
default=False,
help='Do not remove permissions of users and groups for course',
)
parser.add_argument(
'--remove-assets',
action='store_true',
help='Remove all assets associated with the course. '
'Be careful! These assets may be associated with another course',
)
def handle(self, *args, **options):
try:
# a course key may have unicode chars in it
try:
course_key = text_type(options['course_key'], 'utf8')
# May already be decoded to unicode if coming in through tests, this is ok.
except TypeError:
course_key = text_type(options['course_key'])
course_key = CourseKey.from_string(course_key)
except InvalidKeyError:
raise CommandError('Invalid course_key: {}'.format(options['course_key']))
if not modulestore().get_course(course_key):
raise CommandError('Course not found: {}'.format(options['course_key']))
print('Preparing to delete course %s from module store....' % options['course_key'])
if query_yes_no('Are you sure you want to delete course {}?'.format(course_key), default='no'):
if query_yes_no('Are you sure? This action cannot be undone!', default='no'):
delete_course(course_key, ModuleStoreEnum.UserID.mgmt_command, options['keep_instructors'])
if options['remove_assets']:
contentstore().delete_all_course_assets(course_key)
                    print('Deleted assets for course {}'.format(course_key))
print('Deleted course {}'.format(course_key))
| agpl-3.0 | -3,616,656,670,771,902,500 | 42.512195 | 118 | 0.653307 | false |
katstalk/android_external_chromium_org | third_party/re2/re2/make_unicode_groups.py | 219 | 2849 | #!/usr/bin/python
# Copyright 2008 The RE2 Authors. All Rights Reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Generate C++ tables for Unicode Script and Category groups."""
import sys
import unicode
_header = """
// GENERATED BY make_unicode_groups.py; DO NOT EDIT.
// make_unicode_groups.py >unicode_groups.cc
#include "re2/unicode_groups.h"
namespace re2 {
"""
_trailer = """
} // namespace re2
"""
n16 = 0
n32 = 0
def MakeRanges(codes):
"""Turn a list like [1,2,3,7,8,9] into a range list [[1,3], [7,9]]"""
ranges = []
last = -100
for c in codes:
if c == last+1:
ranges[-1][1] = c
else:
ranges.append([c, c])
last = c
return ranges
def PrintRanges(type, name, ranges):
"""Print the ranges as an array of type named name."""
print "static %s %s[] = {" % (type, name,)
for lo, hi in ranges:
print "\t{ %d, %d }," % (lo, hi)
print "};"
# def PrintCodes(type, name, codes):
# """Print the codes as an array of type named name."""
# print "static %s %s[] = {" % (type, name,)
# for c in codes:
# print "\t%d," % (c,)
# print "};"
def PrintGroup(name, codes):
"""Print the data structures for the group of codes.
Return a UGroup literal for the group."""
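  # Illustration only (hypothetical group name): PrintGroup("Lu", codes) prints
  # Lu_range16/Lu_range32 arrays as needed and returns a literal roughly like
  #   { "Lu", +1, Lu_range16, <n16>, Lu_range32, <n32> }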
# See unicode_groups.h for a description of the data structure.
# Split codes into 16-bit ranges and 32-bit ranges.
range16 = MakeRanges([c for c in codes if c < 65536])
range32 = MakeRanges([c for c in codes if c >= 65536])
# Pull singleton ranges out of range16.
# code16 = [lo for lo, hi in range16 if lo == hi]
# range16 = [[lo, hi] for lo, hi in range16 if lo != hi]
global n16
global n32
n16 += len(range16)
n32 += len(range32)
ugroup = "{ \"%s\", +1" % (name,)
# if len(code16) > 0:
# PrintCodes("uint16", name+"_code16", code16)
# ugroup += ", %s_code16, %d" % (name, len(code16))
# else:
# ugroup += ", 0, 0"
if len(range16) > 0:
PrintRanges("URange16", name+"_range16", range16)
ugroup += ", %s_range16, %d" % (name, len(range16))
else:
ugroup += ", 0, 0"
if len(range32) > 0:
PrintRanges("URange32", name+"_range32", range32)
ugroup += ", %s_range32, %d" % (name, len(range32))
else:
ugroup += ", 0, 0"
ugroup += " }"
return ugroup
def main():
print _header
ugroups = []
for name, codes in unicode.Categories().iteritems():
ugroups.append(PrintGroup(name, codes))
for name, codes in unicode.Scripts().iteritems():
ugroups.append(PrintGroup(name, codes))
print "// %d 16-bit ranges, %d 32-bit ranges" % (n16, n32)
print "UGroup unicode_groups[] = {";
ugroups.sort()
for ug in ugroups:
print "\t%s," % (ug,)
print "};"
print "int num_unicode_groups = %d;" % (len(ugroups),)
print _trailer
if __name__ == '__main__':
main()
| bsd-3-clause | 7,432,405,191,888,053,000 | 24.666667 | 71 | 0.602317 | false |
rchekaluk/cloudbiolinux | cloudbio/package/cpan.py | 9 | 3266 | """Install perl packages using CPAN and cpanminus (cpanm).
"""
import os
from fabric.api import cd, settings
from cloudbio.flavor.config import get_config_file
from cloudbio.fabutils import find_cmd
from cloudbio.package.shared import _yaml_to_packages
from cloudbio.custom import shared as cshared
def install_packages(env):
config_file = get_config_file(env, "perl-libs.yaml")
(packages, _) = _yaml_to_packages(config_file.base, subs_yaml_file=config_file.dist, namesort=False)
cpanm_cmd = find_cmd(env, "cpanm", "--version")
for package in packages:
if package.count("==") > 1:
_install_from_url(env, cpanm_cmd, package)
else:
_install_from_cpan(env, cpanm_cmd, package)
def _install_from_cpan(env, cpanm_cmd, package):
"""Install from CPAN using cpanm, handling special arguments.
The simplest input string is just a package to install (like XML::Simple) but
users can also specify build arguments and exports as additional items separated
by ';'
"""
parts = package.split(";")
if len(parts) == 1:
perl_lib = parts[0]
args = ""
exports = []
elif len(parts) == 2:
perl_lib, args = parts
exports = []
else:
perl_lib, args = parts[:2]
exports = parts[2:]
export_strs = []
for export in exports:
export_strs.append("export " + export.format(system_install=env.system_install))
export = " && ".join(export_strs) + " && " if export_strs else ""
build_args = ("--build-args='%s'" % args) if args else ""
env.safe_run("%s %s -i --notest --local-lib=%s %s '%s'" % (export, cpanm_cmd, env.system_install,
build_args, perl_lib))
def _install_from_url(env, cpanm_cmd, package):
"""Check version of a dependency and download and install with cpanm if not up to date.
Packages installed via URL have the package name, target version and URL separated
with '=='. They can also optionally have a build directory or dependency to remove.
"""
parts = package.split("==")
package, target_version, url = parts[:3]
args = {}
if len(parts) > 3:
for key, value in (x.split("=") for x in parts[3:]):
args[key] = value
with settings(warn_only=True):
cur_version = env.safe_run_output("export PERL5LIB=%s/lib/perl5:${PERL5LIB} && " % env.system_install +
"""perl -le 'eval "require $ARGV[0]" and print $ARGV[0]->VERSION' %s"""
% package)
if cur_version != target_version:
with cshared._make_tmp_dir() as work_dir:
with cd(work_dir):
dl_dir = cshared._fetch_and_unpack(url)
if args.get("build"):
dl_dir = os.path.join(dl_dir, args["build"])
with cd(dl_dir):
if args.get("depremove"):
for fname in ["Makefile.PL", "MYMETA.json", "MYMETA.yml"]:
env.safe_run(r"""sed -i.bak -e '/^.*%s.*/s/^/#/' %s""" % (args["depremove"], fname))
env.safe_run("%s -i --notest --local-lib=%s ." % (cpanm_cmd, env.system_install))
| mit | -31,958,989,676,459,468 | 43.135135 | 113 | 0.572872 | false |
azumimuo/family-xbmc-addon | script.module.liveresolver/lib/liveresolver/resolvers/zoomtv.py | 1 | 1639 | # -*- coding: utf-8 -*-
import re,urlparse,urllib
from liveresolver.modules import client,decryptionUtils
from liveresolver.modules.log_utils import log
def resolve(url):
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
headers = { 'referer': referer,
'Accept' : 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Content-Type' :'application/x-www-form-urlencoded',
'Connection' : 'keep-alive',
'Host' : 'www.zoomtv.me',
'Origin' : urlparse.urlparse(referer).netloc,
'User-Agent' : client.agent()
}
fid = urlparse.parse_qs(urlparse.urlparse(url).query)['v'][0]
pid = urlparse.parse_qs(urlparse.urlparse(url).query)['pid'][0]
url = 'http://www.zoomtv.me/embed.php?v=%s&vw=660&vh=450'%fid
page = url
post_data = 'uagent=uagent&pid='+pid
result = client.request(url, post=post_data,headers = headers)
result = decryptionUtils.doDemystify(result)
        var = re.compile(r'var\s(.+?)\s*=\s*\'(.+?)\'').findall(result)
for v in var:
if 'm3u8' in v[1]:
m3u8 = v[1]
if 'file' in v[1]:
file = v[1]
url = m3u8 + file
url += '|%s' % urllib.urlencode({'User-Agent': client.agent(), 'Referer': page,'X-Requested-With':'ShockwaveFlash/20.0.0.286'})
return url
except:
return | gpl-2.0 | 1,185,564,609,960,311,300 | 40 | 135 | 0.514338 | false |
tarballs-are-good/sympy | sympy/physics/quantum/tests/test_matrixutils.py | 2 | 3418 | from sympy import Matrix, zeros, ones, Integer
from sympy.physics.quantum.matrixutils import (
to_sympy, to_numpy, to_scipy_sparse, matrix_tensor_product,
matrix_to_zero
)
m = Matrix([[1,2],[3,4]])
def test_sympy_to_sympy():
assert to_sympy(m) == m
def test_matrix_to_zero():
assert matrix_to_zero(m) == m
assert matrix_to_zero(Matrix([[0,0],[0,0]])) == Integer(0)
try:
import numpy as np
except ImportError:
pass
else:
def test_to_numpy():
result = np.matrix([[1,2],[3,4]], dtype='complex')
assert (to_numpy(m) == result).all()
def test_matrix_tensor_product():
l1 = zeros(4)
for i in range(16):
l1[i] = 2**i
l2 = zeros(4)
for i in range(16):
l2[i] = i
l3 = zeros(2)
for i in range(4):
l3[i] = i
vec = Matrix([1,2,3])
        #test for known 4x4 Matrix objects
numpyl1 = np.matrix(l1.tolist())
numpyl2 = np.matrix(l2.tolist())
numpy_product = np.kron(numpyl1,numpyl2)
args = [l1, l2]
sympy_product = matrix_tensor_product(*args)
assert numpy_product.tolist() == sympy_product.tolist()
numpy_product = np.kron(numpyl2,numpyl1)
args = [l2, l1]
sympy_product = matrix_tensor_product(*args)
assert numpy_product.tolist() == sympy_product.tolist()
#test for other known matrix of different dimensions
numpyl2 = np.matrix(l3.tolist())
numpy_product = np.kron(numpyl1,numpyl2)
args = [l1, l3]
sympy_product = matrix_tensor_product(*args)
assert numpy_product.tolist() == sympy_product.tolist()
numpy_product = np.kron(numpyl2,numpyl1)
args = [l3, l1]
sympy_product = matrix_tensor_product(*args)
assert numpy_product.tolist() == sympy_product.tolist()
#test for non square matrix
numpyl2 = np.matrix(vec.tolist())
numpy_product = np.kron(numpyl1,numpyl2)
args = [l1, vec]
sympy_product = matrix_tensor_product(*args)
assert numpy_product.tolist() == sympy_product.tolist()
numpy_product = np.kron(numpyl2,numpyl1)
args = [vec, l1]
sympy_product = matrix_tensor_product(*args)
assert numpy_product.tolist() == sympy_product.tolist()
#test for random matrix with random values that are floats
        random_matrix1 = np.random.rand(int(np.random.rand()*5) + 1, int(np.random.rand()*5) + 1)
        random_matrix2 = np.random.rand(int(np.random.rand()*5) + 1, int(np.random.rand()*5) + 1)
numpy_product = np.kron(random_matrix1,random_matrix2)
args = [Matrix(random_matrix1.tolist()),Matrix(random_matrix2.tolist())]
sympy_product = matrix_tensor_product(*args)
assert not (sympy_product - Matrix(numpy_product.tolist())).tolist() > \
(ones((sympy_product.rows,sympy_product.cols))*epsilon).tolist()
#test for three matrix kronecker
sympy_product = matrix_tensor_product(l1,vec,l2)
numpy_product = np.kron(l1,np.kron(vec,l2))
assert numpy_product.tolist() == sympy_product.tolist()
try:
import numpy as np
from scipy import sparse
except ImportError:
pass
else:
def test_to_scipy_sparse():
result = sparse.csr_matrix([[1,2],[3,4]], dtype='complex')
assert np.linalg.norm((to_scipy_sparse(m) - result).todense()) == 0.0
epsilon = .000001
| bsd-3-clause | 4,332,148,937,329,089,500 | 31.865385 | 82 | 0.607665 | false |
ah391/sc-python | datalook.py | 1 | 1722 |
# coding: utf-8
import sys
import numpy
import matplotlib.pyplot
def analyse(filename, outfile=None):
"""Load data and create plots.
    Subplots with placeholders, with labels set and a tight layout.
"""
data = numpy.loadtxt(fname=filename, delimiter=',')
# Create a wide figure to hold the subplots
fig = matplotlib.pyplot.figure(figsize=(10.3, 3.0))
# create placeholders for plots
    subplot1 = fig.add_subplot(1, 3, 1)
    subplot2 = fig.add_subplot(1, 3, 2)
    subplot3 = fig.add_subplot(1, 3, 3)
subplot1.set_ylabel('average')
subplot1.plot(numpy.mean(data, axis=0))
subplot2.set_ylabel('maximum')
subplot2.plot(numpy.max(data, axis=0))
subplot3.set_ylabel('minimum')
subplot3.plot(numpy.min(data, axis=0))
fig.tight_layout()
if outfile is None:
matplotlib.pyplot.show()
else:
matplotlib.pyplot.savefig(outfile)
def detect_problems(filename):
"""Some of our temperature files have problems, check for these
    This function reads a file (the filename argument) and reports odd-looking maxima and minima that add up to 0.
This seems to happen when the sensors break.
The function does not return any data.
"""
data = numpy.loadtxt(fname=filename, delimiter=',')
    if numpy.max(data, axis=0)[0] == 0 and numpy.max(data, axis=0)[20] == 20:
print("Suspicious looking maxima")
    elif numpy.sum(numpy.min(data, axis=0)) == 0:
print("Minima add up to zero")
else:
print("Data looks OK")
if __name__ == "__main__":
print("Running",sys.argv[0])
print(sys.argv[1])
analyse(sys.argv[1], outfile=sys.argv[2])
detect_problems(sys.argv[1])
| mit | 7,377,298,487,821,775,000 | 26.333333 | 114 | 0.643438 | false |