repo_name (string) | path (string) | copies (string) | size (string) | content (string) | license (string) | hash (int64) | line_mean (float64) | line_max (int64) | alpha_frac (float64) | autogenerated (bool) | ratio (float64) | config_test (bool) | has_no_keywords (bool) | few_assignments (bool) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
praekelt/jmbo-foundry | foundry/migrations/0008_auto.py | 1 | 23410 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding M2M table for field content_type on 'Listing'
db.create_table('foundry_listing_content_type', (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('listing', models.ForeignKey(orm['foundry.listing'], null=False)),
('contenttype', models.ForeignKey(orm['contenttypes.contenttype'], null=False))
))
db.create_unique('foundry_listing_content_type', ['listing_id', 'contenttype_id'])
def backwards(self, orm):
# Removing M2M table for field content_type on 'Listing'
db.delete_table('foundry_listing_content_type')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'category.category': {
'Meta': {'ordering': "('title',)", 'object_name': 'Category'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'category.tag': {
'Meta': {'ordering': "('title',)", 'object_name': 'Tag'},
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'foundry.blogpost': {
'Meta': {'ordering': "('-created',)", 'object_name': 'BlogPost', '_ormbases': ['jmbo.ModelBase']},
'content': ('ckeditor.fields.RichTextField', [], {}),
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.chatroom': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ChatRoom', '_ormbases': ['jmbo.ModelBase']},
'modelbase_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['jmbo.ModelBase']", 'unique': 'True', 'primary_key': 'True'})
},
'foundry.column': {
'Meta': {'object_name': 'Column'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'row': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Row']"}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '8'})
},
'foundry.country': {
'Meta': {'ordering': "('title',)", 'object_name': 'Country'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'minimum_age': ('django.db.models.fields.PositiveIntegerField', [], {'default': '18'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
'foundry.defaultavatar': {
'Meta': {'object_name': 'DefaultAvatar'},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'defaultavatar_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.foundrycomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'FoundryComment', '_ormbases': ['comments.Comment']},
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'in_reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.FoundryComment']", 'null': 'True', 'blank': 'True'})
},
'foundry.link': {
'Meta': {'object_name': 'Link'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'})
},
'foundry.listing': {
'Meta': {'object_name': 'Listing'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['jmbo.ModelBase']", 'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'count': ('django.db.models.fields.IntegerField', [], {}),
'display_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'style': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'foundry.member': {
'Meta': {'object_name': 'Member', '_ormbases': ['auth.User']},
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'member_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'mobile_number': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'foundry.menu': {
'Meta': {'object_name': 'Menu'},
'display_title': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.menulinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'MenuLinkPosition'},
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'menu': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Menu']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.navbar': {
'Meta': {'object_name': 'Navbar'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'foundry.navbarlinkposition': {
'Meta': {'ordering': "('position',)", 'object_name': 'NavbarLinkPosition'},
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Link']"}),
'navbar': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Navbar']"}),
'position': ('django.db.models.fields.IntegerField', [], {})
},
'foundry.page': {
'Meta': {'object_name': 'Page'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_homepage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'foundry.row': {
'Meta': {'object_name': 'Row'},
'block_name': ('django.db.models.fields.CharField', [], {'default': "'content'", 'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Page']"})
},
'foundry.tile': {
'Meta': {'object_name': 'Tile'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'column': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['foundry.Column']"}),
'condition_expression': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'enable_ajax': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'target_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tile_target_content_type'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'target_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'view_name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
'jmbo.modelbase': {
'Meta': {'ordering': "('-created',)", 'object_name': 'ModelBase'},
'anonymous_comments': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'anonymous_likes': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Category']", 'null': 'True', 'blank': 'True'}),
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'comments_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comments_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'crop_from': ('django.db.models.fields.CharField', [], {'default': "'center'", 'max_length': '10', 'blank': 'True'}),
'date_taken': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'effect': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modelbase_related'", 'null': 'True', 'to': "orm['photologue.PhotoEffect']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'likes_closed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'likes_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'primary_category': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'primary_modelbase_set'", 'null': 'True', 'to': "orm['category.Category']"}),
'publish_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['publisher.Publisher']", 'null': 'True', 'blank': 'True'}),
'retract_on': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255', 'db_index': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unpublished'", 'max_length': '32', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['category.Tag']", 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'photologue.photoeffect': {
'Meta': {'object_name': 'PhotoEffect'},
'background_color': ('django.db.models.fields.CharField', [], {'default': "'#FFFFFF'", 'max_length': '7'}),
'brightness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'color': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'contrast': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filters': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'reflection_size': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'reflection_strength': ('django.db.models.fields.FloatField', [], {'default': '0.59999999999999998'}),
'sharpness': ('django.db.models.fields.FloatField', [], {'default': '1.0'}),
'transpose_method': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'})
},
'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'secretballot.vote': {
'Meta': {'unique_together': "(('token', 'content_type', 'object_id'),)", 'object_name': 'Vote'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'vote': ('django.db.models.fields.SmallIntegerField', [], {})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['foundry']
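# Sketch of how a South schema migration like this one is typically generated and
# applied; the commands below assume a standard Django project with South installed
# and are illustrative, not taken from this repository:
#   python manage.py schemamigration foundry --auto   # writes files such as 0008_auto.py
#   python manage.py migrate foundry                  # applies forwards()
#   python manage.py migrate foundry 0007             # rolls back, running backwards()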
| bsd-3-clause | 8,287,324,540,936,429,000 | 80.567944 | 195 | 0.547117 | false | 3.665832 | false | false | false |
cpennington/edx-platform | common/lib/xmodule/xmodule/library_content_module.py | 1 | 28855 |
# -*- coding: utf-8 -*-
"""
LibraryContent: The XBlock used to include blocks from a library in a course.
"""
import json
import logging
import random
from copy import copy
from gettext import ngettext
from pkg_resources import resource_string
import six
from capa.responsetypes import registry
from lazy import lazy
from lxml import etree
from opaque_keys.edx.locator import LibraryLocator
from six import text_type
from six.moves import zip
from web_fragments.fragment import Fragment
from webob import Response
from xblock.core import XBlock
from xblock.fields import Integer, List, Scope, String
from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW, XModule
from .mako_module import MakoModuleDescriptor
from .xml_module import XmlDescriptor
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text
logger = logging.getLogger(__name__)
ANY_CAPA_TYPE_VALUE = 'any'
def _get_human_name(problem_class):
"""
Get the human-friendly name for a problem type.
"""
return getattr(problem_class, 'human_name', problem_class.__name__)
def _get_capa_types():
"""
Gets capa types tags and labels
"""
capa_types = {tag: _get_human_name(registry.get_class_for_tag(tag)) for tag in registry.registered_tags()}
return [{'value': ANY_CAPA_TYPE_VALUE, 'display_name': _('Any Type')}] + sorted([
{'value': capa_type, 'display_name': caption}
for capa_type, caption in capa_types.items()
], key=lambda item: item.get('display_name'))
class LibraryContentFields(object):
"""
Fields for the LibraryContentModule.
Separated out for now because they need to be added to the module and the
descriptor.
"""
# Please note the display_name of each field below is used in
# common/test/acceptance/pages/studio/library.py:StudioLibraryContentXBlockEditModal
# to locate input elements - keep synchronized
display_name = String(
display_name=_("Display Name"),
help=_("The display name for this component."),
default="Randomized Content Block",
scope=Scope.settings,
)
source_library_id = String(
display_name=_("Library"),
help=_("Select the library from which you want to draw content."),
scope=Scope.settings,
values_provider=lambda instance: instance.source_library_values(),
)
source_library_version = String(
# This is a hidden field that stores the version of source_library when we last pulled content from it
display_name=_("Library Version"),
scope=Scope.settings,
)
mode = String(
display_name=_("Mode"),
help=_("Determines how content is drawn from the library"),
default="random",
values=[
{"display_name": _("Choose n at random"), "value": "random"}
# Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
# Future addition: manually selected blocks
],
scope=Scope.settings,
)
max_count = Integer(
display_name=_("Count"),
help=_("Enter the number of components to display to each student."),
default=1,
scope=Scope.settings,
)
capa_type = String(
display_name=_("Problem Type"),
help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
default=ANY_CAPA_TYPE_VALUE,
values=_get_capa_types(),
scope=Scope.settings,
)
selected = List(
# This is a list of (block_type, block_id) tuples used to record
# which random/first set of matching blocks was selected per user
default=[],
scope=Scope.user_state,
)
has_children = True
@property
def source_library_key(self):
"""
Convenience method to get the library ID as a LibraryLocator and not just a string
"""
return LibraryLocator.from_string(self.source_library_id)
#pylint: disable=abstract-method
@XBlock.wants('library_tools') # Only needed in studio
class LibraryContentModule(LibraryContentFields, XModule, StudioEditableModule):
"""
An XBlock whose children are chosen dynamically from a content library.
Can be used to create randomized assessments among other things.
Note: technically, all matching blocks from the content library are added
as children of this block, but only a subset of those children are shown to
any particular student.
"""
@classmethod
def make_selection(cls, selected, children, max_count, mode):
"""
Dynamically selects block_ids indicating which of the possible children are displayed to the current user.
Arguments:
selected - list of (block_type, block_id) tuples assigned to this student
children - children of this block
max_count - number of components to display to each student
mode - how content is drawn from the library
Returns:
A dict containing the following keys:
'selected' (set) of (block_type, block_id) tuples assigned to this student
'invalid' (set) of dropped (block_type, block_id) tuples that are no longer valid
'overlimit' (set) of dropped (block_type, block_id) tuples that were previously selected
'added' (set) of newly added (block_type, block_id) tuples
"""
rand = random.Random()
selected = set(tuple(k) for k in selected) # set of (block_type, block_id) tuples assigned to this student
# Determine which of our children we will show:
valid_block_keys = set([(c.block_type, c.block_id) for c in children])
# Remove any selected blocks that are no longer valid:
invalid_block_keys = (selected - valid_block_keys)
if invalid_block_keys:
selected -= invalid_block_keys
# If max_count has been decreased, we may have to drop some previously selected blocks:
overlimit_block_keys = set()
if len(selected) > max_count:
num_to_remove = len(selected) - max_count
overlimit_block_keys = set(rand.sample(selected, num_to_remove))
selected -= overlimit_block_keys
# Do we have enough blocks now?
num_to_add = max_count - len(selected)
added_block_keys = None
if num_to_add > 0:
# We need to select [more] blocks to display to this user:
pool = valid_block_keys - selected
if mode == "random":
num_to_add = min(len(pool), num_to_add)
added_block_keys = set(rand.sample(pool, num_to_add))
# We now have the correct n random children to show for this user.
else:
raise NotImplementedError("Unsupported mode.")
selected |= added_block_keys
return {
'selected': selected,
'invalid': invalid_block_keys,
'overlimit': overlimit_block_keys,
'added': added_block_keys,
}
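# A minimal sketch of how make_selection() behaves. `FakeChild` and the literal
# block keys below are assumptions made purely for illustration; they are not part
# of the real runtime.
def _example_make_selection():
    from collections import namedtuple
    FakeChild = namedtuple('FakeChild', ['block_type', 'block_id'])
    children = [FakeChild('problem', 'p1'), FakeChild('problem', 'p2'), FakeChild('problem', 'p3')]
    previously_selected = [('problem', 'p1'), ('problem', 'gone')]  # 'gone' was removed from the library
    result = LibraryContentModule.make_selection(previously_selected, children, max_count=2, mode="random")
    # Expect: result['invalid'] == set([('problem', 'gone')]) and len(result['selected']) == 2
    return result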
def _publish_event(self, event_name, result, **kwargs):
"""
Helper method to publish an event for analytics purposes
"""
event_data = {
"location": six.text_type(self.location),
"result": result,
"previous_count": getattr(self, "_last_event_result_count", len(self.selected)),
"max_count": self.max_count,
}
event_data.update(kwargs)
self.runtime.publish(self, "edx.librarycontentblock.content.{}".format(event_name), event_data)
self._last_event_result_count = len(result) # pylint: disable=attribute-defined-outside-init
@classmethod
def publish_selected_children_events(cls, block_keys, format_block_keys, publish_event):
"""
Helper method for publishing events when children blocks are
selected/updated for a user. This helper is also used by
the ContentLibraryTransformer.
Arguments:
block_keys -
A dict describing which events to publish (add or
remove), see `make_selection` above for format details.
format_block_keys -
A function to convert block keys to the format expected
by publish_event. Must have the signature:
[(block_type, block_id)] -> T
Where T is a collection of block keys as accepted by
`publish_event`.
publish_event -
Function that handles the actual publishing. Must have
the signature:
<'removed'|'assigned'> -> result:T -> removed:T -> reason:str -> None
Where T is a collection of block_keys as returned by
`format_block_keys`.
"""
if block_keys['invalid']:
# reason "invalid" means deleted from library or a different library is now being used.
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['invalid']),
reason="invalid"
)
if block_keys['overlimit']:
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['overlimit']),
reason="overlimit"
)
if block_keys['added']:
publish_event(
"assigned",
result=format_block_keys(block_keys['selected']),
added=format_block_keys(block_keys['added'])
)
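# A minimal sketch of a publish_event callable satisfying the signature documented
# above; using the module logger as the "publisher" is an assumption for illustration only.
def _example_publish_event(event_name, **kwargs):
    logger.info("edx.librarycontentblock.content.%s %r", event_name, kwargs)
# Hypothetical call, passing block keys through unchanged as the format function:
#   LibraryContentModule.publish_selected_children_events(block_keys, list, _example_publish_event)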
def selected_children(self):
"""
Returns a set() of block_ids indicating which of the possible children
have been selected to display to the current user.
This reads and updates the "selected" field, which has user_state scope.
Note: self.selected and the return value contain block_ids. To get
actual BlockUsageLocators, it is necessary to use self.children,
because the block_ids alone do not specify the block type.
"""
if hasattr(self, "_selected_set"):
# Already done:
return self._selected_set # pylint: disable=access-member-before-definition
block_keys = self.make_selection(self.selected, self.children, self.max_count, "random") # pylint: disable=no-member
# Publish events for analytics purposes:
lib_tools = self.runtime.service(self, 'library_tools')
format_block_keys = lambda keys: lib_tools.create_block_analytics_summary(self.location.course_key, keys)
self.publish_selected_children_events(
block_keys,
format_block_keys,
self._publish_event,
)
# Save our selections to the user state, to ensure consistency:
selected = block_keys['selected']
self.selected = list(selected) # TODO: this doesn't save from the LMS "Progress" page.
# Cache the results
self._selected_set = selected # pylint: disable=attribute-defined-outside-init
return selected
def _get_selected_child_blocks(self):
"""
Generator returning XBlock instances of the children selected for the
current user.
"""
for block_type, block_id in self.selected_children():
child = self.runtime.get_block(self.location.course_key.make_usage_key(block_type, block_id))
if child is None:
logger.info("Child not found for %s %s", str(block_type), str(block_id))
yield child
def student_view(self, context):
fragment = Fragment()
contents = []
child_context = {} if not context else copy(context)
for child in self._get_selected_child_blocks():
for displayable in child.displayable_items():
rendered_child = displayable.render(STUDENT_VIEW, child_context)
fragment.add_fragment_resources(rendered_child)
contents.append({
'id': text_type(displayable.location),
'content': rendered_child.content,
})
fragment.add_content(self.system.render_template('vert_module.html', {
'items': contents,
'xblock_context': context,
'show_bookmark_button': False,
'watched_completable_blocks': set(),
'completion_delay_ms': None,
}))
return fragment
def validate(self):
"""
Validates the state of this Library Content Module Instance.
"""
return self.descriptor.validate()
def author_view(self, context):
"""
Renders the Studio views.
Normal studio view: If block is properly configured, displays library status summary
Studio container view: displays a preview of all possible children.
"""
fragment = Fragment()
root_xblock = context.get('root_xblock')
is_root = root_xblock and root_xblock.location == self.location
if is_root:
# User has clicked the "View" link. Show a preview of all possible children:
if self.children: # pylint: disable=no-member
fragment.add_content(self.system.render_template("library-block-author-preview-header.html", {
'max_count': self.max_count,
'display_name': self.display_name or self.url_name,
}))
context['can_edit_visibility'] = False
context['can_move'] = False
self.render_children(context, fragment, can_reorder=False, can_add=False)
# else: When shown on a unit page, don't show any sort of preview -
# just the status of this block in the validation area.
# The following JS is used to make the "Update now" button work on the unit page and the container view:
fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js'))
fragment.initialize_js('LibraryContentAuthorView')
return fragment
def get_child_descriptors(self):
"""
Return only the subset of our children relevant to the current student.
"""
return list(self._get_selected_child_blocks())
@XBlock.wants('user')
@XBlock.wants('library_tools') # Only needed in studio
@XBlock.wants('studio_user_permissions') # Only available in studio
class LibraryContentDescriptor(LibraryContentFields, MakoModuleDescriptor, XmlDescriptor, StudioEditableDescriptor):
"""
Descriptor class for LibraryContentModule XBlock.
"""
resources_dir = 'assets/library_content'
module_class = LibraryContentModule
mako_template = 'widgets/metadata-edit.html'
js = {'js': [resource_string(__name__, 'js/src/vertical/edit.js')]}
js_module_name = "VerticalDescriptor"
show_in_read_only_mode = True
@property
def non_editable_metadata_fields(self):
non_editable_fields = super(LibraryContentDescriptor, self).non_editable_metadata_fields
# The only supported mode is currently 'random'.
# Add the mode field to non_editable_metadata_fields so that it doesn't
# render in the edit form.
non_editable_fields.extend([LibraryContentFields.mode, LibraryContentFields.source_library_version])
return non_editable_fields
@lazy
def tools(self):
"""
Grab the library tools service or raise an error.
"""
return self.runtime.service(self, 'library_tools')
def get_user_id(self):
"""
Get the ID of the current user.
"""
user_service = self.runtime.service(self, 'user')
if user_service:
# May be None when creating bok choy test fixtures
user_id = user_service.get_current_user().opt_attrs.get('edx-platform.user_id', None)
else:
user_id = None
return user_id
@XBlock.handler
def refresh_children(self, request=None, suffix=None): # pylint: disable=unused-argument
"""
Refresh children:
This method is to be used when any of the libraries that this block
references have been updated. It will re-fetch all matching blocks from
the libraries, and copy them as children of this block. The children
will be given new block_ids, but the definition ID used should be the
exact same definition ID used in the library.
This method will update this block's 'source_library_version' field to store
the version number of the libraries used, so we can easily determine whether
this block is up to date or not.
"""
user_perms = self.runtime.service(self, 'studio_user_permissions')
user_id = self.get_user_id()
if not self.tools:
return Response("Library Tools unavailable in current runtime.", status=400)
self.tools.update_children(self, user_id, user_perms)
return Response()
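# Hedged sketch of how this handler is reached from Studio's client side; the real
# wiring lives in public/js/library_content_edit.js, and the jQuery call below is an
# illustrative assumption rather than a copy of that code:
#   $.post(runtime.handlerUrl(element, 'refresh_children')).done(refreshView);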
# Copy over any overridden settings the course author may have applied to the blocks.
def _copy_overrides(self, store, user_id, source, dest):
"""
Copy any overrides the user has made on blocks in this library.
"""
for field in six.itervalues(source.fields):
if field.scope == Scope.settings and field.is_set_on(source):
setattr(dest, field.name, field.read_from(source))
if source.has_children:
source_children = [self.runtime.get_block(source_key) for source_key in source.children]
dest_children = [self.runtime.get_block(dest_key) for dest_key in dest.children]
for source_child, dest_child in zip(source_children, dest_children):
self._copy_overrides(store, user_id, source_child, dest_child)
store.update_item(dest, user_id)
def studio_post_duplicate(self, store, source_block):
"""
Used by the studio after basic duplication of a source block. We handle the children
ourselves, because we have to properly reference the library upstream and set the overrides.
Otherwise we'll end up losing data on the next refresh.
"""
# The first task will be to refresh our copy of the library to generate the children.
# We must do this at the currently set version of the library block. Otherwise we may not have
# exactly the same children-- someone may be duplicating an out of date block, after all.
user_id = self.get_user_id()
user_perms = self.runtime.service(self, 'studio_user_permissions')
if not self.tools:
raise RuntimeError("Library tools unavailable, duplication will not be sane!")
self.tools.update_children(self, user_id, user_perms, version=self.source_library_version)
self._copy_overrides(store, user_id, source_block, self)
# Children have been handled.
return True
def _validate_library_version(self, validation, lib_tools, version, library_key):
"""
Validates library version
"""
latest_version = lib_tools.get_library_version(library_key)
if latest_version is not None:
if version is None or version != six.text_type(latest_version):
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u'This component is out of date. The library has new content.'),
# TODO: change this to action_runtime_event='...' once the unit page supports that feature.
# See https://openedx.atlassian.net/browse/TNL-993
action_class='library-update-btn',
# Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes)
action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻")
)
)
return False
else:
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(u'Library is invalid, corrupt, or has been deleted.'),
action_class='edit-button',
action_label=_(u"Edit Library List.")
)
)
return False
return True
def _set_validation_error_if_empty(self, validation, summary):
""" Helper method to only set validation summary if it's empty """
if validation.empty:
validation.set_summary(summary)
def validate(self):
"""
Validates the state of this Library Content Module Instance. This
is the override of the general XBlock method, and it will also ask
its superclass to validate.
"""
validation = super(LibraryContentDescriptor, self).validate()
if not isinstance(validation, StudioValidation):
validation = StudioValidation.copy(validation)
library_tools = self.runtime.service(self, "library_tools")
if not (library_tools and library_tools.can_use_library_content(self)):
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.ERROR,
_(
u"This course does not support content libraries. "
u"Contact your system administrator for more information."
)
)
)
return validation
if not self.source_library_id:
validation.set_summary(
StudioValidationMessage(
StudioValidationMessage.NOT_CONFIGURED,
_(u"A library has not yet been selected."),
action_class='edit-button',
action_label=_(u"Select a Library.")
)
)
return validation
lib_tools = self.runtime.service(self, 'library_tools')
self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key)
# Note: we assume refresh_children() has been called
# since the last time fields like source_library_id or capa_types were changed.
matching_children_count = len(self.children) # pylint: disable=no-member
if matching_children_count == 0:
self._set_validation_error_if_empty(
validation,
StudioValidationMessage(
StudioValidationMessage.WARNING,
_(u'There are no matching problem types in the specified libraries.'),
action_class='edit-button',
action_label=_(u"Select another problem type.")
)
)
if matching_children_count < self.max_count:
self._set_validation_error_if_empty(
validation,
StudioValidationMessage(
StudioValidationMessage.WARNING,
(
ngettext(
u'The specified library is configured to fetch {count} problem, ',
u'The specified library is configured to fetch {count} problems, ',
self.max_count
) +
ngettext(
u'but there is only {actual} matching problem.',
u'but there are only {actual} matching problems.',
matching_children_count
)
).format(count=self.max_count, actual=matching_children_count),
action_class='edit-button',
action_label=_(u"Edit the library configuration.")
)
)
return validation
def source_library_values(self):
"""
Return a list of possible values for self.source_library_id
"""
lib_tools = self.runtime.service(self, 'library_tools')
user_perms = self.runtime.service(self, 'studio_user_permissions')
all_libraries = [
(key, name) for key, name in lib_tools.list_available_libraries()
if user_perms.can_read(key) or self.source_library_id == six.text_type(key)
]
all_libraries.sort(key=lambda entry: entry[1]) # Sort by name
if self.source_library_id and self.source_library_key not in [entry[0] for entry in all_libraries]:
all_libraries.append((self.source_library_id, _(u"Invalid Library")))
all_libraries = [(u"", _("No Library Selected"))] + all_libraries
values = [{"display_name": name, "value": six.text_type(key)} for key, name in all_libraries]
return values
def editor_saved(self, user, old_metadata, old_content):
"""
If source_library_id or capa_type has been edited, refresh_children automatically.
"""
old_source_library_id = old_metadata.get('source_library_id', [])
if (old_source_library_id != self.source_library_id or
old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type):
try:
self.refresh_children()
except ValueError:
pass # The validation area will display an error message, no need to do anything now.
def has_dynamic_children(self):
"""
Inform the runtime that our children vary per-user.
See get_child_descriptors() above
"""
return True
def get_content_titles(self):
"""
Returns a list of friendly titles for our selected children only; without
this, all possible children's titles would be seen in the sequence bar in
the LMS.
This overrides the get_content_titles method included in x_module by default.
"""
titles = []
for child in self._xmodule.get_child_descriptors():
titles.extend(child.get_content_titles())
return titles
@classmethod
def definition_from_xml(cls, xml_object, system):
children = [
system.process_xml(etree.tostring(child)).scope_ids.usage_id
for child in xml_object.getchildren()
]
definition = {
attr_name: json.loads(attr_value)
for attr_name, attr_value in xml_object.attrib.items()
}
return definition, children
def definition_to_xml(self, resource_fs):
""" Exports Library Content Module to XML """
xml_object = etree.Element('library_content')
for child in self.get_children():
self.runtime.add_block_as_child_node(child, xml_object)
# Set node attributes based on our fields.
for field_name, field in six.iteritems(self.fields): # pylint: disable=no-member
if field_name in ('children', 'parent', 'content'):
continue
if field.is_set_on(self):
xml_object.set(field_name, six.text_type(field.read_from(self)))
return xml_object
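# Illustrative only -- the attribute values below are assumptions, but the node
# emitted by definition_to_xml() has roughly this shape:
#   <library_content display_name="Randomized Content Block" max_count="2"
#       source_library_id="library-v1:SomeOrg+SomeLibrary">
#       ...one serialized child node per block copied from the library...
#   </library_content>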
class LibrarySummary(object):
"""
A library summary object which contains the fields required for library listing on studio.
"""
def __init__(self, library_locator, display_name):
"""
Initialize LibrarySummary
Arguments:
library_locator (LibraryLocator): LibraryLocator object of the library.
display_name (unicode): display name of the library.
"""
self.display_name = display_name if display_name else _(u"Empty")
self.id = library_locator # pylint: disable=invalid-name
self.location = library_locator.make_usage_key('library', 'library')
@property
def display_org_with_default(self):
"""
Org display names are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'org' field from the key.
"""
return self.location.library_key.org
@property
def display_number_with_default(self):
"""
Display numbers are not implemented. This just provides API compatibility with CourseDescriptor.
Always returns the raw 'library' field from the key.
"""
return self.location.library_key.library
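# A minimal usage sketch; the LibraryLocator arguments are assumed for illustration:
#   from opaque_keys.edx.locator import LibraryLocator
#   summary = LibrarySummary(LibraryLocator(org='SomeOrg', library='SomeLib'), u'Sample Library')
#   summary.display_org_with_default       # -> 'SomeOrg'
#   summary.display_number_with_default    # -> 'SomeLib'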
| agpl-3.0 | 5,388,732,644,370,925,000 | 40.452586 | 125 | 0.614121 | false | 4.475105 | false | false | false |
thetomcraig/redwood | examples/tcp-ip/serverClean.py | 1 | 7235 |
#!/usr/bin/python
from PyQt4 import QtGui
from PyQt4 import QtCore
import time
import sys
import math
import myGui
import functions
import SocketServer
import threading
import socket
import random
import parameters
import datetime
from collections import deque
params=parameters.getParams()
socket.setdefaulttimeout(2)
class Server(QtGui.QWidget):
def __init__(self,server, bind_and_activate=True):
super(Server, self).__init__()
# Activate the server; this will keep running until you
# interrupt the program with Ctrl-C
HOST=params['serverIP']
PORT=params['port']
self.server=server
self.server.queue=[]
t = threading.Thread(target=self.server.serve_forever)
t.setDaemon(True) # don't hang on exit
t.start()
self.initialize()
def initialize(self):
self.status=0
self.experimentStarted=0
self.playerDict={}
self.makeQueue()
self.assignGroups()
self.makeDataFiles()
self.initUI()
def makeQueue(self):
self.queue={}
self.queues=['unimportant','important','linksToChange','resend']
for Q in self.queues:
self.queue[Q]=deque([])
def assignGroups(self):
self.totalPlayers=params['groups']*params['playersPerGroup']
this=[]
j=1
for group in range(1,params['groups']+1):
for player in range(1,params['playersPerGroup']+1):
#ComputerNumber,GroupNumber,PlayerNumberInGroup
this.append([j,group,player])
j=j+1
random.shuffle(this)
self.groupParameters={}
for group in range(1,params['groups']+1):
self.groupParameters[group]={}
self.groupParameters[group]['timeVectorNumber']=[]
self.groupParameters[group]['timeVectorReveal']=[]
for period in range(1,params['totalPeriods']+1):
self.timeVectorNumber,self.timeVectorReveal=functions.getInvestments(params['p0'],params['mu'])
self.groupParameters[group]['timeVectorNumber'].append(self.timeVectorNumber)
self.groupParameters[group]['timeVectorReveal'].append(self.timeVectorReveal)
self.groupAssignments=this
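# Worked example of the table built above, using hypothetical parameters: with
# params['groups']==2 and params['playersPerGroup']==2, groupAssignments holds
# (before shuffling)
#   [[1, 1, 1], [2, 1, 2], [3, 2, 1], [4, 2, 2]]
# i.e. one [computerNumber, groupNumber, playerNumberInGroup] entry per subject;
# random.shuffle() then randomizes which connecting client receives which slot.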
def makeDataFiles(self):
self.dataFile=datetime.datetime.now().strftime("sessions/%Y%m%d-%H%M%S/data.csv")
self.playerFile=self.dataFile.replace("data","players")
self.parameterFile=self.dataFile.replace("data.csv","parameters.py")
myGui.ensure_dir(self.dataFile)
file = open(self.playerFile,'w')
file.writelines("computerNumber,subjectNumber,groupNumber,IP,localStartTime,payoffPoints,payoffDollars\n")
file.close()
file = open(self.dataFile,'a')
file.writelines("group,linkStart,linkEnd,addLink,cost,globalTime\n")
file.close()
filename='parameters.py'
file = open(filename,'r')
fileData=file.read()
file.close()
file = open(self.parameterFile,'w')
file.writelines(fileData)
file.close()
def initUI(self):
self.currentPage="Overview"
self.pageNavigator()
def pageNavigator(self):
if self.currentPage=="Overview":
self.makePageOverview()
def makePageOverview(self):
#Titles
self.statusLabel = QtGui.QLabel('Waiting For People To Register')
self.statusLabel.setStyleSheet('QLabel {color: black;font-size: 24pt; font-family: Courier;}')
self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)
#Tables
self.statusTable = myGui.Table()
self.statusTable.data=[]
for player in range(self.totalPlayers):
self.statusTable.data.append(["","","","","",""])
self.statusTable.columnWidths=[100,100,100,100,100,100]
self.statusTable.rowHeight=50
self.statusTable.columnHeaders=['Computer','IP Address','Group','GroupID','Status','Other']
self.statusTable.updateTable()
self.grid = QtGui.QGridLayout()
self.button=myGui.ButtonDoubleClick()
self.button.title1="Start Experiment"
self.button.title2="You Sure?!?"
self.button.title3="Started!"
self.grid.addWidget(self.statusLabel,1,1,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.statusTable,2,1,QtCore.Qt.AlignCenter)
self.grid.addWidget(self.button,3,1,QtCore.Qt.AlignCenter)
self.grid.setRowMinimumHeight(2,600)
self.grid.setRowMinimumHeight(3,100)
self.setLayout(self.grid)
self.show()
self.checkStatus()
def queueManager(self):
while len(self.server.queue)>0:
k=self.server.queue.pop()
if k[1]=="getPlayerNumber":
self.queue['important'].append(k)
for Q in self.queues:
while len(self.queue[Q])>0:
thisMessage=self.queue[Q].popleft()
messageIp=thisMessage[0]
messageType=thisMessage[1]
messageValue=thisMessage[2]
if messageType=="getPlayerNumber":
self.getPlayerNumber(messageIp)
elif messageType=="periodSummary":
self.manageData(messageIp,messageValue)
def manageData(self,messageIp,messageValue):
periodSummary=eval(messageValue)
#subjectID,group,groupID,period,
def getPlayerNumber(self,messageIp):
print "getting new number"
if messageIp not in self.playerDict:
this=self.groupAssignments.pop()
self.playerDict[messageIp]={}
self.playerDict[messageIp]['computerID']=this[0]
self.playerDict[messageIp]['group']=this[1]
self.playerDict[messageIp]['groupID']=this[2]
m=[messageIp,params['port'],"assignPlayerNumber",self.playerDict[messageIp]['computerID']]
this=myGui.sendMessage(m[0],m[1],m[2],m[3])
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][0]=str(self.playerDict[messageIp]['computerID'])
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][1]=messageIp
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][2]=str(self.playerDict[messageIp]['group'])
self.statusTable.data[self.playerDict[messageIp]['computerID']-1][3]=str(self.playerDict[messageIp]['groupID'])
self.statusTable.updateTable()
def checkStatus(self):
#print "check status %s"%(time.time())
self.queueManager()
if self.button.stage==3 and self.experimentStarted==0:
#Experiment has started:
self.experimentStarted=1
self.period=0
self.periodStage=0
elif self.experimentStarted==1:
if self.periodStage==0:
#Start Period
for ip in self.playerDict:
group=self.playerDict[ip]['group']
timesString="["
numbersString="["
for j,k in zip(self.groupParameters[group]['timeVectorNumber'][self.period-1],self.groupParameters[group]['timeVectorReveal'][self.period-1]):
timesString=timesString+"%.02f,"%(k)
numbersString=numbersString+"%s,"%(j)
timesString=timesString[:-1]+"]"
numbersString=numbersString[:-1]+"]"
m=[ip,params['port'],"periodStarted",[self.period,timesString,numbersString]]
this=myGui.sendMessage(m[0],m[1],m[2],m[3])
self.periodStage=1
elif self.periodStage==1:
#Wait for all responses
#m=[ip,params['port'],"periodFinished",["SUMMARY STATS HERE"]]
print "wainting"
elif self.periodStage==2:
#Finish period
m=[ip,params['port'],"periodFinished",["SUMMARY STATS HERE"]]
QtCore.QTimer.singleShot(10,self.checkStatus)
def main():
HOST, PORT = "", 9989
# Create the server, binding to localhost on port 9999
server = SocketServer.TCPServer((HOST, PORT),myGui.MyTCPHandler, bind_and_activate=True)
app = QtGui.QApplication(sys.argv)
window = Server(server)
###
server.allow_reuse_address = True
window.allow_reuse_address = True
###
window.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| isc | -6,759,639,312,788,060,000 | 29.787234 | 147 | 0.719281 | false | 3.068278 | false | false | false |
cjgrady/stinkbait | reports/providerAggregate.py | 1 | 3807 |
"""
@summary: Creates a report with comparative statistics across providers
@author: CJ Grady
@version: 1.0
@status: alpha
@license: gpl2
@copyright: Copyright (C) 2014, University of Kansas Center for Research
Lifemapper Project, lifemapper [at] ku [dot] edu,
Biodiversity Institute,
1345 Jayhawk Boulevard, Lawrence, Kansas, 66045, USA
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or (at
your option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
"""
from modules.providers import PROVIDERS
from results.resultsRetrieverNumpy import getNumberOfRareTaxaByPoints, \
getNumberOfRareTaxaByProviders, getNumberOfTaxaRepresented, \
getNumberOfUniqueTaxa, providerOfRareTaxa, providerRankedInTopX, \
subsetResults
outFn = "/home/cjgrady/workspace/occDataMiningPOC/data/reports/fish/providers.html"
if __name__ == "__main__":
numUnique = []
numRareSpecies = []
numRareProv = []
numTaxa = []
results = subsetResults()
for k in PROVIDERS:
print PROVIDERS[k]
nUnique = getNumberOfUniqueTaxa(results, providerId=k)
nRareSp = getNumberOfRareTaxaByPoints(results, providerId=k)
nRareP = getNumberOfRareTaxaByProviders(results, providerId=k)
numTax = getNumberOfTaxaRepresented(results, providerId=k)
numUnique.append((nUnique, k))
numRareSpecies.append((nRareSp, k))
numRareProv.append((nRareP, k))
numTaxa.append((numTax, k))
numUnique.sort(reverse=True)
numRareSpecies.sort(reverse=True)
numRareProv.sort(reverse=True)
numTaxa.sort(reverse=True)
with open(outFn, 'w') as outF:
outF.write('<html>\n')
outF.write(' <head>\n')
outF.write(' <title>Providers report</title>\n')
outF.write(' </head>\n')
outF.write(' <body>\n')
# Unique
outF.write(' <h1>Most unique taxa</h1>\n')
outF.write(' <ol>\n')
for n, k in numUnique:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
# Rare by species
outF.write(' <h1>Most rare species (<= 10 points)</h1>\n')
outF.write(' <ol>\n')
for n, k in numRareSpecies:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
# Rare by provider
outF.write(' <h1>Most rare species (<= 5 providers)</h1>\n')
outF.write(' <ol>\n')
for n, k in numRareProv:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
# Number of taxa
outF.write(' <h1>Number of species</h1>\n')
outF.write(' <ol>\n')
for n, k in numTaxa:
outF.write(' <li>%s - %s</li>\n' % (PROVIDERS[k], n))
outF.write(' </ol>\n')
outF.write(' <br /><br />')
outF.write(' </body>\n')
outF.write('</html>\n')
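# The rankings above come from sorting (count, providerId) tuples in descending
# order; with made-up values:
#   >>> ranks = [(3, 'provA'), (10, 'provB'), (7, 'provC')]
#   >>> ranks.sort(reverse=True)
#   >>> ranks
#   [(10, 'provB'), (7, 'provC'), (3, 'provA')]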
| gpl-2.0 | -6,075,054,366,014,210,000 | 36.323529 | 83 | 0.589441 | false | 3.387011 | false | false | false |
dfreedman55/LearningPython | week4/exercise3.py | 1 | 2047 |
#!/usr/bin/env python
def main():
    parsestring(uptime1)
    sumsec(stats)
    parsestring(uptime2)
    sumsec(stats)
    parsestring(uptime3)
    sumsec(stats)
    parsestring(uptime4)
    sumsec(stats)
def yrs2sec(numyrs):
    seconds = numyrs * 12 * 4 * 7 * 24 * 60 * 60
    stats['years'] = seconds
def mth2sec(nummth):
    seconds = nummth * 4 * 7 * 24 * 60 * 60
    stats['months'] = seconds
def wks2sec(numwks):
    seconds = numwks * 7 * 24 * 60 * 60
    stats['weeks'] = seconds
def dys2sec(numdys):
    seconds = numdys * 24 * 60 * 60
    stats['days'] = seconds
def hrs2sec(numhrs):
    seconds = numhrs * 60 * 60
    stats['hours'] = seconds
def min2sec(nummin):
    seconds = nummin * 60
    stats['minutes'] = seconds
def sumsec(stats):
    total = 0
    for k, v in stats.items():
        if type(v) != type('string'):
            total = total + v
    print stats
    print '\n'
    print 'Total Seconds for %s is: %s' % (stats['devicename'], total)
    print '\n'
def parsestring(uptimestr):
    # Clear any counters left over from the previously parsed device so its
    # values do not leak into this device's total.
    for key in ('years', 'months', 'weeks', 'days', 'hours', 'minutes'):
        stats[key] = ''
    stats['devicename'] = uptimestr.split(' ')[0]
    if 'year' in uptimestr:
        numyrs = int(uptimestr.split('year')[0].strip().split(' ')[-1])
        yrs2sec(numyrs)
    if 'month' in uptimestr:
        nummth = int(uptimestr.split('month')[0].strip().split(' ')[-1])
        mth2sec(nummth)
    if 'week' in uptimestr:
        numwks = int(uptimestr.split('week')[0].strip().split(' ')[-1])
        wks2sec(numwks)
    if 'day' in uptimestr:
        numdys = int(uptimestr.split('day')[0].strip().split(' ')[-1])
        dys2sec(numdys)
    if 'hour' in uptimestr:
        numhrs = int(uptimestr.split('hour')[0].strip().split(' ')[-1])
        hrs2sec(numhrs)
    if 'minute' in uptimestr:
        nummin = int(uptimestr.split('minute')[0].strip().split(' ')[-1])
        min2sec(nummin)
if __name__ == '__main__':
    uptime1 = 'twb-sf-881 uptime is 6 weeks, 4 days, 2 hours, 25 minutes'
    uptime2 = '3750RJ uptime is 1 hour, 29 minutes'
    uptime3 = 'CATS3560 uptime is 8 weeks, 4 days, 18 hours, 16 minutes'
    uptime4 = 'rtr1 uptime is 5 years, 18 weeks, 8 hours, 23 minutes'
    stats = {'devicename': '', 'years': '', 'months': '', 'weeks': '', 'days': '', 'hours': '', 'minutes': ''}
    main()
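# Worked example of the conversion arithmetic for uptime2
# ('3750RJ uptime is 1 hour, 29 minutes'):
#   hours:   1 * 60 * 60 = 3600 seconds
#   minutes: 29 * 60     = 1740 seconds
#   total:   3600 + 1740 = 5340 seconds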
| gpl-2.0 | 8,404,460,959,718,783,000 | 23.662651 | 107 | 0.635076 | false | 2.52716 | false | false | false |
sgordon007/jcvi_062915 | assembly/preprocess.py | 1 | 22843 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Wrapper to trim and correct sequence data.
"""
import os
import os.path as op
import sys
import logging
from jcvi.formats.base import BaseFile, write_file, must_open
from jcvi.formats.fastq import guessoffset
from jcvi.utils.cbook import depends, human_size
from jcvi.apps.base import OptionParser, ActionDispatcher, download, \
sh, mkdir, need_update, datadir
class FastQCdata (BaseFile, dict):
def __init__(self, filename, human=False):
super(FastQCdata, self).__init__(filename)
if not op.exists(filename):
logging.debug("File `{0}` not found.".format(filename))
# Sample_RF37-1/RF37-1_GATCAG_L008_R2_fastqc =>
# RF37-1_GATCAG_L008_R2
self["Filename"] = op.basename(\
op.split(filename)[0]).rsplit("_", 1)[0]
self["Total Sequences"] = self["Sequence length"] = \
self["Total Bases"] = "na"
return
fp = open(filename)
for row in fp:
atoms = row.rstrip().split("\t")
if atoms[0] in ("#", ">"):
continue
if len(atoms) != 2:
continue
a, b = atoms
self[a] = b
ts = self["Total Sequences"]
sl = self["Sequence length"]
if "-" in sl:
a, b = sl.split("-")
sl = (int(a) + int(b)) / 2
if a == "30":
sl = int(b)
ts, sl = int(ts), int(sl)
tb = ts * sl
self["Total Sequences"] = human_size(ts).rstrip("b") if human else ts
self["Total Bases"] = human_size(tb).rstrip("b") if human else tb
def main():
actions = (
('count', 'count reads based on FASTQC results'),
('trim', 'trim reads using TRIMMOMATIC'),
('correct', 'correct reads using ALLPATHS-LG'),
('hetsmooth', 'reduce K-mer diversity using het-smooth'),
('alignextend', 'increase read length by extending based on alignments'),
('contamination', 'check reads contamination against Ecoli'),
('diginorm', 'run K-mer based normalization'),
('expand', 'expand sequences using short reads'),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def diginorm(args):
"""
%prog diginorm fastqfile
Run K-mer based normalization. Based on tutorial:
<http://ged.msu.edu/angus/diginorm-2012/tutorial.html>
Assume input is either an interleaved pairs file, or two separate files.
To set up khmer:
$ git clone git://github.com/ged-lab/screed.git
$ git clone git://github.com/ged-lab/khmer.git
$ cd screed
$ python setup.py install
$ cd ../khmer
$ make test
$ export PYTHONPATH=~/export/khmer
"""
from jcvi.formats.fastq import shuffle, pairinplace, split
from jcvi.apps.base import getfilesize
p = OptionParser(diginorm.__doc__)
p.add_option("--single", default=False, action="store_true",
help="Single end reads")
p.add_option("--tablesize", help="Memory size")
p.add_option("--npass", default="1", choices=("1", "2"),
help="How many passes of normalization")
p.set_depth(depth=50)
p.set_home("khmer", default="/usr/local/bin/")
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
if len(args) == 2:
fastq = shuffle(args + ["--tag"])
else:
fastq, = args
kh = opts.khmer_home
depth = opts.depth
PE = not opts.single
sys.path.insert(0, op.join(kh, "python"))
pf = fastq.rsplit(".", 1)[0]
keepfile = fastq + ".keep"
hashfile = pf + ".kh"
mints = 10000000
ts = opts.tablesize or ((getfilesize(fastq) / 16 / mints + 1) * mints)
norm_cmd = op.join(kh, "normalize-by-median.py")
filt_cmd = op.join(kh, "filter-abund.py")
if need_update(fastq, (hashfile, keepfile)):
cmd = norm_cmd
cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth, ts)
if PE:
cmd += " -p"
cmd += " -s {0} {1}".format(hashfile, fastq)
sh(cmd)
abundfiltfile = keepfile + ".abundfilt"
if need_update((hashfile, keepfile), abundfiltfile):
cmd = filt_cmd
cmd += " {0} {1}".format(hashfile, keepfile)
sh(cmd)
if opts.npass == "1":
seckeepfile = abundfiltfile
else:
seckeepfile = abundfiltfile + ".keep"
if need_update(abundfiltfile, seckeepfile):
cmd = norm_cmd
cmd += " -C {0} -k 20 -N 4 -x {1}".format(depth - 10, ts / 2)
cmd += " {0}".format(abundfiltfile)
sh(cmd)
if PE:
pairsfile = pairinplace([seckeepfile,
"--base={0}".format(pf + "_norm"), "--rclip=2"])
split([pairsfile])
def expand(args):
"""
%prog expand bes.fasta reads.fastq
    Expand sequences using short reads. Useful, for example, for getting BAC-end
    sequences. The template to use, in `bes.fasta`, may contain just the junction
    sequences; the reads are then aligned to it to recover the 'flanks' of such
    sequences.
"""
import math
from jcvi.formats.fasta import Fasta, SeqIO
from jcvi.formats.fastq import readlen, first, fasta
from jcvi.formats.blast import Blast
from jcvi.formats.base import FileShredder
from jcvi.apps.bowtie import align, get_samfile
from jcvi.apps.align import blast
p = OptionParser(expand.__doc__)
p.set_depth(depth=200)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
bes, reads = args
size = Fasta(bes).totalsize
rl = readlen([reads])
expected_size = size + 2 * rl
nreads = expected_size * opts.depth / rl
nreads = int(math.ceil(nreads / 1000.)) * 1000
# Attract reads
samfile, logfile = align([bes, reads, "--reorder", "--mapped",
"--firstN={0}".format(opts.firstN)])
samfile, mapped, _ = get_samfile(reads, bes, bowtie=True, mapped=True)
logging.debug("Extract first {0} reads from `{1}`.".format(nreads, mapped))
pf = mapped.split(".")[0]
pf = pf.split("-")[0]
bespf = bes.split(".")[0]
reads = pf + ".expand.fastq"
first([str(nreads), mapped, "-o", reads])
# Perform mini-assembly
fastafile = reads.rsplit(".", 1)[0] + ".fasta"
qualfile = ""
if need_update(reads, fastafile):
fastafile, qualfile = fasta([reads])
contigs = op.join(pf, "454LargeContigs.fna")
if need_update(fastafile, contigs):
cmd = "runAssembly -o {0} -cpu 8 {1}".format(pf, fastafile)
sh(cmd)
assert op.exists(contigs)
# Annotate contigs
blastfile = blast([bes, contigs])
mapping = {}
for query, b in Blast(blastfile).iter_best_hit():
mapping[query] = b
f = Fasta(contigs, lazy=True)
annotatedfasta = ".".join((pf, bespf, "fasta"))
fw = open(annotatedfasta, "w")
keys = list(Fasta(bes).iterkeys_ordered()) # keep an ordered list
recs = []
for key, v in f.iteritems_ordered():
vid = v.id
if vid not in mapping:
continue
b = mapping[vid]
subject = b.subject
rec = v.reverse_complement() if b.orientation == '-' else v
rec.id = rid = "_".join((pf, vid, subject))
rec.description = ""
recs.append((keys.index(subject), rid, rec))
recs = [x[-1] for x in sorted(recs)]
SeqIO.write(recs, fw, "fasta")
fw.close()
FileShredder([samfile, logfile, mapped, reads, fastafile, qualfile, blastfile, pf])
logging.debug("Annotated seqs (n={0}) written to `{1}`.".\
format(len(recs), annotatedfasta))
return annotatedfasta
def contamination(args):
"""
%prog contamination Ecoli.fasta genome.fasta read.fastq
Check read contamination on a folder of paired reads. Use bowtie2 to compare
the reads against:
    1. Ecoli.fasta - this will tell us the lower bound of contamination
2. genome.fasta - this will tell us the upper bound of contamination
"""
from jcvi.apps.bowtie import BowtieLogFile, align
p = OptionParser(contamination.__doc__)
p.set_firstN()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
ecoli, genome, fq = args
firstN_opt = "--firstN={0}".format(opts.firstN)
samfile, logfile = align([ecoli, fq, firstN_opt])
bl = BowtieLogFile(logfile)
lowerbound = bl.rate
samfile, logfile = align([genome, fq, firstN_opt])
bl = BowtieLogFile(logfile)
upperbound = 100 - bl.rate
median = (lowerbound + upperbound) / 2
clogfile = fq + ".Ecoli"
fw = open(clogfile, "w")
lowerbound = "{0:.1f}".format(lowerbound)
upperbound = "{0:.1f}".format(upperbound)
median = "{0:.1f}".format(median)
print >> fw, "\t".join((fq, lowerbound, median, upperbound))
print >> sys.stderr, "{0}: Ecoli contamination rate {1}-{2}".\
format(fq, lowerbound, upperbound)
fw.close()
def alignextend(args):
"""
%prog alignextend ref.fasta read.1.fastq read.2.fastq
Wrapper around AMOS alignextend.
"""
choices = "prepare,align,filter,rmdup,genreads".split(",")
p = OptionParser(alignextend.__doc__)
p.add_option("--nosuffix", default=False, action="store_true",
help="Do not add /1/2 suffix to the read [default: %default]")
p.add_option("--rc", default=False, action="store_true",
help="Reverse complement the reads before alignment")
p.add_option("--len", default=100, type="int",
help="Extend to this length")
p.add_option("--stage", default="prepare", choices=choices,
help="Start from certain stage")
p.add_option("--dup", default=10, type="int",
help="Filter duplicates with coordinates within this distance")
p.add_option("--maxdiff", default=1, type="int",
help="Maximum number of differences")
p.set_home("amos")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
ref, r1, r2 = args
pf = op.basename(r1).split(".")[0]
cmd = op.join(opts.amos_home, "src/Experimental/alignextend.pl")
if not opts.nosuffix:
cmd += " -suffix"
bwa_idx = "{0}.ref.fa.sa".format(pf)
if not need_update(ref, bwa_idx):
cmd += " -noindex"
cmd += " -threads {0}".format(opts.cpus)
offset = guessoffset([r1])
if offset == 64:
cmd += " -I"
if opts.rc:
cmd += " -rc"
cmd += " -allow -len {0} -dup {1}".format(opts.len, opts.dup)
cmd += " -min {0} -max {1}".format(2 * opts.len, 20 * opts.len)
cmd += " -maxdiff {0}".format(opts.maxdiff)
cmd += " -stage {0}".format(opts.stage)
cmd += " ".join(("", pf, ref, r1, r2))
sh(cmd)
def count(args):
"""
%prog count *.gz
Count reads based on FASTQC results. FASTQC needs to be run on all the input
data given before running this command.
"""
from jcvi.utils.table import loadtable, write_csv
p = OptionParser(count.__doc__)
p.add_option("--dir",
help="Sub-directory where FASTQC was run [default: %default]")
p.add_option("--human", default=False, action="store_true",
help="Human friendly numbers [default: %default]")
p.set_table()
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
filenames = args
subdir = opts.dir
header = "Filename|Total Sequences|Sequence length|Total Bases".split("|")
rows = []
human = opts.human
for f in filenames:
folder = f.replace(".gz", "").rsplit(".", 1)[0] + "_fastqc"
if subdir:
folder = op.join(subdir, folder)
summaryfile = op.join(folder, "fastqc_data.txt")
fqcdata = FastQCdata(summaryfile, human=human)
row = [fqcdata[x] for x in header]
rows.append(row)
print >> sys.stderr, loadtable(header, rows)
write_csv(header, rows, sep=opts.sep,
filename=opts.outfile, align=opts.align)
def hetsmooth(args):
"""
%prog hetsmooth reads_1.fq reads_2.fq jf-23_0
    Wrapper around het-smooth. Below is the command used in the het-smooth manual.
$ het-smooth --kmer-len=23 --bottom-threshold=38 --top-threshold=220
--no-multibase-replacements --jellyfish-hash-file=23-mers.jf
reads_1.fq reads_2.fq
"""
p = OptionParser(hetsmooth.__doc__)
p.add_option("-K", default=23, type="int",
help="K-mer size [default: %default]")
p.add_option("-L", type="int",
help="Bottom threshold, first min [default: %default]")
p.add_option("-U", type="int",
help="Top threshold, second min [default: %default]")
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
reads1fq, reads2fq, jfdb = args
K = opts.K
L = opts.L
U = opts.U
assert L is not None and U is not None, "Please specify -L and -U"
cmd = "het-smooth --kmer-len={0}".format(K)
cmd += " --bottom-threshold={0} --top-threshold={1}".format(L, U)
cmd += " --no-multibase-replacements --jellyfish-hash-file={0}".format(jfdb)
cmd += " --no-reads-log"
cmd += " " + " ".join((reads1fq, reads2fq))
sh(cmd)
def trim(args):
"""
%prog trim fastqfiles
Trim reads using TRIMMOMATIC. If two fastqfiles are given, then it invokes
the paired reads mode. See manual:
<http://www.usadellab.org/cms/index.php?page=trimmomatic>
"""
tv = "0.32"
TrimJar = "trimmomatic-{0}.jar".format(tv)
phdchoices = ("33", "64")
p = OptionParser(trim.__doc__)
p.add_option("--path", default=op.join("~/bin", TrimJar),
help="Path to trimmomatic jar file [default: %default]")
p.add_option("--phred", default=None, choices=phdchoices,
help="Phred score offset [default: guess]")
p.add_option("--nofrags", default=False, action="store_true",
help="Discard frags file in PE mode [default: %default]")
p.add_option("--minqv", default=15, type="int",
help="Average qv after trimming [default: %default]")
p.add_option("--minlen", default=36, type="int",
help="Minimum length after trimming [default: %default]")
p.add_option("--adapteronly", default=False, action="store_true",
help="Only trim adapters with no qv trimming [default: %default]")
p.add_option("--nogz", default=False, action="store_true",
help="Do not write to gzipped files [default: %default]")
p.add_option("--log", default=None, dest="trimlog",
help="Specify a `trimlog` file [default: %default]")
p.set_cpus(cpus=4)
opts, args = p.parse_args(args)
if len(args) not in (1, 2):
sys.exit(not p.print_help())
path = op.expanduser(opts.path)
url = \
"http://www.usadellab.org/cms/uploads/supplementary/Trimmomatic/Trimmomatic-{0}.zip"\
.format(tv)
if not op.exists(path):
path = download(url)
TrimUnzipped = "Trimmomatic-" + tv
if not op.exists(TrimUnzipped):
sh("unzip " + path)
os.remove(path)
path = op.join(TrimUnzipped, TrimJar)
assert op.exists(path), \
"Couldn't find Trimmomatic jar file at `{0}`".\
format(path)
adaptersfile = "adapters.fasta"
Adapters = must_open(op.join(datadir, adaptersfile)).read()
write_file(adaptersfile, Adapters, skipcheck=True)
assert op.exists(adaptersfile), \
"Please place the illumina adapter sequence in `{0}`".\
format(adaptersfile)
if opts.phred is None:
offset = guessoffset([args[0]])
else:
offset = int(opts.phred)
phredflag = " -phred{0}".format(offset)
threadsflag = " -threads {0}".format(opts.cpus)
if opts.trimlog:
trimlog = " -trimlog {0}".format(opts.trimlog)
cmd = "java -Xmx4g -jar {0}".format(path)
frags = ".frags.fastq"
pairs = ".pairs.fastq"
if not opts.nogz:
frags += ".gz"
pairs += ".gz"
get_prefix = lambda x: op.basename(x).replace(".gz", "").rsplit(".", 1)[0]
if len(args) == 1:
cmd += " SE"
cmd += phredflag
cmd += threadsflag
if opts.trimlog:
cmd += trimlog
fastqfile, = args
prefix = get_prefix(fastqfile)
frags1 = prefix + frags
cmd += " {0}".format(" ".join((fastqfile, frags1)))
else:
cmd += " PE"
cmd += phredflag
cmd += threadsflag
if opts.trimlog:
cmd += trimlog
fastqfile1, fastqfile2 = args
prefix1 = get_prefix(fastqfile1)
prefix2 = get_prefix(fastqfile2)
pairs1 = prefix1 + pairs
pairs2 = prefix2 + pairs
frags1 = prefix1 + frags
frags2 = prefix2 + frags
if opts.nofrags:
frags1 = "/dev/null"
frags2 = "/dev/null"
cmd += " {0}".format(" ".join((fastqfile1, fastqfile2, \
pairs1, frags1, pairs2, frags2)))
cmd += " ILLUMINACLIP:{0}:2:30:10".format(adaptersfile)
if not opts.adapteronly:
cmd += " LEADING:3 TRAILING:3"
cmd += " SLIDINGWINDOW:4:{0}".format(opts.minqv)
cmd += " MINLEN:{0}".format(opts.minlen)
if offset != 33:
cmd += " TOPHRED33"
sh(cmd)
@depends
def run_RemoveDodgyReads(infile=None, outfile=None, workdir=None,
removeDuplicates=True, rc=False, nthreads=32):
# orig.fastb => filt.fastb
assert op.exists(infile)
orig = infile.rsplit(".", 1)[0]
filt = outfile.rsplit(".", 1)[0]
cmd = "RemoveDodgyReads IN_HEAD={0} OUT_HEAD={1}".format(orig, filt)
if not removeDuplicates:
cmd += " REMOVE_DUPLICATES=False"
if rc:
cmd += " RC=True"
cmd += nthreads
sh(cmd)
@depends
def run_FastbAndQualb2Fastq(infile=None, outfile=None, rc=False):
corr = op.basename(infile).rsplit(".", 1)[0]
cmd = "FastbQualbToFastq HEAD_IN={0} HEAD_OUT={0}".format(corr)
cmd += " PAIRED=False PHRED_OFFSET=33"
if rc:
cmd += " FLIP=True"
sh(cmd)
@depends
def run_pairs(infile=None, outfile=None):
from jcvi.assembly.allpaths import pairs
pairs(infile)
def correct(args):
"""
%prog correct *.fastq
    Correct the fastqfile and generate corrected fastqfiles. This calls
    assembly.allpaths.prepare() to generate input files for ALLPATHS-LG. The
    naming convention for your fastqfiles is important, and is listed below.
By default, this will correct all PE reads, and remove duplicates of all MP
reads, and results will be placed in `frag_reads.corr.{pairs,frags}.fastq`
and `jump_reads.corr.{pairs,frags}.fastq`.
"""
from jcvi.assembly.allpaths import prepare
from jcvi.assembly.base import FastqNamings
p = OptionParser(correct.__doc__ + FastqNamings)
p.add_option("--dir", default="data",
help="Working directory [default: %default]")
p.add_option("--fragsdedup", default=False, action="store_true",
                 help="Deduplicate the fragment reads [default: %default]")
p.add_option("--ploidy", default="2", choices=("1", "2"),
help="Ploidy [default: %default]")
p.add_option("--haploidify", default=False, action="store_true",
help="Set HAPLOIDIFY=True [default: %default]")
p.set_cpus()
opts, args = p.parse_args(args)
if len(args) < 1:
sys.exit(not p.print_help())
fastq = args
tag, tagj = "frag_reads", "jump_reads"
ploidy = opts.ploidy
haploidify = opts.haploidify
assert (not haploidify) or (haploidify and ploidy == '2')
prepare(["Unknown"] + fastq + ["--norun"])
datadir = opts.dir
mkdir(datadir)
fullpath = op.join(os.getcwd(), datadir)
nthreads = " NUM_THREADS={0}".format(opts.cpus)
phred64 = (guessoffset([args[0]]) == 64)
orig = datadir + "/{0}_orig".format(tag)
origfastb = orig + ".fastb"
if need_update(fastq, origfastb):
cmd = "PrepareAllPathsInputs.pl DATA_DIR={0} HOSTS='{1}' PLOIDY={2}".\
format(fullpath, opts.cpus, ploidy)
if phred64:
cmd += " PHRED_64=True"
sh(cmd)
if op.exists(origfastb):
correct_frag(datadir, tag, origfastb, nthreads, dedup=opts.fragsdedup,
haploidify=haploidify)
origj = datadir + "/{0}_orig".format(tagj)
origjfastb = origj + ".fastb"
if op.exists(origjfastb):
correct_jump(datadir, tagj, origjfastb, nthreads)
def export_fastq(datadir, corrfastb, rc=False):
pf = op.basename(corrfastb.rsplit(".", 1)[0])
cwd = os.getcwd()
os.chdir(datadir)
corrfastq = pf + ".fastq"
run_FastbAndQualb2Fastq(infile=op.basename(corrfastb), \
outfile=corrfastq, rc=rc)
os.chdir(cwd)
pairsfile = pf + ".pairs"
fragsfastq = pf + ".corr.fastq"
run_pairs(infile=[op.join(datadir, pairsfile), op.join(datadir, corrfastq)],
outfile=fragsfastq)
def correct_frag(datadir, tag, origfastb, nthreads,
dedup=False, haploidify=False):
filt = datadir + "/{0}_filt".format(tag)
filtfastb = filt + ".fastb"
run_RemoveDodgyReads(infile=origfastb, outfile=filtfastb,
removeDuplicates=dedup, rc=False, nthreads=nthreads)
filtpairs = filt + ".pairs"
edit = datadir + "/{0}_edit".format(tag)
editpairs = edit + ".pairs"
if need_update(filtpairs, editpairs):
cmd = "ln -sf {0} {1}.pairs".format(op.basename(filtpairs), edit)
sh(cmd)
editfastb = edit + ".fastb"
if need_update(filtfastb, editfastb):
cmd = "FindErrors HEAD_IN={0} HEAD_OUT={1}".format(filt, edit)
cmd += " PLOIDY_FILE=data/ploidy"
cmd += nthreads
sh(cmd)
corr = datadir + "/{0}_corr".format(tag)
corrfastb = corr + ".fastb"
if need_update(editfastb, corrfastb):
cmd = "CleanCorrectedReads DELETE=True"
cmd += " HEAD_IN={0} HEAD_OUT={1}".format(edit, corr)
cmd += " PLOIDY_FILE={0}/ploidy".format(datadir)
if haploidify:
cmd += " HAPLOIDIFY=True"
cmd += nthreads
sh(cmd)
export_fastq(datadir, corrfastb)
def correct_jump(datadir, tagj, origjfastb, nthreads):
# Pipeline for jump reads does not involve correction
filt = datadir + "/{0}_filt".format(tagj)
filtfastb = filt + ".fastb"
run_RemoveDodgyReads(infile=origjfastb, outfile=filtfastb, \
removeDuplicates=True, rc=True, nthreads=nthreads)
export_fastq(datadir, filtfastb, rc=True)
if __name__ == '__main__':
main()
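# --- Illustrative usage sketch (not part of the original file) ---
# Each action above is exposed as a sub-command through ActionDispatcher; typical
# invocations would look roughly like the lines below (file names are
# placeholders, and the exact module path may differ per installation):
#
#   python -m jcvi.assembly.preprocess trim reads_1.fastq.gz reads_2.fastq.gz
#   python -m jcvi.assembly.preprocess count *.fastq.gz --human
#   python -m jcvi.assembly.preprocess correct frag_reads_1.fastq frag_reads_2.fastq
#   python -m jcvi.assembly.preprocess contamination Ecoli.fasta genome.fasta read.fastq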
|
bsd-2-clause
| 9,061,038,318,958,718,000 | 31.773314 | 89 | 0.590246 | false | 3.306267 | false | false | false |
kmike/morphine
|
morphine/pos_model.py
|
1
|
1683
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from morphine import features
from morphine.feature_extractor import FeatureExtractor
from morphine.basetagger import PartialTagger
from pymorphy2.tagset import OpencorporaTag
class POSFeatureExtractor(FeatureExtractor):
IGNORE = {
'Arch', 'intg', 'real', '1per', '2per', '3per', 'GNdr', 'Ms-f',
'anim', 'inan',
'masc', 'femn', 'neut',
'Geox', 'Name',
} | OpencorporaTag.CASES | OpencorporaTag.NUMBERS | OpencorporaTag.MOODS \
| OpencorporaTag.INVOLVEMENT
def __init__(self):
super(POSFeatureExtractor, self).__init__(
token_features=[
features.bias,
features.token_lower,
features.suffix2,
features.suffix3,
features.Grammeme(threshold=0.01, add_unambig=False, ignore=self.IGNORE),
features.GrammemePair(threshold=0.01**2, add_unambig=False, ignore=self.IGNORE),
],
global_features=[
features.sentence_start,
features.sentence_end,
# features.the_only_verb,
features.Pattern([-1, 'token_lower']),
# features.Pattern([+1, 'token_lower']),
features.Pattern([-1, 'Grammeme']),
features.Pattern([+1, 'Grammeme']),
features.Pattern([-1, 'GrammemePair']),
features.Pattern([+1, 'GrammemePair']),
# features.Pattern([-1, 'GrammemePair'], [0, 'GrammemePair']),
],
)
class Tagger(PartialTagger):
def outval(self, tag):
return tag._POS
|
mit
| -2,966,662,456,207,913,000 | 32 | 96 | 0.562092 | false | 3.74 | false | false | false |
PhloxAR/phloxar
|
PhloxAR/compat.py
|
1
|
1393
|
# -*- coding: utf-8 -*-
"""
Compatibility module for Python 2.7 and > 3.3
"""
from __future__ import unicode_literals
import sys
import time
try:
import queue
except ImportError:
import Queue as queue
PY2 = sys.version < '3'
clock = None
if PY2:
unichr = unichr
long = long
fileopen = file
else:
unichr = chr
long = int
fileopen = open
if PY2:
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
else:
    iterkeys = lambda d: iter(d.keys())
    itervalues = lambda d: iter(d.values())
    iteritems = lambda d: iter(d.items())
if PY2:
if sys.platform in ('win32', 'cygwin'):
clock = time.clock
else:
clock = time.time
else:
clock = time.perf_counter
if PY2:
from urllib2 import urlopen, build_opener
from urllib2 import HTTPBasicAuthHandler, HTTPPasswordMgrWithDefaultRealm
else:
    from urllib.request import urlopen
    from urllib.request import build_opener, HTTPBasicAuthHandler
    from urllib.request import HTTPPasswordMgrWithDefaultRealm
if PY2:
from UserDict import UserDict
from cStringIO import StringIO
import SocketServer as socketserver
import SimpleHTTPServer
else:
    from collections import UserDict
    from collections.abc import MutableMapping
    import http.server as SimpleHTTPServer
    from io import StringIO
    import socketserver
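# --- Illustrative usage sketch (not part of the original module) ---
# Callers are expected to import these shims instead of branching on
# sys.version themselves; the snippet below only shows the intent and assumes
# the imports above resolved on the running interpreter.
def _compat_demo():
    buf = StringIO()
    buf.write('hello')
    total = sum(v for _, v in iteritems({'a': 1, 'b': 2}))
    elapsed_since = clock()
    return buf.getvalue(), total, elapsed_since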
|
apache-2.0
| -4,027,996,947,203,010,000 | 20.765625 | 77 | 0.694903 | false | 3.946176 | false | false | false |
sdss/marvin
|
python/marvin/utils/datamodel/docudatamodel.py
|
1
|
14193
|
# !usr/bin/env python
# -*- coding: utf-8 -*-
#
# Licensed under a 3-clause BSD license.
#
# @Author: Brian Cherinka
# @Date: 2017-11-21 11:56:56
# @Last modified by: Brian Cherinka
# @Last Modified time: 2018-07-19 15:42:46
from __future__ import print_function, division, absolute_import
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import directives
from docutils import statemachine
import traceback
def _indent(text, level=1):
''' Format Bintypes '''
prefix = ' ' * (4 * level)
def prefixed_lines():
for line in text.splitlines(True):
yield (prefix + line if line.strip() else line)
return ''.join(prefixed_lines())
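# Illustrative note (not part of the original file): _indent('a\nb') returns
# '    a\n    b', i.e. each non-blank line gains four spaces per level.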
def _format_datacubes(datacubes):
''' Format Datacubes table '''
yield '.. list-table:: Datacubes'
yield _indent(':widths: 15 50 50 10 10 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - Ivar')
yield _indent(' - Mask')
yield _indent(' - FITS')
yield _indent(' - DB')
for datacube in datacubes:
dbcolumn = '{0}.{1}'.format(datacube.db_table, datacube.db_column())
yield _indent('* - {0}'.format(datacube.name))
yield _indent(' - {0}'.format(datacube.description))
yield _indent(' - {0}'.format(datacube.unit.to_string()))
yield _indent(' - {0}'.format(datacube.has_ivar()))
yield _indent(' - {0}'.format(datacube.has_mask()))
yield _indent(' - {0}'.format(datacube.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_rss(rss):
''' Format Rss table '''
yield '.. list-table:: Rss'
yield _indent(':widths: 15 50 50 10 10 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - Ivar')
yield _indent(' - Mask')
yield _indent(' - FITS')
yield _indent(' - DB')
for rs in rss:
dbcolumn = '{0}.{1}'.format(rs.db_table, rs.db_column())
yield _indent('* - {0}'.format(rs.name))
yield _indent(' - {0}'.format(rs.description))
yield _indent(' - {0}'.format(rs.unit.to_string()))
yield _indent(' - {0}'.format(rs.has_ivar()))
yield _indent(' - {0}'.format(rs.has_mask()))
yield _indent(' - {0}'.format(rs.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_spectra(spectra):
''' Format Spectra '''
yield '.. topic:: Spectra'
yield '.. list-table:: Spectra'
yield _indent(':widths: 15 100 20 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - FITS')
yield _indent(' - DB')
for spectrum in spectra:
dbcolumn = '{0}.{1}'.format(spectrum.db_table, spectrum.db_column())
yield _indent('* - {0}'.format(spectrum.name))
yield _indent(' - {0}'.format(spectrum.description))
yield _indent(' - {0}'.format(spectrum.unit.to_string()))
yield _indent(' - {0}'.format(spectrum.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_bintypes(bintypes):
''' Format Bintypes '''
yield '.. list-table:: Bintypes'
yield _indent(':widths: 15 100 10')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Binned')
for bintype in bintypes:
yield _indent('* - {0}'.format(bintype.name))
yield _indent(' - {0}'.format(bintype.description))
yield _indent(' - {0}'.format(bintype.binned))
yield ''
def _format_templates(templates):
''' Format Templates '''
yield '.. list-table:: Templates'
yield _indent(':widths: 15 100')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
for template in templates:
yield _indent('* - {0}'.format(template.name))
yield _indent(' - {0}'.format(template.description))
yield ''
def _format_models(models):
''' Format Models '''
yield '.. list-table:: Models'
yield _indent(':widths: 15 100 50 20 15 15')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Description')
yield _indent(' - Unit')
yield _indent(' - BinId')
yield _indent(' - Ivar')
yield _indent(' - Mask')
for model in models:
yield _indent('* - {0}'.format(model.name))
yield _indent(' - {0}'.format(model.description))
yield _indent(' - {0}'.format(model.unit))
yield _indent(' - {0}'.format(model.binid.name))
yield _indent(' - {0}'.format(model.has_ivar()))
yield _indent(' - {0}'.format(model.has_mask()))
yield ''
def _format_properties(properties):
''' Format Properties '''
exts = properties.extensions
yield '.. list-table:: Properties'
yield _indent(':widths: 15 100 100 15 15 50 100')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Channels')
yield _indent(' - Description')
yield _indent(' - Ivar')
yield _indent(' - Mask')
yield _indent(' - FITS')
yield _indent(' - DB')
for prop in exts:
yield _indent('* - {0}'.format(prop.name))
if 'MultiChannelProperty' in str(prop.__class__):
channels = ', '.join([c.name for c in prop.channels])
dbcolumn = ', '.join(['{0}.{1}'.format(prop.db_table, c) for c in prop.db_columns()])
else:
channels = prop.channel
dbcolumn = '{0}.{1}'.format(prop.db_table, prop.db_column())
yield _indent(' - {0}'.format(channels))
yield _indent(' - {0}'.format(prop.description))
yield _indent(' - {0}'.format(prop.ivar))
yield _indent(' - {0}'.format(prop.mask))
yield _indent(' - {0}'.format(prop.fits_extension()))
yield _indent(' - {0}'.format(dbcolumn))
yield ''
def _format_parameters(parameters):
''' Format Query Parameters '''
yield '.. topic:: Query Parameters'
yield '.. list-table:: Query Parameters'
yield _indent(':widths: 25 50 10 20 20 20 20')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Group')
yield _indent(' - Full Name')
yield _indent(' - Best')
yield _indent(' - Name')
yield _indent(' - DB Schema')
yield _indent(' - DB Table')
yield _indent(' - DB Column')
for param in parameters:
yield _indent('* - {0}'.format(param.group))
yield _indent(' - {0}'.format(param.full))
yield _indent(' - {0}'.format(param.best))
yield _indent(' - {0}'.format(param.name))
yield _indent(' - {0}'.format(param.db_schema))
yield _indent(' - {0}'.format(param.db_table))
yield _indent(' - {0}'.format(param.db_column))
yield ''
def _format_schema(schema):
''' Format a maskbit schema '''
schema_dict = schema.to_dict()
indices = schema_dict['bit'].keys()
yield '.. list-table:: Schema'
yield _indent(':widths: 5 50 50')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Bit')
yield _indent(' - Label')
yield _indent(' - Description')
for index in indices:
yield _indent('* - {0}'.format(schema_dict['bit'][index]))
yield _indent(' - {0}'.format(schema_dict['label'][index].strip()))
yield _indent(' - {0}'.format(schema_dict['description'][index].strip()))
yield ''
def _format_bitmasks(maskbit, bittype):
''' Format Maskbits '''
for name, mask in maskbit.items():
if bittype.lower() in name.lower():
#yield '.. program:: {0}'.format(name)
yield '{0}: {1}'.format(name, mask.description)
yield ''
for line in _format_schema(mask.schema):
yield line
def _format_vacs(vacs, release):
''' Format a vac schema '''
yield '.. list-table:: VACs'
yield _indent(':widths: 20 10 50')
yield _indent(':header-rows: 1')
yield ''
yield _indent('* - Name')
yield _indent(' - Version')
yield _indent(' - Description')
for vac in vacs:
yield _indent('* - {0}'.format(vac.name))
yield _indent(' - {0}'.format(vac.version[release]))
yield _indent(' - {0}'.format(vac.description))
yield ''
def _format_command(name, command, **kwargs):
"""Format the output of `click.Command`."""
# docstring
# yield command.__doc__
# yield ''
# bintypes
if 'bintypes' in kwargs:
for line in _format_bintypes(command.bintypes):
yield line
# templates
if 'templates' in kwargs:
for line in _format_templates(command.templates):
yield line
# models
if 'models' in kwargs:
for line in _format_models(command.models):
yield line
# properties
if 'properties' in kwargs:
for line in _format_properties(command.properties):
yield line
# spectra
if 'spectra' in kwargs:
for line in _format_spectra(command.spectra):
yield line
# datacubes
if 'datacubes' in kwargs:
for line in _format_datacubes(command.datacubes):
yield line
# rss
if 'rss' in kwargs:
rssdm = kwargs.get('rssdm')
for line in _format_rss(rssdm.rss):
yield line
# query parameters
if 'parameters' in kwargs:
for line in _format_parameters(command.parameters):
yield line
# bitmasks
if 'bitmasks' in kwargs:
for line in _format_bitmasks(command.bitmasks, kwargs.get('bittype', None)):
yield line
# vacs
if 'vac' in kwargs:
vac_release = kwargs.get('vac', None)
if vac_release and vac_release in command:
vacdm = command[vac_release]
for line in _format_vacs(vacdm.vacs, vacdm.release):
yield line
class DataModelDirective(rst.Directive):
has_content = False
required_arguments = 1
option_spec = {
'prog': directives.unchanged_required,
'title': directives.unchanged,
'subtitle': directives.unchanged,
'description': directives.unchanged,
'bintypes': directives.flag,
'templates': directives.flag,
'models': directives.flag,
'properties': directives.flag,
'datacubes': directives.flag,
'rss': directives.flag,
'spectra': directives.flag,
'bitmasks': directives.flag,
'parameters': directives.flag,
'bittype': directives.unchanged,
'vac': directives.unchanged,
}
def _load_module(self, module_path):
"""Load the module."""
# __import__ will fail on unicode,
# so we ensure module path is a string here.
module_path = str(module_path)
try:
module_name, attr_name = module_path.split(':', 1)
except ValueError: # noqa
raise self.error('"{0}" is not of format "module:parser"'.format(module_path))
try:
mod = __import__(module_name, globals(), locals(), [attr_name])
except (Exception, SystemExit) as exc: # noqa
err_msg = 'Failed to import "{0}" from "{1}". '.format(attr_name, module_name)
if isinstance(exc, SystemExit):
err_msg += 'The module appeared to call sys.exit()'
else:
err_msg += 'The following exception was raised:\n{0}'.format(traceback.format_exc())
raise self.error(err_msg)
if not hasattr(mod, attr_name):
raise self.error('Module "{0}" has no attribute "{1}"'.format(module_name, attr_name))
return getattr(mod, attr_name)
def _generate_nodes(self, name, command, parent=None, options={}):
"""Generate the relevant Sphinx nodes.
Format a `click.Group` or `click.Command`.
:param name: Name of command, as used on the command line
:param command: Instance of `click.Group` or `click.Command`
:param parent: Instance of `click.Context`, or None
:param show_nested: Whether subcommands should be included in output
:returns: A list of nested docutil nodes
"""
# Title
source_name = name
content = [nodes.title(text=name)]
subtitle = self.options.get('subtitle', None)
description = self.options.get('description', None)
if subtitle:
content.append(nodes.subtitle(text=subtitle))
if description:
content.append(nodes.paragraph(text=description))
section = nodes.section(
'',
*content,
ids=[nodes.make_id(source_name)],
names=[nodes.fully_normalize_name(source_name)])
# Summary
result = statemachine.ViewList()
lines = _format_command(name, command, **options)
for line in lines:
result.append(line, source_name)
self.state.nested_parse(result, 0, section)
return [section]
def run(self):
self.env = self.state.document.settings.env
# load the designated class object from the module file
command = self._load_module(self.arguments[0])
# do something special to access the RSS datamodel
if 'rss' in self.options:
rssarg = self.arguments[0].split(':')[0] + ':datamodel_rss'
rssdms = self._load_module(rssarg)
rssdm = rssdms[command.release]
self.options['rssdm'] = rssdm
if 'prog' in self.options:
prog_name = self.options.get('prog')
else:
raise self.error(':prog: must be specified')
return self._generate_nodes(prog_name, command, None, options=self.options)
def setup(app):
app.add_directive('datamodel', DataModelDirective)
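# --- Illustrative usage sketch (not part of the original file) ---
# Once setup() has registered the directive, a Sphinx .rst page could invoke it
# roughly as below.  The module path after the directive name and the selected
# options are placeholders for illustration, not a documented Marvin API.
EXAMPLE_DIRECTIVE_USAGE = """
.. datamodel:: marvin.utils.datamodel.dap:datamodel
   :prog: DAP datamodel
   :title: Example datamodel page
   :properties:
   :models:
"""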
|
bsd-3-clause
| 6,809,721,066,906,021,000 | 30.262115 | 100 | 0.573804 | false | 3.738936 | false | false | false |
richardliaw/ray
|
rllib/agents/ddpg/ddpg_torch_model.py
|
1
|
7776
|
import numpy as np
from ray.rllib.models.torch.misc import SlimFC
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.utils.framework import try_import_torch, get_activation_fn
torch, nn = try_import_torch()
class DDPGTorchModel(TorchModelV2, nn.Module):
"""Extension of standard TorchModelV2 for DDPG.
Data flow:
obs -> forward() -> model_out
model_out -> get_policy_output() -> pi(s)
model_out, actions -> get_q_values() -> Q(s, a)
model_out, actions -> get_twin_q_values() -> Q_twin(s, a)
Note that this class by itself is not a valid model unless you
implement forward() in a subclass."""
def __init__(self,
obs_space,
action_space,
num_outputs,
model_config,
name,
actor_hidden_activation="relu",
actor_hiddens=(256, 256),
critic_hidden_activation="relu",
critic_hiddens=(256, 256),
twin_q=False,
add_layer_norm=False):
"""Initialize variables of this model.
Extra model kwargs:
actor_hidden_activation (str): activation for actor network
actor_hiddens (list): hidden layers sizes for actor network
critic_hidden_activation (str): activation for critic network
critic_hiddens (list): hidden layers sizes for critic network
twin_q (bool): build twin Q networks.
add_layer_norm (bool): Enable layer norm (for param noise).
Note that the core layers for forward() are not defined here, this
only defines the layers for the output heads. Those layers for
forward() should be defined in subclasses of DDPGTorchModel.
"""
nn.Module.__init__(self)
super(DDPGTorchModel, self).__init__(obs_space, action_space,
num_outputs, model_config, name)
self.bounded = np.logical_and(self.action_space.bounded_above,
self.action_space.bounded_below).any()
low_action = nn.Parameter(
torch.from_numpy(self.action_space.low).float())
low_action.requires_grad = False
self.register_parameter("low_action", low_action)
action_range = nn.Parameter(
torch.from_numpy(self.action_space.high -
self.action_space.low).float())
action_range.requires_grad = False
self.register_parameter("action_range", action_range)
self.action_dim = np.product(self.action_space.shape)
# Build the policy network.
self.policy_model = nn.Sequential()
ins = num_outputs
self.obs_ins = ins
activation = get_activation_fn(
actor_hidden_activation, framework="torch")
for i, n in enumerate(actor_hiddens):
self.policy_model.add_module(
"action_{}".format(i),
SlimFC(
ins,
n,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=activation))
# Add LayerNorm after each Dense.
if add_layer_norm:
self.policy_model.add_module("LayerNorm_A_{}".format(i),
nn.LayerNorm(n))
ins = n
self.policy_model.add_module(
"action_out",
SlimFC(
ins,
self.action_dim,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=None))
# Use sigmoid to scale to [0,1], but also double magnitude of input to
# emulate behaviour of tanh activation used in DDPG and TD3 papers.
# After sigmoid squashing, re-scale to env action space bounds.
class _Lambda(nn.Module):
def forward(self_, x):
sigmoid_out = nn.Sigmoid()(2.0 * x)
squashed = self.action_range * sigmoid_out + self.low_action
return squashed
# Only squash if we have bounded actions.
if self.bounded:
self.policy_model.add_module("action_out_squashed", _Lambda())
# Build the Q-net(s), including target Q-net(s).
def build_q_net(name_):
activation = get_activation_fn(
critic_hidden_activation, framework="torch")
# For continuous actions: Feed obs and actions (concatenated)
# through the NN. For discrete actions, only obs.
q_net = nn.Sequential()
ins = self.obs_ins + self.action_dim
for i, n in enumerate(critic_hiddens):
q_net.add_module(
"{}_hidden_{}".format(name_, i),
SlimFC(
ins,
n,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=activation))
ins = n
q_net.add_module(
"{}_out".format(name_),
SlimFC(
ins,
1,
initializer=torch.nn.init.xavier_uniform_,
activation_fn=None))
return q_net
self.q_model = build_q_net("q")
if twin_q:
self.twin_q_model = build_q_net("twin_q")
else:
self.twin_q_model = None
def get_q_values(self, model_out, actions):
"""Return the Q estimates for the most recent forward pass.
This implements Q(s, a).
Args:
model_out (Tensor): obs embeddings from the model layers, of shape
[BATCH_SIZE, num_outputs].
actions (Tensor): Actions to return the Q-values for.
Shape: [BATCH_SIZE, action_dim].
Returns:
tensor of shape [BATCH_SIZE].
"""
return self.q_model(torch.cat([model_out, actions], -1))
def get_twin_q_values(self, model_out, actions):
"""Same as get_q_values but using the twin Q net.
This implements the twin Q(s, a).
Args:
model_out (Tensor): obs embeddings from the model layers, of shape
[BATCH_SIZE, num_outputs].
actions (Optional[Tensor]): Actions to return the Q-values for.
Shape: [BATCH_SIZE, action_dim].
Returns:
tensor of shape [BATCH_SIZE].
"""
return self.twin_q_model(torch.cat([model_out, actions], -1))
def get_policy_output(self, model_out):
"""Return the action output for the most recent forward pass.
This outputs the support for pi(s). For continuous action spaces, this
        is the action directly. For discrete, it is the mean / std dev.
Args:
model_out (Tensor): obs embeddings from the model layers, of shape
[BATCH_SIZE, num_outputs].
Returns:
tensor of shape [BATCH_SIZE, action_out_size]
"""
return self.policy_model(model_out)
def policy_variables(self, as_dict=False):
"""Return the list of variables for the policy net."""
if as_dict:
return self.policy_model.state_dict()
return list(self.policy_model.parameters())
def q_variables(self, as_dict=False):
"""Return the list of variables for Q / twin Q nets."""
if as_dict:
return {
**self.q_model.state_dict(),
**(self.twin_q_model.state_dict() if self.twin_q_model else {})
}
return list(self.q_model.parameters()) + \
(list(self.twin_q_model.parameters()) if self.twin_q_model else [])
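# --- Illustrative usage sketch (not part of the original file) ---
# DDPGTorchModel stays abstract until a subclass implements forward(); the
# helper below only shows how the output heads compose, mirroring the class
# docstring.  `model` is assumed to be such a subclass instance and `obs` a
# [BATCH_SIZE, obs_dim] tensor supplied by the caller.
def _example_head_usage(model, obs):
    model_out, _ = model({"obs": obs}, [], None)
    actions = model.get_policy_output(model_out)        # pi(s)
    q_values = model.get_q_values(model_out, actions)   # Q(s, a)
    return actions, q_values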
|
apache-2.0
| 8,141,705,014,407,034,000 | 37.88 | 79 | 0.547968 | false | 4.1472 | false | false | false |
FedoraScientific/salome-yacs
|
src/pyqt/gui/CItems.py
|
1
|
17573
|
# Copyright (C) 2006-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : [email protected]
#
import sys,traceback
from qt import *
from qtcanvas import *
import pilot
import pypilot
import Item
import math
dispatcher=pilot.Dispatcher.getDispatcher()
class TextItem(QCanvasText):
"""A text in a composite object"""
def __init__(self,obj,canvas):
QCanvasText.__init__(self,canvas)
self.obj=obj
self.item=None
def getObj(self):
"""The composite object which contains the text"""
return self.obj
def moveBy(self,dx,dy):
"""Request the text move by x,y"""
if self.obj:
#the text is a part of a composite object
self.obj.moveBy(dx,dy)
else:
#the text is independant
self.myMove(dx,dy)
def myMove(self,dx,dy):
"""The real move"""
QCanvasText.moveBy(self,dx,dy)
def selected(self):
"""The canvas item has been selected"""
if self.obj:
self.obj.selected()
class PointItem(QCanvasEllipse):
def __init__(self,obj,x,y,canvas):
"""Create a point contained in a composite line (obj)"""
QCanvasEllipse.__init__(self,6,6,canvas)
self.obj=obj
self.item=None
self.inline=None
self.outline=None
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setX(x)
self.setY(y)
self.setVisible(True)
def setInline(self,inline):
self.inline=inline
if inline.z() >= self.z():
self.setZ(inline.z()+1)
def setOutline(self,outline):
self.outline=outline
if outline.z() >= self.z():
self.setZ(outline.z()+1)
def moveBy(self,dx,dy):
"""Request the point move by x,y"""
self.myMove(dx,dy)
def myMove(self,dx,dy):
"""The real move"""
QCanvasEllipse.moveBy(self,dx,dy)
if self.outline:
self.outline.setFromPoint( int(self.x()), int(self.y()) )
if self.inline:
self.inline.setToPoint( int(self.x()), int(self.y()) )
def getObj(self):
"""The object which contains the point"""
return self.obj
def handleDoubleClick(self,pos):
self.obj.deletePoint(self,pos)
#def __del__(self):
# print "PointItem.__del__"
def clear(self):
"""To remove from canvas"""
self.setCanvas(None)
self.obj=None
self.inline=None
self.outline=None
def selected(self):
"""The canvas item has been selected"""
class LineItem(QCanvasLine):
"""A line between 2 points"""
def __init__(self,obj,fromPoint, toPoint,canvas):
QCanvasLine.__init__(self,canvas)
self.obj=obj
self.item=None
self.fromPoint=fromPoint
self.toPoint=toPoint
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setPoints(int(fromPoint.x()),int(fromPoint.y()), int(toPoint.x()), int(toPoint.y()))
self.setZ(min(fromPoint.z(),toPoint.z())-1)
self.setVisible(True)
self.arrow = QCanvasPolygon(self.canvas())
self.arrow.setBrush(QBrush(Qt.black))
self.setArrow()
self.arrow.show()
def setFromPoint(self,x,y):
self.setPoints(x,y,self.endPoint().x(),self.endPoint().y())
self.setArrow()
def setToPoint(self,x,y):
self.setPoints(self.startPoint().x(), self.startPoint().y(),x,y)
self.setArrow()
def moveBy(self,dx,dy):
"""Disable line move"""
pass
def setArrow(self):
x1,y1=self.startPoint().x(),self.startPoint().y()
x2,y2=self.endPoint().x(),self.endPoint().y()
d=math.hypot(x2-x1,y2-y1)
sina=(y2-y1)/d
cosa=(x2-x1)/d
x=(x1+x2)/2.
y=(y1+y2)/2.
l,e=6,3
pa=QPointArray(3)
pa.setPoint(0, QPoint(x+l*cosa,y+l*sina))
pa.setPoint(1, QPoint(x-e*sina,y+e*cosa))
pa.setPoint(2, QPoint(x+e*sina,y-e*cosa))
self.arrow.setPoints(pa)
def getObj(self):
"""The object which contains the line"""
return self.obj
def handleDoubleClick(self,pos):
#split the line
self.obj.splitline(self,pos)
#def __del__(self):
# print "LineItem.__del__"
def clear(self):
"""To remove from canvas"""
self.setCanvas(None)
self.fromPoint=None
self.toPoint=None
self.obj=None
self.arrow.setCanvas(None)
self.arrow=None
def selected(self):
"""The canvas item has been selected"""
class LinkItem:
def __init__(self,fromPort, toPort,canvas):
self.fromPort=fromPort
self.toPort=toPort
self.canvas=canvas
self.item=None
fromPort.addOutLink(self)
toPort.addInLink(self)
self.lines=[]
self.points=[]
self.lines.append(LineItem(self,fromPort, toPort,canvas))
def deletePoint(self,point,pos):
"""Delete intermediate point"""
if point not in self.points:
return
self.points.remove(point)
inline=point.inline
outline=point.outline
inline.toPoint=outline.toPoint
inline.setToPoint(outline.toPoint.x(),outline.toPoint.y())
self.lines.remove(outline)
if inline.toPoint in self.points:
inline.toPoint.setInline(inline)
#remove from canvas
point.clear()
outline.clear()
def clearPoints(self):
#make a copy as deletePoint modify self.points
for point in self.points[:]:
self.deletePoint(point,0)
def splitline(self,line,pos):
self.splitLine(line,pos.x(),pos.y())
def splitLine(self,line,x,y):
"""Split line at position x,y"""
#The new point
point=PointItem(self,x,y,self.canvas)
self.points.append(point)
i=self.lines.index(line)
newline=LineItem(self,point,line.toPoint,self.canvas)
if line.toPoint in self.points:
#line not connected to port : reconnect newline
line.toPoint.setInline(newline)
self.lines.insert(i+1,newline)
line.setToPoint(x,y)
line.toPoint=point
point.setInline(line)
point.setOutline(newline)
def setFromPoint(self,x,y):
first=self.lines[0]
first.setFromPoint(x,y)
def setToPoint(self,x,y):
last=self.lines[-1]
last.setToPoint(x,y)
def moveBy(self,dx,dy):
pass
def popup(self,canvasView):
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Link Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Delete", self.delete)
return menu
def delete(self):
print "delete link"
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), pos.x()+10, pos.y()+10)
s = QString( "link: "+self.fromPort.port.getNode().getName() +":"+self.fromPort.port.getName()+"->"+self.toPort.port.getNode().getName()+":"+self.toPort.port.getName() )
view.tip( r, s )
def selected(self):
"""The canvas item has been selected"""
class ControlLinkItem(LinkItem):
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), pos.x()+10, pos.y()+10)
s = QString( "link: "+self.fromPort.port.getNode().getName()+"->"+self.toPort.port.getNode().getName())
view.tip( r, s )
#QToolTip(view).tip( r, s )
class ControlItem(QCanvasRectangle):
def __init__(self,node,port,canvas):
QCanvasRectangle.__init__(self,canvas)
self.setSize(6,6)
self.port=port
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setZ(node.z()+1)
self.node=node
self.item=Item.adapt(self.port)
def moveBy(self,dx,dy):
self.node.moveBy(dx,dy)
def myMove(self,dx,dy):
QCanvasRectangle.moveBy(self,dx,dy)
def getObj(self):
return self
def popup(self,canvasView):
self.context=canvasView
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Port Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Connect", self.connect)
return menu
def connect(self):
print "ControlItem.connect",self.context
print self.port
item=Item.adapt(self.port)
print item
item.connect()
self.context.connecting(item)
#self.context.connecting(self)
def link(self,obj):
#Protocol to link 2 objects (ports, at first)
#First, notify the canvas View (or any view that can select) we are connecting (see method connect above)
#Second (and last) make the link in the link method of object that was declared connecting
print "link:",obj
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "gate:")
view.tip( r, s )
def selected(self):
"""The canvas item has been selected"""
#print "control port selected"
item=Item.adapt(self.port)
item.selected()
class InControlItem(ControlItem):
def __init__(self,node,port,canvas):
ControlItem.__init__(self,node,port,canvas)
self.__inList=[]
def myMove(self,dx,dy):
ControlItem.myMove(self,dx,dy)
for link in self.__inList:
link.setToPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,OutControlItem):
#Connection possible
l=LinkItem(obj,self,self.canvas())
def addInLink(self,link):
self.__inList.append(link)
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "ingate:")
view.tip( r, s )
#QToolTip(view).tip( r, s )
class OutControlItem(ControlItem):
def __init__(self,node,port,canvas):
ControlItem.__init__(self,node,port,canvas)
self.__outList=[]
def myMove(self,dx,dy):
ControlItem.myMove(self,dx,dy)
for link in self.__outList:
link.setFromPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,InControlItem):
#Connection possible
l=LinkItem(self,obj,self.canvas())
def addOutLink(self,link):
self.__outList.append(link)
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "outgate:")
view.tip( r, s )
#QToolTip(view).tip( r, s )
def links(self):
return self.__outList
class PortItem(QCanvasEllipse):
def __init__(self,node,port,canvas):
QCanvasEllipse.__init__(self,6,6,canvas)
self.port=port
self.item=None
self.item=Item.adapt(self.port)
self.setPen(QPen(Qt.black))
self.setBrush(QBrush(Qt.red))
self.setZ(node.z()+1)
self.node=node
def moveBy(self,dx,dy):
self.node.moveBy(dx,dy)
def myMove(self,dx,dy):
QCanvasEllipse.moveBy(self,dx,dy)
def getObj(self):
return self
def popup(self,canvasView):
self.context=canvasView
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Port Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Connect", self.connect)
return menu
def connect(self):
print "PortItem.connect",self.context
print self.port
item=Item.adapt(self.port)
print item
self.context.connecting(item)
#self.context.connecting(self)
def link(self,obj):
print "PortItem.link:",obj
def tooltip(self,view,pos):
r = QRect(pos.x(),pos.y(),self.width(), self.height())
t=self.port.edGetType()
s = QString( "port: " + self.port.getName() + ":" + t.name())
view.tip( r, s )
def selected(self):
"""The canvas item has been selected"""
#print "port selected"
item=Item.adapt(self.port)
item.selected()
class InPortItem(PortItem):
def __init__(self,node,port,canvas):
PortItem.__init__(self,node,port,canvas)
self.__inList=[]
def myMove(self,dx,dy):
PortItem.myMove(self,dx,dy)
for link in self.__inList:
link.setToPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,OutPortItem):
#Connection possible
l=LinkItem(obj,self,self.canvas())
def addInLink(self,link):
self.__inList.append(link)
class OutPortItem(PortItem):
def __init__(self,node,port,canvas):
PortItem.__init__(self,node,port,canvas)
self.__outList=[]
def myMove(self,dx,dy):
PortItem.myMove(self,dx,dy)
for link in self.__outList:
link.setFromPoint( int(self.x()), int(self.y()) )
def link(self,obj):
#Here we create the link between self and obj.
#self has been declared connecting in connect method
print "link:",obj
if isinstance(obj,InPortItem):
#Connection possible
l=LinkItem(self,obj,self.canvas())
def addOutLink(self,link):
self.__outList.append(link)
def links(self):
return self.__outList
class InStreamItem(InPortItem):
def __init__(self,node,port,canvas):
InPortItem.__init__(self,node,port,canvas)
self.setBrush(QBrush(Qt.green))
class OutStreamItem(OutPortItem):
def __init__(self,node,port,canvas):
OutPortItem.__init__(self,node,port,canvas)
self.setBrush(QBrush(Qt.green))
class Cell(QCanvasRectangle,pypilot.PyObserver):
colors={
"pink":Qt.cyan,
"green":Qt.green,
"magenta":Qt.magenta,
"purple":Qt.darkMagenta,
"blue":Qt.blue,
"red":Qt.red,
"orange":Qt.yellow,
"grey":Qt.gray,
"white":Qt.white,
}
def __init__(self,node,canvas):
QCanvasRectangle.__init__(self,canvas)
pypilot.PyObserver.__init__(self)
self.inports=[]
self.outports=[]
self.setSize(50,50)
#node is an instance of YACS::ENGINE::Node
self.node=node
self.item=Item.adapt(self.node)
dispatcher.addObserver(self,node,"status")
self.label=TextItem(self,canvas)
self.label.setText(self.node.getName())
self.label.setFont(QFont("Helvetica",8))
rect=self.label.boundingRect()
self.label.setZ(self.z()+1)
self.label.myMove(self.x()+self.width()/2-rect.width()/2,self.y()+self.height()/2-rect.height()/2)
color= self.colors.get(node.getColorState(node.getEffectiveState()),Qt.white)
self.setBrush(QBrush(color))
dy=6
y=0
for inport in self.node.getSetOfInputPort():
p=InPortItem(self,inport,canvas)
y=y+dy
p.myMove(0,y)
self.inports.append(p)
for instream in self.node.getSetOfInputDataStreamPort():
p=InStreamItem(self,instream,canvas)
y=y+dy
p.myMove(0,y)
self.inports.append(p)
ymax=y
dy=6
y=0
for outport in self.node.getSetOfOutputPort():
p=OutPortItem(self,outport,canvas)
y=y+dy
p.myMove(50,y)
self.outports.append(p)
for outstream in self.node.getSetOfOutputDataStreamPort():
p=OutStreamItem(self,outstream,canvas)
y=y+dy
p.myMove(50,y)
self.outports.append(p)
ymax=max(y,ymax)
#Control ports
y=ymax+dy
if y < 44:y=44
p=InControlItem(self,self.node.getInGate(),canvas)
p.myMove(0,y)
self.inports.append(p)
self.ingate=p
p=OutControlItem(self,self.node.getOutGate(),canvas)
p.myMove(44,y)
self.outports.append(p)
self.outgate=p
y=y+dy
self.setSize(50,y)
events={
"status":QEvent.User+1,
}
def pynotify(self,object,event):
#print "pynotify",event,object
try:
evType=self.events[event]
ev=QCustomEvent(evType)
ev.setData(self)
ev.yacsEvent=event
QApplication.postEvent(self.canvas(), ev)
#request immediate processing (deadlock risk ???)
#QApplication.sendPostedEvents(self.canvas(), evType)
#print "pynotify end"
except:
#traceback.print_exc()
raise
def customEvent(self,event):
if event.yacsEvent=="status":
object=self.node
state=object.getEffectiveState()
color=object.getColorState(state)
color= self.colors.get(color,Qt.white)
self.setBrush(QBrush(color))
else:
print "Unknown custom event type:", event.type()
def moveBy(self,dx,dy):
QCanvasRectangle.moveBy(self,dx,dy)
self.label.myMove(dx,dy)
for p in self.inports:
p.myMove(dx,dy)
for p in self.outports:
p.myMove(dx,dy)
def show(self):
QCanvasRectangle.show(self)
self.label.show()
for p in self.inports:
p.show()
for p in self.outports:
p.show()
def getObj(self):
return self
def popup(self,canvasView):
menu=QPopupMenu()
caption = QLabel( "<font color=darkblue><u><b>Node Menu</b></u></font>",menu )
caption.setAlignment( Qt.AlignCenter )
menu.insertItem( caption )
menu.insertItem("Browse", self.browse)
return menu
def tooltip(self,view,pos):
r = QRect(pos.x(), pos.y(), self.width(), self.height())
s = QString( "node: " + self.node.getName())
view.tip( r, s )
#QToolTip(view).tip( r, s )
def browse(self):
print "browse"
def selected(self):
"""The canvas item has been selected"""
#print "node selected"
item=Item.adapt(self.node)
item.selected()
|
gpl-2.0
| -5,207,753,178,458,812,000 | 26.761453 | 174 | 0.653275 | false | 3.156637 | false | false | false |
stscieisenhamer/glue
|
glue/app/qt/splash_screen.py
|
1
|
1493
|
import os
from qtpy import QtWidgets, QtGui
from qtpy.QtCore import Qt, QRect
__all__ = ['QtSplashScreen']
class QtSplashScreen(QtWidgets.QWidget):
def __init__(self, *args, **kwargs):
super(QtSplashScreen, self).__init__(*args, **kwargs)
self.resize(627, 310)
self.setStyleSheet("background-color:white;")
self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint | Qt.WindowStaysOnTopHint)
self.center()
self.progress = QtWidgets.QProgressBar()
self.layout = QtWidgets.QVBoxLayout(self)
self.layout.addStretch()
self.layout.addWidget(self.progress)
pth = os.path.join(os.path.dirname(__file__), '..', '..', 'logo.png')
self.image = QtGui.QPixmap(pth)
def set_progress(self, value):
self.progress.setValue(value)
QtWidgets.qApp.processEvents() # update progress bar
def paintEvent(self, event):
painter = QtGui.QPainter(self)
painter.drawPixmap(QRect(20, 20, 587, 229), self.image)
def center(self):
# Adapted from StackOverflow
# https://stackoverflow.com/questions/20243637/pyqt4-center-window-on-active-screen
frameGm = self.frameGeometry()
screen = QtWidgets.QApplication.desktop().screenNumber(QtWidgets.QApplication.desktop().cursor().pos())
centerPoint = QtWidgets.QApplication.desktop().screenGeometry(screen).center()
frameGm.moveCenter(centerPoint)
self.move(frameGm.topLeft())
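# A minimal usage sketch (assumptions: a working qtpy backend and the fixed
# five-step loop below, which is illustrative only and not part of glue):
def _demo_splash_screen():
    app = QtWidgets.QApplication([])
    splash = QtSplashScreen()
    splash.show()
    for step in (0, 25, 50, 75, 100):
        splash.set_progress(step)  # drives the embedded QProgressBar
    splash.close()
    return app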
|
bsd-3-clause
| 1,320,943,867,231,352,300 | 32.177778 | 111 | 0.663094 | false | 3.760705 | false | false | false |
google-research/google-research
|
m_theory/dim4/so8_supergravity_extrema/code/symmetries.py
|
1
|
34942
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analyzes residual symmetries of solutions.
As all critical points with a rank-2 simple Lie group symmetry have been
known for many years, we can restrict ourselves to a residual Lie symmetry of
Spin(3)^A x U(1)^B. This considerably simplifies the analysis.
"""
import cmath
import collections
import glob
import itertools
import math
import numpy
import os
import pprint
# CAUTION: scipy.linalg.eigh() will produce an orthonormal basis, while
# scipy.linalg.eig(), when used on a hermitean matrix, typically will not
# orthonormalize eigenvectors in degenerate eigenspaces.
# This behavior is not documented properly, but "obvious" when considering
# the underlying algorithm.
import scipy.linalg
from dim4.so8_supergravity_extrema.code import algebra
CanonicalizedSymmetry = collections.namedtuple(
'CanonicalizedSymmetry',
['u1s', # Sequence of U(1) generators, each as a 28-vector acting on [ik].
'semisimple_part', # [28, d]-array, semisimple part of the algebra.
'spin3_cartan_gens' # Cartan generators, one per spin(3) subalgebra.
])
# A `Spin8Action` tuple consists of an einsum reduction-string,
# typically of the form 'aij,aN->jiN', as well as the 1st tensor-argument
# to the corresponding contraction.
Spin8Action = collections.namedtuple(
'Spin8Action', ['einsum', 'tensor'])
class BranchingFormatter(object):
"""Base class for branching-formatters."""
def format(self, num_spin3s, branching):
return self.sum_join(self.format_irreps(num_spin3s, b) for b in branching)
def format_branching_tag(self, tag):
"""Formats tag (8, 'v') -> '8v' etc."""
tag_dim, tag_subscript = tag
return '%s%s' % (tag_dim, tag_subscript)
def sum_join(self, formatted):
return ' + '.join(formatted)
def format_multiplicity(self, multiplicity, formatted_obj):
"""Adds a multiplicity prefix to a formatted object."""
if multiplicity == 1:
return formatted_obj
return '%dx%s' % (multiplicity, formatted_obj)
def format_irreps(self, num_spin3s, irreps_part):
"""Formats a group of identical irreducible representations."""
charges, mult = irreps_part
return self.format_multiplicity(mult,
self.format_irrep(num_spin3s, charges))
def format_irrep(self, num_spin3s, charges):
"""Formats a single irreducible representation."""
if set(charges[:num_spin3s]) == {0}:
spin3_part = ''
else:
spin3_part = 'x'.join('%s' % int(round(2 * c + 1))
for c in charges[:num_spin3s])
assert all(c == int(c) for c in charges[num_spin3s:])
u1_part = ', '.join(str(int(c)) for c in charges[num_spin3s:])
if spin3_part:
return ('[%s]{%s}' % (spin3_part, u1_part) if u1_part
else '[%s]' % spin3_part)
else:
return '{%s}' % u1_part
class LaTeXBranchingFormatter(BranchingFormatter):
"""BranchingFormatter that generates LaTeX code."""
def format_branching_tag(self, tag):
"""Formats tag (8, 'v') -> '8_{v}' etc."""
tag_dim, tag_subscript = tag
return '%s_{%s}' % (tag_dim, tag_subscript)
def format_multiplicity(self, multiplicity, formatted_obj):
if multiplicity == 1:
return formatted_obj
return r'%d\times%s' % (multiplicity, formatted_obj)
def _format_charge(self, c, sub_super):
assert c == int(c)
if c == 0:
return ''
return r'%s{\scriptscriptstyle %s}' % (sub_super, '-+'[c > 0] * abs(int(c)))
def format_irrep(self, num_spin3s, charges):
# We use style such as 33^{+++}_{--},
# i.e. 1st U(1) gets superscript charges,
# 2nd U(1) gets subscript charges.
assert all(c == int(c) for c in charges[num_spin3s:])
if set(charges[:num_spin3s]) <= {0}:
spin3_part = r'\mathbf{1}' # No Spin3s, or only singlet.
elif num_spin3s == 1:
spin3_part = r'\mathbf{%s}' % int(round(2 * charges[0] + 1))
else:
spin3_part = '(%s)' % (
','.join(r'\mathbf{%d}' % int(round(2 * c + 1))
for c in charges[:num_spin3s]))
num_u1s = len(charges) - num_spin3s
u1a_part = u1b_part = ''
if num_u1s >= 1:
u1a_part = self._format_charge(charges[num_spin3s], '^')
if num_u1s == 2:
u1b_part = self._format_charge(charges[num_spin3s + 1], '_')
return spin3_part + u1a_part + u1b_part
TEXT_FORMATTER = BranchingFormatter()
LATEX_FORMATTER = LaTeXBranchingFormatter()
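# A small, self-contained sketch of how the two formatters above render the
# same branching data. The charge tuples are invented for illustration: with
# num_spin3s=1, the first entry of each tuple is a spin(3) highest weight and
# the remaining entries are u(1) charges.
def _demo_formatters():
  example_branching = [((1.0, 2), 1), ((0.0, 0), 3)]  # [(charges, multiplicity), ...]
  text = TEXT_FORMATTER.format(1, example_branching)    # e.g. '[3]{2} + 3x{0}'
  latex = LATEX_FORMATTER.format(1, example_branching)  # LaTeX markup of the same
  return text, latex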
# The Spin(8) structure constants.
_spin8_fabc = 2 * numpy.einsum('cik,abik->abc',
algebra.su8.m_28_8_8,
# We do not need to antisymmetrize [ik] here,
# as the above factor already does this.
numpy.einsum('aij,bjk->abik',
algebra.su8.m_28_8_8,
algebra.su8.m_28_8_8))
_spin8_action56 = numpy.einsum('aik,ABik->aAB',
algebra.su8.m_28_8_8,
algebra.su8.m_action_56_56_8_8)
# Branching-rules task specification, as used for the `decomposition_tasks`
# argument to spin3u1_decompose().
# One may generally want to pass an extended arg that adds tasks which also
# decompose e.g. degenerate mass-eigenstates w.r.t. symmetry.
# These are also used to find scaling for u(1) generators that makes all
# 8v, 8s, 8c charges integral.
SPIN8_ACTION_8V = Spin8Action(einsum='aij,aN->jiN',
tensor=algebra.su8.m_28_8_8)
SPIN8_ACTION_8S = Spin8Action(
einsum='aAB,aN->BAN',
tensor=numpy.einsum('aij,ijAB->aAB',
0.25 * algebra.su8.m_28_8_8,
algebra.spin8.gamma_vvss))
SPIN8_ACTION_8C = Spin8Action(
einsum='aAB,aN->BAN',
tensor=numpy.einsum('aij,ijAB->aAB',
0.25 * algebra.su8.m_28_8_8,
algebra.spin8.gamma_vvcc))
SPIN8_ACTION_AD = Spin8Action(einsum='aAB,aN->BAN', tensor=_spin8_fabc * 0.5)
SPIN8_ACTION_FERMIONS = Spin8Action(einsum='aAB,aN->BAN',
tensor=_spin8_action56)
SPIN8_ACTION_SCALARS = Spin8Action(
einsum='aAB,aN->BAN',
tensor=0.5 * algebra.e7.spin8_action_on_v70o)
SPIN8_BRANCHINGS_VSC = (
(SPIN8_ACTION_8V,
[((8, 'v'), numpy.eye(8))]),
(SPIN8_ACTION_8S,
[((8, 's'), numpy.eye(8))]),
(SPIN8_ACTION_8C,
[((8, 'c'), numpy.eye(8))]))
# Extended branching-rules task specification, adds 28->... branching.
SPIN8_BRANCHINGS = (
SPIN8_BRANCHINGS_VSC +
((SPIN8_ACTION_AD, [((28, ''), numpy.eye(28))]),))
def round2(x):
"""Rounds number to 2 digits, canonicalizing -0.0 to 0.0."""
return numpy.round(x, 2) or 0.0
def allclose2(p, q):
"""Determines if `p` and `q` match to two digits."""
return numpy.allclose(p, q, rtol=0.01, atol=0.01)
def aggregate_eigenvectors(eigvals, eigvecs, tolerance=1e-6):
"""Collects eigenvectors by eigenvalue into eigenspaces.
The `eigvals` and `eigvecs` arguments must be as produced by
scipy.linalg.eigh().
Args:
eigvals, array of eigenvalues. Must be approximately-real.
eigvecs, array of eigenvectors.
tolerance, float. Tolerance threshold for considering eigenvalues
as degenerate.
Returns:
List of the form [(eigenvalue, eigenspace), ...],
where each `eigenspace` is a list of eigenvectors for the corresponding
eigenvalue.
Raises:
ValueError, if reality requirements are violated.
"""
if not numpy.allclose(eigvals, eigvals.real):
raise ValueError('Non-real eigenvalues.')
eigvalue_and_aggregated_eigvecs = []
for eigvalue, eigvec in sorted(zip(eigvals.real,
[tuple(v.astype(numpy.complex128))
for v in eigvecs.T]),
# Do not compare eigenvectors for degenerate
# eigenvalues. Sort by descending order.
key=lambda ev_evec: -ev_evec[0]):
for eigvalue_known, eigvecs_known in eigvalue_and_aggregated_eigvecs:
if abs(eigvalue - eigvalue_known) <= tolerance:
eigvecs_known.append(eigvec)
break
else: # Reached end of loop.
eigvalue_and_aggregated_eigvecs.append((eigvalue, [eigvec]))
return eigvalue_and_aggregated_eigvecs
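# Minimal check of aggregate_eigenvectors() on a doubly-degenerate spectrum;
# the 2x2 identity matrix below is made up purely for illustration.
def _demo_aggregate_eigenvectors():
  eigvals, eigvecs = scipy.linalg.eigh(numpy.eye(2))
  grouped = aggregate_eigenvectors(eigvals, eigvecs)
  # Both eigenvalues are 1.0, so a single eigenspace with two vectors results.
  assert len(grouped) == 1 and len(grouped[0][1]) == 2
  return grouped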
def get_residual_gauge_symmetry(v70, threshold=0.05):
"""Maps scalar 70-vector to [a, n]-tensor of unbroken symmetry generators.
Index `a` is a Spin(8)-adjoint index, `n` counts (orthonormal) basis vectors.
Args:
v70: The e7/su8 70-vector describing a point on the scalar manifold.
threshold: Threshold on the generalized SVD-eigenvalue for considering
a direction as belonging to the residual symmetry.
"""
su, ss, svh = scipy.linalg.svd(
numpy.einsum('avw,v->aw',
algebra.e7.spin8_action_on_v70,
v70))
del svh # Unused.
# Select those columns for which the diagonal entry is essentially zero.
return su.T[ss <= threshold].T
def get_simultaneous_eigenbasis(commuting_gens,
gen_action_einsum='abc,aN->cbN',
gen_action_tensor=_spin8_fabc,
initial_space=None,
checks=True,
tolerance=1e-6):
"""Finds a simultaneous eigenbasis for a collection of commuting generators.
Args:
commuting_gens: [28, N]-array of real and mutually orthogonal generators.
gen_action_einsum: numpy.einsum() contraction specification that maps
`gen_action_tensor` and `commuting_gens` to a set of N matrices given as
[D, D, N]-array that represent the generators on the desired space.
initial_space: [D, K]-dimensional initial space to decompose into
eigenspaces, or `None`. If `None`, uses numpy.eye(D).
checks: If True, perform internal consistency checks.
tolerance: Tolerance difference-threshold for considering
two eigenvalues as identical.
Returns:
Pair of (simultaneous_eigenbasis, charges), where `simultaneous_eigenbasis`
is a [28, K]-dimensional array of eigenvectors, and `charges` is a list
of corresponding charge-tuples.
"""
# Map generators to endomorphisms. Our conventions are such that
# the result of contracting with `gen_action_tensor` also gets multiplied
# with 1j. For spin(8) action on 8v, 8s, 8c, 28, etc., this ensures that
# with all-real generators and all-real action-tensor, we get hermitean
# endomorphisms with all-real spectrum.
gens_action = numpy.einsum(gen_action_einsum,
gen_action_tensor,
commuting_gens) * 1j
if initial_space is None:
initial_space = numpy.eye(gens_action.shape[0])
#
def recursively_split_eigenspaces(num_generator, charge_tagged_eigenspaces):
"""Recursively splits an eigenspace.
Args:
num_generator: The number of the commuting generator to use for the next
splitting-step.
charge_tagged_eigenspaces: List [(partial_charges, subspace), ...]
where `partial_charges` is a tuple of charges w.r.t. the first
`num_generator` generators (so, () for num_generator == 0),
and `subspace` is a [D, K]-array of subspace directions.
Returns:
(Ultimately), fully split charge_tagged_eigenspaces, where the
`partial_charges` tags list as many charges as there are generators.
"""
if num_generator == gens_action.shape[-1]:
return charge_tagged_eigenspaces
gen_action = gens_action[:, :, num_generator]
split_eigenspaces = []
for charges, espace in charge_tagged_eigenspaces:
if checks:
eigenspace_sprod = numpy.einsum('aj,ak->jk', espace.conj(), espace)
assert allclose2(
eigenspace_sprod,
numpy.eye(espace.shape[1])), (
'Weird Eigenspace normalization: ' + repr(
numpy.round(eigenspace_sprod, 3)))
gen_on_eigenspace = numpy.einsum(
'aj,ak->jk',
espace.conj(),
numpy.einsum('ab,bj->aj', gen_action, espace))
sub_eigvals, sub_eigvecs_T = scipy.linalg.eigh(gen_on_eigenspace)
list_approx_eigval_and_eigvecs = []
for sub_eigval, sub_eigvec in zip(sub_eigvals, sub_eigvecs_T.T):
# Lift back to original space.
eigvec = numpy.einsum('gs,s->g', espace, sub_eigvec) # |v> <v| G |v>
if checks:
gv = numpy.dot(gen_action, eigvec)
ev = sub_eigval * eigvec
assert allclose2(gv, ev), (
'Sub-Eigval is bad: g*v=%r, e*v=%r' % (
numpy.round(gv, 3), numpy.round(ev, 3)))
assert allclose2(
numpy.dot(eigvec.conj(), eigvec), 1.0), (
'Eigenvector is not normalized.')
for seen_eigval, seen_eigvecs in list_approx_eigval_and_eigvecs:
if abs(sub_eigval - seen_eigval) <= tolerance:
assert all(allclose2(0, numpy.dot(s.conj(), eigvec))
for s in seen_eigvecs), 'Non-Orthogonality'
seen_eigvecs.append(eigvec)
break
else: # Reached end of list.
list_approx_eigval_and_eigvecs.append(
(sub_eigval, # This is also the actual eigenvalue.
[eigvec]))
for eigval, eigvecs in list_approx_eigval_and_eigvecs:
eigenspace = numpy.stack(eigvecs, axis=-1)
assert allclose2(
numpy.einsum('aj,ak->jk', eigenspace.conj(), eigenspace),
numpy.eye(eigenspace.shape[-1])), 'Bad Eigenspace'
split_eigenspaces.append((charges + (eigval,), eigenspace))
return recursively_split_eigenspaces(num_generator + 1, split_eigenspaces)
#
charge_tagged_eigenspaces = recursively_split_eigenspaces(
0, [((), initial_space)])
simultaneous_eigenbasis = numpy.stack(
[evec for _, espace in charge_tagged_eigenspaces for evec in espace.T],
axis=-1)
charges = [evec_charges
for evec_charges, espace in charge_tagged_eigenspaces
for evec in espace.T]
return simultaneous_eigenbasis, charges
def scale_u1_generator_to_8vsc_integral_charges(u1_gen, round_to_digits=3):
"""Scales a generator such that all 8v, 8s, 8c charges are integers."""
charges = []
for spin8action, _ in SPIN8_BRANCHINGS_VSC:
eigvals, _ = scipy.linalg.eigh(
numpy.einsum(spin8action.einsum,
spin8action.tensor,
1j * u1_gen.reshape((28, 1)))[:, :, 0])
assert numpy.allclose(eigvals, eigvals.real)
for eigval in eigvals:
charges.append(eigval)
approx_charges = sorted(set(abs(numpy.round(c, 6)) for c in charges) - {0.0})
factor = 1.0 / approx_charges[0]
for n in range(1, 25):
scaled_charges = [numpy.round(factor * n * c, round_to_digits)
for c in approx_charges]
if all(x == int(x) for x in scaled_charges):
return factor * n * u1_gen
raise ValueError('Could not re-scale U(1)-generator.')
def canonicalize_u1s(u1s, tolerance=1e-3):
"""Canonicalizes a collection of up to two u(1) generators."""
if u1s.shape[1] == 0:
return numpy.zeros([28, 0])
if u1s.shape[0] != 28:
raise ValueError(
'Each U(1) generator should be given as a 28-vector.')
num_u1s = u1s.shape[1]
if num_u1s > 2:
raise ValueError('Cannot handle more than two U(1)s')
if num_u1s == 1:
return scale_u1_generator_to_8vsc_integral_charges(u1s[:, 0]).reshape(28, 1)
eigvecs_T, evec_charges = get_simultaneous_eigenbasis(u1s)
a_vecs_eigvals = numpy.array(evec_charges).T
# Otherwise, we have exactly two U(1)s.
# How to reduce the charge-lattice?
zs = numpy.array([x + 1j * y for x, y in a_vecs_eigvals.T])
zs_by_origin_distance = sorted([z for z in zs if abs(z) >= tolerance],
key=abs)
z1 = zs_by_origin_distance[0]
angle = math.atan2(z1.imag, z1.real)
cos_angle = math.cos(angle)
sin_angle = math.sin(angle)
u1a = u1s[:, 0] * cos_angle + u1s[:, 1] * sin_angle
u1b = u1s[:, 0] * sin_angle - u1s[:, 1] * cos_angle
canon_u1s = numpy.stack([
scale_u1_generator_to_8vsc_integral_charges(u1a),
scale_u1_generator_to_8vsc_integral_charges(u1b)], axis=1)
return canon_u1s
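# Usage sketch for canonicalize_u1s(): generators are the columns of a
# [28, N]-array with N <= 2. The degenerate N=0 case below needs no further
# setup; realistic inputs come from decompose_reductive_lie_algebra().
def _demo_canonicalize_u1s():
  no_u1s = canonicalize_u1s(numpy.zeros([28, 0]))
  assert no_u1s.shape == (28, 0)
  return no_u1s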
def decompose_reductive_lie_algebra(residual_symmetry,
threshold=0.05):
"""Decomposes a residual symmetry into semisimple and u(1) parts.
Args:
residual_symmetry: Residual symmetry as produced by
`get_residual_gauge_symmetry()`.
threshold: Threshold for SVD generalized commutator-eigenvalue to consider
a generator as being part of the non-semisimple subalgebra.
"""
no_symmetry = numpy.zeros([28, 0])
if residual_symmetry.shape[1] == 0:
return no_symmetry, no_symmetry
commutators = numpy.einsum(
'avc,cw->avw',
numpy.einsum('abc,bv->avc', _spin8_fabc, residual_symmetry),
residual_symmetry)
su, ss, svh = scipy.linalg.svd(commutators.reshape(commutators.shape[0], -1))
del svh # Unused.
# We want those commutators that do not go to zero.
derivative_symmetry = su.T[:len(ss)][ss >= threshold].T
# By construction (via SVD), and using orthogonality of our spin(8) basis,
# `derivative_symmetry` already consists of orthogonal spin(8) generators, i.e.
# tr(AB) = 0 for basis vectors A != B.
# The 'complement' consists of u(1) factors that have zero inner product with
# `derivative_symmetry`.
if derivative_symmetry.size:
inner_products_with_input = numpy.einsum('av,aw->vw',
residual_symmetry,
derivative_symmetry)
su, ss, svh = scipy.linalg.svd(inner_products_with_input)
# Zero-pad the vector of 'generalized eigenvalues' to su's size.
ss_ext = numpy.concatenate(
[ss, numpy.zeros([max(0, su.shape[0] - len(ss))])])
u1s = numpy.einsum('av,vn->an',
residual_symmetry,
su.T[ss_ext <= threshold].T)
else: # All residual symmetry is in u(1)-factors.
return no_symmetry, residual_symmetry
# Assert that our U1s are orthogonal.
if u1s.size:
# Check generator orthonormality.
assert numpy.allclose(numpy.einsum('av,aw->vw', u1s, u1s),
numpy.eye(u1s.shape[1]), atol=1e-6)
else:
u1s = no_symmetry
return derivative_symmetry, u1s
def find_raw_cartan_subalgebra(spin8_subalgebra_generators, threshold=1e-3):
"""Finds a Cartan subalgebra for an algebra if the form A*so(3) + B*u(1)."""
if spin8_subalgebra_generators.shape[1] == 0:
return numpy.zeros([28, 0])
subalgebra_sprods = numpy.einsum(
'aj,ak->jk', spin8_subalgebra_generators, spin8_subalgebra_generators)
# Check that incoming subalgebra-generators really are reasonably orthonormal
# (up to overall scaling) w.r.t. Cartan-Killing metric.
assert numpy.allclose(subalgebra_sprods,
numpy.eye(spin8_subalgebra_generators.shape[1]))
cartan_generators_found = []
residual_charge_zero_subspace = spin8_subalgebra_generators
while True:
gen = residual_charge_zero_subspace[:, 0]
cartan_generators_found.append(gen)
assert numpy.allclose(gen, gen.real), 'Generator is not real!'
orthogonal_subalgebra = residual_charge_zero_subspace[:, 1:]
if not orthogonal_subalgebra.shape[1]:
return numpy.stack(cartan_generators_found, axis=-1)
gen_ad_action_on_spin8 = numpy.einsum('abc,a->cb', _spin8_fabc, gen)
gen_action_on_orthogonal_subalgebra = numpy.einsum(
'ai,aj->ij',
orthogonal_subalgebra,
numpy.einsum('bc,cj->bj',
gen_ad_action_on_spin8 * 1j,
orthogonal_subalgebra))
assert numpy.allclose(gen_action_on_orthogonal_subalgebra +
gen_action_on_orthogonal_subalgebra.T,
numpy.zeros_like(gen_action_on_orthogonal_subalgebra))
eigvals, eigvecs_T = scipy.linalg.eigh(gen_action_on_orthogonal_subalgebra)
nullspace_gens = []
for eigval, eigvec in zip(eigvals, eigvecs_T.T):
if abs(eigval) <= threshold:
assert numpy.allclose(eigvec, eigvec.real)
nullspace_gens.append(
numpy.einsum('ai,i->a', orthogonal_subalgebra, eigvec.real))
if not len(nullspace_gens):
return numpy.stack(cartan_generators_found, axis=-1)
nullspace = numpy.stack(nullspace_gens, axis=1)
assert numpy.allclose(nullspace, nullspace.real), 'Non-real nullspace'
assert numpy.allclose(numpy.einsum('ai,aj->ij', nullspace, nullspace),
numpy.eye(nullspace.shape[1])), 'Non-Ortho Nullspace'
residual_charge_zero_subspace = nullspace
def weightspace_decompose(generator_action,
cartan_subalgebra_generators,
space,
tolerance=1e-6):
"""Decomposes `space` into subspaces tagged by weight-vectors."""
seq_cartan_generators = list(cartan_subalgebra_generators.T)
def cartan_split(subspace_tagged_by_weight_prefix, num_cartan_generator):
cartan_action = numpy.einsum(
'aIJ,a->IJ',
generator_action,
seq_cartan_generators[num_cartan_generator] * 1j)
result = []
for weight_prefix, subspace in subspace_tagged_by_weight_prefix:
assert numpy.allclose(
numpy.einsum('aJ,aK->JK', subspace.conj(), subspace),
numpy.eye(subspace.shape[1])), (
'Non-orthonormalized subspace:\n' +
repr(numpy.round(numpy.einsum('aJ,aK->JK',
subspace.conj(),
subspace), 3)))
cartan_action_on_subspace = numpy.einsum(
'Jm,Jn->mn', subspace.conj(),
numpy.einsum('JK,Kn->Jn', cartan_action, subspace))
eigvals, eigvecs_T = scipy.linalg.eigh(cartan_action_on_subspace)
eigval_and_rel_eigenspace = aggregate_eigenvectors(eigvals, eigvecs_T)
for eigval, rel_eigenspace in eigval_and_rel_eigenspace:
ext_weight_prefix = (weight_prefix + (eigval,))
result.append((ext_weight_prefix,
numpy.einsum('In,nj->Ij',
subspace,
numpy.stack(rel_eigenspace, axis=-1))))
if num_cartan_generator == len(seq_cartan_generators) - 1:
return result
return cartan_split(result, num_cartan_generator + 1)
return cartan_split([((), space)], 0)
def get_simple_roots_info(rootspaces, threshold=0.01):
"""Extracts simple roots from weightspace-decomposition of a Lie algebra."""
# Finite-dimensional simple Lie algebras have one-dimensional root spaces.
# We use this to eliminate the Cartan subalgebra at the zero-root.
rank = len(rootspaces[0][0])
null_root = (0.0,) * rank
positive_roots = [root for root, subspace in rootspaces
if subspace.shape[1] == 1 and root > null_root]
def root_length_squared(root):
return sum(x * x for x in root)
def root_distance(root1, root2):
return max(abs(r1 - r2) for r1, r2 in zip(root1, root2))
  # If the root is 'clearly too long', drop it right away.
# It does not hurt if we allow a large amount of slack,
# as this is just for increased performance.
threshold_root_length_squared = max(
map(root_length_squared, positive_roots)) * (1 + threshold)
sum_roots = []
for root1 in positive_roots:
for root2 in positive_roots:
root12 = tuple(r1 + r2 for r1, r2 in zip(root1, root2))
if root_length_squared(root12) > threshold_root_length_squared:
continue
for sum_root in sum_roots:
if root_distance(sum_root, root12) <= threshold:
break # We already know this sum-root.
else: # Reached end of loop.
sum_roots.append(root12)
simple_roots = [root for root in positive_roots
if not any(root_distance(sum_root, root) < threshold
for sum_root in sum_roots)]
a_simple_roots = numpy.array(simple_roots)
simple_root_sprods = numpy.einsum('rj,rk->jk', a_simple_roots, a_simple_roots)
# We always normalize the length-squared of the longest root to 2.
scaling_factor_squared = 2.0 / max(
simple_root_sprods[n, n] for n in range(simple_root_sprods.shape[0]))
scaling_factor = math.sqrt(scaling_factor_squared)
scaled_root_sprods = simple_root_sprods * scaling_factor_squared
# For spin(3)^N, the roots have to be mutually orthogonal
# with length-squared 2.
assert numpy.allclose(scaled_root_sprods,
2 * numpy.eye(simple_root_sprods.shape[0]) )
pos_simple_rootspaces = [(pos_root, scaling_factor * pos_rootspace)
for (pos_root, pos_rootspace) in rootspaces
for simple_root in simple_roots
if tuple(simple_root) == tuple(pos_root)]
canonicalized_cartan_subalgebra_generators = []
for pos_root, pos_rootspace in pos_simple_rootspaces:
# For finite-dimensional Lie algebras, root spaces are one-dimensional.
assert pos_rootspace.shape[1] == 1
l_plus = pos_rootspace[:, 0]
l_minus = l_plus.conj()
cartan_h = -1j * numpy.einsum('abc,a,b->c', _spin8_fabc, l_plus, l_minus)
canonicalized_cartan_subalgebra_generators.append(cartan_h)
# TODO(tfish): Only return what we need, and *not* in a dict.
return dict(simple_root_sprods=simple_root_sprods,
canonicalized_cartan_subalgebra=numpy.stack(
canonicalized_cartan_subalgebra_generators, axis=-1),
scaling_factor_squared=scaling_factor_squared,
pos_simple_rootspaces=pos_simple_rootspaces,
scaled_root_sprods=scaled_root_sprods,
scaled_roots=a_simple_roots * math.sqrt(scaling_factor_squared))
def canonicalize_residual_spin3u1_symmetry(residual_symmetry):
"""Canonicalizes a residual so(3)^M u(1)^N symmetry."""
semisimple_part, raw_u1s = decompose_reductive_lie_algebra(residual_symmetry)
u1s = canonicalize_u1s(raw_u1s)
spin3_cartan_gens_raw = find_raw_cartan_subalgebra(semisimple_part)
return CanonicalizedSymmetry(u1s=u1s,
semisimple_part=semisimple_part,
spin3_cartan_gens=spin3_cartan_gens_raw)
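# Sketch of the typical call chain (assuming `v70` is a 70-component numpy
# array describing a critical point on the scalar manifold):
#   residual = get_residual_gauge_symmetry(v70)
#   canon = canonicalize_residual_spin3u1_symmetry(residual)
#   vsc_ad_branching, spectra = spin3u1_branching_and_spectra(canon)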
def group_charges_into_spin3u1_irreps(num_spin3s, charge_vecs):
"""Groups observed charges into irreducible representations.
Args:
num_spin3s: Length of the prefix of the charge-vector that belongs to
spin(3) angular momentum operators.
charge_vecs: List of charge-tuple vectors.
Returns:
List [((tuple(highest_spin3_weights) + tuple(u1_charges)), multiplicity),
...] of irreducible-representation descriptions, sorted by descending
combined-charge-vector.
"""
def spin3_weights(highest_weight):
"""Computes a list of spin3 weights for a given irrep highest weight.
E.g.: highest_weight = 1.5 -> [1.5, 0.5, -0.5, -1.5].
Args:
highest_weight: The highest weight (Element of [0, 0.5, 1.0, 1.5, ...]).
Returns: List of weights, in descending order.
"""
w2 = int(round(2 * highest_weight))
return [highest_weight - n for n in range(1 + w2)]
def descendants(cvec):
for spin3_part in itertools.product(
*[spin3_weights(w) for w in cvec[:num_spin3s]]):
yield spin3_part + cvec[num_spin3s:]
charges_todo = collections.Counter(charge_vecs)
irreps = collections.defaultdict(int)
while charges_todo:
cvec, cvec_mult = sorted(charges_todo.items(), reverse=True)[0]
for cvec_desc in descendants(cvec):
charges_todo[cvec_desc] -= cvec_mult
if charges_todo[cvec_desc] == 0:
del charges_todo[cvec_desc]
irreps[cvec] += cvec_mult
return sorted(irreps.items(), reverse=True) # Highest charges first.
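# Small worked example for group_charges_into_spin3u1_irreps(): a spin-1/2
# doublet plus a singlet, all with u(1) charge 0. The charges are invented
# for illustration; tuples are (spin3_weight, u1_charge) with num_spin3s=1.
def _demo_group_charges():
  observed = [(0.5, 0.0), (-0.5, 0.0), (0.0, 0.0)]
  irreps = group_charges_into_spin3u1_irreps(1, observed)
  assert irreps == [((0.5, 0.0), 1), ((0.0, 0.0), 1)]
  return irreps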
def spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks=SPIN8_BRANCHINGS,
simplify=round2):
"""Computes decompositions into so(3)^M x u(1)^N irreducible representations.
Args:
canonicalized_symmetry: A `CanonicalizedSymmetry` object.
decomposition_tasks: Sequence of pairs (spin8action, tasks),
where `tasks` is a sequence of pairs (tag, orthogonalized_subspace).
simplify: The rounding function used to map approximately-integer charges
to integers.
"""
spin3_gens = (canonicalized_symmetry.spin3_cartan_gens.T
if (canonicalized_symmetry.spin3_cartan_gens is not None
and len(canonicalized_symmetry.spin3_cartan_gens)) else [])
u1_gens = (canonicalized_symmetry.u1s.T
if (canonicalized_symmetry.u1s is not None
and len(canonicalized_symmetry.u1s)) else [])
num_spin3s = len(spin3_gens)
num_u1s = len(u1_gens)
def grouped(charges):
# Spin(3) angular momentum charges need to be half-integral.
# For U(1) generators, we are not requiring this.
assert all(round2(2 * c) == int(round2(2 * c))
for charge_vec in charges
for c in charge_vec[:num_spin3s])
return group_charges_into_spin3u1_irreps(
num_spin3s,
[tuple(map(simplify, charge_vec)) for charge_vec in charges])
if num_spin3s:
rootspaces = weightspace_decompose(
_spin8_fabc,
spin3_gens.T,
canonicalized_symmetry.semisimple_part)
sroot_info = get_simple_roots_info(rootspaces)
angular_momentum_u1s = list(sroot_info['canonicalized_cartan_subalgebra'].T)
else:
angular_momentum_u1s = []
list_commuting_gens = (
[g for g in [angular_momentum_u1s, u1_gens] if len(g)])
commuting_gens = (numpy.concatenate(list_commuting_gens).T
if list_commuting_gens else numpy.zeros([28, 0]))
ret = []
for spin8action, tasks in decomposition_tasks:
ret.append([])
for task_tag, space_to_decompose in tasks:
_, charges = get_simultaneous_eigenbasis(
commuting_gens,
gen_action_einsum=spin8action.einsum,
gen_action_tensor=spin8action.tensor,
initial_space=space_to_decompose)
ret[-1].append((task_tag, grouped(charges)))
return ret
def spin3u1_branching_and_spectra(canonicalized_symmetry,
decomposition_tasks=()):
"""Computes so(3)^M x u(1)^N spectra."""
vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
spectra = spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks)
return vsc_ad_branching, spectra
def spin3u1_physics(
canonicalized_symmetry,
mass_tagged_eigenspaces_gravitinos=(),
mass_tagged_eigenspaces_fermions=(),
mass_tagged_eigenspaces_scalars=(),
# Note that we see cases where we have very uneven parity-mixtures.
parity_tolerance=1e-7):
"""Computes so(3)^M x u(1)^N spectra."""
vsc_ad_branching = spin3u1_decompose(canonicalized_symmetry)
decomposition_tasks = []
# Gravitino tasks.
gravitino_tasks = []
for gravitino_mass, basis in mass_tagged_eigenspaces_gravitinos:
subspace = numpy.array(basis).T
task_tag = ('gravitinos', subspace.shape, gravitino_mass)
gravitino_tasks.append((task_tag, subspace))
decomposition_tasks.append(
(SPIN8_ACTION_8V, gravitino_tasks))
# Fermion tasks.
fermion_tasks = []
for fermion_mass, basis in mass_tagged_eigenspaces_fermions:
subspace = numpy.array(basis).T
task_tag = ('fermions', subspace.shape, fermion_mass)
fermion_tasks.append((task_tag, subspace))
decomposition_tasks.append(
(SPIN8_ACTION_FERMIONS, fermion_tasks))
# Scalar tasks.
scalar_tasks = []
# For scalars, we try to split off mass-eigenstates that are
# 35s-only or 35c-only.
p_op = numpy.eye(70)
p_op[35:, 35:] *= -1
for scalar_mass, basis in mass_tagged_eigenspaces_scalars:
a_basis = numpy.array(basis)
p_op_on_basis = numpy.einsum('jn,nm,km->jk', a_basis.conj(), p_op, a_basis)
assert numpy.allclose(p_op_on_basis, p_op_on_basis.real)
assert numpy.allclose(p_op_on_basis, p_op_on_basis.T)
p_op_eigvals, p_op_eigvecs_T = numpy.linalg.eigh(p_op_on_basis)
p_op_eigvals_re = p_op_eigvals.real
assert numpy.allclose(p_op_eigvals, p_op_eigvals_re)
# We have to lift the p_op_eigvecs_T to a_basis.
subspace_eigvecs = numpy.einsum('vn,vV->Vn', p_op_eigvecs_T, a_basis)
eigval_eigvecs = aggregate_eigenvectors(p_op_eigvals_re, subspace_eigvecs,
tolerance=1e-4)
# subspaces_35s and subspaces_35c each have <=1 entries.
subspaces_35s = [eigvecs for eigval, eigvecs in eigval_eigvecs
if eigval > 1 - parity_tolerance]
subspaces_35c = [eigvecs for eigval, eigvecs in eigval_eigvecs
if eigval < -1 + parity_tolerance]
merged_subspaces_other = [
eigvec for eigval, eigvecs in eigval_eigvecs
for eigvec in eigvecs
if -1 + parity_tolerance <= eigval <= 1 - parity_tolerance]
for subspace in subspaces_35s:
a_subspace = numpy.array(subspace).T
task_tag = ('scalars', a_subspace.shape, scalar_mass, 's')
scalar_tasks.append((task_tag, a_subspace))
for subspace in subspaces_35c:
a_subspace = numpy.array(subspace).T
task_tag = ('scalars', a_subspace.shape, scalar_mass, 'c')
scalar_tasks.append((task_tag, a_subspace))
# "Mixture" states. While we do get them in terms of parity-eigenstates,
# for 'weird' eigenvalues such as -1/3. Here, we just merge them all back
# together into one space, i.e. forget about resolving the spectrum.
# Why? Otherwise, we may see in the report
# "0.000m{1}, 0.000m{1}, 0.000m{1}, ...", which is not overly informative.
a_subspace = numpy.array(merged_subspaces_other).T
if len(merged_subspaces_other):
task_tag = ('scalars', a_subspace.shape, scalar_mass, 'm')
scalar_tasks.append((task_tag, a_subspace))
decomposition_tasks.append(
(SPIN8_ACTION_SCALARS, scalar_tasks))
spectra = spin3u1_decompose(canonicalized_symmetry,
decomposition_tasks)
return vsc_ad_branching, spectra
|
apache-2.0
| 8,383,226,262,256,209,000 | 42.08508 | 81 | 0.638372 | false | 3.303271 | false | false | false |
llvm-mirror/llvm
|
utils/llvm-locstats/llvm-locstats.py
|
6
|
7999
|
#!/usr/bin/env python
#
# This is a tool that works as a debug location coverage calculator.
# It parses the llvm-dwarfdump --statistics output and reports it
# in a more human readable way.
#
from __future__ import print_function
import argparse
import os
import sys
from json import loads
from math import ceil
from subprocess import Popen, PIPE
def coverage_buckets():
yield '0%'
yield '1-9%'
for start in range(10, 91, 10):
yield '{0}-{1}%'.format(start, start + 9)
yield '100%'
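# Quick self-check of the bucket labels produced above (illustrative only,
# not used by the tool itself):
def _demo_coverage_buckets():
  buckets = list(coverage_buckets())
  assert buckets[0] == '0%' and buckets[1] == '1-9%'
  assert buckets[2] == '10-19%' and buckets[-1] == '100%'
  assert len(buckets) == 12
  return buckets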
def locstats_output(
variables_total,
variables_total_locstats,
variables_with_loc,
scope_bytes_covered,
scope_bytes_from_first_def,
variables_coverage_map
):
pc_ranges_covered = int(ceil(scope_bytes_covered * 100.0)
/ scope_bytes_from_first_def)
variables_coverage_per_map = {}
for cov_bucket in coverage_buckets():
variables_coverage_per_map[cov_bucket] = \
int(ceil(variables_coverage_map[cov_bucket] * 100.0) \
/ variables_total_locstats)
print (' =================================================')
print (' Debug Location Statistics ')
print (' =================================================')
print (' cov% samples percentage(~) ')
print (' -------------------------------------------------')
for cov_bucket in coverage_buckets():
print (' {0:6} {1:8d} {2:3d}%'. \
format(cov_bucket, variables_coverage_map[cov_bucket], \
variables_coverage_per_map[cov_bucket]))
print (' =================================================')
print (' -the number of debug variables processed: ' \
+ str(variables_total_locstats))
print (' -PC ranges covered: ' + str(pc_ranges_covered) + '%')
# Only if we are processing all the variables output the total
# availability.
if variables_total and variables_with_loc:
total_availability = int(ceil(variables_with_loc * 100.0) \
/ variables_total)
print (' -------------------------------------------------')
print (' -total availability: ' + str(total_availability) + '%')
print (' =================================================')
def parse_program_args(parser):
parser.add_argument('-only-variables', action='store_true',
default=False,
help='calculate the location statistics only for '
'local variables'
)
parser.add_argument('-only-formal-parameters', action='store_true',
default=False,
help='calculate the location statistics only for '
'formal parameters'
)
parser.add_argument('-ignore-debug-entry-values', action='store_true',
default=False,
help='ignore the location statistics on locations with '
'entry values'
)
parser.add_argument('file_name', type=str, help='file to process')
return parser.parse_args()
def Main():
parser = argparse.ArgumentParser()
results = parse_program_args(parser)
if len(sys.argv) < 2:
print ('error: Too few arguments.')
parser.print_help()
sys.exit(1)
if results.only_variables and results.only_formal_parameters:
print ('error: Please use just one only* option.')
parser.print_help()
sys.exit(1)
# These will be different due to different options enabled.
variables_total = None
variables_total_locstats = None
variables_with_loc = None
variables_scope_bytes_covered = None
variables_scope_bytes_from_first_def = None
variables_scope_bytes_entry_values = None
variables_coverage_map = {}
binary = results.file_name
# Get the directory of the LLVM tools.
llvm_dwarfdump_cmd = os.path.join(os.path.dirname(__file__), \
"llvm-dwarfdump")
# The statistics llvm-dwarfdump option.
llvm_dwarfdump_stats_opt = "--statistics"
subproc = Popen([llvm_dwarfdump_cmd, llvm_dwarfdump_stats_opt, binary], \
stdin=PIPE, stdout=PIPE, stderr=PIPE, \
universal_newlines = True)
cmd_stdout, cmd_stderr = subproc.communicate()
# Get the JSON and parse it.
json_parsed = None
try:
json_parsed = loads(cmd_stdout)
except:
print ('error: No valid llvm-dwarfdump statistics found.')
sys.exit(1)
if results.only_variables:
# Read the JSON only for local variables.
variables_total_locstats = \
json_parsed['total vars procesed by location statistics']
variables_scope_bytes_covered = \
json_parsed['vars scope bytes covered']
variables_scope_bytes_from_first_def = \
json_parsed['vars scope bytes total']
if not results.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "vars with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['vars entry value scope bytes covered']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"vars (excluding the debug entry values) " \
"with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
elif results.only_formal_parameters:
# Read the JSON only for formal parameters.
variables_total_locstats = \
json_parsed['total params procesed by location statistics']
variables_scope_bytes_covered = \
json_parsed['formal params scope bytes covered']
variables_scope_bytes_from_first_def = \
json_parsed['formal params scope bytes total']
if not results.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "params with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['formal params entry value scope bytes covered']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = \
"params (excluding the debug entry values) " \
"with {} of its scope covered".format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
# Read the JSON for both local variables and formal parameters.
variables_total = \
json_parsed['source variables']
variables_with_loc = json_parsed['variables with location']
variables_total_locstats = \
json_parsed['total variables procesed by location statistics']
variables_scope_bytes_covered = \
json_parsed['scope bytes covered']
variables_scope_bytes_from_first_def = \
json_parsed['scope bytes total']
if not results.ignore_debug_entry_values:
for cov_bucket in coverage_buckets():
cov_category = "variables with {} of its scope covered". \
format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
else:
variables_scope_bytes_entry_values = \
json_parsed['entry value scope bytes covered']
variables_scope_bytes_covered = variables_scope_bytes_covered \
- variables_scope_bytes_entry_values
for cov_bucket in coverage_buckets():
cov_category = "variables (excluding the debug entry values) " \
"with {} of its scope covered". format(cov_bucket)
variables_coverage_map[cov_bucket] = json_parsed[cov_category]
# Pretty print collected info.
locstats_output(
variables_total,
variables_total_locstats,
variables_with_loc,
variables_scope_bytes_covered,
variables_scope_bytes_from_first_def,
variables_coverage_map
)
if __name__ == '__main__':
Main()
sys.exit(0)
|
apache-2.0
| -7,798,051,178,738,378,000 | 37.272727 | 79 | 0.627578 | false | 3.99351 | false | false | false |
hattwj/rainmaker
|
rainmaker/tests/unit/db/main_test.py
|
1
|
2268
|
from rainmaker.tests import test_helper, factory_helper
from rainmaker.main import Application
from rainmaker.db.main import init_db, HostFile, SyncFile, Sync, \
Host, Resolution, Download
from rainmaker.db import main
fh = factory_helper
def test_db_init():
init_db()
def test_sync_file_version_init():
init_db()
assert SyncFile(version=5).version == 5
def test_rain_base_before_changes():
session = init_db()
sync = factory_helper.Sync()
sync_file = factory_helper.SyncFile(sync, 1)
assert sync_file.before_changes()['sync_id'] == None
def test_sqlalchemy_property_assignment():
sf = HostFile()
sf.vers = [{'version': 0, 'file_size':5}]
assert sf.vers[0].file_size == 5
sf = SyncFile()
#print('Doing Set')
sf.vers = [{'version': 0, 'file_size':5}]
#print('Did Set')
assert sf.vers[0].file_size == 5
def test_sync_delete_cascades():
session = init_db()
sync = factory_helper.Sync()
sync_file = factory_helper.SyncFile(sync, 1, fake=True,
file_size=98765 ,is_dir=False)
host = factory_helper.Host(sync, 1)
host_file = factory_helper.HostFile(host, 1, is_dir=False)
session.add(sync)
session.commit()
sync = session.query(Sync).first()
assert len(sync.hosts) > 0
assert len(session.query(Host).all()) > 0
assert len(session.query(SyncFile).all()) > 0
assert len(session.query(HostFile).all()) > 0
session.delete(sync)
assert len(session.query(Sync).all()) == 0
assert len(session.query(Host).all()) == 0
assert len(session.query(SyncFile).all()) == 0
assert len(session.query(HostFile).all()) == 0
def test_resolution():
db = init_db()
r = Resolution()
sync = fh.SyncRand()
host = fh.HostRand(sync)
r.sync = sync
r.host = host
r.host_file = host.host_files[0]
r.sync_file = sync.sync_files[0]
r.status = Resolution.THEIRS_CHANGED
r.state = Resolution.DELETED
db.add(r)
db.commit()
return db
def test_lazy_loading():
db = test_resolution()
r = db.query(Resolution).first()
d = Download(rel_path="test", sync_id=r.sync_id)
r.download = d
db.add(r)
db.commit()
r = db.query(Resolution).first()
assert r.download is not None
|
gpl-3.0
| -435,736,149,443,689,500 | 28.076923 | 66 | 0.637566 | false | 3.167598 | true | false | false |
rvmoura96/projeto-almoxarifado
|
myvenv/Lib/site-packages/django_filters/rest_framework/filterset.py
|
1
|
1483
|
from __future__ import absolute_import
from copy import deepcopy
from django import forms
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_filters import filterset
from .. import compat, utils
from .filters import BooleanFilter, IsoDateTimeFilter
FILTER_FOR_DBFIELD_DEFAULTS = deepcopy(filterset.FILTER_FOR_DBFIELD_DEFAULTS)
FILTER_FOR_DBFIELD_DEFAULTS.update({
models.DateTimeField: {'filter_class': IsoDateTimeFilter},
models.BooleanField: {'filter_class': BooleanFilter},
})
class FilterSet(filterset.FilterSet):
FILTER_DEFAULTS = FILTER_FOR_DBFIELD_DEFAULTS
@property
def form(self):
form = super(FilterSet, self).form
if compat.is_crispy():
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit
layout_components = list(form.fields.keys()) + [
Submit('', _('Submit'), css_class='btn-default'),
]
helper = FormHelper()
helper.form_method = 'GET'
helper.template_pack = 'bootstrap3'
helper.layout = Layout(*layout_components)
form.helper = helper
return form
@property
def qs(self):
from rest_framework.exceptions import ValidationError
try:
return super(FilterSet, self).qs
except forms.ValidationError as e:
raise ValidationError(utils.raw_validation(e))
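# A minimal usage sketch; `Product` and its fields are invented for the
# example and are not part of this package:
#
#     class ProductFilter(FilterSet):
#         class Meta:
#             model = Product
#             fields = ['in_stock', 'created']
#
# With FILTER_FOR_DBFIELD_DEFAULTS above, `created` (a DateTimeField) is
# filtered through IsoDateTimeFilter and `in_stock` (a BooleanField) through
# the rest-framework-aware BooleanFilter by default.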
|
mit
| -1,568,155,611,122,657,300 | 27.519231 | 77 | 0.662846 | false | 4.286127 | false | false | false |
tom-f-oconnell/multi_tracker
|
multi_tracker_analysis/data_slicing.py
|
1
|
3233
|
import numpy as np
def get_keys_in_framerange(pd, framerange):
return np.unique(pd.ix[framerange[0]:framerange[-1]].objid)
def get_frames_for_key(pd, key):
return pd[pd.objid==key].frames.values
def get_data_in_framerange(pd, framerange):
# pd_subset
return pd.ix[framerange[0]:framerange[-1]]
def get_data_in_epoch_timerange(pd, timerange):
# pd_subset
return pd[(pd.time_epoch>timerange[0]) & (pd.time_epoch<timerange[1])]
def get_nframes_per_key(pd):
first_key = np.min(pd.objid)
last_key = np.max(pd.objid)
bins = np.arange(first_key, last_key+2, dtype=float)
bins -= 0.5
h, b = np.histogram(pd.objid, bins)
keys = np.arange(first_key, last_key+1, dtype=int)
return keys, h
def get_nkeys_per_frame(pd):
first_key = np.min(pd.frames)
last_key = np.max(pd.frames)
bins = np.arange(first_key, last_key, dtype=float)
bins -= 0.5
h, b = np.histogram(pd.frames, bins)
# can use pd.frames.groupby(pd.frames).agg('count')
return h
def calc_frames_with_object_in_circular_region(pd, center, radius, region_name='region'):
'''
center - list (x,y) units should match units of position_x and position_y
'''
x = pd.position_x
y = pd.position_y
r0 = (center[0]-x)**2 + (center[1]-y)**2
indices = np.where( r0<= radius**2 )
pd[region_name] = np.zeros_like(pd.position_x)
pd[region_name].iloc[indices] = 1
return pd
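# Usage sketch for the circular-region tagger above; the tiny DataFrame is
# made up, real data would come from the tracker output.
def _demo_circular_region():
    import pandas
    trajec = pandas.DataFrame({'objid': [1, 1, 2],
                               'frames': [0, 1, 0],
                               'position_x': [0.0, 10.0, 1.0],
                               'position_y': [0.0, 10.0, 1.0]})
    tagged = calc_frames_with_object_in_circular_region(
        trajec, [0, 0], 5, region_name='near_origin')
    # Rows 0 and 2 fall inside the 5-unit circle; row 1 does not.
    print(tagged['near_origin'].values)
    return tagged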
def calc_frames_with_object_NOT_in_circular_region(pd, center, radius, region_name='region'):
'''
center - list (x,y) units should match units of position_x and position_y
'''
x = pd.position_x
y = pd.position_y
r0 = (center[0]-x)**2 + (center[1]-y)**2
indices = np.where( r0> radius**2 )
pd[region_name] = np.zeros_like(pd.position_x)
pd[region_name].iloc[indices] = 1
return pd
def remove_objects_that_enter_area_outside_circular_region(pd, center, radius, region_name='outofbounds'):
pd = calc_frames_with_object_NOT_in_circular_region(pd, center, radius, region_name=region_name)
outofbounds = np.unique(pd[pd[region_name]==1].objid.values)
keys_ok = [key for key in pd.objid if key not in outofbounds]
indices_where_object_acceptable = pd.objid.isin(keys_ok)
culled_pd = pd[indices_where_object_acceptable]
return culled_pd
def calc_frames_with_object_in_rectangular_region(pd, x_range, y_range, z_range=None, region_name='region'):
'''
center - list (x,y) units should match units of position_x and position_y
'''
if z_range is None:
x = pd.position_x
y = pd.position_y
indices = np.where( (x>x_range[0]) & (x<x_range[-1]) & (y>y_range[0]) & (y<y_range[-1]) )
else:
x = pd.position_x
y = pd.position_y
z = pd.position_z
indices = np.where( (x>x_range[0]) & (x<x_range[-1]) & (y>y_range[0]) & (y<y_range[-1]) & (z>z_range[0]) & (z<z_range[-1]) )
pd[region_name] = np.zeros_like(pd.position_x)
pd[region_name].iloc[indices] = 1
return pd
def get_pd_subset_from_keys(pd, keys):
pd_subset = pd.query('objid in @keys')
return pd_subset
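# Typical slicing workflow combining the helpers above (column names follow
# the rest of this module; the DataFrame would come from the tracker, and the
# `.ix`-era pandas this module was written against is assumed):
def _demo_slicing_workflow(trajec_pd):
    keys = get_keys_in_framerange(trajec_pd, [100, 200])
    subset = get_pd_subset_from_keys(trajec_pd, keys)
    return keys, subset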
|
mit
| -5,570,509,066,048,745,000 | 31.33 | 132 | 0.615218 | false | 2.868678 | false | false | false |
globocom/database-as-a-service
|
dbaas/account/migrations/0010_auto__add_field_team_token.py
|
1
|
8543
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Team.token'
db.add_column(u'account_team', 'token',
self.gf('django.db.models.fields.CharField')(max_length=406, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Team.token'
db.delete_column(u'account_team', 'token')
models = {
u'account.organization': {
'Meta': {'object_name': 'Organization'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'external': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grafana_datasource': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_endpoint': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'grafana_hostgroup': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'grafana_orgid': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.roleenvironment': {
'Meta': {'object_name': 'RoleEnvironment'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'environments': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'roles'", 'blank': 'True', 'to': u"orm['physical.Environment']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'role_environment'", 'unique': 'True', 'to': u"orm['auth.Group']"}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'account.team': {
'Meta': {'ordering': "[u'name']", 'object_name': 'Team'},
'contacts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'database_alocation_limit': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '2'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'team_organization'", 'on_delete': 'models.PROTECT', 'to': u"orm['account.Organization']"}),
'role': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']"}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '406', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'physical.cloud': {
'Meta': {'object_name': 'Cloud'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'physical.environment': {
'Meta': {'object_name': 'Environment'},
'cloud': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'environment_cloud'", 'on_delete': 'models.PROTECT', 'to': u"orm['physical.Cloud']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'migrate_environment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'migrate_to'", 'null': 'True', 'to': u"orm['physical.Environment']"}),
'min_of_zones': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
}
}
complete_apps = ['account']
|
bsd-3-clause
| 7,315,269,892,506,538,000 | 75.285714 | 195 | 0.557415 | false | 3.61685 | false | false | false |
huggingface/transformers
|
src/transformers/models/t5/tokenization_t5_fast.py
|
1
|
8622
|
# coding=utf-8
# Copyright 2018 T5 Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Tokenization class for model T5."""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...file_utils import is_sentencepiece_available
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if is_sentencepiece_available():
from .tokenization_t5 import T5Tokenizer
else:
T5Tokenizer = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/spiece.model",
"t5-base": "https://huggingface.co/t5-base/resolve/main/spiece.model",
"t5-large": "https://huggingface.co/t5-large/resolve/main/spiece.model",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/spiece.model",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/spiece.model",
},
"tokenizer_file": {
"t5-small": "https://huggingface.co/t5-small/resolve/main/tokenizer.json",
"t5-base": "https://huggingface.co/t5-base/resolve/main/tokenizer.json",
"t5-large": "https://huggingface.co/t5-large/resolve/main/tokenizer.json",
"t5-3b": "https://huggingface.co/t5-3b/resolve/main/tokenizer.json",
"t5-11b": "https://huggingface.co/t5-11b/resolve/main/tokenizer.json",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"t5-small": 512,
"t5-base": 512,
"t5-large": 512,
"t5-3b": 512,
"t5-11b": 512,
}
class T5TokenizerFast(PreTrainedTokenizerFast):
"""
Construct a "fast" T5 tokenizer (backed by HuggingFace's `tokenizers` library). Based on `Unigram
<https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=unigram#models>`__.
This tokenizer inherits from :class:`~transformers.PreTrainedTokenizerFast` which contains most of the main
methods. Users should refer to this superclass for more information regarding those methods.
Args:
vocab_file (:obj:`str`):
`SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that
contains the vocabulary necessary to instantiate a tokenizer.
eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`):
The end of sequence token.
.. note::
When building a sequence using special tokens, this is not the token that is used for the end of
sequence. The token used is the :obj:`sep_token`.
unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`):
The token used for padding, for example when batching sequences of different lengths.
extra_ids (:obj:`int`, `optional`, defaults to 100):
Add a number of extra ids added to the end of the vocabulary for use as sentinels. These tokens are
accessible as "<extra_id_{%d}>" where "{%d}" is a number between 0 and extra_ids-1. Extra tokens are
indexed from the end of the vocabulary up to beginning ("<extra_id_0>" is the last token in the vocabulary
like in T5 preprocessing see `here
<https://github.com/google-research/text-to-text-transfer-transformer/blob/9fd7b14a769417be33bc6c850f9598764913c833/t5/data/preprocessors.py#L2117>`__).
additional_special_tokens (:obj:`List[str]`, `optional`):
Additional special tokens used by the tokenizer.
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = T5Tokenizer
prefix_tokens: List[int] = []
def __init__(
self,
vocab_file=None,
tokenizer_file=None,
eos_token="</s>",
unk_token="<unk>",
pad_token="<pad>",
extra_ids=100,
additional_special_tokens=None,
**kwargs
):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
extra_tokens = len(set(filter(lambda x: bool("extra_id_" in str(x)), additional_special_tokens)))
if extra_tokens != extra_ids:
raise ValueError(
f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are provided to T5Tokenizer. "
"In this case the additional_special_tokens must include the extra_ids tokens"
)
super().__init__(
vocab_file,
tokenizer_file=tokenizer_file,
eos_token=eos_token,
unk_token=unk_token,
pad_token=pad_token,
extra_ids=extra_ids,
additional_special_tokens=additional_special_tokens,
**kwargs,
)
self.vocab_file = vocab_file
self._extra_ids = extra_ids
def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
if not os.path.isdir(save_directory):
logger.error(f"Vocabulary path ({save_directory}) should be a directory")
return
out_vocab_file = os.path.join(
save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
)
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
logger.info(f"Copy vocab file to {out_vocab_file}")
return (out_vocab_file,)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
        Build model inputs from a sequence or a pair of sequences for sequence classification tasks by concatenating and
adding special tokens. A sequence has the following format:
- single sequence: ``X </s>``
- pair of sequences: ``A </s> B </s>``
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs to which the special tokens will be added.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.
"""
token_ids_0 = token_ids_0 + [self.eos_token_id]
if token_ids_1 is None:
return self.prefix_tokens + token_ids_0
else:
token_ids_1 = token_ids_1 + [self.eos_token_id]
return self.prefix_tokens + token_ids_0 + token_ids_1
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
"""
Create a mask from the two sequences passed to be used in a sequence-pair classification task. T5 does not make
use of token type ids, therefore a list of zeros is returned.
Args:
token_ids_0 (:obj:`List[int]`):
List of IDs.
token_ids_1 (:obj:`List[int]`, `optional`):
Optional second list of IDs for sequence pairs.
Returns:
:obj:`List[int]`: List of zeros.
"""
eos = [self.eos_token_id]
if token_ids_1 is None:
return len(token_ids_0 + eos) * [0]
return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
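# Illustrative usage sketch (assumes the surrounding `transformers` package is installed;
# the checkpoint name is only an example):
#   tokenizer = T5TokenizerFast.from_pretrained("t5-small")
#   ids = tokenizer("translate English to German: Hello").input_ids
#   # `ids` ends with the </s> (eos) id appended by build_inputs_with_special_tokens above.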
|
apache-2.0
| 2,695,773,239,925,617,700 | 41.683168 | 164 | 0.6299 | false | 3.63644 | false | false | false |
mayuanucas/notes
|
python/code/linearunit.py
|
1
|
1443
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from perceptron import Perceptron
class LinearUnit(Perceptron):
def __init__(self, input_num, activator):
'''
        Initialize the perceptron: set the number of input parameters and the activation function.
'''
Perceptron.__init__(self, input_num, activator)
def func(x):
'''
    Define the activation function func.
'''
return x
def get_training_dataset():
'''
    Build the training dataset.
    # Input vector list: each item represents years of work experience
'''
all_input_vecs = [[5], [3], [8], [1.4], [10.1]]
    # Expected output list; corresponds one-to-one with the inputs and represents monthly salary
labels = [5500, 2300, 7600, 1800, 11400]
return all_input_vecs, labels
def train_linear_unit():
'''
    Train the linear unit with the data.
'''
    # Create the linear unit with 1 input parameter and func as the activation function
lu = LinearUnit(1, func)
    # Train for 10 iterations with a learning rate of 0.01
all_input_vecs, labels = get_training_dataset()
lu.train(all_input_vecs, labels, 10, 0.01)
    # Return the trained linear unit
return lu
if __name__ == '__main__':
    # Train the linear unit
linear_unit = train_linear_unit()
    # Print the weights obtained from training
print(linear_unit)
    # Test predictions
print('Work 3.4 years, monthly salary = %.2f' % linear_unit.predict([3.4]))
print('Work 15 years, monthly salary = %.2f' % linear_unit.predict([15]))
print('Work 1.5 years, monthly salary = %.2f' % linear_unit.predict([1.5]))
print('Work 6.3 years, monthly salary = %.2f' % linear_unit.predict([6.3]))
|
apache-2.0
| -2,600,775,387,799,649,300 | 22.28 | 76 | 0.647463 | false | 1.762121 | false | false | false |
xuxiao19910803/edx-platform
|
lms/djangoapps/oauth_tianyuyun/views.py
|
1
|
1363
|
# -*- coding: utf-8 -*-
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate, logout, login
from .utils import TianYuClient
from .settings import TIANYUYUN_LOGIN_URL
LOGIN_SUCCESS_REDIRECT_URL = '/dashboard'
LOGIN_CREATE_SUCCESS_REDIRECT_URL = '/dashboard' # '/account/settings'
LOGIN_ERROR_REDIRECT_URL = TIANYUYUN_LOGIN_URL.split('?')[0]
def login_tianyuyun(request):
ticket = request.GET.get('ticket', '')
if ticket:
client = TianYuClient()
usesessionid = client.get_usesessionid_by_ticket(ticket)
if usesessionid:
userinfo = client.get_userinfo_by_sessionid(usesessionid)
if userinfo.get('idcardno', ''):
user = request.user if request.user.is_authenticated() else None
oauth_obj, create = client.get_or_create_oauth_by_userinfo(userinfo, user)
if oauth_obj and oauth_obj.user:
user = authenticate(oauth_obj=oauth_obj, username='')
login(request, user)
if create:
return HttpResponseRedirect(LOGIN_CREATE_SUCCESS_REDIRECT_URL)
else:
return HttpResponseRedirect(LOGIN_SUCCESS_REDIRECT_URL)
return HttpResponseRedirect(LOGIN_SUCCESS_REDIRECT_URL)
|
agpl-3.0
| 3,580,869,867,609,412,000 | 40.59375 | 90 | 0.624358 | false | 4.068657 | false | false | false |
davy39/eric
|
Plugins/VcsPlugins/vcsSubversion/Ui_SvnUrlSelectionDialog.py
|
1
|
5231
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './Plugins/VcsPlugins/vcsSubversion/SvnUrlSelectionDialog.ui'
#
# Created: Tue Nov 18 17:53:57 2014
# by: PyQt5 UI code generator 5.3.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_SvnUrlSelectionDialog(object):
def setupUi(self, SvnUrlSelectionDialog):
SvnUrlSelectionDialog.setObjectName("SvnUrlSelectionDialog")
SvnUrlSelectionDialog.resize(542, 195)
SvnUrlSelectionDialog.setSizeGripEnabled(True)
self.vboxlayout = QtWidgets.QVBoxLayout(SvnUrlSelectionDialog)
self.vboxlayout.setObjectName("vboxlayout")
self.urlGroup1 = QtWidgets.QGroupBox(SvnUrlSelectionDialog)
self.urlGroup1.setObjectName("urlGroup1")
self.hboxlayout = QtWidgets.QHBoxLayout(self.urlGroup1)
self.hboxlayout.setObjectName("hboxlayout")
self.repoRootLabel1 = QtWidgets.QLabel(self.urlGroup1)
self.repoRootLabel1.setObjectName("repoRootLabel1")
self.hboxlayout.addWidget(self.repoRootLabel1)
self.typeCombo1 = QtWidgets.QComboBox(self.urlGroup1)
self.typeCombo1.setObjectName("typeCombo1")
self.hboxlayout.addWidget(self.typeCombo1)
self.labelCombo1 = QtWidgets.QComboBox(self.urlGroup1)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelCombo1.sizePolicy().hasHeightForWidth())
self.labelCombo1.setSizePolicy(sizePolicy)
self.labelCombo1.setEditable(True)
self.labelCombo1.setObjectName("labelCombo1")
self.hboxlayout.addWidget(self.labelCombo1)
self.vboxlayout.addWidget(self.urlGroup1)
self.urlGroup2 = QtWidgets.QGroupBox(SvnUrlSelectionDialog)
self.urlGroup2.setObjectName("urlGroup2")
self.hboxlayout1 = QtWidgets.QHBoxLayout(self.urlGroup2)
self.hboxlayout1.setObjectName("hboxlayout1")
self.repoRootLabel2 = QtWidgets.QLabel(self.urlGroup2)
self.repoRootLabel2.setObjectName("repoRootLabel2")
self.hboxlayout1.addWidget(self.repoRootLabel2)
self.typeCombo2 = QtWidgets.QComboBox(self.urlGroup2)
self.typeCombo2.setObjectName("typeCombo2")
self.hboxlayout1.addWidget(self.typeCombo2)
self.labelCombo2 = QtWidgets.QComboBox(self.urlGroup2)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.labelCombo2.sizePolicy().hasHeightForWidth())
self.labelCombo2.setSizePolicy(sizePolicy)
self.labelCombo2.setEditable(True)
self.labelCombo2.setObjectName("labelCombo2")
self.hboxlayout1.addWidget(self.labelCombo2)
self.vboxlayout.addWidget(self.urlGroup2)
self.summaryCheckBox = QtWidgets.QCheckBox(SvnUrlSelectionDialog)
self.summaryCheckBox.setObjectName("summaryCheckBox")
self.vboxlayout.addWidget(self.summaryCheckBox)
self.buttonBox = QtWidgets.QDialogButtonBox(SvnUrlSelectionDialog)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
self.buttonBox.setObjectName("buttonBox")
self.vboxlayout.addWidget(self.buttonBox)
self.retranslateUi(SvnUrlSelectionDialog)
self.buttonBox.accepted.connect(SvnUrlSelectionDialog.accept)
self.buttonBox.rejected.connect(SvnUrlSelectionDialog.reject)
QtCore.QMetaObject.connectSlotsByName(SvnUrlSelectionDialog)
SvnUrlSelectionDialog.setTabOrder(self.typeCombo1, self.labelCombo1)
SvnUrlSelectionDialog.setTabOrder(self.labelCombo1, self.typeCombo2)
SvnUrlSelectionDialog.setTabOrder(self.typeCombo2, self.labelCombo2)
SvnUrlSelectionDialog.setTabOrder(self.labelCombo2, self.summaryCheckBox)
SvnUrlSelectionDialog.setTabOrder(self.summaryCheckBox, self.buttonBox)
def retranslateUi(self, SvnUrlSelectionDialog):
_translate = QtCore.QCoreApplication.translate
SvnUrlSelectionDialog.setWindowTitle(_translate("SvnUrlSelectionDialog", "Subversion Diff"))
self.urlGroup1.setTitle(_translate("SvnUrlSelectionDialog", "Repository URL 1"))
self.typeCombo1.setToolTip(_translate("SvnUrlSelectionDialog", "Select the URL type"))
self.labelCombo1.setToolTip(_translate("SvnUrlSelectionDialog", "Enter the label name or path"))
self.urlGroup2.setTitle(_translate("SvnUrlSelectionDialog", "Repository URL 2"))
self.typeCombo2.setToolTip(_translate("SvnUrlSelectionDialog", "Select the URL type"))
self.labelCombo2.setToolTip(_translate("SvnUrlSelectionDialog", "Enter the label name or path"))
self.summaryCheckBox.setToolTip(_translate("SvnUrlSelectionDialog", "Select to just show a summary of differences"))
self.summaryCheckBox.setText(_translate("SvnUrlSelectionDialog", "Summary only"))
|
gpl-3.0
| -6,944,341,487,784,356,000 | 57.775281 | 124 | 0.751673 | false | 3.933083 | false | false | false |
raffaellod/abamake
|
src/comk/argparser.py
|
1
|
7433
|
# -*- coding: utf-8; mode: python; tab-width: 3; indent-tabs-mode: nil -*-
#
# Copyright 2013-2017 Raffaello D. Di Napoli
#
# This file is part of Complemake.
#
# Complemake is free software: you can redistribute it and/or modify it under the terms of the GNU General
# Public License as published by the Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# Complemake is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with Complemake. If not, see
# <http://www.gnu.org/licenses/>.
#-------------------------------------------------------------------------------------------------------------
"""Complemake command line argument parsing."""
import argparse
import os
import comk
##############################################################################################################
class Command(object):
_instances = {}
def __init__(self, name):
self._name = name
self._instances[name] = self
def __repr__(self):
return self._name
@classmethod
def from_str(cls, name):
return cls._instances.get(name, name)
Command.BUILD = Command('build')
Command.CLEAN = Command('clean')
Command.EXEC = Command('exec')
Command.QUERY = Command('query')
##############################################################################################################
class Parser(object):
"""Parses Complemake’s command line."""
_parser = None
def __init__(self):
"""Constructor."""
self._parser = argparse.ArgumentParser(add_help=False)
# Flags that apply to all commands.
self._parser.add_argument(
'--help', action='help',
help='Show this informative message and exit.'
)
self._parser.add_argument(
'-n', '--dry-run', action='store_true',
help='Don’t actually run any external commands. Useful to test if anything needs to be built.'
)
self._parser.add_argument(
'-o', '--output-dir', metavar='/path/to/output/dir', default='',
help='Location where all Complemake output for the project should be stored. Defaults to the ' +
'project’s directory.'
)
self._parser.add_argument(
'-p', '--project', metavar='PROJECT.comk',
help='Complemake project (.comk) containing instructions on how to build targets. If omitted and ' +
'the current directory contains a single file matching *.comk, that file will be used as the ' +
'project.'
)
if comk.os_is_windows():
default_shared_dir = 'Complemake'
user_apps_home_description = 'common repository for application-specific data (typically ' + \
'“Application Data”)'
else:
default_shared_dir = '.comk'
user_apps_home_description = 'user’s $HOME directory'
self._parser.add_argument(
'--shared-dir', metavar='path/to/shared/dir', type=self.get_abs_shared_dir,
default=default_shared_dir,
help=('Directory where Complemake will store data shared across all projects, such as projects’ ' +
'dependencies. Defaults to “{}” in the {}.').format(
default_shared_dir, user_apps_home_description
)
)
self._parser.add_argument(
'-s', '--system-type', metavar='SYSTEM-TYPE',
help='Use SYSTEM-TYPE as the system type for which to build; examples: x86_64-pc-linux-gnu, ' +
'i686-pc-win32. If omitted, detect a default for the machine on which Complemake is being run.'
)
self._parser.add_argument(
'--tool-c++', metavar='/path/to/c++', dest='tool_cxx',
help='Use /path/to/c++ as the C++ compiler (and linker driver, unless --tool-ld is also specified).'
)
self._parser.add_argument(
'--tool-ld', metavar='/path/to/ld',
help='Use /path/to/ld as the linker/linker driver.'
)
self._parser.add_argument(
'-v', '--verbose', action='count', default=0,
help='Increase verbosity level; can be specified multiple times.'
)
subparsers = self._parser.add_subparsers(dest='command')
subparsers.type = Command.from_str
subparsers.required = True
build_subparser = subparsers.add_parser(Command.BUILD)
build_subparser.add_argument(
'--force', action='store_true', dest='force_build',
help='Unconditionally rebuild all targets.'
)
build_subparser.add_argument(
'--force-test', action='store_true',
help='Unconditionally run all test targets.'
)
build_subparser.add_argument(
'-j', '--jobs', default=None, metavar='N', type=int,
         help='Build using N processes at a time; if N is omitted, build all independent targets at the ' +
'same time. If not specified, the default is --jobs <number of processors>.'
)
build_subparser.add_argument(
'-k', '--keep-going', action='store_true',
help='Continue building targets even if other independent targets fail.'
)
build_subparser.add_argument(
'-f', '--target-file', metavar='/generated/file', action='append', dest='target_files', default=[],
help='Specify once or more to indicate which target files should be built. ' +
'If no -f or -t arguments are provided, all targets declared in the Complemake project ' +
'(.comk) will be built.'
)
build_subparser.add_argument(
'-t', '--target-name', action='append', dest='target_names', default=[],
help='Specify once or more to indicate which named targets should be built. ' +
'If no -f or -t arguments are provided, all targets declared in the Complemake project ' +
'(.comk) will be built.'
)
build_subparser.add_argument(
'-u', '--update-deps', action='store_true',
help='Update all dependencies (e.g. pull git repo) before building.'
)
clean_subparser = subparsers.add_parser(Command.CLEAN)
exec_subparser = subparsers.add_parser(Command.EXEC)
exec_subparser.add_argument(
'exec_exe', metavar='EXECUTABLE',
help='Command to execute.'
)
exec_subparser.add_argument(
'exec_args', metavar='...', nargs=argparse.REMAINDER,
help='Arguments to pass EXECUTABLE.'
)
query_subparser = subparsers.add_parser(Command.QUERY)
query_group = query_subparser.add_mutually_exclusive_group(required=True)
query_group.add_argument(
'--exec-env', dest='query_exec_env', action='store_true',
help='Print any environment variable assignments needed to execute binaries build by the project.'
)
@staticmethod
def get_abs_shared_dir(shared_dir):
if os.path.isabs(shared_dir):
return shared_dir
else:
return os.path.normpath(os.path.join(comk.get_user_apps_home(), shared_dir))
def parse_args(self, *args, **kwargs):
"""See argparse.ArgumentParser.parse_args()."""
return self._parser.parse_args(*args, **kwargs)
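# Illustrative usage sketch (the argument values are examples only):
#   args = Parser().parse_args(['build', '-t', 'mytarget'])
#   # args.command is the BUILD command; args.target_names == ['mytarget']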
|
gpl-3.0
| 7,662,710,956,515,190,000 | 39.966851 | 110 | 0.598247 | false | 4.168072 | false | false | false |
Ginkgo-Biloba/Misc-Python
|
numpy/SciPyInt.py
|
1
|
3425
|
# coding=utf-8
import numpy as np
from scipy import integrate as intgrt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import sqrt
# Compute the volume of a half ball
def ballVolume():
def halfBall(x, y):
return sqrt(1 - x**2 - y**2)
def halfCircle(x):
return sqrt(1 - x**2)
(vol, error) = intgrt.dblquad(halfBall, -1, 1, lambda x: -halfCircle(x), lambda x: halfCircle(x))
print ("vol =", vol)
# Integrate a system of ordinary differential equations
# Compute the trajectory of the Lorenz attractor
def LorenzAttactor():
    # Given the position vector w and the parameters sigma, rho, beta, compute the velocity vector (dx, dy, dz)
def lorenz(w, t, sigma, rho, beta):
(x, y, z) = w.tolist()
return (sigma * (y - x), x * (rho - z), x * y - beta * z)
t = np.arange(0, 20, 0.01) # 创建时间点
    # Solve lorenz with odeint, using two different initial values
track1 = intgrt.odeint(lorenz, (0.0, 1.0, 0.0), t, args=(10.0, 28.0, 2.7))
track2 = intgrt.odeint(lorenz, (0.0, 1.01, 0.0), t, args=(10.0, 28.0, 2.7))
    # Plot the two trajectories
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(track1[:, 0], track1[:, 1], track1[:, 2], label="$y=1.0$")
ax.plot(track2[:, 0], track2[:, 1], track2[:, 2], label="$y=1.01$")
plt.legend(loc="best")
plt.show()
# Mass-spring-damper system
# Mx'' + bx' + kx = F
def msd(xu, t, M, k, b, F):
(x, u) = xu.tolist()
dx = u
du = (F - k * x - b * u) / M
return (dx, du)
def msdDemo():
    # The slider starts at displacement x = -1.0 with initial velocity 0; the external control force is constant at 1.0
initxu = (-1.0, 0.0)
(M, k, b, F) = (1.0, 0.5, 0.2, 1.0)
t = np.arange(0, 40, 0.02)
rst = intgrt.odeint(msd, initxu, t, args=(M, k, b, F))
(fig, (ax1, ax2)) = plt.subplots(2, 1)
ax1.plot(t, rst[:, 0], label=u"位移 x")
ax2.plot(t, rst[:, 1], label=u"速度 u")
ax1.legend(); ax2.legend()
plt.show()
# Mass-spring-damper system
class MassSpringDamper(object):
def __init__(self, M, k, b, F):
(self.M, self.k, self.b, self.F) = (M, k, b, F)
    # Derivative function
def dee(self, t, xu):
(x, u) = xu.tolist()
dx = u
du = (self.F - self.k * x - self.b * u) / self.M
return [dx, du] # 要求返回列表而不是元组
# Use a PID controller
class PID(object):
def __init__(self, kp, ki, kd, dt):
(self.kp, self.ki, self.kd, self.dt) = (kp, ki, kd, dt)
self.lastErr = None
self.x = 0.0
def update(self, err):
p = self.kp * err
i = self.ki * self.x
if self.lastErr is None:
d = 0.0
else:
d = self.kd * (err - self.lastErr) / self.dt
self.x += err * self.dt
self.lastErr = err
return p + i + d
# Control the external force F so that the slider stops at displacement 2.0 more quickly
def msdPID(kp, ki, kd, dt):
stm = MassSpringDamper(M=1.0, k=0.5, b=0.2, F=1.0)
initxu = (-1.0, 0.0)
pid = PID(kp, ki, kd, dt)
r = intgrt.ode(stm.dee)
r.set_integrator("vode", method="bdf")
r.set_initial_value(initxu, 0)
t = list(); rst = list(); FArr = list()
while (r.successful() and (r.t + dt < 3)):
r.integrate(r.t + dt)
t.append(r.t)
rst.append(r.y)
err = 2.0 - r.y[0]
F = pid.update(err)
stm.F = F
FArr.append(F)
rst = np.array(rst)
t = np.array(t)
FArr = np.array(FArr)
(fig, (ax1, ax2, ax3)) = plt.subplots(3, 1)
ax1.plot(t, rst[:, 0], label=u"位移 x")
ax2.plot(t, rst[:, 1], label=u"速度 u")
ax3.plot(t, FArr, label=u"控制力 F")
ax1.legend(); ax2.legend(); ax3.legend()
plt.show()
if (__name__ == "__main__"):
# ballVolume()
LorenzAttactor()
# msdDemo()
    # msdPID(19.29, 1.41, 6.25, 0.02) # the best parameter set found
|
gpl-3.0
| -104,215,309,055,111,760 | 25.347458 | 98 | 0.577999 | false | 1.778604 | false | false | false |
PythonScanClient/PyScanClient
|
example/opi/scripts/xy_scan.py
|
1
|
2047
|
"""
Schedule scan with parameters from BOY script
@author: Kay Kasemir
"""
from org.csstudio.scan.ui import SimulationDisplay
from org.csstudio.scan.server import SimulationResult
from org.eclipse.ui import PlatformUI
from errors import showException
from scan.commands.loop import Loop
from scan.commands.wait import Wait
from scan.commands.log import Log
from beamline_setup import scan_client
try:
# Fetch parameters from display
x0 = float(display.getWidget("x0").getValue())
x1 = float(display.getWidget("x1").getValue())
dx = float(display.getWidget("dx").getValue())
y0 = float(display.getWidget("y0").getValue())
y1 = float(display.getWidget("y1").getValue())
dy = float(display.getWidget("dy").getValue())
neutrons = float(display.getWidget("neutrons").getValue())
simu = str(display.getWidget("simu").getValue()) == "True"
if str(display.getWidget("updown").getValue()) == "True":
toggle = -1
else:
toggle = 1
#from org.eclipse.jface.dialogs import MessageDialog
#MessageDialog.openWarning(
# None, "Type", "Type is " + neutrons.__class__.__name__)
# Create scan
cmds =[
Loop('xpos', min(x0, x1), max(x0, x1), max(0.1, abs(dx)),
Loop('ypos', min(y0, y1), max(y0, y1), toggle * max(0.1, abs(dy)),
[
Wait('neutrons', neutrons, comparison='increase by'),
Log('xpos', 'ypos', 'readback')
]
)
)
]
if simu:
simulation = scan_client.simulate(cmds)
SimulationDisplay.show(SimulationResult(simulation['seconds'], simulation['simulation']))
else:
# Submit scan
id = scan_client.submit(cmds, "XY Scan")
workbench = PlatformUI.getWorkbench()
window = workbench.getActiveWorkbenchWindow()
page = window.getActivePage()
plot = page.showView("org.csstudio.scan.ui.plot.view")
plot.selectScan("XY Scan", id)
plot.selectDevices("xpos", "ypos")
except:
showException("XY Scan")
|
epl-1.0
| -92,103,422,554,939,620 | 31.492063 | 97 | 0.634587 | false | 3.535406 | false | true | false |
fiber-space/pip
|
pip/_vendor/cachecontrol/caches/file_cache.py
|
1
|
4069
|
import hashlib
import os
from textwrap import dedent
from ..cache import BaseCache
from ..controller import CacheController
try:
FileNotFoundError
except NameError:
# py2.X
FileNotFoundError = IOError
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
try:
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
except ImportError:
notice = dedent("""
NOTE: In order to use the FileCache you must have
lockfile installed. You can install it via pip:
pip install lockfile
""")
raise ImportError(notice)
else:
if use_dir_lock:
lock_class = MkdirLockFile
elif lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
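        # e.g. a digest 'abcde9...' maps to <directory>/a/b/c/d/e/abcde9...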
return os.path.join(self.directory, *parts)
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
try:
os.remove(name)
except FileNotFoundError:
pass
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
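# Illustrative usage sketch (the cache directory is an example; FileCache.set requires
# the vendored lockfile package imported above):
#   cache = FileCache("/tmp/http-cache")
#   cache.set("key", b"value")
#   assert cache.get("key") == b"value"
#   print(url_to_file_path("https://example.com/package", cache))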
|
mit
| -2,711,897,813,747,140,600 | 29.593985 | 79 | 0.606783 | false | 4.020751 | false | false | false |
ryepdx/account_payment_cim_authdotnet
|
xml2dic.py
|
1
|
1080
|
## Module that converts an XML response to a dictionary
from lxml import etree
import re
def dictlist(node):
res = {}
node_tag = re.findall(r'}(\w*)', node.tag)
node_tag = node_tag[0]
res[node_tag] = []
xmltodict(node, res[node_tag])
reply = {}
reply[node_tag] = res[node_tag]
return reply
def xmltodict(node, res):
rep = {}
node_tag = re.findall(r'}(\w*)', node.tag)
node_tag = node_tag[0]
if len(node):
#n = 0
for n in list(node):
rep[node_tag] = []
value = xmltodict(n, rep[node_tag])
if len(n):
n_tag = re.findall(r'}(\w*)', n.tag)
n_tag = n_tag[0]
value = rep[node_tag]
res.append({n_tag:value})
else :
res.append(rep[node_tag][0])
else:
value = {}
value = node.text
res.append({node_tag:value})
return
def main(xml_string):
tree = etree.fromstring(xml_string)
res = dictlist(tree)
return res
if __name__ == '__main__' :
main()
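# Example (illustrative; tags must carry an XML namespace for the regexes above to match):
#   main('<r xmlns="urn:x"><a>1</a></r>')  ->  {'r': [{'a': '1'}]}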
|
agpl-3.0
| -254,882,252,893,235,300 | 21.5 | 53 | 0.503704 | false | 3.233533 | false | false | false |
schaabs/sandbox
|
net/sandbox.keyvault/python/repl/key_vault_crypto.py
|
1
|
1939
|
import base64
import datetime
import sys
import argparse
from azure.keyvault.generated.models import KeyVaultErrorException
from python.key_vault_agent import KeyVaultAgent
from azure.keyvault.generated import KeyVaultClient
CLIENT_ID = '8fd4d3c4-efea-49aa-b1de-2c33c22da56e'
class KeyVaultCryptoAgent(KeyVaultAgent):
def __init__(self, client_id):
self._initialize(client_id)
def encrypt(self, f_in, f_out, vault_name, key_name, key_version=None):
vault = self.get_vault(vault_name)
buff = f_in.read()
buff = base64.encodebytes(buff)
buff = buff.replace(b'\n', b'')
try:
buff = self.data_client.encrypt(vault.properties.vault_uri, key_name, key_version or '', 'RSA1_5', buff)
except KeyVaultErrorException as e:
print(str(e))
buff = base64.decodebytes(buff)
f_out.write(buff)
def _parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('action', choices=['encrypt', 'decrypt'], help='specifies whether to encrypt or decrypt the specified "in" file')
    parser.add_argument('infile', type=argparse.FileType('rb'), help='specifies the file on which to perform the crypto action')
parser.add_argument('outfile', type=argparse.FileType('wb'), help='specifies the file in which to store the crypto action result')
    parser.add_argument('vault', help='the vault containing the key to use for the crypto action')
parser.add_argument('key', help='the key to use for the crypto action')
return parser.parse_args(argv)
def main(argv):
argv = ['', 'encrypt', 'd:\\temp\\crypto_encrypt_in.txt', 'd:\\temp\\crypto_encrypt_out.txt', 'sdschaab-replkv', 'repl-key1']
args = _parse_args(argv[1:])
crypto_agent = KeyVaultCryptoAgent(CLIENT_ID)
if args.action == 'encrypt':
crypto_agent.encrypt(args.infile, args.outfile, args.vault, args.key)
if __name__ == '__main__':
main(sys.argv)
|
mit
| 1,837,548,955,547,874,800 | 33.017544 | 137 | 0.680248 | false | 3.44405 | false | false | false |
Catgroove/dotaninja
|
app/filters.py
|
1
|
1483
|
from app import app
from flask import url_for
from .models import Player, DoesNotExist
from .helpers import json_file_to_dict
from config import JSON_DIR
import datetime
import arrow
import os
@app.template_filter("game_mode")
def game_mode(mode_id):
return json_file_to_dict(os.path.join(JSON_DIR, "game_mode.json"))[str(mode_id)]["name"]
@app.template_filter("region")
def region(cluster):
regions = json_file_to_dict(os.path.join(JSON_DIR, "regions.json"))["regions"]
for region, values in regions.items():
if values.get("clusters") and str(cluster) in values.get("clusters"):
return (values["display_name"][len("#dota_region_"):].capitalize())
@app.template_filter("duration")
def duration(duration):
return str(datetime.timedelta(seconds=duration))
@app.template_filter("time_since")
def time_since(time):
return arrow.get(time).humanize()
@app.template_filter("result")
def result(result):
if result:
return "Won"
return "Lost"
@app.template_filter("hero_image")
def hero_image(hero_id):
return url_for("static", filename="assets/heroes/{}_sb.png".format(hero_id))
@app.template_filter("item_image")
def item_image(item_id):
return url_for("static", filename="assets/items/{}_lg.png".format(item_id))
@app.template_filter("player_name")
def player_name(account_id):
try:
return Player.get(Player.account_id == account_id).personaname
except DoesNotExist:
return account_id
|
gpl-3.0
| 3,359,745,822,291,278,000 | 25.482143 | 92 | 0.696561 | false | 3.26652 | false | false | false |
ktan2020/legacy-automation
|
win/Lib/site-packages/wx-3.0-msw/wx/lib/flashwin_old.py
|
1
|
19484
|
#----------------------------------------------------------------------
# Name: wx.lib.flashwin
# Purpose: A class that allows the use of the Shockwave Flash
# ActiveX control
#
# Author: Robin Dunn
#
# Created: 22-March-2004
# RCS-ID: $Id: flashwin.py 26301 2004-03-23 05:29:50Z RD $
# Copyright: (c) 2004 by Total Control Software
# Licence: wxWindows license
#----------------------------------------------------------------------
# This module was generated by the wx.activex.GernerateAXModule class
# (See also the genaxmodule script.)
import wx
import wx.activex
clsID = '{D27CDB6E-AE6D-11CF-96B8-444553540000}'
progID = 'ShockwaveFlash.ShockwaveFlash.1'
# Create eventTypes and event binders
wxEVT_ReadyStateChange = wx.activex.RegisterActiveXEvent('OnReadyStateChange')
wxEVT_Progress = wx.activex.RegisterActiveXEvent('OnProgress')
wxEVT_FSCommand = wx.activex.RegisterActiveXEvent('FSCommand')
EVT_ReadyStateChange = wx.PyEventBinder(wxEVT_ReadyStateChange, 1)
EVT_Progress = wx.PyEventBinder(wxEVT_Progress, 1)
EVT_FSCommand = wx.PyEventBinder(wxEVT_FSCommand, 1)
# Derive a new class from ActiveXWindow
class FlashWindow(wx.activex.ActiveXWindow):
def __init__(self, parent, ID=-1, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=0, name='FlashWindow'):
wx.activex.ActiveXWindow.__init__(self, parent,
wx.activex.CLSID('{D27CDB6E-AE6D-11CF-96B8-444553540000}'),
ID, pos, size, style, name)
# Methods exported by the ActiveX object
def QueryInterface(self, riid):
return self.CallAXMethod('QueryInterface', riid)
def AddRef(self):
return self.CallAXMethod('AddRef')
def Release(self):
return self.CallAXMethod('Release')
def GetTypeInfoCount(self):
return self.CallAXMethod('GetTypeInfoCount')
def GetTypeInfo(self, itinfo, lcid):
return self.CallAXMethod('GetTypeInfo', itinfo, lcid)
def GetIDsOfNames(self, riid, rgszNames, cNames, lcid):
return self.CallAXMethod('GetIDsOfNames', riid, rgszNames, cNames, lcid)
def Invoke(self, dispidMember, riid, lcid, wFlags, pdispparams):
return self.CallAXMethod('Invoke', dispidMember, riid, lcid, wFlags, pdispparams)
def SetZoomRect(self, left, top, right, bottom):
return self.CallAXMethod('SetZoomRect', left, top, right, bottom)
def Zoom(self, factor):
return self.CallAXMethod('Zoom', factor)
def Pan(self, x, y, mode):
return self.CallAXMethod('Pan', x, y, mode)
def Play(self):
return self.CallAXMethod('Play')
def Stop(self):
return self.CallAXMethod('Stop')
def Back(self):
return self.CallAXMethod('Back')
def Forward(self):
return self.CallAXMethod('Forward')
def Rewind(self):
return self.CallAXMethod('Rewind')
def StopPlay(self):
return self.CallAXMethod('StopPlay')
def GotoFrame(self, FrameNum):
return self.CallAXMethod('GotoFrame', FrameNum)
def CurrentFrame(self):
return self.CallAXMethod('CurrentFrame')
def IsPlaying(self):
return self.CallAXMethod('IsPlaying')
def PercentLoaded(self):
return self.CallAXMethod('PercentLoaded')
def FrameLoaded(self, FrameNum):
return self.CallAXMethod('FrameLoaded', FrameNum)
def FlashVersion(self):
return self.CallAXMethod('FlashVersion')
def LoadMovie(self, layer, url):
return self.CallAXMethod('LoadMovie', layer, url)
def TGotoFrame(self, target, FrameNum):
return self.CallAXMethod('TGotoFrame', target, FrameNum)
def TGotoLabel(self, target, label):
return self.CallAXMethod('TGotoLabel', target, label)
def TCurrentFrame(self, target):
return self.CallAXMethod('TCurrentFrame', target)
def TCurrentLabel(self, target):
return self.CallAXMethod('TCurrentLabel', target)
def TPlay(self, target):
return self.CallAXMethod('TPlay', target)
def TStopPlay(self, target):
return self.CallAXMethod('TStopPlay', target)
def SetVariable(self, name, value):
return self.CallAXMethod('SetVariable', name, value)
def GetVariable(self, name):
return self.CallAXMethod('GetVariable', name)
def TSetProperty(self, target, property, value):
return self.CallAXMethod('TSetProperty', target, property, value)
def TGetProperty(self, target, property):
return self.CallAXMethod('TGetProperty', target, property)
def TCallFrame(self, target, FrameNum):
return self.CallAXMethod('TCallFrame', target, FrameNum)
def TCallLabel(self, target, label):
return self.CallAXMethod('TCallLabel', target, label)
def TSetPropertyNum(self, target, property, value):
return self.CallAXMethod('TSetPropertyNum', target, property, value)
def TGetPropertyNum(self, target, property):
return self.CallAXMethod('TGetPropertyNum', target, property)
def TGetPropertyAsNumber(self, target, property):
return self.CallAXMethod('TGetPropertyAsNumber', target, property)
# Getters, Setters and properties
def _get_ReadyState(self):
return self.GetAXProp('ReadyState')
readystate = property(_get_ReadyState, None)
def _get_TotalFrames(self):
return self.GetAXProp('TotalFrames')
totalframes = property(_get_TotalFrames, None)
def _get_Playing(self):
return self.GetAXProp('Playing')
def _set_Playing(self, Playing):
self.SetAXProp('Playing', Playing)
playing = property(_get_Playing, _set_Playing)
def _get_Quality(self):
return self.GetAXProp('Quality')
def _set_Quality(self, Quality):
self.SetAXProp('Quality', Quality)
quality = property(_get_Quality, _set_Quality)
def _get_ScaleMode(self):
return self.GetAXProp('ScaleMode')
def _set_ScaleMode(self, ScaleMode):
self.SetAXProp('ScaleMode', ScaleMode)
scalemode = property(_get_ScaleMode, _set_ScaleMode)
def _get_AlignMode(self):
return self.GetAXProp('AlignMode')
def _set_AlignMode(self, AlignMode):
self.SetAXProp('AlignMode', AlignMode)
alignmode = property(_get_AlignMode, _set_AlignMode)
def _get_BackgroundColor(self):
return self.GetAXProp('BackgroundColor')
def _set_BackgroundColor(self, BackgroundColor):
self.SetAXProp('BackgroundColor', BackgroundColor)
backgroundcolor = property(_get_BackgroundColor, _set_BackgroundColor)
def _get_Loop(self):
return self.GetAXProp('Loop')
def _set_Loop(self, Loop):
self.SetAXProp('Loop', Loop)
loop = property(_get_Loop, _set_Loop)
def _get_Movie(self):
return self.GetAXProp('Movie')
def _set_Movie(self, Movie):
self.SetAXProp('Movie', Movie)
movie = property(_get_Movie, _set_Movie)
def _get_FrameNum(self):
return self.GetAXProp('FrameNum')
def _set_FrameNum(self, FrameNum):
self.SetAXProp('FrameNum', FrameNum)
framenum = property(_get_FrameNum, _set_FrameNum)
def _get_WMode(self):
return self.GetAXProp('WMode')
def _set_WMode(self, WMode):
self.SetAXProp('WMode', WMode)
wmode = property(_get_WMode, _set_WMode)
def _get_SAlign(self):
return self.GetAXProp('SAlign')
def _set_SAlign(self, SAlign):
self.SetAXProp('SAlign', SAlign)
salign = property(_get_SAlign, _set_SAlign)
def _get_Menu(self):
return self.GetAXProp('Menu')
def _set_Menu(self, Menu):
self.SetAXProp('Menu', Menu)
menu = property(_get_Menu, _set_Menu)
def _get_Base(self):
return self.GetAXProp('Base')
def _set_Base(self, Base):
self.SetAXProp('Base', Base)
base = property(_get_Base, _set_Base)
def _get_Scale(self):
return self.GetAXProp('Scale')
def _set_Scale(self, Scale):
self.SetAXProp('Scale', Scale)
scale = property(_get_Scale, _set_Scale)
def _get_DeviceFont(self):
return self.GetAXProp('DeviceFont')
def _set_DeviceFont(self, DeviceFont):
self.SetAXProp('DeviceFont', DeviceFont)
devicefont = property(_get_DeviceFont, _set_DeviceFont)
def _get_EmbedMovie(self):
return self.GetAXProp('EmbedMovie')
def _set_EmbedMovie(self, EmbedMovie):
self.SetAXProp('EmbedMovie', EmbedMovie)
embedmovie = property(_get_EmbedMovie, _set_EmbedMovie)
def _get_BGColor(self):
return self.GetAXProp('BGColor')
def _set_BGColor(self, BGColor):
self.SetAXProp('BGColor', BGColor)
bgcolor = property(_get_BGColor, _set_BGColor)
def _get_Quality2(self):
return self.GetAXProp('Quality2')
def _set_Quality2(self, Quality2):
self.SetAXProp('Quality2', Quality2)
quality2 = property(_get_Quality2, _set_Quality2)
def _get_SWRemote(self):
return self.GetAXProp('SWRemote')
def _set_SWRemote(self, SWRemote):
self.SetAXProp('SWRemote', SWRemote)
swremote = property(_get_SWRemote, _set_SWRemote)
def _get_FlashVars(self):
return self.GetAXProp('FlashVars')
def _set_FlashVars(self, FlashVars):
self.SetAXProp('FlashVars', FlashVars)
flashvars = property(_get_FlashVars, _set_FlashVars)
def _get_AllowScriptAccess(self):
return self.GetAXProp('AllowScriptAccess')
def _set_AllowScriptAccess(self, AllowScriptAccess):
self.SetAXProp('AllowScriptAccess', AllowScriptAccess)
allowscriptaccess = property(_get_AllowScriptAccess, _set_AllowScriptAccess)
def _get_MovieData(self):
return self.GetAXProp('MovieData')
def _set_MovieData(self, MovieData):
self.SetAXProp('MovieData', MovieData)
moviedata = property(_get_MovieData, _set_MovieData)
# PROPERTIES
# --------------------
# readystate
# type:int arg:VT_EMPTY canGet:True canSet:False
#
# totalframes
# type:int arg:VT_EMPTY canGet:True canSet:False
#
# playing
# type:bool arg:bool canGet:True canSet:True
#
# quality
# type:int arg:int canGet:True canSet:True
#
# scalemode
# type:int arg:int canGet:True canSet:True
#
# alignmode
# type:int arg:int canGet:True canSet:True
#
# backgroundcolor
# type:int arg:int canGet:True canSet:True
#
# loop
# type:bool arg:bool canGet:True canSet:True
#
# movie
# type:string arg:string canGet:True canSet:True
#
# framenum
# type:int arg:int canGet:True canSet:True
#
# wmode
# type:string arg:string canGet:True canSet:True
#
# salign
# type:string arg:string canGet:True canSet:True
#
# menu
# type:bool arg:bool canGet:True canSet:True
#
# base
# type:string arg:string canGet:True canSet:True
#
# scale
# type:string arg:string canGet:True canSet:True
#
# devicefont
# type:bool arg:bool canGet:True canSet:True
#
# embedmovie
# type:bool arg:bool canGet:True canSet:True
#
# bgcolor
# type:string arg:string canGet:True canSet:True
#
# quality2
# type:string arg:string canGet:True canSet:True
#
# swremote
# type:string arg:string canGet:True canSet:True
#
# flashvars
# type:string arg:string canGet:True canSet:True
#
# allowscriptaccess
# type:string arg:string canGet:True canSet:True
#
# moviedata
# type:string arg:string canGet:True canSet:True
#
#
#
#
# METHODS
# --------------------
# QueryInterface
# retType: VT_VOID
# params:
# riid
# in:True out:False optional:False type:unsupported type 29
# ppvObj
# in:False out:True optional:False type:unsupported type 26
#
# AddRef
# retType: int
#
# Release
# retType: int
#
# GetTypeInfoCount
# retType: VT_VOID
# params:
# pctinfo
# in:False out:True optional:False type:int
#
# GetTypeInfo
# retType: VT_VOID
# params:
# itinfo
# in:True out:False optional:False type:int
# lcid
# in:True out:False optional:False type:int
# pptinfo
# in:False out:True optional:False type:unsupported type 26
#
# GetIDsOfNames
# retType: VT_VOID
# params:
# riid
# in:True out:False optional:False type:unsupported type 29
# rgszNames
# in:True out:False optional:False type:unsupported type 26
# cNames
# in:True out:False optional:False type:int
# lcid
# in:True out:False optional:False type:int
# rgdispid
# in:False out:True optional:False type:int
#
# Invoke
# retType: VT_VOID
# params:
# dispidMember
# in:True out:False optional:False type:int
# riid
# in:True out:False optional:False type:unsupported type 29
# lcid
# in:True out:False optional:False type:int
# wFlags
# in:True out:False optional:False type:int
# pdispparams
# in:True out:False optional:False type:unsupported type 29
# pvarResult
# in:False out:True optional:False type:VT_VARIANT
# pexcepinfo
# in:False out:True optional:False type:unsupported type 29
# puArgErr
# in:False out:True optional:False type:int
#
# SetZoomRect
# retType: VT_VOID
# params:
# left
# in:True out:False optional:False type:int
# top
# in:True out:False optional:False type:int
# right
# in:True out:False optional:False type:int
# bottom
# in:True out:False optional:False type:int
#
# Zoom
# retType: VT_VOID
# params:
# factor
# in:True out:False optional:False type:int
#
# Pan
# retType: VT_VOID
# params:
# x
# in:True out:False optional:False type:int
# y
# in:True out:False optional:False type:int
# mode
# in:True out:False optional:False type:int
#
# Play
# retType: VT_VOID
#
# Stop
# retType: VT_VOID
#
# Back
# retType: VT_VOID
#
# Forward
# retType: VT_VOID
#
# Rewind
# retType: VT_VOID
#
# StopPlay
# retType: VT_VOID
#
# GotoFrame
# retType: VT_VOID
# params:
# FrameNum
# in:True out:False optional:False type:int
#
# CurrentFrame
# retType: int
#
# IsPlaying
# retType: bool
#
# PercentLoaded
# retType: int
#
# FrameLoaded
# retType: bool
# params:
# FrameNum
# in:True out:False optional:False type:int
#
# FlashVersion
# retType: int
#
# LoadMovie
# retType: VT_VOID
# params:
# layer
# in:True out:False optional:False type:int
# url
# in:True out:False optional:False type:string
#
# TGotoFrame
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# FrameNum
# in:True out:False optional:False type:int
#
# TGotoLabel
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# label
# in:True out:False optional:False type:string
#
# TCurrentFrame
# retType: int
# params:
# target
# in:True out:False optional:False type:string
#
# TCurrentLabel
# retType: string
# params:
# target
# in:True out:False optional:False type:string
#
# TPlay
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
#
# TStopPlay
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
#
# SetVariable
# retType: VT_VOID
# params:
# name
# in:True out:False optional:False type:string
# value
# in:True out:False optional:False type:string
#
# GetVariable
# retType: string
# params:
# name
# in:True out:False optional:False type:string
#
# TSetProperty
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
# value
# in:True out:False optional:False type:string
#
# TGetProperty
# retType: string
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
#
# TCallFrame
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# FrameNum
# in:True out:False optional:False type:int
#
# TCallLabel
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# label
# in:True out:False optional:False type:string
#
# TSetPropertyNum
# retType: VT_VOID
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
# value
# in:True out:False optional:False type:double
#
# TGetPropertyNum
# retType: double
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
#
# TGetPropertyAsNumber
# retType: double
# params:
# target
# in:True out:False optional:False type:string
# property
# in:True out:False optional:False type:int
#
#
#
#
# EVENTS
# --------------------
# ReadyStateChange
# retType: VT_VOID
# params:
# newState
# in:False out:False optional:False type:int
#
# Progress
# retType: VT_VOID
# params:
# percentDone
# in:False out:False optional:False type:int
#
# FSCommand
# retType: VT_VOID
# params:
# command
# in:True out:False optional:False type:string
# args
# in:True out:False optional:False type:string
#
#
#
#
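# Illustrative usage sketch (requires a classic wxPython build with the wx.activex module
# and the Shockwave Flash ActiveX control installed on Windows; the movie path is an example):
#   app = wx.App(False)
#   frame = wx.Frame(None, -1, "Flash")
#   flash = FlashWindow(frame)
#   flash.LoadMovie(0, "file:///C:/example/movie.swf")
#   frame.Show()
#   app.MainLoop()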
|
mit
| -5,230,802,995,228,288,000 | 27.883436 | 89 | 0.582991 | false | 3.126444 | false | false | false |
proversity-org/edx-platform
|
cms/djangoapps/contentstore/features/video.py
|
1
|
2082
|
# pylint: disable=missing-docstring
from lettuce import step, world
SELECTORS = {
'spinner': '.video-wrapper .spinner',
'controls': '.video-controls',
}
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
@step('I have uploaded subtitles "([^"]*)"$')
def i_have_uploaded_subtitles(_step, sub_id):
_step.given('I go to the files and uploads page')
_step.given('I upload the test file "subs_{}.srt.sjson"'.format(sub_id.strip()))
@step('I have created a Video component$')
def i_created_a_video_component(step):
step.given('I am in Studio editing a new unit')
world.create_component_instance(
step=step,
category='video',
)
world.wait_for_xmodule()
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait(DELAY)
world.wait_for_invisible(SELECTORS['spinner'])
if not world.youtube.config.get('youtube_api_blocked'):
world.wait_for_visible(SELECTORS['controls'])
@step('I have created a Video component with subtitles$')
def i_created_a_video_with_subs(_step):
_step.given('I have created a Video component with subtitles "tPccVs9bg0c"')
@step('I have created a Video component with subtitles "([^"]*)"$')
def i_created_a_video_with_subs_with_name(_step, sub_id):
_step.given('I have created a Video component')
# Store the current URL so we can return here
video_url = world.browser.url
# Upload subtitles for the video using the upload interface
_step.given('I have uploaded subtitles "{}"'.format(sub_id))
# Return to the video
world.visit(video_url)
world.wait_for_xmodule()
    # update the .sub field with the proper subs name (which mimics real Studio/XML behavior)
    # this is needed only for those videos which are created in acceptance tests.
_step.given('I edit the component')
world.wait_for_ajax_complete()
_step.given('I save changes')
world.disable_jquery_animations()
world.wait_for_present('.is-initialized')
world.wait_for_invisible(SELECTORS['spinner'])
|
agpl-3.0
| -6,644,738,434,728,490,000 | 30.074627 | 85 | 0.686359 | false | 3.565068 | false | false | false |
Mokona/python-p4lib
|
test/mocked/p4lib_describe_test.py
|
1
|
3184
|
import unittest
import p4lib
from mock23 import Mock
from test_utils import change_stdout, test_options, test_raw_result
CHANGE_NUM = 1234
USER = "someuser"
CLIENT = "someclient"
DATE = "2014/11/01"
DESCRIPTION = "Some changelist description"
FILE_0 = "//depot/file.cpp"
REV_0 = 3
ACTION_0 = "edit"
FILE_1 = "//depot/file2.cpp"
REV_1 = 4
ACTION_1 = "edit"
DESCRIBE_OUTPUT_BASE = """Change %i by %s@%s on %s
\t%s
Affected files ...
... %s#%i %s
... %s#%i %s
""" % (CHANGE_NUM, USER, CLIENT, DATE,
DESCRIPTION,
FILE_0, REV_0, ACTION_0,
FILE_1, REV_1, ACTION_1)
DESCRIBE_OUTPUT = DESCRIBE_OUTPUT_BASE + """
"""
DESCRIBE_OUTPUT_LONG = DESCRIBE_OUTPUT_BASE + """
Differences ...
==== //depot/apps/px/ReadMe.txt#5 (text/utf8) ====
DiffLine1
"""
DESCRIBE_OUTPUT_MOVE_DELETE = DESCRIBE_OUTPUT_BASE + """
Moved files ...
... //depot/file1.cpp#1 moved from ... //depot/file2.cpp#1
Differences ...
"""
class DescribeTestCase(unittest.TestCase):
def setUp(self):
p4lib._run = Mock(spec='p4lib._run', return_value=("", "", 0))
def _common_asserts(self, result):
self.assertEqual(CHANGE_NUM, result["change"])
self.assertEqual(DESCRIPTION, result["description"])
self.assertEqual(USER, result["user"])
self.assertEqual(CLIENT, result["client"])
self.assertIn("files", result)
files = result["files"]
self.assertEqual(2, len(files))
file_0 = files[0]
self.assertEqual(FILE_0, file_0["depotFile"])
self.assertEqual(REV_0, file_0["rev"])
self.assertEqual(ACTION_0, file_0["action"])
file_1 = files[1]
self.assertEqual(FILE_1, file_1["depotFile"])
self.assertEqual(REV_1, file_1["rev"])
self.assertEqual(ACTION_1, file_1["action"])
def test_with_change_short_form(self):
change_stdout(DESCRIBE_OUTPUT)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM, shortForm=True)
p4lib._run.assert_called_with(['p4', 'describe', '-s', '1234'])
self._common_asserts(result)
self.assertNotIn("diff", result)
def test_with_change_long_form(self):
change_stdout(DESCRIBE_OUTPUT_LONG)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM)
p4lib._run.assert_called_with(['p4', 'describe', '1234'])
self._common_asserts(result)
self.assertIn("diff", result)
def test_with_change_long_form_with_move_delete(self):
change_stdout(DESCRIBE_OUTPUT_MOVE_DELETE)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM)
p4lib._run.assert_called_with(['p4', 'describe', '1234'])
self._common_asserts(result)
self.assertIn("diff", result)
def test_raw_result(self):
test_raw_result(self, DESCRIBE_OUTPUT_LONG, "describe",
change=CHANGE_NUM)
p4 = p4lib.P4()
result = p4.describe(change=CHANGE_NUM)
self._common_asserts(result)
def test_with_options(self):
change_stdout(DESCRIBE_OUTPUT_LONG)
test_options(self, "describe", change=CHANGE_NUM,
expected=["describe", "1234"])
|
mit
| -1,705,442,480,516,141,300 | 24.886179 | 71 | 0.611809 | false | 3.177645 | true | false | false |
phillynch7/sportsref
|
sportsref/nba/seasons.py
|
1
|
9902
|
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range, zip
from past.utils import old_div
import urllib.parse
import future
import future.utils
import numpy as np
import pandas as pd
from pyquery import PyQuery as pq
import sportsref
class Season(future.utils.with_metaclass(sportsref.decorators.Cached, object)):
"""Object representing a given NBA season."""
def __init__(self, year):
"""Initializes a Season object for an NBA season.
:year: The year of the season we want.
"""
self.yr = int(year)
def __eq__(self, other):
return (self.yr == other.yr)
def __hash__(self):
return hash(self.yr)
def __repr__(self):
return 'Season({})'.format(self.yr)
def _subpage_url(self, page):
return (sportsref.nba.BASE_URL +
'/leagues/NBA_{}_{}.html'.format(self.yr, page))
@sportsref.decorators.memoize
def get_main_doc(self):
"""Returns PyQuery object for the main season URL.
:returns: PyQuery object.
"""
url = (sportsref.nba.BASE_URL +
'/leagues/NBA_{}.html'.format(self.yr))
return pq(sportsref.utils.get_html(url))
@sportsref.decorators.memoize
def get_sub_doc(self, subpage):
"""Returns PyQuery object for a given subpage URL.
:subpage: The subpage of the season, e.g. 'per_game'.
:returns: PyQuery object.
"""
html = sportsref.utils.get_html(self._subpage_url(subpage))
return pq(html)
@sportsref.decorators.memoize
def get_team_ids(self):
"""Returns a list of the team IDs for the given year.
:returns: List of team IDs.
"""
df = self.team_stats_per_game()
if not df.empty:
return df.index.tolist()
else:
print('ERROR: no teams found')
return []
@sportsref.decorators.memoize
def team_ids_to_names(self):
"""Mapping from 3-letter team IDs to full team names.
:returns: Dictionary with team IDs as keys and full team strings as
values.
"""
doc = self.get_main_doc()
table = doc('table#team-stats-per_game')
flattened = sportsref.utils.parse_table(table, flatten=True)
unflattened = sportsref.utils.parse_table(table, flatten=False)
team_ids = flattened['team_id']
team_names = unflattened['team_name']
if len(team_names) != len(team_ids):
raise Exception("team names and team IDs don't align")
return dict(zip(team_ids, team_names))
@sportsref.decorators.memoize
def team_names_to_ids(self):
"""Mapping from full team names to 3-letter team IDs.
        :returns: Dictionary with team names as keys and team IDs as values.
"""
d = self.team_ids_to_names()
return {v: k for k, v in d.items()}
@sportsref.decorators.memoize
@sportsref.decorators.kind_rpb(include_type=True)
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games]
def finals_winner(self):
"""Returns the team ID for the winner of that year's NBA Finals.
:returns: 3-letter team ID for champ.
"""
raise NotImplementedError('nba.Season.finals_winner')
def finals_loser(self):
"""Returns the team ID for the loser of that year's NBA Finals.
:returns: 3-letter team ID for runner-up.
"""
raise NotImplementedError('nba.Season.finals_loser')
def standings(self):
"""Returns a DataFrame containing standings information."""
doc = self.get_sub_doc('standings')
east_table = doc('table#divs_standings_E')
east_df = pd.DataFrame(sportsref.utils.parse_table(east_table))
east_df.sort_values('wins', ascending=False, inplace=True)
east_df['seed'] = range(1, len(east_df) + 1)
east_df['conference'] = 'E'
west_table = doc('table#divs_standings_W')
west_df = sportsref.utils.parse_table(west_table)
west_df.sort_values('wins', ascending=False, inplace=True)
west_df['seed'] = range(1, len(west_df) + 1)
west_df['conference'] = 'W'
full_df = pd.concat([east_df, west_df], axis=0).reset_index(drop=True)
full_df['team_id'] = full_df.team_id.str.extract(r'(\w+)\W*\(\d+\)', expand=False)
full_df['gb'] = [gb if isinstance(gb, int) or isinstance(gb, float) else 0
for gb in full_df['gb']]
full_df = full_df.drop('has_class_full_table', axis=1)
expanded_table = doc('table#expanded_standings')
expanded_df = sportsref.utils.parse_table(expanded_table)
full_df = pd.merge(full_df, expanded_df, on='team_id')
return full_df
@sportsref.decorators.memoize
def _get_team_stats_table(self, selector):
"""Helper function for stats tables on season pages. Returns a
DataFrame."""
doc = self.get_main_doc()
table = doc(selector)
df = sportsref.utils.parse_table(table)
df.set_index('team_id', inplace=True)
return df
def team_stats_per_game(self):
"""Returns a Pandas DataFrame of each team's basic per-game stats for
the season."""
return self._get_team_stats_table('table#team-stats-per_game')
def opp_stats_per_game(self):
"""Returns a Pandas DataFrame of each team's opponent's basic per-game
stats for the season."""
return self._get_team_stats_table('table#opponent-stats-per_game')
def team_stats_totals(self):
"""Returns a Pandas DataFrame of each team's basic stat totals for the
season."""
return self._get_team_stats_table('table#team-stats-base')
def opp_stats_totals(self):
"""Returns a Pandas DataFrame of each team's opponent's basic stat
totals for the season."""
return self._get_team_stats_table('table#opponent-stats-base')
def misc_stats(self):
"""Returns a Pandas DataFrame of miscellaneous stats about each team's
season."""
return self._get_team_stats_table('table#misc_stats')
def team_stats_shooting(self):
"""Returns a Pandas DataFrame of each team's shooting stats for the
season."""
return self._get_team_stats_table('table#team_shooting')
def opp_stats_shooting(self):
"""Returns a Pandas DataFrame of each team's opponent's shooting stats
for the season."""
return self._get_team_stats_table('table#opponent_shooting')
@sportsref.decorators.memoize
def _get_player_stats_table(self, identifier):
"""Helper function for player season stats.
:identifier: string identifying the type of stat, e.g. 'per_game'.
:returns: A DataFrame of stats.
"""
doc = self.get_sub_doc(identifier)
table = doc('table#{}_stats'.format(identifier))
df = sportsref.utils.parse_table(table)
return df
def player_stats_per_game(self):
"""Returns a DataFrame of per-game player stats for a season."""
return self._get_player_stats_table('per_game')
def player_stats_totals(self):
"""Returns a DataFrame of player stat totals for a season."""
return self._get_player_stats_table('totals')
def player_stats_per36(self):
"""Returns a DataFrame of player per-36 min stats for a season."""
return self._get_player_stats_table('per_minute')
def player_stats_per100(self):
"""Returns a DataFrame of player per-100 poss stats for a season."""
return self._get_player_stats_table('per_poss')
def player_stats_advanced(self):
"""Returns a DataFrame of player per-100 poss stats for a season."""
return self._get_player_stats_table('advanced')
def mvp_voting(self):
"""Returns a DataFrame containing information about MVP voting."""
raise NotImplementedError('nba.Season.mvp_voting')
def roy_voting(self):
"""Returns a DataFrame containing information about ROY voting."""
url = '{}/awards/awards_{}.html'.format(sportsref.nba.BASE_URL, self.yr)
doc = pq(sportsref.utils.get_html(url))
table = doc('table#roy')
df = sportsref.utils.parse_table(table)
return df
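# Usage sketch (illustrative, not part of the original source): these methods belong
# to the nba.Season class defined above, so a typical session might look like the
# following, assuming a Season constructor that takes a year.
#
#   season = sportsref.nba.Season(2017)
#   standings = season.standings()              # seeds, conference, expanded stats
#   per_game = season.player_stats_per_game()   # one row per player
#   team_totals = season.team_stats_totals()    # indexed by team_id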
|
gpl-3.0
| 5,210,937,238,460,397,000 | 35.538745 | 90 | 0.608362 | false | 3.685151 | false | false | false |
Andr3iC/courtlistener
|
cl/simple_pages/urls.py
|
1
|
4018
|
from django.conf.urls import url
from django.views.generic import RedirectView
from cl.simple_pages.sitemap import sitemap_maker
from cl.simple_pages.views import (
tools_page, validate_for_google, validate_for_google2, validate_for_wot,
validate_for_bing, robots, advanced_search, contact_thanks, contact, feeds,
coverage_graph, faq, about, browser_warning, serve_static_file, old_terms,
latest_terms, contribute, markdown_help, humans,
)
mime_types = ('pdf', 'wpd', 'txt', 'doc', 'html', 'mp3')
urlpatterns = [
# Footer stuff
url(r'^about/$', about, name='about'),
url(r'^faq/$', faq, name="faq"),
url(r'^coverage/$', coverage_graph, name='coverage'),
url(r'^feeds/$', feeds, name='feeds_info'),
url(r'^contribute/$', contribute, name='contribute'),
url(r'^contact/$', contact, name="contact"),
url(r'^contact/thanks/$', contact_thanks, name='contact_thanks'),
url(r'^help/markdown/$', markdown_help, name="markdown_help"),
# Serve a static file
url(r'^(?P<file_path>(?:' + "|".join(mime_types) + ')/.*)$',
serve_static_file),
# Advanced search page
url(
r'^search/advanced-techniques/$',
advanced_search,
name='advanced_search'
),
url(r'^terms/v/(\d{1,2})/$', old_terms, name='old_terms'),
url(r'^terms/$', latest_terms, name='terms'),
# Randoms
url(
r'^tools/$',
tools_page,
name='tools',
),
url(
r'^bad-browser/$',
browser_warning,
name='bad_browser',
),
# Robots & Humans
url(
r'^robots\.txt$',
robots,
name='robots'
),
url(
r'^humans\.txt$',
humans,
name='humans',
),
# Sitemap:
url(r'^sitemap-simple-pages\.xml$', sitemap_maker),
# SEO-related stuff
url(r'^BingSiteAuth.xml$', validate_for_bing),
url(r'^googleef3d845637ccb353.html$', validate_for_google),
url(r'^google646349975c2495b6.html$', validate_for_google2),
url(r'^mywot8f5568174e171ff0acff.html$', validate_for_wot),
# Favicon, touch icons, etc.
url(r'^favicon\.ico$',
RedirectView.as_view(
url='/static/ico/favicon.ico',
permanent=True)),
url(r'^touch-icon-192x192\.png',
RedirectView.as_view(
url='/static/png/touch-icon-192x192.png',
permanent=True)),
url(r'^apple-touch-icon\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon.png',
permanent=True)),
url(r'^apple-touch-icon-72x72-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-72x72-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-76x76-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-76x76-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-114x114-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-114x114-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-120x120-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-120x120-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-144x144-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-144x144-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-152x152-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-152x152-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-180x180-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-180x180-precomposed.png',
permanent=True)),
url(r'^apple-touch-icon-precomposed\.png$',
RedirectView.as_view(
url='/static/png/apple-touch-icon-precomposed.png',
permanent=True)),
]
|
agpl-3.0
| 1,652,597,468,934,639,900 | 33.050847 | 79 | 0.599552 | false | 3.285364 | false | true | false |
jdahlin/d-feet
|
dfeet/_ui/busnamebox.py
|
1
|
2042
|
import gobject
import gtk
from dfeet.dbus_introspector import BusWatch
from busnameview import BusNameView
class BusNameBox(gtk.VBox):
__gsignals__ = {
'busname-selected' : (gobject.SIGNAL_RUN_LAST, gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))
}
def __init__(self, watch):
super(BusNameBox, self).__init__()
self.tree_view = BusNameView(watch)
self.tree_view.connect('cursor_changed', self.busname_selected_cb)
scroll = gtk.ScrolledWindow()
scroll.add(self.tree_view)
self.pack_start(scroll, True, True)
scroll.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
self.show_all()
def _completion_match_func(self, completion, key, iter):
print completion, key, iter
return self.tree_view._is_iter_equal(completion.get_model(),
iter, key)
def get_selected_busname(self):
(model, iter) = self.tree_view.get_selection().get_selected()
if not iter:
return None
busname = model.get_value(iter, BusWatch.BUSNAME_OBJ_COL)
return busname
def busname_selected_cb(self, treeview):
busname = self.get_selected_busname()
self.emit('busname-selected', busname)
def set_filter_string(self, value):
self.tree_view.set_filter_string(value)
self.tree_view.refilter()
def set_hide_private(self, hide_private):
self.tree_view.set_hide_private(hide_private)
self.tree_view.refilter()
def set_sort_col(self, value):
if value == 'Common Name':
col = BusWatch.COMMON_NAME_COL
elif value == 'Unique Name':
col = BusWatch.UNIQUE_NAME_COL
elif value == 'Process Name':
col = BusWatch.PROCESS_NAME_COL
else:
raise Exception('Value "' + value + '" is not a valid sort value')
self.tree_view.set_sort_column(col)
#self.tree_view.sort_column_changed()
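# Usage sketch (illustrative; `watch` is a dfeet BusWatch instance created by the
# host application, and `do_something` is a placeholder callback):
#   box = BusNameBox(watch)
#   box.connect('busname-selected', lambda widget, busname: do_something(busname))
#   box.set_sort_col('Common Name')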
|
gpl-2.0
| -5,044,364,324,568,237,000 | 30.415385 | 78 | 0.599902 | false | 3.666068 | false | false | false |
JuanbingTeam/djangobbs
|
djangobbs/accounts/models.py
|
1
|
6806
|
#!/usr/bin/env python
#coding=utf-8
from django.db import models
from django.contrib.auth.models import User
from django.contrib import admin
from django.utils.translation import ugettext as _T
from djangobbs.addresses.models import Person
from djangobbs.accounts.config import LOGO_FOLDER
from cPickle import dumps
class UserProfile(models.Model):
    # Linked to django.contrib.auth.models.User by default. This is the standard way to use UserProfile.
    user = models.ForeignKey(User, unique=True)
    # The user's nickname. User's built-in username does not allow Chinese characters, so this field serves as the user's real login name.
    nickname = models.CharField(max_length=200, unique=True, db_index=True, blank=False)
    # The user's avatar, stored under the LOGO_FOLDER directory.
    logo = models.FileField(upload_to=LOGO_FOLDER, blank=True, default="")
    # The user's personal information; the user may leave it blank.
    personal_data = models.ForeignKey(Person, null=True, db_index=True, blank=True, default="")
    # The user's extra data.
    extradata = models.ManyToManyField('accounts.ExtraProfileEntry', through='ExtraUserData')
def __unicode__(self):
return self.nickname
admin.site.register(UserProfile)
class ExtraUserData(models.Model):
"""此表真正保存用户的附加数据"""
# 对应该项的用户
user = models.ForeignKey(UserProfile)
# 对应的项
entry = models.ForeignKey('accounts.ExtraProfileEntry')
# 记录的内容
content = models.TextField(blank=True, default="")
# 记录的时间
time = models.DateTimeField(auto_now=True)
def __unicode__(self):
return unicode(self.user) + u'.' + unicode(self.entry) + u'@' + unicode(self.time) + u'.' + self.content
admin.site.register(ExtraUserData)
EXTERNAL_ENTRY_COLLECT_SOURCE = (
    ('U', _T('By User')), # filled in by the user
    ('?', _T('Undefined')), # reserved by the system; each application decides its own usage
    ('M', _T('request.META')), # read from request.META
    ('G', _T('request.GET')), # read from request.GET
    ('P', _T('request.POST')), # read from request.POST
    ('R', _T('request.REQUEST')),# read from request.REQUEST
    ('C', _T('request.COOKIE')), # read from request.COOKIES
    ('s', _T('request.session')),# read from request.session
    ('F', _T('request.FILES')), # read from request.FILES
)
EXTERNAL_ENTRY_COLLECT_TIME = (
    ('R', _T('At register')), # required at registration; cannot be changed by the user afterwards
    ('M', _T('Manual')), # filled at registration; may be changed manually by the user afterwards
    ('I', _T('At login')), # recorded automatically at login
    ('O', _T('At logout')), # recorded automatically at logout
    ('A', _T('At all request')), # recorded on every request
    ('?', _T('Undefined')), # reserved by the system; each application decides its own usage
)
class ExtraProfileEntry(models.Model):
"""This model records all extra user information required by the bbs system."""
    # A name for this entry, e.g. 'login IP' or 'access time'
    name = models.CharField(max_length=100, unique=True, db_index=True)
    # How the data is collected: automatically from the request, or provided by the user.
    # Defaults to manual user input. See the comments on EXTERNAL_ENTRY_COLLECT_SOURCE.
    source = models.CharField(max_length=1, default='U', choices=EXTERNAL_ENTRY_COLLECT_SOURCE)
    # When the data is collected: at every login, on every request, or something else.
    # See the comments on EXTERNAL_ENTRY_COLLECT_TIME.
    time = models.CharField(max_length=1, default='M', choices=EXTERNAL_ENTRY_COLLECT_TIME)
    # Used to validate the data; the exact usage is not decided yet. Possibly a regular expression, or something else.
    type = models.TextField(blank=True, default='')
    # The number of duplicate records allowed; by default each entry is recorded only once per user.
    dupli = models.PositiveIntegerField(null=True, default=1)
    # The keyword to look up when reading automatically from a request dict; if '*', the whole dict is recorded (serialized with pickle).
    keyword = models.TextField(blank=True, default="")
def __unicode__(self):
return self.name
def push(self, user, data):
"""保存额外的用户数据"""
record = ExtraUserData()
record.user = user
record.entry = self
record.content = data
record.save()
if self.dupli != None:
objs = ExtraUserData.objects.filter(user=user).filter(entry=self)
if objs.count() > self.dupli:
obj = objs.order_by('time')[0] # order by time. the 1st is the oldest record.
obj.delete()
def get_request_data(self, request):
"""该函数从request里取出需要保存的数据"""
dict = None
if self.source == 'M':
dict = request.META
elif self.source == 'G':
dict = request.GET
elif self.source == 'P':
dict = request.POST
elif self.source == 'R':
dict = request.REQUEST
elif self.source == 'C':
dict = request.COOKIE
elif self.source == 's':
dict = request.session
elif self.source == 'F':
dict = request.FILES
else:
dict = None
if dict != None:
if self.keyword == '*':
return dumps(dict)
elif dict.has_key(self.keyword):
return dict.get(self.keyword)
return ""
admin.site.register(ExtraProfileEntry)
"""以下是初始化数据内容的"""
try:
superuser = User.objects.get(id=1)
UserProfile.objects.get(user=superuser)
except UserProfile.DoesNotExist:
"""以下代码试图为第一个超级用户初始化一个UserProfile"""
profile = UserProfile()
profile.user = superuser
profile.nickname = superuser.username
profile.personal_data = None
profile.save()
except Exception, error:
pass
try:
if ExtraProfileEntry.objects.all().count() == 0:
"""以下代码使得系统默认记录用户登录的最后10个IP,在models被import时执行。"""
entry = ExtraProfileEntry()
entry.name = 'login IP' # 记录login的IP
entry.source = 'M' # 从request.META读出
entry.time = 'I' # 每次login时记录
entry.type = "IPAddressField"
entry.dupli = 10 # 记录最近10次的IP
entry.keyword = 'REMOTE_ADDR' # 记录request.META['REMOTE_ADDR']
entry.save()
except Exception, error:
pass
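# Usage sketch (illustrative; `request` would come from a login view or signal
# handler, and `profile` is the UserProfile of the authenticated user):
#   for entry in ExtraProfileEntry.objects.filter(time='I'):
#       data = entry.get_request_data(request)
#       entry.push(profile, data)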
|
apache-2.0
| 233,650,022,630,310,980 | 31.511628 | 112 | 0.596808 | false | 2.718868 | false | false | false |
csningli/MultiAgent
|
examples/simple_move/simple_move_sim.py
|
1
|
2835
|
# MultiAgent
# (c) 2017-2019, NiL, [email protected]
import sys, random, datetime, math
random.seed(datetime.datetime.now())
sys.path.append("../..")
from mas.multiagent import *
AREA_SIZE = 200
POS_ERROR = 5
MIN_SPEED = 100
MAX_SPEED = 500
class TargetModule(Module) :
def __init__(self) :
super(TargetModule, self).__init__()
self.index = 0
self.targets = [
(100, 0), (100, 100),
(0, 100), (-100, 100),
(-100, 0), (-100, -100),
(0, -100), (100, -100),
]
def process(self) :
pos = self.mem.read("pos", None)
target = self.mem.read("target", None)
if pos is not None and (target is None or ppdist_l2(target, pos) <= POS_ERROR) :
self.mem.reg("target", self.targets[self.index])
self.index = (self.index + 1) % len(self.targets)
class SimpleMoveModule(ObjectModule) :
def act(self, resp) :
target = self.mem.read("target", None)
pos = self.mem.read("pos", None)
if target is not None and pos is not None:
diff = vec2_sub(target, pos)
resp.add_msg(Message(key = "angle", value = vec2_angle(diff)))
resp.add_msg(Message(key = "vel", value = vec2_min_max(vec2_scale(diff, 3), MIN_SPEED, MAX_SPEED)))
super(SimpleMoveModule, self).act(resp)
class SimpleMoveAgent(Agent) :
def __init__(self, name) :
super(SimpleMoveAgent, self).__init__(name)
self.mods = [SimpleMoveModule(), TargetModule()]
def focus(self) :
focus_info = super(SimpleMoveAgent, self).get_focus()
target = self.mem.read("target", None)
if target is not None :
focus_info["target"] = "(%4.2f, %4.2f)" % (target[0], target[1])
pos = self.mem.read("pos", None)
if pos is not None :
focus_info["pos"] = "(%4.2f, %4.2f)" % (pos[0], pos[1])
return focus_info
def run_sim(filename = None) :
'''
>>> run_sim()
'''
# create the oracle space
oracle = OracleSpace()
# create the context
context = Context(oracle = oracle)
# create the schedule for adding agents in the running
schedule = Schedule()
# add objects and agents to the context
obj = Object(name = "0")
obj.pos = (0, 0)
context.add_obj(obj)
schedule.add_agent(SimpleMoveAgent(name = "0"))
# create the driver
driver = Driver(context = context, schedule = schedule)
# create the inspector
# inspector = Inspector(delay = 10)
# create the simulator
sim = Simulator(driver = driver)
print("Simulating")
sim.simulate(graphics = True, filename = filename)
if __name__ == '__main__' :
filename = None
if (len(sys.argv) > 1) :
filename = sys.argv[1]
run_sim(filename = filename)
|
apache-2.0
| 8,272,226,968,732,636,000 | 24.540541 | 111 | 0.573192 | false | 3.407452 | false | false | false |
azotdata/azot-event-extractor
|
db_mongodb.py
|
1
|
4124
|
# -*- coding: utf-8 -*-
""""
Script which contains all class definition related to COUCHDB database
"""
from manage_db import *
from mongoengine import *
from utils import *
class Connection(ManageConnection):
"""
Manage connection to database
"""
def __init__(self,db_server):
self.db_server = db_server
ManageConnection.__init__(self)
def connect(self):
self.connection(self.db_server)
""" -------------------------------------------- """
""" ----Database connection must be done here--- """
if DB_SERVER=='couchdb':
from db_couchdb import *
elif DB_SERVER=='mongodb':
from db_mongodb import *
connecting = Connection(DB_SERVER)
connecting.connect()
""" -------------------------------------------- """
class Article(Document):
"""
Represents articles stored in DB. Used for both reading and inserting articles datas
"""
meta = {
'strict': False,
'collection': 'articles'
}
num_cluster = IntField()
pub_date = StringField()
source = StringField()
text = StringField()
title = StringField()
@staticmethod
def check_article_url(url):
if not Article.objects(source=url):
return True
def _set_article(self, article):
self.source = article.url
self.title = article.title
self.text = article.text
if article.publish_date:
self.pub_date = str(article.publish_date[0].date())
else: # just in case publishing date cannot be retrieved, stores 'None'
self.pub_date = str(article.publish_date)
def save_article(self,article):
self._set_article(article)
self.save()
@staticmethod
def get_all_articles():
return Article.objects.all()
def update_article(self,cluster_key):
self.update(set__num_cluster=cluster_key)
class Stopword(Document):
"""
Class for storing stopwords objects
"""
meta = {
'collection': 'stopword'
}
lang = StringField(max_length=50)
word = StringField(max_length=50)
@staticmethod
def sw_exist():
if Stopword.objects:
return True
@staticmethod
def set_stopwords():
word_list = stopwords_list(SW_PATH)
sw_list = []
for lang, word in word_list.iteritems():
for each_word in word:
sw_list.append(Stopword(**{"lang": lang, "word": each_word}))
Stopword.objects.insert(sw_list)
@staticmethod
def get_all_stopwords():
return Stopword.objects.all()
class ClusteringResume(Document):
"""
Stores main useful elements for each cluster
"""
meta = {
'collection': 'clustering_resume',
'strict': False
}
_id = IntField()
keywords = DictField()
cluster_title = StringField()
def set_dataclusters(self,cluster_id,keywords,title):
#cluster_list = []
#for i in range(cluster_number):
self._id = cluster_id
self.keywords = keywords
self.cluster_title = title
self.save()
@staticmethod
def remove_cluster_content():
return ClusteringResume.objects.delete()
class ClusteringReport(Document):
"""
Stores details after clustering.
"""
meta = {
'collection': 'clustering_report'
}
date = StringField()
count = IntField()
iterations = IntField()
nb_cluster = IntField()
cluster_list = DictField()
class ClusteringTagged(Document):
"""
Stores important words of a same topic which are generated after manual classification.
"""
meta = {
'collection': 'clustering_tagged'
}
nb_cluster_before = IntField()
nb_cluster_after = IntField()
tags = StringField()
clusters = DictField()
class TopicWord(Document):
"""
Class for storing important words, called topic, per cluster
"""
meta = {
'collection': 'topic_word'
}
word = StringField()
topic = StringField()
count = IntField()
stopword = BooleanField()
to_delete = BooleanField()
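# Usage sketch (illustrative; `article` is expected to expose url, title, text and
# publish_date attributes, e.g. a newspaper Article object):
#   if Article.check_article_url(article.url):
#       Article().save_article(article)
#   stopwords = Stopword.get_all_stopwords()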
|
gpl-3.0
| -8,880,503,633,293,053,000 | 23.843373 | 95 | 0.592871 | false | 4.174089 | false | false | false |
drewcsillag/skunkweb
|
pylibs/pargen/initParGenGrammar.py
|
1
|
1108
|
#
# Copyright (C) 2001 Andrew T. Csillag <[email protected]>
#
# You may distribute under the terms of either the GNU General
# Public License or the SkunkWeb License, as specified in the
# README file.
#
from RuleItems import Rule
import CompileGrammar
ruleSet=[
#Rule("Start", ['S','$'], 0, 'noop'),
Rule('S', ['RuleList'], 0, 'noop'),
Rule('RuleList', ['Rule', 'RuleList'], 1, 'noop'),
Rule('RuleList', [], 2, 'noop'),
Rule('Rule', ['COLON', 'id', 'COLON', 'TokItem', 'COLON', 'TokList'],
3,'RuleLine'),
Rule('TokList', ['Token', 'TokList'], 4, 'TTtoTokList'),
Rule('TokList', ['id', 'TokList'], 5, 'TTtoTokList'),
Rule('TokList', [], 6, 'NullToTokList'),
Rule('TokItem', ['Token',], 7, 'TokenToTokItem'),
Rule('TokItem', ['id',], 8, 'idToTokItem'),
]
print 'Rules'
print '--------------------'
for i in ruleSet:
print i
grammar = CompileGrammar.compileGrammar('LALR1', ruleSet, showStates = 1, showItems = 1)
gf = open ('ParGenGrammar.py', 'w')
for i in grammar.items():
gf.write('%s = %s\n' % i)
gf.close()
|
gpl-2.0
| 5,383,967,760,084,567,000 | 31.588235 | 88 | 0.58213 | false | 2.994595 | false | false | false |
isomer/faucet
|
faucet/vlan.py
|
1
|
7169
|
# Copyright (C) 2015 Brad Cowie, Christopher Lorier and Joe Stringer.
# Copyright (C) 2015 Research and Education Advanced Network New Zealand Ltd.
# Copyright (C) 2015--2017 The Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
from conf import Conf
from valve_util import btos
class VLAN(Conf):
tagged = None
untagged = None
vid = None
faucet_vips = None
bgp_as = None
bgp_local_address = None
bgp_port = None
bgp_routerid = None
bgp_neighbor_addresses = []
bgp_neighbour_addresses = []
bgp_neighbor_as = None
bgp_neighbour_as = None
routes = None
max_hosts = None
unicast_flood = None
acl_in = None
# Define dynamic variables with prefix dyn_ to distinguish from variables set
# configuration
dyn_ipv4_routes = None
dyn_ipv6_routes = None
dyn_arp_cache = None
dyn_nd_cache = None
dyn_host_cache = None
defaults = {
'name': None,
'description': None,
'acl_in': None,
'faucet_vips': None,
'unicast_flood': True,
'bgp_as': 0,
'bgp_local_address': None,
'bgp_port': 9179,
'bgp_routerid': '',
'bgp_neighbour_addresses': [],
'bgp_neighbor_addresses': [],
'bgp_neighbour_as': 0,
'bgp_neighbor_as': None,
'routes': None,
'max_hosts': None,
}
def __init__(self, _id, dp_id, conf=None):
if conf is None:
conf = {}
self._id = _id
self.dp_id = dp_id
self.update(conf)
self.set_defaults()
self._id = _id
self.tagged = []
self.untagged = []
self.dyn_ipv4_routes = {}
self.dyn_ipv6_routes = {}
self.dyn_arp_cache = {}
self.dyn_nd_cache = {}
self.dyn_host_cache = {}
if self.faucet_vips:
self.faucet_vips = [
ipaddress.ip_interface(btos(ip)) for ip in self.faucet_vips]
if self.bgp_as:
assert self.bgp_port
assert ipaddress.IPv4Address(btos(self.bgp_routerid))
for neighbor_ip in self.bgp_neighbor_addresses:
assert ipaddress.ip_address(btos(neighbor_ip))
assert self.bgp_neighbor_as
if self.routes:
self.routes = [route['route'] for route in self.routes]
for route in self.routes:
ip_gw = ipaddress.ip_address(btos(route['ip_gw']))
ip_dst = ipaddress.ip_network(btos(route['ip_dst']))
assert ip_gw.version == ip_dst.version
if ip_gw.version == 4:
self.ipv4_routes[ip_dst] = ip_gw
else:
self.ipv6_routes[ip_dst] = ip_gw
@property
def ipv4_routes(self):
return self.dyn_ipv4_routes
@ipv4_routes.setter
def ipv4_routes(self, value):
self.dyn_ipv4_routes = value
@property
def ipv6_routes(self):
return self.dyn_ipv6_routes
@ipv6_routes.setter
def ipv6_routes(self, value):
self.dyn_ipv6_routes = value
@property
def arp_cache(self):
return self.dyn_arp_cache
@arp_cache.setter
def arp_cache(self, value):
self.dyn_arp_cache = value
@property
def nd_cache(self):
return self.dyn_nd_cache
@nd_cache.setter
def nd_cache(self, value):
self.dyn_nd_cache = value
@property
def host_cache(self):
return self.dyn_host_cache
@host_cache.setter
def host_cache(self, value):
self.dyn_host_cache = value
def set_defaults(self):
for key, value in list(self.defaults.items()):
self._set_default(key, value)
self._set_default('vid', self._id)
self._set_default('name', str(self._id))
self._set_default('faucet_vips', [])
self._set_default('bgp_neighbor_as', self.bgp_neighbour_as)
self._set_default(
'bgp_neighbor_addresses', self.bgp_neighbour_addresses)
def __str__(self):
port_list = [str(x) for x in self.get_ports()]
ports = ','.join(port_list)
return 'vid:%s ports:%s' % (self.vid, ports)
def get_ports(self):
return self.tagged + self.untagged
def mirrored_ports(self):
return [port for port in self.get_ports() if port.mirror]
def mirror_destination_ports(self):
return [port for port in self.get_ports() if port.mirror_destination]
def flood_ports(self, configured_ports, exclude_unicast):
ports = []
for port in configured_ports:
if not port.running:
continue
if exclude_unicast:
if not port.unicast_flood:
continue
ports.append(port)
return ports
def tagged_flood_ports(self, exclude_unicast):
return self.flood_ports(self.tagged, exclude_unicast)
def untagged_flood_ports(self, exclude_unicast):
return self.flood_ports(self.untagged, exclude_unicast)
def port_is_tagged(self, port_number):
for port in self.tagged:
if port.number == port_number:
return True
return False
def port_is_untagged(self, port_number):
for port in self.untagged:
if port.number == port_number:
return True
return False
def is_faucet_vip(self, ip):
for faucet_vip in self.faucet_vips:
if ip == faucet_vip.ip:
return True
return False
def ip_in_vip_subnet(self, ip):
for faucet_vip in self.faucet_vips:
if ip in faucet_vip.network:
return True
return False
def ips_in_vip_subnet(self, ips):
for ip in ips:
if not self.ip_in_vip_subnet(ip):
return False
return True
def from_connected_to_vip(self, src_ip, dst_ip):
"""Return True if src_ip in connected network and dst_ip is a VIP.
Args:
src_ip (ipaddress.ip_address): source IP.
dst_ip (ipaddress.ip_address): destination IP
Returns:
True if local traffic for a VIP.
"""
if self.is_faucet_vip(dst_ip) and self.ip_in_vip_subnet(src_ip):
return True
return False
def to_conf(self):
return self._to_conf()
def __hash__(self):
items = [(k, v) for k, v in list(self.__dict__.items()) if 'dyn' not in k]
return hash(frozenset(list(map(str, items))))
def __eq__(self, other):
return hash(self) == hash(other)
def __ne__(self, other):
return not self.__eq__(other)
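# Usage sketch (illustrative; the keys in `conf` mirror the defaults declared above,
# and the VIP address is made up):
#   vlan = VLAN(100, dp_id=1, conf={
#       'description': 'office',
#       'faucet_vips': ['10.0.0.254/24'],
#   })
#   vlan.port_is_tagged(1)   # False until Port objects are appended to vlan.tagged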
|
apache-2.0
| -7,152,596,814,654,182,000 | 28.746888 | 82 | 0.580834 | false | 3.631712 | false | false | false |
cltrudeau/django-awl
|
extras/sample_site/app/migrations/0001_initial.py
|
1
|
1338
|
# Generated by Django 3.0.5 on 2020-06-12 14:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Writer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
],
),
migrations.CreateModel(
name='Show',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=50)),
('writer', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Writer')),
],
),
migrations.CreateModel(
name='Episode',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('show', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='app.Show')),
],
),
]
|
mit
| 4,384,991,229,205,189,600 | 34.210526 | 131 | 0.562033 | false | 4.207547 | false | false | false |
jjmalina/pyinfluxql
|
tests/test_query.py
|
1
|
16698
|
# -*- coding: utf-8 -*-
"""
test_query
~~~~~~~~~~
Tests the query generator
"""
import six
import pytest
from datetime import datetime, timedelta
import dateutil
from pyinfluxql.functions import Sum, Min, Max, Count, Distinct, Percentile
from pyinfluxql.query import Query, ContinuousQuery
@pytest.mark.unit
def test_clone():
"""Cloning a query instance should return a new query instance with the
same data but different references
"""
query = Query(Count(Distinct('col'))).from_('measurement')
query._limit = 100
query._group_by_time = timedelta(hours=24)
query._group_by.append('col2')
new_query = query.clone()
assert new_query._measurement == query._measurement
assert len(new_query._select_expressions) == len(query._select_expressions)
assert new_query._select_expressions != query._select_expressions
assert new_query._limit == query._limit
assert new_query._group_by_time == query._group_by_time
assert new_query._group_by == query._group_by
new_query._select_expressions.append(Count('blah'))
new_query._limit = 10
new_query._group_by_time = timedelta(days=7)
new_query._group_by.append('col3')
assert len(new_query._select_expressions) != len(query._select_expressions)
assert len(new_query._select_expressions) == 2
assert len(query._select_expressions) == 1
assert new_query._limit != query._limit
assert new_query._limit == 10
assert query._limit == 100
assert new_query._group_by_time != query._group_by_time
assert new_query._group_by_time == timedelta(days=7)
assert query._group_by_time == timedelta(hours=24)
assert new_query._group_by != query._group_by
assert new_query._group_by == ['col2', 'col3']
assert query._group_by == ['col2']
@pytest.mark.unit
def test_select():
"""Selecting should be chainable and add to the `_select_expressions`
list.
"""
q = Query('colname')
query = q.from_('test_measurement')
assert isinstance(query, Query)
assert len(query._select_expressions) == 1
query.select('colname2').select('colname3')
assert isinstance(query, Query)
assert len(query._select_expressions) == 3
query.select('colname4', 'colname5')
assert len(query._select_expressions) == 5
@pytest.mark.unit
def test_format_select():
q = Query().from_('test_measurement')
q._select_expressions = ['hello']
assert q._format_select() == 'SELECT hello'
q._select_expressions = ['hello', 'goodbye']
assert q._format_select() == 'SELECT hello, goodbye'
q = Query().from_('test_measurement')
q._select_expressions = [Sum('hello')]
assert q._format_select() == 'SELECT SUM(hello)'
q._select_expressions = [Sum('hello'), Min('bye')]
assert q._format_select() == 'SELECT SUM(hello), MIN(bye)'
q = Query().from_('1').select(Max(Min('hello')))
assert q._format_select() == 'SELECT MAX(MIN(hello))'
@pytest.mark.unit
def test_format_select_expressions():
"""_format_select_expressions should take multiple arguments and
format functions correctly
"""
q = Query()
assert q._format_select_expressions('1 + 1') == '1 + 1'
assert q._format_select_expressions('1 + 1', 'BLAH') == '1 + 1, BLAH'
assert q._format_select_expressions('1 + 1', 'BLAH', '2') == \
'1 + 1, BLAH, 2'
assert q._format_select_expressions(*[Distinct('a'), 'BLAH', '2']) == \
'DISTINCT(a), BLAH, 2'
@pytest.mark.unit
def test_format_select_rexpression():
"""_format_select_expression should take one argument and if a function
format it correctly
"""
q = Query()
assert q._format_select_expression('a') == 'a'
assert q._format_select_expression(Sum('a')) == 'SUM(a)'
assert q._format_select_expression(Sum(Max('a'))) == 'SUM(MAX(a))'
@pytest.mark.unit
def test_format_measurement():
q = Query().from_('test_measurement')
assert q._format_measurement('test_measurement') == 'test_measurement'
assert q._format_measurement('test series') == '"test series"'
assert q._format_measurement('test-series') == '"test-series"'
assert q._format_measurement('/test series*/') == '/test series*/'
assert q._format_measurement('/test-series*/') == '/test-series*/'
@pytest.mark.unit
def test_format_from():
"""_format_from should format correctly
"""
assert Query().from_('test_measurement')._format_from() == 'FROM test_measurement'
assert Query().from_('test series')._format_from() == 'FROM "test series"'
assert Query().from_('a_series')._format_from() == 'FROM a_series'
assert Query().from_('a series')._format_from() == 'FROM "a series"'
@pytest.mark.unit
def test_where():
"""where should insert into the _where dict and be chainable
"""
q = Query('test_measurement').where(a=1, b=3, c__gt=3)
assert q._where['a'] == 1
assert q._where['b'] == 3
assert q._where['c__gt'] == 3
assert isinstance(q, Query)
@pytest.mark.unit
def test_format_value():
"""_format_value should format strings, ints, floats, bools and
datetimes correctly
"""
q = Query('test_measurement')
assert q._format_value('hello') == "'hello'"
assert q._format_value(1) == "1"
assert q._format_value(1.0) == "1.0"
assert q._format_value(True) == "true"
assert q._format_value(False) == "false"
assert q._format_value('/stats.*/') == "/stats.*/"
assert q._format_value(datetime(2014, 2, 10, 18, 4, 53, 834825)) == \
"'2014-02-10 18:04:53.834'"
assert q._format_value(
datetime(2014, 2, 10, 18, 4, 53, 834825)
.replace(tzinfo=dateutil.tz.gettz('US/Eastern'))) == \
"'2014-02-10 23:04:53.834'"
@pytest.mark.unit
def test_date_range():
q = Query()
start = datetime.utcnow() - timedelta(hours=1)
end = datetime.utcnow() - timedelta(minutes=1)
q.date_range(start)
assert q._where['time__gt'] == start
q = Query()
q.date_range(start, end)
assert q._where['time__gt'] == start
assert q._where['time__lt'] == end
q = Query()
q.date_range(start=start, end=end)
assert q._where['time__gt'] == start
assert q._where['time__lt'] == end
q = Query()
q.date_range(start=10, end=100)
assert q._where['time__gt'] == 10
assert q._where['time__lt'] == 100
with pytest.raises(ValueError):
Query().date_range(end, start)
with pytest.raises(ValueError):
Query().date_range()
@pytest.mark.unit
def test_format_where():
"""_format_where should format an entire where clause correctly
"""
q = Query().where(foo=4)
assert q._format_where() == 'WHERE foo = 4'
q = Query().where(foo__bar=4)
assert q._format_where() == 'WHERE foo.bar = 4'
q = Query().where(foo__bar__lt=4)
assert q._format_where() == 'WHERE foo.bar < 4'
q = Query().where(foo__bar__baz__lt=4)
assert q._format_where() == 'WHERE foo.bar.baz < 4'
query = Query().where(
col1='a',
col2__ne='b',
col3__lt=5,
col4__gt=7.0)
assert query._format_where() == \
"WHERE col1 = 'a' AND col2 != 'b' AND col3 < 5 AND col4 > 7.0"
@pytest.mark.unit
def test_format_where_eq():
"""equals expressions should be formatted correctly in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'eq', 'hi') == "col = 'hi'"
@pytest.mark.unit
def test_format_where_ne():
"""not equals expressions should be formatted correctly
in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'ne', False) == "col != false"
assert q._format_where_expression(['col'], 'ne', True) == "col != true"
@pytest.mark.unit
def test_format_where_lt():
"""less than expressions should be formatted correctly
in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'lt', 1.0) == "col < 1.0"
assert q._format_where_expression(['col'], 'lt', 50) == "col < 50"
@pytest.mark.unit
def test_format_where_gt():
"""greater than expressions should be formatted correctly
in a where clause
"""
q = Query()
assert q._format_where_expression(['col'], 'gt', 1.0) == "col > 1.0"
assert q._format_where_expression(['col'], 'gt', 50) == "col > 50"
@pytest.mark.unit
def test_group_by():
"""group_by should correctly set the query's group by arguments and
be chainable
"""
td = timedelta(hours=1)
q = Query().group_by('col1', 'col2', time=td)
assert isinstance(q, Query)
assert q._group_by_time == td
assert q._group_by == ['col1', 'col2']
q = Query().group_by(time=td, fill=True)
assert q._group_by_time == td
assert q._group_by_fill
q = Query().group_by(time=td, fill=False)
assert not q._group_by_fill
@pytest.mark.unit
def test_group_by_time():
td = timedelta(hours=1)
q = Query().group_by_time(td)
assert q._group_by_time == td
td = timedelta(hours=2)
q.group_by_time(td)
assert q._group_by_time == td
q.group_by_time('1h')
assert q._group_by_time == '1h'
@pytest.mark.unit
def test_format_group_by():
"""_format_group_by should correctly format one or more
group by statements
"""
q = Query().group_by('col1')
assert q._format_group_by() == 'GROUP BY col1'
q.group_by('col2')
assert q._format_group_by() == 'GROUP BY col1, col2'
q.group_by(time=timedelta(days=1))
assert q._format_group_by() == 'GROUP BY time(1d), col1, col2'
q = Query().group_by(time=timedelta(hours=5))
assert q._format_group_by() == 'GROUP BY time(5h)'
q = Query().group_by(time=timedelta(hours=5), fill=True)
assert q._format_group_by() == 'GROUP BY time(5h) fill(0)'
q = Query().group_by(time=timedelta(hours=5), fill=False)
assert q._format_group_by() == 'GROUP BY time(5h)'
q = Query().group_by(time='1h', fill=False)
assert q._format_group_by() == 'GROUP BY time(1h)'
q = Query().group_by_time('1h', fill=True)
assert q._format_group_by() == 'GROUP BY time(1h) fill(0)'
@pytest.mark.unit
def test_limit():
"""limit should set the query's limit argument and be chainable
"""
q = Query().limit(1000)
assert isinstance(q, Query)
assert q._limit == 1000
@pytest.mark.unit
def test_format_limit():
"""_format_lmit should correctly format the limit clause
"""
q = Query().limit(1000)
assert q._format_limit() == 'LIMIT 1000'
@pytest.mark.unit
def test_order():
q = Query().order('time', 'asc')
assert q._order == 'ASC'
q = Query().order('time', 'ASC')
assert q._order == 'ASC'
q = Query().order('time', 'desc')
assert q._order == 'DESC'
q = Query().order('time', 'DESC')
assert q._order == 'DESC'
with pytest.raises(TypeError):
Query().order('-time')
@pytest.mark.unit
def test_format_order():
"""_format_order should correctly format the order clause
"""
q = Query().order('time', 'asc')
assert q._format_order() == 'ORDER BY time ASC'
q.order('time', 'desc')
assert q._format_order() == 'ORDER BY time DESC'
q = Query().order('time', 'ASC')
assert q._format_order() == 'ORDER BY time ASC'
q.order('time', 'DESC')
assert q._format_order() == 'ORDER BY time DESC'
@pytest.mark.unit
def test_into():
q = Query().into('another_series')
assert q._into_series == 'another_series'
@pytest.mark.unit
def test_format_into():
q = Query().into('another_series')
assert q._format_into() == 'INTO another_series'
q = Query()
assert q._format_into() == ''
@pytest.mark.unit
def test_format_query():
q = Query().from_('x')
expected = "SELECT * FROM x;"
assert q._format_query("SELECT * FROM x ") == expected
expected = 'DELETE FROM x;'
assert q._format_query('DELETE FROM x ') == expected
@pytest.mark.unit
def test_format_select_query():
"""_format should correctly format the entire query
"""
# Test simple selects
assert Query('*').from_('x')._format_select_query() == \
"SELECT * FROM x;"
assert Query('a', 'b').from_('x')._format_select_query() == \
"SELECT a, b FROM x;"
# Test limit
assert Query('*').from_('x').limit(100) \
._format_select_query() == "SELECT * FROM x LIMIT 100;"
# Test order
assert Query('*').from_('x').order('time', 'asc') \
._format_select_query() == "SELECT * FROM x ORDER BY time ASC;"
assert Query('*').from_('x').order('time', 'desc') \
._format_select_query() == "SELECT * FROM x ORDER BY time DESC;"
# Test functions
assert Query(Count('a')).from_('x') \
._format_select_query() == "SELECT COUNT(a) FROM x;"
assert Query(Sum(Count('a'))).from_('x') \
._format_select_query() == "SELECT SUM(COUNT(a)) FROM x;"
# Test where, comparators and value formatting
assert Query('*').from_('x').where(a='something') \
._format_select_query() == "SELECT * FROM x WHERE a = 'something';"
assert Query('*').from_('x').where(a='something', b=1) \
._format_select_query() == \
"SELECT * FROM x WHERE a = 'something' AND b = 1;"
assert Query('*').from_('x').where(a__ne='something') \
._format_select_query() == "SELECT * FROM x WHERE a != 'something';"
assert Query('*').from_('x').where(a=True, b=False) \
._format_select_query() == \
"SELECT * FROM x WHERE a = true AND b = false;"
assert Query('*').from_('x').where(a=True, b=False) \
._format_select_query() == \
"SELECT * FROM x WHERE a = true AND b = false;"
assert Query('*').from_('x').where(a__lt=4, b__gt=6.0) \
._format_select_query() == "SELECT * FROM x WHERE a < 4 AND b > 6.0;"
# Test group by
assert Query('*').from_('x').group_by('a') \
._format_select_query() == "SELECT * FROM x GROUP BY a;"
assert Query('*').from_('x').group_by('a', 'b') \
._format_select_query() == "SELECT * FROM x GROUP BY a, b;"
q = Query('*').from_('x') \
.group_by(time=timedelta(hours=1))
assert q._format_select_query() == "SELECT * FROM x GROUP BY time(1h);"
q = Query('*').from_('x') \
.group_by('a', 'b', time=timedelta(hours=1))
assert q._format_select_query() == "SELECT * FROM x GROUP BY time(1h), a, b;"
# Test something really crazy
fmt = "SELECT COUNT(a), SUM(b), PERCENTILE(d, 99) FROM x "
fmt += "WHERE e = false AND f != true AND g < 4 AND h > 5 "
fmt += "GROUP BY time(1h), a, b fill(0) "
fmt += "LIMIT 100 ORDER BY time ASC;"
q = Query(Count('a'), Sum('b'), Percentile('d', 99)) \
.from_('x') \
.where(e=False, f__ne=True, g__lt=4, h__gt=5) \
.group_by('a', 'b', time=timedelta(minutes=60), fill=True) \
.limit(100).order('time', 'asc')
assert q._format_select_query() == fmt
@pytest.mark.unit
def test_format_delete_query():
q = Query().from_('series')
q._is_delete = True
assert q._format_delete_query() == 'DELETE FROM series;'
q.date_range(start=20, end=40)
expected = 'DELETE FROM series WHERE time > 20 AND time < 40;'
assert q._format_delete_query() == expected
q = Query().from_('series')
q.date_range(end=40)
expected = 'DELETE FROM series WHERE time < 40;'
assert q._format_delete_query() == expected
@pytest.mark.unit
def test_format():
q = Query('blah').from_('series')
q._is_delete = True
assert q._format() == 'DELETE FROM series;'
q.date_range(start=20, end=40)
q._is_delete = False
expected = 'SELECT blah FROM series WHERE time > 20 AND time < 40;'
assert q._format() == expected
@pytest.mark.unit
def test_str():
q = Query('blah').from_('series')
q._is_delete = True
assert str(q) == 'DELETE FROM series;'
q.date_range(start=20, end=40)
q._is_delete = False
expected = 'SELECT blah FROM series WHERE time > 20 AND time < 40;'
assert str(q) == expected
@pytest.mark.unit
def test_unicode():
q = Query('blah').from_('series')
q._is_delete = True
assert six.u(str(q)) == u'DELETE FROM series;'
q.date_range(start=20, end=40)
q._is_delete = False
expected = u'SELECT blah FROM series WHERE time > 20 AND time < 40;'
assert six.u(str(q)) == expected
@pytest.mark.unit
def test_format_continuous_query():
q = Query(Count('col')).from_('clicks') \
.group_by(time=timedelta(hours=1)).into('clicks.count.1h')
cq = ContinuousQuery("1h_clicks_count", "test", q)
expected = 'CREATE CONTINUOUS QUERY "1h_clicks_count" ON test BEGIN SELECT COUNT(col) FROM clicks GROUP BY time(1h) INTO clicks.count.1h END'
assert cq._format() == expected
|
mit
| 6,504,522,673,743,951,000 | 31.486381 | 145 | 0.604324 | false | 3.303918 | true | false | false |
EricssonResearch/calvin-base
|
calvin/runtime/north/authorization/policy_retrieval_point.py
|
1
|
4113
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import ABCMeta, abstractmethod
import errno
import os
import glob
import json
from calvin.utilities import calvinuuid
from calvin.utilities.calvinlogger import get_logger
_log = get_logger(__name__)
# This is an abstract class for the PRP (Policy Retrieval Point)
class PolicyRetrievalPoint(object):
__metaclass__ = ABCMeta # Metaclass for defining Abstract Base Classes
@abstractmethod
def get_policy(self, id):
"""Return a JSON representation of the policy identified by id"""
return
@abstractmethod
def get_policies(self, filter):
"""Return a JSON representation of all policies found by using filter"""
return
@abstractmethod
def create_policy(self, data):
"""Create policy based on the JSON representation in data"""
return
@abstractmethod
def update_policy(self, data, id):
"""Change the content of the policy identified by id to data (JSON representation of policy)"""
return
@abstractmethod
def delete_policy(self, id):
"""Delete the policy identified by id"""
return
class FilePolicyRetrievalPoint(PolicyRetrievalPoint):
def __init__(self, path):
# Replace ~ by the user's home directory.
self.path = os.path.expanduser(path)
if not os.path.exists(self.path):
try:
os.makedirs(self.path)
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
def get_policy(self, policy_id):
"""Return the policy identified by policy_id"""
try:
with open(os.path.join(self.path, policy_id + ".json"), 'rt') as data:
return json.load(data)
except Exception as err:
_log.error("Failed to open policy file for policy_id={}".format(policy_id))
raise
def get_policies(self, name_pattern='*'):
"""Return all policies found using the name_pattern"""
policies = {}
for filename in glob.glob(os.path.join(self.path, name_pattern + ".json")):
try:
with open(filename, 'rb') as data:
policy_id = os.path.splitext(os.path.basename(filename))[0]
policies[policy_id] = json.load(data)
except ValueError as err:
_log.error("Failed to parse policy as json, file={}".format(filename))
raise
except (OSError, IOError) as err:
_log.error("Failed to open file={}".format(filename))
raise
return policies
def create_policy(self, data):
"""Create policy based on the JSON representation in data"""
policy_id = calvinuuid.uuid("POLICY")
with open(os.path.join(self.path, policy_id + ".json"), "w") as file:
json.dump(data, file)
return policy_id
def update_policy(self, data, policy_id):
"""Change the content of the policy identified by policy_id to data (JSON representation of policy)"""
file_path = os.path.join(self.path, policy_id + ".json")
if os.path.isfile(file_path):
with open(file_path, "w") as file:
json.dump(data, file)
else:
raise IOError # Raise exception if policy named filename doesn't exist
def delete_policy(self, policy_id):
"""Delete the policy named policy_id"""
os.remove(os.path.join(self.path, policy_id + ".json"))
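# Usage sketch (illustrative; the path and policy body are made up):
#   prp = FilePolicyRetrievalPoint("~/.calvin/security/policies")
#   policy_id = prp.create_policy({"rule_combining": "permit_overrides", "rules": []})
#   policy = prp.get_policy(policy_id)
#   prp.update_policy(policy, policy_id)
#   prp.delete_policy(policy_id)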
|
apache-2.0
| -4,371,406,419,679,389,000 | 36.054054 | 110 | 0.627279 | false | 4.205521 | false | false | false |
shawncaojob/LC
|
PY/679_24_game.py
|
1
|
2347
|
# 679. 24 Game
# DescriptionHintsSubmissionsDiscussSolution
# DiscussPick One
# You have 4 cards each containing a number from 1 to 9. You need to judge whether they could operated through *, /, +, -, (, ) to get the value of 24.
#
# Example 1:
# Input: [4, 1, 8, 7]
# Output: True
# Explanation: (8-4) * (7-1) = 24
# Example 2:
# Input: [1, 2, 1, 2]
# Output: False
# Note:
# The division operator / represents real division, not integer division. For example, 4 / (1 - 2/3) = 12.
# Every operation done is between two numbers. In particular, we cannot use - as a unary operator. For example, with [1, 1, 1, 1] as input, the expression -1 - 1 - 1 - 1 is not allowed.
# You cannot concatenate numbers together. For example, if the input is [1, 2, 1, 2], we cannot write this as 12 + 12.
# 2018.02.04
# Basic DFS
# Special case, when left 2 elements, we need to check the result from other two element first by saving the existing result.
class Solution(object):
def judgePoint24(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
return self.dfs(nums, None, None)
# "save" is only needed when handling similar case of (a +/- b) * or / (c +/- d).
# However, this solution is not checking those symbols. Some further optimization can be made to reduce the redundant works.
def dfs(self, nums, cur, save):
if not nums and save is None and self.is24(cur): return True
if not nums and save and self.dfs([save], cur, None): return True
for i in xrange(len(nums)):
num, next_nums = nums[i], nums[:i] + nums[i+1:]
if cur is None: # BUG, if not cur, just set the cur.
if self.dfs(next_nums, num, save): return True
else:
next_cur = [cur + num, cur - num, cur * num, float(cur) / num, num - cur]
if cur != 0: next_cur += [float(num) / cur]
for x in next_cur:
if self.dfs(next_nums, x, save): return True
if len(nums) == 3 and self.dfs(next_nums, None, x): return True # Case (1 + 9) * (1 + 2)
return False
def is24(self, x):
return True if x == 24 or abs(24 - x) < 0.00001 else False
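# Quick check (illustrative): the two examples from the problem statement above.
if __name__ == "__main__":
    s = Solution()
    print s.judgePoint24([4, 1, 8, 7])  # expected True, e.g. (8 - 4) * (7 - 1) = 24
    print s.judgePoint24([1, 2, 1, 2])  # expected False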
|
gpl-3.0
| 7,296,170,151,600,796,000 | 41.672727 | 185 | 0.572646 | false | 3.566869 | false | false | false |
zoranzhao/NoSSim
|
NoS_Vgraph/core_util_plot.py
|
1
|
5568
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
def plot(srv_app, srv_lwip, cli_app, cli_lwip):
#srv_app = {0:[],1:[],2:[]}
#srv_lwip = {0:[],1:[],2:[]}
#cli_app = {0:[],1:[],2:[]}
#cli_lwip = {0:[],1:[],2:[]}
O2lwip=cli_lwip[2]
O2comp=cli_app[2]
O1lwip=cli_lwip[1]
O1comp=cli_app[1]
O0lwip=cli_lwip[0]
O0comp=cli_app[0]
colorsred = ['brown', 'red', 'tomato', 'lightsalmon']
colorsgreen = ['darkgreen', 'seagreen', 'limegreen', 'springgreen']
colorsblue =['navy', 'blue', 'steelblue', 'lightsteelblue']
hatches = ['//', '++', 'xxx', 'oo','\\\\\\', 'OO', '..' , '---', "**"]
label_size=15
font_size=15
#client
N = 3
width = 0.25 # the width of the bars
xtra_space = 0.02
ind = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind1 = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind2 = np.arange(N) + 2+(N+1) - (width*3+xtra_space*2)/2 # the x locations for the groups
ind3 = np.arange(N) + 2+N+1+N+1 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind = np.append(ind1, ind2)
ind = np.append(ind, ind3)
#ind = np.append(ind, ind4)
#ind = np.append(ind, ind5)
fig, ax = plt.subplots(2)
a1 = ax[0].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[0].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[0].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[0].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[0].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[0].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
OLevel = ["O-0", "O-1", "O-2", "O-3"]
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - lwIP", " - App."]
legend_size=16
plt.figlegend(
(
a1, a2,
b1, b2,
c1, c2
),
(
OLevel[2]+duration_type[1], OLevel[2]+duration_type[0],
OLevel[1]+duration_type[1], OLevel[1]+duration_type[0],
OLevel[0]+duration_type[1], OLevel[0]+duration_type[0]
),
scatterpoints=1,
loc='upper center',
ncol=3,
prop={'size':legend_size})
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[0].set_xticks( xticks )
ax[0].set_xticks( xticks_minor, minor=True )
ax[0].set_xticklabels( xlbls )
ax[0].set_xlim( 1, 13 )
ax[0].grid( 'off', axis='x' )
ax[0].grid( 'off', axis='x', which='minor' )
# vertical alignment of xtick labels
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[0].get_xticklabels( ), va ):
t.set_y( y )
ax[0].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
#ax.tick_params( axis='x', which='major', direction='out', length=10 )
ax[0].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[0].get_yticks()
ax[0].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#server
O2lwip=srv_lwip[2]
O2comp=srv_app[2]
O1lwip=srv_lwip[1]
O1comp=srv_app[1]
O0lwip=srv_lwip[0]
O0comp=srv_app[0]
a1 = ax[1].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[1].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[1].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[1].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[1].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[1].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - Communication", " - Computation"]
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[1].set_xticks( xticks )
ax[1].set_xticks( xticks_minor, minor=True )
ax[1].set_xticklabels( xlbls )
ax[1].set_xlim( 1, 13 )
ax[1].grid( 'off', axis='x' )
ax[1].grid( 'off', axis='x', which='minor' )
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[1].get_xticklabels( ), va ):
t.set_y( y )
ax[1].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
ax[1].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[1].get_yticks()
ax[1].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
# add some text for labels, title and axes ticks
ax[0].set_ylabel('Core Utilization', fontsize=label_size)
ax[0].set_xlabel('Client', fontsize=label_size)
ax[1].set_ylabel('Core Utilization', fontsize=label_size)
ax[1].set_xlabel('Server', fontsize=label_size)
ax[0].tick_params(axis='y', labelsize=font_size)
ax[1].tick_params(axis='y', labelsize=font_size)
ax[0].tick_params(axis='x', labelsize=font_size)
ax[1].tick_params(axis='x', labelsize=font_size)
plt.show()
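# Usage sketch (illustrative; each dict maps an optimization level (0/1/2) to nine
# utilization fractions, one per client-count/channel bar group; all values made up):
#   cli_app = {0: [0.30]*9, 1: [0.25]*9, 2: [0.20]*9}
#   cli_lwip = {0: [0.20]*9, 1: [0.18]*9, 2: [0.15]*9}
#   srv_app = {0: [0.40]*9, 1: [0.35]*9, 2: [0.30]*9}
#   srv_lwip = {0: [0.25]*9, 1: [0.22]*9, 2: [0.20]*9}
#   plot(srv_app, srv_lwip, cli_app, cli_lwip)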
|
bsd-3-clause
| -7,547,278,008,441,325,000 | 30.106145 | 126 | 0.604885 | false | 2.270799 | false | false | false |
PetroDE/control
|
control/service/buildable.py
|
1
|
5256
|
"""
Specialize Service for services that can't be containers. Images only club
"""
import logging
from os.path import abspath, dirname, isfile, join
from control.cli_builder import builder
from control.repository import Repository
from control.options import options
from control.service.service import ImageService
class Buildable(ImageService):
"""
Okay I lied. There are 3 kinds of services. The problem is that there are
base images that need to be built, but don't know enough to be long running
containers. Control doesn't control containers that you use like
executables. Control handles the starting of long running service daemons
in development and testing environments.
"""
service_options = {
'dockerfile',
'events',
'fromline',
} | ImageService.service_options
all_options = service_options
def __init__(self, service, controlfile):
super().__init__(service, controlfile)
self.logger = logging.getLogger('control.service.Buildable')
self.dockerfile = {'dev': '', 'prod': ''}
self.fromline = {'dev': '', 'prod': ''}
try:
self.events = service.pop('events')
except KeyError:
self.events = {}
self.logger.debug('No events defined')
try:
dkrfile = service.pop('dockerfile')
if isinstance(dkrfile, dict):
self.dockerfile = {
'dev': abspath(join(dirname(self.controlfile),
dkrfile['dev'])),
'prod': abspath(join(dirname(self.controlfile),
dkrfile['prod'])),
}
elif dkrfile == "":
self.dockerfile = {'dev': "", 'prod': ""}
else:
self.dockerfile = {
'dev': abspath(join(dirname(self.controlfile), dkrfile)),
'prod': abspath(join(dirname(self.controlfile), dkrfile)),
}
self.logger.debug('setting dockerfile %s', self.dockerfile)
except KeyError as e:
# Guess that there's a Dockerfile next to the Controlfile
dkrfile = join(abspath(dirname(self.controlfile)), 'Dockerfile')
devfile = dkrfile + '.dev'
prdfile = dkrfile + '.prod'
try:
self.dockerfile['dev'], self.dockerfile['prod'] = {
# devProdAreEmpty, DockerfileExists, DevProdExists
(True, True, False): lambda f, d, p: (f, f),
(True, False, True): lambda f, d, p: (d, p),
(True, False, False): lambda f, d, p: ('', ''),
# This list is sparsely populated because these are the
# only conditions that mean the values need to be guessed
}[(
not self.dockerfile['dev'] and not self.dockerfile['prod'],
isfile(dkrfile),
isfile(devfile) and isfile(prdfile)
)](dkrfile, devfile, prdfile)
self.logger.debug('setting dockerfile: %s', self.dockerfile)
except KeyError as e:
self.logger.warning(
'%s: problem setting dockerfile: %s missing',
self.service,
e)
if 'fromline' in service:
fline = service.pop('fromline')
if isinstance(fline, dict):
self.fromline = {
'dev': fline.get('dev', ''),
'prod': fline.get('prod', '')
}
else:
self.fromline = {
'dev': fline,
'prod': fline
}
# The rest of the options can be straight assigned
# for key, val in (
# (key, val)
# for key, val in service.items()
# if key in self.service_options):
# self.logger.debug('buildable assigning key %s value %s', key, val)
# self.__dict__[key] = val
if not self.service:
self.logger.debug('setting service name from guess')
self.service = Repository.match(self.image).image
self.logger.debug('Found Buildable %s', self.service)
def dump_build(self, prod=False, pretty=True):
"""dump out a CLI version of how this image would be built"""
rep = builder('build', pretty=pretty) \
.tag(self.image) \
.path(dirname(self.controlfile)) \
.file(self.dockerfile['prod'] if prod else self.dockerfile['dev']) \
.pull(options.pull) \
.rm(options.no_rm) \
.force_rm(options.force) \
.no_cache(not options.cache)
return rep
def buildable(self):
"""Check if the service is buildable"""
return self.dockerfile['dev'] or self.dockerfile['prod']
def dev_buildable(self):
"""Check if the service is buildable in a dev environment"""
return self.dockerfile['prod']
def prod_buildable(self):
"""Check if the service is buildable in a prod environment"""
return self.dockerfile['prod']
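# Usage sketch (illustrative; the service dict and Controlfile path are made up, and
# ImageService.__init__ is assumed to accept them as elsewhere in control):
#   svc = Buildable({'image': 'registry/example:latest', 'service': 'example'},
#                   '/project/Controlfile')
#   if svc.buildable():
#       print(svc.dump_build(pretty=False))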
|
mit
| -7,254,068,709,873,107,000 | 38.223881 | 80 | 0.53672 | false | 4.398326 | false | false | false |
blockstack/blockstack-server
|
integration_tests/blockstack_integration_tests/scenarios/name_preorder.py
|
1
|
2869
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Blockstack
~~~~~
copyright: (c) 2014-2015 by Halfmoon Labs, Inc.
copyright: (c) 2016 by Blockstack.org
This file is part of Blockstack
Blockstack is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Blockstack is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Blockstack. If not, see <http://www.gnu.org/licenses/>.
"""
import testlib
import virtualchain
import blockstack
import blockstack.blockstackd as blockstackd
wallets = [
testlib.Wallet( "5JesPiN68qt44Hc2nT8qmyZ1JDwHebfoh9KQ52Lazb1m1LaKNj9", 100000000000 ),
testlib.Wallet( "5KHqsiU9qa77frZb6hQy9ocV7Sus9RWJcQGYYBJJBb2Efj1o77e", 100000000000 ),
testlib.Wallet( "5Kg5kJbQHvk1B64rJniEmgbD83FpZpbw2RjdAZEzTefs9ihN3Bz", 100000000000 ),
testlib.Wallet( "5JuVsoS9NauksSkqEjbUZxWwgGDQbMwPsEfoRBSpLpgDX1RtLX7", 100000000000 ),
testlib.Wallet( "5KEpiSRr1BrT8vRD7LKGCEmudokTh1iMHbiThMQpLdwBwhDJB1T", 100000000000 )
]
consensus = "17ac43c1d8549c3181b200f1bf97eb7d"
def scenario( wallets, **kw ):
testlib.blockstack_namespace_preorder( "test", wallets[1].addr, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_reveal( "test", wallets[1].addr, 52595, 250, 4, [6,5,4,3,2,1,0,0,0,0,0,0,0,0,0,0], 10, 10, wallets[0].privkey )
testlib.next_block( **kw )
testlib.blockstack_namespace_ready( "test", wallets[1].privkey )
testlib.next_block( **kw )
resp = testlib.blockstack_name_preorder( "foo.test", wallets[2].privkey, wallets[3].addr )
testlib.next_block( **kw )
def check( state_engine ):
# not revealed, but ready
ns = state_engine.get_namespace_reveal( "test" )
if ns is not None:
return False
ns = state_engine.get_namespace( "test" )
if ns is None:
return False
if ns['namespace_id'] != 'test':
return False
# preordered
preorder = state_engine.get_name_preorder( "foo.test", virtualchain.make_payment_script(wallets[2].addr), wallets[3].addr )
if preorder is None:
return False
# paid fee
if preorder['op_fee'] < blockstack.lib.client.get_name_cost('foo.test', hostport='http://localhost:16264'):
print "{} < {}".format(preorder['op_fee'], blockstack.lib.client.get_name_cost('foo.test', hostport='http://localhost:16264'))
print "Insufficient fee"
return False
return True
|
gpl-3.0
| -3,451,106,283,767,778,000 | 35.782051 | 144 | 0.700941 | false | 2.939549 | true | false | false |
ValdisVitolins/espeak-ng
|
src/ucd-tools/tools/categories.py
|
8
|
8186
|
#!/usr/bin/python
# Copyright (C) 2012-2018 Reece H. Dunn
#
# This file is part of ucd-tools.
#
# ucd-tools is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ucd-tools is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ucd-tools. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import ucd
ucd_rootdir = sys.argv[1]
ucd_version = sys.argv[2]
unicode_chars = {}
for data in ucd.parse_ucd_data(ucd_rootdir, 'UnicodeData'):
for codepoint in data['CodePoint']:
unicode_chars[codepoint] = data['GeneralCategory']
if '--with-csur' in sys.argv:
for csur in ['Klingon']:
for data in ucd.parse_ucd_data('data/csur', csur):
for codepoint in data['CodePoint']:
unicode_chars[codepoint] = data['GeneralCategory']
# This map is a combination of the information in the UnicodeData and Blocks
# data files. It is intended to reduce the number of character tables that
# need to be generated.
category_sets = [
(ucd.CodeRange('000000..00D7FF'), None, 'Multiple Blocks'),
(ucd.CodeRange('00D800..00DFFF'), 'Cs', 'Surrogates'),
(ucd.CodeRange('00E000..00F7FF'), 'Co', 'Private Use Area'),
(ucd.CodeRange('00F800..02FAFF'), None, 'Multiple Blocks'),
(ucd.CodeRange('02FB00..0DFFFF'), 'Cn', 'Unassigned'),
(ucd.CodeRange('0E0000..0E01FF'), None, 'Multiple Blocks'),
(ucd.CodeRange('0E0200..0EFFFF'), 'Cn', 'Unassigned'),
(ucd.CodeRange('0F0000..0FFFFD'), 'Co', 'Plane 15 Private Use'),
(ucd.CodeRange('0FFFFE..0FFFFF'), 'Cn', 'Plane 15 Private Use'),
(ucd.CodeRange('100000..10FFFD'), 'Co', 'Plane 16 Private Use'),
(ucd.CodeRange('10FFFE..10FFFF'), 'Cn', 'Plane 16 Private Use'),
]
# These categories have many pages consisting of just this category:
# Cn -- Unassigned
# Lo -- CJK Ideographs
special_categories = ['Cn', 'Co', 'Lo', 'Sm', 'So']
category_tables = {}
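# Split each multi-block range into 256-codepoint pages; a page whose codepoints all
# share one of the special categories is collapsed to a single shared table entry.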
for codepoints, category, comment in category_sets:
if not category:
table = {}
table_entry = None
table_codepoint = None
table_category = None
for i, codepoint in enumerate(codepoints):
try:
category = unicode_chars[codepoint]
except KeyError:
category = 'Cn' # Unassigned
if (i % 256) == 0:
if table_entry:
if table_category in special_categories:
table[table_codepoint] = table_category
elif table_category:
raise Exception('%s only table not in the special_categories list.' % table_category)
else:
table[table_codepoint] = table_entry
table_entry = []
table_codepoint = codepoint
table_category = category
if category != table_category:
table_category = None
table_entry.append(category)
if table_entry:
if table_category in special_categories:
table[table_codepoint] = table_category
else:
table[table_codepoint] = table_entry
category_tables['%s_%s' % (codepoints.first, codepoints.last)] = table
if __name__ == '__main__':
sys.stdout.write("""/* Unicode General Categories
*
* Copyright (C) 2012-2018 Reece H. Dunn
*
* This file is part of ucd-tools.
*
* ucd-tools is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ucd-tools is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ucd-tools. If not, see <http://www.gnu.org/licenses/>.
*/
/* NOTE: This file is automatically generated from the UnicodeData.txt file in
* the Unicode Character database by the ucd-tools/tools/categories.py script.
*/
#include "ucd/ucd.h"
#include <stddef.h>
#define Cc UCD_CATEGORY_Cc
#define Cf UCD_CATEGORY_Cf
#define Cn UCD_CATEGORY_Cn
#define Co UCD_CATEGORY_Co
#define Cs UCD_CATEGORY_Cs
#define Ll UCD_CATEGORY_Ll
#define Lm UCD_CATEGORY_Lm
#define Lo UCD_CATEGORY_Lo
#define Lt UCD_CATEGORY_Lt
#define Lu UCD_CATEGORY_Lu
#define Mc UCD_CATEGORY_Mc
#define Me UCD_CATEGORY_Me
#define Mn UCD_CATEGORY_Mn
#define Nd UCD_CATEGORY_Nd
#define Nl UCD_CATEGORY_Nl
#define No UCD_CATEGORY_No
#define Pc UCD_CATEGORY_Pc
#define Pd UCD_CATEGORY_Pd
#define Pe UCD_CATEGORY_Pe
#define Pf UCD_CATEGORY_Pf
#define Pi UCD_CATEGORY_Pi
#define Po UCD_CATEGORY_Po
#define Ps UCD_CATEGORY_Ps
#define Sc UCD_CATEGORY_Sc
#define Sk UCD_CATEGORY_Sk
#define Sm UCD_CATEGORY_Sm
#define So UCD_CATEGORY_So
#define Zl UCD_CATEGORY_Zl
#define Zp UCD_CATEGORY_Zp
#define Zs UCD_CATEGORY_Zs
#define Ii UCD_CATEGORY_Ii
/* Unicode Character Data %s */
""" % ucd_version)
for category in special_categories:
sys.stdout.write('\n')
sys.stdout.write('static const uint8_t categories_%s[256] =\n' % category)
sys.stdout.write('{')
for i in range(0, 256):
if (i % 16) == 0:
sys.stdout.write('\n\t/* %02X */' % i)
sys.stdout.write(' %s,' % category)
sys.stdout.write('\n};\n')
for codepoints, category, comment in category_sets:
if not category:
tables = category_tables['%s_%s' % (codepoints.first, codepoints.last)]
for codepoint in sorted(tables.keys()):
table = tables[codepoint]
if table in special_categories:
continue
sys.stdout.write('\n')
sys.stdout.write('static const uint8_t categories_%s[256] =\n' % codepoint)
sys.stdout.write('{')
for i, category in enumerate(table):
if (i % 16) == 0:
sys.stdout.write('\n\t/* %02X */' % i)
sys.stdout.write(' %s,' % category)
sys.stdout.write('\n};\n')
for codepoints, category, comment in category_sets:
if not category:
table_index = '%s_%s' % (codepoints.first, codepoints.last)
sys.stdout.write('\n')
sys.stdout.write('static const uint8_t *categories_%s[] =\n' % table_index)
sys.stdout.write('{\n')
for codepoint, table in sorted(category_tables[table_index].items()):
if isinstance(table, str):
sys.stdout.write('\tcategories_%s, /* %s */\n' % (table, codepoint))
else:
sys.stdout.write('\tcategories_%s,\n' % codepoint)
sys.stdout.write('};\n')
sys.stdout.write('\n')
sys.stdout.write('ucd_category ucd_lookup_category(codepoint_t c)\n')
sys.stdout.write('{\n')
for codepoints, category, comment in category_sets:
if category:
sys.stdout.write('\tif (c <= 0x%s) return %s; /* %s : %s */\n' % (codepoints.last, category, codepoints, comment))
else:
sys.stdout.write('\tif (c <= 0x%s) /* %s */\n' % (codepoints.last, codepoints))
sys.stdout.write('\t{\n')
sys.stdout.write('\t\tconst uint8_t *table = categories_%s_%s[(c - 0x%s) / 256];\n' % (codepoints.first, codepoints.last, codepoints.first))
sys.stdout.write('\t\treturn (ucd_category)table[c % 256];\n')
sys.stdout.write('\t}\n')
sys.stdout.write('\treturn Ii; /* Invalid Unicode Codepoint */\n')
sys.stdout.write('}\n')
sys.stdout.write("""
ucd_category_group ucd_get_category_group_for_category(ucd_category c)
{
switch (c)
{
case Cc: case Cf: case Cn: case Co: case Cs:
return UCD_CATEGORY_GROUP_C;
case Ll: case Lm: case Lo: case Lt: case Lu:
return UCD_CATEGORY_GROUP_L;
case Mc: case Me: case Mn:
return UCD_CATEGORY_GROUP_M;
case Nd: case Nl: case No:
return UCD_CATEGORY_GROUP_N;
case Pc: case Pd: case Pe: case Pf: case Pi: case Po: case Ps:
return UCD_CATEGORY_GROUP_P;
case Sc: case Sk: case Sm: case So:
return UCD_CATEGORY_GROUP_S;
case Zl: case Zp: case Zs:
return UCD_CATEGORY_GROUP_Z;
case Ii:
default:
return UCD_CATEGORY_GROUP_I;
}
}
ucd_category_group ucd_lookup_category_group(codepoint_t c)
{
return (ucd_category_group)ucd_get_category_group_for_category(ucd_lookup_category(c));
}
""")
|
gpl-3.0
| -2,705,840,602,450,405,000 | 33.108333 | 143 | 0.691669 | false | 2.973483 | false | false | false |
rgllm/uminho
|
04/CN/TP3/src/src/parser/PsoTools.py
|
1
|
4783
|
import itertools
import json
import matplotlib.pyplot as plt
from matplotlib import style
import os
style.use('ggplot')
import numpy as np
from pprint import pprint
from os.path import basename
xrange=range
class PsoTools(object):
def __init__(self):
pass
# Convert a data raw file to a json file
def rawToJson(self, inputFilePath, outputFilePath):
inFile = open(inputFilePath, mode='r')
outFile = open(outputFilePath, mode='w')
meta_data = dict.fromkeys(['nb_customers', 'nb_depots',
'vehicle_cap', 'vehicle_cost', 'cost_type'])
cust_dict = dict.fromkeys(['x', 'y', 'demand'])
dep_dict = dict.fromkeys(['x', 'y', 'capacity'])
customers = {}
depots = {}
# Number of customers and available depots
nb_customers = int(inFile.readline())
nb_depots = int(inFile.readline())
meta_data['nb_customers'] = nb_customers
meta_data['nb_depots'] = nb_depots
inFile.readline() # Empty line
# Depots cordinates
for i, line in enumerate(inFile):
if i < nb_depots:
x = float(line.split()[0])
y = float(line.split()[1])
depots['d'+str(i)] = {}
depots['d'+str(i)]['x'] = x
depots['d'+str(i)]['y'] = y
else:
i=i-1
break
# Customers cordinates and vehicule capacity
for i, line in enumerate(inFile):
if i < nb_customers:
x = float(line.split()[0])
y = float(line.split()[1])
customers['c'+str(i)] = {}
customers['c'+str(i)]['x'] = x
customers['c'+str(i)]['y'] = y
else:
break
# Vehicules and depots capacity
for i, line in enumerate(inFile):
if i == 0:
vehicle_cap = float(line)
meta_data['vehicle_cap'] = vehicle_cap
elif i == 1:
pass
elif i < nb_depots+2:
depot_cap = float(line)
depots['d'+str(i-2)]['capacity'] = depot_cap
else:
break
# Customers demands
for i, line in enumerate(inFile):
if i < nb_customers:
demand = float(line)
customers['c'+str(i)]['demand'] = demand
else:
break
# Depots openning costs
for i, line in enumerate(inFile):
if i < nb_depots:
openning_cost = float(line)
depots['d'+str(i)]['opening_cost'] = openning_cost
elif i == nb_depots:
pass
elif i == nb_depots+1:
vehicle_cost = float(line)
meta_data['vehicle_cost'] = vehicle_cost
elif i == nb_depots+2:
pass
elif i == nb_depots+3:
cost_type = float(line)
meta_data['cost_type'] = cost_type
else:
break
final_output = {}
final_output['customers'] = customers
final_output['depots'] = depots
final_output['meta_data'] = meta_data
json.dump(final_output, outFile, indent=4)
inFile.close()
outFile.close()
# Plot the customers on the map
def plotCustomers(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
coords_cust = np.zeros(shape=(nb_customers,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot the depots on the map
def plotDepots(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_depots = data['meta_data']['nb_depots']
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot both depots and customers on the map
def plotAll(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
nb_depots = data['meta_data']['nb_depots']
coords_cust = np.zeros(shape=(nb_customers,2))
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
filename = str(basename(os.path.splitext(jsonInputFile)[0]) + '.pdf')
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='s', s=10, linewidth=5)
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='8', s=10, linewidth=5)
plt.savefig(filename, format='pdf')
#~ plt.show()
|
mit
| 7,930,167,950,086,020,000 | 25.870787 | 83 | 0.612795 | false | 2.763143 | false | false | false |
OpenTechFund/WebApp
|
opentech/public/funds/migrations/0004_fundpagerelatedpage.py
|
1
|
1183
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2018-01-12 15:39
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0040_page_draft_title'),
('public_funds', '0003_icon_and_related_pages'),
]
operations = [
migrations.CreateModel(
name='FundPageRelatedPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page')),
('source_page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_pages', to='public_funds.FundPage')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
|
gpl-2.0
| 5,493,090,841,830,758,000 | 37.16129 | 168 | 0.607777 | false | 3.996622 | false | false | false |
vvvityaaa/tornado-challenger
|
challenge_manager.py
|
1
|
4420
|
import os.path
import tornado.ioloop
import tornado.web
import tornado.options
import tornado.httpserver
import mongoengine
from models import Challenge, ChallengePoint
from tornado.options import define, options
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
def __init__(self):
handlers = [
(r'/', IndexHandler),
(r'/challenges/$', ChallengeListHandler),
(r'/challenges/(\w+)$', ChallengeDetailHandler),
(r'/edit/(\w+)$', EditHandler),
(r'/add', EditHandler),
(r'/add_point/(\w+)$', EditPointHandler),
(r'/edit_point/(\w+)x(\d+)$', EditPointHandler),#contains url and key parameter
]
settings = dict(
template_path = os.path.join(os.path.dirname(__file__),'templates'),
static_path = os.path.join(os.path.dirname(__file__),'static'),
debug = True
)
mongoengine.connect('challenger') #connection to DB named 'challenger via mongoengine driver
tornado.web.Application.__init__(self,handlers,**settings)
class IndexHandler(tornado.web.RequestHandler):
def get(self):
self.render('base.html')
class ChallengeListHandler(tornado.web.RequestHandler):
def get(self):
self.render('challenge_list.html', challenges=Challenge.objects)
class ChallengeDetailHandler(tornado.web.RequestHandler):
def get(self, url):
self.render('challenge_detail.html', challenge=Challenge.objects.get(url=url))
class EditHandler(tornado.web.RequestHandler):
'''
Handles both adding and editing of Challenge model.
You can create new Challenge instance via form, but you need to create at least one ChallengePoint to send the form.
'''
def get(self, url=None):
'''
If in the url you have url keyword, then it is the argument by get method and it will be only edited
'''
if url:
self.render('edit_challenge.html', challenge=Challenge.objects.get(url=url))
else:
self.render('add.html')
def post(self, url=None):
'''
If url, then model will be only edited.
'''
challenge = dict()
challenge_fields = ['header', 'url', 'date_start', 'date_end']
if url:
challenge = Challenge.objects.get(url=url) #gets challenge object parameters to edit them
for field in challenge_fields:
challenge[field] = self.get_argument(field, None)
if url:
challenge.save()
else:
point = dict()
point_fields=['title', 'key', 'done', 'required_time']
for field in point_fields:
point[field] = self.get_argument(field, None)
challenge['points'] = [ChallengePoint(**point)] #you have to create at least one point entry to send the form
Challenge(**challenge).save()#you call new Challenge instance giving it arguments taken from dictionary and saves it
self.redirect('/challenges/')
class EditPointHandler(tornado.web.RequestHandler):
'''
Implements editting and adding of challenge points.
If key is fetched by url, then point will be just edited
'''
def get(self, url, key = None):
if key:
self.render('edit_challenge_point.html',
challenge_point = Challenge.objects.get(url=url).points.get(key=key))
else:
self.render('add_challenge_point.html')
def post(self, url, key = None):
challenge_point = dict()
challenge_point_fields = ['title','key','done',
'required_time']
if key:
challenge = Challenge.objects.get(url=url)
challenge_point = challenge.points.get(key=key)
for field in challenge_point_fields:
challenge_point[field] = self.get_argument(field, None)
if key:
challenge.points.save()
else:
c = Challenge.objects.get(url=url).points.create(**challenge_point)
c.save()
def main():
tornado.options.parse_command_line()
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
|
gpl-2.0
| -2,984,514,721,142,183,400 | 33.80315 | 128 | 0.610181 | false | 4.126984 | false | false | false |
reebalazs/gf.rejuice
|
setup.py
|
1
|
1731
|
__version__ = '0.3'
import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
README = open(os.path.join(here, 'docs/README.txt')).read()
CHANGES = open(os.path.join(here, 'CHANGES.txt')).read()
setup(
name = 'gf.rejuice',
version = __version__,
description = '`gf.rejuice` provides additional tools for developers to use `Juicer` '
'for the compression of Javascript and CSS resources, '
'in the context of python web applications that run via WSGI.',
long_description = README + '\n\n' + CHANGES,
classifiers = [
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
keywords = 'web middleware wsgi css js juicer merging minifying development',
author = "Balazs Ree",
author_email = "[email protected]",
url = "https://launchpad.net/gf.rejuice",
license = "GPL",
packages = find_packages(),
include_package_data = True,
namespace_packages = ['gf'],
zip_safe = False,
install_requires=[
'setuptools',
'lxml >= 2.1.1',
'WebOb',
],
test_suite = "gf.rejuice",
tests_require=[
'BeautifulSoup',
'setuptools',
'lxml >= 2.1.1',
'WebOb',
],
entry_points = """\
[paste.filter_app_factory]
develjuice = gf.rejuice.develjuice:make_middleware
[console_scripts]
rejuice = gf.rejuice.rejuice_script:main
"""
)
|
gpl-2.0
| -4,068,187,978,773,046,300 | 29.910714 | 90 | 0.574235 | false | 3.690832 | false | false | false |
munin/munin
|
munin/mod/launch.py
|
1
|
2955
|
"""
Loadable.Loadable subclass
"""
# This file is part of Munin.
# Munin is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# Munin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Munin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# This work is Copyright (C)2006 by Andreas Jacobsen
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
import re
import datetime
from munin import loadable
class launch(loadable.loadable):
def __init__(self, cursor):
super().__init__(cursor, 1)
self.paramre = re.compile(r"^\s*(\S+|\d+)\s+(\d+)")
self.usage = self.__class__.__name__ + " <class|eta> <land_tick>"
self.helptext = [
"Calculate launch tick, launch time, prelaunch tick and prelaunch modifier for a given ship class or eta, and land tick."
]
self.class_eta = {"fi": 8, "co": 8, "fr": 9, "de": 9, "cr": 10, "bs": 10}
def execute(self, user, access, irc_msg):
if access < self.level:
irc_msg.reply("You do not have enough access to use this command")
return 0
m = self.paramre.search(irc_msg.command_parameters)
if not m:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
eta = m.group(1)
land_tick = int(m.group(2))
if eta.lower() in list(self.class_eta.keys()):
eta = self.class_eta[eta.lower()]
else:
try:
eta = int(eta)
except ValueError:
irc_msg.reply("Usage: %s" % (self.usage,))
return 0
current_tick = self.current_tick(irc_msg.round)
current_time = datetime.datetime.utcnow()
launch_tick = land_tick - eta
launch_time = current_time + datetime.timedelta(
hours=(launch_tick - current_tick)
)
prelaunch_tick = land_tick - eta + 1
prelaunch_mod = launch_tick - current_tick
irc_msg.reply(
"eta %d landing pt %d (currently %d) must launch at pt %d (%s), or with prelaunch tick %d (currently %+d)"
% (
eta,
land_tick,
current_tick,
launch_tick,
(launch_time.strftime("%m-%d %H:55")),
prelaunch_tick,
prelaunch_mod,
)
)
return 1
|
gpl-2.0
| 4,536,700,690,433,954,300 | 31.833333 | 133 | 0.59357 | false | 3.847656 | false | false | false |
todor943/mapEngine
|
MapApi/views.py
|
1
|
3383
|
import json
import pprint
import time
import datetime
import django.core.serializers
from django.contrib.auth import *
from django.contrib.auth.decorators import *
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.serializers import geojson
from django.core import *
from django.http import *
from django.shortcuts import *
from django.views.decorators.csrf import csrf_exempt
from django.views.generic import *
from rest_framework import parsers, renderers
from rest_framework.authtoken.models import Token
# from rest_framework.authtoken.models import Token
from rest_framework.authtoken.serializers import AuthTokenSerializer
from rest_framework_gis.serializers import GeoFeatureModelSerializer,GeoFeatureModelListSerializer
from rest_framework_gis.serializers import ListSerializer, ModelSerializer
from rest_framework.serializers import Serializer, FloatField, CharField
from rest_framework.serializers import DecimalField, IntegerField, DateTimeField
from rest_framework.response import Response
from rest_framework.views import APIView
import MapApp
class FakeApiView(View):
def get(self, request):
data = django.core.serializers.serialize(
"geojson", MapApp.models.MapEntity.objects.all()
)
return HttpResponse(data)
def post(self, request, *args, **kwargs):
data = {}
if request.user.is_authenticated():
requestData = json.loads(request.POST['jsonData'])
now = time.time()
if 'position' not in requestData:
return JsonResponse({})
request.session['location'] = requestData['position']
request.session['mapOptions'] = requestData['mapOptions']
request.session['lastUpdate'] = time.time()
radius = requestData['radius']
            searchPnt = self.locationToPoint(requestData['position'])
# now = datetime.datetime.now()
# earlier = now - datetime.timedelta(hours=1)
time_filter = datetime.datetime.now() - datetime.timedelta(hours = 1)
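            # Only entities within `radius` of the search point that were published
            # within the last hour are returned.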
data = MapApp.models.MapEntity.objects.filter(
geoLocationField__distance_lte=(searchPnt, radius),
publishDate__gte=time_filter
)
data = django.core.serializers.serialize("geojson", data)
print ("Updated the user's map state in session")
# print request.user.get_username()
return HttpResponse(data)
def locationToPoint(self, position):
return GEOSGeometry('POINT(' + str(position['lng']) + ' '+ str(position['lat']) + ')', srid=4326)
def handleRequest(self, request):
pass
def getEventsInRadius(self, centerPnt, distance):
pass
def updateSession(self, request):
pass
class ObtainAuthToken(APIView):
throttle_classes = ()
permission_classes = ()
parser_classes = (parsers.FormParser, parsers.MultiPartParser, parsers.JSONParser,)
renderer_classes = (renderers.JSONRenderer,)
serializer_class = AuthTokenSerializer
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data)
serializer.is_valid(raise_exception=True)
user = serializer.validated_data['user']
# Token.objects
# token, created = Token.objects.create(user=user)
doDelete = True
try:
currentToken = Token.objects.get(user=user)
# TODO
except Exception:
doDelete = False
if doDelete:
print("Renewing user token")
currentToken.delete()
else :
print("Attempting to create new user token")
token = Token.objects.create(user=user)
return Response({'token': token.key})
|
apache-2.0
| -7,913,719,095,525,884,000 | 30.616822 | 101 | 0.754064 | false | 3.673181 | false | false | false |
LamCiuLoeng/bbb
|
rpac/model/interface.py
|
1
|
2192
|
# -*- coding: utf-8 -*-
import json
from datetime import datetime as dt
from sqlalchemy import Column
from sqlalchemy.types import Integer, DateTime, Text
from sqlalchemy.sql.expression import and_, desc
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import synonym
from tg import request
from rpac.model import qry
__all__ = ['SysMixin', ]
def getUserID():
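    # Fall back to user id 1 when the request carries no authenticated identity.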
user_id = 1
try:
user_id = request.identity["user"].user_id
finally:
return user_id
class SysMixin( object ):
remark = Column( 'remark', Text, doc = u'Remark' )
createTime = Column( 'create_time', DateTime, default = dt.now )
updateTime = Column( 'update_time', DateTime, default = dt.now )
createById = Column( 'create_by_id', Integer, default = getUserID )
updateById = Column( 'update_by_id', Integer, default = getUserID )
sysCreateTime = Column( 'system_create_time', DateTime, default = dt.now )
sysUpdateTime = Column( 'system_update_time', DateTime, default = dt.now, onupdate = dt.now )
active = Column( 'active', Integer, default = 0 ) # 0 is active ,1 is inactive
@property
def createBy( self ):
from auth import User
return qry( User ).get( self.createById )
@property
def updateBy( self ):
from auth import User
return qry( User ).get( self.updateById )
@property
def approveBy( self ):
from auth import User
return qry( User ).get( self.approveById )
def _getAttachment( self ):
from logic import FileObject
ids = filter( bool, self._attachment.split( "|" ) )
if not ids : return []
return qry( FileObject ).filter( and_( FileObject.active == 0, FileObject.id.in_( ids ) ) ).order_by( FileObject.id )
def _setAttachment( self, v ):
ids = None
if v :
if type( v ) == list:
ids = "|".join( map( unicode, v ) )
elif isinstance( v, basestring ):
ids = v
self._attachment = ids
@declared_attr
def attachment( self ): return synonym( '_attachment', descriptor = property( self._getAttachment, self._setAttachment ) )
|
mit
| -8,494,137,192,439,039,000 | 29.027397 | 126 | 0.626825 | false | 3.872792 | false | false | false |
0111001101111010/cs595-f13
|
assignment2/q3/carbondate-master/getFirstAppearanceInArchives.py
|
1
|
7051
|
import re
import time
import urllib2
import os
import sys
import datetime
import urllib
import simplejson
import calendar
import commands
import math
from datetime import datetime
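# getMementos queries the Memento aggregator's TimeMap for a URI and returns a list of
# {type, category, time, link} dicts; the archive type is inferred from the memento host.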
def getMementos(uri):
uri = uri.replace(' ', '')
orginalExpression = re.compile( r"<http://[A-Za-z0-9.:=/%-_ ]*>; rel=\"original\"," )
mementoExpression = re.compile( r"<http://[A-Za-z0-9.:=/&,%-_ \?]*>;rel=\"(memento|first memento|last memento|first memento last memento|first last memento)\";datetime=\"(Sat|Sun|Mon|Tue|Wed|Thu|Fri), \d{2} (Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec) (19|20)\d\d \d\d:\d\d:\d\d GMT\"" )
zeroMementoExpression = re.compile(r"Resource: http://[A-Za-z0-9.:=/&,%-_ ]*")
baseURI = 'http://mementoproxy.cs.odu.edu/aggr/timemap/link/'
memento_list = []
try:
search_results = urllib.urlopen(baseURI+uri)
the_page = search_results.read()
timemapList = the_page.split('\n')
count = 0
for line in timemapList:
if count <= 1:
if line.find('Resource not in archive') > -1:
result = zeroMementoExpression.search( line )
count = count + 1
continue
elif count == 2:
result = orginalExpression.search( line )
if result:
originalResult = result.group(0)
originalUri = originalResult[1:len(originalResult)-17]
else:
if(line.find("</memento")>0):
line = line.replace("</memento", "<http://api.wayback.archive.org/memento")
loc = line.find('>;rel="')
tofind = ';datetime="'
loc2 = line.find(tofind)
if(loc!=-1 and loc2!=-1):
mementoURL = line[2:loc]
timestamp = line[loc2+len(tofind):line.find('"',loc2+len(tofind)+3)]
epoch = int(calendar.timegm(time.strptime(timestamp, '%a, %d %b %Y %H:%M:%S %Z')))
day_string = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(epoch))
uri = mementoURL
cdlib = 'webarchives.cdlib.org'
archiefweb = 'enterprise.archiefweb.eu'
webARchive= 'api.wayback.archive.org'
yahoo1 = 'uk.wrs.yahoo.com'
yahoo2 = 'rds.yahoo.com'
yahoo3 = 'wrs.yahoo.com'
diigo = 'www.diigo.com'
bing = 'cc.bingj.com'
wayback = 'wayback.archive-it.org'
webArchiveNationalUK = 'webarchive.nationalarchives.gov.uk'
webHarvest = 'webharvest.gov'
webArchiveOrgUK = 'www.webarchive.org.uk'
webCitation = 'webcitation.org'
mementoWayBack='memento.waybackmachine.org'
type = ''
category = ''
# @type uri str
if (uri.find(webARchive)!=-1):
type = 'Internet Archive'
category = 'IA'
elif (uri.find(yahoo1)!=-1 or uri.find(yahoo2)!=-1 or uri.find(yahoo3)!=-1):
type = 'Yahoo'
category = 'SE'
elif (uri.find(diigo)!=-1):
type = 'diigo'
category = 'Others'
elif (uri.find(bing)!=-1):
type = 'Bing'
category = 'SE'
elif (uri.find(wayback)!=-1):
type = 'Archive-It'
category = 'Others'
elif (uri.find(webArchiveNationalUK)!=-1):
type = 'UK National Archive'
category = 'Others'
elif (uri.find(webHarvest)!=-1):
type = 'Web Harvest'
category = 'Others'
elif (uri.find(webArchiveOrgUK)!=-1):
type = 'UK Web Archive'
category = 'Others'
elif (uri.find(webCitation)!=-1):
type = 'Web Citation'
category = 'Others'
elif (uri.find(cdlib)!=-1):
type = 'CD Lib'
category = 'Others'
elif (uri.find(archiefweb)!=-1):
type = 'ArchiefWeb'
category = 'Others'
elif (uri.find(mementoWayBack)!=-1):
type = 'Wayback Machine'
category = 'Others'
else:
type = 'Not Known'
category = 'Others'
memento = {}
memento["type"] = type
memento["category"] = category
memento["time"] = day_string
memento["link"] = mementoURL
memento["link"] = urllib.quote(memento["link"])
memento["link"] = memento["link"].replace("http%3A//", "http://")
memento["link"] = memento["link"][memento["link"].find("http://"):]
memento_list.append(memento)
else:
pass
count = count + 1
except urllib2.URLError:
pass
return memento_list
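# isInPage fetches an archived page with curl and reports whether `url` occurs in it,
# along with the X-Archive-Orig-Last-modified (or -Date) header as an ISO timestamp.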
def isInPage(url,page):
co = 'curl -i --silent -L -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.112 Safari/534.30" "'+page+'"'
page = commands.getoutput(co)
loc = page.find(url)
date = ""
if(loc==-1):
return False, date
to_find = "X-Archive-Orig-Last-modified: "
loc = page.find(to_find)
if(loc !=-1):
end = page.find("\r", loc)
date = page[loc+len(to_find):end]
date = date.strip()
if(date ==""):
to_find = "X-Archive-Orig-Date: "
loc = page.find(to_find)
if(loc !=-1):
end = page.find("\r", loc)
date = page[loc+len(to_find):end]
date = date.strip()
epoch = int(calendar.timegm(time.strptime(date, '%a, %d %b %Y %H:%M:%S %Z')))
date = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(epoch))
return True, date
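# getFirstAppearance probes the memento list binary-search style for the earliest
# archived copy of `inurl` whose content contains `url`; it returns that memento's
# date, or "" if no such memento is found.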
def getFirstAppearance(url, inurl):
try:
mementos = getMementos(inurl)
if(len(mementos) == 0):
return ""
start = 0
end = len(mementos)
previous = -1
i = 0
foundbefore = False
count = 0
for mem in mementos:
res, date = isInPage(url,mem["link"])
if(res==True):
break
while(True):
res, date = isInPage(url,mementos[i]["link"])
if(res==True and i==0):
return date
if(int(math.fabs(previous-i))==0):
return ""
if( (res==True and int(math.fabs(previous-i))==1 and foundbefore == False) or (res==False and int(math.fabs(previous-i))==1 and foundbefore == True) ):
return date
previous = i
if(res == False):
start = i
i = (end-start)/2 + start
foundbefore = False
else:
end = i
i = (end-start)/2 + start
foundbefore = True
count = count + 1
except:
print sys.exc_info()
|
mit
| -4,108,445,885,518,813,000 | 32.259434 | 296 | 0.494823 | false | 3.330657 | false | false | false |
domoran/dxlparser
|
dxlparser/test/Preprocessor_Test.py
|
1
|
1393
|
# -*- coding: utf-8 -*-
from dxlparser import DXLPreprocessor
def preprocess(data, expected):
result = DXLPreprocessor().preprocess(data)
if not result == expected:
print ("Expected: |" + expected + "|\nObserved: |" + result + "|\n")
assert result == expected
testdata = [
("", ""), # empty string
("Hallo \n", "Hallo \n"), # simple statement
("Hallo // single line Comment\nSecond Line", "Hallo \nSecond Line"), # normal single line comment
("Hallo // single line Comment-\nSecond Line", "Hallo Second Line"), # single line comment ending with - lf
("Hallo // single line Comment-\r\nSecond Line", "Hallo Second Line"), # single line comment ending with - cr lf
("Hallo // single line Comment- \r\nSecond Line", "Hallo \nSecond Line"), # single line comment with minus in middle
("Multi/*Line*/Comment", "MultiComment"), # multi line comment 1
("Multi/*Li/*\nne*/Comment", "MultiComment"), # multi line comment 2
("Multi\n/*\nne*/\r\nComment", "Multi\n\r\nComment"), # multi line comment 2
# real code test
("""
int c = 4 /* important */
string s = "some text" //-
"more text"
int d = 5;""",
"""
int c = 4
string s = "some text" "more text"
int d = 5;"""
),
]
def test_preprocessor():
for data, expected in testdata:
yield preprocess, data, expected
|
gpl-3.0
| -1,977,564,771,191,190,500 | 31.214286 | 120 | 0.601579 | false | 3.491228 | false | false | false |
FMNSSun/PyPLUSPacket
|
pluspacket/packet.py
|
1
|
8518
|
import struct
_fmt_u64 = ">Q"
_fmt_u32 = ">L"
_magic_shift = 4
_flags_mask = 0x0F
_default_magic = 0xd8007ff
_min_packet_len = 20
_l_mask = 0x08
_r_mask = 0x04
_s_mask = 0x02
_x_mask = 0x01
_cat_pos = (4, 12)
_psn_pos = (12, 16)
_pse_pos = (16, 20)
_magic_pos = (0, 4)
_udp_header_len = 8
_pcf_type_plus_payload = 0xFF
PCF_INTEGRITY_FULL = 0x03
PCF_INTEGRITY_HALF = 0x02
PCF_INTEGRITY_QUARTER = 0x01
PCF_INTEGRITY_ZERO = 0x00
def _get_u32(s):
"""
Returns s -> u32
"""
return struct.unpack(_fmt_u32, s)[0]
def _get_u64(s):
"""
Returns s -> u64
"""
return struct.unpack(_fmt_u64, s)[0]
def _put_u64(i, buf):
"""
Writes an u64
"""
buf += struct.pack(_fmt_u64, i)
def _put_u32(i, buf):
"""
Writes an u32
"""
buf += struct.pack(_fmt_u32, i)
def get_psn(buf):
"""
Extracts PSN out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u32(buf[_psn_pos[0] : _psn_pos[1]])
def get_pse(buf):
"""
Extracts PSE out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u32(buf[_pse_pos[0] : _pse_pos[1]])
def get_cat(buf):
"""
Extracts CAT out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u64(buf[_cat_pos[0] : _cat_pos[1]])
def get_magic(buf):
"""
Extracts Magic out of a buffer. It's the caller's responsibility
to make sure that buffer is large enough.
"""
return _get_u32(buf[_magic_pos[0] : _magic_pos[1]]) >> _magic_shift
def get_flags(buf):
"""
Returns the flags as ORed bits.
"""
return _get_u32(buf[_magic_pos[0] : _magic_pos[1]]) & _flags_mask
def get_l(buf):
"""
Returns True if L is set, otherwise False
"""
return bool(get_flags(buf) & _l_mask)
def get_r(buf):
"""
Returns True if R is set, otherwise False
"""
return bool(get_flags(buf) & _r_mask)
def get_s(buf):
"""
Returns True if S is set, otherwise False
"""
return bool(get_flags(buf) & _s_mask)
def get_x(buf):
"""
Returns True if X is set, otherwise False
"""
return bool(get_flags(buf) & _x_mask)
def is_extended_packet(buf):
"""
Just an alias for get_x.
"""
return get_x(buf)
def parse_packet(buf):
"""
Parses a packet completely. This is a wrapper for the from_bytes method
of the Packet class.
"""
return Packet().from_bytes(buf)
def detect_plus_in_udp(buf):
"""
Tries to detect the presence of a PLUS header in UDP (incl. header)
"""
if len(buf) < _udp_header_len:
raise ValueError("Buffer too small. UDP header is at least 8 bytes long.")
udp_payload = buf[_udp_header_len:]
return detect_plus(udp_payload)
def detect_plus(buf):
"""
Tries to detect the presence of a PLUS header in payload (excl. UDP header)
"""
if len(buf) < _min_packet_len:
# Technically the magic value could be present here but if the packet
# is this small then there can't be a complete basic header present and
# this is best counted as 'not plus'.
return False
magic = get_magic(buf)
return magic == _default_magic
def _any(xs):
for x in xs:
if x:
return True
return False
def new_basic_packet(l, r, s, cat, psn, pse, payload):
"""
Creates a new packet with a basic header.
"""
p = Packet()
p.l = l
p.r = r
p.s = s
p.cat = cat
p.psn = psn
p.pse = pse
p.payload = payload
p.x = False
if not p.is_valid():
raise ValueError("Illegal combination of arguments!")
return p
def new_extended_packet(l, r, s, cat, psn, pse, pcf_type, pcf_integrity, pcf_value, payload):
"""
Creates a new packet with an extended header.
"""
p = new_basic_packet(l, r, s, cat, psn, pse, payload)
p.x = True
if pcf_value == None and pcf_type != _pcf_type_plus_payload:
p.pcf_len = None
elif pcf_value == None:
p.pcf_len = None
else:
p.pcf_len = len(pcf_value)
p.pcf_type = pcf_type
p.pcf_integrity = pcf_integrity
p.pcf_value = pcf_value
if not p.is_valid():
raise ValueError("Illegal combination of arguments!")
return p
class Packet():
def __init__(self):
"""
Creates a zero packet.
"""
# Initialize all the fields to None
self.psn = None
self.pse = None
self.cat = None
self.pcf_integrity = None
self.pcf_value = None
self.pcf_len = None
self.pcf_type = None
self.l = None
self.r = None
self.s = None
self.x = None
self.payload = None
self.magic = _default_magic
def to_dict(self):
return {
"psn" : self.psn,
"pse" : self.pse,
"cat" : self.cat,
"pcf_integrity" : self.pcf_integrity,
"pcf_value" : self.pcf_value,
"pcf_type" : self.pcf_type,
"l" : self.l,
"r" : self.r,
"s" : self.s,
"x" : self.x,
"magic" : self.magic,
"payload" : self.payload
}
def is_valid(self):
"""
Returns true if the packet's attributes/fields are in a valid state.
"""
if _any ([ self.psn == None, self.pse == None,
self.cat == None, self.magic == None,
self.l == None, self.r == None,
self.s == None, self.x == None]):
return False
if not self.x:
return True
if self.pcf_type == None:
return False
if self.pcf_type == 0x00:
return False
if self.pcf_type == _pcf_type_plus_payload:
if _any ([ self.pcf_integrity != None,
self.pcf_len != None,
self.pcf_value != None]):
return False
return True
if _any ([ self.pcf_integrity == None,
self.pcf_len == None,
self.pcf_value == None]):
return False
if self.pcf_len != len(self.pcf_value):
return False
if self.pcf_len > 63:
return False
if self.pcf_integrity < 0 or self.pcf_integrity > 3:
return False
return True
def from_bytes(self, bytes):
"""
Parses a packet from bytes. This function does not set PCF Integrity to zero
if PCF Len is zero. If you want that behaviour as mentioned in the PLUS spec
you must do this yourself.
"""
if len(bytes) < _min_packet_len:
raise ValueError("Minimum length of a PLUS packet is 20 bytes.")
magicAndFlags = _get_u32(bytes[_magic_pos[0] : _magic_pos[1]])
magic = magicAndFlags >> _magic_shift
if magic != _default_magic:
raise ValueError("Invalid Magic value.")
self.magic = magic
flags = magicAndFlags & _flags_mask
self.l = bool(flags & _l_mask)
self.r = bool(flags & _r_mask)
self.s = bool(flags & _s_mask)
self.x = bool(flags & _x_mask)
self.cat = _get_u64(bytes[_cat_pos[0] : _cat_pos[1]])
self.psn = _get_u32(bytes[_psn_pos[0] : _psn_pos[1]])
self.pse = _get_u32(bytes[_pse_pos[0] : _pse_pos[1]])
if not self.x:
self.payload = bytes[_min_packet_len:]
else:
self._extended(bytes[_min_packet_len:])
return self
def _extended(self, buf):
"""
Internal. Continues parsing extended headers.
"""
if len(buf) < 1:
raise ValueError("Extended header must have PCF_TYPE")
pcf_type = buf[0]
if pcf_type == 0xFF:
# This means no pcf_integry, pcf_len, pcf_value is present.
self.payload = buf[1:]
self.pcf_type = pcf_type
else:
if pcf_type == 0x00:
# One additional pcf_type byte
buf = buf[1:]
if len(buf) == 0:
raise ValueError("Missing additional PCF_TYPE byte")
pcf_type = buf[0] << 8
buf = buf[1:]
if len(buf) == 0:
raise ValueError("Missing PCF_LEN and PCF_INTEGRITY")
pcf_leni = buf[0]
pcf_len = pcf_leni >> 2
pcf_integrity = pcf_leni & 0x03
buf = buf[1:]
if len(buf) < pcf_len:
raise ValueError("Incomplete PCF_VALUE")
pcf_value = buf[:pcf_len]
payload = buf[pcf_len:]
self.pcf_len = pcf_len
self.pcf_integrity = pcf_integrity
self.pcf_value = pcf_value
self.payload = payload
self.pcf_type = pcf_type
def to_bytes(self):
"""
Unparses the packet to bytes.
"""
if not self.is_valid():
raise ValueError("Internal state is not valid!")
buf = bytearray()
magicAndFlags = self.magic << 4
if self.l: magicAndFlags |= _l_mask
if self.r: magicAndFlags |= _r_mask
if self.s: magicAndFlags |= _s_mask
if self.x: magicAndFlags |= _x_mask
_put_u32(magicAndFlags, buf)
_put_u64(self.cat, buf)
_put_u32(self.psn, buf)
_put_u32(self.pse, buf)
if not self.x:
buf += self.payload
return buf
if self.pcf_type == 0xFF:
buf.append(0xFF)
buf += self.payload
return buf
if self.pcf_type & 0x00FF == 0:
pcf_type = self.pcf_type >> 8
buf.append(0x00)
buf.append(pcf_type)
else:
buf.append(self.pcf_type)
buf.append(self.pcf_len << 2 | self.pcf_integrity)
buf += self.pcf_value
buf += self.payload
return buf
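# A minimal usage sketch (not part of the original module, shown here only as a hedged
# illustration of the API above): build a basic packet and round-trip it through bytes.
#
#   pkt = new_basic_packet(l=True, r=False, s=True, cat=42, psn=1, pse=0, payload=b"hi")
#   raw = pkt.to_bytes()
#   assert detect_plus(raw) and parse_packet(raw).cat == 42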
|
bsd-2-clause
| 4,748,410,429,267,642,000 | 17.679825 | 93 | 0.627495 | false | 2.559495 | false | false | false |
markrwilliams/pydivsufsort
|
setup.py
|
1
|
1025
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import subprocess
from os.path import exists
from setuptools import setup
from setuptools.extension import Extension
from setuptools.command.build_py import build_py as _build_py
from Cython.Distutils import build_ext
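# Building the extension needs the bundled libdivsufsort static library; the class body
# below runs configure/make when setup.py is executed so the .a file exists before linking.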
class build_py(_build_py):
if exists('lib/Makefile'):
make_clean = subprocess.Popen(['make', 'distclean'], cwd='lib/')
make_clean.wait()
configure = subprocess.Popen(['./configure', '--with-pic'],
cwd='lib/')
configure.wait()
make = subprocess.Popen(['make', '-j'], cwd='lib/')
make.wait()
setup(
cmdclass={'build_py': build_py,
'build_ext': build_ext},
name='divsufsort',
ext_modules=[
Extension(
"suffixarray",
sources=['src/suffix.pyx'],
libraries=['lib/lib/.libs/libdivsufsort.a'],
extra_objects=['lib/lib/.libs/libdivsufsort.a'],
include_dirs=['lib/include'],
language="c")])
|
mit
| 5,025,191,733,688,460,000 | 29.147059 | 72 | 0.590244 | false | 3.853383 | false | false | false |
allison-group/indigo-bondorder
|
archive/src/indigox/astar.py
|
1
|
9383
|
from heapq import heappop, heappush
from itertools import count as _count
from itertools import product
from time import perf_counter
from indigox.config import (INFINITY, BASIS_LEVEL, TIMEOUT, HEURISTIC,
COUNTERPOISE_CORRECTED, ELECTRON_PAIRS,
INITIAL_LO_ENERGY)
from indigox.data import atom_enes, bond_enes
from indigox.exception import IndigoMissingParameters
from indigox.lopt import LocalOptimisation
from indigox.misc import (graph_to_dist_graph, electron_spots, electrons_to_add,
locs_sort, BondOrderAssignment, graph_setup, HashBitArray,
node_energy, bitarray_to_assignment, calculable_nodes)
BSSE = int(not COUNTERPOISE_CORRECTED)
class AStar(BondOrderAssignment):
def __init__(self, G):
self.init_G = G
def initialise(self):
if HEURISTIC.lower() == 'tight':
self.heuristic = abstemious
elif HEURISTIC.lower() == 'loose':
self.heuristic = promiscuous
else:
raise IndigoMissingParameters('Unknown A* heuristic type: {}'
''.format(HEURISTIC))
self.h_count = 0
self.c_count = 0
self.G = graph_to_dist_graph(self.init_G)
self.target = electrons_to_add(self.init_G)
self.locs = locs_sort(electron_spots(self.init_G), self.G)
self.choices = []
for n in self.locs:
n_count = self.locs.count(n)
if (n,n_count) not in self.choices:
self.choices.append((n,n_count))
for i in range(len(self.choices)):
self.choices[i] = self.choices[i][1]
if not INITIAL_LO_ENERGY:
self.max_queue_energy = INFINITY / 2
else:
lo = LocalOptimisation(self.init_G)
_, self.max_queue_energy = lo.run()
def run(self):
self.start_time = perf_counter()
push = heappush
pop = heappop
c = _count()
self.initialise()
source = HashBitArray(len(self.locs))
source.setall(False)
i_count = 0
        explored_count = 0
        enqueued_count = 0
start = 0
try:
stop = self.choices[0]
except IndexError:
stop = 0
child = 1
always_calculable = calculable_nodes(self.G, source, 0, self.locs,
self.target)
q = [(0, next(c), (source, 0), start, stop, child,
self.calc_energy(source, always_calculable, stop), None)]
enqueued = {}
explored = {}
while q:
qcost, _, curvert, start, stop, child, dist, parent = pop(q)
i_count += 1
if i_count < 20:
print(curvert, start, stop)
# print(curvert[0])
if stop >= len(self.locs) and curvert[0].count() == self.target:
bitarray_to_assignment(self.init_G, curvert[0], self.locs)
print(i_count + len(q), "items passed through queue")
print("Explored:", explored_count, "Enqueued:", enqueued_count)
print("{:.3f} seconds run time".format(perf_counter()-self.start_time))
return self.init_G, dist
# if curvert in explored:
# explored_explored += 1
# continue
# explored[curvert] = parent
for n in self.neighbours(curvert[0], start, stop):
if i_count < 20:
print(" ",n)
# if n in explored:
# explored_count += 1
# continue
calculable = calculable_nodes(self.G, n[0], stop, self.locs,
self.target)
ncost = self.calc_energy(n[0], calculable, stop)
# if n in enqueued:
# enqueued_count += 1
# qcost, h = enqueued[n]
# if qcost <= ncost:
# continue
# else:
# self.h_count += 1
h = self.heuristic(self.G, n[0], calculable, stop,
self.target, self.locs)
if ncost + h > self.max_queue_energy:
continue
# enqueued[n] = ncost, h
try:
push(q, (ncost + h, next(c), n, stop, stop + self.choices[child],
child + 1, ncost, curvert))
except IndexError:
push(q, (ncost + h, next(c), n, stop, stop + 1,child, ncost,
curvert))
print(i_count, "items passed through queue")
print("{:.3f} seconds run time".format(perf_counter()-self.start_time))
def neighbours(self, a, start, stop):
num = stop - start
for i in range(num + 1):
b = HashBitArray(a.to01())
j = 0
while j < i:
b[start + j] = True
j += 1
yield b, stop
def calc_energy(self, a, calculable, stop, g_info=None):
self.c_count += 1
placed = a[:stop].count()
to_place = self.target - placed
available_places = a.length() - stop - to_place
if to_place < 0 or available_places < 0:
return INFINITY
if g_info is None:
graph_setup(self.G, a, self.locs)
else:
for n in self.G:
self.G.node[n]['e-'] = g_info[n]['e-']
if len(n) == 1:
self.G.node[n]['fc'] = g_info[n]['fc']
ene = sum(node_energy(self.G, n) for n in calculable)
if ene > INFINITY / 2:
ene = INFINITY
return round(ene, 5)
# Heuristics
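# Two lower-bound heuristics for the A* search above: `promiscuous` assumes every
# not-yet-calculable atom or bond can reach its minimum possible energy independently,
# while `abstemious` only counts formal charges and bond orders still reachable with
# the electrons that remain to be placed, giving a tighter estimate.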
def promiscuous(G, a, calculable, stop, target, locs):
h_ene = 0
placed = a[:stop].count()
to_place = target - placed
if not to_place:
return h_ene
if to_place < 0:
return INFINITY
# Doesn't worry about charged bonds
a_enes = atom_enes[BASIS_LEVEL]
b_enes = bond_enes[BASIS_LEVEL]
for n in G:
if n in calculable:
continue
if len(n) == 1:
h_ene += min(a_enes[G.node[n]['Z']].values())
elif len(n) == 2:
a_element = G.node[(n[0],)]['Z']
b_element = G.node[(n[1],)]['Z']
if b_element < a_element:
a_element, b_element = b_element, a_element
min_ene = 0
for o in (1,2,3):
try:
o_ene = b_enes[(a_element, b_element, o)][BSSE]
except KeyError:
continue
if o_ene < min_ene:
min_ene = o_ene
h_ene += min_ene
return h_ene
def abstemious(G, a, calculable, stop, target, locs):
h_ene = 0
placed = a[:stop].count()
to_place = target - placed
if not to_place:
return h_ene
if to_place < 0:
return INFINITY
extra_counts = {k:locs.count(k) for k in set(locs)}
extra_able = set(locs[stop:])
graph_setup(G, a, locs)
# note where all the extra electrons can go
for n in G:
G.node[n]['h'] = 0
if n not in extra_able:
continue
added = 0
while added < to_place and added < extra_counts[n]:
if ELECTRON_PAIRS:
G.node[n]['h'] += 2
else:
G.node[n]['h'] += 1
added += 1
# figure out the lowest possible energy attainable for each node
for n in sorted(G, key=len):
# atoms formal charges
if len(n) == 1:
addable = []
step = 2 if ELECTRON_PAIRS else 1
addable.append(range(0, G.node[n]['h'] + 1, step))
for nb in G[n]:
addable.append(range(0, G.node[nb]['h'] // 2 + 1))
fcs = set()
for x in product(*addable):
real_sum = (x[0]//2 + sum(x[1:]) if ELECTRON_PAIRS
else x[0] + 2 * sum(x[1:]))
if real_sum <= to_place:
fcs.add((G.node[n]['fc'] - sum(x), real_sum))
G.node[n]['poss_fcs'] = fcs
# need all possible formal charges for all atoms
if n in calculable:
continue
fcs = set(x[0] for x in fcs)
a_enes = atom_enes[BASIS_LEVEL][G.node[n]['Z']]
try:
h_ene += min(v for k, v in a_enes.items() if k in fcs)
except ValueError:
h_ene = INFINITY
if len(n) == 2:
if n in calculable:
continue
step = 2
bos = {G.node[n]['e-'] + x
for x in range(0, G.node[n]['h'] + 1, step)}
a_ele = G.node[(n[0],)]['Z']
b_ele = G.node[(n[1],)]['Z']
if b_ele < a_ele:
a_ele, b_ele = b_ele, a_ele
b_enes = bond_enes[BASIS_LEVEL]
h_ene += min(b_enes[(a_ele, b_ele, o//2)][BSSE] for o in bos)
return h_ene
|
mit
| 7,778,853,065,744,993,000 | 34.950192 | 87 | 0.470638 | false | 3.641055 | false | false | false |
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/PyKDE4/kio/KUriFilter.py
|
1
|
1212
|
# encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python2.7/dist-packages/PyKDE4/kio.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KUriFilter(): # skipped bases: <type 'sip.wrapper'>
# no doc
def filteredUri(self, *args, **kwargs): # real signature unknown
pass
def filterSearchUri(self, *args, **kwargs): # real signature unknown
pass
def filterUri(self, *args, **kwargs): # real signature unknown
pass
def loadPlugins(self, *args, **kwargs): # real signature unknown
pass
def pluginNames(self, *args, **kwargs): # real signature unknown
pass
def self(self, *args, **kwargs): # real signature unknown
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
NormalTextFilter = 1
SearchFilterType = None # (!) real value is ''
SearchFilterTypes = None # (!) real value is ''
WebShortcutFilter = 2
|
gpl-2.0
| 3,505,578,454,334,459,000 | 25.933333 | 101 | 0.648515 | false | 3.63964 | false | false | false |
albertoibm/Thesis
|
Archivo2.py
|
1
|
3593
|
import libardrone
import numpy
from time import time,sleep
from sys import stdout,exit,argv
from math import sqrt,pi
from PID import PID
import diffOrdN
GRADOS=pi/180
class Integral:
""" Calcula la integral de una funcion dada como parametro"""
def __init__(self,f):
self.f=f
self.integral=0.
self.val=0.
def update(self,x,dt):
self.integral+=(self.f(x)+self.val)/2*dt
self.val=self.f(x)
ttotal=10 ## Total flight time
factor=1
### Controller parameters
delta=numpy.matrix(numpy.diag([12,12,8]))
K1=4.5*numpy.sqrt(delta)
K2=1.1*delta
lamda=numpy.matrix(numpy.diag([3,3,3]))
### Quadcopter parameters
l=0.13
Ixx=24.1e-3
Iyy=23.2e-3
Izz=45.1e-2
b=0.0006646195542576290
b=0.000064601020673842
d=b*9.72
## Inertia matrix and its inverse
g=numpy.matrix(numpy.diag([l/Ixx,l/Iyy,1/Izz]))
ginv=g.I
## Vectors for the desired x and its derivative
xd=numpy.array([[0],[0],[0]])
xpd=numpy.array([[0],[0],[0]])
Altd=260
## Sliding-mode numerical differentiator object
difN = 4 ## Differentiator order
dif = diffOrdN.DiffOrdN(difN,[12,8,4,3.5,2.1])
## Object that computes the integral of the sign function
intsgn=Integral(lambda x:numpy.sign(x))
## Altitude controller
ctrlalt=PID(.7,.2,.1)
### Establish configuration with the ARDrone, turn off the motors and change the camera mode
stdout.write("Estableciendo comunicacion con el ARDrone\n")
stdout.flush()
drone=libardrone.ARDrone()
sleep(1)
print "Listo!"
stdout.write("Estableciendo configuracion inicial\n")
stdout.flush()
drone.reset()
sleep(0.1)
drone.trim()
sleep(1.5)
drone.reset()
print "Encendiendo motores"
drone.pwm(1,1,1,1)
sleep(5)
drone.zap(libardrone.ZAP_CHANNEL_LARGE_HORI_SMALL_VERT)
sleep(0.1)
print "Listo!"
## Vectors to store flight data
angs=[]
us=[]
oms=[]
ts=[]
## define the initial time
ta=time()
t0=ta
xa=0
while time()-t0<ttotal:
dt = -ta + time()
ta = time()
Alt = 260#drone.navdata[0]['altitude']
Th = drone.navdata[0]['theta']*GRADOS
Ph = drone.navdata[0]['phi']*GRADOS
Ps = drone.navdata[0]['psi']*GRADOS
x = numpy.matrix([[Th],[Ph],[Ps]])
dif.update(x,dt)
o = dif.output()
x = o[difN]
xp = o[difN-1]
# xp = (x - xa)/dt
xa = x+0
e = xd-x
ep = xpd-xp
s = ep+lamda*e
intsgn.update(s,dt)
u = -lamda*ep-\
K1*numpy.matrix(numpy.array(numpy.sqrt(numpy.abs(s)))*numpy.array(numpy.sign(s)))\
-K2*intsgn.integral
u = ginv*u
om1=om2=om3=om4 = 0
U4 = max(0,ctrlalt.sal(Altd-Alt,dt))
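	# Convert the total thrust U4 and the attitude controls u[0..2] into the four rotor
	# speeds by inverting the rotor mixing relations (b and d appear to be the rotor
	# thrust and drag coefficients).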
try:om1=int(round(sqrt(-(b*u[2]+d*u[0]-d*U4+d*u[1])/(4*b*d))*factor))
except:pass
try:om2=int(round(sqrt((-d*u[0]+d*u[1]+b*u[2]+d*U4)/(4*b*d))*factor))
except:pass
try:om3=int(round(sqrt(-(-d*u[1]+b*u[2]-d*U4-d*u[0])/(4*b*d))*factor))
except:pass
try:om4=int(round(sqrt((d*U4+b*u[2]+d*u[0]-d*u[1])/(4*b*d))*factor))
except:pass
om1=10+om1 if om1<10 else om1
om2=10+om2 if om2<10 else om2
om3=10+om3 if om3<10 else om3
om4=10+om4 if om4<10 else om4
stdout.write("\b"*100+"(%.2f,%.2f,%.2f,%.2f)"%(U4,u[0],u[1],u[2]))
stdout.write("\b"*0+"|[%.2f,%.2f,%.2f,%.2f]"%(om1,om2,om3,om4))
stdout.write("{%.4f} "%dt)
stdout.flush()
if "-p" not in argv:
drone.pwm(om1,om2,om3,om4)
angs.append([x[0][0],x[1][0],x[2][0]]) ## Th,Ph,Ps
us.append([U4,u[0],u[1],u[2]])
oms.append([om1,om2,om3,om4])
ts.append(time()-t0)
drone.pwm(0,0,0,0)
drone.halt()
print ""
archivo=open("res_supertwisting.txt",'w')
for i in range(len(ts)):
archivo.write("%.3f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f,%.4f\n"%\
(ts[i],angs[i][0],angs[i][1],us[i][0],us[i][1],us[i][2],\
us[i][3],oms[i][1],oms[i][2],oms[i][3]))
archivo.close()
exit()
|
gpl-2.0
| 8,193,664,117,015,958,000 | 23.442177 | 98 | 0.651266 | false | 2.053143 | false | false | false |
drusk/fishcounter
|
fishcounter/tracking/camshift.py
|
1
|
3739
|
# Copyright (C) 2013 David Rusk
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
Tracking based on the Camshift algorithm.
"""
import cv2
import numpy as np
from fishcounter.segment import HSVColourSegmenter
class CamShiftTracker(object):
"""
Uses colour information to track fish regardless of whether they are
moving or not.
"""
def __init__(self):
self.mask_detector = HSVColourSegmenter()
def track(self, current_image, frame_number, moving_objects, stationary_objects):
hsv = cv2.cvtColor(current_image, cv2.COLOR_BGR2HSV)
mask = self.mask_detector.segment(current_image)
for obj in stationary_objects:
bbox = obj.bbox
if bbox.has_negative_area:
print "BBOX has negative area: %s" % bbox
continue
hsv_roi = hsv[bbox.y0:bbox.y1, bbox.x0:bbox.x1]
mask_roi = mask[bbox.y0:bbox.y1, bbox.x0:bbox.x1]
bin_range = [self.mask_detector.hue_min, self.mask_detector.hue_max]
hist = cv2.calcHist([hsv_roi], # source image(s)
[0], # channels to use - just Hue
mask_roi, # mask which source pixels to count
[16], # number of bins
bin_range # first bin min, last bin max
)
cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
hist = hist.reshape(-1)
prob = cv2.calcBackProject([hsv], # input image
[0], # channels to use - just Hue
hist, # histogram
bin_range, # first bin min, last bin max
1 # scale factor
)
prob &= mask
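# Rough picture of the steps above: the Hue histogram built from the object's
# bounding box acts as its colour model, calcBackProject turns every pixel of
# the current frame into a likelihood of belonging to that model, and AND-ing
# with the segmentation mask zeroes out pixels the HSVColourSegmenter rejected.
# CamShift below then shifts and resizes the search window toward the densest
# region of this probability image.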
stop_criteria = (cv2.TERM_CRITERIA_COUNT | cv2.TERM_CRITERIA_EPS,
10, # max iterations
1 # desired accuracy of window center
)
# track_box also contains rotation information which we are not using right now
track_box, track_window = cv2.CamShift(prob, bbox.cv2rect, stop_criteria)
prev_center = bbox.center
bbox.update(track_window)
obj.last_frame_tracked = frame_number
new_center = bbox.center
displacement = np.sqrt(np.square(prev_center[0] - new_center[0]) +
np.square(prev_center[1] - new_center[1]))
if displacement > 6:
stationary_objects.remove(obj)
moving_objects.append(obj)
return moving_objects, stationary_objects
|
mit
| -920,504,979,399,676,300 | 39.641304 | 91 | 0.598556 | false | 4.263398 | false | false | false |
ericchuhong/WellSoonWeb
|
www/models.py
|
1
|
1460
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Models for users, blogs and comments
"""
__author__ = 'Chuhong Ma'
import time, uuid
from orm import Model, StringField, BoolenField, FloatField, TextField
def next_id():
return '%015d%s000' % (int(time.time() * 1000), uuid.uuid4().hex)
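# Length check (informal): '%015d' on time.time()*1000 yields 15 digits,
# uuid4().hex yields 32 hex characters, plus the literal '000' gives 50
# characters in total, matching the ddl='varchar(50)' used for the id columns below.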
class User(Model):
__table__ = 'users'
id = StringField(primary_key=True, default=next_id, ddl='varchar(50)')
email = StringField(ddl='varchar(50)')
passwd = StringField(ddl='varchar(50)')
admin = BoolenField()
name = StringField(ddl='varchar(50)')
image = StringField(ddl='varchar(500)')
created_at = FloatField(default=time.time)
class Blog(Model):
__table__ = 'blogs'
id = StringField(primary_key=True, default=next_id,ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(50)')
name = StringField(ddl='varchar(50)')
summary = StringField(ddl='varchar(50)')
content = TextField()
created_at = FloatField(default=time.time)
class Comment(Model):
__table__ = 'comments'
id = StringField(primary_key=True, default=next_id,ddl='varchar(50)')
blog_id = StringField(ddl='varchar(50)')
user_id = StringField(ddl='varchar(50)')
user_name = StringField(ddl='varchar(50)')
user_image = StringField(ddl='varchar(50)')
content = TextField()
created_at = FloatField(default=time.time)
|
mit
| -4,328,421,956,334,911,500 | 27.076923 | 74 | 0.65411 | false | 3.348624 | false | false | false |
sschmeier/genomics-tutorial
|
conf.py
|
1
|
13833
|
# -*- coding: utf-8 -*-
#
# Genomics Tutorial documentation build configuration file, created by
# sphinx-quickstart on Sat Nov 19 11:28:35 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# for enabling markdown
#from recommonmark.parser import CommonMarkParser
#source_parsers = {
# '.md': CommonMarkParser,
#}
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#source_suffix = ['.rst', '.md']
source_suffix = ['.rst']
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Genomics Tutorial'
copyright = u'2016-2019, Sebastian Schmeier (https://sschmeier.com)'
author = u'Sebastian Schmeier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'2019.03'
# The full version, including alpha/beta/rc tags.
release = u'2019.03'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = []
# if you want to exclude certain sections based on a tag on "sphinx build -t restrictivemode ..."
#if tags.has('restrictivemode'):
# exclude_patterns = ['**/*bla*']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Use automatic figure numbering
numfig=True
# you need to specify all three in this section, otherwise LaTeX output throws an error
#numfig_format={'figure': 'Figure %s', 'table': 'Table %s', 'code-block': 'Listing %s'}
#numfig_secnum_depth = 1
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#from better import better_theme_path
#html_theme_path = [better_theme_path]
#html_theme = 'better'
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
#html_theme = 'alabaster'
#html_theme = "classic"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the documentation.
#html_theme_options = {
# 'collapse_navigation': False,
# 'display_version': True
#}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# rtd
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
html_title = u''
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
html_logo = 'images/icon.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
html_favicon = 'images/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
html_last_updated_fmt = ''
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Genomicsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
'papersize': 'a4paper',
# The font size ('10pt', '11pt' or '12pt').
#
'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
'preamble': r'''
\usepackage{charter}
\usepackage[defaultsans]{lato}
\usepackage{inconsolata}
''',
# Latex figure (float) alignment
#
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Genomics.tex', u'Computational Genomics Tutorial',
u'Sebastian Schmeier (https://sschmeier.com)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
latex_logo = 'images/icon-latex.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
latex_use_parts = False
# If true, show page references after internal links.
#
latex_show_pagerefs = True
# If true, show URL addresses after external links.
# one of:
# 'no' – do not display URLs (default)
# 'footnote' – display URLs in footnotes
# 'inline' – display URLs inline in parentheses
latex_show_urls = 'footnote'
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Genomics Tutorial', u'Genomics Tutorial Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Genomics', u'Computational Genomics Tutorial',
author, 'Computational Genomics Tutorial', 'Computational Genomics Tutorial Content.',
'teaching'),
]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
#-----------------------------------------------------
# SEB:
# global substitutions
# epilog will be added to the end of each rst-file
# we define some shortcuts here
rst_epilog = """
.. |fileanc| replace:: ancestor
.. |fileanc1| replace:: ancestor-R1
.. |fileanc2| replace:: ancestor-R2
.. |fileevol| replace:: evolved-6
.. |fileevol1| replace:: evolved-6-R1
.. |fileevol2| replace:: evolved-6-R2
.. |conda| replace:: `conda <http://conda.pydata.org/miniconda.html>`__
.. |kraken| replace:: `Kraken2 <https://www.ccb.jhu.edu/software/kraken2/>`__
.. |bracken| replace:: `Bracken <https://ccb.jhu.edu/software/bracken/index.shtml>`__
.. |centrifuge| replace:: `Centrifuge <http://www.ccb.jhu.edu/software/centrifuge/index.shtml>`__
.. |ncbitax| replace:: `NCBI Taxonomy <https://www.ncbi.nlm.nih.gov/taxonomy>`__
.. |spades| replace:: `SPAdes <http://bioinf.spbau.ru/spades>`__
.. |krona| replace:: `Krona <https://github.com/marbl/Krona/wiki>`__
.. |solexaqa| replace:: `SolexaQA++ <http://solexaqa.sourceforge.net>`__
.. |fastqc| replace:: `FastQC <http://www.bioinformatics.babraham.ac.uk/projects/fastqc/>`__
.. |sickle| replace:: `Sickle <https://github.com/najoshi/sickle>`__
.. |quast| replace:: `Quast <http://quast.bioinf.spbau.ru/>`__
.. |freebayes| replace:: `freebayes <https://github.com/ekg/freebayes>`__
.. |samtools| replace:: `SAMtools <http://samtools.sourceforge.net/>`__
.. |bwa| replace:: `BWA <http://bio-bwa.sourceforge.net/>`__
.. |bowtie| replace:: `Bowtie2 <http://bowtie-bio.sourceforge.net/bowtie2/index.shtml>`__
.. |qualimap| replace:: `QualiMap <http://qualimap.bioinfo.cipf.es/>`__
.. |R| replace:: `R <https://www.r-project.org/>`__
.. |bcftools| replace:: `BCFtools <http://www.htslib.org/doc/bcftools.html>`__
.. |vcflib| replace:: `vcflib <https://github.com/vcflib/vcflib#vcflib>`__
.. |illumina| replace:: `Illumina <http://illumina.com>`__
.. |augustus| replace:: `Augustus <http://augustus.gobics.de>`__
.. |busco| replace:: `BUSCO <http://busco.ezlab.org>`__
.. |blastn| replace:: `blastn <https://blast.ncbi.nlm.nih.gov/Blast.cgi?PAGE_TYPE=BlastSearch>`__
.. |blast| replace:: `BLAST <https://blast.ncbi.nlm.nih.gov/Blast.cgi>`__
.. |igv| replace:: `IGV <http://software.broadinstitute.org/software/igv/>`__
.. |muscle| replace:: `MUSCLE <http://www.ebi.ac.uk/Tools/msa/muscle/>`__
.. |raxml| replace:: `RAxML-NG <https://github.com/amkozlov/raxml-ng>`__
.. |snpeff| replace:: `SnpEff <http://snpeff.sourceforge.net/index.html>`__
"""
# prolog will be added to the beginning of each file
# rst_prolog=""
# to be able to use two dashes in my own blocks I turn off smartypants
#html_use_smartypants=False
smart_quotes = False
def setup(app):
app.add_stylesheet('css/seb.css')
|
mit
| -4,872,524,209,302,796,000 | 31.081206 | 97 | 0.688725 | false | 3.295281 | true | false | false |
kaiw/meld
|
meld/filediff.py
|
1
|
85802
|
### Copyright (C) 2002-2006 Stephen Kennedy <[email protected]>
### Copyright (C) 2009-2012 Kai Willadsen <[email protected]>
### This program is free software; you can redistribute it and/or modify
### it under the terms of the GNU General Public License as published by
### the Free Software Foundation; either version 2 of the License, or
### (at your option) any later version.
### This program is distributed in the hope that it will be useful,
### but WITHOUT ANY WARRANTY; without even the implied warranty of
### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
### GNU General Public License for more details.
### You should have received a copy of the GNU General Public License
### along with this program; if not, write to the Free Software
### Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
### USA.
import copy
import functools
import io
import os
from gettext import gettext as _
import time
from multiprocessing import Pool
from multiprocessing.pool import ThreadPool
import pango
import glib
import gobject
import gtk
import gtk.keysyms
from . import diffutil
from . import matchers
from . import meldbuffer
from . import melddoc
from . import merge
from . import misc
from . import patchdialog
from . import paths
from . import recent
from . import undo
from .ui import findbar
from .ui import gnomeglade
from .meldapp import app
from .util.compat import text_type
from .util.sourceviewer import srcviewer
class CachedSequenceMatcher(object):
"""Simple class for caching diff results, with LRU-based eviction
Results from the SequenceMatcher are cached and timestamped, and
subsequently evicted based on least-recent generation/usage. The LRU-based
eviction is overly simplistic, but is okay for our usage pattern.
"""
process_pool = None
def __init__(self):
if self.process_pool is None:
if os.name == "nt":
CachedSequenceMatcher.process_pool = ThreadPool(None)
else:
# maxtasksperchild is new in Python 2.7; this is for 2.6 compat
try:
CachedSequenceMatcher.process_pool = Pool(
None, matchers.init_worker, maxtasksperchild=1)
except TypeError:
CachedSequenceMatcher.process_pool = Pool(
None, matchers.init_worker)
self.cache = {}
def match(self, text1, textn, cb):
try:
self.cache[(text1, textn)][1] = time.time()
cb(self.cache[(text1, textn)][0])
except KeyError:
def inline_cb(opcodes):
self.cache[(text1, textn)] = [opcodes, time.time()]
gobject.idle_add(lambda: cb(opcodes))
self.process_pool.apply_async(matchers.matcher_worker,
(text1, textn),
callback=inline_cb)
def clean(self, size_hint):
"""Clean the cache if necessary
@param size_hint: the recommended minimum number of cache entries
"""
if len(self.cache) < size_hint * 3:
return
items = self.cache.items()
items.sort(key=lambda it: it[1][1])
for item in items[:-size_hint * 2]:
del self.cache[item[0]]
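# In other words: clean() is a no-op until the cache holds at least three
# times `size_hint` entries; it then sorts entries by their last-used
# timestamp (item[1][1]) and keeps only the most recent 2*size_hint of them.
# This is the "overly simplistic" LRU eviction mentioned in the class
# docstring; eviction only happens when clean() is explicitly called.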
MASK_SHIFT, MASK_CTRL = 1, 2
MODE_REPLACE, MODE_DELETE, MODE_INSERT = 0, 1, 2
class CursorDetails(object):
__slots__ = ("pane", "pos", "line", "offset", "chunk", "prev", "next",
"prev_conflict", "next_conflict")
def __init__(self):
for var in self.__slots__:
setattr(self, var, None)
class TaskEntry(object):
__slots__ = ("filename", "file", "buf", "codec", "pane", "was_cr")
def __init__(self, *args):
for var, val in zip(self.__slots__, args):
setattr(self, var, val)
class TextviewLineAnimation(object):
__slots__ = ("start_mark", "end_mark", "start_rgba", "end_rgba",
"start_time", "duration")
def __init__(self, mark0, mark1, rgba0, rgba1, duration):
self.start_mark = mark0
self.end_mark = mark1
self.start_rgba = rgba0
self.end_rgba = rgba1
self.start_time = glib.get_current_time()
self.duration = duration
class FileDiff(melddoc.MeldDoc, gnomeglade.Component):
"""Two or three way diff of text files.
"""
differ = diffutil.Differ
keylookup = {gtk.keysyms.Shift_L : MASK_SHIFT,
gtk.keysyms.Control_L : MASK_CTRL,
gtk.keysyms.Shift_R : MASK_SHIFT,
gtk.keysyms.Control_R : MASK_CTRL}
# Identifiers for MsgArea messages
(MSG_SAME, MSG_SLOW_HIGHLIGHT) = list(range(2))
__gsignals__ = {
'next-conflict-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (bool, bool)),
'action-mode-changed': (gobject.SIGNAL_RUN_FIRST, gobject.TYPE_NONE, (int,)),
}
def __init__(self, prefs, num_panes):
"""Start up an filediff with num_panes empty contents.
"""
melddoc.MeldDoc.__init__(self, prefs)
gnomeglade.Component.__init__(self, paths.ui_dir("filediff.ui"), "filediff")
self.map_widgets_into_lists(["textview", "fileentry", "diffmap",
"scrolledwindow", "linkmap",
"statusimage", "msgarea_mgr", "vbox",
"selector_hbox", "readonlytoggle"])
# This SizeGroup isn't actually necessary for FileDiff; it's for
# handling non-homogeneous selectors in FileComp. It's also fragile.
column_sizes = gtk.SizeGroup(gtk.SIZE_GROUP_HORIZONTAL)
column_sizes.set_ignore_hidden(True)
for widget in self.selector_hbox:
column_sizes.add_widget(widget)
self.warned_bad_comparison = False
# Some sourceviews bind their own undo mechanism, which we replace
gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z,
gtk.gdk.CONTROL_MASK)
gtk.binding_entry_remove(srcviewer.GtkTextView, gtk.keysyms.z,
gtk.gdk.CONTROL_MASK | gtk.gdk.SHIFT_MASK)
for v in self.textview:
v.set_buffer(meldbuffer.MeldBuffer())
v.set_show_line_numbers(self.prefs.show_line_numbers)
v.set_insert_spaces_instead_of_tabs(self.prefs.spaces_instead_of_tabs)
v.set_wrap_mode(self.prefs.edit_wrap_lines)
if self.prefs.show_whitespace:
v.set_draw_spaces(srcviewer.spaces_flag)
srcviewer.set_tab_width(v, self.prefs.tab_size)
self._keymask = 0
self.load_font()
self.deleted_lines_pending = -1
self.textview_overwrite = 0
self.focus_pane = None
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.textbuffer = [v.get_buffer() for v in self.textview]
self.buffer_texts = [meldbuffer.BufferLines(b) for b in self.textbuffer]
self.undosequence = undo.UndoSequence()
self.text_filters = []
self.create_text_filters()
self.app_handlers = [app.connect("text-filters-changed",
self.on_text_filters_changed)]
self.buffer_filtered = [meldbuffer.BufferLines(b, self._filter_text)
for b in self.textbuffer]
for (i, w) in enumerate(self.scrolledwindow):
w.get_vadjustment().connect("value-changed", self._sync_vscroll, i)
w.get_hadjustment().connect("value-changed", self._sync_hscroll)
self._connect_buffer_handlers()
self._sync_vscroll_lock = False
self._sync_hscroll_lock = False
self._scroll_lock = False
self.linediffer = self.differ()
self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
self.force_highlight = False
self.syncpoints = []
self.in_nested_textview_gutter_expose = False
self._cached_match = CachedSequenceMatcher()
self.anim_source_id = [None for buf in self.textbuffer]
self.animating_chunks = [[] for buf in self.textbuffer]
for buf in self.textbuffer:
buf.create_tag("inline")
buf.connect("notify::has-selection",
self.update_text_actions_sensitivity)
actions = (
("MakePatch", None, _("Format as Patch..."), None,
_("Create a patch using differences between files"),
self.make_patch),
("SaveAll", None, _("Save A_ll"), "<Ctrl><Shift>L",
_("Save all files in the current comparison"),
self.on_save_all_activate),
("Revert", gtk.STOCK_REVERT_TO_SAVED, None, None,
_("Revert files to their saved versions"),
self.on_revert_activate),
("SplitAdd", None, _("Add Synchronization Point"), None,
_("Add a manual point for synchronization of changes between "
"files"),
self.add_sync_point),
("SplitClear", None, _("Clear Synchronization Points"), None,
_("Clear manual change sychronization points"),
self.clear_sync_points),
("PrevConflict", None, _("Previous Conflict"), "<Ctrl>I",
_("Go to the previous conflict"),
lambda x: self.on_next_conflict(gtk.gdk.SCROLL_UP)),
("NextConflict", None, _("Next Conflict"), "<Ctrl>K",
_("Go to the next conflict"),
lambda x: self.on_next_conflict(gtk.gdk.SCROLL_DOWN)),
("PushLeft", gtk.STOCK_GO_BACK, _("Push to Left"), "<Alt>Left",
_("Push current change to the left"),
lambda x: self.push_change(-1)),
("PushRight", gtk.STOCK_GO_FORWARD,
_("Push to Right"), "<Alt>Right",
_("Push current change to the right"),
lambda x: self.push_change(1)),
# FIXME: using LAST and FIRST is terrible and unreliable icon abuse
("PullLeft", gtk.STOCK_GOTO_LAST,
_("Pull from Left"), "<Alt><Shift>Right",
_("Pull change from the left"),
lambda x: self.pull_change(-1)),
("PullRight", gtk.STOCK_GOTO_FIRST,
_("Pull from Right"), "<Alt><Shift>Left",
_("Pull change from the right"),
lambda x: self.pull_change(1)),
("CopyLeftUp", None, _("Copy Above Left"), "<Alt>bracketleft",
_("Copy change above the left chunk"),
lambda x: self.copy_change(-1, -1)),
("CopyLeftDown", None, _("Copy Below Left"), "<Alt>semicolon",
_("Copy change below the left chunk"),
lambda x: self.copy_change(-1, 1)),
("CopyRightUp", None, _("Copy Above Right"), "<Alt>bracketright",
_("Copy change above the right chunk"),
lambda x: self.copy_change(1, -1)),
("CopyRightDown", None, _("Copy Below Right"), "<Alt>quoteright",
_("Copy change below the right chunk"),
lambda x: self.copy_change(1, 1)),
("Delete", gtk.STOCK_DELETE, _("Delete"), "<Alt>Delete",
_("Delete change"),
self.delete_change),
("MergeFromLeft", None, _("Merge All from Left"), None,
_("Merge all non-conflicting changes from the left"),
lambda x: self.pull_all_non_conflicting_changes(-1)),
("MergeFromRight", None, _("Merge All from Right"), None,
_("Merge all non-conflicting changes from the right"),
lambda x: self.pull_all_non_conflicting_changes(1)),
("MergeAll", None, _("Merge All"), None,
_("Merge all non-conflicting changes from left and right "
"panes"),
lambda x: self.merge_all_non_conflicting_changes()),
("CycleDocuments", None,
_("Cycle Through Documents"), "<control>Escape",
_("Move keyboard focus to the next document in this "
"comparison"),
self.action_cycle_documents),
)
toggle_actions = (
("LockScrolling", None, _("Lock Scrolling"), None,
_("Lock scrolling of all panes"),
self.on_action_lock_scrolling_toggled, True),
)
self.ui_file = paths.ui_dir("filediff-ui.xml")
self.actiongroup = gtk.ActionGroup('FilediffPopupActions')
self.actiongroup.set_translation_domain("meld")
self.actiongroup.add_actions(actions)
self.actiongroup.add_toggle_actions(toggle_actions)
self.main_actiongroup = None
self.findbar = findbar.FindBar(self.table)
self.widget.ensure_style()
self.on_style_set(self.widget, None)
self.widget.connect("style-set", self.on_style_set)
self.set_num_panes(num_panes)
gobject.idle_add( lambda *args: self.load_font()) # hack around Bug 316730
gnomeglade.connect_signal_handlers(self)
self.cursor = CursorDetails()
self.connect("current-diff-changed", self.on_current_diff_changed)
for t in self.textview:
t.connect("focus-in-event", self.on_current_diff_changed)
t.connect("focus-out-event", self.on_current_diff_changed)
self.linediffer.connect("diffs-changed", self.on_diffs_changed)
self.undosequence.connect("checkpointed", self.on_undo_checkpointed)
self.connect("next-conflict-changed", self.on_next_conflict_changed)
overwrite_label = gtk.Label()
overwrite_label.show()
cursor_label = gtk.Label()
cursor_label.show()
self.status_info_labels = [overwrite_label, cursor_label]
def get_keymask(self):
return self._keymask
def set_keymask(self, value):
if value & MASK_SHIFT:
mode = MODE_DELETE
elif value & MASK_CTRL:
mode = MODE_INSERT
else:
mode = MODE_REPLACE
self._keymask = value
self.emit("action-mode-changed", mode)
keymask = property(get_keymask, set_keymask)
def on_style_set(self, widget, prev_style):
style = widget.get_style()
lookup = lambda color_id, default: style.lookup_color(color_id) or \
gtk.gdk.color_parse(default)
for buf in self.textbuffer:
tag = buf.get_tag_table().lookup("inline")
tag.props.background = lookup("inline-bg", "LightSteelBlue2")
tag.props.foreground = lookup("inline-fg", "Red")
self.fill_colors = {"insert" : lookup("insert-bg", "DarkSeaGreen1"),
"delete" : lookup("insert-bg", "DarkSeaGreen1"),
"conflict": lookup("conflict-bg", "Pink"),
"replace" : lookup("replace-bg", "#ddeeff"),
"current-chunk-highlight":
lookup("current-chunk-highlight", '#ffffff')}
self.line_colors = {"insert" : lookup("insert-outline", "#77f077"),
"delete" : lookup("insert-outline", "#77f077"),
"conflict": lookup("conflict-outline", "#f0768b"),
"replace" : lookup("replace-outline", "#8bbff3")}
self.highlight_color = lookup("current-line-highlight", "#ffff00")
self.syncpoint_color = lookup("syncpoint-outline", "#555555")
for associated in self.diffmap + self.linkmap:
associated.set_color_scheme([self.fill_colors, self.line_colors])
self.queue_draw()
def on_focus_change(self):
self.keymask = 0
def on_container_switch_in_event(self, ui):
self.main_actiongroup = [a for a in ui.get_action_groups()
if a.get_name() == "MainActions"][0]
melddoc.MeldDoc.on_container_switch_in_event(self, ui)
# FIXME: If no focussed textview, action sensitivity will be unset
def on_text_filters_changed(self, app):
relevant_change = self.create_text_filters()
if relevant_change:
self.refresh_comparison()
def create_text_filters(self):
# In contrast to file filters, ordering of text filters can matter
old_active = [f.filter_string for f in self.text_filters if f.active]
new_active = [f.filter_string for f in app.text_filters if f.active]
active_filters_changed = old_active != new_active
self.text_filters = [copy.copy(f) for f in app.text_filters]
return active_filters_changed
def _disconnect_buffer_handlers(self):
for textview in self.textview:
textview.set_editable(0)
for buf in self.textbuffer:
assert hasattr(buf,"handlers")
for h in buf.handlers:
buf.disconnect(h)
def _connect_buffer_handlers(self):
for textview, buf in zip(self.textview, self.textbuffer):
textview.set_editable(buf.data.editable)
for buf in self.textbuffer:
id0 = buf.connect("insert-text", self.on_text_insert_text)
id1 = buf.connect("delete-range", self.on_text_delete_range)
id2 = buf.connect_after("insert-text", self.after_text_insert_text)
id3 = buf.connect_after("delete-range", self.after_text_delete_range)
id4 = buf.connect("notify::cursor-position",
self.on_cursor_position_changed)
buf.handlers = id0, id1, id2, id3, id4
# Abbreviations for insert and overwrite that fit in the status bar
_insert_overwrite_text = (_("INS"), _("OVR"))
# Abbreviation for line, column so that it will fit in the status bar
_line_column_text = _("Ln %i, Col %i")
def on_cursor_position_changed(self, buf, pspec, force=False):
pane = self.textbuffer.index(buf)
pos = buf.props.cursor_position
if pane == self.cursor.pane and pos == self.cursor.pos and not force:
return
self.cursor.pane, self.cursor.pos = pane, pos
cursor_it = buf.get_iter_at_offset(pos)
offset = cursor_it.get_line_offset()
line = cursor_it.get_line()
insert_overwrite = self._insert_overwrite_text[self.textview_overwrite]
line_column = self._line_column_text % (line + 1, offset + 1)
self.status_info_labels[0].set_text(insert_overwrite)
self.status_info_labels[1].set_text(line_column)
if line != self.cursor.line or force:
chunk, prev, next_ = self.linediffer.locate_chunk(pane, line)
if chunk != self.cursor.chunk or force:
self.cursor.chunk = chunk
self.emit("current-diff-changed")
if prev != self.cursor.prev or next_ != self.cursor.next or force:
self.emit("next-diff-changed", prev is not None,
next_ is not None)
prev_conflict, next_conflict = None, None
for conflict in self.linediffer.conflicts:
if prev is not None and conflict <= prev:
prev_conflict = conflict
if next_ is not None and conflict >= next_:
next_conflict = conflict
break
if prev_conflict != self.cursor.prev_conflict or \
next_conflict != self.cursor.next_conflict or force:
self.emit("next-conflict-changed", prev_conflict is not None,
next_conflict is not None)
self.cursor.prev, self.cursor.next = prev, next_
self.cursor.prev_conflict = prev_conflict
self.cursor.next_conflict = next_conflict
self.cursor.line, self.cursor.offset = line, offset
def on_current_diff_changed(self, widget, *args):
pane = self._get_focused_pane()
if pane != -1:
# While this *should* be redundant, it's possible for focus pane
# and cursor pane to be different in several situations.
pane = self.cursor.pane
chunk_id = self.cursor.chunk
if pane == -1 or chunk_id is None:
push_left, push_right, pull_left, pull_right, delete, \
copy_left, copy_right = (False,) * 7
else:
push_left, push_right, pull_left, pull_right, delete, \
copy_left, copy_right = (True,) * 7
# Push and Delete are active if the current pane has something to
# act on, and the target pane exists and is editable. Pull is
# sensitive if the source pane has something to get, and the
# current pane is editable. Copy actions are sensitive if the
# conditions for push are met, *and* there is some content in the
# target pane.
editable = self.textview[pane].get_editable()
editable_left = pane > 0 and self.textview[pane - 1].get_editable()
editable_right = pane < self.num_panes - 1 and \
self.textview[pane + 1].get_editable()
if pane == 0 or pane == 2:
chunk = self.linediffer.get_chunk(chunk_id, pane)
insert_chunk = chunk[1] == chunk[2]
delete_chunk = chunk[3] == chunk[4]
push_left = editable_left and not insert_chunk
push_right = editable_right and not insert_chunk
pull_left = pane == 2 and editable and not delete_chunk
pull_right = pane == 0 and editable and not delete_chunk
delete = editable and not insert_chunk
copy_left = push_left and not delete_chunk
copy_right = push_right and not delete_chunk
elif pane == 1:
chunk0 = self.linediffer.get_chunk(chunk_id, 1, 0)
chunk2 = None
if self.num_panes == 3:
chunk2 = self.linediffer.get_chunk(chunk_id, 1, 2)
left_mid_exists = chunk0 is not None and chunk0[1] != chunk0[2]
left_exists = chunk0 is not None and chunk0[3] != chunk0[4]
right_mid_exists = chunk2 is not None and chunk2[1] != chunk2[2]
right_exists = chunk2 is not None and chunk2[3] != chunk2[4]
push_left = editable_left and left_mid_exists
push_right = editable_right and right_mid_exists
pull_left = editable and left_exists
pull_right = editable and right_exists
delete = editable and (left_mid_exists or right_mid_exists)
copy_left = push_left and left_exists
copy_right = push_right and right_exists
self.actiongroup.get_action("PushLeft").set_sensitive(push_left)
self.actiongroup.get_action("PushRight").set_sensitive(push_right)
self.actiongroup.get_action("PullLeft").set_sensitive(pull_left)
self.actiongroup.get_action("PullRight").set_sensitive(pull_right)
self.actiongroup.get_action("Delete").set_sensitive(delete)
self.actiongroup.get_action("CopyLeftUp").set_sensitive(copy_left)
self.actiongroup.get_action("CopyLeftDown").set_sensitive(copy_left)
self.actiongroup.get_action("CopyRightUp").set_sensitive(copy_right)
self.actiongroup.get_action("CopyRightDown").set_sensitive(copy_right)
# FIXME: don't queue_draw() on everything... just on what changed
self.queue_draw()
def on_next_conflict_changed(self, doc, have_prev, have_next):
self.actiongroup.get_action("PrevConflict").set_sensitive(have_prev)
self.actiongroup.get_action("NextConflict").set_sensitive(have_next)
def on_next_conflict(self, direction):
if direction == gtk.gdk.SCROLL_DOWN:
target = self.cursor.next_conflict
else: # direction == gtk.gdk.SCROLL_UP
target = self.cursor.prev_conflict
if target is None:
return
buf = self.textbuffer[self.cursor.pane]
chunk = self.linediffer.get_chunk(target, self.cursor.pane)
buf.place_cursor(buf.get_iter_at_line(chunk[1]))
self.textview[self.cursor.pane].scroll_to_mark(buf.get_insert(), 0.1)
def push_change(self, direction):
src = self._get_focused_pane()
dst = src + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(src != -1 and self.cursor.chunk is not None)
assert(dst in (0, 1, 2))
assert(chunk is not None)
self.replace_chunk(src, dst, chunk)
def pull_change(self, direction):
dst = self._get_focused_pane()
src = dst + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(dst != -1 and self.cursor.chunk is not None)
assert(src in (0, 1, 2))
assert(chunk is not None)
self.replace_chunk(src, dst, chunk)
def copy_change(self, direction, copy_direction):
src = self._get_focused_pane()
dst = src + direction
chunk = self.linediffer.get_chunk(self.cursor.chunk, src, dst)
assert(src != -1 and self.cursor.chunk is not None)
assert(dst in (0, 1, 2))
assert(chunk is not None)
copy_up = True if copy_direction < 0 else False
self.copy_chunk(src, dst, chunk, copy_up)
def pull_all_non_conflicting_changes(self, direction):
assert direction in (-1, 1)
dst = self._get_focused_pane()
src = dst + direction
assert src in range(self.num_panes)
merger = merge.Merger()
merger.differ = self.linediffer
merger.texts = self.buffer_texts
for mergedfile in merger.merge_2_files(src, dst):
pass
self._sync_vscroll_lock = True
self.on_textbuffer__begin_user_action()
self.textbuffer[dst].set_text(mergedfile)
self.on_textbuffer__end_user_action()
def resync():
self._sync_vscroll_lock = False
self._sync_vscroll(self.scrolledwindow[src].get_vadjustment(), src)
self.scheduler.add_task(resync)
def merge_all_non_conflicting_changes(self):
dst = 1
merger = merge.Merger()
merger.differ = self.linediffer
merger.texts = self.buffer_texts
for mergedfile in merger.merge_3_files(False):
pass
self._sync_vscroll_lock = True
self.on_textbuffer__begin_user_action()
self.textbuffer[dst].set_text(mergedfile)
self.on_textbuffer__end_user_action()
def resync():
self._sync_vscroll_lock = False
self._sync_vscroll(self.scrolledwindow[0].get_vadjustment(), 0)
self.scheduler.add_task(resync)
def delete_change(self, widget):
pane = self._get_focused_pane()
chunk = self.linediffer.get_chunk(self.cursor.chunk, pane)
assert(pane != -1 and self.cursor.chunk is not None)
assert(chunk is not None)
self.delete_chunk(pane, chunk)
def _synth_chunk(self, pane0, pane1, line):
"""Returns the Same chunk that would exist at
the given location if we didn't remove Same chunks"""
# This method is a hack around our existing diffutil data structures;
# getting rid of the Same chunk removal is difficult, as several places
# have baked in the assumption of only being given changed blocks.
buf0, buf1 = self.textbuffer[pane0], self.textbuffer[pane1]
start0, end0 = 0, buf0.get_line_count() - 1
start1, end1 = 0, buf1.get_line_count() - 1
# This hack is required when pane0's prev/next chunk doesn't exist
# (i.e., is Same) between pane0 and pane1.
prev_chunk0, prev_chunk1, next_chunk0, next_chunk1 = (None,) * 4
_, prev, next_ = self.linediffer.locate_chunk(pane0, line)
if prev is not None:
while prev >= 0:
prev_chunk0 = self.linediffer.get_chunk(prev, pane0, pane1)
prev_chunk1 = self.linediffer.get_chunk(prev, pane1, pane0)
if None not in (prev_chunk0, prev_chunk1):
start0 = prev_chunk0[2]
start1 = prev_chunk1[2]
break
prev -= 1
if next_ is not None:
while next_ < self.linediffer.diff_count():
next_chunk0 = self.linediffer.get_chunk(next_, pane0, pane1)
next_chunk1 = self.linediffer.get_chunk(next_, pane1, pane0)
if None not in (next_chunk0, next_chunk1):
end0 = next_chunk0[1]
end1 = next_chunk1[1]
break
next_ += 1
return "Same", start0, end0, start1, end1
def _corresponding_chunk_line(self, chunk, line, pane, new_pane):
"""Approximates the corresponding line between panes"""
old_buf, new_buf = self.textbuffer[pane], self.textbuffer[new_pane]
# Special-case cross-pane jumps
if (pane == 0 and new_pane == 2) or (pane == 2 and new_pane == 0):
proxy = self._corresponding_chunk_line(chunk, line, pane, 1)
return self._corresponding_chunk_line(chunk, proxy, 1, new_pane)
# Either we are currently in an identifiable chunk, or we are in a Same
# chunk; if we establish the start/end of that chunk in both panes, we
# can figure out what our new offset should be.
cur_chunk = None
if chunk is not None:
cur_chunk = self.linediffer.get_chunk(chunk, pane, new_pane)
if cur_chunk is None:
cur_chunk = self._synth_chunk(pane, new_pane, line)
cur_start, cur_end, new_start, new_end = cur_chunk[1:5]
# If the new buffer's current cursor is already in the correct chunk,
# assume that we have in-progress editing, and don't move it.
cursor_it = new_buf.get_iter_at_mark(new_buf.get_insert())
cursor_line = cursor_it.get_line()
cursor_chunk, _, _ = self.linediffer.locate_chunk(new_pane, cursor_line)
if cursor_chunk is not None:
already_in_chunk = cursor_chunk == chunk
else:
cursor_chunk = self._synth_chunk(pane, new_pane, cursor_line)
already_in_chunk = cursor_chunk[3] == new_start and \
cursor_chunk[4] == new_end
if already_in_chunk:
new_line = cursor_line
else:
# Guess where to put the cursor: in the same chunk, at about the
# same place within the chunk, calculated proportionally by line.
# Insert chunks and one-line chunks are placed at the top.
if cur_end == cur_start:
chunk_offset = 0.0
else:
chunk_offset = (line - cur_start) / float(cur_end - cur_start)
new_line = new_start + int(chunk_offset * (new_end - new_start))
return new_line
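# Worked example of the proportional mapping above (hypothetical numbers):
# if the cursor is on line 15 of a chunk spanning lines 10-20 in the source
# pane (chunk_offset = 0.5) and the corresponding chunk spans lines 40-44 in
# the target pane, the cursor is placed on line 40 + int(0.5 * 4) = 42.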
def action_cycle_documents(self, widget):
pane = self._get_focused_pane()
new_pane = (pane + 1) % self.num_panes
chunk, line = self.cursor.chunk, self.cursor.line
new_line = self._corresponding_chunk_line(chunk, line, pane, new_pane)
new_buf = self.textbuffer[new_pane]
self.textview[new_pane].grab_focus()
new_buf.place_cursor(new_buf.get_iter_at_line(new_line))
self.textview[new_pane].scroll_to_mark(new_buf.get_insert(), 0.1)
def on_textview_focus_in_event(self, view, event):
self.focus_pane = view
self.findbar.textview = view
self.on_cursor_position_changed(view.get_buffer(), None, True)
self._set_save_action_sensitivity()
self._set_merge_action_sensitivity()
self.update_text_actions_sensitivity()
def on_textview_focus_out_event(self, view, event):
self._set_merge_action_sensitivity()
def _after_text_modified(self, buffer, startline, sizechange):
if self.num_panes > 1:
pane = self.textbuffer.index(buffer)
self.linediffer.change_sequence(pane, startline, sizechange,
self.buffer_filtered)
# FIXME: diff-changed signal for the current buffer would be cleaner
focused_pane = self._get_focused_pane()
if focused_pane != -1:
self.on_cursor_position_changed(self.textbuffer[focused_pane],
None, True)
self.queue_draw()
def _filter_text(self, txt):
def killit(m):
assert m.group().count("\n") == 0
if len(m.groups()):
s = m.group()
for g in m.groups():
if g:
s = s.replace(g,"")
return s
else:
return ""
try:
for filt in self.text_filters:
if filt.active:
txt = filt.filter.sub(killit, txt)
except AssertionError:
if not self.warned_bad_comparison:
misc.run_dialog(_("Filter '%s' changed the number of lines in the file. "
"Comparison will be incorrect. See the user manual for more details.") % filt.label)
self.warned_bad_comparison = True
return txt
def after_text_insert_text(self, buf, it, newtext, textlen):
start_mark = buf.get_mark("insertion-start")
starting_at = buf.get_iter_at_mark(start_mark).get_line()
buf.delete_mark(start_mark)
lines_added = it.get_line() - starting_at
self._after_text_modified(buf, starting_at, lines_added)
def after_text_delete_range(self, buffer, it0, it1):
starting_at = it0.get_line()
assert self.deleted_lines_pending != -1
self._after_text_modified(buffer, starting_at, -self.deleted_lines_pending)
self.deleted_lines_pending = -1
def load_font(self):
fontdesc = pango.FontDescription(self.prefs.get_current_font())
context = self.textview0.get_pango_context()
metrics = context.get_metrics( fontdesc, context.get_language() )
line_height_points = metrics.get_ascent() + metrics.get_descent()
self.pixels_per_line = line_height_points // 1024
self.pango_char_width = metrics.get_approximate_char_width()
tabs = pango.TabArray(10, 0)
tab_size = self.prefs.tab_size
for i in range(10):
tabs.set_tab(i, pango.TAB_LEFT, i*tab_size*self.pango_char_width)
for i in range(3):
self.textview[i].modify_font(fontdesc)
self.textview[i].set_tabs(tabs)
for i in range(2):
self.linkmap[i].queue_draw()
def on_preference_changed(self, key, value):
if key == "tab_size":
tabs = pango.TabArray(10, 0)
for i in range(10):
tabs.set_tab(i, pango.TAB_LEFT, i*value*self.pango_char_width)
for i in range(3):
self.textview[i].set_tabs(tabs)
for t in self.textview:
srcviewer.set_tab_width(t, value)
elif key == "use_custom_font" or key == "custom_font":
self.load_font()
elif key == "show_line_numbers":
for t in self.textview:
t.set_show_line_numbers( value )
elif key == "show_whitespace":
spaces_flag = srcviewer.spaces_flag if value else 0
for v in self.textview:
v.set_draw_spaces(spaces_flag)
elif key == "use_syntax_highlighting":
for i in range(self.num_panes):
srcviewer.set_highlight_syntax(self.textbuffer[i], value)
elif key == "edit_wrap_lines":
for t in self.textview:
t.set_wrap_mode(self.prefs.edit_wrap_lines)
# FIXME: On changing wrap mode, we get one redraw using cached
# coordinates, followed by a second redraw (e.g., on refocus) with
# correct coordinates. Overly-aggressive textview lazy calculation?
self.diffmap0.queue_draw()
self.diffmap1.queue_draw()
elif key == "spaces_instead_of_tabs":
for t in self.textview:
t.set_insert_spaces_instead_of_tabs(value)
elif key == "ignore_blank_lines":
self.linediffer.ignore_blanks = self.prefs.ignore_blank_lines
self.refresh_comparison()
def on_key_press_event(self, object, event):
# The correct way to handle these modifiers would be to use
# gdk_keymap_get_modifier_state method, available from GDK 3.4.
keymap = gtk.gdk.keymap_get_default()
x = self.keylookup.get(keymap.translate_keyboard_state(
event.hardware_keycode, 0, event.group)[0], 0)
if self.keymask | x != self.keymask:
self.keymask |= x
elif event.keyval == gtk.keysyms.Escape:
self.findbar.hide()
def on_key_release_event(self, object, event):
keymap = gtk.gdk.keymap_get_default()
x = self.keylookup.get(keymap.translate_keyboard_state(
event.hardware_keycode, 0, event.group)[0], 0)
if self.keymask & ~x != self.keymask:
self.keymask &= ~x
def check_save_modified(self, label=None):
response = gtk.RESPONSE_OK
modified = [b.data.modified for b in self.textbuffer]
if True in modified:
ui_path = paths.ui_dir("filediff.ui")
dialog = gnomeglade.Component(ui_path, "check_save_dialog")
dialog.widget.set_transient_for(self.widget.get_toplevel())
if label:
dialog.widget.props.text = label
# FIXME: Should be packed into dialog.widget.get_message_area(),
# but this is unbound on currently required PyGTK.
buttons = []
for i in range(self.num_panes):
button = gtk.CheckButton(self.textbuffer[i].data.label)
button.set_use_underline(False)
button.set_sensitive(modified[i])
button.set_active(modified[i])
dialog.extra_vbox.pack_start(button, expand=True, fill=True)
buttons.append(button)
dialog.extra_vbox.show_all()
response = dialog.widget.run()
try_save = [b.get_active() for b in buttons]
dialog.widget.destroy()
if response == gtk.RESPONSE_OK:
for i in range(self.num_panes):
if try_save[i]:
if not self.save_file(i):
return gtk.RESPONSE_CANCEL
elif response == gtk.RESPONSE_DELETE_EVENT:
response = gtk.RESPONSE_CANCEL
return response
def on_delete_event(self, appquit=0):
response = self.check_save_modified()
if response == gtk.RESPONSE_OK:
for h in self.app_handlers:
app.disconnect(h)
return response
#
# text buffer undo/redo
#
def on_undo_activate(self):
if self.undosequence.can_undo():
self.undosequence.undo()
def on_redo_activate(self):
if self.undosequence.can_redo():
self.undosequence.redo()
def on_textbuffer__begin_user_action(self, *buffer):
self.undosequence.begin_group()
def on_textbuffer__end_user_action(self, *buffer):
self.undosequence.end_group()
def on_text_insert_text(self, buf, it, text, textlen):
text = text_type(text, 'utf8')
self.undosequence.add_action(
meldbuffer.BufferInsertionAction(buf, it.get_offset(), text))
buf.create_mark("insertion-start", it, True)
def on_text_delete_range(self, buf, it0, it1):
text = text_type(buf.get_text(it0, it1, False), 'utf8')
assert self.deleted_lines_pending == -1
self.deleted_lines_pending = it1.get_line() - it0.get_line()
self.undosequence.add_action(
meldbuffer.BufferDeletionAction(buf, it0.get_offset(), text))
def on_undo_checkpointed(self, undosequence, buf, checkpointed):
self.set_buffer_modified(buf, not checkpointed)
#
#
#
def open_external(self):
pane = self._get_focused_pane()
if pane >= 0:
if self.textbuffer[pane].data.filename:
pos = self.textbuffer[pane].props.cursor_position
cursor_it = self.textbuffer[pane].get_iter_at_offset(pos)
line = cursor_it.get_line() + 1
self._open_files([self.textbuffer[pane].data.filename], line)
def update_text_actions_sensitivity(self, *args):
widget = self.focus_pane
if not widget:
cut, copy, paste = False, False, False
else:
cut = copy = widget.get_buffer().get_has_selection()
# Ideally, this would check whether the clipboard included
# something pasteable. However, there is no changed signal.
# widget.get_clipboard(
# gtk.gdk.SELECTION_CLIPBOARD).wait_is_text_available()
paste = widget.get_editable()
for action, sens in zip(("Cut", "Copy", "Paste"), (cut, copy, paste)):
self.main_actiongroup.get_action(action).set_sensitive(sens)
def get_selected_text(self):
"""Returns selected text of active pane"""
pane = self._get_focused_pane()
if pane != -1:
buf = self.textbuffer[pane]
sel = buf.get_selection_bounds()
if sel:
return text_type(buf.get_text(sel[0], sel[1], False), 'utf8')
return None
def on_find_activate(self, *args):
selected_text = self.get_selected_text()
self.findbar.start_find(self.focus_pane, selected_text)
self.keymask = 0
def on_replace_activate(self, *args):
selected_text = self.get_selected_text()
self.findbar.start_replace(self.focus_pane, selected_text)
self.keymask = 0
def on_find_next_activate(self, *args):
self.findbar.start_find_next(self.focus_pane)
def on_find_previous_activate(self, *args):
self.findbar.start_find_previous(self.focus_pane)
def on_filediff__key_press_event(self, entry, event):
if event.keyval == gtk.keysyms.Escape:
self.findbar.hide()
def on_scrolledwindow__size_allocate(self, scrolledwindow, allocation):
index = self.scrolledwindow.index(scrolledwindow)
if index == 0 or index == 1:
self.linkmap[0].queue_draw()
if index == 1 or index == 2:
self.linkmap[1].queue_draw()
def on_textview_popup_menu(self, textview):
self.popup_menu.popup(None, None, None, 0,
gtk.get_current_event_time())
return True
def on_textview_button_press_event(self, textview, event):
if event.button == 3:
textview.grab_focus()
self.popup_menu.popup(None, None, None, event.button, event.time)
return True
return False
def on_textview_toggle_overwrite(self, view):
self.textview_overwrite = not self.textview_overwrite
for v,h in zip(self.textview, self.textview_overwrite_handlers):
v.disconnect(h)
if v != view:
v.emit("toggle-overwrite")
self.textview_overwrite_handlers = [ t.connect("toggle-overwrite", self.on_textview_toggle_overwrite) for t in self.textview ]
self.on_cursor_position_changed(view.get_buffer(), None, True)
#
# text buffer loading/saving
#
def set_labels(self, labels):
labels = labels[:len(self.textbuffer)]
for label, buf in zip(labels, self.textbuffer):
if label:
buf.data.label = label
def set_merge_output_file(self, filename):
if len(self.textbuffer) < 2:
return
buf = self.textbuffer[1]
buf.data.savefile = os.path.abspath(filename)
buf.data.set_label(filename)
self.set_buffer_writable(buf, os.access(buf.data.savefile, os.W_OK))
self.fileentry[1].set_filename(os.path.abspath(filename))
self.recompute_label()
def _set_save_action_sensitivity(self):
pane = self._get_focused_pane()
modified = False if pane == -1 else self.textbuffer[pane].data.modified
if self.main_actiongroup:
self.main_actiongroup.get_action("Save").set_sensitive(modified)
any_modified = any(b.data.modified for b in self.textbuffer)
self.actiongroup.get_action("SaveAll").set_sensitive(any_modified)
def recompute_label(self):
self._set_save_action_sensitivity()
filenames = []
for i in range(self.num_panes):
filenames.append(self.textbuffer[i].data.label)
shortnames = misc.shorten_names(*filenames)
for i in range(self.num_panes):
stock = None
if self.textbuffer[i].data.modified:
shortnames[i] += "*"
if self.textbuffer[i].data.writable:
stock = gtk.STOCK_SAVE
else:
stock = gtk.STOCK_SAVE_AS
if stock:
self.statusimage[i].show()
self.statusimage[i].set_from_stock(stock, gtk.ICON_SIZE_MENU)
self.statusimage[i].set_size_request(self.diffmap[0].size_request()[0],-1)
else:
self.statusimage[i].hide()
self.label_text = " : ".join(shortnames)
self.tooltip_text = self.label_text
self.label_changed()
def set_files(self, files):
"""Set num panes to len(files) and load each file given.
If an element is None, the text of a pane is left as is.
"""
self._disconnect_buffer_handlers()
files = list(files)
for i, f in enumerate(files):
if not f:
continue
if not isinstance(f, unicode):
files[i] = f = f.decode('utf8')
absfile = os.path.abspath(f)
self.fileentry[i].set_filename(absfile)
self.fileentry[i].prepend_history(absfile)
self.textbuffer[i].reset_buffer(absfile)
self.msgarea_mgr[i].clear()
self.recompute_label()
self.textview[len(files) >= 2].grab_focus()
self._connect_buffer_handlers()
self.scheduler.add_task(self._set_files_internal(files))
def get_comparison(self):
files = [b.data.filename for b in self.textbuffer[:self.num_panes]]
return recent.TYPE_FILE, files
def _load_files(self, files, textbuffers):
self.undosequence.clear()
yield _("[%s] Set num panes") % self.label_text
self.set_num_panes( len(files) )
self._disconnect_buffer_handlers()
self.linediffer.clear()
self.queue_draw()
try_codecs = self.prefs.text_codecs.split() or ['utf_8', 'utf_16']
yield _("[%s] Opening files") % self.label_text
tasks = []
def add_dismissable_msg(pane, icon, primary, secondary):
msgarea = self.msgarea_mgr[pane].new_from_text_and_icon(
icon, primary, secondary)
button = msgarea.add_stock_button_with_text(_("Hi_de"),
gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
msgarea.connect("response",
lambda *args: self.msgarea_mgr[pane].clear())
msgarea.show_all()
return msgarea
for pane, filename in enumerate(files):
buf = textbuffers[pane]
if filename:
try:
handle = io.open(filename, "r", encoding=try_codecs[0])
task = TaskEntry(filename, handle, buf, try_codecs[:],
pane, False)
tasks.append(task)
except (IOError, LookupError) as e:
buf.delete(*buf.get_bounds())
add_dismissable_msg(pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"), str(e))
yield _("[%s] Reading files") % self.label_text
while len(tasks):
for t in tasks[:]:
try:
nextbit = t.file.read(4096)
if nextbit.find("\x00") != -1:
t.buf.delete(*t.buf.get_bounds())
filename = gobject.markup_escape_text(t.filename)
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"),
_("%s appears to be a binary file.") % filename)
tasks.remove(t)
continue
except ValueError as err:
t.codec.pop(0)
if len(t.codec):
t.buf.delete(*t.buf.get_bounds())
t.file = io.open(t.filename, "r", encoding=t.codec[0])
else:
t.buf.delete(*t.buf.get_bounds())
filename = gobject.markup_escape_text(t.filename)
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"),
_("%s is not in encodings: %s") %
(filename, try_codecs))
tasks.remove(t)
except IOError as ioerr:
add_dismissable_msg(t.pane, gtk.STOCK_DIALOG_ERROR,
_("Could not read file"), str(ioerr))
tasks.remove(t)
else:
# The handling here avoids inserting split CR/LF pairs into
# GtkTextBuffers; this is relevant only when universal
# newline support is unavailable or broken.
if t.was_cr:
nextbit = "\r" + nextbit
t.was_cr = False
if len(nextbit):
if nextbit[-1] == "\r" and len(nextbit) > 1:
t.was_cr = True
nextbit = nextbit[0:-1]
t.buf.insert(t.buf.get_end_iter(), nextbit)
else:
if t.buf.data.savefile:
writable = os.access(t.buf.data.savefile, os.W_OK)
else:
writable = os.access(t.filename, os.W_OK)
self.set_buffer_writable(t.buf, writable)
t.buf.data.encoding = t.codec[0]
if hasattr(t.file, "newlines"):
t.buf.data.newlines = t.file.newlines
tasks.remove(t)
yield 1
for b in self.textbuffer:
self.undosequence.checkpoint(b)
def _diff_files(self, refresh=False):
yield _("[%s] Computing differences") % self.label_text
texts = self.buffer_filtered[:self.num_panes]
step = self.linediffer.set_sequences_iter(texts)
while next(step) is None:
yield 1
if not refresh:
chunk, prev, next_ = self.linediffer.locate_chunk(1, 0)
self.cursor.next = chunk
if self.cursor.next is None:
self.cursor.next = next_
for buf in self.textbuffer:
buf.place_cursor(buf.get_start_iter())
if self.cursor.next is not None:
self.scheduler.add_task(
lambda: self.next_diff(gtk.gdk.SCROLL_DOWN, True), True)
else:
buf = self.textbuffer[1 if self.num_panes > 1 else 0]
self.on_cursor_position_changed(buf, None, True)
self.queue_draw()
self._connect_buffer_handlers()
self._set_merge_action_sensitivity()
langs = []
for i in range(self.num_panes):
filename = self.textbuffer[i].data.filename
if filename:
langs.append(srcviewer.get_language_from_file(filename))
else:
langs.append(None)
# If we have only one identified language then we assume that all of
# the files are actually of that type.
real_langs = [l for l in langs if l]
if real_langs and real_langs.count(real_langs[0]) == len(real_langs):
langs = (real_langs[0],) * len(langs)
for i in range(self.num_panes):
srcviewer.set_language(self.textbuffer[i], langs[i])
srcviewer.set_highlight_syntax(self.textbuffer[i],
self.prefs.use_syntax_highlighting)
def _set_files_internal(self, files):
for i in self._load_files(files, self.textbuffer):
yield i
for i in self._diff_files():
yield i
def refresh_comparison(self):
"""Refresh the view by clearing and redoing all comparisons"""
self._disconnect_buffer_handlers()
self.linediffer.clear()
for buf in self.textbuffer:
tag = buf.get_tag_table().lookup("inline")
buf.remove_tag(tag, buf.get_start_iter(), buf.get_end_iter())
self.queue_draw()
self.scheduler.add_task(self._diff_files(refresh=True))
def _set_merge_action_sensitivity(self):
pane = self._get_focused_pane()
if pane != -1:
editable = self.textview[pane].get_editable()
mergeable = self.linediffer.has_mergeable_changes(pane)
else:
editable = False
mergeable = (False, False)
self.actiongroup.get_action("MergeFromLeft").set_sensitive(mergeable[0] and editable)
self.actiongroup.get_action("MergeFromRight").set_sensitive(mergeable[1] and editable)
if self.num_panes == 3 and self.textview[1].get_editable():
mergeable = self.linediffer.has_mergeable_changes(1)
else:
mergeable = (False, False)
self.actiongroup.get_action("MergeAll").set_sensitive(mergeable[0] or mergeable[1])
def on_diffs_changed(self, linediffer, chunk_changes):
removed_chunks, added_chunks, modified_chunks = chunk_changes
# We need to clear removed and modified chunks, and need to
# re-highlight added and modified chunks.
need_clearing = sorted(list(removed_chunks))
need_highlighting = sorted(list(added_chunks) + [modified_chunks])
alltags = [b.get_tag_table().lookup("inline") for b in self.textbuffer]
for chunk in need_clearing:
for i, c in enumerate(chunk):
if not c or c[0] != "replace":
continue
to_idx = 2 if i == 1 else 0
bufs = self.textbuffer[1], self.textbuffer[to_idx]
tags = alltags[1], alltags[to_idx]
starts = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[1], c[3]))]
ends = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[2], c[4]))]
bufs[0].remove_tag(tags[0], starts[0], ends[0])
bufs[1].remove_tag(tags[1], starts[1], ends[1])
for chunk in need_highlighting:
clear = chunk == modified_chunks
for i, c in enumerate(chunk):
if not c or c[0] != "replace":
continue
to_idx = 2 if i == 1 else 0
bufs = self.textbuffer[1], self.textbuffer[to_idx]
tags = alltags[1], alltags[to_idx]
starts = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[1], c[3]))]
ends = [b.get_iter_at_line_or_eof(l) for b, l in
zip(bufs, (c[2], c[4]))]
# We don't use self.buffer_texts here, as removing line
# breaks messes with inline highlighting in CRLF cases
text1 = bufs[0].get_text(starts[0], ends[0], False)
text1 = text_type(text1, 'utf8')
textn = bufs[1].get_text(starts[1], ends[1], False)
textn = text_type(textn, 'utf8')
# Bail on long sequences, rather than try a slow comparison
inline_limit = 10000
if len(text1) + len(textn) > inline_limit and \
not self.force_highlight:
for i in range(2):
bufs[i].apply_tag(tags[i], starts[i], ends[i])
self._prompt_long_highlighting()
continue
def apply_highlight(bufs, tags, starts, ends, texts, matches):
starts = [bufs[0].get_iter_at_mark(starts[0]),
bufs[1].get_iter_at_mark(starts[1])]
ends = [bufs[0].get_iter_at_mark(ends[0]),
bufs[1].get_iter_at_mark(ends[1])]
text1 = bufs[0].get_text(starts[0], ends[0], False)
text1 = text_type(text1, 'utf8')
textn = bufs[1].get_text(starts[1], ends[1], False)
textn = text_type(textn, 'utf8')
if texts != (text1, textn):
return
                    # Keep non-equal matches, plus equal matches shorter
                    # than 3 characters so the inline highlight is not
                    # fragmented; longer equal runs are left unhighlighted.
matches = [m for m in matches if m.tag != "equal" or
(m.end_a - m.start_a < 3) or
(m.end_b - m.start_b < 3)]
for i in range(2):
start, end = starts[i].copy(), starts[i].copy()
offset = start.get_offset()
for o in matches:
start.set_offset(offset + o[1 + 2 * i])
end.set_offset(offset + o[2 + 2 * i])
bufs[i].apply_tag(tags[i], start, end)
if clear:
bufs[0].remove_tag(tags[0], starts[0], ends[0])
bufs[1].remove_tag(tags[1], starts[1], ends[1])
starts = [bufs[0].create_mark(None, starts[0], True),
bufs[1].create_mark(None, starts[1], True)]
ends = [bufs[0].create_mark(None, ends[0], True),
bufs[1].create_mark(None, ends[1], True)]
match_cb = functools.partial(apply_highlight, bufs, tags,
starts, ends, (text1, textn))
self._cached_match.match(text1, textn, match_cb)
self._cached_match.clean(self.linediffer.diff_count())
self._set_merge_action_sensitivity()
if self.linediffer.sequences_identical():
error_message = True in [m.has_message() for m in self.msgarea_mgr]
if self.num_panes == 1 or error_message:
return
for index, mgr in enumerate(self.msgarea_mgr):
secondary_text = None
# TODO: Currently this only checks to see whether text filters
# are active, and may be altering the comparison. It would be
# better if we only showed this message if the filters *did*
# change the text in question.
active_filters = any([f.active for f in self.text_filters])
if active_filters:
secondary_text = _("Text filters are being used, and may "
"be masking differences between files. "
"Would you like to compare the "
"unfiltered files?")
msgarea = mgr.new_from_text_and_icon(gtk.STOCK_INFO,
_("Files are identical"),
secondary_text)
mgr.set_msg_id(FileDiff.MSG_SAME)
button = msgarea.add_stock_button_with_text(_("Hide"),
gtk.STOCK_CLOSE,
gtk.RESPONSE_CLOSE)
if index == 0:
button.props.label = _("Hi_de")
if active_filters:
msgarea.add_button(_("Show without filters"),
gtk.RESPONSE_OK)
msgarea.connect("response", self.on_msgarea_identical_response)
msgarea.show_all()
else:
for m in self.msgarea_mgr:
if m.get_msg_id() == FileDiff.MSG_SAME:
m.clear()
def _prompt_long_highlighting(self):
def on_msgarea_highlighting_response(msgarea, respid):
for mgr in self.msgarea_mgr:
mgr.clear()
if respid == gtk.RESPONSE_OK:
self.force_highlight = True
self.refresh_comparison()
for index, mgr in enumerate(self.msgarea_mgr):
msgarea = mgr.new_from_text_and_icon(
gtk.STOCK_INFO,
_("Change highlighting incomplete"),
_("Some changes were not highlighted because they were too "
"large. You can force Meld to take longer to highlight "
"larger changes, though this may be slow."))
mgr.set_msg_id(FileDiff.MSG_SLOW_HIGHLIGHT)
button = msgarea.add_stock_button_with_text(
_("Hide"), gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE)
if index == 0:
button.props.label = _("Hi_de")
button = msgarea.add_button(
_("Keep highlighting"), gtk.RESPONSE_OK)
if index == 0:
button.props.label = _("_Keep highlighting")
msgarea.connect("response",
on_msgarea_highlighting_response)
msgarea.show_all()
def on_msgarea_identical_response(self, msgarea, respid):
for mgr in self.msgarea_mgr:
mgr.clear()
if respid == gtk.RESPONSE_OK:
self.text_filters = []
self.refresh_comparison()
def on_textview_expose_event(self, textview, event):
if self.num_panes == 1:
return
if event.window != textview.get_window(gtk.TEXT_WINDOW_TEXT) \
and event.window != textview.get_window(gtk.TEXT_WINDOW_LEFT):
return
# Hack to redraw the line number gutter used by post-2.10 GtkSourceView
if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT) and \
self.in_nested_textview_gutter_expose:
self.in_nested_textview_gutter_expose = False
return
visible = textview.get_visible_rect()
pane = self.textview.index(textview)
textbuffer = textview.get_buffer()
area = event.area
x, y = textview.window_to_buffer_coords(gtk.TEXT_WINDOW_WIDGET,
area.x, area.y)
bounds = (textview.get_line_num_for_y(y),
textview.get_line_num_for_y(y + area.height + 1))
width, height = textview.allocation.width, textview.allocation.height
context = event.window.cairo_create()
context.rectangle(area.x, area.y, area.width, area.height)
context.clip()
context.set_line_width(1.0)
for change in self.linediffer.single_changes(pane, bounds):
ypos0 = textview.get_y_for_line_num(change[1]) - visible.y
ypos1 = textview.get_y_for_line_num(change[2]) - visible.y
context.rectangle(-0.5, ypos0 - 0.5, width + 1, ypos1 - ypos0)
if change[1] != change[2]:
context.set_source_color(self.fill_colors[change[0]])
context.fill_preserve()
if self.linediffer.locate_chunk(pane, change[1])[0] == self.cursor.chunk:
h = self.fill_colors['current-chunk-highlight']
context.set_source_rgba(
h.red_float, h.green_float, h.blue_float, 0.5)
context.fill_preserve()
context.set_source_color(self.line_colors[change[0]])
context.stroke()
if textview.is_focus() and self.cursor.line is not None:
it = textbuffer.get_iter_at_line(self.cursor.line)
ypos, line_height = textview.get_line_yrange(it)
context.save()
context.rectangle(0, ypos - visible.y, width, line_height)
context.clip()
context.set_source_color(self.highlight_color)
context.paint_with_alpha(0.25)
context.restore()
for syncpoint in [p[pane] for p in self.syncpoints]:
if bounds[0] <= syncpoint <= bounds[1]:
ypos = textview.get_y_for_line_num(syncpoint) - visible.y
context.rectangle(-0.5, ypos - 0.5, width + 1, 1)
context.set_source_color(self.syncpoint_color)
context.stroke()
current_time = glib.get_current_time()
new_anim_chunks = []
for c in self.animating_chunks[pane]:
percent = min(1.0, (current_time - c.start_time) / c.duration)
rgba_pairs = zip(c.start_rgba, c.end_rgba)
rgba = [s + (e - s) * percent for s, e in rgba_pairs]
it = textbuffer.get_iter_at_mark(c.start_mark)
ystart, _ = textview.get_line_yrange(it)
it = textbuffer.get_iter_at_mark(c.end_mark)
yend, _ = textview.get_line_yrange(it)
if ystart == yend:
ystart -= 1
context.set_source_rgba(*rgba)
context.rectangle(0, ystart - visible.y, width, yend - ystart)
context.fill()
if current_time <= c.start_time + c.duration:
new_anim_chunks.append(c)
else:
textbuffer.delete_mark(c.start_mark)
textbuffer.delete_mark(c.end_mark)
self.animating_chunks[pane] = new_anim_chunks
if self.animating_chunks[pane] and self.anim_source_id[pane] is None:
def anim_cb():
textview.queue_draw()
return True
# Using timeout_add interferes with recalculation of inline
# highlighting; this mechanism could be improved.
self.anim_source_id[pane] = gobject.idle_add(anim_cb)
elif not self.animating_chunks[pane] and self.anim_source_id[pane]:
gobject.source_remove(self.anim_source_id[pane])
self.anim_source_id[pane] = None
if event.window == textview.get_window(gtk.TEXT_WINDOW_LEFT):
self.in_nested_textview_gutter_expose = True
textview.emit("expose-event", event)
def _get_filename_for_saving(self, title ):
dialog = gtk.FileChooserDialog(title,
parent=self.widget.get_toplevel(),
action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OK, gtk.RESPONSE_OK) )
dialog.set_default_response(gtk.RESPONSE_OK)
response = dialog.run()
filename = None
if response == gtk.RESPONSE_OK:
filename = dialog.get_filename()
dialog.destroy()
if filename:
if os.path.exists(filename):
response = misc.run_dialog(
_('"%s" exists!\nOverwrite?') % os.path.basename(filename),
parent = self,
buttonstype = gtk.BUTTONS_YES_NO)
if response == gtk.RESPONSE_NO:
return None
return filename
return None
def _save_text_to_filename(self, filename, text):
try:
open(filename, "wb").write(text)
except IOError as e:
misc.run_dialog(
_("Error writing to %s\n\n%s.") % (filename, e),
self, gtk.MESSAGE_ERROR, gtk.BUTTONS_OK)
return False
return True
def save_file(self, pane, saveas=False):
buf = self.textbuffer[pane]
bufdata = buf.data
if saveas or not (bufdata.filename or bufdata.savefile) \
or not bufdata.writable:
if pane == 0:
prompt = _("Save Left Pane As")
elif pane == 1 and self.num_panes == 3:
prompt = _("Save Middle Pane As")
else:
prompt = _("Save Right Pane As")
filename = self._get_filename_for_saving(prompt)
if filename:
bufdata.filename = bufdata.label = os.path.abspath(filename)
bufdata.savefile = None
self.fileentry[pane].set_filename(bufdata.filename)
self.fileentry[pane].prepend_history(bufdata.filename)
else:
return False
start, end = buf.get_bounds()
text = text_type(buf.get_text(start, end, False), 'utf8')
if bufdata.newlines:
if isinstance(bufdata.newlines, basestring):
if bufdata.newlines != '\n':
text = text.replace("\n", bufdata.newlines)
else:
buttons = {
'\n': ("UNIX (LF)", 0),
'\r\n': ("DOS/Windows (CR-LF)", 1),
'\r': ("Mac OS (CR)", 2),
}
newline = misc.run_dialog( _("This file '%s' contains a mixture of line endings.\n\nWhich format would you like to use?") % bufdata.label,
self, gtk.MESSAGE_WARNING, buttonstype=gtk.BUTTONS_CANCEL,
extrabuttons=[ buttons[b] for b in bufdata.newlines ] )
if newline < 0:
return
for k,v in buttons.items():
if v[1] == newline:
bufdata.newlines = k
if k != '\n':
text = text.replace('\n', k)
break
if bufdata.encoding:
try:
text = text.encode(bufdata.encoding)
except UnicodeEncodeError:
if misc.run_dialog(
_("'%s' contains characters not encodable with '%s'\nWould you like to save as UTF-8?") % (bufdata.label, bufdata.encoding),
self, gtk.MESSAGE_ERROR, gtk.BUTTONS_YES_NO) != gtk.RESPONSE_YES:
return False
save_to = bufdata.savefile or bufdata.filename
if self._save_text_to_filename(save_to, text):
self.emit("file-changed", save_to)
self.undosequence.checkpoint(buf)
return True
else:
return False
def make_patch(self, *extra):
dialog = patchdialog.PatchDialog(self)
dialog.run()
def set_buffer_writable(self, buf, writable):
buf.data.writable = writable
self.recompute_label()
index = self.textbuffer.index(buf)
self.readonlytoggle[index].props.visible = not writable
self.set_buffer_editable(buf, writable)
def set_buffer_modified(self, buf, yesno):
buf.data.modified = yesno
self.recompute_label()
def set_buffer_editable(self, buf, editable):
buf.data.editable = editable
index = self.textbuffer.index(buf)
self.readonlytoggle[index].set_active(not editable)
self.textview[index].set_editable(editable)
self.on_cursor_position_changed(buf, None, True)
for linkmap in self.linkmap:
linkmap.queue_draw()
def save(self):
pane = self._get_focused_pane()
if pane >= 0:
self.save_file(pane)
def save_as(self):
pane = self._get_focused_pane()
if pane >= 0:
self.save_file(pane, True)
def on_save_all_activate(self, action):
for i in range(self.num_panes):
if self.textbuffer[i].data.modified:
self.save_file(i)
def on_fileentry_activate(self, entry):
if self.check_save_modified() != gtk.RESPONSE_CANCEL:
entries = self.fileentry[:self.num_panes]
paths = [e.get_full_path() for e in entries]
paths = [p.decode('utf8') for p in paths]
self.set_files(paths)
return True
def _get_focused_pane(self):
for i in range(self.num_panes):
if self.textview[i].is_focus():
return i
return -1
def on_revert_activate(self, *extra):
response = gtk.RESPONSE_OK
unsaved = [b.data.label for b in self.textbuffer if b.data.modified]
if unsaved:
ui_path = paths.ui_dir("filediff.ui")
dialog = gnomeglade.Component(ui_path, "revert_dialog")
dialog.widget.set_transient_for(self.widget.get_toplevel())
# FIXME: Should be packed into dialog.widget.get_message_area(),
# but this is unbound on currently required PyGTK.
filelist = "\n".join(["\t" + f for f in unsaved])
dialog.widget.props.secondary_text += filelist
response = dialog.widget.run()
dialog.widget.destroy()
if response == gtk.RESPONSE_OK:
files = [b.data.filename for b in self.textbuffer[:self.num_panes]]
self.set_files(files)
def on_refresh_activate(self, *extra):
self.refresh_comparison()
def queue_draw(self, junk=None):
for t in self.textview:
t.queue_draw()
for i in range(self.num_panes-1):
self.linkmap[i].queue_draw()
self.diffmap0.queue_draw()
self.diffmap1.queue_draw()
def on_action_lock_scrolling_toggled(self, action):
self.toggle_scroll_lock(action.get_active())
def on_lock_button_toggled(self, button):
self.toggle_scroll_lock(not button.get_active())
def toggle_scroll_lock(self, locked):
icon_name = "meld-locked" if locked else "meld-unlocked"
self.lock_button_image.props.icon_name = icon_name
self.lock_button.set_active(not locked)
self.actiongroup.get_action("LockScrolling").set_active(locked)
self._scroll_lock = not locked
def on_readonly_button_toggled(self, button):
index = self.readonlytoggle.index(button)
buf = self.textbuffer[index]
self.set_buffer_editable(buf, not button.get_active())
#
# scrollbars
#
def _sync_hscroll(self, adjustment):
if self._sync_hscroll_lock or self._scroll_lock:
return
self._sync_hscroll_lock = True
val = adjustment.get_value()
for sw in self.scrolledwindow[:self.num_panes]:
adj = sw.get_hadjustment()
if adj is not adjustment:
adj.set_value(val)
self._sync_hscroll_lock = False
def _sync_vscroll(self, adjustment, master):
# only allow one scrollbar to be here at a time
if self._sync_vscroll_lock:
return
if not self._scroll_lock and (self.keymask & MASK_SHIFT) == 0:
self._sync_vscroll_lock = True
syncpoint = 0.5
# the line to search for in the 'master' text
master_y = adjustment.value + adjustment.page_size * syncpoint
it = self.textview[master].get_line_at_y(int(master_y))[0]
line_y, height = self.textview[master].get_line_yrange(it)
line = it.get_line() + ((master_y-line_y)/height)
# scrollbar influence 0->1->2 or 0<-1->2 or 0<-1<-2
scrollbar_influence = ((1, 2), (0, 2), (1, 0))
for i in scrollbar_influence[master][:self.num_panes - 1]:
adj = self.scrolledwindow[i].get_vadjustment()
mbegin, mend = 0, self.textbuffer[master].get_line_count()
obegin, oend = 0, self.textbuffer[i].get_line_count()
# look for the chunk containing 'line'
for c in self.linediffer.pair_changes(master, i):
if c[1] >= line:
mend = c[1]
oend = c[3]
break
elif c[2] >= line:
mbegin, mend = c[1], c[2]
obegin, oend = c[3], c[4]
break
else:
mbegin = c[2]
obegin = c[4]
fraction = (line - mbegin) / ((mend - mbegin) or 1)
other_line = (obegin + fraction * (oend - obegin))
it = self.textbuffer[i].get_iter_at_line(int(other_line))
val, height = self.textview[i].get_line_yrange(it)
val -= (adj.page_size) * syncpoint
val += (other_line-int(other_line)) * height
val = min(max(val, adj.lower), adj.upper - adj.page_size)
adj.set_value( val )
# If we just changed the central bar, make it the master
if i == 1:
master, line = 1, other_line
self._sync_vscroll_lock = False
for lm in self.linkmap:
if lm.window:
lm.window.invalidate_rect(None, True)
lm.window.process_updates(True)
def set_num_panes(self, n):
if n != self.num_panes and n in (1,2,3):
self.num_panes = n
toshow = self.scrolledwindow[:n] + self.fileentry[:n]
toshow += self.vbox[:n] + self.msgarea_mgr[:n]
toshow += self.linkmap[:n-1] + self.diffmap[:n]
toshow += self.selector_hbox[:n]
for widget in toshow:
widget.show()
tohide = self.statusimage + self.scrolledwindow[n:] + self.fileentry[n:]
tohide += self.vbox[n:] + self.msgarea_mgr[n:]
tohide += self.linkmap[n-1:] + self.diffmap[n:]
tohide += self.selector_hbox[n:]
for widget in tohide:
widget.hide()
right_attach = 2 * n
if self.findbar.widget in self.table:
self.table.remove(self.findbar.widget)
self.table.attach(self.findbar.widget, 1, right_attach, 2, 3,
gtk.FILL, gtk.FILL)
self.actiongroup.get_action("MakePatch").set_sensitive(n > 1)
self.actiongroup.get_action("CycleDocuments").set_sensitive(n > 1)
def coords_iter(i):
buf_index = 2 if i == 1 and self.num_panes == 3 else i
get_end_iter = self.textbuffer[buf_index].get_end_iter
get_iter_at_line = self.textbuffer[buf_index].get_iter_at_line
get_line_yrange = self.textview[buf_index].get_line_yrange
def coords_by_chunk():
y, h = get_line_yrange(get_end_iter())
max_y = float(y + h)
for c in self.linediffer.single_changes(i):
y0, _ = get_line_yrange(get_iter_at_line(c[1]))
if c[1] == c[2]:
y, h = y0, 0
else:
y, h = get_line_yrange(get_iter_at_line(c[2] - 1))
yield c[0], y0 / max_y, (y + h) / max_y
return coords_by_chunk
for (w, i) in zip(self.diffmap, (0, self.num_panes - 1)):
scroll = self.scrolledwindow[i].get_vscrollbar()
w.setup(scroll, coords_iter(i), [self.fill_colors, self.line_colors])
for (w, i) in zip(self.linkmap, (0, self.num_panes - 2)):
w.associate(self, self.textview[i], self.textview[i + 1])
for i in range(self.num_panes):
if self.textbuffer[i].data.modified:
self.statusimage[i].show()
self.queue_draw()
self.recompute_label()
def next_diff(self, direction, centered=False):
pane = self._get_focused_pane()
if pane == -1:
if len(self.textview) > 1:
pane = 1
else:
pane = 0
buf = self.textbuffer[pane]
if direction == gtk.gdk.SCROLL_DOWN:
target = self.cursor.next
else: # direction == gtk.gdk.SCROLL_UP
target = self.cursor.prev
if target is None:
return
c = self.linediffer.get_chunk(target, pane)
if c:
# Warp the cursor to the first line of next chunk
if self.cursor.line != c[1]:
buf.place_cursor(buf.get_iter_at_line(c[1]))
if centered:
self.textview[pane].scroll_to_mark(buf.get_insert(), 0.0,
True)
else:
self.textview[pane].scroll_to_mark(buf.get_insert(), 0.2)
def copy_chunk(self, src, dst, chunk, copy_up):
b0, b1 = self.textbuffer[src], self.textbuffer[dst]
start = b0.get_iter_at_line_or_eof(chunk[1])
end = b0.get_iter_at_line_or_eof(chunk[2])
t0 = text_type(b0.get_text(start, end, False), 'utf8')
if copy_up:
if chunk[2] >= b0.get_line_count() and \
chunk[3] < b1.get_line_count():
# TODO: We need to insert a linebreak here, but there is no
# way to be certain what kind of linebreak to use.
t0 = t0 + "\n"
dst_start = b1.get_iter_at_line_or_eof(chunk[3])
mark0 = b1.create_mark(None, dst_start, True)
new_end = b1.insert_at_line(chunk[3], t0)
else: # copy down
dst_start = b1.get_iter_at_line_or_eof(chunk[4])
mark0 = b1.create_mark(None, dst_start, True)
new_end = b1.insert_at_line(chunk[4], t0)
mark1 = b1.create_mark(None, new_end, True)
# FIXME: If the inserted chunk ends up being an insert chunk, then
# this animation is not visible; this happens often in three-way diffs
rgba0 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (1.0,)
rgba1 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[dst].append(anim)
def replace_chunk(self, src, dst, chunk):
b0, b1 = self.textbuffer[src], self.textbuffer[dst]
src_start = b0.get_iter_at_line_or_eof(chunk[1])
src_end = b0.get_iter_at_line_or_eof(chunk[2])
dst_start = b1.get_iter_at_line_or_eof(chunk[3])
dst_end = b1.get_iter_at_line_or_eof(chunk[4])
t0 = text_type(b0.get_text(src_start, src_end, False), 'utf8')
mark0 = b1.create_mark(None, dst_start, True)
self.on_textbuffer__begin_user_action()
b1.delete(dst_start, dst_end)
new_end = b1.insert_at_line(chunk[3], t0)
self.on_textbuffer__end_user_action()
mark1 = b1.create_mark(None, new_end, True)
# FIXME: If the inserted chunk ends up being an insert chunk, then
# this animation is not visible; this happens often in three-way diffs
rgba0 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (1.0,)
rgba1 = misc.gdk_to_cairo_color(self.fill_colors['insert']) + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[dst].append(anim)
def delete_chunk(self, src, chunk):
b0 = self.textbuffer[src]
it = b0.get_iter_at_line_or_eof(chunk[1])
if chunk[2] >= b0.get_line_count():
it.backward_char()
b0.delete(it, b0.get_iter_at_line_or_eof(chunk[2]))
mark0 = b0.create_mark(None, it, True)
mark1 = b0.create_mark(None, it, True)
# TODO: Need a more specific colour here; conflict is wrong
rgba0 = misc.gdk_to_cairo_color(self.fill_colors['conflict']) + (1.0,)
rgba1 = misc.gdk_to_cairo_color(self.fill_colors['conflict']) + (0.0,)
anim = TextviewLineAnimation(mark0, mark1, rgba0, rgba1, 0.5)
self.animating_chunks[src].append(anim)
def add_sync_point(self, action):
pane = self._get_focused_pane()
if pane == -1:
return
# Find a non-complete syncpoint, or create a new one
if self.syncpoints and None in self.syncpoints[-1]:
syncpoint = self.syncpoints.pop()
else:
syncpoint = [None] * self.num_panes
cursor_it = self.textbuffer[pane].get_iter_at_mark(
self.textbuffer[pane].get_insert())
syncpoint[pane] = cursor_it.get_line()
self.syncpoints.append(syncpoint)
valid_points = [p for p in self.syncpoints if all(p)]
if valid_points and self.num_panes == 2:
self.linediffer.syncpoints = [
((p[1], p[0]), ) for p in valid_points]
elif valid_points and self.num_panes == 3:
self.linediffer.syncpoints = [
((p[1], p[0]), (p[1], p[2])) for p in valid_points]
self.refresh_comparison()
def clear_sync_points(self, action):
self.syncpoints = []
self.linediffer.syncpoints = []
self.refresh_comparison()
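# Rough standalone sketch (not part of Meld) of the proportional mapping used
# by _sync_vscroll above: a line inside a chunk of the "master" buffer is
# mapped to the matching fractional position in the other buffer's chunk.
if __name__ == "__main__":
    def _map_line_between_chunks(line, master_chunk, other_chunk):
        (mbegin, mend), (obegin, oend) = master_chunk, other_chunk
        fraction = (line - mbegin) / float((mend - mbegin) or 1)
        return obegin + fraction * (oend - obegin)
    # Line 15 is halfway through a 10-line chunk that lines up with a 4-line
    # chunk in the other pane, so it maps halfway through that chunk: 32.0
    print(_map_line_between_chunks(15, (10, 20), (30, 34)))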
|
gpl-2.0
| -4,633,573,906,481,092,000 | 42.910952 | 154 | 0.556036 | false | 3.800585 | false | false | false |
WmHHooper/aima-python
|
submissions/Thompson/myLogic.py
|
1
|
1132
|
##
farmer = {
'kb': '''
Farmer(Mac)
Rabbit(Pete)
Mother(MrsMac, Mac)
Mother(MrsRabbit, Pete)
(Rabbit(r) & Farmer(f)) ==> Hates(f, r)
(Mother(m, c)) ==> Loves(m, c)
(Mother(m, r) & Rabbit(r)) ==> Rabbit(m)
(Farmer(f)) ==> Human(f)
(Mother(m, h) & Human(h)) ==> Human(m)
''',
# Note that this order of conjuncts
# would result in infinite recursion:
# '(Human(h) & Mother(m, h)) ==> Human(m)'
'queries':'''
Human(x)
Hates(x, y)
''',
# 'limit': 1,
}
weapons = {
'kb': '''
(American(x) & Weapon(y) & Sells(x, y, z) & Hostile(z)) ==> Criminal(x)
Owns(Nono, M1)
Missile(M1)
(Missile(x) & Owns(Nono, x)) ==> Sells(West, x, Nono)
Missile(x) ==> Weapon(x)
Enemy(x, America) ==> Hostile(x)
American(West)
Enemy(Nono, America)
''',
'queries':'''
Criminal(x)
''',
}
wrath = {
'kb': '''
Father(Terenas)
DeathKnight(Arthas)
Living(Alliance)
Living(Horde)
Dead(Scourge)
(Living(f) & Dead(e) & DeathKnight(s)) ==> Kills(s, f, e)
(Father(f) & DeathKnight(s)) ==> Father(f, s)
''',
'queries': '''
Kills(x,y,z)
Father(x,y)
''',
}
Examples = {
# 'farmer': farmer,
# 'weapons': weapons,
'wrath': wrath,
}
#
#
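# Rough usage sketch (an assumption, not part of the course harness): one way
# the Examples dict above could be exercised with aima-python's logic module.
# The real test runner may import and query these structures differently.
if __name__ == '__main__':
    from logic import FolKB, fol_bc_ask, expr
    for name, example in Examples.items():
        clauses = [expr(line.strip())
                   for line in example['kb'].splitlines() if line.strip()]
        kb = FolKB(clauses)
        for query in example['queries'].splitlines():
            query = query.strip()
            if query:
                print(name, query, list(fol_bc_ask(kb, expr(query))))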
|
mit
| -1,356,765,412,437,163,300 | 16.703125 | 71 | 0.560954 | false | 2.028674 | false | true | false |
ContextLab/quail
|
quail/analysis/lagcrp.py
|
1
|
4765
|
import numpy as np
import pandas as pd
from .recmat import recall_matrix
from scipy.spatial.distance import cdist
from ..helpers import check_nan
def lagcrp_helper(egg, match='exact', distance='euclidean',
ts=None, features=None):
"""
Computes probabilities for each transition distance (probability that a word
recalled will be a given distance--in presentation order--from the previous
recalled word).
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
        distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prec : numpy array
        each float is the probability of a transition of that distance
        (distances indexed by position, from -(n-1) to (n-1), excluding zero)
"""
def lagcrp(rec, lstlen):
"""Computes lag-crp for a given recall list"""
def check_pair(a, b):
if (a>0 and b>0) and (a!=b):
return True
else:
return False
def compute_actual(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in range(0,len(rec)-1):
a=rec[trial]
b=rec[trial+1]
if check_pair(a, b) and (a not in recalled) and (b not in recalled):
arr[b-a]+=1
recalled.append(a)
return arr
def compute_possible(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in rec:
if np.isnan(trial):
pass
else:
lbound=int(1-trial)
ubound=int(lstlen-trial)
chances=list(range(lbound,0))+list(range(1,ubound+1))
for each in recalled:
if each-trial in chances:
chances.remove(each-trial)
arr[chances]+=1
recalled.append(trial)
return arr
actual = compute_actual(rec, lstlen)
possible = compute_possible(rec, lstlen)
crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]
crp.insert(int(len(crp) / 2), np.nan)
return crp
def nlagcrp(distmat, ts=None):
def lagcrp_model(s):
idx = list(range(0, -s, -1))
return np.array([list(range(i, i+s)) for i in idx])
# remove nan columns
distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T
model = lagcrp_model(distmat.shape[1])
lagcrp = np.zeros(ts * 2)
for rdx in range(len(distmat)-1):
item = distmat[rdx, :]
next_item = distmat[rdx+1, :]
if not np.isnan(item).any() and not np.isnan(next_item).any():
outer = np.outer(item, next_item)
lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts))))
lagcrp /= ts
lagcrp = list(lagcrp)
lagcrp.insert(int(len(lagcrp) / 2), np.nan)
return np.array(lagcrp)
def _format(p, r):
p = np.matrix([np.array(i) for i in p])
if p.shape[0]==1:
p=p.T
r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r)
r = np.matrix([np.array(i) for i in r])
if r.shape[0]==1:
r=r.T
return p, r
opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if not ts:
ts = egg.pres.shape[1]
if match in ['exact', 'best']:
lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat]
    elif match == 'smooth':
lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0))
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(lagcrp, axis=0)
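# Minimal standalone sketch (not part of quail's public API) of the
# actual/possible counting idea implemented by lagcrp above, for one toy
# recall sequence of serial positions from a 4-item list.
if __name__ == '__main__':
    list_length = 4
    rec_sequence = [1, 2, 4, 3]
    lags = list(range(-list_length, 0)) + list(range(1, list_length + 1))
    actual = dict.fromkeys(lags, 0)
    possible = dict.fromkeys(lags, 0)
    recalled = []
    for a, b in zip(rec_sequence[:-1], rec_sequence[1:]):
        actual[b - a] += 1           # the transition that actually happened
        recalled.append(a)
        for lag in lags:             # every transition still available from a
            target = a + lag
            if 1 <= target <= list_length and target not in recalled:
                possible[lag] += 1
    crp = {lag: actual[lag] / possible[lag] if possible[lag] else 0.0
           for lag in lags}
    print(crp)  # e.g. crp[1] is the probability of a forward transition of 1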
|
mit
| 8,503,094,469,382,664,000 | 35.937984 | 101 | 0.555089 | false | 3.671032 | false | false | false |
SuLab/scheduled-bots
|
scheduled_bots/query_tester/validators.py
|
1
|
1080
|
class Validator:
description = '' # Plain text description of what is being checked
expected_result = [] # optional
def __init__(self):
self.success = None # True or False
self.result_message = '' # optional extra information about test result
def validate(self, result):
raise NotImplementedError("Implement a Validator Subclass")
class OneOrMoreResultsValidator(Validator):
description = "Checks for at least 1 result"
def validate(self, result):
self.success = True if len(result) >= 1 else False
class NoResultsValidator(Validator):
description = "Checks for no results"
def validate(self, result):
self.success = True if len(result) == 0 else False
class NoValidator(Validator):
description = "No validation"
def validate(self, result):
self.success = None
class FailValidator(Validator):
description = "Always returns FAIL"
expected_result = [{'a': 4}]
def validate(self, result):
self.success = False
self.result_message = "this is more info"
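# Small usage sketch (assumed; the query_tester runner may drive these
# differently): a validator is instantiated, given the query result rows,
# and then exposes success plus an optional message.
if __name__ == '__main__':
    rows = [{'item': 'Q42'}]  # e.g. bindings returned by a SPARQL query
    validator = OneOrMoreResultsValidator()
    validator.validate(rows)
    print(validator.description, '->', validator.success, validator.result_message)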
|
mit
| 5,172,143,440,517,708,000 | 28.216216 | 80 | 0.669444 | false | 4.337349 | false | false | false |
beiko-lab/gengis
|
bin/Lib/site-packages/scipy/signal/signaltools.py
|
1
|
56054
|
# Author: Travis Oliphant
# 1999 -- 2002
from __future__ import division, print_function, absolute_import
import warnings
from . import sigtools
from scipy.lib.six import callable
from scipy import linalg
from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \
ifftn, fftfreq
from numpy.fft import rfftn, irfftn
from numpy import polyadd, polymul, polydiv, polysub, roots, \
poly, polyval, polyder, cast, asarray, isscalar, atleast_1d, \
ones, real_if_close, zeros, array, arange, where, rank, \
newaxis, product, ravel, sum, r_, iscomplexobj, take, \
argsort, allclose, expand_dims, unique, prod, sort, reshape, \
transpose, dot, mean, ndarray, atleast_2d
import numpy as np
from scipy.misc import factorial
from .windows import get_window
from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext
__all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d',
'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter',
'lfiltic', 'deconvolve', 'hilbert', 'hilbert2', 'cmplx_sort',
'unique_roots', 'invres', 'invresz', 'residue', 'residuez',
'resample', 'detrend', 'lfilter_zi', 'filtfilt', 'decimate']
_modedict = {'valid': 0, 'same': 1, 'full': 2}
_boundarydict = {'fill': 0, 'pad': 0, 'wrap': 2, 'circular': 2, 'symm': 1,
'symmetric': 1, 'reflect': 4}
def _valfrommode(mode):
try:
val = _modedict[mode]
except KeyError:
if mode not in [0, 1, 2]:
raise ValueError("Acceptable mode flags are 'valid' (0),"
" 'same' (1), or 'full' (2).")
val = mode
return val
def _bvalfromboundary(boundary):
try:
val = _boundarydict[boundary] << 2
except KeyError:
        if boundary not in [0, 1, 2]:
raise ValueError("Acceptable boundary flags are 'fill', 'wrap'"
" (or 'circular'), \n and 'symm' (or 'symmetric').")
val = boundary << 2
return val
def _check_valid_mode_shapes(shape1, shape2):
for d1, d2 in zip(shape1, shape2):
if not d1 >= d2:
raise ValueError(
"in1 should have at least as many items as in2 in "
"every dimension for 'valid' mode.")
def correlate(in1, in2, mode='full'):
"""
Cross-correlate two N-dimensional arrays.
Cross-correlate `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
correlate : array
An N-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
Notes
-----
The correlation z of two arrays x and y of rank d is defined as:
z[...,k,...] = sum[..., i_l, ...]
x[..., i_l,...] * conj(y[..., i_l + k,...])
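    Examples
    --------
    As a quick illustrative check, cross-correlating with a real kernel is
    equivalent to convolving with the reversed kernel (this is how `convolve`
    is implemented in this module):
    >>> from scipy import signal
    >>> a = np.array([1., 2., 3.])
    >>> b = np.array([0., 1., 0.5])
    >>> np.allclose(signal.correlate(a, b, mode='full'),
    ...             signal.convolve(a, b[::-1], mode='full'))
    True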
"""
in1 = asarray(in1)
in2 = asarray(in2)
val = _valfrommode(mode)
if rank(in1) == rank(in2) == 0:
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same rank")
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
ps = [i - j + 1 for i, j in zip(in1.shape, in2.shape)]
out = np.empty(ps, in1.dtype)
z = sigtools._correlateND(in1, in2, out, val)
else:
ps = [i + j - 1 for i, j in zip(in1.shape, in2.shape)]
# zero pad input
in1zpadded = np.zeros(ps, in1.dtype)
sc = [slice(0, i) for i in in1.shape]
in1zpadded[sc] = in1.copy()
if mode == 'full':
out = np.empty(ps, in1.dtype)
elif mode == 'same':
out = np.empty(in1.shape, in1.dtype)
z = sigtools._correlateND(in1zpadded, in2, out, val)
return z
def _centered(arr, newsize):
# Return the center newsize portion of the array.
newsize = asarray(newsize)
currsize = array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def fftconvolve(in1, in2, mode="full"):
"""Convolve two N-dimensional arrays using FFT.
Convolve `in1` and `in2` using the fast Fourier transform method, with
the output size determined by the `mode` argument.
This is generally much faster than `convolve` for large arrays (n > ~500),
but can be slower when only a few output values are needed, and can only
output float arrays (int or object array inputs will be cast to float).
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
out : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
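    Examples
    --------
    A small sanity check: for modest real-valued inputs the FFT-based result
    matches direct convolution to within floating-point error.
    >>> from scipy import signal
    >>> a = np.array([1., 2., 3., 4.])
    >>> b = np.array([0.5, 1., 0.5])
    >>> np.allclose(signal.fftconvolve(a, b), np.convolve(a, b))
    True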
"""
in1 = asarray(in1)
in2 = asarray(in2)
if rank(in1) == rank(in2) == 0: # scalar inputs
return in1 * in2
elif not in1.ndim == in2.ndim:
raise ValueError("in1 and in2 should have the same rank")
elif in1.size == 0 or in2.size == 0: # empty arrays
return array([])
s1 = array(in1.shape)
s2 = array(in2.shape)
complex_result = (np.issubdtype(in1.dtype, np.complex) or
np.issubdtype(in2.dtype, np.complex))
size = s1 + s2 - 1
if mode == "valid":
_check_valid_mode_shapes(s1, s2)
# Always use 2**n-sized FFT
fsize = 2 ** np.ceil(np.log2(size)).astype(int)
fslice = tuple([slice(0, int(sz)) for sz in size])
if not complex_result:
ret = irfftn(rfftn(in1, fsize) *
rfftn(in2, fsize), fsize)[fslice].copy()
ret = ret.real
else:
ret = ifftn(fftn(in1, fsize) * fftn(in2, fsize))[fslice].copy()
if mode == "full":
return ret
elif mode == "same":
return _centered(ret, s1)
elif mode == "valid":
return _centered(ret, s1 - s2 + 1)
def convolve(in1, in2, mode='full'):
"""
Convolve two N-dimensional arrays.
Convolve `in1` and `in2`, with the output size determined by the
`mode` argument.
Parameters
----------
in1 : array_like
First input.
in2 : array_like
Second input. Should have the same number of dimensions as `in1`;
if sizes of `in1` and `in2` are not equal then `in1` has to be the
larger array.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
Returns
-------
convolve : array
An N-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
volume = asarray(in1)
kernel = asarray(in2)
if rank(volume) == rank(kernel) == 0:
return volume * kernel
slice_obj = [slice(None, None, -1)] * len(kernel.shape)
if np.iscomplexobj(kernel):
return correlate(volume, kernel[slice_obj].conj(), mode)
else:
return correlate(volume, kernel[slice_obj], mode)
def order_filter(a, domain, rank):
"""
Perform an order filter on an N-dimensional array.
Perform an order filter on the array in. The domain argument acts as a
mask centered over each pixel. The non-zero elements of domain are
used to select elements surrounding each input pixel which are placed
in a list. The list is sorted, and the output for that pixel is the
element corresponding to rank in the sorted list.
Parameters
----------
a : ndarray
The N-dimensional input array.
domain : array_like
A mask array with the same number of dimensions as `in`.
Each dimension should have an odd number of elements.
rank : int
A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element, 1 is the
next smallest element, etc.).
Returns
-------
out : ndarray
The results of the order filter in an array with the same
shape as `in`.
Examples
--------
>>> from scipy import signal
>>> x = np.arange(25).reshape(5, 5)
>>> domain = np.identity(3)
>>> x
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14],
[15, 16, 17, 18, 19],
[20, 21, 22, 23, 24]])
>>> signal.order_filter(x, domain, 0)
array([[ 0., 0., 0., 0., 0.],
[ 0., 0., 1., 2., 0.],
[ 0., 5., 6., 7., 0.],
[ 0., 10., 11., 12., 0.],
[ 0., 0., 0., 0., 0.]])
>>> signal.order_filter(x, domain, 2)
array([[ 6., 7., 8., 9., 4.],
[ 11., 12., 13., 14., 9.],
[ 16., 17., 18., 19., 14.],
[ 21., 22., 23., 24., 19.],
[ 20., 21., 22., 23., 24.]])
"""
domain = asarray(domain)
size = domain.shape
for k in range(len(size)):
if (size[k] % 2) != 1:
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return sigtools._order_filterND(a, domain, rank)
def medfilt(volume, kernel_size=None):
"""
Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`.
Parameters
----------
volume : array_like
An N-dimensional input array.
kernel_size : array_like, optional
A scalar or an N-length list giving the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size in
each dimension. Default size is 3 for each dimension.
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
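    Examples
    --------
    A brief illustration: a single outlier is suppressed by a length-3 median
    window (edge samples are affected by the implicit padding, so only the
    interior is checked here).
    >>> from scipy import signal
    >>> x = np.array([1., 2., 100., 4., 5.])
    >>> np.allclose(signal.medfilt(x, kernel_size=3)[1:-1], [2., 4., 5.])
    True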
"""
volume = atleast_1d(volume)
if kernel_size is None:
kernel_size = [3] * len(volume.shape)
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), volume.ndim)
for k in range(len(volume.shape)):
if (kernel_size[k] % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
domain = ones(kernel_size)
numels = product(kernel_size, axis=0)
order = numels // 2
return sigtools._order_filterND(volume, domain, order)
def wiener(im, mysize=None, noise=None):
"""
Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Parameters
----------
im : ndarray
An N-dimensional array.
mysize : int or arraylike, optional
A scalar or an N-length list giving the size of the Wiener filter
window in each dimension. Elements of mysize should be odd.
If mysize is a scalar, then this scalar is used as the size
in each dimension.
noise : float, optional
The noise-power to use. If None, then noise is estimated as the
average of the local variance of the input.
Returns
-------
out : ndarray
Wiener filtered result with the same shape as `im`.
"""
im = asarray(im)
if mysize is None:
mysize = [3] * len(im.shape)
mysize = asarray(mysize)
if mysize.shape == ():
mysize = np.repeat(mysize.item(), im.ndim)
# Estimate the local mean
lMean = correlate(im, ones(mysize), 'same') / product(mysize, axis=0)
# Estimate the local variance
lVar = (correlate(im ** 2, ones(mysize), 'same') / product(mysize, axis=0)
- lMean ** 2)
# Estimate the noise power if needed.
if noise is None:
noise = mean(ravel(lVar), axis=0)
res = (im - lMean)
res *= (1 - noise / lVar)
res += lMean
out = where(lVar < noise, lMean, res)
return out
def convolve2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Convolve two 2-dimensional arrays.
Convolve `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
out : ndarray
A 2-dimensional array containing a subset of the discrete linear
convolution of `in1` with `in2`.
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 1, val, bval, fillvalue)
return out
def correlate2d(in1, in2, mode='full', boundary='fill', fillvalue=0):
"""
Cross-correlate two 2-dimensional arrays.
Cross correlate `in1` and `in2` with output size determined by `mode`, and
boundary conditions determined by `boundary` and `fillvalue`.
Parameters
----------
in1, in2 : array_like
Two-dimensional input arrays to be convolved.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear cross-correlation
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
boundary : str {'fill', 'wrap', 'symm'}, optional
A flag indicating how to handle boundaries:
``fill``
pad input arrays with fillvalue. (default)
``wrap``
circular boundary conditions.
``symm``
symmetrical boundary conditions.
fillvalue : scalar, optional
Value to fill pad input arrays with. Default is 0.
Returns
-------
correlate2d : ndarray
A 2-dimensional array containing a subset of the discrete linear
cross-correlation of `in1` with `in2`.
"""
in1 = asarray(in1)
in2 = asarray(in2)
if mode == 'valid':
_check_valid_mode_shapes(in1.shape, in2.shape)
val = _valfrommode(mode)
bval = _bvalfromboundary(boundary)
with warnings.catch_warnings():
warnings.simplefilter('ignore', np.ComplexWarning)
# FIXME: some cast generates a warning here
out = sigtools._convolve2d(in1, in2, 0, val, bval, fillvalue)
return out
def medfilt2d(input, kernel_size=3):
"""
Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size
given by `kernel_size` (must be odd).
Parameters
----------
input : array_like
A 2-dimensional input array.
kernel_size : array_like, optional
A scalar or a list of length 2, giving the size of the
median filter window in each dimension. Elements of
`kernel_size` should be odd. If `kernel_size` is a scalar,
then this scalar is used as the size in each dimension.
Default is a kernel of size (3, 3).
Returns
-------
out : ndarray
An array the same size as input containing the median filtered
result.
"""
image = asarray(input)
if kernel_size is None:
kernel_size = [3] * 2
kernel_size = asarray(kernel_size)
if kernel_size.shape == ():
kernel_size = np.repeat(kernel_size.item(), 2)
for size in kernel_size:
if (size % 2) != 1:
raise ValueError("Each element of kernel_size should be odd.")
return sigtools._medfilt2d(image, kernel_size)
def lfilter(b, a, x, axis=-1, zi=None):
"""
Filter data along one-dimension with an IIR or FIR filter.
Filter a data sequence, `x`, using a digital filter. This works for many
fundamental data types (including Object type). The filter is a direct
form II transposed implementation of the standard difference equation
(see Notes).
Parameters
----------
b : array_like
The numerator coefficient vector in a 1-D sequence.
a : array_like
The denominator coefficient vector in a 1-D sequence. If ``a[0]``
is not 1, then both `a` and `b` are normalized by ``a[0]``.
x : array_like
An N-dimensional input array.
axis : int
The axis of the input data array along which to apply the
linear filter. The filter is applied to each subarray along
this axis. Default is -1.
zi : array_like, optional
Initial conditions for the filter delays. It is a vector
(or array of vectors for an N-dimensional input) of length
``max(len(a),len(b))-1``. If `zi` is None or is not given then
initial rest is assumed. See `lfiltic` for more information.
Returns
-------
y : array
The output of the digital filter.
zf : array, optional
If `zi` is None, this is not returned, otherwise, `zf` holds the
final filter delay values.
Notes
-----
The filter function is implemented as a direct II transposed structure.
This means that the filter implements::
a[0]*y[n] = b[0]*x[n] + b[1]*x[n-1] + ... + b[nb]*x[n-nb]
- a[1]*y[n-1] - ... - a[na]*y[n-na]
using the following difference equations::
y[m] = b[0]*x[m] + z[0,m-1]
z[0,m] = b[1]*x[m] + z[1,m-1] - a[1]*y[m]
...
z[n-3,m] = b[n-2]*x[m] + z[n-2,m-1] - a[n-2]*y[m]
z[n-2,m] = b[n-1]*x[m] - a[n-1]*y[m]
where m is the output sample number and n=max(len(a),len(b)) is the
model order.
The rational transfer function describing this filter in the
z-transform domain is::
-1 -nb
b[0] + b[1]z + ... + b[nb] z
Y(z) = ---------------------------------- X(z)
-1 -na
a[0] + a[1]z + ... + a[na] z
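    Examples
    --------
    As a minimal illustration, an FIR moving-average filter (``a = [1]``)
    reproduces the three-point windowed mean once the window is full:
    >>> from scipy import signal
    >>> x = np.arange(8.)
    >>> b = np.ones(3) / 3.
    >>> y = signal.lfilter(b, [1.0], x)
    >>> np.allclose(y[2:], (x[:-2] + x[1:-1] + x[2:]) / 3.)
    True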
"""
if isscalar(a):
a = [a]
if zi is None:
return sigtools._linear_filter(b, a, x, axis)
else:
return sigtools._linear_filter(b, a, x, axis, zi)
def lfiltic(b, a, y, x=None):
"""
Construct initial conditions for lfilter.
Given a linear filter (b, a) and initial conditions on the output `y`
    and the input `x`, return the initial conditions on the state vector zi
which is used by `lfilter` to generate the output given the input.
Parameters
----------
b : array_like
Linear filter term.
a : array_like
Linear filter term.
y : array_like
Initial conditions.
If ``N=len(a) - 1``, then ``y = {y[-1], y[-2], ..., y[-N]}``.
If `y` is too short, it is padded with zeros.
x : array_like, optional
Initial conditions.
If ``M=len(b) - 1``, then ``x = {x[-1], x[-2], ..., x[-M]}``.
If `x` is not given, its initial conditions are assumed zero.
If `x` is too short, it is padded with zeros.
Returns
-------
zi : ndarray
The state vector ``zi``.
``zi = {z_0[-1], z_1[-1], ..., z_K-1[-1]}``, where ``K = max(M,N)``.
See Also
--------
lfilter
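    Examples
    --------
    A short sketch: initial conditions computed for a first-order IIR filter
    already in steady state remove the start-up transient entirely.
    >>> from scipy import signal
    >>> alpha = 0.9
    >>> b, a = [1. - alpha], [1., -alpha]
    >>> zi = signal.lfiltic(b, a, y=[1.], x=[1.])
    >>> y, zf = signal.lfilter(b, a, np.ones(5), zi=zi)
    >>> np.allclose(y, 1.)
    True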
"""
N = np.size(a) - 1
M = np.size(b) - 1
K = max(M, N)
y = asarray(y)
zi = zeros(K, y.dtype.char)
if x is None:
x = zeros(M, y.dtype.char)
else:
x = asarray(x)
L = np.size(x)
if L < M:
x = r_[x, zeros(M - L)]
L = np.size(y)
if L < N:
y = r_[y, zeros(N - L)]
for m in range(M):
zi[m] = sum(b[m + 1:] * x[:M - m], axis=0)
for m in range(N):
zi[m] -= sum(a[m + 1:] * y[:N - m], axis=0)
return zi
def deconvolve(signal, divisor):
"""Deconvolves `divisor` out of `signal`.
Parameters
----------
signal : array
Signal input
divisor : array
Divisor input
Returns
-------
q : array
Quotient of the division
r : array
Remainder
Examples
--------
>>> from scipy import signal
>>> sig = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1,])
>>> filter = np.array([1,1,0])
>>> res = signal.convolve(sig, filter)
>>> signal.deconvolve(res, filter)
(array([ 0., 0., 0., 0., 0., 1., 1., 1., 1.]),
array([ 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]))
"""
num = atleast_1d(signal)
den = atleast_1d(divisor)
N = len(num)
D = len(den)
if D > N:
quot = []
rem = num
else:
input = ones(N - D + 1, float)
input[1:] = 0
quot = lfilter(num, den, input)
rem = num - convolve(den, quot, mode='full')
return quot, rem
def hilbert(x, N=None, axis=-1):
"""
Compute the analytic signal, using the Hilbert transform.
The transformation is done along the last axis by default.
Parameters
----------
x : array_like
Signal data. Must be real.
N : int, optional
Number of Fourier components. Default: ``x.shape[axis]``
axis : int, optional
Axis along which to do the transformation. Default: -1.
Returns
-------
xa : ndarray
Analytic signal of `x`, of each 1-D array along `axis`
Notes
-----
The analytic signal ``x_a(t)`` of signal ``x(t)`` is:
.. math:: x_a = F^{-1}(F(x) 2U) = x + i y
where `F` is the Fourier transform, `U` the unit step function,
and `y` the Hilbert transform of `x`. [1]_
In other words, the negative half of the frequency spectrum is zeroed
out, turning the real-valued signal into a complex signal. The Hilbert
transformed signal can be obtained from ``np.imag(hilbert(x))``, and the
original signal from ``np.real(hilbert(x))``.
References
----------
.. [1] Wikipedia, "Analytic signal".
http://en.wikipedia.org/wiki/Analytic_signal
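    Examples
    --------
    A rough check of the amplitude-envelope interpretation: for a pure tone
    spanning an integer number of periods, the magnitude of the analytic
    signal is numerically constant.
    >>> from scipy import signal
    >>> t = np.arange(500) / 500.
    >>> x = np.cos(2 * np.pi * 10 * t)
    >>> np.allclose(np.abs(signal.hilbert(x)), 1.0)
    True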
"""
x = asarray(x)
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape[axis]
if N <= 0:
raise ValueError("N must be positive.")
Xf = fft(x, N, axis=axis)
h = zeros(N)
if N % 2 == 0:
h[0] = h[N // 2] = 1
h[1:N // 2] = 2
else:
h[0] = 1
h[1:(N + 1) // 2] = 2
if len(x.shape) > 1:
ind = [newaxis] * x.ndim
ind[axis] = slice(None)
h = h[ind]
x = ifft(Xf * h, axis=axis)
return x
def hilbert2(x, N=None):
"""
Compute the '2-D' analytic signal of `x`
Parameters
----------
x : array_like
2-D signal data.
N : int or tuple of two ints, optional
Number of Fourier components. Default is ``x.shape``
Returns
-------
xa : ndarray
Analytic signal of `x` taken along axes (0,1).
References
----------
.. [1] Wikipedia, "Analytic signal",
http://en.wikipedia.org/wiki/Analytic_signal
"""
x = atleast_2d(x)
if len(x.shape) > 2:
raise ValueError("x must be rank 2.")
if iscomplexobj(x):
raise ValueError("x must be real.")
if N is None:
N = x.shape
elif isinstance(N, int):
if N <= 0:
raise ValueError("N must be positive.")
N = (N, N)
elif len(N) != 2 or np.any(np.asarray(N) <= 0):
raise ValueError("When given as a tuple, N must hold exactly "
"two positive integers")
Xf = fft2(x, N, axes=(0, 1))
h1 = zeros(N[0], 'd')
h2 = zeros(N[1], 'd')
for p in range(2):
h = eval("h%d" % (p + 1))
N1 = N[p]
if N1 % 2 == 0:
h[0] = h[N1 // 2] = 1
h[1:N1 // 2] = 2
else:
h[0] = 1
h[1:(N1 + 1) // 2] = 2
exec("h%d = h" % (p + 1), globals(), locals())
h = h1[:, newaxis] * h2[newaxis, :]
k = len(x.shape)
while k > 2:
h = h[:, newaxis]
k -= 1
x = ifft2(Xf * h, axes=(0, 1))
return x
def cmplx_sort(p):
"sort roots based on magnitude."
p = asarray(p)
if iscomplexobj(p):
indx = argsort(abs(p))
else:
indx = argsort(p)
return take(p, indx, 0), indx
def unique_roots(p, tol=1e-3, rtype='min'):
"""
Determine unique roots and their multiplicities from a list of roots.
Parameters
----------
p : array_like
The list of roots.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
- 'max': pick the maximum of those roots.
- 'min': pick the minimum of those roots.
- 'avg': take the average of those roots.
Returns
-------
pout : ndarray
The list of unique roots, sorted from low to high.
mult : ndarray
The multiplicity of each root.
Notes
-----
This utility function is not specific to roots but can be used for any
sequence of values for which uniqueness and multiplicity has to be
determined. For a more general routine, see `numpy.unique`.
Examples
--------
>>> from scipy import signal
>>> vals = [0, 1.3, 1.31, 2.8, 1.25, 2.2, 10.3]
>>> uniq, mult = signal.unique_roots(vals, tol=2e-2, rtype='avg')
Check which roots have multiplicity larger than 1:
>>> uniq[mult > 1]
array([ 1.305])
"""
if rtype in ['max', 'maximum']:
comproot = np.max
elif rtype in ['min', 'minimum']:
comproot = np.min
elif rtype in ['avg', 'mean']:
comproot = np.mean
else:
raise ValueError("`rtype` must be one of "
"{'max', 'maximum', 'min', 'minimum', 'avg', 'mean'}")
p = asarray(p) * 1.0
tol = abs(tol)
p, indx = cmplx_sort(p)
pout = []
mult = []
indx = -1
curp = p[0] + 5 * tol
sameroots = []
for k in range(len(p)):
tr = p[k]
if abs(tr - curp) < tol:
sameroots.append(tr)
curp = comproot(sameroots)
pout[indx] = curp
mult[indx] += 1
else:
pout.append(tr)
curp = tr
sameroots = [tr]
indx += 1
mult.append(1)
return array(pout), array(mult)
def invres(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(s) and a(s) from partial fraction expansion: r,p,k
If ``M = len(b)`` and ``N = len(a)``::
b(s) b[0] x**(M-1) + b[1] x**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] x**(N-1) + a[1] x**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Parameters
----------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
tol : float, optional
The tolerance for two roots to be considered equal. Default is 1e-3.
    rtype : {'max', 'min', 'avg'}, optional
How to determine the returned root if multiple roots are within
`tol` of each other.
'max': pick the maximum of those roots.
'min': pick the minimum of those roots.
'avg': take the average of those roots.
See Also
--------
residue, unique_roots
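    Examples
    --------
    A round-trip sketch (coefficients arbitrary, poles distinct):
    expanding with `residue` and rebuilding with `invres` should recover
    the original transfer function to within floating-point error.
    >>> from scipy import signal
    >>> b, a = [1.0, 2.0], [1.0, 3.0, 2.0]
    >>> r, p, k = signal.residue(b, a)
    >>> b2, a2 = signal.invres(r, p, k)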
"""
extra = k
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
for k in range(len(pout)):
temp = []
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
b = polyadd(b, r[indx] * poly(t2))
indx += 1
b = real_if_close(b)
while allclose(b[0], 0, rtol=1e-14) and (b.shape[-1] > 1):
b = b[1:]
return b, a
def residue(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(s) / a(s).
If ``M = len(b)`` and ``N = len(a)``, then the partial-fraction
expansion H(s) is defined as::
b(s) b[0] s**(M-1) + b[1] s**(M-2) + ... + b[M-1]
H(s) = ------ = ----------------------------------------------
a(s) a[0] s**(N-1) + a[1] s**(N-2) + ... + a[N-1]
r[0] r[1] r[-1]
= -------- + -------- + ... + --------- + k(s)
(s-p[0]) (s-p[1]) (s-p[-1])
If there are any repeated roots (closer together than `tol`), then H(s)
has terms like::
r[i] r[i+1] r[i+n-1]
-------- + ----------- + ... + -----------
(s-p[i]) (s-p[i])**2 (s-p[i])**n
Returns
-------
r : ndarray
Residues.
p : ndarray
Poles.
k : ndarray
Coefficients of the direct polynomial term.
See Also
--------
invres, numpy.poly, unique_roots
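    Examples
    --------
    A small sketch (only the number of poles is checked; the numeric
    residues are omitted here):
    >>> from scipy import signal
    >>> r, p, k = signal.residue([1.0], [1.0, 3.0, 2.0])
    >>> p.shape
    (2,)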
"""
b, a = map(asarray, (b, a))
rscale = a[0]
k, b = polydiv(b, a)
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula
indx = 0
for n in range(len(pout)):
bn = b.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))
# bn(s) / an(s) is (s-po[n])**Nn * b(s) / a(s) where Nn is
# multiplicity of pole at po[n]
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = polyval(bn, pout[n]) / polyval(an, pout[n]) \
/ factorial(sig - m)
indx += sig
return r / rscale, p, k
def residuez(b, a, tol=1e-3, rtype='avg'):
"""
Compute partial-fraction expansion of b(z) / a(z).
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See also
--------
invresz, unique_roots
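    Examples
    --------
    A shape-only sketch for a simple first-order discrete-time filter
    (coefficients arbitrary):
    >>> from scipy import signal
    >>> r, p, k = signal.residuez([1.0], [1.0, -0.5])
    >>> p.shape
    (1,)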
"""
b, a = map(asarray, (b, a))
gain = a[0]
brev, arev = b[::-1], a[::-1]
krev, brev = polydiv(brev, arev)
    if krev.size == 0:
k = []
else:
k = krev[::-1]
b = brev[::-1]
p = roots(a)
r = p * 0.0
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for n in range(len(pout)):
p.extend([pout[n]] * mult[n])
p = asarray(p)
# Compute the residue from the general formula (for discrete-time)
# the polynomial is in z**(-1) and the multiplication is by terms
# like this (1-p[i] z**(-1))**mult[i]. After differentiation,
# we must divide by (-p[i])**(m-k) as well as (m-k)!
indx = 0
for n in range(len(pout)):
bn = brev.copy()
pn = []
for l in range(len(pout)):
if l != n:
pn.extend([pout[l]] * mult[l])
an = atleast_1d(poly(pn))[::-1]
# bn(z) / an(z) is (1-po[n] z**(-1))**Nn * b(z) / a(z) where Nn is
# multiplicity of pole at po[n] and b(z) and a(z) are polynomials.
sig = mult[n]
for m in range(sig, 0, -1):
if sig > m:
# compute next derivative of bn(s) / an(s)
term1 = polymul(polyder(bn, 1), an)
term2 = polymul(bn, polyder(an, 1))
bn = polysub(term1, term2)
an = polymul(an, an)
r[indx + m - 1] = (polyval(bn, 1.0 / pout[n]) /
polyval(an, 1.0 / pout[n]) /
factorial(sig - m) / (-pout[n]) ** (sig - m))
indx += sig
return r / gain, p, k
def invresz(r, p, k, tol=1e-3, rtype='avg'):
"""
Compute b(z) and a(z) from partial fraction expansion: r,p,k
If ``M = len(b)`` and ``N = len(a)``::
b(z) b[0] + b[1] z**(-1) + ... + b[M-1] z**(-M+1)
H(z) = ------ = ----------------------------------------------
a(z) a[0] + a[1] z**(-1) + ... + a[N-1] z**(-N+1)
r[0] r[-1]
= --------------- + ... + ---------------- + k[0] + k[1]z**(-1) ...
(1-p[0]z**(-1)) (1-p[-1]z**(-1))
If there are any repeated roots (closer than tol), then the partial
fraction expansion has terms like::
r[i] r[i+1] r[i+n-1]
-------------- + ------------------ + ... + ------------------
(1-p[i]z**(-1)) (1-p[i]z**(-1))**2 (1-p[i]z**(-1))**n
See Also
--------
residuez, unique_roots
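    Examples
    --------
    A round-trip sketch (coefficients arbitrary, poles distinct):
    `residuez` followed by `invresz` should reproduce the original
    digital-filter coefficients to within floating-point error.
    >>> from scipy import signal
    >>> b, a = [1.0], [1.0, -0.75, 0.125]
    >>> r, p, k = signal.residuez(b, a)
    >>> b2, a2 = signal.invresz(r, p, k)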
"""
extra = asarray(k)
p, indx = cmplx_sort(p)
r = take(r, indx, 0)
pout, mult = unique_roots(p, tol=tol, rtype=rtype)
p = []
for k in range(len(pout)):
p.extend([pout[k]] * mult[k])
a = atleast_1d(poly(p))
if len(extra) > 0:
b = polymul(extra, a)
else:
b = [0]
indx = 0
brev = asarray(b)[::-1]
for k in range(len(pout)):
temp = []
# Construct polynomial which does not include any of this root
for l in range(len(pout)):
if l != k:
temp.extend([pout[l]] * mult[l])
for m in range(mult[k]):
t2 = temp[:]
t2.extend([pout[k]] * (mult[k] - m - 1))
brev = polyadd(brev, (r[indx] * poly(t2))[::-1])
indx += 1
b = real_if_close(brev[::-1])
return b, a
def resample(x, num, t=None, axis=0, window=None):
"""
Resample `x` to `num` samples using Fourier method along the given axis.
The resampled signal starts at the same value as `x` but is sampled
with a spacing of ``len(x) / num * (spacing of x)``. Because a
Fourier method is used, the signal is assumed to be periodic.
Parameters
----------
x : array_like
The data to be resampled.
num : int
The number of samples in the resampled signal.
t : array_like, optional
If `t` is given, it is assumed to be the sample positions
associated with the signal data in `x`.
axis : int, optional
The axis of `x` that is resampled. Default is 0.
window : array_like, callable, string, float, or tuple, optional
Specifies the window applied to the signal in the Fourier
domain. See below for details.
Returns
-------
resampled_x or (resampled_x, resampled_t)
Either the resampled array, or, if `t` was given, a tuple
containing the resampled array and the corresponding resampled
positions.
Notes
-----
    The argument `window` controls a Fourier-domain window that tapers
    the Fourier spectrum before zero-padding. This can alleviate ringing
    in the resampled values when the sampled signal was not intended to
    be interpreted as band-limited.
If `window` is a function, then it is called with a vector of inputs
indicating the frequency bins (i.e. fftfreq(x.shape[axis]) ).
If `window` is an array of the same length as `x.shape[axis]` it is
assumed to be the window to be applied directly in the Fourier
domain (with dc and low-frequency first).
For any other type of `window`, the function `scipy.signal.get_window`
is called to generate the window.
The first sample of the returned vector is the same as the first
sample of the input vector. The spacing between samples is changed
from dx to:
dx * len(x) / num
If `t` is not None, then it represents the old sample positions,
and the new sample positions will be returned as well as the new
samples.
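    Examples
    --------
    A shape-only sketch (signal values arbitrary): downsampling 100
    samples of a sine wave to 25 samples.
    >>> import numpy as np
    >>> from scipy.signal import resample
    >>> x = np.sin(2 * np.pi * np.arange(100) / 100.0)
    >>> resample(x, 25).shape
    (25,)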
"""
x = asarray(x)
X = fft(x, axis=axis)
Nx = x.shape[axis]
if window is not None:
if callable(window):
W = window(fftfreq(Nx))
elif isinstance(window, ndarray) and window.shape == (Nx,):
W = window
else:
W = ifftshift(get_window(window, Nx))
        newshape = [1] * len(x.shape)
newshape[axis] = len(W)
W.shape = newshape
X = X * W
sl = [slice(None)] * len(x.shape)
newshape = list(x.shape)
newshape[axis] = num
N = int(np.minimum(num, Nx))
Y = zeros(newshape, 'D')
    sl[axis] = slice(0, (N + 1) // 2)
    Y[tuple(sl)] = X[tuple(sl)]
    sl[axis] = slice(-(N - 1) // 2, None)
    Y[tuple(sl)] = X[tuple(sl)]
y = ifft(Y, axis=axis) * (float(num) / float(Nx))
if x.dtype.char not in ['F', 'D']:
y = y.real
if t is None:
return y
else:
new_t = arange(0, num) * (t[1] - t[0]) * Nx / float(num) + t[0]
return y, new_t
def detrend(data, axis=-1, type='linear', bp=0):
"""
Remove linear trend along axis from data.
Parameters
----------
data : array_like
The input data.
axis : int, optional
The axis along which to detrend the data. By default this is the
last axis (-1).
type : {'linear', 'constant'}, optional
The type of detrending. If ``type == 'linear'`` (default),
the result of a linear least-squares fit to `data` is subtracted
from `data`.
If ``type == 'constant'``, only the mean of `data` is subtracted.
bp : array_like of ints, optional
A sequence of break points. If given, an individual linear fit is
performed for each part of `data` between two break points.
Break points are specified as indices into `data`.
Returns
-------
ret : ndarray
The detrended input data.
Examples
--------
>>> from scipy import signal
>>> randgen = np.random.RandomState(9)
    >>> npoints = 1000
>>> noise = randgen.randn(npoints)
>>> x = 3 + 2*np.linspace(0, 1, npoints) + noise
>>> (signal.detrend(x) - noise).max() < 0.01
True
"""
if type not in ['linear', 'l', 'constant', 'c']:
raise ValueError("Trend type must be 'linear' or 'constant'.")
data = asarray(data)
dtype = data.dtype.char
if dtype not in 'dfDF':
dtype = 'd'
if type in ['constant', 'c']:
ret = data - expand_dims(mean(data, axis), axis)
return ret
else:
dshape = data.shape
N = dshape[axis]
bp = sort(unique(r_[0, bp, N]))
if np.any(bp > N):
raise ValueError("Breakpoints must be less than length "
"of data along given axis.")
Nreg = len(bp) - 1
# Restructure data so that axis is along first dimension and
# all other dimensions are collapsed into second dimension
rnk = len(dshape)
if axis < 0:
axis = axis + rnk
newdims = r_[axis, 0:axis, axis + 1:rnk]
newdata = reshape(transpose(data, tuple(newdims)),
(N, prod(dshape, axis=0) // N))
newdata = newdata.copy() # make sure we have a copy
if newdata.dtype.char not in 'dfDF':
newdata = newdata.astype(dtype)
# Find leastsq fit and remove it for each piece
for m in range(Nreg):
Npts = bp[m + 1] - bp[m]
A = ones((Npts, 2), dtype)
A[:, 0] = cast[dtype](arange(1, Npts + 1) * 1.0 / Npts)
sl = slice(bp[m], bp[m + 1])
coef, resids, rank, s = linalg.lstsq(A, newdata[sl])
newdata[sl] = newdata[sl] - dot(A, coef)
# Put data back in original shape.
tdshape = take(dshape, newdims, 0)
ret = reshape(newdata, tuple(tdshape))
vals = list(range(1, rnk))
olddims = vals[:axis] + [0] + vals[axis:]
ret = transpose(ret, tuple(olddims))
return ret
def lfilter_zi(b, a):
"""
Compute an initial state `zi` for the lfilter function that corresponds
to the steady state of the step response.
A typical use of this function is to set the initial state so that the
output of the filter starts at the same value as the first element of
the signal to be filtered.
Parameters
----------
b, a : array_like (1-D)
The IIR filter coefficients. See `lfilter` for more
information.
Returns
-------
zi : 1-D ndarray
The initial state for the filter.
Notes
-----
A linear filter with order m has a state space representation (A, B, C, D),
for which the output y of the filter can be expressed as::
z(n+1) = A*z(n) + B*x(n)
y(n) = C*z(n) + D*x(n)
where z(n) is a vector of length m, A has shape (m, m), B has shape
(m, 1), C has shape (1, m) and D has shape (1, 1) (assuming x(n) is
a scalar). lfilter_zi solves::
zi = A*zi + B
In other words, it finds the initial condition for which the response
to an input of all ones is a constant.
Given the filter coefficients `a` and `b`, the state space matrices
for the transposed direct form II implementation of the linear filter,
which is the implementation used by scipy.signal.lfilter, are::
A = scipy.linalg.companion(a).T
B = b[1:] - a[1:]*b[0]
assuming `a[0]` is 1.0; if `a[0]` is not 1, `a` and `b` are first
divided by a[0].
Examples
--------
The following code creates a lowpass Butterworth filter. Then it
applies that filter to an array whose values are all 1.0; the
output is also all 1.0, as expected for a lowpass filter. If the
`zi` argument of `lfilter` had not been given, the output would have
shown the transient signal.
>>> from numpy import array, ones
>>> from scipy.signal import lfilter, lfilter_zi, butter
>>> b, a = butter(5, 0.25)
>>> zi = lfilter_zi(b, a)
>>> y, zo = lfilter(b, a, ones(10), zi=zi)
>>> y
array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
Another example:
>>> x = array([0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0])
>>> y, zf = lfilter(b, a, x, zi=zi*x[0])
>>> y
array([ 0.5 , 0.5 , 0.5 , 0.49836039, 0.48610528,
0.44399389, 0.35505241])
Note that the `zi` argument to `lfilter` was computed using
`lfilter_zi` and scaled by `x[0]`. Then the output `y` has no
transient until the input drops from 0.5 to 0.0.
"""
# FIXME: Can this function be replaced with an appropriate
# use of lfiltic? For example, when b,a = butter(N,Wn),
# lfiltic(b, a, y=numpy.ones_like(a), x=numpy.ones_like(b)).
#
# We could use scipy.signal.normalize, but it uses warnings in
# cases where a ValueError is more appropriate, and it allows
# b to be 2D.
b = np.atleast_1d(b)
if b.ndim != 1:
raise ValueError("Numerator b must be rank 1.")
a = np.atleast_1d(a)
if a.ndim != 1:
raise ValueError("Denominator a must be rank 1.")
while len(a) > 1 and a[0] == 0.0:
a = a[1:]
if a.size < 1:
raise ValueError("There must be at least one nonzero `a` coefficient.")
if a[0] != 1.0:
# Normalize the coefficients so a[0] == 1.
a = a / a[0]
b = b / a[0]
n = max(len(a), len(b))
# Pad a or b with zeros so they are the same length.
if len(a) < n:
a = np.r_[a, np.zeros(n - len(a))]
elif len(b) < n:
b = np.r_[b, np.zeros(n - len(b))]
IminusA = np.eye(n - 1) - linalg.companion(a).T
B = b[1:] - a[1:] * b[0]
# Solve zi = A*zi + B
zi = np.linalg.solve(IminusA, B)
# For future reference: we could also use the following
# explicit formulas to solve the linear system:
#
# zi = np.zeros(n - 1)
# zi[0] = B.sum() / IminusA[:,0].sum()
# asum = 1.0
# csum = 0.0
# for k in range(1,n-1):
# asum += a[k]
# csum += b[k] - a[k]*b[0]
# zi[k] = asum*zi[0] - csum
return zi
def filtfilt(b, a, x, axis=-1, padtype='odd', padlen=None):
"""
A forward-backward filter.
This function applies a linear filter twice, once forward
and once backwards. The combined filter has linear phase.
Before applying the filter, the function can pad the data along the
given axis in one of three ways: odd, even or constant. The odd
and even extensions have the corresponding symmetry about the end point
of the data. The constant extension extends the data with the values
at end points. On both the forward and backwards passes, the
initial condition of the filter is found by using `lfilter_zi` and
scaling it by the end point of the extended data.
Parameters
----------
b : (N,) array_like
The numerator coefficient vector of the filter.
a : (N,) array_like
The denominator coefficient vector of the filter. If a[0]
is not 1, then both a and b are normalized by a[0].
x : array_like
The array of data to be filtered.
axis : int, optional
The axis of `x` to which the filter is applied.
Default is -1.
padtype : str or None, optional
Must be 'odd', 'even', 'constant', or None. This determines the
type of extension to use for the padded signal to which the filter
is applied. If `padtype` is None, no padding is used. The default
is 'odd'.
padlen : int or None, optional
The number of elements by which to extend `x` at both ends of
`axis` before applying the filter. This value must be less than
`x.shape[axis]-1`. `padlen=0` implies no padding.
The default value is 3*max(len(a),len(b)).
Returns
-------
y : ndarray
The filtered output, an array of type numpy.float64 with the same
shape as `x`.
See Also
--------
lfilter_zi, lfilter
Examples
--------
First we create a one second signal that is the sum of two pure sine
waves, with frequencies 5 Hz and 250 Hz, sampled at 2000 Hz.
>>> t = np.linspace(0, 1.0, 2001)
>>> xlow = np.sin(2 * np.pi * 5 * t)
>>> xhigh = np.sin(2 * np.pi * 250 * t)
>>> x = xlow + xhigh
Now create a lowpass Butterworth filter with a cutoff of 0.125 times
the Nyquist rate, or 125 Hz, and apply it to x with filtfilt. The
result should be approximately xlow, with no phase shift.
>>> from scipy import signal
>>> b, a = signal.butter(8, 0.125)
>>> y = signal.filtfilt(b, a, x, padlen=150)
>>> np.abs(y - xlow).max()
9.1086182074789912e-06
We get a fairly clean result for this artificial example because
the odd extension is exact, and with the moderately long padding,
the filter's transients have dissipated by the time the actual data
is reached. In general, transient effects at the edges are
unavoidable.
"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype must "
"be 'even', 'odd', 'constant', or None.") %
padtype)
b = np.asarray(b)
a = np.asarray(a)
x = np.asarray(x)
ntaps = max(len(a), len(b))
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
    if x.shape[axis] <= edge:
        raise ValueError("The length of the input vector x must be greater "
                         "than padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
# Get the steady state of the filter's step response.
zi = lfilter_zi(b, a)
# Reshape zi and create x0 so that zi*x0 broadcasts
# to the correct value for the 'zi' keyword argument
# to lfilter.
zi_shape = [1] * x.ndim
zi_shape[axis] = zi.size
zi = np.reshape(zi, zi_shape)
x0 = axis_slice(ext, stop=1, axis=axis)
# Forward filter.
(y, zf) = lfilter(b, a, ext, axis=axis, zi=zi * x0)
# Backward filter.
# Create y0 so zi*y0 broadcasts appropriately.
y0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = lfilter(b, a, axis_reverse(y, axis=axis), axis=axis, zi=zi * y0)
# Reverse y.
y = axis_reverse(y, axis=axis)
if edge > 0:
# Slice the actual signal from the extended signal.
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
from scipy.signal.filter_design import cheby1
from scipy.signal.fir_filter_design import firwin
def decimate(x, q, n=None, ftype='iir', axis=-1):
"""
Downsample the signal by using a filter.
    By default, an order 8 Chebyshev type I filter is used. A 30th-order FIR
    filter with a Hamming window is used if `ftype` is 'fir'.
Parameters
----------
x : ndarray
The signal to be downsampled, as an N-dimensional array.
q : int
The downsampling factor.
n : int, optional
The order of the filter (1 less than the length for 'fir').
ftype : str {'iir', 'fir'}, optional
The type of the lowpass filter.
axis : int, optional
The axis along which to decimate.
Returns
-------
y : ndarray
The down-sampled signal.
See also
--------
resample
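    Examples
    --------
    A shape-only sketch (signal values arbitrary): decimating a
    100-sample signal by a factor of 4 keeps every fourth filtered
    sample.
    >>> import numpy as np
    >>> from scipy.signal import decimate
    >>> x = np.sin(2 * np.pi * 5 * np.arange(100) / 100.0)
    >>> decimate(x, 4).shape
    (25,)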
"""
if not isinstance(q, int):
raise TypeError("q must be an integer")
if n is None:
if ftype == 'fir':
n = 30
else:
n = 8
if ftype == 'fir':
b = firwin(n + 1, 1. / q, window='hamming')
a = 1.
else:
b, a = cheby1(n, 0.05, 0.8 / q)
y = lfilter(b, a, x, axis=axis)
sl = [slice(None)] * y.ndim
sl[axis] = slice(None, None, q)
    return y[tuple(sl)]
|
gpl-3.0
| -7,537,377,946,632,358,000 | 29.86697 | 80 | 0.52478 | false | 3.532296 | false | false | false |
roderickmackenzie/gpvdm
|
gpvdm_gui/gui/token_lib.py
|
1
|
59716
|
#
#   General-purpose Photovoltaic Device Model - a drift diffusion based/Shockley-Read-Hall
# model for 1st, 2nd and 3rd generation solar cells.
# Copyright (C) 2012-2017 Roderick C. I. MacKenzie r.c.i.mackenzie at googlemail.com
#
# https://www.gpvdm.com
# Room B86 Coates, University Park, Nottingham, NG7 2RD, UK
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License v2.0, as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#
## @package token_lib
# A library of all tokens used in the model.
#
import re
import i18n
_ = i18n.language.gettext
class my_data():
token=""
units=""
info=""
def __init__(self,file_name,a,b,info,e,f,widget,defaults=None,units_widget="QLabel",min=None,max=None,hidden=False,hide_on_true_token="none",hide_on_false_token=[],data_type=None,hide_on_token_eq=None):
self.file_name=file_name
self.token=a
self.units=b
self.info=info
self.defaults=defaults
self.number_type=e
self.number_mul=f
self.widget=widget
self.units_widget=units_widget
self.hidden=hidden
self.hide_on_true_token=hide_on_true_token
self.hide_on_false_token=hide_on_false_token
self.hide_on_token_eq=hide_on_token_eq
self.data_type=data_type
self.min=min
self.max=max
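# Summary of the my_data fields (inferred from how the entries below are
# constructed; not taken from separate documentation):
#   file_name   - .inp file the token normally lives in ("" when the token
#                 can appear in several files)
#   token       - the "#token" name looked up in the .inp file
#   units       - units string shown next to the value in the GUI
#   info        - human readable description, wrapped in _() for translation
#   number_type - how the value is interpreted ("e" for numeric values,
#                 "s" for strings/selections)
#   number_mul  - multiplier applied to the value for display
#   widget      - Qt widget class used to edit the value
# The optional keyword arguments control visibility (hidden,
# hide_on_true_token, hide_on_false_token, hide_on_token_eq) and simple
# validation limits (min, max).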
lib=[]
def build_token_lib():
global lib
#light.inp
lib.append(my_data("","#light_wavelength_auto_mesh",_("True/False"),_("Automatically mesh wavelength space"),"e",1.0,"gtkswitch"))
lib.append(my_data("light.inp","#lpoints","au",_("Mesh points (lambda)"),"e",1.0,"QLineEdit",hide_on_true_token="#light_wavelength_auto_mesh"))
lib.append(my_data("light.inp","#lstart","m",_("Lambda start"),"e",1.0,"QLineEdit",hide_on_true_token="#light_wavelength_auto_mesh"))
lib.append(my_data("light.inp","#lstop","m",_("Lambda stop"),"e",1.0,"QLineEdit",hide_on_true_token="#light_wavelength_auto_mesh"))
lib.append(my_data("light.inp","#electron_eff","0-1",_("Electron generation efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("light.inp","#hole_eff","0-1",_("Hole generation efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("light.inp","#sun",_("filename"),_("Sun's spectra"),"e",1.0,"QLineEdit"))
lib.append(my_data("light.inp","#light_file_generation","file_name",_("File containing generation rate"),"e",1.0,"gpvdm_select"))
lib.append(my_data("light.inp","#Dphotoneff","0-1",_("Photon efficiency"),"e",1.0,"QLineEdit",min=0.001,max=1.2))
lib.append(my_data("light.inp","#light_file_qe_spectra","au",_("QE spectra file"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#light_profile","au",_("Profile of light beam"),"s",1.0,"QComboBoxShape"))
#filter.inp
lib.append(my_data("filter.inp","#filter_material","...",_("Optical filter material"),"e",1.0,"gpvdm_select_material" ,units_widget="QPushButton"))
lib.append(my_data("filter.inp","#filter_db","0-1000dB",_("dB"),"e",1.0,"QLineEdit"))
#laser?.inp
lib.append(my_data("","#laserwavelength","m",_("Laser wavelength"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#laser_pulse_width","s",_("Length of pulse"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#spotx","m",_("Spot size x"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#spoty","m",_("Spot size y"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulseJ","J",_("Energy in pulse"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#laser_photon_efficiency","0-1",_("Efficiency of photons"),"e",1.0,"QLineEdit"))
#dos?.inp
lib.append(my_data("","#dostype","Edit",_("DoS distribution"),"s",1.0,"generic_switch",units_widget="QPushButton",defaults=[[_("Complex"),"complex"],[_("Exponential"),"exponential"]]))
lib.append(my_data("","#dos_free_carrier_stats","type",_("Free carrier statistics"),"e",1.0,"QComboBoxLang",defaults=[[("mb_equation"),_("Maxwell Boltzmann - analytic")],["mb_look_up_table_analytic",_("Maxwell Boltzmann - numerical+analytic")],["mb_look_up_table",_("Maxwell Boltzmann - full numerical")],["fd_look_up_table",_("Ferm-Dirac - numerical")]]))
lib.append(my_data("","#Nc","m^{-3}",_("Effective density of free electron states (@300K)"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#Nv","m^{-3}",_("Effective density of free hole states (@300K)"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#symmetric_mobility_e","m^{2}V^{-1}s^{-1}",_("Electron mobility"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,defaults=[True]))
lib.append(my_data("","#symmetric_mobility_h","m^{2}V^{-1}s^{-1}",_("Hole mobility"),"e",1.0,"mobility_widget",min=1.0,max=1e-14, defaults=[False] ))
lib.append(my_data("","#mue_z","m^{2}V^{-1}s^{-1}",_("Electron mobility z"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#mue_x","m^{2}V^{-1}s^{-1}",_("Electron mobility x"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#mue_y","m^{2}V^{-1}s^{-1}",_("Electron mobility y"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#muh_z","m^{2}V^{-1}s^{-1}",_("Hole mobility z"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#muh_x","m^{2}V^{-1}s^{-1}",_("Hole mobility x"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#muh_y","m^{2}V^{-1}s^{-1}",_("Hole mobility y"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#symmetric_mobility_h","m^{2}V^{-1}s^{-1}",_("Hole mobility"),"e",1.0,"mobility_widget",min=1.0,max=1e-14, defaults=[False] ))
lib.append(my_data("","#ion_density","m^{-3}",_("Perovskite ion density"),"e",1.0,"QLineEdit",min=1e10,max=1e27,hidden=True))
#lib.append(my_data("","#ion_mobility","m^{2}V^{-1}s^{-1}",_("Perovskite ion mobility"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#doping_start","m^{-3}",_("Doping density (x=0)"),"e",1.0,"QLineEdit",min=1.0,max=1e27,hidden=True))
lib.append(my_data("","#doping_stop","m^{-3}",_("Doping density (x=max)"),"e",1.0,"QLineEdit",min=1.0,max=1e27,hidden=True))
lib.append(my_data("","#Ntrape","m^{-3} eV^{-1}",_("Electron trap density"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#Ntraph","m^{-3} eV^{-1}",_("Hole trap density"),"e",1.0,"QLineEdit",min=1e10,max=1e27 ))
lib.append(my_data("","#Etrape","eV",_("Electron tail slope"),"e",1.0,"QLineEdit",min=20e-3,max=150e-3 ))
lib.append(my_data("","#Etraph","eV",_("Hole tail slope"),"e",1.0,"QLineEdit",min=20e-3,max=150e-3 ))
lib.append(my_data("","#epsilonr","au",_("Relative permittivity"),"e",1.0,"QLineEdit",min=1.0,max=10.0 ))
lib.append(my_data("","#srhsigman_e","m^{-2}",_("Free electron to Trapped electron"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
lib.append(my_data("","#srhsigmap_e","m^{-2}",_("Trapped electron to Free hole"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
lib.append(my_data("","#srhsigman_h","m^{-2}",_("Trapped hole to Free electron"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
lib.append(my_data("","#srhsigmap_h","m^{-2}",_("Free hole to Trapped hole"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15))
lib.append(my_data("","#free_to_free_recombination","m^{3}s^{-1}",_("n_{free} to p_{free} Recombination rate constant"),"e",1.0,"QLineEdit",min=1e-27,max=1e-15 ))
#electrical?.inp
lib.append(my_data("","#electrical_component","type",_("Component"),"e",1.0,"QComboBoxLang",defaults=[[("resistance"),_("Resistance")],["diode",_("Diode")],["link",_("Link")]]))
lib.append(my_data("","#electrical_shunt","Ohm m",_("Shunt resistivity"),"e",1.0,"QLineEdit",min=0.1,max=1e20, hide_on_token_eq=[["#electrical_component","resistance"],["#electrical_component","link"]] ))
#lib.append(my_data("","#electrical_series","Ohm m",_("Series resistivity"),"e",1.0,"QLineEdit",min=0.1,max=1e20, hide_on_token_eq=[["#electrical_component","link"]] ))
lib.append(my_data("","#electrical_symmetrical_resistance","Ohm m",_("Series resistivity"),"e",1.0,"mobility_widget", defaults=[False] ))
lib.append(my_data("","#electrical_series_z","Ohm m",_("Series resistivity z"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#electrical_series_x","Ohm m",_("Series resistivity x"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#electrical_series_y","Ohm m",_("Series resistivity y"),"e",1.0,"mobility_widget",min=1.0,max=1e-1,hidden=True))
lib.append(my_data("","#electrical_n","au",_("Layer ideality factor"),"e",1.0,"QLineEdit",min=0.0,max=1.0, hide_on_token_eq=[["#electrical_component","resistance"],["#electrical_component","link"]] ))
lib.append(my_data("","#electrical_J0","A m^{-2}",_("Reverse bias current"),"e",1.0,"QLineEdit",min=0.0,max=1e6, hide_on_token_eq=[["#electrical_component","resistance"],["#electrical_component","link"]] ))
lib.append(my_data("","#electrical_enable_generation",_("True/False"),_("Enable optical charge\ncarrier generation"),"e",1.0,"gtkswitch" ))
#shape?.inp
lib.append(my_data("","#shape_type","au",_("Shape type"),"s",1.0,"QComboBoxShape"))
lib.append(my_data("","#shape_dx","m",_("dx of the object"),"e",1.0,"QLineEdit",data_type="float"))
lib.append(my_data("","#shape_dy","m",_("dy of the object"),"e",1.0,"QLineEdit",data_type="float"))
lib.append(my_data("","#shape_dz","m",_("dz of the object"),"e",1.0,"QLineEdit",data_type="float"))
lib.append(my_data("","#shape_padding_dx","m",_("dx padding"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_padding_dy","m",_("dy padding"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_padding_dz","m",_("dz padding"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_nx","au",_("Number of objects x"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_ny","au",_("Number of objects y"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_nz","au",_("Number of objects z"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_x0","m",_("x offset"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_y0","m",_("y offset"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_z0","m",_("z offset"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_remove_layer",_("True/False"),_("Remove layer"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#shape_dos",_("Edit"),_("Electrical parameters"),"e",1.0,"shape_dos_switch",units_widget="QPushButton"))
lib.append(my_data("","#shape_electrical",_("Edit"),_("Electrical parameters"),"e",1.0,"shape_dos_switch",units_widget="QPushButton"))
lib.append(my_data("","#shape_optical_material",_("Edit"),_("Optical material"),"e",1.0,"gpvdm_select_material" ,units_widget="QPushButton"))
lib.append(my_data("","#shape_flip_y",_("True/False"),_("Flip y"),"e",1.0,"gtkswitch"))
#interface?.inp
lib.append(my_data("","#interface_model","type",_("Interface model"),"e",1.0,"QComboBoxLang",defaults=[[("none"),_("None")],["recombination",_("Recombination")]]))
lib.append(my_data("","#interface_eh_tau","m^{3}s^{-1}",_("Recombination constant"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#interface_model","none"]]))
#stark.inp
lib.append(my_data("","#stark_startime","s",_("startime"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_ea_factor","au",_("ea_factor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_Np","1/0",_("Np"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_den","1/0",_("den"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_externalv","V",_("externalv"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_dt_neg_time","s",_("dt_neg_time"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_dt","s",_("dt"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_dt_mull","au",_("dt_mull"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_stop","s",_("stop"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_stark","1/0",_("stark"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_lasereff","1/0",_("lasereff"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_probe_wavelength","nm",_("wavelength"),"e",1e9,"QLineEdit"))
lib.append(my_data("","#stark_sim_contacts","1/0",_("sim_contacts"),"e",1.0,"QLineEdit"))
#ref
lib.append(my_data("","#ref_url","au",_("Website"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_author","au",_("Author"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_jounral","au",_("Journal"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_title","au",_("Title"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_volume","au",_("Volume"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_pages","au",_("Pages"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_year","au",_("Year"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_DOI","au",_("DOI"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_booktitle","au",_("Book title"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_publisher","au",_("Publisher"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_isbn","au",_("ISBN"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ref_unformatted","au",_("Scraped text"),"e",1.0,"QLineEdit"))
#pulse
lib.append(my_data("","#Rshort_pulse","Ohms",_("R_{short}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_bias","V",_("V_{bias}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_light_efficiency","au",_("Efficiency of light"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_subtract_dc",_("True/False"),_("subtract DC"),"e",1.0,"gtkswitch"))
#mat.inp
lib.append(my_data("","#material_type","type",_("Material type"),"e",1.0,"QComboBoxLang",defaults=[[("organic"),_("Organic")],["oxide",_("Oxide")],["inorganic",_("Inorganic")],["metal",_("Metal")],["other",_("Other")]]))
lib.append(my_data("","#mat_alpha","0-1.0",_("Alpha channel"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#red_green_blue","rgb",_("Color"),"e",1.0,"QColorPicker"))
lib.append(my_data("","#mat_alpha","0-1",_("Transparency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#status","type",_("Publish material data?"),"e",1.0,"QComboBoxLang",defaults=[[("public"),_("Public")],["private",_("Private")]]))
lib.append(my_data("","#changelog","au",_("Change log"),"e",1.0,"QChangeLog"))
#jv.inp
lib.append(my_data("","#jv_step_mul","0-2.0",_("JV voltage step multiplyer"),"e",1.0,"QLineEdit",hide_on_true_token="#jv_single_point"))
lib.append(my_data("","#jv_max_j","A m^{-2}",_("Maximum current density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jv_light_efficiency","au",_("JV curve photon generation efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jv_pmax_n","m^{-3}",_("Average carrier density at P_{max}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jv_pmax_tau","m^{-1}",_("Recombination time constant"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vstart","V",_("Start voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vstop","V",_("Stop voltage"),"e",1.0,"QLineEdit",hide_on_true_token="#jv_single_point"))
lib.append(my_data("","#Vstep","V",_("Voltage step"),"e",1.0,"QLineEdit",hide_on_true_token="#jv_single_point"))
lib.append(my_data("","#jv_Rcontact","V",_("Contact resistance"),"e",1.0,"QParasitic"))
lib.append(my_data("","#jv_Rshunt","V",_("Shunt resistance"),"e",1.0,"QParasitic"))
lib.append(my_data("","#jv_single_point",_("True/False"),_("Single point"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#jv_use_external_voltage_as_stop",_("True/False"),_("Use external\nvoltage as stop"),"e",1.0,"gtkswitch"))
#sim_info.dat (jv plugin)
lib.append(my_data("","#voc","V",_("V_{oc}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pce","Percent",_("Power conversion efficiency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ff","a.u.",_("Fill factor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Pmax","W m^{-2}",_("Max power"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#v_pmax","V",_("Voltage at max power"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#j_pmax","Am^{-2}",_("Current density at max power"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_nt","m^{-3}",_("Trapped electrons at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_pt","m^{-3}",_("Trapped holes at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_nf","m^{-3}",_("Free electrons at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_pf","m^{-3}",_("Free holes at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_np_tot","m^{-3}",_("Total carriers (n+p)/2 at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_tau","s",_("Recombination time constant at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_R","m^{-3}s^{-1}",_("Recombination rate at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_J","A m^{-2}",_("Current density at Voc"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#jsc","A m^{-2}",_("J_{sc}"),"e",1.0,"QLineEdit"))
#sim_info.dat (optics plugin)
lib.append(my_data("","#light_photons_in_active_layer","m^{-2}",_("Photos absorbed in active layer"),"e",1.0,"QLineEdit"))
#object_stats.dat (optics plugin)
lib.append(my_data("object_stats.dat","#Rp[0-9]","m",_("Peak height Rp"),"e",1.0,"QLineEdit"))
lib.append(my_data("object_stats.dat","#Rq[0-9]","m",_("RMS height Rq"),"e",1.0,"QLineEdit"))
lib.append(my_data("object_stats.dat","#Ra[0-9]","m",_("Average height Ra"),"e",1.0,"QLineEdit"))
#cv?.inp
lib.append(my_data("","#cv_start_voltage","Volts",_("Start voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#cv_stop_voltage","Volts",_("Stop voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#cv_dv_step","Volts",_("dV step"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#cv_fx","Hz",_("Frequency"),"e",1.0,"QLineEdit"))
#sim_info.dat (equlibrium)
lib.append(my_data("","#left_holes","m^{-3}",_("Left hole density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#left_electrons","m^{-3}",_("Left electron density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#right_holes","m^{-3}",_("Right hole density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#right_electrons","m^{-3}",_("Right electron density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vbi","m^{-3}",_("Built in potential"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#electron_affinity_left","eV",_("Electron affinity left"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#electron_affinity_right","eV",_("Electron affinity right"),"e",1.0,"QLineEdit"))
#tab
lib.append(my_data("","#english_name","name",_("English name"),"e",1.0,"QLineEdit"))
#server.inp
lib.append(my_data("","#gpvdm_core_max_threads","au",_("Number of gpvdm_core threads"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#max_gpvdm_instances","au",_("Maximum number of gpvdm_core instances"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#server_stall_time","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_exit_on_dos_error","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_max_run_time","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_auto_cpus","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_min_cpus","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#server_steel","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#port","au","Cluster port","e",1.0,"QLineEdit"))
lib.append(my_data("","#path_to_src","au",_("Path to source code"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#path_to_libs","au",_("Path to compiled libs for cluster"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#make_command","au",_("Make command"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#exe_name","au",_("exe name"),"e",1.0,"QLineEdit"))
#cluster.inp
lib.append(my_data("","#cluster_user_name","au","User name","e",1.0,"QLineEdit"))
lib.append(my_data("","#cluster_ip","au","Cluster IP","e",1.0,"QLineEdit"))
lib.append(my_data("","#cluster_cluster_dir","au",_("Remote cluster directory"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#nodes","au",_("Remote node list"),"e",1.0,"QLineEdit"))
#triangle mesh editor
lib.append(my_data("","#mesh_gen_nx","au",_("x-triangles"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#mesh_gen_ny","au",_("y-triangles"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#mesh_gen_opp","au",_("Method"),"e",1.0,"QComboBoxLang",defaults=[["node_reduce",_("Node reduce")],["square_mesh_gen",_("No reduce")]]))
lib.append(my_data("","#shape_import_blur","width pixels",_("Gaussian blur"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#shape_import_y_norm_percent","percent",_("Percent of histogram to ignore"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gauss_sigma","pixels",_("Sigma of gaussian"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gauss_offset_x","pixels",_("Gaussian offset x"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gauss_offset_y","pixels",_("Gaussian offset y"),"e",1.0,"QLineEdit"))
#honeycomb
lib.append(my_data("","#honeycomb_dx","pixels",_("dx of Honeycomb"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_dy","pixels",_("dy of Honeycomb"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_line_width","pixels",_("Line width"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_x_shift","pixels",_("x shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#honeycomb_y_shift","pixels",_("y shift"),"e",1.0,"QLineEdit"))
#boundary
lib.append(my_data("","#image_boundary_x0","pixels",_("Boundary x0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#image_boundary_x1","pixels",_("Boundary x1"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#image_boundary_y0","pixels",_("Boundary y0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#image_boundary_y1","pixels",_("Boundary y1"),"e",1.0,"QLineEdit"))
#math.inp
lib.append(my_data("math.inp","#maxelectricalitt_first","au",_("Max Electrical itterations (first step)"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#electricalclamp_first","au",_("Electrical clamp (first step)"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_electrical_error_first","au",_("Desired electrical solver error (first step)"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_enable_pos_solver",_("True/False"),_("Enable poisson solver"),"e",1.0,"gtkswitch"))
lib.append(my_data("math.inp","#maxelectricalitt","au",_("Max electrical itterations"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#electricalclamp","au",_("Electrical clamp"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#posclamp","au",_("Poisson clamping"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#electricalerror","au",_("Minimum electrical error"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#pos_max_ittr","au",_("Poisson solver max itterations"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#newton_clever_exit",_("True/False"),"Newton solver clever exit","e",1.0,"gtkswitch"))
lib.append(my_data("math.inp","#newton_min_itt","au",_("Newton minimum iterations"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#complex_solver_name",_("dll name"),_("Complex matrix solver to use"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#solver_name",_("dll name"),_("Matrix solver"),"e",1.0,"QComboBoxNewtonSelect",defaults=["umfpack","external_solver","superlu","nr_d","nr_ld"]))
lib.append(my_data("math.inp","#newton_name",_("dll name"),_("Newton solver to use"),"e",1.0,"QComboBoxNewtonSelect",defaults=["none","newton_2d","newton_simple","newton_norm","newton"]))
lib.append(my_data("math.inp","#math_t0","au",_("Slotboom T0"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_d0","au",_("Slotboom D0"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_n0","au",_("Slotboom n0"),"e",1.0,"QLineEdit"))
lib.append(my_data("math.inp","#math_newton_cache","au",_("Use newton cache (experimental)"),"e",1.0,"gtkswitch"))
#fit.inp
lib.append(my_data("fit.inp","#fit_error_mul","au",_("Fit error multiplyer"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_randomize",_("True/False"),_("Randomize fit"),"e",1.0,"gtkswitch"))
lib.append(my_data("fit.inp","#fit_random_reset_ittr","au",_("Number of iterations between random reset"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_stall_steps","au",_("Stall steps"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_disable_reset_at","au",_("Disable reset at level"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_converge_error","au",_("Fit define convergence"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_enable_simple_reset","au",_("Enable simplex reset"),"e",1.0,"gtkswitch"))
lib.append(my_data("fit.inp","#fit_enable_simple_reset","au",_("Simplex reset steps"),"e",1.0,"gtkswitch"))
lib.append(my_data("fit.inp","#fit_method","au",_("Fiting method"),"e",1.0,"QComboBox",defaults=["simplex","newton"]))
lib.append(my_data("fit.inp","#fit_simplexmul","au",_("Start simplex step multiplication"),"e",1.0,"QLineEdit"))
lib.append(my_data("fit.inp","#fit_simplex_reset","au",_("Simplex reset steps"),"e",1.0,"QLineEdit"))
#fit?.inp
lib.append(my_data("","#fit_subtract_lowest_point",_("True/False"),_("Subtract lowest point"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#fit_set_first_point_to_zero",_("True/False"),_("Set first point to zero="),"e",1.0,"gtkswitch"))
#eqe.inp
lib.append(my_data("eqe.inp","#eqe_voltage","au",_("EQE Voltage"),"e",1.0,"QLineEdit"))
#thermal.inp
lib.append(my_data("thermal.inp","#thermal_model_type","au",_("Thermal model type"),"s",1.0,"QComboBoxLang",defaults=[["thermal_hydrodynamic",_("Hydrodynamic")],["thermal_lattice",_("Lattice heat")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#Ty0","Kelvin",_("Device temperature at y_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty0_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Ty0_boundry","au",_("Boundary condition for y_{min}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_y0","W m^{-}K^{-1}",_("Conductivity of heat sink y_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty0_boundry", "neumann"],["#Ty0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_y0","m",_("Heat sink length y_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty0_boundry", "neumann"],["#Ty0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Ty1","Kelvin",_("Device temperature at y_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty1_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Ty1_boundry","au",_("Boundary condition for y_{max}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_y1","W m^{-2}K^{-1}",_("Conductivity of heat sink y_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty1_boundry", "neumann"],["#Ty1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_y1","m",_("Heat sink length y_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Ty1_boundry", "neumann"],["#Ty1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tx0","Kelvin",_("Device temperature at x_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx0_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tx0_boundry","au",_("Boundary condition for x_{min}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_x0","W m^{-2}K^{-1}",_("Conductivity of heat sink x_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx0_boundry", "neumann"],["#Tx0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_x0","m",_("Heat sink length x_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx0_boundry", "neumann"],["#Tx0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tx1","Kelvin",_("Device temperature at x_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx1_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tx1_boundry","au",_("Boundary condition for x_{max}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_x1","W m^{-2}K^{-1}",_("Conductivity of heat sink x_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx1_boundry", "neumann"],["#Tx1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_x1","m",_("Heat sink length x_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tx1_boundry", "neumann"],["#Tx1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tz0","Kelvin",_("Device temperature at z_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz0_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tz0_boundry","au",_("Boundary condition for z_{min}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_z0","W m^{-2}K^{-1}",_("Conductivity of heat sink z_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz0_boundry", "neumann"],["#Tz0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_z0","m",_("Heat sink length z_{min}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz0_boundry", "neumann"],["#Tz0_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#Tz1","Kelvin",_("Device temperature at z_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz1_boundry", "neumann"]]))
lib.append(my_data("thermal.inp","#Tz1_boundry","au",_("Boundary condition for z_{max}"),"s",1.0,"QComboBoxLang",defaults=[["isothermal",_("Isothermal")],["neumann",_("Neumann (==0)")],["heatsink",_("Heatsink")]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#heatsink_z1","W m^{-2}K^{-1}",_("Conductivity of heat sink z_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz1_boundry", "neumann"],["#Tz1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#heatsink_length_z1","m",_("Heat sink length z_{max}"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"], hide_on_token_eq=[["#Tz1_boundry", "neumann"],["#Tz1_boundry", "isothermal"]]))
lib.append(my_data("thermal.inp","#thermal_l",_("True/False"),_("Lattice heat model"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_e",_("True/False"),_("Electron heat model"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_h",_("True/False"),_("Hole heat model"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_tau_e","s",_("Electron relaxation time"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_tau_h","s",_("Hole relaxation time"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#thermal_model_type", "thermal_lattice"]], hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#thermal_kl","W m^{-1} C^{-1}",_("Thermal conductivity"),"e",1.0,"QLineEdit", hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#Tliso",_("True/False"),_("Isothermal boundary on left"),"e",1.0,"gtkswitch", hide_on_false_token=["#thermal"]))
lib.append(my_data("thermal.inp","#Triso",_("True/False"),_("Isothermal boundary on right"),"e",1.0,"gtkswitch", hide_on_false_token=["#thermal"]))
#dump.inp
lib.append(my_data("dump.inp","#newton_dump",_("True/False"),_("Dump from newton solver"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#plot",_("True/False"),_("Plot bands etc.. "),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_band_structure","","","e",1.0,"QLineEdit"))
lib.append(my_data("dump.inp","#dump_slices_by_time",_("True/False"),_("dump slices by time"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_slices",_("True/False"),_("Dump slices"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_dynamic",_("True/False"),_("Dump dynamic"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_zip_files",_("True/False"),_("Dump zip files"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_write_out_band_structure",_("True/False"),_("Write out band structure"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optics",_("True/False"),_("Dump optical information"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optics_verbose",_("True/False"),_("Dump optics verbose"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_print_newtonerror",_("True/False"),_("Print newton error"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_print_converge",_("True/False"),_("Print solver convergence"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_write_converge",_("True/False"),_("Write newton solver convergence to disk"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_print_pos_error",_("True/False"),_("Print poisson solver convergence"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_norm_time_to_one",_("True/False"),_("Normalize output x-time to one"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_built_in_voltage",_("True/False"),_("Dump the built in voltage."),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optical_probe_spectrum",_("True/False"),_("Dump optical probe spectrum"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_optics_summary",_("True/False"),_("Dump optical summary"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_ray_trace_map",_("True/False"),_("Dump raytrace plots"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dumpitdos","","","e",1.0,"QLineEdit"))
lib.append(my_data("dump.inp","#dump_workbook",_("True/False"),_("Dump an excel workbook for each simulation run congaing the results."),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_file_access_log",_("True/False"),_("Write file access log to disk."),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_use_cache",_("True/False"),_("Use cache for file writes"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_write_headers",_("True/False"),_("Write headers to output files"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_first_guess",_("True/False"),_("Write first guess to equations"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_log_level","au",_("Log verbocity"),"s",1.0,"QComboBoxLang",defaults=[[("none"),_("None")],["screen",_("Screen")],["disk",_("Disk")],["screen_and_disk",_("Screen and disk")]]))
lib.append(my_data("dump.inp","#dump_log_level","au",_("Log verbocity"),"s",1.0,"QComboBoxLang",defaults=[[("none"),_("None")],["screen",_("Screen")],["disk",_("Disk")],["screen_and_disk",_("Screen and disk")]]))
lib.append(my_data("dump.inp","#dump_dynamic_pl_energy","au",_("PL dump Energy"),"s",1.0,"QLineEdit"))
lib.append(my_data("dump.inp","#dump_remove_dos_cache",_("True/False"),_("Clean up DoS cache files"),"e",1.0,"gtkswitch"))
lib.append(my_data("dump.inp","#dump_verbose_electrical_solver_results",_("True/False"),_("Dump verbose electrical solver results"),"e",1.0,"gtkswitch"))
#pl_ss?.inp
lib.append(my_data("","#pl_mode","au",_("Device state"),"s",1.0,"QComboBoxLang",defaults=[[("voc"),_("Voc")],["Jsc",_("Jsc")]]))
#ray
lib.append(my_data("ray.inp","#ray_wavelength_points","au",_("Wavelength points"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_auto_run","au",_("Run the ray tracer"),"s",1.0,"QComboBoxLang",defaults=[[("ray_run_never"),_("Never")],["ray_run_once",_("Once per simulation")],["ray_run_step",_("Each simulation step")]]))
lib.append(my_data("ray.inp","#ray_theta_steps","au",_("Theta steps"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_theta_start","Degrees",_("Theta start"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_theta_stop","Degrees",_("Theta stop"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_phi_steps","au",_("Phi steps"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_phi_start","Degrees",_("Phi start"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_phi_stop","Degrees",_("Phi stop"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_escape_bins","au",_("Escape bins"),"e",1.0,"QLineEdit"))
lib.append(my_data("ray.inp","#ray_auto_wavelength_range",_("True/False"),_("Automatic wavelength range"),"e",1.0,"gtkswitch"))
lib.append(my_data("ray.inp","#ray_lambda_start","nm",_("Start wavelength"),"e",1.0,"QLineEdit",hide_on_true_token="#ray_auto_wavelength_range"))
lib.append(my_data("ray.inp","#ray_lambda_stop","nm",_("Stop wavelength"),"e",1.0,"QLineEdit",hide_on_true_token="#ray_auto_wavelength_range"))
lib.append(my_data("ray.inp","#ray_emission_source","au",_("Emit from"),"s",1.0,"QComboBoxLang",defaults=[[("ray_emission_electrical_mesh"),_("Each electrical mesh point")],["ray_emission_single_point",_("Center of each layer")]]))
#viewpoint.inp
lib.append(my_data("view_point.inp","#viewpoint_enabled",_("True/False"),_("Enable viewpoint"),"e",1.0,"gtkswitch"))
lib.append(my_data("view_point.inp","#viewpoint_size","au",_("View point size"),"e",1.0,"QLineEdit"))
lib.append(my_data("view_point.inp","#viewpoint_dz","au",_("View point dz"),"e",1.0,"QLineEdit"))
lib.append(my_data("view_point.inp","#viewpoint_nx","au",_("Mesh points x"),"e",1.0,"QLineEdit"))
lib.append(my_data("view_point.inp","#viewpoint_nz","au",_("Mesh points z"),"e",1.0,"QLineEdit"))
#led.inp
lib.append(my_data("","#led_extract_eff","0.0-1.0",_("LED extraction efficiency"),"e",1.0,"QLineEdit"))
#device.inp
#lib.append(my_data("","#invert_applied_bias","au",_("Invert applied bias"),"e",1.0,"gtkswitch"))
#lib.append(my_data("","#lcharge","m^{-3}",_("Charge on left contact"),"e",1.0,"QLineEdit"))
#lib.append(my_data("","#rcharge","m^{-3}",_("Charge on right contact"),"e",1.0,"QLineEdit"))
#parasitic.inp
lib.append(my_data("parasitic.inp","#Rshunt","Ohms m^{2}",_("Shunt resistance"),"e",1.0,"QLineEdit",min=1e-3,max=1e6))
lib.append(my_data("parasitic.inp","#Rcontact","Ohms",_("Series resistance"),"e",1.0,"QLineEdit",min=1.0,max=200))
lib.append(my_data("parasitic.inp","#otherlayers","m",_("Other layers"),"e",1.0,"QLineEdit"))
lib.append(my_data("parasitic.inp","#test_param","m",_("debug (ignore)"),"e",1.0,"QLineEdit",hidden=True))
#mesh?.inp
lib.append(my_data("","#remesh_enable","au",_("Automatic remesh"),"e",1.0,"gtkswitch"))
#lib.append(my_data("mesh_y.inp","#mesh_layer_points0","s","Mesh points y0","e",1.0,"QLineEdit"))
#lib.append(my_data("mesh_y.inp","#mesh_layer_points1","s","Mesh points y1","e",1.0,"QLineEdit"))
#pl?.inp
lib.append(my_data("","#pl_enabled",_("True/False"),_("Turn on luminescence"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#pl_fe_fh","0.0-1.0",_("n_{free} to p_{free} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_fe_te","0.0-1.0",_("n_{free} to n_{trap} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_te_fh","0.0-1.0",_("n_{trap} to p_{free} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_th_fe","0.0-1.0",_("p_{trap} to n_{free} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_fh_th","0.0-1.0",_("p_{free} to p_{trap} photon generation efficiency"),"e",1.0,"QLineEdit", hide_on_true_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_input_spectrum",_("Edit"),_("Experimental emission spectra"),"e",1.0,"gpvdm_select_emission" ,units_widget="QPushButton", hide_on_false_token=["#pl_use_experimental_emission_spectra"]))
lib.append(my_data("","#pl_experimental_emission_efficiency","0.0-1.0",_("Experimental emission efficiency"),"e",1.0,"QLineEdit", hide_on_false_token="#pl_use_experimental_emission_spectra"))
lib.append(my_data("","#pl_emission_enabled",_("True/False"),_("Emission enabled from this layer"),"e",1.0,"gtkswitch"))
#pl_experimental_emission_efficiency
lib.append(my_data("","#pl_use_experimental_emission_spectra",_("True/False"),_("Use experimental emission spectra"),"e",1.0,"gtkswitch"))
#fxdomain?.inp
lib.append(my_data("","#fxdomain_Rload","Ohms",_("Load resistor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_points","au",_("fx domain mesh points"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"]]))
lib.append(my_data("","#fxdomain_n","au",_("Cycles to simulate"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"]]))
lib.append(my_data("","#fxdomain_voltage_modulation_max","V",_("Voltage modulation depth"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fx_modulation_type","optical"],["#fxdomain_large_signal","small_signal"]]))
lib.append(my_data("","#fx_modulation_type","au",_("Excite with"),"e",1.0,"QComboBoxLang",defaults=[[("voltage"),_("Voltage")],[("optical"),_("Light")]]))
lib.append(my_data("","#fxdomain_measure","au",_("Measure"),"e",1.0,"QComboBoxLang",defaults=[[("measure_voltage"),_("Voltage")],[("measure_current"),_("Current")]]))
lib.append(my_data("","#fxdomain_light_modulation_depth","au",_("Light modulation depth"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fx_modulation_type","voltage"]]))
lib.append(my_data("","#fxdomain_do_fit","au",_("Run fit after simulation"),"e",1.0,"gtkswitch",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"],["#fxdomain_large_signal","fourier"]]))
lib.append(my_data("","#periods_to_fit","au",_("Periods to fit"),"e",1.0,"QLineEdit",hide_on_token_eq=[["#fxdomain_large_signal","small_signal"],["#fxdomain_large_signal","fourier"]]))
lib.append(my_data("","#fxdomain_r","",_("Re(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_i","V",_("Im(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_Jr","Am^{-2}",_("Re(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_Ji","Am^{-2}",_("Im(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_fx","Hz",_("fx"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_delta_i","s",_("di"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_delta_g","s",_("dmodulation"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_delta_phase","rads",_("dphase"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#fxdomain_large_signal","au",_("Simulation type"),"e",1.0,"QComboBoxLang",defaults=[[("large_signal"),_("Large signal")],[("fourier"),_("Fourier")]])) #,[("small_signal"),_("Small signal")]
#is?.inp
lib.append(my_data("","#is_Vexternal","Volts",_("V_{external}"),"e",1.0,"QLineEdit"))
#node_list.inp
lib.append(my_data("","#node_list","au",_("Node list"),"e",1.0,"QChangeLog"))
#crypto.inp
lib.append(my_data("","#iv","au",_("Initialization vector"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#key","au",_("Cryptographic key"),"e",1.0,"QLineEdit"))
#lumo?.inp
lib.append(my_data("","#function_\d+","au","Function","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_enable_\d+","au","Enabled","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_a_\d+","au","a","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_b_\d+","au","b","e",1.0,"QLineEdit"))
lib.append(my_data("","#function_c_\d+","au","c","e",1.0,"QLineEdit"))
lib.append(my_data("","#Psun","Sun",_("Intensity of the sun"),"e",1.0,"QLineEdit",hidden=True))
lib.append(my_data("","#saturation_n0","#saturation_n0",_("#saturation_n0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#saturation_rate","#saturation_rate",_("#saturation_rate"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_saturate","#imps_saturate",_("#imps_saturate"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simplephotondensity","m^{-2}s^{-1}",_("Photon density"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simple_alpha","m^{-1}",_("Absorption of material"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simmode","au",_("#simmode"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#meshpoints","au",_("Mesh points (x)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#function","au",_("#function"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vexternal","V",_("start voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Vmax","V",_("Max voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Eg","eV",_("Eg"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Xi","eV",_("Xi"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#start_stop_time","s",_("Time of pause"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stopstart","1/0",_("Pause between iterations"),"e",1.0,"QComboBox",defaults=["1","0"]))
lib.append(my_data("","#invert_current",_("True/False"),_("Invert output"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#use_capacitor","1/0",_("Use capacitor"),"e",1.0,"QComboBox",defaults=["1","0"]))
#
lib.append(my_data("","#Rshort_imps","Ohms",_("R_{short}"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_sun","1=1 Sun",_("Backgroud light bias"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_modulation_max","1=1 Sun",_("Modulation depth"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_modulation_fx","Hz",_("Modulation frequency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#high_sun_scale","au",_("High light multiplyer"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_r","Amps",_("Re(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_i","Amps",_("Im(i)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_Jr","Amps $m^{-2}$",_("Re(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_Ji","Amps $m^{-2}$",_("Im(J)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_fx","Hz",_("Frequency"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_delta_i","s",_("Phase shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_delta_g","s",_("Phase shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_delta_phase","s",_("Phase shift"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_points","s",_("points"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_n","s",_("Wavelengths to simulate"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#imps_Vexternal","Volts",_("External voltage"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Cext","C",_("External C"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Rext","Ohms",_("External R"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#Rscope","Ohms",_("Resistance of scope"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_bands","bands",_("Number of traps"),"s",1.0,"QLineEdit"))
#suns_voc
lib.append(my_data("","#sun_voc_single_point","True/False",_("Single point"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#sun_voc_Psun_start","Suns",_("Start intensity"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#sun_voc_Psun_stop","Suns",_("Stop intensity"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#sun_voc_Psun_mul","au",_("step multiplier"),"e",1.0,"QLineEdit"))
#suns_jsc
lib.append(my_data("suns_voc.inp","#sunstart","Suns",_("Start intensity"), "e",1.0,"QLineEdit"))
lib.append(my_data("suns_voc.inp","#sunstop","Suns",_("Stop intensity"), "e",1.0,"QLineEdit"))
lib.append(my_data("suns_voc.inp","#sundp","au",_("Step"), "e",1.0,"QLineEdit"))
lib.append(my_data("suns_voc.inp","#sundpmul","au",_("step multiplier"), "e",1.0,"QLineEdit"))
lib.append(my_data("","#simplephotondensity","m^{-2}s^{-1}",_("Photon Flux"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#simple_alpha","m^{-1}",_("Absorption"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#xlen","m",_("device width"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#zlen","m",_("device breadth"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#ver","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#dostype","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#me","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#mh","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#gendos","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#notused","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#notused","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Tstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Tstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Tpoints","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#npoints","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#nstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#npoints","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srhbands","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_start","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srhvth_e","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srhvth_h","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_cut","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#lumodelstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#lumodelstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#homodelstart","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#homodelstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#gaus_mull","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Esteps","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Rshort","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Dphoton","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#interfaceleft","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#interfaceright","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#phibleft","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#phibright","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vl_e","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vl_h","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vr_e","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#vr_h","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#light_model","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#NDfilter","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#plottime","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#startstop","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#plotfile","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#Rshort","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#solve_at_Vbi","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#remesh","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#newmeshsize","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#epitaxy","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#alignmesh","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_start_time","","","e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_J_to_Jr","au","Ratio of conduction current to recombination current","e",1.0,"QLineEdit"))
lib.append(my_data("","#voc_i","au",_("Current"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#kl_in_newton","1/0",_("Solve Kirchhoff's current law in Newton solver"),"e",1.0,"QComboBox",defaults=["1","0"]))
lib.append(my_data("","#simplexmul","au","simplex mull","e",1.0,"QLineEdit"))
lib.append(my_data("","#simplex_reset","au","Reset steps","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_nfree_to_ptrap","m^{-3}s^{-1}","nfree_to_ptrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_pfree_to_ntrap","m^{-3}s^{-1}","max_pfree_to_ntrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_nrelax","m^{-3}s^{-1}","max_nrelax","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_prelax","m^{-3}s^{-1}","max_prelax","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_nfree","m^{-3}","max_nfree","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_pfree","m^{-3}","max_pfree","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_ntrap","m^{-3}","max_ntrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#max_ptrap","m^{-3}","max_ptrap","e",1.0,"QLineEdit"))
lib.append(my_data("","#alpha_max_reduction","m^{-1}","alpha_max_reduction","e",1.0,"QLineEdit"))
lib.append(my_data("","#alpha_max_increase","m^{-1}","alpha_max_increase","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r1","m^{-3}s^{-1}","srh electron rate 1","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r2","m^{-3}s^{-1}","srh electron rate 2","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r3","m^{-3}s^{-1}","srh electron rate 3","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_n_r4","m^{-3}s^{-1}","srh electron rate 4","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r1","m^{-3}s^{-1}","srh hole rate 1","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r2","m^{-3}s^{-1}","srh hole rate 2","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r3","m^{-3}s^{-1}","srh hole rate 3","e",1.0,"QLineEdit"))
lib.append(my_data("","#srh_p_r4","m^{-3}s^{-1}","srh hole rate 4","e",1.0,"QLineEdit"))
lib.append(my_data("","#band_bend_max","percent","band bend max","e",1.0,"QLineEdit"))
#config.inp
lib.append(my_data("","#gui_config_3d_enabled",_("True/False"),_("Enable 3d effects"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#gui_use_icon_theme",_("True/False"),_("Use icons from OS"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#matlab_interpreter",_("au"),_("Matlab interpreter"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gl_render_grid",_("True/False"),_("Render grid"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#gl_bg_color",_("rgb"),_("Color used for 3d background"),"e",1.0,"QColorPicker"))
lib.append(my_data("","#gl_render_text",_("au"),_("Render text in 3d"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#gl_device_height",_("au"),_("Device Heigh (display only)"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#gl_dy_layer_offset",_("au"),_("Layer offset (display only)"),"e",1.0,"QLineEdit"))
#fit
lib.append(my_data("","#time_shift","s","time shift","e",1.0,"QLineEdit"))
lib.append(my_data("","#start","s","start","e",1.0,"QLineEdit"))
lib.append(my_data("","#stop","s","stop","e",1.0,"QLineEdit"))
lib.append(my_data("","#log_x",_("True/False"),_("log x"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#log_y",_("True/False"),_("log y"),"e",1.0,"gtkswitch"))
lib.append(my_data("","#sim_data",_("filename"),"Fit file name","e",1.0,"QLineEdit"))
lib.append(my_data("","#fit_invert_simulation_y",_("True/False"),_("Invert simulated data (y)"),"e",1.0,"gtkswitch"))
#epitaxy.inp
lib.append(my_data("epitaxy.inp","#layer_width0","nm","start","e",1e9,"QLineEdit"))
lib.append(my_data("epitaxy.inp","#layer_width1","nm","start","e",1e9,"QLineEdit"))
lib.append(my_data("epitaxy.inp","#layer_width2","nm","start","e",1e9,"QLineEdit"))
#
lib.append(my_data("","#layer0","m",_("Active layer width"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_saturate","au","Stark saturate","e",1.0,"QLineEdit"))
lib.append(my_data("","#n_mul","au","n mul","e",1.0,"QLineEdit"))
lib.append(my_data("","#alpha_mul","m^{-1}","Alpha mul","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point0","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point1","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point2","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point3","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_point4","au","DR/R","e",1.0,"QLineEdit"))
lib.append(my_data("","#stark_subtracted_value","s","subtracted value","e",1.0,"QLineEdit"))
lib.append(my_data("","#light_energy","eV","Energy","e",1.0,"QLineEdit"))
lib.append(my_data("","#sim_id","au","sim id","e",1.0,"QLineEdit"))
lib.append(my_data("","#Rload","Ohms",_("External load resistor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#pulse_shift","s","Shift of TPC signal","e",1.0,"QLineEdit"))
lib.append(my_data("","#light_illuminate_from","au",_("Illuminate from"),"e",1.0,"QComboBoxLang",defaults=[[("top"),_("Top")],[("bottom"),_("Bottom")]]))
#time_mesh_config*.inp
lib.append(my_data("","#fs_laser_time","s","Laser start time","e",1.0,"QLineEdit"))
#fdtd.inp
lib.append(my_data("fdtd.inp","#use_gpu","au",_("OpenCL GPU acceleration"),"e",1.0,"gtkswitch"))
lib.append(my_data("fdtd.inp","#fdtd_lambda_start","m",_("Start wavelength"),"e",1.0,"QLineEdit"))
lib.append(my_data("fdtd.inp","#fdtd_lambda_stop","m",_("Stop wavelength"),"e",1.0,"QLineEdit"))
lib.append(my_data("fdtd.inp","#fdtd_lambda_points","m",_("Wavelength steps"),"e",1.0,"QLineEdit"))
#any files
lib.append(my_data("","#dump_verbosity","au",_("Ouput verbosity to disk"),"e",1.0,"QComboBoxLang",defaults=[["0",_("Key results")],[("1"),_("Write everything to disk")],[("2"),_("Write everything to disk every 2nd step")],[("5"),_("Write everything to disk every 5th step")],[("10"),_("Write everything to disk every 10th step")]]))
lib.append(my_data("","#dump_screen_verbosity", "au", _("Ouput verbosity to screen"),"e",1.0,"QComboBoxLang",defaults=[[("dump_verbosity_everything"),_("Show lots")],["dump_verbosity_key_results",_("Show key results")]]))
#circuit diagram
lib.append(my_data("","#resistance","Ohms",_("Resistor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#capacitance","F",_("Capacitor"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#inductance","H",_("Inductance"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#J0","Apms m^{-2}",_("J0"),"e",1.0,"QLineEdit"))
lib.append(my_data("","#nid","(a.u.)",_("Ideality factor"),"e",1.0,"QLineEdit"))
class tokens:
def __init__(self):
global lib
if len(lib)==0:
build_token_lib()
def find(self,token):
global lib
search_token=token.strip()
if search_token.startswith("#"):
search_token=search_token[1:]
for i in range(0, len(lib)):
if bool(re.match(lib[i].token[1:]+"$",search_token))==True:
if lib[i].units=="" and lib[i].info=="":
return False
else:
return lib[i]
#sys.stdout.write("Add -> lib.append(my_data(\""+token+"\",\"\",\"\",[\"text\"]))\n")
return False
def dump_lib(self):
global lib
for i in range(0, len(lib)):
print(">",lib[i].token,"<>",lib[i].info,"<")
def get_lib(self):
global lib
return lib
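# Minimal usage sketch (added for illustration, not part of the original
# gpvdm source): look up the metadata registered above for a token from an
# .inp file. find() returns False when a token is unknown or carries no
# units/description.
if __name__ == "__main__":
	my_tokens = tokens()
	entry = my_tokens.find("#Rshunt")
	if entry is not False:
		print(entry.token, entry.units, entry.info)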
|
gpl-2.0
| -7,389,480,505,478,320,000 | 71.82439 | 357 | 0.624824 | false | 2.516477 | false | false | false |
znes/renpass_gis
|
renpass/components/electrical.py
|
1
|
4361
|
# -*- coding: utf-8 -*-
""" This module is designed to contain classes that act as simplified / reduced
energy specific interfaces (facades) for solph components to simplify its
application and work with the oemof datapackage reader functionality
SPDX-License-Identifier: GPL-3.0-or-later
"""
import logging
from pyomo.core.base.block import SimpleBlock
from pyomo.environ import Var, Constraint, Set, BuildAction
from oemof.network import Node, Edge, Transformer
from oemof.solph import Flow, Bus
from oemof.solph.plumbing import sequence
from renpass.facades import Facade
class ElectricalBus(Bus):
"""
Parameters
-----------
slack: boolean
True if object is slack bus of network
v_max: numeric
Maximum value of voltage angle at electrical bus
v_min: numeric
Minimum value of voltage angle at electrical bus
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.slack = kwargs.get('slack', False)
self.v_max = kwargs.get('v_max', 1000)
self.v_min = kwargs.get('v_min', -1000)
class Line(Facade, Flow):
"""
Parameters
---------
from_bus: ElectricalBus object
Bus where the input of the Line object is connected to
to_bus: ElectricalBus object
Bus where the output of the Line object is connected to
reactance: numeric
Reactance of Line object
capacity: numeric
Capacity of the Line object
capacity_cost: numeric
Cost of capacity for 1 Unit of capacity
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.from_bus = kwargs.get('from_bus')
self.to_bus = kwargs.get('to_bus')
self.reactance = sequence(kwargs.get('reactance', 0.00001))
self.capacity = kwargs.get('capacity')
self.capacity_cost = kwargs.get('capacity_cost')
# oemof related attribute setting of 'Flow-object'
self.input = self.from_bus
self.output = self.to_bus
self.bidirectional = True
self.nominal_value = self.capacity
self.min = sequence(-1)
self.investment = self._investment()
def constraint_group(self):
return ElectricalLineConstraints
class ElectricalLineConstraints(SimpleBlock):
"""
"""
CONSTRAINT_GROUP = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def _create(self, group=None):
"""
"""
if group is None:
return None
m = self.parent_block()
# create voltage angle variables
self.ELECTRICAL_BUSES = Set(initialize=[n for n in m.es.nodes
if isinstance(n, ElectricalBus)])
def _voltage_angle_bounds(block, b, t):
return b.v_min, b.v_max
self.voltage_angle = Var(self.ELECTRICAL_BUSES, m.TIMESTEPS,
bounds=_voltage_angle_bounds)
if True not in [b.slack for b in self.ELECTRICAL_BUSES]:
# TODO: Make this robust to select the same slack bus for
# the same problems
bus = [b for b in self.ELECTRICAL_BUSES][0]
logging.info(
"No slack bus set,setting bus {0} as slack bus".format(
bus.label))
bus.slack = True
def _voltage_angle_relation(block):
for t in m.TIMESTEPS:
for n in group:
if n.input.slack is True:
self.voltage_angle[n.output, t].value = 0
self.voltage_angle[n.output, t].fix()
try:
lhs = m.flow[n.input, n.output, t]
rhs = 1 / n.reactance[t] * (
self.voltage_angle[n.input, t] -
self.voltage_angle[n.output, t])
except:
raise ValueError("Error in constraint creation",
"of node {}".format(n.label))
block.electrical_flow.add((n, t), (lhs == rhs))
self.electrical_flow = Constraint(group, m.TIMESTEPS, noruleinit=True)
self.electrical_flow_build = BuildAction(
rule=_voltage_angle_relation)
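# Minimal construction sketch (added for illustration, not part of the
# original module). ElectricalBus is created with the keyword arguments its
# __init__ reads above; `label` comes from oemof's Node base class. A Line
# between the buses would be built the same way (from_bus=..., to_bus=...,
# reactance=..., capacity=...), but the exact requirements of the Facade
# base class are not shown in this file, so that call is left commented.
def _example_buses():
    slack_bus = ElectricalBus(label="bus_0", slack=True, v_min=-0.5, v_max=0.5)
    other_bus = ElectricalBus(label="bus_1", v_min=-0.5, v_max=0.5)
    # line = Line(label="line_0_1", from_bus=slack_bus, to_bus=other_bus,
    #             reactance=0.01, capacity=100)
    return slack_bus, other_bus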
|
gpl-3.0
| -7,568,780,707,150,623,000 | 30.15 | 79 | 0.569136 | false | 3.99359 | false | false | false |
tylerclair/py3canvas
|
py3canvas/apis/accounts.py
|
1
|
26185
|
"""Accounts API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class AccountsAPI(BaseCanvasAPI):
"""Accounts API Version 1.0."""
def __init__(self, *args, **kwargs):
"""Init method for AccountsAPI."""
super(AccountsAPI, self).__init__(*args, **kwargs)
self.logger = logging.getLogger("py3canvas.AccountsAPI")
def list_accounts(self, include=None):
"""
List accounts.
List accounts that the current user can view or manage. Typically,
students and even teachers will get an empty list in response; only
account admins can view the accounts that they are in.
"""
path = {}
data = {}
params = {}
# OPTIONAL - include
"""Array of additional information to include.
"lti_guid":: the 'tool_consumer_instance_guid' that will be sent for this account on LTI launches
"registration_settings":: returns info about the privacy policy and terms of use
"services":: returns services and whether they are enabled (requires account management permissions)"""
if include is not None:
self._validate_enum(include, ["lti_guid", "registration_settings", "services"])
params["include"] = include
self.logger.debug("GET /api/v1/accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts".format(**path), data=data, params=params, all_pages=True)
def list_accounts_for_course_admins(self):
"""
List accounts for course admins.
List accounts that the current user can view through their admin course enrollments.
(Teacher, TA, or designer enrollments).
Only returns "id", "name", "workflow_state", "root_account_id" and "parent_account_id"
"""
path = {}
data = {}
params = {}
self.logger.debug("GET /api/v1/course_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/course_accounts".format(**path), data=data, params=params, all_pages=True)
def get_single_account(self, id):
"""
Get a single account.
Retrieve information on an individual account, given by id or sis
sis_account_id.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
self.logger.debug("GET /api/v1/accounts/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{id}".format(**path), data=data, params=params, single_item=True)
def get_sub_accounts_of_account(self, account_id, recursive=None):
"""
Get the sub-accounts of an account.
List accounts that are sub-accounts of the given account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - recursive
"""If true, the entire account tree underneath
this account will be returned (though still paginated). If false, only
direct sub-accounts of this account will be returned. Defaults to false."""
if recursive is not None:
params["recursive"] = recursive
self.logger.debug("GET /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, all_pages=True)
def list_active_courses_in_account(self, account_id, blueprint=None, blueprint_associated=None, by_subaccounts=None, by_teachers=None, completed=None, enrollment_term_id=None, enrollment_type=None, hide_enrollmentless_courses=None, include=None, order=None, published=None, search_by=None, search_term=None, sort=None, state=None, with_enrollments=None):
"""
List active courses in an account.
Retrieve the list of courses in this account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - with_enrollments
"""If true, include only courses with at least one enrollment. If false,
include only courses with no enrollments. If not present, do not filter
on course enrollment status."""
if with_enrollments is not None:
params["with_enrollments"] = with_enrollments
# OPTIONAL - enrollment_type
"""If set, only return courses that have at least one user enrolled in
in the course with one of the specified enrollment types."""
if enrollment_type is not None:
self._validate_enum(enrollment_type, ["teacher", "student", "ta", "observer", "designer"])
params["enrollment_type"] = enrollment_type
# OPTIONAL - published
"""If true, include only published courses. If false, exclude published
courses. If not present, do not filter on published status."""
if published is not None:
params["published"] = published
# OPTIONAL - completed
"""If true, include only completed courses (these may be in state
'completed', or their enrollment term may have ended). If false, exclude
completed courses. If not present, do not filter on completed status."""
if completed is not None:
params["completed"] = completed
# OPTIONAL - blueprint
"""If true, include only blueprint courses. If false, exclude them.
If not present, do not filter on this basis."""
if blueprint is not None:
params["blueprint"] = blueprint
# OPTIONAL - blueprint_associated
"""If true, include only courses that inherit content from a blueprint course.
If false, exclude them. If not present, do not filter on this basis."""
if blueprint_associated is not None:
params["blueprint_associated"] = blueprint_associated
# OPTIONAL - by_teachers
"""List of User IDs of teachers; if supplied, include only courses taught by
one of the referenced users."""
if by_teachers is not None:
params["by_teachers"] = by_teachers
# OPTIONAL - by_subaccounts
"""List of Account IDs; if supplied, include only courses associated with one
of the referenced subaccounts."""
if by_subaccounts is not None:
params["by_subaccounts"] = by_subaccounts
# OPTIONAL - hide_enrollmentless_courses
"""If present, only return courses that have at least one enrollment.
Equivalent to 'with_enrollments=true'; retained for compatibility."""
if hide_enrollmentless_courses is not None:
params["hide_enrollmentless_courses"] = hide_enrollmentless_courses
# OPTIONAL - state
"""If set, only return courses that are in the given state(s). By default,
all states but "deleted" are returned."""
if state is not None:
self._validate_enum(state, ["created", "claimed", "available", "completed", "deleted", "all"])
params["state"] = state
# OPTIONAL - enrollment_term_id
"""If set, only includes courses from the specified term."""
if enrollment_term_id is not None:
params["enrollment_term_id"] = enrollment_term_id
# OPTIONAL - search_term
"""The partial course name, code, or full ID to match and return in the results list. Must be at least 3 characters."""
if search_term is not None:
params["search_term"] = search_term
# OPTIONAL - include
"""- All explanations can be seen in the {api:CoursesController#index Course API index documentation}
- "sections", "needs_grading_count" and "total_scores" are not valid options at the account level"""
if include is not None:
self._validate_enum(include, ["syllabus_body", "term", "course_progress", "storage_quota_used_mb", "total_students", "teachers"])
params["include"] = include
# OPTIONAL - sort
"""The column to sort results by."""
if sort is not None:
self._validate_enum(sort, ["course_name", "sis_course_id", "teacher", "subaccount", "enrollments"])
params["sort"] = sort
# OPTIONAL - order
"""The order to sort the given column by."""
if order is not None:
self._validate_enum(order, ["asc", "desc"])
params["order"] = order
# OPTIONAL - search_by
"""The filter to search by. "course" searches for course names, course codes,
and SIS IDs. "teacher" searches for teacher names"""
if search_by is not None:
self._validate_enum(search_by, ["course", "teacher"])
params["search_by"] = search_by
self.logger.debug("GET /api/v1/accounts/{account_id}/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/courses".format(**path), data=data, params=params, all_pages=True)
def update_account(self, id, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_time_zone=None, account_default_user_storage_quota_mb=None, account_name=None, account_services=None, account_settings_lock_all_announcements_locked=None, account_settings_lock_all_announcements_value=None, account_settings_restrict_student_future_listing_locked=None, account_settings_restrict_student_future_listing_value=None, account_settings_restrict_student_future_view_locked=None, account_settings_restrict_student_future_view_value=None, account_settings_restrict_student_past_view_locked=None, account_settings_restrict_student_past_view_value=None, account_sis_account_id=None):
"""
Update an account.
Update an existing account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - account[name]
"""Updates the account name"""
if account_name is not None:
data["account[name]"] = account_name
# OPTIONAL - account[sis_account_id]
"""Updates the account sis_account_id
Must have manage_sis permission and must not be a root_account."""
if account_sis_account_id is not None:
data["account[sis_account_id]"] = account_sis_account_id
# OPTIONAL - account[default_time_zone]
"""The default time zone of the account. Allowed time zones are
{http://www.iana.org/time-zones IANA time zones} or friendlier
{http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
if account_default_time_zone is not None:
data["account[default_time_zone]"] = account_default_time_zone
# OPTIONAL - account[default_storage_quota_mb]
"""The default course storage quota to be used, if not otherwise specified."""
if account_default_storage_quota_mb is not None:
data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb
# OPTIONAL - account[default_user_storage_quota_mb]
"""The default user storage quota to be used, if not otherwise specified."""
if account_default_user_storage_quota_mb is not None:
data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb
# OPTIONAL - account[default_group_storage_quota_mb]
"""The default group storage quota to be used, if not otherwise specified."""
if account_default_group_storage_quota_mb is not None:
data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb
# OPTIONAL - account[settings][restrict_student_past_view][value]
"""Restrict students from viewing courses after end date"""
if account_settings_restrict_student_past_view_value is not None:
data["account[settings][restrict_student_past_view][value]"] = account_settings_restrict_student_past_view_value
# OPTIONAL - account[settings][restrict_student_past_view][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_restrict_student_past_view_locked is not None:
data["account[settings][restrict_student_past_view][locked]"] = account_settings_restrict_student_past_view_locked
# OPTIONAL - account[settings][restrict_student_future_view][value]
"""Restrict students from viewing courses before start date"""
if account_settings_restrict_student_future_view_value is not None:
data["account[settings][restrict_student_future_view][value]"] = account_settings_restrict_student_future_view_value
# OPTIONAL - account[settings][restrict_student_future_view][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_restrict_student_future_view_locked is not None:
data["account[settings][restrict_student_future_view][locked]"] = account_settings_restrict_student_future_view_locked
# OPTIONAL - account[settings][lock_all_announcements][value]
"""Disable comments on announcements"""
if account_settings_lock_all_announcements_value is not None:
data["account[settings][lock_all_announcements][value]"] = account_settings_lock_all_announcements_value
# OPTIONAL - account[settings][lock_all_announcements][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_lock_all_announcements_locked is not None:
data["account[settings][lock_all_announcements][locked]"] = account_settings_lock_all_announcements_locked
# OPTIONAL - account[settings][restrict_student_future_listing][value]
"""Restrict students from viewing future enrollments in course list"""
if account_settings_restrict_student_future_listing_value is not None:
data["account[settings][restrict_student_future_listing][value]"] = account_settings_restrict_student_future_listing_value
# OPTIONAL - account[settings][restrict_student_future_listing][locked]
"""Lock this setting for sub-accounts and courses"""
if account_settings_restrict_student_future_listing_locked is not None:
data["account[settings][restrict_student_future_listing][locked]"] = account_settings_restrict_student_future_listing_locked
# OPTIONAL - account[services]
"""Give this a set of keys and boolean values to enable or disable services matching the keys"""
if account_services is not None:
data["account[services]"] = account_services
self.logger.debug("PUT /api/v1/accounts/{id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("PUT", "/api/v1/accounts/{id}".format(**path), data=data, params=params, single_item=True)
def delete_user_from_root_account(self, user_id, account_id):
"""
Delete a user from the root account.
Delete a user record from a Canvas root account. If a user is associated
with multiple root accounts (in a multi-tenant instance of Canvas), this
action will NOT remove them from the other accounts.
WARNING: This API will allow a user to remove themselves from the account.
If they do this, they won't be able to make API calls or log into Canvas at
that account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - PATH - user_id
"""ID"""
path["user_id"] = user_id
self.logger.debug("DELETE /api/v1/accounts/{account_id}/users/{user_id} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("DELETE", "/api/v1/accounts/{account_id}/users/{user_id}".format(**path), data=data, params=params, single_item=True)
def create_new_sub_account(self, account_id, account_name, account_default_group_storage_quota_mb=None, account_default_storage_quota_mb=None, account_default_user_storage_quota_mb=None, account_sis_account_id=None):
"""
Create a new sub-account.
Add a new sub-account to a given account.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# REQUIRED - account[name]
"""The name of the new sub-account."""
data["account[name]"] = account_name
# OPTIONAL - account[sis_account_id]
"""The account's identifier in the Student Information System."""
if account_sis_account_id is not None:
data["account[sis_account_id]"] = account_sis_account_id
# OPTIONAL - account[default_storage_quota_mb]
"""The default course storage quota to be used, if not otherwise specified."""
if account_default_storage_quota_mb is not None:
data["account[default_storage_quota_mb]"] = account_default_storage_quota_mb
# OPTIONAL - account[default_user_storage_quota_mb]
"""The default user storage quota to be used, if not otherwise specified."""
if account_default_user_storage_quota_mb is not None:
data["account[default_user_storage_quota_mb]"] = account_default_user_storage_quota_mb
# OPTIONAL - account[default_group_storage_quota_mb]
"""The default group storage quota to be used, if not otherwise specified."""
if account_default_group_storage_quota_mb is not None:
data["account[default_group_storage_quota_mb]"] = account_default_group_storage_quota_mb
self.logger.debug("POST /api/v1/accounts/{account_id}/sub_accounts with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("POST", "/api/v1/accounts/{account_id}/sub_accounts".format(**path), data=data, params=params, single_item=True)
class Account(BaseModel):
"""Account Model."""
def __init__(self, integration_id=None, default_time_zone=None, uuid=None, default_storage_quota_mb=None, sis_account_id=None, root_account_id=None, default_group_storage_quota_mb=None, id=None, sis_import_id=None, lti_guid=None, workflow_state=None, default_user_storage_quota_mb=None, parent_account_id=None, name=None):
"""Init method for Account class."""
self._integration_id = integration_id
self._default_time_zone = default_time_zone
self._uuid = uuid
self._default_storage_quota_mb = default_storage_quota_mb
self._sis_account_id = sis_account_id
self._root_account_id = root_account_id
self._default_group_storage_quota_mb = default_group_storage_quota_mb
self._id = id
self._sis_import_id = sis_import_id
self._lti_guid = lti_guid
self._workflow_state = workflow_state
self._default_user_storage_quota_mb = default_user_storage_quota_mb
self._parent_account_id = parent_account_id
self._name = name
self.logger = logging.getLogger('py3canvas.Account')
@property
def integration_id(self):
"""The account's identifier in the Student Information System. Only included if the user has permission to view SIS information."""
return self._integration_id
@integration_id.setter
def integration_id(self, value):
"""Setter for integration_id property."""
self.logger.warn("Setting values on integration_id will NOT update the remote Canvas instance.")
self._integration_id = value
@property
def default_time_zone(self):
"""The default time zone of the account. Allowed time zones are {http://www.iana.org/time-zones IANA time zones} or friendlier {http://api.rubyonrails.org/classes/ActiveSupport/TimeZone.html Ruby on Rails time zones}."""
return self._default_time_zone
@default_time_zone.setter
def default_time_zone(self, value):
"""Setter for default_time_zone property."""
self.logger.warn("Setting values on default_time_zone will NOT update the remote Canvas instance.")
self._default_time_zone = value
@property
def uuid(self):
"""The UUID of the account."""
return self._uuid
@uuid.setter
def uuid(self, value):
"""Setter for uuid property."""
self.logger.warn("Setting values on uuid will NOT update the remote Canvas instance.")
self._uuid = value
@property
def default_storage_quota_mb(self):
"""The storage quota for the account in megabytes, if not otherwise specified."""
return self._default_storage_quota_mb
@default_storage_quota_mb.setter
def default_storage_quota_mb(self, value):
"""Setter for default_storage_quota_mb property."""
self.logger.warn("Setting values on default_storage_quota_mb will NOT update the remote Canvas instance.")
self._default_storage_quota_mb = value
@property
def sis_account_id(self):
"""The account's identifier in the Student Information System. Only included if the user has permission to view SIS information."""
return self._sis_account_id
@sis_account_id.setter
def sis_account_id(self, value):
"""Setter for sis_account_id property."""
self.logger.warn("Setting values on sis_account_id will NOT update the remote Canvas instance.")
self._sis_account_id = value
@property
def root_account_id(self):
"""The ID of the root account, or null if this is the root account."""
return self._root_account_id
@root_account_id.setter
def root_account_id(self, value):
"""Setter for root_account_id property."""
self.logger.warn("Setting values on root_account_id will NOT update the remote Canvas instance.")
self._root_account_id = value
@property
def default_group_storage_quota_mb(self):
"""The storage quota for a group in the account in megabytes, if not otherwise specified."""
return self._default_group_storage_quota_mb
@default_group_storage_quota_mb.setter
def default_group_storage_quota_mb(self, value):
"""Setter for default_group_storage_quota_mb property."""
self.logger.warn("Setting values on default_group_storage_quota_mb will NOT update the remote Canvas instance.")
self._default_group_storage_quota_mb = value
@property
def id(self):
"""the ID of the Account object."""
return self._id
@id.setter
def id(self, value):
"""Setter for id property."""
self.logger.warn("Setting values on id will NOT update the remote Canvas instance.")
self._id = value
@property
def sis_import_id(self):
"""The id of the SIS import if created through SIS. Only included if the user has permission to manage SIS information."""
return self._sis_import_id
@sis_import_id.setter
def sis_import_id(self, value):
"""Setter for sis_import_id property."""
self.logger.warn("Setting values on sis_import_id will NOT update the remote Canvas instance.")
self._sis_import_id = value
@property
def lti_guid(self):
"""The account's identifier that is sent as context_id in LTI launches."""
return self._lti_guid
@lti_guid.setter
def lti_guid(self, value):
"""Setter for lti_guid property."""
self.logger.warn("Setting values on lti_guid will NOT update the remote Canvas instance.")
self._lti_guid = value
@property
def workflow_state(self):
"""The state of the account. Can be 'active' or 'deleted'."""
return self._workflow_state
@workflow_state.setter
def workflow_state(self, value):
"""Setter for workflow_state property."""
self.logger.warn("Setting values on workflow_state will NOT update the remote Canvas instance.")
self._workflow_state = value
@property
def default_user_storage_quota_mb(self):
"""The storage quota for a user in the account in megabytes, if not otherwise specified."""
return self._default_user_storage_quota_mb
@default_user_storage_quota_mb.setter
def default_user_storage_quota_mb(self, value):
"""Setter for default_user_storage_quota_mb property."""
self.logger.warn("Setting values on default_user_storage_quota_mb will NOT update the remote Canvas instance.")
self._default_user_storage_quota_mb = value
@property
def parent_account_id(self):
"""The account's parent ID, or null if this is the root account."""
return self._parent_account_id
@parent_account_id.setter
def parent_account_id(self, value):
"""Setter for parent_account_id property."""
self.logger.warn("Setting values on parent_account_id will NOT update the remote Canvas instance.")
self._parent_account_id = value
@property
def name(self):
"""The display name of the account."""
return self._name
@name.setter
def name(self, value):
"""Setter for name property."""
self.logger.warn("Setting values on name will NOT update the remote Canvas instance.")
self._name = value
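# Minimal usage sketch (added for illustration, not part of the generated
# client). The constructor arguments (Canvas instance URL and API token) are
# assumed from the BaseCanvasAPI base class, which is not shown in this file,
# and the returned records are assumed to be decoded JSON dictionaries.
if __name__ == "__main__":
    api = AccountsAPI("https://canvas.example.com", "my-api-token")
    for account in api.list_accounts(include=["lti_guid"]):
        print(account["id"], account["name"])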
|
mit
| -5,909,541,274,754,134,000 | 46.010772 | 722 | 0.652015 | false | 4.110675 | false | false | false |
sbhal/be-fruitful
|
pythonProject/qlearning_tf.py
|
1
|
5122
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 19:14:33 2017
@author: sbhal
"""
import numpy as np
import pandas as pd
import random
import tensorflow as tf
class qlearningTF:
def __init__(self, m_criteria, initialWeights=None):
if initialWeights == None:
self.weights = np.full(m_criteria, 3) #assign dtype
else:
self.weights = initialWeights
self.weightBins = 3 # weight bins: .3, .5, .7
self.e = 0.5
self.lr = .8
self.y = .95
self.m_criteria = m_criteria
self.actionStatesCount = 3 #+-0
# initialize Q table
self.currState = "33"
self.Qrows = pow(self.weightBins,self.m_criteria)
self.Qcols = self.m_criteria* self.actionStatesCount
# These lines establish the feed-forward part of the network used to choose actions
self.inputs1 = tf.placeholder(shape=[1, self.Qrows], dtype=tf.float32)
#self.W = tf.Variable(tf.random_uniform([self.Qrows, self.Qcols], 0, 0.01))
self.W = tf.Variable(tf.random_uniform([self.Qrows, self.Qcols], 0, 0.00))
self.Qout = tf.matmul(self.inputs1, self.W)
self.predict = tf.argmax(self.Qout, 1)
# Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
self.nextQ = tf.placeholder(shape=[1, self.Qcols], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(self.nextQ - self.Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
self.updateModel = trainer.minimize(loss)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
def learn(self, s, a, reward, s1): #curState ----action----> finalState (+reward)
allQ = self.sess.run(self.Qout, feed_dict={self.inputs1: np.identity(self.Qrows)[s:s + 1]})
value2 = np.max(self.sess.run(self.Qout,feed_dict={self.inputs1:np.identity(self.Qrows)[s1:s1+1]}))
allQ[0, a] = reward + self.y * value2
_, W1 = self.sess.run([self.updateModel, self.W], feed_dict={self.inputs1: np.identity(self.Qrows)[s:s + 1], self.nextQ: allQ})
# print(self.sess.run(self.W), " weight updated @ state", self.currState)
self.currState = self.state_num_to_string(s1)
def currToFinalState (self, a, c):
c_num = list(map(int, c))
if a[2] == "+":
c_num[int(a[1])] = min(7, c_num[int(a[1])]+2)
else:
c_num[int(a[1])] = max(3, c_num[int(a[1])] - 2)
return "".join(map(str,c_num))
def update(self, action, latency):
reward = 0 if latency==0 else 1/latency
finalState = self.currToFinalState(action, self.currState)
s = self.state_string_to_num(self.currState)
s1 = self.state_string_to_num(finalState)
a = self.action_string_to_num(action)
self.learn (s, a, reward, s1)
def choose_action(self, currState):
#verify if currState has correct format
s = self.state_string_to_num(currState)
if np.random.rand(1) < self.e:
# print("Random action Chosen")
return self.action_num_to_string(random.randrange(0, self.Qcols))
else:
a = np.argmax(self.sess.run(self.Qout,feed_dict={self.inputs1:np.identity(self.Qrows)[s:s+1]}))
return self.action_num_to_string(a)
def state_string_to_num(self, s):
dict = {'3': 0,
'5': 1,
'7': 2}
sum =0
for i, c in enumerate(reversed(s)):
sum += pow(self.weightBins,i) * dict[c]
return sum
def state_num_to_string(self, num):
dict = {'0':'3',
'1':'5',
'2':'7'}
mynum = num
strr = ""
string = ""
for i in reversed(range(0,self.m_criteria)):
strr += str(mynum // pow(self.weightBins, i))
mynum = mynum % pow(self.weightBins, i)
for i,c in enumerate(strr):
string += dict[strr[i]]
return string
def action_num_to_string(self, num):
dict = {0: "+",
1: "-",
2: "0"}
quotient = num // self.weightBins
remainder = num % self.weightBins
return "w"+ str(quotient) + dict[remainder]
def action_string_to_num(self, s):
dict = { "+": 0,
"-": 1,
"0": 2}
return (int(s[1]) * self.weightBins) + dict[s[2]]
if __name__ == "__main__":
myIns = qlearningTF(m_criteria=2)
print (myIns.state_string_to_num("33"))
print(myIns.state_string_to_num("53"))
print(myIns.state_string_to_num("77"))
print(myIns.action_num_to_string(0))
print(myIns.action_num_to_string(4))
print(myIns.state_num_to_string(0))
print(myIns.state_num_to_string(3))
print(myIns.state_num_to_string(8))
print("From here:")
action = myIns.choose_action("33")
print("Action given is", action)
myIns.update(action, 300)
print("new")
action = myIns.choose_action("77")
myIns.update(action, 300)
print(myIns.choose_action("33"))
|
mit
| 606,855,611,802,600,200 | 36.115942 | 135 | 0.577509 | false | 3.134639 | false | false | false |
sarahdunbar/Multiplication-table
|
multiplication-table.py
|
1
|
1123
|
"""
multiplication-table.py
Author: Sarah Dunbar
Credit: http://stackoverflow.com/questions/12102749/how-can-i-suppress-the-newline-after-a-print-statement,
https://docs.python.org/3.3/library/functions.html#print, Mr. Dennison
Assignment:
Write and submit a Python program that prints a multiplication table. The user
must be able to determine the width and height of the table before it is printed.
The final multiplication table should look like this:
Width of multiplication table: 10
Height of multiplication table: 8
1 2 3 4 5 6 7 8 9 10
2 4 6 8 10 12 14 16 18 20
3 6 9 12 15 18 21 24 27 30
4 8 12 16 20 24 28 32 36 40
5 10 15 20 25 30 35 40 45 50
6 12 18 24 30 36 42 48 54 60
7 14 21 28 35 42 49 56 63 70
8 16 24 32 40 48 56 64 72 80
"""
i = input ("Width of multiplication table: ")
i = int(i)
j = input ("Height of multiplication table: ")
j = int(j)
r = 1
while r <= j:
t = 1
while t <= i:
print("{0:>3}".format(r*t), " ", end="")
t = t + 1
print(" ", end="\n")
r = r + 1
|
mit
| 5,617,071,994,563,114,000 | 28.552632 | 107 | 0.610864 | false | 2.793532 | false | false | false |
kyleabeauchamp/EnsemblePaper
|
code/model_building/evaluate_BW_entropy.py
|
1
|
1791
|
import pandas as pd
import numpy as np
from fitensemble import bayesian_weighting, belt
import experiment_loader
import ALA3
prior = "BW"
ff = "amber96"
stride = 1000
regularization_strength = 10.0
thin = 400
factor = 50
steps = 1000000
predictions_framewise, measurements, uncertainties = experiment_loader.load(ff, stride=stride)
phi, psi, ass_raw0, state_ind0 = experiment_loader.load_rama(ff, stride)
num_states = len(phi)
assignments = np.arange(num_states)
prior_pops = np.ones(num_states)
predictions = pd.DataFrame(bayesian_weighting.framewise_to_statewise(predictions_framewise, assignments), columns=predictions_framewise.columns)
model = bayesian_weighting.MaxentBayesianWeighting(predictions.values, measurements.values, uncertainties.values, assignments, regularization_strength)
model.sample(steps * factor, thin=thin * factor)
model2 = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength)
model2.sample(steps, thin=thin)
pi = model.mcmc.trace("matrix_populations")[:, 0]
num_samples = len(pi)
data = np.zeros((num_samples, num_samples))
for i, p in enumerate(model.iterate_populations()):
print(i)
for j, p2 in enumerate(model2.iterate_populations()):
data[i, j] = p.dot(np.log(p / p2))
p_bw = model.accumulate_populations()
p_BELT = model2.accumulate_populations()
chi2 = []
prior = []
H_terms = []
for j, p2 in enumerate(model2.iterate_populations()):
mu = predictions.T.dot(p2)
chi2.append(0.5 * (((mu - measurements) / uncertainties) ** 2).sum())
prior.append(regularization_strength * -1.0 * p2.dot(np.log(p2)))
H = -np.diag(p2[:-1] ** -1.) - p[-1] ** -1.
H_terms.append(0.5 * np.linalg.slogdet(H)[1])
R = pd.DataFrame({"chi2":chi2, "prior":prior, "H":H_terms})
|
gpl-3.0
| 1,257,935,225,862,620,400 | 31.563636 | 151 | 0.719151 | false | 2.975083 | false | false | false |
simonacca/TelegramLogHandler
|
TelegramLogHandler/handler.py
|
1
|
1167
|
import logging
class TelegramHandler(logging.Handler):
"""
A handler class which sends a Telegram message for each logging event.
"""
def __init__(self, token, ids):
"""
Initialize the handler.
Initialize the instance with the bot's token and a list of chat_id(s)
of the conversations that should be notified by the handler.
"""
logging.Handler.__init__(self)
self.token = token
self.ids = ids
def emit(self, record):
"""
Emit a record.
Format the record and send it to the specified chats.
"""
try:
import requests
requests_handler = logging.getLogger("requests")
url = 'https://api.telegram.org/bot{}/sendMessage'.format(self.token)
requests_handler.propagate = False
for chat_id in self.ids:
payload = {
'chat_id':chat_id,
'text': self.format(record)
}
requests.post(url, data=payload)
requests_handler.propagate = True
except:
self.handleError(record)
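# Minimal usage sketch (the token and chat id below are placeholders, not real values):
#   import logging
#   logger = logging.getLogger(__name__)
#   handler = TelegramHandler(token="123456:ABC-DEF", ids=[123456789])
#   handler.setLevel(logging.ERROR)
#   logger.addHandler(handler)
#   logger.error("Something went wrong")  # delivered to every chat id in `ids`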
|
mit
| -6,455,856,852,596,988,000 | 28.175 | 81 | 0.547558 | false | 4.594488 | false | false | false |
mradamcox/arc2arches
|
scripts/shapefile_local.py
|
1
|
45114
|
"""
shapefile.py
Provides read and write support for ESRI Shapefiles.
author: jlawhead<at>geospatialpython.com
date: 20140507
version: 1.2.1
Compatible with Python versions 2.4-3.x
version changelog: Fixed u() to just return the byte sequence on exception
"""
__version__ = "1.2.1"
from struct import pack, unpack, calcsize, error
import os
import sys
import time
import array
import tempfile
print "local pyshp"
#
# Constants for shape types
NULL = 0
POINT = 1
POLYLINE = 3
POLYGON = 5
MULTIPOINT = 8
POINTZ = 11
POLYLINEZ = 13
POLYGONZ = 15
MULTIPOINTZ = 18
POINTM = 21
POLYLINEM = 23
POLYGONM = 25
MULTIPOINTM = 28
MULTIPATCH = 31
PYTHON3 = sys.version_info[0] == 3
if PYTHON3:
xrange = range
def b(v):
if PYTHON3:
if isinstance(v, str):
# For python 3 encode str to bytes.
return v.encode('utf-8')
elif isinstance(v, bytes):
# Already bytes.
return v
else:
# Error.
raise Exception('Unknown input type')
else:
# For python 2 assume str passed in and return str.
return v
def u(v):
if PYTHON3:
# try/catch added 2014/05/07
# returned error on dbf of shapefile
# from www.naturalearthdata.com named
# "ne_110m_admin_0_countries".
# Just returning v as is seemed to fix
# the problem. This function could
# be condensed further.
try:
if isinstance(v, bytes):
# For python 3 decode bytes to str.
return v.decode('utf-8')
elif isinstance(v, str):
# Already str.
return v
else:
# Error.
raise Exception('Unknown input type')
except: return v
else:
# For python 2 assume str passed in and return str.
return v
def is_string(v):
if PYTHON3:
return isinstance(v, str)
else:
return isinstance(v, basestring)
class _Array(array.array):
"""Converts python tuples to lits of the appropritate type.
Used to unpack different shapefile header parts."""
def __repr__(self):
return str(self.tolist())
def signed_area(coords):
"""Return the signed area enclosed by a ring using the linear time
algorithm at http://www.cgafaq.info/wiki/Polygon_Area. A value >= 0
indicates a counter-clockwise oriented ring.
"""
xs, ys = map(list, zip(*coords))
xs.append(xs[1])
ys.append(ys[1])
return sum(xs[i]*(ys[i+1]-ys[i-1]) for i in range(1, len(coords)))/2.0
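# Worked example (a sketch; assumes the ring repeats its first point, as rings read
# from a shapefile do):
#   signed_area([(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)])  # -> 1.0, >= 0 so counter-clockwise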
class _Shape:
def __init__(self, shapeType=None):
"""Stores the geometry of the different shape types
specified in the Shapefile spec. Shape types are
usually point, polyline, or polygons. Every shape type
except the "Null" type contains points at some level for
        example vertices in a polygon. If a shape type has
multiple shapes containing points within a single
geometry record then those shapes are called parts. Parts
are designated by their starting index in geometry record's
list of shapes."""
self.shapeType = shapeType
self.points = []
@property
def __geo_interface__(self):
if self.shapeType in [POINT, POINTM, POINTZ]:
return {
'type': 'Point',
'coordinates': tuple(self.points[0])
}
elif self.shapeType in [MULTIPOINT, MULTIPOINTM, MULTIPOINTZ]:
return {
'type': 'MultiPoint',
'coordinates': tuple([tuple(p) for p in self.points])
}
elif self.shapeType in [POLYLINE, POLYLINEM, POLYLINEZ]:
if len(self.parts) == 1:
return {
'type': 'LineString',
'coordinates': tuple([tuple(p) for p in self.points])
}
else:
ps = None
coordinates = []
for part in self.parts:
if ps == None:
ps = part
continue
else:
coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
ps = part
else:
coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
return {
'type': 'MultiLineString',
'coordinates': tuple(coordinates)
}
elif self.shapeType in [POLYGON, POLYGONM, POLYGONZ]:
if len(self.parts) == 1:
return {
'type': 'Polygon',
'coordinates': (tuple([tuple(p) for p in self.points]),)
}
else:
ps = None
coordinates = []
for part in self.parts:
if ps == None:
ps = part
continue
else:
coordinates.append(tuple([tuple(p) for p in self.points[ps:part]]))
ps = part
else:
coordinates.append(tuple([tuple(p) for p in self.points[part:]]))
polys = []
poly = [coordinates[0]]
for coord in coordinates[1:]:
if signed_area(coord) < 0:
polys.append(poly)
poly = [coord]
else:
poly.append(coord)
polys.append(poly)
if len(polys) == 1:
return {
'type': 'Polygon',
'coordinates': tuple(polys[0])
}
elif len(polys) > 1:
return {
'type': 'MultiPolygon',
'coordinates': polys
}
class _ShapeRecord:
"""A shape object of any type."""
def __init__(self, shape=None, record=None):
self.shape = shape
self.record = record
class ShapefileException(Exception):
"""An exception to handle shapefile specific problems."""
pass
class Reader:
"""Reads the three files of a shapefile as a unit or
separately. If one of the three files (.shp, .shx,
.dbf) is missing no exception is thrown until you try
to call a method that depends on that particular file.
The .shx index file is used if available for efficiency
but is not required to read the geometry from the .shp
file. The "shapefile" argument in the constructor is the
name of the file you want to open.
You can instantiate a Reader without specifying a shapefile
and then specify one later with the load() method.
Only the shapefile headers are read upon loading. Content
within each file is only accessed when required and as
efficiently as possible. Shapefiles are usually not large
but they can be.
"""
def __init__(self, *args, **kwargs):
self.shp = None
self.shx = None
self.dbf = None
self.shapeName = "Not specified"
self._offsets = []
self.shpLength = None
self.numRecords = None
self.fields = []
self.__dbfHdrLength = 0
# See if a shapefile name was passed as an argument
if len(args) > 0:
if is_string(args[0]):
self.load(args[0])
return
if "shp" in kwargs.keys():
if hasattr(kwargs["shp"], "read"):
self.shp = kwargs["shp"]
if hasattr(self.shp, "seek"):
self.shp.seek(0)
if "shx" in kwargs.keys():
if hasattr(kwargs["shx"], "read"):
self.shx = kwargs["shx"]
if hasattr(self.shx, "seek"):
self.shx.seek(0)
if "dbf" in kwargs.keys():
if hasattr(kwargs["dbf"], "read"):
self.dbf = kwargs["dbf"]
if hasattr(self.dbf, "seek"):
self.dbf.seek(0)
if self.shp or self.dbf:
self.load()
else:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
def load(self, shapefile=None):
"""Opens a shapefile from a filename or file-like
object. Normally this method would be called by the
constructor with the file object or file name as an
argument."""
if shapefile:
(shapeName, ext) = os.path.splitext(shapefile)
self.shapeName = shapeName
try:
self.shp = open("%s.shp" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shp" % shapeName)
try:
self.shx = open("%s.shx" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.shx" % shapeName)
try:
self.dbf = open("%s.dbf" % shapeName, "rb")
except IOError:
raise ShapefileException("Unable to open %s.dbf" % shapeName)
if self.shp:
self.__shpHeader()
if self.dbf:
self.__dbfHeader()
def __getFileObj(self, f):
"""Checks to see if the requested shapefile file object is
available. If not a ShapefileException is raised."""
if not f:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object.")
if self.shp and self.shpLength is None:
self.load()
if self.dbf and len(self.fields) == 0:
self.load()
return f
def __restrictIndex(self, i):
"""Provides list-like handling of a record index with a clearer
error message if the index is out of bounds."""
if self.numRecords:
rmax = self.numRecords - 1
if abs(i) > rmax:
raise IndexError("Shape or Record index out of range.")
if i < 0: i = range(self.numRecords)[i]
return i
def __shpHeader(self):
"""Reads the header information from a .shp or .shx file."""
if not self.shp:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no shp file found")
shp = self.shp
# File length (16-bit word * 2 = bytes)
shp.seek(24)
self.shpLength = unpack(">i", shp.read(4))[0] * 2
# Shape type
shp.seek(32)
self.shapeType= unpack("<i", shp.read(4))[0]
# The shapefile's bounding box (lower left, upper right)
self.bbox = _Array('d', unpack("<4d", shp.read(32)))
# Elevation
self.elevation = _Array('d', unpack("<2d", shp.read(16)))
# Measure
self.measure = _Array('d', unpack("<2d", shp.read(16)))
def __shape(self):
"""Returns the header info and geometry for a single shape."""
f = self.__getFileObj(self.shp)
record = _Shape()
nParts = nPoints = zmin = zmax = mmin = mmax = None
(recNum, recLength) = unpack(">2i", f.read(8))
# Determine the start of the next record
next = f.tell() + (2 * recLength)
shapeType = unpack("<i", f.read(4))[0]
record.shapeType = shapeType
# For Null shapes create an empty points list for consistency
if shapeType == 0:
record.points = []
# All shape types capable of having a bounding box
elif shapeType in (3,5,8,13,15,18,23,25,28,31):
record.bbox = _Array('d', unpack("<4d", f.read(32)))
# Shape types with parts
if shapeType in (3,5,13,15,23,25,31):
nParts = unpack("<i", f.read(4))[0]
# Shape types with points
if shapeType in (3,5,8,13,15,23,25,31):
nPoints = unpack("<i", f.read(4))[0]
# Read parts
if nParts:
record.parts = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read part types for Multipatch - 31
if shapeType == 31:
record.partTypes = _Array('i', unpack("<%si" % nParts, f.read(nParts * 4)))
# Read points - produces a list of [x,y] values
if nPoints:
record.points = [_Array('d', unpack("<2d", f.read(16))) for p in range(nPoints)]
# Read z extremes and values
if shapeType in (13,15,18,31):
(zmin, zmax) = unpack("<2d", f.read(16))
record.z = _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8)))
# Read m extremes and values if header m values do not equal 0.0
if shapeType in (13,15,18,23,25,28,31) and not 0.0 in self.measure:
(mmin, mmax) = unpack("<2d", f.read(16))
# Measure values less than -10e38 are nodata values according to the spec
record.m = []
for m in _Array('d', unpack("<%sd" % nPoints, f.read(nPoints * 8))):
if m > -10e38:
record.m.append(m)
else:
record.m.append(None)
# Read a single point
if shapeType in (1,11,21):
record.points = [_Array('d', unpack("<2d", f.read(16)))]
# Read a single Z value
if shapeType == 11:
record.z = unpack("<d", f.read(8))
# Read a single M value
if shapeType in (11,21):
record.m = unpack("<d", f.read(8))
# Seek to the end of this record as defined by the record header because
# the shapefile spec doesn't require the actual content to meet the header
# definition. Probably allowed for lazy feature deletion.
f.seek(next)
return record
def __shapeIndex(self, i=None):
"""Returns the offset in a .shp file for a shape based on information
in the .shx index file."""
shx = self.shx
if not shx:
return None
if not self._offsets:
# File length (16-bit word * 2 = bytes) - header length
shx.seek(24)
shxRecordLength = (unpack(">i", shx.read(4))[0] * 2) - 100
numRecords = shxRecordLength // 8
# Jump to the first record.
shx.seek(100)
for r in range(numRecords):
# Offsets are 16-bit words just like the file length
self._offsets.append(unpack(">i", shx.read(4))[0] * 2)
shx.seek(shx.tell() + 4)
if not i == None:
return self._offsets[i]
def shape(self, i=0):
"""Returns a shape object for a shape in the the geometry
record file."""
shp = self.__getFileObj(self.shp)
i = self.__restrictIndex(i)
offset = self.__shapeIndex(i)
if not offset:
# Shx index not available so iterate the full list.
for j,k in enumerate(self.iterShapes()):
if j == i:
return k
shp.seek(offset)
return self.__shape()
def shapes(self):
"""Returns all shapes in a shapefile."""
shp = self.__getFileObj(self.shp)
# Found shapefiles which report incorrect
# shp file length in the header. Can't trust
# that so we seek to the end of the file
# and figure it out.
shp.seek(0,2)
self.shpLength = shp.tell()
shp.seek(100)
shapes = []
while shp.tell() < self.shpLength:
shapes.append(self.__shape())
return shapes
def iterShapes(self):
"""Serves up shapes in a shapefile as an iterator. Useful
for handling large shapefiles."""
shp = self.__getFileObj(self.shp)
shp.seek(0,2)
self.shpLength = shp.tell()
shp.seek(100)
while shp.tell() < self.shpLength:
yield self.__shape()
def __dbfHeaderLength(self):
"""Retrieves the header length of a dbf file header."""
if not self.__dbfHdrLength:
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
(self.numRecords, self.__dbfHdrLength) = \
unpack("<xxxxLH22x", dbf.read(32))
return self.__dbfHdrLength
def __dbfHeader(self):
"""Reads a dbf header. Xbase-related code borrows heavily from ActiveState Python Cookbook Recipe 362715 by Raymond Hettinger"""
if not self.dbf:
raise ShapefileException("Shapefile Reader requires a shapefile or file-like object. (no dbf file found)")
dbf = self.dbf
headerLength = self.__dbfHeaderLength()
numFields = (headerLength - 33) // 32
for field in range(numFields):
fieldDesc = list(unpack("<11sc4xBB14x", dbf.read(32)))
name = 0
idx = 0
if b("\x00") in fieldDesc[name]:
idx = fieldDesc[name].index(b("\x00"))
else:
idx = len(fieldDesc[name]) - 1
fieldDesc[name] = fieldDesc[name][:idx]
fieldDesc[name] = u(fieldDesc[name])
fieldDesc[name] = fieldDesc[name].lstrip()
fieldDesc[1] = u(fieldDesc[1])
self.fields.append(fieldDesc)
terminator = dbf.read(1)
assert terminator == b("\r")
self.fields.insert(0, ('DeletionFlag', 'C', 1, 0))
def __recordFmt(self):
"""Calculates the size of a .shp geometry record."""
if not self.numRecords:
self.__dbfHeader()
fmt = ''.join(['%ds' % fieldinfo[2] for fieldinfo in self.fields])
fmtSize = calcsize(fmt)
return (fmt, fmtSize)
def __record(self):
"""Reads and returns a dbf record row as a list of values."""
f = self.__getFileObj(self.dbf)
recFmt = self.__recordFmt()
recordContents = unpack(recFmt[0], f.read(recFmt[1]))
if recordContents[0] != b(' '):
# deleted record
return None
record = []
for (name, typ, size, deci), value in zip(self.fields,
recordContents):
if name == 'DeletionFlag':
continue
elif not value.strip():
record.append(value)
continue
elif typ == "N":
value = value.replace(b('\0'), b('')).strip()
if value == b(''):
value = 0
elif deci:
value = float(value)
else:
value = int(value)
elif typ == b('D'):
try:
y, m, d = int(value[:4]), int(value[4:6]), int(value[6:8])
value = [y, m, d]
except:
value = value.strip()
elif typ == b('L'):
value = (value in b('YyTt') and b('T')) or \
(value in b('NnFf') and b('F')) or b('?')
else:
value = u(value)
value = value.strip()
record.append(value)
return record
def record(self, i=0):
"""Returns a specific dbf record based on the supplied index."""
f = self.__getFileObj(self.dbf)
if not self.numRecords:
self.__dbfHeader()
i = self.__restrictIndex(i)
recSize = self.__recordFmt()[1]
f.seek(0)
f.seek(self.__dbfHeaderLength() + (i * recSize))
return self.__record()
def records(self):
"""Returns all records in a dbf file."""
if not self.numRecords:
self.__dbfHeader()
records = []
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in range(self.numRecords):
r = self.__record()
if r:
records.append(r)
return records
def iterRecords(self):
"""Serves up records in a dbf file as an iterator.
Useful for large shapefiles or dbf files."""
if not self.numRecords:
self.__dbfHeader()
f = self.__getFileObj(self.dbf)
f.seek(self.__dbfHeaderLength())
for i in xrange(self.numRecords):
r = self.__record()
if r:
yield r
def shapeRecord(self, i=0):
"""Returns a combination geometry and attribute record for the
supplied record index."""
i = self.__restrictIndex(i)
return _ShapeRecord(shape=self.shape(i), record=self.record(i))
def shapeRecords(self):
"""Returns a list of combination geometry/attribute records for
all records in a shapefile."""
shapeRecords = []
return [_ShapeRecord(shape=rec[0], record=rec[1]) \
for rec in zip(self.shapes(), self.records())]
class Writer:
"""Provides write support for ESRI Shapefiles."""
def __init__(self, shapeType=None):
self._shapes = []
self.fields = []
self.records = []
self.shapeType = shapeType
self.shp = None
self.shx = None
self.dbf = None
# Geometry record offsets and lengths for writing shx file.
self._offsets = []
self._lengths = []
# Use deletion flags in dbf? Default is false (0).
self.deletionFlag = 0
def __getFileObj(self, f):
"""Safety handler to verify file-like objects"""
if not f:
raise ShapefileException("No file-like object available.")
elif hasattr(f, "write"):
return f
else:
pth = os.path.split(f)[0]
if pth and not os.path.exists(pth):
os.makedirs(pth)
return open(f, "wb")
def __shpFileLength(self):
"""Calculates the file length of the shp file."""
# Start with header length
size = 100
# Calculate size of all shapes
for s in self._shapes:
# Add in record header and shape type fields
size += 12
# nParts and nPoints do not apply to all shapes
#if self.shapeType not in (0,1):
# nParts = len(s.parts)
# nPoints = len(s.points)
if hasattr(s,'parts'):
nParts = len(s.parts)
if hasattr(s,'points'):
nPoints = len(s.points)
# All shape types capable of having a bounding box
if self.shapeType in (3,5,8,13,15,18,23,25,28,31):
size += 32
# Shape types with parts
if self.shapeType in (3,5,13,15,23,25,31):
# Parts count
size += 4
# Parts index array
size += nParts * 4
# Shape types with points
if self.shapeType in (3,5,8,13,15,23,25,31):
# Points count
size += 4
# Points array
size += 16 * nPoints
# Calc size of part types for Multipatch (31)
if self.shapeType == 31:
size += nParts * 4
# Calc z extremes and values
if self.shapeType in (13,15,18,31):
# z extremes
size += 16
# z array
size += 8 * nPoints
# Calc m extremes and values
if self.shapeType in (23,25,31):
# m extremes
size += 16
# m array
size += 8 * nPoints
# Calc a single point
if self.shapeType in (1,11,21):
size += 16
# Calc a single Z value
if self.shapeType == 11:
size += 8
# Calc a single M value
if self.shapeType in (11,21):
size += 8
# Calculate size as 16-bit words
size //= 2
return size
def __bbox(self, shapes, shapeTypes=[]):
x = []
y = []
for s in shapes:
shapeType = self.shapeType
if shapeTypes:
shapeType = shapeTypes[shapes.index(s)]
px, py = list(zip(*s.points))[:2]
x.extend(px)
y.extend(py)
return [min(x), min(y), max(x), max(y)]
def __zbox(self, shapes, shapeTypes=[]):
z = []
for s in shapes:
try:
for p in s.points:
z.append(p[2])
except IndexError:
pass
if not z: z.append(0)
return [min(z), max(z)]
def __mbox(self, shapes, shapeTypes=[]):
m = [0]
for s in shapes:
try:
for p in s.points:
m.append(p[3])
except IndexError:
pass
return [min(m), max(m)]
def bbox(self):
"""Returns the current bounding box for the shapefile which is
the lower-left and upper-right corners. It does not contain the
elevation or measure extremes."""
return self.__bbox(self._shapes)
def zbox(self):
"""Returns the current z extremes for the shapefile."""
return self.__zbox(self._shapes)
def mbox(self):
"""Returns the current m extremes for the shapefile."""
return self.__mbox(self._shapes)
def __shapefileHeader(self, fileObj, headerType='shp'):
"""Writes the specified header type to the specified file-like object.
Several of the shapefile formats are so similar that a single generic
method to read or write them is warranted."""
f = self.__getFileObj(fileObj)
f.seek(0)
# File code, Unused bytes
f.write(pack(">6i", 9994,0,0,0,0,0))
# File length (Bytes / 2 = 16-bit words)
if headerType == 'shp':
f.write(pack(">i", self.__shpFileLength()))
elif headerType == 'shx':
f.write(pack('>i', ((100 + (len(self._shapes) * 8)) // 2)))
# Version, Shape type
f.write(pack("<2i", 1000, self.shapeType))
# The shapefile's bounding box (lower left, upper right)
if self.shapeType != 0:
try:
f.write(pack("<4d", *self.bbox()))
except error:
raise ShapefileException("Failed to write shapefile bounding box. Floats required.")
else:
f.write(pack("<4d", 0,0,0,0))
# Elevation
z = self.zbox()
# Measure
m = self.mbox()
try:
f.write(pack("<4d", z[0], z[1], m[0], m[1]))
except error:
raise ShapefileException("Failed to write shapefile elevation and measure values. Floats required.")
def __dbfHeader(self):
"""Writes the dbf header and field descriptors."""
f = self.__getFileObj(self.dbf)
f.seek(0)
version = 3
year, month, day = time.localtime()[:3]
year -= 1900
# Remove deletion flag placeholder from fields
for field in self.fields:
if field[0].startswith("Deletion"):
self.fields.remove(field)
numRecs = len(self.records)
numFields = len(self.fields)
headerLength = numFields * 32 + 33
recordLength = sum([int(field[2]) for field in self.fields]) + 1
header = pack('<BBBBLHH20x', version, year, month, day, numRecs,
headerLength, recordLength)
f.write(header)
# Field descriptors
for field in self.fields:
name, fieldType, size, decimal = field
name = b(name)
name = name.replace(b(' '), b('_'))
name = name.ljust(11).replace(b(' '), b('\x00'))
fieldType = b(fieldType)
size = int(size)
fld = pack('<11sc4xBB14x', name, fieldType, size, decimal)
f.write(fld)
# Terminator
f.write(b('\r'))
def __shpRecords(self):
"""Write the shp records"""
f = self.__getFileObj(self.shp)
f.seek(100)
recNum = 1
for s in self._shapes:
self._offsets.append(f.tell())
# Record number, Content length place holder
f.write(pack(">2i", recNum, 0))
recNum += 1
start = f.tell()
# Shape Type
if self.shapeType != 31:
s.shapeType = self.shapeType
f.write(pack("<i", s.shapeType))
# All shape types capable of having a bounding box
if s.shapeType in (3,5,8,13,15,18,23,25,28,31):
try:
f.write(pack("<4d", *self.__bbox([s])))
except error:
raise ShapefileException("Falied to write bounding box for record %s. Expected floats." % recNum)
# Shape types with parts
if s.shapeType in (3,5,13,15,23,25,31):
# Number of parts
f.write(pack("<i", len(s.parts)))
# Shape types with multiple points per record
if s.shapeType in (3,5,8,13,15,23,25,31):
# Number of points
f.write(pack("<i", len(s.points)))
# Write part indexes
if s.shapeType in (3,5,13,15,23,25,31):
for p in s.parts:
f.write(pack("<i", p))
# Part types for Multipatch (31)
if s.shapeType == 31:
for pt in s.partTypes:
f.write(pack("<i", pt))
# Write points for multiple-point records
if s.shapeType in (3,5,8,13,15,23,25,31):
try:
[f.write(pack("<2d", *p[:2])) for p in s.points]
except error:
raise ShapefileException("Failed to write points for record %s. Expected floats." % recNum)
# Write z extremes and values
if s.shapeType in (13,15,18,31):
try:
f.write(pack("<2d", *self.__zbox([s])))
except error:
raise ShapefileException("Failed to write elevation extremes for record %s. Expected floats." % recNum)
try:
if hasattr(s,"z"):
f.write(pack("<%sd" % len(s.z), *s.z))
else:
[f.write(pack("<d", p[2])) for p in s.points]
except error:
raise ShapefileException("Failed to write elevation values for record %s. Expected floats." % recNum)
# Write m extremes and values
if s.shapeType in (13,15,18,23,25,28,31):
try:
if hasattr(s,"m"):
f.write(pack("<%sd" % len(s.m), *s.m))
else:
f.write(pack("<2d", *self.__mbox([s])))
except error:
raise ShapefileException("Failed to write measure extremes for record %s. Expected floats" % recNum)
try:
[f.write(pack("<d", p[3])) for p in s.points]
except error:
raise ShapefileException("Failed to write measure values for record %s. Expected floats" % recNum)
# Write a single point
if s.shapeType in (1,11,21):
try:
f.write(pack("<2d", s.points[0][0], s.points[0][1]))
except error:
raise ShapefileException("Failed to write point for record %s. Expected floats." % recNum)
# Write a single Z value
if s.shapeType == 11:
if hasattr(s, "z"):
try:
if not s.z:
s.z = (0,)
f.write(pack("<d", s.z[0]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
else:
try:
if len(s.points[0])<3:
s.points[0].append(0)
f.write(pack("<d", s.points[0][2]))
except error:
raise ShapefileException("Failed to write elevation value for record %s. Expected floats." % recNum)
# Write a single M value
if s.shapeType in (11,21):
if hasattr(s, "m"):
try:
if not s.m:
s.m = (0,)
f.write(pack("<1d", s.m[0]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
else:
try:
if len(s.points[0])<4:
s.points[0].append(0)
f.write(pack("<1d", s.points[0][3]))
except error:
raise ShapefileException("Failed to write measure value for record %s. Expected floats." % recNum)
# Finalize record length as 16-bit words
finish = f.tell()
length = (finish - start) // 2
self._lengths.append(length)
# start - 4 bytes is the content length field
f.seek(start-4)
f.write(pack(">i", length))
f.seek(finish)
def __shxRecords(self):
"""Writes the shx records."""
f = self.__getFileObj(self.shx)
f.seek(100)
for i in range(len(self._shapes)):
f.write(pack(">i", self._offsets[i] // 2))
f.write(pack(">i", self._lengths[i]))
def __dbfRecords(self):
"""Writes the dbf records."""
f = self.__getFileObj(self.dbf)
for record in self.records:
if not self.fields[0][0].startswith("Deletion"):
f.write(b(' ')) # deletion flag
for (fieldName, fieldType, size, dec), value in zip(self.fields, record):
fieldType = fieldType.upper()
size = int(size)
if fieldType.upper() == "N":
value = str(value).rjust(size)
elif fieldType == 'L':
value = str(value)[0].upper()
else:
value = str(value)[:size].ljust(size)
assert len(value) == size
value = b(value)
f.write(value)
def null(self):
"""Creates a null shape."""
self._shapes.append(_Shape(NULL))
def point(self, x, y, z=0, m=0):
"""Creates a point shape."""
pointShape = _Shape(self.shapeType)
pointShape.points.append([x, y, z, m])
self._shapes.append(pointShape)
def line(self, parts=[], shapeType=POLYLINE):
"""Creates a line shape. This method is just a convienience method
which wraps 'poly()'.
"""
self.poly(parts, shapeType, [])
def poly(self, parts=[], shapeType=POLYGON, partTypes=[]):
"""Creates a shape that has multiple collections of points (parts)
including lines, polygons, and even multipoint shapes. If no shape type
is specified it defaults to 'polygon'. If no part types are specified
(which they normally won't be) then all parts default to the shape type.
"""
polyShape = _Shape(shapeType)
polyShape.parts = []
polyShape.points = []
# Make sure polygons are closed
if shapeType in (5,15,25,31):
for part in parts:
if part[0] != part[-1]:
part.append(part[0])
for part in parts:
polyShape.parts.append(len(polyShape.points))
for point in part:
# Ensure point is list
if not isinstance(point, list):
point = list(point)
# Make sure point has z and m values
while len(point) < 4:
point.append(0)
polyShape.points.append(point)
if polyShape.shapeType == 31:
if not partTypes:
for part in parts:
partTypes.append(polyShape.shapeType)
polyShape.partTypes = partTypes
self._shapes.append(polyShape)
def field(self, name, fieldType="C", size="50", decimal=0):
"""Adds a dbf field descriptor to the shapefile."""
self.fields.append((name, fieldType, size, decimal))
def record(self, *recordList, **recordDict):
"""Creates a dbf attribute record. You can submit either a sequence of
field values or keyword arguments of field names and values. Before
adding records you must add fields for the record values using the
fields() method. If the record values exceed the number of fields the
extra ones won't be added. In the case of using keyword arguments to specify
field/value pairs only fields matching the already registered fields
will be added."""
record = []
fieldCount = len(self.fields)
# Compensate for deletion flag
if self.fields[0][0].startswith("Deletion"): fieldCount -= 1
if recordList:
[record.append(recordList[i]) for i in range(fieldCount)]
elif recordDict:
for field in self.fields:
if field[0] in recordDict:
val = recordDict[field[0]]
if val is None:
record.append("")
else:
record.append(val)
if record:
self.records.append(record)
def shape(self, i):
return self._shapes[i]
def shapes(self):
"""Return the current list of shapes."""
return self._shapes
def saveShp(self, target):
"""Save an shp file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shp'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shp = self.__getFileObj(target)
self.__shapefileHeader(self.shp, headerType='shp')
self.__shpRecords()
def saveShx(self, target):
"""Save an shx file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.shx'
if not self.shapeType:
self.shapeType = self._shapes[0].shapeType
self.shx = self.__getFileObj(target)
self.__shapefileHeader(self.shx, headerType='shx')
self.__shxRecords()
def saveDbf(self, target):
"""Save a dbf file."""
if not hasattr(target, "write"):
target = os.path.splitext(target)[0] + '.dbf'
self.dbf = self.__getFileObj(target)
self.__dbfHeader()
self.__dbfRecords()
def save(self, target=None, shp=None, shx=None, dbf=None):
"""Save the shapefile data to three files or
three file-like objects. SHP and DBF files can also
be written exclusively using saveShp, saveShx, and saveDbf respectively.
If target is specified but not shp,shx, or dbf then the target path and
file name are used. If no options or specified, a unique base file name
is generated to save the files and the base file name is returned as a
string.
"""
# Create a unique file name if one is not defined
if shp:
self.saveShp(shp)
if shx:
self.saveShx(shx)
if dbf:
self.saveDbf(dbf)
elif not shp and not shx and not dbf:
generated = False
if not target:
temp = tempfile.NamedTemporaryFile(prefix="shapefile_",dir=os.getcwd())
target = temp.name
generated = True
self.saveShp(target)
self.shp.close()
self.saveShx(target)
self.shx.close()
self.saveDbf(target)
self.dbf.close()
if generated:
return target
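# Minimal usage sketch (file name is illustrative):
#   w = Writer(shapeType=POINT)
#   w.field("NAME", "C", "40")
#   w.point(10.0, 20.0)
#   w.record("spot")
#   w.save("example_points")  # writes example_points.shp, .shx and .dbf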
class Editor(Writer):
def __init__(self, shapefile=None, shapeType=POINT, autoBalance=1):
self.autoBalance = autoBalance
if not shapefile:
Writer.__init__(self, shapeType)
elif is_string(shapefile):
base = os.path.splitext(shapefile)[0]
if os.path.isfile("%s.shp" % base):
r = Reader(base)
Writer.__init__(self, r.shapeType)
self._shapes = r.shapes()
self.fields = r.fields
self.records = r.records()
def select(self, expr):
"""Select one or more shapes (to be implemented)"""
# TODO: Implement expressions to select shapes.
pass
def delete(self, shape=None, part=None, point=None):
"""Deletes the specified part of any shape by specifying a shape
number, part number, or point number."""
# shape, part, point
if shape and part and point:
del self._shapes[shape][part][point]
# shape, part
elif shape and part and not point:
del self._shapes[shape][part]
# shape
elif shape and not part and not point:
del self._shapes[shape]
# point
elif not shape and not part and point:
for s in self._shapes:
if s.shapeType == 1:
del self._shapes[point]
else:
for part in s.parts:
del s[part][point]
# part, point
elif not shape and part and point:
for s in self._shapes:
del s[part][point]
# part
elif not shape and part and not point:
for s in self._shapes:
del s[part]
def point(self, x=None, y=None, z=None, m=None, shape=None, part=None, point=None, addr=None):
"""Creates/updates a point shape. The arguments allows
you to update a specific point by shape, part, point of any
shape type."""
# shape, part, point
if shape and part and point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
try: self._shapes[shape][part][point]
except IndexError: self._shapes[shape][part].append([])
p = self._shapes[shape][part][point]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][point] = p
# shape, part
elif shape and part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
try: self._shapes[shape][part]
except IndexError: self._shapes[shape].append([])
points = self._shapes[shape][part]
for i in range(len(points)):
p = points[i]
if x: p[0] = x
if y: p[1] = y
if z: p[2] = z
if m: p[3] = m
self._shapes[shape][part][i] = p
# shape
elif shape and not part and not point:
try: self._shapes[shape]
except IndexError: self._shapes.append([])
# point
# part
if addr:
shape, part, point = addr
self._shapes[shape][part][point] = [x, y, z, m]
else:
Writer.point(self, x, y, z, m)
if self.autoBalance:
self.balance()
def validate(self):
"""An optional method to try and validate the shapefile
as much as possible before writing it (not implemented)."""
#TODO: Implement validation method
pass
def balance(self):
"""Adds a corresponding empty attribute or null geometry record depending
on which type of record was created to make sure all three files
are in synch."""
if len(self.records) > len(self._shapes):
self.null()
elif len(self.records) < len(self._shapes):
self.record()
def __fieldNorm(self, fieldName):
"""Normalizes a dbf field name to fit within the spec and the
expectations of certain ESRI software."""
if len(fieldName) > 11: fieldName = fieldName[:11]
fieldName = fieldName.upper()
        fieldName = fieldName.replace(' ', '_')
        return fieldName
# Begin Testing
def test():
import doctest
doctest.NORMALIZE_WHITESPACE = 1
doctest.testfile("README.txt", verbose=1)
if __name__ == "__main__":
"""
Doctests are contained in the file 'README.txt'. This library was originally developed
using Python 2.3. Python 2.4 and above have some excellent improvements in the built-in
testing libraries but for now unit testing is done using what's available in
2.3.
"""
test()
|
mit
| -4,559,625,308,051,093,500 | 37.297114 | 136 | 0.519972 | false | 4.116616 | false | false | false |
yugangzhang/chxanalys
|
chxanalys/chx_compress.py
|
1
|
37856
|
import os,shutil
from glob import iglob
import matplotlib.pyplot as plt
from chxanalys.chx_libs import (np, roi, time, datetime, os, getpass, db,
get_images,LogNorm, RUN_GUI)
from chxanalys.chx_generic_functions import (create_time_slice,get_detector, get_fields, get_sid_filenames,
load_data)
import struct
from tqdm import tqdm
from contextlib import closing
from multiprocessing import Pool
import dill
import sys
import gc
import pickle as pkl
from eiger_io.pims_reader import EigerImages
def run_dill_encoded(what):
fun, args = dill.loads(what)
return fun(*args)
def apply_async(pool, fun, args, callback=None):
return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback)
def map_async(pool, fun, args ):
return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),))
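# The two helpers above serialize the call with dill instead of pickle so that
# multiprocessing.Pool can run functions (e.g. closures or interactively defined
# functions) that the standard pickler cannot handle. Sketch of the intended use
# (names are illustrative):
#   pool = Pool(processes=4)
#   res = apply_async(pool, some_function, (arg1, arg2))
#   value = res.get()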
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
def go_through_FD(FD):
for i in range(FD.beg, FD.end):
pass_FD(FD,i)
def compress_eigerdata( images, mask, md, filename=None, force_compress=False,
bad_pixel_threshold=1e15, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30, nobytes=4,bins=1, bad_frame_list=None,
para_compress= False, num_sub=100, dtypes='uid',reverse =True,
num_max_para_process=500, with_pickle=False, direct_load_data=False, data_path=None):
end= len(images)//bins
if filename is None:
filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid']
if dtypes!= 'uid':
para_compress= False
else:
if para_compress:
images='foo'
#para_compress= True
#print( dtypes )
if force_compress:
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process, with_pickle= with_pickle,
direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
if not os.path.exists( filename ):
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
print ("Using already created compressed file with filename as :%s."%filename)
beg=0
return read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
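# Usage sketch (argument values are illustrative; images, mask and md come from the
# usual chxanalys loading step):
#   mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(
#       images, mask, md, filename=None, force_compress=False,
#       para_compress=True, num_sub=100, bins=1)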
def read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False,
direct_load_data=False,data_path=None):
'''
    Read already compressed Eiger data
Return
mask
avg_img
imsum
bad_frame_list
'''
#should use try and except instead of with_pickle in the future!
CAL = False
if not with_pickle:
CAL = True
else:
try:
mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) )
except:
CAL = True
if CAL:
FD = Multifile( filename, beg, end)
imgsum = np.zeros( FD.end- FD.beg, dtype= np.float )
avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float )
imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold,
hot_pixel_threshold=hot_pixel_threshold, plot_ = False,
bad_frame_list=bad_frame_list)
avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ )
FD.FID.close()
return mask, avg_img, imgsum, bad_frame_list_
def para_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,
num_max_para_process=500, cpu_core_number=72, with_pickle=True,
direct_load_data=False, data_path=None):
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
N = int( np.ceil( N/ bins ) )
Nf = int( np.ceil( N/ num_sub ) )
if Nf > cpu_core_number:
print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number)
num_sub_old = num_sub
num_sub = int( np.ceil(N/cpu_core_number))
Nf = int( np.ceil( N/ num_sub ) )
print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub ))
create_compress_header( md, filename +'-header', nobytes, bins )
#print( 'done for header here')
results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename,
num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes,
num_max_para_process=num_max_para_process,
direct_load_data=direct_load_data, data_path=data_path)
res_ = np.array( [ results[k].get() for k in list(sorted(results.keys())) ] )
imgsum = np.zeros( N )
bad_frame_list = np.zeros( N, dtype=bool )
good_count = 1
for i in range( Nf ):
mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i]
imgsum[i*num_sub: (i+1)*num_sub] = imgsum_
bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_
if i==0:
mask = mask_
avg_img = np.zeros_like( avg_img_ )
else:
mask *= mask_
if not np.sum( np.isnan( avg_img_)):
avg_img += avg_img_
good_count += 1
bad_frame_list = np.where( bad_frame_list )[0]
avg_img /= good_count
if len(bad_frame_list):
print ('Bad frame list are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
    print( 'Combining the separated compressed files together...')
combine_compressed( filename, Nf, del_old=True)
del results
del res_
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
def combine_compressed( filename, Nf, del_old=True):
old_files = np.concatenate( np.array([ [filename +'-header'],
[filename + '_temp-%i.tmp'%i for i in range(Nf) ]]))
combine_binary_files(filename, old_files, del_old )
def combine_binary_files(filename, old_files, del_old = False):
'''Combine binary files together'''
fn_ = open(filename, 'wb')
for ftemp in old_files:
shutil.copyfileobj( open(ftemp, 'rb'), fn_)
if del_old:
os.remove( ftemp )
fn_.close()
def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images',reverse =True,
num_max_para_process=50,direct_load_data=False, data_path=None):
'''
    Compress Eiger data in parallel without writing the header; used by the parallel compression driver.
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
#N = int( np.ceil( N/ bins ) )
num_sub *= bins
if N%num_sub:
Nf = N// num_sub +1
        print('The average image intensity will be slightly incorrect, about 1% error.')
        print( 'Please choose num_sub so that the remainder of Num_images/num_sub is 0 to get a correct avg_image')
else:
Nf = N//num_sub
print( 'It will create %i temporary files for parallel compression.'%Nf)
if Nf> num_max_para_process:
N_runs = np.int( np.ceil( Nf/float(num_max_para_process)))
print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process ))
else:
N_runs= 1
result = {}
#print( mask_filename )# + '*'* 10 + 'here' )
for nr in range( N_runs ):
if (nr+1)*num_max_para_process > Nf:
inputs= range( num_max_para_process*nr, Nf )
else:
inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) )
fns = [ filename + '_temp-%i.tmp'%i for i in inputs]
#print( nr, inputs, )
pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 )
#print( inputs )
for i in inputs:
if i*num_sub <= N:
result[i] = pool.apply_async( segment_compress_eigerdata, [
images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,direct_load_data, data_path ] )
pool.close()
pool.join()
pool.terminate()
return result
def segment_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1,
N1=None, N2=None, dtypes='images',reverse =True,direct_load_data=False, data_path=None ):
'''
    Create a compressed Eiger data segment without a header; this function is used by the parallel compression driver.
    For parallel compression, don't pass any non-scalar parameters.
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images = load_data( uid, detector, reverse= reverse )[N1:N2]
else:
images = EigerImages(data_path, md)[N1:N2]
Nimg_ = len( images)
M,N = images[0].shape
avg_img = np.zeros( [M,N], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
#frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
#Nimg = Nimg_//bins
Nimg = int( np.ceil( Nimg_ / bins ) )
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
#print( time_edge, Nimg_, Nimg, bins, N1, N2 )
imgsum = np.zeros( Nimg )
if bins!=1:
#print('The frames will be binned by %s'%bins)
dtype=np.float64
fp = open( filename,'wb' )
for n in range(Nimg):
t1,t2 = time_edge[n]
if bins!=1:
img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype)
else:
img = np.array( images[t1], dtype=dtype)
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel( avg_img )[p] += v
good_count +=1
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1
del p,v, img
fp.flush()
fp.close()
avg_img /= good_count
bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold)
sys.stdout.write('#')
sys.stdout.flush()
#del images, mask, avg_img, imgsum, bad_frame_list
#print( 'Should release memory here')
return mask, avg_img, imgsum, bad_frame_list
def create_compress_header( md, filename, nobytes=4, bins=1 ):
'''
Create the head for a compressed eiger data, this function is for parallel compress
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
fp.close()
def init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True,
direct_load_data=False, data_path=None):
'''
Compress the eiger data
    Create a new mask by removing hot pixels
    Compute the image average
    Compute each image sum
    Find bad_frame_list where the image sum is above bad_pixel_threshold
    Generate a compressed data file with the given filename
if bins!=1, will bin the images with bin number as bins
Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ]
Return
mask
avg_img
imsum
bad_frame_list
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
Nimg_ = len( images)
avg_img = np.zeros_like( images[0], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
Nimg = Nimg_//bins
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
imgsum = np.zeros( Nimg )
if bins!=1:
print('The frames will be binned by %s'%bins)
for n in tqdm( range(Nimg) ):
t1,t2 = time_edge[n]
img = np.average( images[t1:t2], axis=0 )
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
#if imgsum[n] >=bad_pixel_threshold :
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel(avg_img )[p] += v
good_count +=1
frac += dlen/Nopix
#s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2])
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v))
#n +=1
fp.close()
frac /=good_count
print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) )
avg_img /= good_count
bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0]
#bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0]
#bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0]
#bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) )
if len(bad_frame_list):
print ('Bad frame list are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
""" Description:
This is code that Mark wrote to open the multifile format
in compressed mode, translated to python.
This seems to work for DALSA, FCCD and EIGER in compressed mode.
It should be included in the respective detector.i files
Currently, this refers to the compression mode being '6'
Each file is image descriptor files chunked together as follows:
Header (1024 bytes)
|--------------IMG N begin--------------|
    |                 Dlen                  |
    |---------------------------------------|
    |    Pixel positions (dlen*4 bytes)     |
    |      (0 based indexing in file)       |
    |---------------------------------------|
    |     Pixel data (dlen*bytes bytes)     |
    |      (bytes is found in header        |
    |           at position 116)            |
|--------------IMG N end----------------|
|--------------IMG N+1 begin------------|
|----------------etc.....---------------|
Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End,
"""
class Multifile:
'''The class representing the multifile.
The recno is in 1 based numbering scheme (first record is 1)
This is efficient for reading in increasing order.
Note: reading same image twice in a row is like reading an earlier
    numbered image and means the program starts from the beginning again.
'''
def __init__(self,filename,beg,end):
'''Multifile initialization. Open the file.
Here I use the read routine which returns byte objects
(everything is an object in python). I use struct.unpack
to convert the byte object to other data type (int object
etc)
NOTE: At each record n, the file cursor points to record n+1
'''
self.FID = open(filename,"rb")
# self.FID.seek(0,os.SEEK_SET)
self.filename = filename
#br: bytes read
br = self.FID.read(1024)
self.beg=beg
self.end=end
ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
'bytes',
'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end'
]
magic = struct.unpack('@16s', br[:16])
md_temp = struct.unpack('@8d7I916x', br[16:])
self.md = dict(zip(ms_keys, md_temp))
self.imgread=0
self.recno = 0
# some initialization stuff
self.byts = self.md['bytes']
if (self.byts==2):
self.valtype = np.uint16
elif (self.byts == 4):
self.valtype = np.uint32
elif (self.byts == 8):
self.valtype = np.float64
#now convert pieces of these bytes to our data
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
# now read first image
#print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts)
def _readHeader(self):
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
def _readImageRaw(self):
p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen)
v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen)
self.imgread=1
return(p,v)
def _readImage(self):
(p,v)=self._readImageRaw()
img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) )
np.put( np.ravel(img), p, v )
return(img)
def seekimg(self,n=None):
'''Position file to read the nth image.
        Seeking is optimized for reading frames in increasing order.
'''
# the logic involving finding the cursor position
if (n is None):
n = self.recno
if (n < self.beg or n > self.end):
raise IndexError('Error, record out of range')
#print (n, self.recno, self.FID.tell() )
if ((n == self.recno) and (self.imgread==0)):
pass # do nothing
else:
if (n <= self.recno): #ensure cursor less than search pos
self.FID.seek(1024,os.SEEK_SET)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
self.recno = 0
self.imgread=0
if n == 0:
return
#have to iterate on seeking since dlen varies
#remember for rec recno, cursor is always at recno+1
if(self.imgread==0 ): #move to next header if need to
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
for i in range(self.recno+1,n):
#the less seeks performed the faster
#print (i)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
#print 's',self.dlen
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
# we are now at recno in file, read the header and data
#self._clearImage()
self._readHeader()
self.imgread=0
self.recno = n
def rdframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImage())
def rdrawframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImageRaw())
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
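# Minimal usage sketch (the file name is hypothetical):
#   FD = Multifile('/XF11ID/analysis/Compressed_Data/uid_xxx.cmp', beg=0, end=100)
#   img  = FD.rdframe(0)       # dense 2D frame
#   p, v = FD.rdrawframe(0)    # sparse form: flat pixel indices and their values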
class Multifile_Bins( object ):
'''
Bin a compressed file with bins number
See Multifile for details for Multifile_class
'''
def __init__(self, FD, bins=100):
'''
FD: the handler of a compressed Eiger frames
bins: bins number
'''
self.FD=FD
if (FD.end - FD.beg)%bins:
            print ('Please give a bins number such that (FD.end - FD.beg)/bins is an integer')
else:
self.bins = bins
self.md = FD.md
#self.beg = FD.beg
self.beg = 0
Nimg = (FD.end - FD.beg)
slice_num = Nimg//bins
self.end = slice_num
self.time_edge = np.array(create_time_slice( N= Nimg,
slice_num= slice_num, slice_width= bins )) + FD.beg
self.get_bin_frame()
def get_bin_frame(self):
FD= self.FD
self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] )
for n in tqdm( range(len(self.time_edge))):
#print (n)
t1,t2 = self.time_edge[n]
#print( t1, t2)
self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1,
plot_ = False, show_progress = False )
def rdframe(self,n):
return self.frames[:,:,n]
def rdrawframe(self,n):
x_= np.ravel( self.rdframe(n) )
p= np.where( x_ ) [0]
v = np.array( x_[ p ])
return ( np.array(p, dtype=np.int32), v)
def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None,
show_progress=True, *argv,**kwargs):
    '''Get the average image from a data series, using every sampling-th frame to save time'''
#avg_img = np.average(data_series[:: sampling], axis=0)
if beg is None:
beg = FD.beg
if end is None:
end = FD.end
avg_img = FD.rdframe(beg)
n=1
flag=True
if show_progress:
#print( sampling-1 + beg , end, sampling )
if bad_frame_list is None:
bad_frame_list =[]
fra_num = int( (end - beg )/sampling ) - len( bad_frame_list )
for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
#print(i, flag)
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
else:
for i in range( sampling-1 + beg , end, sampling ):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
avg_img /= n
if plot_:
if RUN_GUI:
fig = Figure()
ax = fig.add_subplot(111)
else:
fig, ax = plt.subplots()
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
im = ax.imshow(avg_img , cmap='viridis',origin='lower',
norm= LogNorm(vmin=0.001, vmax=1e2))
#ax.set_title("Masked Averaged Image")
ax.set_title('uid= %s--Masked-Averaged-Image-'%uid)
fig.colorbar(im)
        if kwargs.get('save', False):
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--avg-img-"%uid + '.png'
plt.savefig( fp, dpi=fig.dpi)
#plt.show()
return avg_img
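# Minimal usage sketch for get_avg_imgc (an illustration; assumes FD is an open
# Multifile handler, and a larger `sampling` trades accuracy for speed):
#
#   avg = get_avg_imgc(FD, beg=FD.beg, end=FD.end, sampling=10, plot_=False)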
def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False):
"""Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation
Parameters
----------
FD: Multifile class
compressed file
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
index : int, list, optional
        The ROIs to use. If None, this function will extract averages for all
ROIs
Returns
-------
mean_intensity : array
The mean intensity of each ROI for all `images`
Dimensions:
len(mean_intensity) == len(index)
len(mean_intensity[0]) == len(images)
index : list
The labels for each element of the `mean_intensity` list
"""
qind, pixelist = roi.extract_label_indices( labeled_array )
if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']):
raise ValueError(
" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) )
# handle various input for `index`
if index is None:
index = list(np.unique(labeled_array))
index.remove(0)
else:
try:
len(index)
except TypeError:
index = [index]
index = np.array( index )
#print ('here')
good_ind = np.zeros( max(qind), dtype= np.int32 )
good_ind[ index -1 ] = np.arange( len(index) ) +1
w = np.where( good_ind[qind -1 ] )[0]
qind = good_ind[ qind[w] -1 ]
pixelist = pixelist[w]
# pre-allocate an array for performance
# might be able to use list comprehension to make this faster
mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] )
#fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
#maxqind = max(qind)
norm = np.bincount( qind )[1:]
n= 0
#for i in tqdm(range( FD.beg , FD.end )):
if not multi_cor:
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:]
n +=1
else:
ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ]
inputs = range( len(ring_masks) )
go_through_FD(FD)
pool = Pool(processes= len(inputs) )
print( 'Starting assign the tasks...')
results = {}
for i in tqdm ( inputs ):
results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) )
pool.close()
print( 'Starting running the tasks...')
res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ]
#return res
for i in inputs:
mean_intensity[:,i] = res[i]
        print( 'ROI mean_intensity calculation is DONE!')
del results
del res
mean_intensity /= norm
return mean_intensity, index
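# Illustrative call (a sketch; assumes FD is a Multifile handler and roi_mask is a
# labeled ROI array of shape (FD.md['ncols'], FD.md['nrows']) with 0 as background):
#
#   mean_int, q_index = mean_intensityc(FD, roi_mask, sampling=1)
#   # mean_int has shape (number of sampled frames, number of ROIs)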
def _get_mean_intensity_one_q( FD, sampling, labels ):
mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) )
n=0
qind, pixelist = roi.extract_label_indices( labels )
# iterate over the images to compute multi-tau correlation
fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
for i in range( FD.beg, FD.end, sampling ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:]
n +=1
return mi
def get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=1e10, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30,
plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs):
    '''Get the total intensity of each frame, sampling every N frames.
    Also build bad_frame_list by checking whether each frame's total intensity
    falls outside [bad_pixel_low_threshold, bad_pixel_threshold].
    Usage:
    imgsum, bad_frame_list = get_each_frame_intensityc(good_series, sampling=1000,
    bad_pixel_threshold=1e10, plot_=True)
'''
#print ( argv, kwargs )
#mask &= img < hot_pixel_threshold
imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) )
n=0
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ):
(p,v) = FD.rdrawframe(i)
if len(p)>0:
imgsum[n] = np.sum( v )
n += 1
if plot_:
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
fig, ax = plt.subplots()
ax.plot( imgsum,'bo')
ax.set_title('uid= %s--imgsum'%uid)
ax.set_xlabel( 'Frame_bin_%s'%sampling )
ax.set_ylabel( 'Total_Intensity' )
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--imgsum-"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
plt.show()
bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg
if bad_frame_list is not None:
bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) )
else:
bad_frame_list = bad_frame_list_
if len(bad_frame_list):
print ('Bad frame list length is: %s' %len(bad_frame_list))
else:
print ('No bad frames are involved.')
return imgsum,bad_frame_list
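# Example invocation (a sketch; the threshold values are placeholders to be tuned
# per detector, not recommendations from the original author):
#
#   imgsum, bad_frames = get_each_frame_intensityc(
#       FD, sampling=1, bad_pixel_threshold=1e10, bad_pixel_low_threshold=0)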
|
bsd-3-clause
| -9,175,865,570,974,697,000 | 39.618026 | 231 | 0.520684 | false | 3.481652 | false | false | false |
arangodb/arangodb
|
3rdParty/rocksdb/6.8/tools/advisor/advisor/rule_parser_example.py
|
14
|
3190
|
# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
# This source code is licensed under both the GPLv2 (found in the
# COPYING file in the root directory) and Apache 2.0 License
# (found in the LICENSE.Apache file in the root directory).
from advisor.rule_parser import RulesSpec
from advisor.db_log_parser import DatabaseLogs, DataSource
from advisor.db_options_parser import DatabaseOptions
from advisor.db_stats_fetcher import LogStatsParser, OdsStatsFetcher
import argparse
def main(args):
# initialise the RulesSpec parser
rule_spec_parser = RulesSpec(args.rules_spec)
rule_spec_parser.load_rules_from_spec()
rule_spec_parser.perform_section_checks()
# initialize the DatabaseOptions object
db_options = DatabaseOptions(args.rocksdb_options)
# Create DatabaseLogs object
db_logs = DatabaseLogs(
args.log_files_path_prefix, db_options.get_column_families()
)
# Create the Log STATS object
db_log_stats = LogStatsParser(
args.log_files_path_prefix, args.stats_dump_period_sec
)
data_sources = {
DataSource.Type.DB_OPTIONS: [db_options],
DataSource.Type.LOG: [db_logs],
DataSource.Type.TIME_SERIES: [db_log_stats]
}
if args.ods_client:
data_sources[DataSource.Type.TIME_SERIES].append(OdsStatsFetcher(
args.ods_client,
args.ods_entity,
args.ods_tstart,
args.ods_tend,
args.ods_key_prefix
))
triggered_rules = rule_spec_parser.get_triggered_rules(
data_sources, db_options.get_column_families()
)
rule_spec_parser.print_rules(triggered_rules)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Use this script to get\
suggestions for improving Rocksdb performance.')
parser.add_argument(
'--rules_spec', required=True, type=str,
help='path of the file containing the expert-specified Rules'
)
parser.add_argument(
'--rocksdb_options', required=True, type=str,
help='path of the starting Rocksdb OPTIONS file'
)
parser.add_argument(
'--log_files_path_prefix', required=True, type=str,
help='path prefix of the Rocksdb LOG files'
)
parser.add_argument(
'--stats_dump_period_sec', required=True, type=int,
help='the frequency (in seconds) at which STATISTICS are printed to ' +
'the Rocksdb LOG file'
)
# ODS arguments
parser.add_argument(
'--ods_client', type=str, help='the ODS client binary'
)
parser.add_argument(
'--ods_entity', type=str,
help='the servers for which the ODS stats need to be fetched'
)
parser.add_argument(
'--ods_key_prefix', type=str,
help='the prefix that needs to be attached to the keys of time ' +
'series to be fetched from ODS'
)
parser.add_argument(
'--ods_tstart', type=int,
help='start time of timeseries to be fetched from ODS'
)
parser.add_argument(
'--ods_tend', type=int,
help='end time of timeseries to be fetched from ODS'
)
args = parser.parse_args()
main(args)
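# Example invocation (a sketch; the paths and module invocation are placeholders,
# adjust them to the local checkout and RocksDB instance):
#
#   python3 -m advisor.rule_parser_example \
#       --rules_spec=advisor/rules.ini \
#       --rocksdb_options=/path/to/OPTIONS \
#       --log_files_path_prefix=/path/to/db/LOG \
#       --stats_dump_period_sec=20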
|
apache-2.0
| 7,147,545,253,902,652,000 | 34.842697 | 79 | 0.655172 | false | 3.633257 | false | false | false |
scott-maddox/obpds
|
src/obpds/examples/interactive_schottky_diode.py
|
1
|
1288
|
#
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################
import logging; logging.basicConfig()
# Make sure we import the local obpds version
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../..')))
from obpds import *
# Layers
n = Layer(0.3*um, GaAs, -1e17/cm3)
# Device
d = TwoTerminalDevice(layers=[n],
contacts=[SchottkyContact(), OhmicContact()],
Fn='right')
d.interactive_zero_current()
|
agpl-3.0
| -4,934,516,960,856,693,000 | 32.921053 | 77 | 0.653727 | false | 3.690544 | false | false | false |
edwardgeorge/libgmail
|
demos/MakeTarBall.py
|
1
|
1530
|
#!/usr/bin/env python
# make tarball!
VERSION = '0.3'
PACKAGENAME = 'libgmail-docs_'
import os
print "\nCreate API docs"
os.system('epydoc -o API ../libgmail.py')
def cleanup(*args):
"""Used by os.path.walk to traverse the tree and remove CVS dirs"""
if os.path.split(args[1])[1] == "CVS":
print "Remove ",args[1]
os.system('rm -r %s' % args[1])
filelist = open('filelist', 'r')
folderlist = open('folderlist', 'r')
myFiles = filelist.readlines()
myFolders = folderlist.readlines()
os.system('mkdir %s%s' % (PACKAGENAME,VERSION))
for file in myFiles:
os.system('cp %s %s%s' % (file[:-1], PACKAGENAME,VERSION))
for folder in myFolders:
os.system('mkdir %s%s/%s' % (PACKAGENAME,VERSION, folder[:-1]))
os.system('cp -r %s %s%s' % (folder[:-1],PACKAGENAME, VERSION))
# removing the CVS stuff
os.path.walk('%s%s' % (PACKAGENAME,VERSION),cleanup,None)
print "\nCreate a GNU/Linux tarball..."
try:
execString = 'tar -czf %s%s.tgz %s%s/' % (PACKAGENAME,VERSION,PACKAGENAME, VERSION)
print execString
os.system(execString)
except Exception,info:
print info,"\nYou must have the tar package installed"
else:
print "Done.\n"
print "Create a Windows compatible zipfile..."
try:
execString = 'zip -rq %s%s.zip ./%s%s' % (PACKAGENAME,VERSION,PACKAGENAME, VERSION)
print execString
os.system(execString)
except Exception,info:
print info,"\nYou must have the zip package installed."
else:
print "Done\n"
os.system('rm -rf %s%s' % (PACKAGENAME,VERSION))
|
gpl-2.0
| -953,915,241,889,044,600 | 29 | 87 | 0.660784 | false | 2.959381 | false | false | false |
sysadminmatmoz/odoo-clearcorp
|
cash_budget/wizard/cash_budget_program_populate.py
|
1
|
4063
|
# -*- coding: utf-8 -*-
# © 2016 ClearCorp
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import errno
from openerp.osv import osv, fields
from openerp.tools.translate import _
import base64
import logging
class cash_budget_program_populate(osv.osv_memory):
_name = 'cash.budget.program.populate'
_columns = {
'parent_account': fields.many2one('cash.budget.account', 'Catalog parent', domain=[('account_type','!=','budget'), ('active','=','True')], required=True),
}
def create_prog_line(self, cr, uid, program_id, program_code, parent_account_id=None, parent_line_id=None, previous_program_id=None,context=None ):
prog_obj = self.pool.get('cash.budget.program')
line_obj = self.pool.get('cash.budget.program.line')
account_obj = self.pool.get('cash.budget.account')
for account in account_obj.browse(cr, uid, [parent_account_id], context=context):
# for child in account_obj.browse(cr, uid, account.child_parent_ids, context=context):
if account.child_parent_ids:
for child in account.child_parent_ids:
line_name = program_code + ' - [' + child.code + ']-' + child.name
previous_program_lines = line_obj.search(cr, uid, [('program_id','=',previous_program_id),('account_id','=',child.id),],context=context)
vals = {'parent_id':parent_line_id, 'account_id':child.id, 'program_id':program_id, 'name':line_name}
if previous_program_lines:
vals['previous_year_line_id'] = previous_program_lines[0]
new_line = line_obj.create(cr, uid, vals,context=context )
program = prog_obj.browse(cr,uid,[program_id],context=context)[0]
self.create_prog_line(cr, uid, program_id, program_code, child.id, new_line, previous_program_id=program.previous_program_id.id, context=context )
if account.child_consol_ids:
program = prog_obj.browse(cr,uid,[program_id],context=context)[0]
parent_line = line_obj.browse(cr, uid, [parent_line_id],context=context)[0]
for consol_child in account.child_consol_ids:
prog_lines=line_obj.search(cr, uid, [('account_id','=',consol_child.id)],context=context)
for prg_line in line_obj.browse(cr,uid,prog_lines,context=context):
if program.plan_id.id == prg_line.program_id.plan_id.id:
line_obj.write(cr,uid,[parent_line.id],{'child_consol_ids':[(4,prg_line.id)]})
#line_name = program_code + ' - [' + child.code + ']-' + child.name
#new_line = line_obj.create(cr, uid, {'parent_id':parent_line_id, 'account_id':child.id, 'program_id':program_id, 'name':line_name} )
#self.create_prog_line(cr, uid, program_id, program_code, child.id, new_line, context=context)
return True
def bulk_line_create(self, cr, uid, ids, context=None):
prog_obj = self.pool.get('cash.budget.program')
line_obj = self.pool.get('cash.budget.program.line')
account_obj = self.pool.get('cash.budget.account')
data = self.browse(cr, uid, ids, context=context)[0]
for program in prog_obj.browse(cr, uid, context['active_ids'], context=context):
current_lines = len(program.program_lines)
if current_lines > 0:
raise osv.except_osv(_('Error!'), _('This program already contains program lines'))
line_name = program.code + ' - [' + data.parent_account.code + ']-' + data.parent_account.name
new_line = line_obj.create(cr, uid, {'account_id':data.parent_account.id, 'program_id':program.id, 'name':line_name} )
self.create_prog_line(cr, uid, program.id, program.code, data.parent_account.id, new_line , previous_program_id=program.previous_program_id.id, context=context)
return True
|
agpl-3.0
| -6,161,663,397,664,325,000 | 61.492308 | 172 | 0.602659 | false | 3.513841 | false | false | false |
johnwlockwood/stream_tap
|
stream_tap/__init__.py
|
1
|
1389
|
from . import _meta
from collections import deque
__version__ = _meta.version
__version_info__ = _meta.version_info
class Bucket(object):
"""
Encloses a function that produces results from
an item of an iterator, accumulating any results
in a deque.
"""
def __init__(self, func):
self.func = func
self._contents = deque()
def __call__(self, *args, **kwargs):
result = self.func(*args, **kwargs)
if result is not None:
self._contents.append(result)
def contents(self):
"""
:returns: contents
"""
return self._contents
def drain_contents(self):
"""
Starts a new collection to accumulate future contents
and returns all of existing contents.
"""
existing_contents = self._contents
self._contents = deque()
return existing_contents
def stream_tap(callables, stream):
"""
Calls each callable with each item in the stream.
Use with Buckets. Make a Bucket with a callable
and then pass a tuple of those buckets
in as the callables. After iterating over
    this generator, get contents from each Bucket.
    :param callables: collection of callables.
    :param stream: Iterator of values.
"""
for item in stream:
for caller in callables:
caller(item)
yield item
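# Minimal usage sketch (the names below are illustrative, not part of the package):
#
#   evens = Bucket(lambda x: x if x % 2 == 0 else None)
#   passed_through = list(stream_tap((evens,), range(10)))
#   even_numbers = list(evens.contents())   # -> [0, 2, 4, 6, 8]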
|
apache-2.0
| -7,339,902,638,887,691,000 | 24.722222 | 61 | 0.614831 | false | 4.451923 | false | false | false |
dilynfullerton/tr-A_dependence_plots
|
unused/xl.py
|
1
|
1861
|
"""Put data into an excel workbook. Currently unused.
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from openpyxl import load_workbook, Workbook
from ImsrgDataMap import ImsrgDataMapInt
from Exp import ExpInt
def print_single_particle_energy_data_to_excel(e, hw, datadir, savepath,
startrow=2):
all_data_map = ImsrgDataMapInt(parent_directory=datadir)
data_maps = all_data_map.map[ExpInt(e, hw)]
index_orbital_map = data_maps.index_orbital_map
ime_map = data_maps.index_mass_energy_map()
try:
wb = load_workbook(savepath)
except IOError:
wb = Workbook()
ws = wb.active
ws.title = 'e={e} hw={hw}'.format(e=e, hw=hw)
row = startrow
col = 1
ws.cell(row=row, column=col).value = 'KEY'
row += 1
for i, s in zip(range(5), ['Index', 'n', 'l', 'j', 'tz']):
ws.cell(row=row, column=col + i).value = s
row += 1
for oindex in sorted(index_orbital_map.keys()):
ws.cell(row=row, column=col).value = int(oindex)
qnums = index_orbital_map[oindex]
for i, qn in zip(range(1, 5), qnums):
ws.cell(row=row, column=col + i).value = qn
row += 1
row += 1
ws.cell(row=row, column=col).value = 'DATA'
row += 1
ws.cell(row=row, column=col).value = 'Index'
ws.cell(row=row, column=col + 1).value = 'A'
ws.cell(row=row, column=col + 2).value = 'energy (MeV)'
row += 1
for oindex in sorted(ime_map.keys()):
me_map = ime_map[oindex]
for m in me_map.keys():
ws.cell(row=row, column=col).value = int(oindex)
ws.cell(row=row, column=col + 1).value = int(m)
ws.cell(row=row, column=col + 2).value = me_map[m]
row += 1
wb.save(savepath)
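# Example call (a sketch; the e/hw values and paths are placeholders):
#
#   print_single_particle_energy_data_to_excel(
#       e=12, hw=20, datadir='../data', savepath='spe_data.xlsx')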
|
cc0-1.0
| -8,680,902,879,900,898,000 | 28.078125 | 72 | 0.584095 | false | 3.030945 | false | false | false |
srznew/heat
|
doc/source/ext/resources.py
|
1
|
15178
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import itertools
from docutils import core
from docutils import nodes
import pydoc
import six
from sphinx.util import compat
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import plugin_manager
from heat.engine import properties
from heat.engine import support
_CODE_NAMES = {'2013.1': 'Grizzly',
'2013.2': 'Havana',
'2014.1': 'Icehouse',
'2014.2': 'Juno',
'2015.1': 'Kilo',
'5.0.0': 'Liberty'}
all_resources = {}
class integratedrespages(nodes.General, nodes.Element):
pass
class unsupportedrespages(nodes.General, nodes.Element):
pass
class contribresourcepages(nodes.General, nodes.Element):
pass
class ResourcePages(compat.Directive):
has_content = False
required_arguments = 0
optional_arguments = 1
final_argument_whitespace = False
option_spec = {}
def path(self):
return None
def statuses(self):
return support.SUPPORT_STATUSES
def run(self):
prefix = self.arguments and self.arguments.pop() or None
content = []
for resource_type, resource_classes in _filter_resources(
prefix, self.path(), self.statuses()):
for resource_class in resource_classes:
self.resource_type = resource_type
self.resource_class = resource_class
section = self._section(content, resource_type, '%s')
self.props_schemata = properties.schemata(
self.resource_class.properties_schema)
self.attrs_schemata = attributes.schemata(
self.resource_class.attributes_schema)
# NOTE(prazumovsky): Adding base_attributes_schema dict to
# Resource class should means adding new attributes from this
# dict to documentation of each resource, else there is no
# chance to learn about base attributes.
self.attrs_schemata.update(
self.resource_class.base_attributes_schema)
self.update_policy_schemata = properties.schemata(
self.resource_class.update_policy_schema)
self._status_str(resource_class.support_status, section)
cls_doc = pydoc.getdoc(resource_class)
if cls_doc:
# allow for rst in the class comments
cls_nodes = core.publish_doctree(cls_doc).children
section.extend(cls_nodes)
self.contribute_properties(section)
self.contribute_attributes(section)
self.contribute_update_policy(section)
self.contribute_hot_syntax(section)
return content
def _version_str(self, version):
if version in _CODE_NAMES:
return _("%(version)s (%(code)s)") % {'version': version,
'code': _CODE_NAMES[version]}
else:
return version
def _status_str(self, support_status, section):
while support_status is not None:
sstatus = support_status.to_dict()
if sstatus['status'] is support.SUPPORTED:
msg = _('Available')
else:
msg = sstatus['status']
if sstatus['version'] is not None:
msg = _('%s since %s') % (msg,
self._version_str(
sstatus['version']))
if sstatus['message'] is not None:
msg = _('%s - %s') % (msg, sstatus['message'])
if not (sstatus['status'] == support.SUPPORTED and
sstatus['version'] is None):
para = nodes.paragraph(_(''), msg)
note = nodes.note(_(''), para)
section.append(note)
support_status = support_status.previous_status
def _section(self, parent, title, id_pattern):
id = id_pattern % self.resource_type
section = nodes.section(ids=[id])
parent.append(section)
title = nodes.title('', title)
section.append(title)
return section
def _prop_syntax_example(self, prop):
if not prop:
return 'Value'
if prop.type == properties.Schema.LIST:
schema = lambda i: prop.schema[i] if prop.schema else None
sub_type = [self._prop_syntax_example(schema(i))
for i in range(2)]
return '[%s, %s, ...]' % tuple(sub_type)
elif prop.type == properties.Schema.MAP:
def sub_props():
for sub_key, sub_value in prop.schema.items():
if sub_value.implemented:
yield '"%s": %s' % (
sub_key, self._prop_syntax_example(sub_value))
return '{%s}' % (', '.join(sub_props()) if prop.schema else '...')
else:
return prop.type
def contribute_hot_syntax(self, parent):
section = self._section(parent, _('HOT Syntax'), '%s-hot')
props = []
for prop_key in sorted(six.iterkeys(self.props_schemata)):
prop = self.props_schemata[prop_key]
if (prop.implemented
and prop.support_status.status == support.SUPPORTED):
props.append('%s: %s' % (prop_key,
self._prop_syntax_example(prop)))
props_str = ''
if props:
props_str = '''\n properties:
%s''' % ('\n '.join(props))
template = '''heat_template_version: 2013-05-23
...
resources:
...
the_resource:
type: %s%s''' % (self.resource_type, props_str)
block = nodes.literal_block('', template, language="hot")
section.append(block)
@staticmethod
def cmp_prop(x, y):
x_key, x_prop = x
y_key, y_prop = y
if x_prop.support_status.status == y_prop.support_status.status:
return cmp(x_key, y_key)
if x_prop.support_status.status == support.SUPPORTED:
return -1
if x_prop.support_status.status == support.DEPRECATED:
return 1
return cmp(x_prop.support_status.status,
y_prop.support_status.status)
def contribute_property(self, prop_list, prop_key, prop, upd_para=None):
prop_item = nodes.definition_list_item(
'', nodes.term('', prop_key))
prop_list.append(prop_item)
prop_item.append(nodes.classifier('', prop.type))
definition = nodes.definition()
prop_item.append(definition)
self._status_str(prop.support_status, definition)
if not prop.implemented:
para = nodes.paragraph('', _('Not implemented.'))
note = nodes.note('', para)
definition.append(note)
return
if prop.description:
para = nodes.paragraph('', prop.description)
definition.append(para)
if upd_para is not None:
definition.append(upd_para)
else:
if prop.update_allowed:
upd_para = nodes.paragraph(
'', _('Can be updated without replacement.'))
definition.append(upd_para)
elif prop.immutable:
upd_para = nodes.paragraph('', _('Updates are not supported. '
'Resource update will fail on'
' any attempt to update this '
'property.'))
definition.append(upd_para)
else:
upd_para = nodes.paragraph('', _('Updates cause replacement.'))
definition.append(upd_para)
if prop.default is not None:
para = nodes.paragraph('', _('Defaults to "%s".') % prop.default)
definition.append(para)
for constraint in prop.constraints:
para = nodes.paragraph('', str(constraint))
definition.append(para)
sub_schema = None
if prop.schema and prop.type == properties.Schema.MAP:
para = nodes.paragraph()
emph = nodes.emphasis('', _('Map properties:'))
para.append(emph)
definition.append(para)
sub_schema = prop.schema
elif prop.schema and prop.type == properties.Schema.LIST:
para = nodes.paragraph()
emph = nodes.emphasis('', _('List contents:'))
para.append(emph)
definition.append(para)
sub_schema = prop.schema
if sub_schema:
sub_prop_list = nodes.definition_list()
definition.append(sub_prop_list)
for sub_prop_key, sub_prop in sorted(sub_schema.items(),
self.cmp_prop):
if sub_prop.support_status.status != support.HIDDEN:
self.contribute_property(
sub_prop_list, sub_prop_key, sub_prop, upd_para)
def contribute_properties(self, parent):
if not self.props_schemata:
return
section = self._section(parent, _('Properties'), '%s-props')
prop_list_required = nodes.definition_list()
subsection_required = self._section(section, _('required'),
'%s-props-req')
subsection_required.append(prop_list_required)
prop_list_optional = nodes.definition_list()
subsection_optional = self._section(section, _('optional'),
'%s-props-opt')
subsection_optional.append(prop_list_optional)
for prop_key, prop in sorted(self.props_schemata.items(),
self.cmp_prop):
if prop.support_status.status != support.HIDDEN:
if prop.required:
prop_list = prop_list_required
else:
prop_list = prop_list_optional
self.contribute_property(prop_list, prop_key, prop)
def contribute_attributes(self, parent):
if not self.attrs_schemata:
return
section = self._section(parent, _('Attributes'), '%s-attrs')
prop_list = nodes.definition_list()
section.append(prop_list)
for prop_key, prop in sorted(self.attrs_schemata.items()):
if prop.support_status.status != support.HIDDEN:
description = prop.description
prop_item = nodes.definition_list_item(
'', nodes.term('', prop_key))
prop_list.append(prop_item)
definition = nodes.definition()
prop_item.append(definition)
self._status_str(prop.support_status, definition)
if description:
def_para = nodes.paragraph('', description)
definition.append(def_para)
def contribute_update_policy(self, parent):
if not self.update_policy_schemata:
return
section = self._section(parent, _('UpdatePolicy'), '%s-updpolicy')
prop_list = nodes.definition_list()
section.append(prop_list)
for prop_key, prop in sorted(self.update_policy_schemata.items(),
self.cmp_prop):
self.contribute_property(prop_list, prop_key, prop)
class IntegrateResourcePages(ResourcePages):
def path(self):
return 'heat.engine.resources'
def statuses(self):
return [support.SUPPORTED]
class UnsupportedResourcePages(ResourcePages):
def path(self):
return 'heat.engine.resources'
def statuses(self):
return [s for s in support.SUPPORT_STATUSES if s != support.SUPPORTED]
class ContribResourcePages(ResourcePages):
def path(self):
return 'heat.engine.plugins'
def _filter_resources(prefix=None, path=None, statuses=[]):
def not_hidden_match(cls):
return cls.support_status.status != support.HIDDEN
def prefix_match(name):
return prefix is None or name.startswith(prefix)
def path_match(cls):
return path is None or cls.__module__.startswith(path)
def status_match(cls):
return cls.support_status.status in statuses
filtered_resources = {}
for name in sorted(six.iterkeys(all_resources)):
if prefix_match(name):
for cls in all_resources.get(name):
if (path_match(cls) and status_match(cls) and
not_hidden_match(cls)):
if filtered_resources.get(name) is not None:
filtered_resources[name].append(cls)
else:
filtered_resources[name] = [cls]
return sorted(six.iteritems(filtered_resources))
def _load_all_resources():
manager = plugin_manager.PluginManager('heat.engine.resources')
resource_mapping = plugin_manager.PluginMapping('resource')
res_plugin_mappings = resource_mapping.load_all(manager)
for mapping in res_plugin_mappings:
name, cls = mapping
if all_resources.get(name) is not None:
all_resources[name].append(cls)
else:
all_resources[name] = [cls]
def link_resource(app, env, node, contnode):
reftarget = node.attributes['reftarget']
for resource_name in all_resources:
if resource_name.lower() == reftarget.lower():
resource = all_resources[resource_name]
refnode = nodes.reference('', '', internal=True)
refnode['reftitle'] = resource_name
if resource_name.startswith('AWS'):
source = 'template_guide/cfn'
else:
source = 'template_guide/openstack'
uri = app.builder.get_relative_uri(
node.attributes['refdoc'], source)
refnode['refuri'] = '%s#%s' % (uri, resource_name)
refnode.append(contnode)
return refnode
def setup(app):
_load_all_resources()
app.add_node(integratedrespages)
app.add_directive('integratedrespages', IntegrateResourcePages)
app.add_node(unsupportedrespages)
app.add_directive('unsupportedrespages', UnsupportedResourcePages)
app.add_node(contribresourcepages)
app.add_directive('contribrespages', ContribResourcePages)
app.connect('missing-reference', link_resource)
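# Illustrative wiring (an assumption about the docs build, not stated in this file):
# conf.py adds this directory to sys.path and enables the extension with
#
#   extensions = ['resources']
#
# after which a template-guide page can render resource docs with, e.g.
#
#   .. integratedrespages:: OS::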
|
apache-2.0
| 401,516,824,030,433,600 | 35.224344 | 79 | 0.564238 | false | 4.330385 | false | false | false |
ngtrhieu/outline_alignment
|
autumn_utils/feature_mappings.py
|
1
|
1570
|
import cv
import cv2
import numpy as np
import math
def get_features (cnt, approx = 5):
return cv2.approxPolyDP (cnt, approx, False)
def simplify_feature (feature):
simple = []
prev = None
for v in feature:
dist = 5000
if prev is not None:
dist = np.linalg.norm (v - prev)
if dist > 2:
simple.append (v)
prev = v
return simple
def map_feature (feature1, feature2):
f1 = []
f2 = []
for u in feature1:
min = 20
m = None
index = None
for i, v in enumerate (feature2):
dist = np.linalg.norm (u - v)
if dist < min:
min = dist
m = v
index = i
if m is not None:
f1.append (u)
f2.append (m)
feature2.pop (index)
else:
f1.append (u)
f2.append (u)
f1 = np.array (f1).squeeze ()
f2 = np.array (f2).squeeze ()
return f1, f2
def segmentFeatures (fineFeatures, courseFeatures):
controlPoints = []
for u in courseFeatures:
ux, uy = u[0]
min_dst = 10000
m = None
for v in fineFeatures:
vx, vy = v[0]
dst = math.pow (ux - vx, 2) + math.pow (uy - vy, 2)
if dst < min_dst:
min_dst = dst
m = v
if m is not None:
controlPoints.append (m)
i = 0
currentSegment = []
allSegments = []
for u in fineFeatures:
if np.array_equal (u, controlPoints[i + 1]):
currentSegment.append (u)
allSegments.append (currentSegment)
currentSegment = [u]
i += 1
			if i + 1 >= len (controlPoints):
break
else:
currentSegment.append (u)
if len (currentSegment) > 0:
currentSegment.append (fineFeatures[0])
allSegments.append (currentSegment)
return allSegments, controlPoints
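# Illustrative pipeline sketch (the input contours are assumed to come from
# cv2.findContours elsewhere; the approximation tolerances are placeholders):
#
#   fine = simplify_feature(get_features(fine_contour, approx=2))
#   coarse = simplify_feature(get_features(coarse_contour, approx=10))
#   segments, control_points = segmentFeatures(fine, coarse)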
|
mit
| 3,110,566,440,364,656,000 | 19.402597 | 54 | 0.63121 | false | 2.692967 | false | false | false |
nddsg/SimpleDBMS
|
simple_dbms/create_statement.py
|
1
|
2985
|
from sql_statement import SQLStatement
from catalog import Catalog
from operation_status import OperationStatus
import simple_dbms
try:
from bsddb import db
except ImportError:
from bsddb3 import db
class CreateStatement(SQLStatement, object):
def __init__(self, table, column_def_list):
"""
Constructs a CreateStatement object that will create the specified
table with the specified column information.
:param table:
:param column_def_list:
"""
super(CreateStatement, self).__init__(tables=[table], columns=column_def_list)
def execute(self):
table = self.get_table(0)
try:
# Add the column information to the Table object, and
# make sure there is only one primary-key column and no
# duplicate column names.
has_pk = False
for i in range(0, self.num_columns()):
col = self.get_column(i)
table.add_column(col)
if col.is_primary_key():
if has_pk:
                        raise ValueError(table.get_name() + ": may not have more than one primary key column")
has_pk = True
for j in range(i + 1, self.num_columns()):
if col.get_name() == self.get_column(j).get_name():
                        raise ValueError(table.get_name() + ": may not have two columns named " + col.get_name())
# Add the metadata for the table to the catalog, printing
# an error message if the table name is already in use.
if Catalog.put_metadata(table) == OperationStatus.KEYEXIST:
raise AttributeError(table.name + ": a table with this name already exists")
# create the BDB database for the table
d = db.DB(simple_dbms.SimpleDBMS.get_env())
txn = simple_dbms.SimpleDBMS.get_env().txn_begin()
simple_dbms.SimpleDBMS.push_txn(txn)
d.set_flags(0)
if has_pk:
d.open(filename=table.name, txn=txn, dbtype=db.DB_BTREE,
flags=db.DB_CREATE | db.DB_THREAD, mode=0666)
else:
d.open(filename=table.name, txn=txn, dbtype=db.DB_RECNO,
flags=db.DB_CREATE | db.DB_THREAD, mode=0666)
txn.commit()
simple_dbms.SimpleDBMS.pop_txn()
table.set_db(d)
print "Created table " + table.name + "."
except Exception as e:
print e
print "Could not create table " + table.name + "."
# Because we could not use Table.open() above, the table is not
# in the table cache. Thus, we need to close its DB handle here,
# or else it will never get closed!
#
# In the other commands, we will use Table.open(), so this
# step will not be necessary.
if table.get_db() is not None:
table.get_db().close()
|
gpl-3.0
| -5,155,393,903,485,593,000 | 39.337838 | 101 | 0.558124 | false | 4.174825 | false | false | false |
pytlakp/intranetref
|
src/intranet3/models/times.py
|
1
|
1043
|
import datetime
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import DateTime, Date, String, Integer, Float, Boolean
from intranet3.models import Base
class TimeEntry(Base):
__tablename__ = 'time_entry'
id = Column(Integer, primary_key=True, index=True)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False, index=True)
date = Column(Date, nullable=False, index=True)
time = Column(Float, nullable=False)
description = Column(String, nullable=False)
added_ts = Column(DateTime, nullable=False, default=datetime.datetime.now)
modified_ts = Column(DateTime, nullable=False, default=datetime.datetime.now)
timer_ts = Column(DateTime)
ticket_id = Column(Integer, nullable=True, index=True)
project_id = Column(Integer, ForeignKey('project.id'), nullable=False, index=True)
# TODO: task
deleted = Column(Boolean, nullable=False, default=False, index=True)
frozen = Column(Boolean, nullable=False, default=False, index=True)
|
mit
| 8,433,913,652,926,332,000 | 33.766667 | 86 | 0.705657 | false | 3.891791 | false | false | false |
frew/simpleproto
|
scons-local-1.1.0/SCons/Tool/swig.py
|
1
|
4609
|
"""SCons.Tool.swig
Tool-specific initialization for swig.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/swig.py 3603 2008/10/10 05:46:45 scons"
import os.path
import re
import SCons.Action
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
SwigAction = SCons.Action.Action('$SWIGCOM', '$SWIGCOMSTR')
def swigSuffixEmitter(env, source):
if '-c++' in SCons.Util.CLVar(env.subst("$SWIGFLAGS", source=source)):
return '$SWIGCXXFILESUFFIX'
else:
return '$SWIGCFILESUFFIX'
# Match '%module test', as well as '%module(directors="1") test'
_reModule = re.compile(r'%module(?:\s*\(.*\))?\s+(.+)')
def _swigEmitter(target, source, env):
swigflags = env.subst("$SWIGFLAGS", target=target, source=source)
flags = SCons.Util.CLVar(swigflags)
for src in source:
src = str(src.rfile())
mnames = None
if "-python" in flags and "-noproxy" not in flags:
if mnames is None:
mnames = _reModule.findall(open(src).read())
target.extend(map(lambda m, d=target[0].dir:
d.File(m + ".py"), mnames))
if "-java" in flags:
if mnames is None:
mnames = _reModule.findall(open(src).read())
java_files = map(lambda m: [m + ".java", m + "JNI.java"], mnames)
java_files = SCons.Util.flatten(java_files)
outdir = env.subst('$SWIGOUTDIR', target=target, source=source)
if outdir:
java_files = map(lambda j, o=outdir: os.path.join(o, j), java_files)
java_files = map(env.fs.File, java_files)
for jf in java_files:
t_from_s = lambda t, p, s, x: t.dir
SCons.Util.AddMethod(jf, t_from_s, 'target_from_source')
target.extend(java_files)
return (target, source)
def generate(env):
"""Add Builders and construction variables for swig to an Environment."""
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
c_file.suffix['.i'] = swigSuffixEmitter
cxx_file.suffix['.i'] = swigSuffixEmitter
c_file.add_action('.i', SwigAction)
c_file.add_emitter('.i', _swigEmitter)
cxx_file.add_action('.i', SwigAction)
cxx_file.add_emitter('.i', _swigEmitter)
java_file = SCons.Tool.CreateJavaFileBuilder(env)
java_file.suffix['.i'] = swigSuffixEmitter
java_file.add_action('.i', SwigAction)
java_file.add_emitter('.i', _swigEmitter)
env['SWIG'] = 'swig'
env['SWIGFLAGS'] = SCons.Util.CLVar('')
env['SWIGCFILESUFFIX'] = '_wrap$CFILESUFFIX'
env['SWIGCXXFILESUFFIX'] = '_wrap$CXXFILESUFFIX'
env['_SWIGOUTDIR'] = '${"-outdir " + str(SWIGOUTDIR)}'
env['SWIGPATH'] = []
env['SWIGINCPREFIX'] = '-I'
env['SWIGINCSUFFIX'] = ''
env['_SWIGINCFLAGS'] = '$( ${_concat(SWIGINCPREFIX, SWIGPATH, SWIGINCSUFFIX, __env__, RDirs, TARGET, SOURCE)} $)'
env['SWIGCOM'] = '$SWIG -o $TARGET ${_SWIGOUTDIR} ${_SWIGINCFLAGS} $SWIGFLAGS $SOURCES'
expr = '^[ \t]*%[ \t]*(?:include|import|extern)[ \t]*(<|"?)([^>\s"]+)(?:>|"?)'
scanner = SCons.Scanner.ClassicCPP("SWIGScan", ".i", "SWIGPATH", expr)
env.Append(SCANNERS = scanner)
def exists(env):
return env.Detect(['swig'])
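# Minimal SConstruct sketch (an illustration only; the interface file and sources
# are placeholders, not part of this tool):
#
#   env = Environment(tools=['default', 'swig'])
#   env.Append(SWIGFLAGS=['-python'], SWIGPATH=['include'])
#   env.SharedLibrary('example', ['example.i', 'example_impl.c'])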
|
bsd-2-clause
| -4,627,851,629,485,158,000 | 38.059322 | 121 | 0.644174 | false | 3.416605 | false | false | false |
jakemalley/training-log
|
traininglog/admin/views.py
|
1
|
4565
|
# admin/views.py
# Jake Malley
# 19/02/15
"""
Define the routes for the admin blueprint.
"""
# Imports
from flask import redirect, render_template, \
request, url_for, Blueprint, abort, flash
from flask.ext.login import fresh_login_required, current_user
from traininglog import db
from traininglog.models import Member, Exercise
from forms import AdminEditDetailsForm
from functools import wraps
from datetime import datetime
# Setup the admin blueprint.
admin_blueprint = Blueprint(
'admin',__name__,
template_folder='templates'
)
# Admin Required - Only allows members with is_admin = 1 to access these views.
# Allows me to use the decorator @admin_required on different routes.
def admin_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
# If they are not an admin take them home.
if not bool(current_user.is_admin):
return redirect(url_for('home.index'))
return f(*args, **kwargs)
return decorated_function
# Define the routes
@admin_blueprint.route('/')
@fresh_login_required
@admin_required
def dashboard():
"""
The dashboard for the admin blueprint.
"""
# Get a list of all the members.
members = Member.query.all()
# Get a list of all the active members. (Members who have logged in today.)
active_members = Member.query.filter(Member.last_login_date>datetime.utcnow().date()).all()
# Create a dictionary of the stats.
stats = {
"total_members":len(members), # Total number of members.
"active_members":len(active_members) # Total number of active members.
}
# Render the admin index page passing in the members and stats.
return render_template('admin_index.html', members=members,stats=stats)
@admin_blueprint.route('/view/<member_id>', methods=['POST','GET'])
@fresh_login_required
@admin_required
def view(member_id):
"""
Method for viewing individual users.
"""
# Create the form to edit the members data.
admin_edit_form = AdminEditDetailsForm()
# If the method was post and the form was valid.
if request.method == 'POST' and admin_edit_form.validate_on_submit():
# Change the data.
# Get the member with that ID.
member = Member.query.filter_by(id=admin_edit_form.member_id.data).first()
        # See if the account was marked to be deleted; if so, we don't need to update the details as well.
if bool(admin_edit_form.delete_user.data) == True:
# Delete the user.
# Get their exercise data.
exercise_data = Exercise.query.filter_by(member=member).all()
# For each piece of data.
for data in exercise_data:
# Delete the data.
db.session.delete(data)
# Finally delete the user.
db.session.delete(member)
# And commit the changes
db.session.commit()
# Flash a message.
            flash('Account has been deleted!')
            # Redirect to the admin dashboard since that user doesn't exist anymore.
return redirect(url_for('admin.dashboard'))
else:
# User was not marked as deleted,
# update their details with the details from the form.
member.firstname = admin_edit_form.firstname.data
member.surname = admin_edit_form.surname.data
member.email = admin_edit_form.email.data
member.set_active_status(int(admin_edit_form.set_active.data))
member.is_admin = int(admin_edit_form.set_admin.data)
# If the password was changed.
if admin_edit_form.password.data:
# Update the password.
member.update_password(admin_edit_form.password.data)
# Flash a success message.
flash("Details have been updated. Please inform the member of the changes.")
# Commit the changes.
db.session.commit()
# Refresh the page
return render_template('admin_view.html', member=member, admin_edit_form=admin_edit_form)
else:
# Get the member with that ID.
member = Member.query.filter_by(id=member_id).first()
# If that member exists.
if member is not None:
# Render the template passing in the member and form.
return render_template('admin_view.html', member=member, admin_edit_form=admin_edit_form)
else:
# Raise a HTTP 404 (Page not found) error.
abort(404)
|
mit
| -4,846,714,409,154,546,000 | 33.854962 | 105 | 0.635487 | false | 4.097846 | false | false | false |
jiansenzheng/oanda_trading
|
oanda_trading/forex_trading_general_171005.py
|
1
|
27162
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 06 20:00:30 2016
@author: Jiansen
"""
import requests
import threading
import copy
import logging
import os
#import urllib3
import json
from scipy import stats
#from decimal import Decimal, getcontext, ROUND_HALF_DOWN
#from event00 import TickEvent,TickEvent2
#import time
import oandapy
import httplib
import pandas as pd
import math
import numpy as np
import pywt
import time
from settings import STREAM_DOMAIN, API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID
from trading_events import Event,TickEvent2,LiqEvent,OrderEvent,CloseEvent
from trading_global_functions import *
from trading_log import log_dict
import Queue
#for writing data
import datetime
from bson.objectid import ObjectId
import pymongo as pm
from pymongo import MongoClient
import statsmodels.tsa.stattools as ts
#requests.adapters.DEFAULT_RETRIES = 5
from warningOps import warning
from seriesADF import getADF
corpid= ''
secret=''
warn = warning(corpid,secret)
#------the only line we need to change is about the instruments----#
pairs = "EUR_USD"
#-----------------------------------------------------------------------#
client = MongoClient('localhost',27017)
db = client.test_database
#---------------Initialize the parameters and database connections-------#
if pairs == "EUR_USD":
try:
from param_EUR_USD import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_test
index_collect = db.index_EUR_USD
elif pairs == "USD_CNH":
try:
from param_USD_CNH import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_USD_CNH
index_collect = db.index_USD_CNH
elif pairs == "AUD_USD":
try:
from param_AUD_USD import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_AUD_USD
index_collect = db.index_AUD_USD
else:
raise ValueError('Invalid <pairs>, CANNOT FIND THE INSTRUMENTS!')
#-----------------------------------------------------------------------#
#--------------------------Liquidity Index------------------------------#
#-----------------------------------------------------------------------#
class LiqForex(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def getLiq(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session()
#s.keep_alive = False
url = "https://" + self.domain + "/v1/candles"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instrument':self.instruments, 'accountId' : self.account_id,
'count':self.ct,'candleFormat':'midpoint','granularity':self.gran}
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
logging.info( pre)
resp = s.send(pre, stream=False, verify=False)
try:
msg=json.loads(resp.text)
except Exception as e:
logging.warning( "Caught exception when converting message into json\n" + str(e))
return
if msg.has_key("candles"):
time0=msg.get("candles")[-1]["time"]
lis = ohlcv_lis(msg.get("candles"))
liqS = pd.Series()
for i in range(0, len(lis)- (self.dd+1) ,1):
s2 = liq15min(lis[i:i+self.dd])
liqS = np.append(liqS,s2)
liq=liqS[-1]
                logging.info( "liq={0}".format(liq))
tev = LiqEvent(self.instruments,time0,liq)
self.events_queue.put(tev,False)
post_metric = get_indicator(self.instruments,None,None,self.gran,liq,None,None)
index_collect.insert_one(post_metric)
except Exception as e:
s.close()
content0 = "Caught exception when connecting to history\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def activeLiq(self,period):
while True:
self.getLiq()
time.sleep(period)
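# Sketch of how the liquidity poller is typically run on its own thread (the candle
# count, granularity, lookback and polling period below are illustrative assumptions,
# not values taken from this file):
#
#   events = Queue.Queue()
#   liq_feed = LiqForex(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs,
#                       ct=31, gran='M15', dd=14, events_queue=events)
#   threading.Thread(target=liq_feed.activeLiq, args=(600,)).start()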
#--------------------------------------------------------------------#
class StreamingForexPrices(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def connect_to_stream(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session() # socket
url = "https://" + self.domain + "/v1/prices"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instruments' : self.instruments, 'accountId' : self.account_id}
time.sleep(0.8) # sleep some seconds
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
resp = s.send(pre, stream=True, verify=False)
return resp
except Exception as e:
#global s
s.close()
content0 = "Caught exception when connecting to stream\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def stream_to_queue_old(self,collection):
response = self.connect_to_stream()
if response.status_code != 200:
return
try:
for line in response.iter_lines(1):
if line:
try:
msg = json.loads(line)
except Exception as e:
content0 = "Caught exception when converting message into json\n" + str(e)
logging.warning(content0)
return
if msg.has_key("instrument") or msg.has_key("tick"):
logging.info(msg)
instrument = msg["tick"]["instrument"]
time0 = msg["tick"]["time"]
bid = msg["tick"]["bid"]
ask = msg["tick"]["ask"]
tev = TickEvent2(instrument, time0, bid, ask)
self.events_queue.put(tev,False)
post= getDoc(msg)
collection.insert_one(post)
except Exception as e:
logging.warning('Caught ChunkedEncodingError in stream_to_queue_old()!'+str(time.ctime()))
return
#--------------
#------
# new strategy
class LiqMAStrategy(object):
    """
    Moving-average strategy gated by a liquidity index: an ADF test on recent
    prices switches between a trend-following mode and a mean-reversion mode,
    each with its own SMA/LMA windows, slope thresholds and stop-loss/take-profit.
    """
def __init__(
self, access_token, account_id, pairs, units, events, stopLoss1, takeProfit1,stopLoss2, takeProfit2,
short_window1, long_window1,short_window2, long_window2, idxU, lam, thres1, thres2,thres3, thres4, adf_thres
):
self.access_token = access_token
self.account_id = account_id
self.pairs = pairs
self.units = units
self.stopLoss1 = stopLoss1
self.takeProfit1 = takeProfit1
self.stopLoss2 = stopLoss2
self.takeProfit2 = takeProfit2
self.pairs_dict = self.create_pairs_dict()
self.events = events
self.short_window1 = short_window1
self.long_window1 = long_window1
self.short_window2 = short_window2
self.long_window2 = long_window2
self.idxU = idxU
self.lam = lam
self.priceLis1 = pd.Series() #for trends
self.priceLis2 = pd.Series() #for reversion
self.thres1 = thres1
self.thres2 = thres2
self.thres3 = thres3
self.thres4 = thres4
self.adf_thres = adf_thres
#---intermediates---#
self.SL_TP = {"trends":[self.stopLoss1,self.takeProfit1],
"reversion":[self.stopLoss2,self.takeProfit2]}
self.s_l_window = {"trends":[self.short_window1,self.long_window1],
"reversion":[self.short_window2,self.long_window2]}
self.thres_tre_rev = {"trends":[self.thres1, self.thres2],
"reversion":[self.thres3,self.thres4]}
def create_pairs_dict(self):
attr_dict = {
"ticks": 0,
"tick0": 0,
"priceLS":0.0,
"invested": False,
"short_sma": None,
"long_sma": None,
"longShort": None,
"short_slope":None,
"long_slope":None, # False denotes sell, while True denotes buy
"check": False,
"orlis":[0,0,0,0],
"stra": 0,
"fixed": False
}
#pairs_dict = {}
pairs_dict = copy.deepcopy(attr_dict)
return pairs_dict
def check_order(self,check):
if check== True:
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseTrades = oanda0.get_trades(self.account_id,instrument=self.pairs)
except Exception as e:
logging.warning('Caught exception in get_trades() of check_order()!\n'+str(time.ctime()))
return
if responseTrades.get("trades")==[]:
pd = self.pairs_dict
pd["orlis"].pop(0)
logging.info(" orlis: "+str(pd["orlis"]))
pd["orlis"].append(0)
logging.info(" orlis: "+str(pd["orlis"]))
if pd["orlis"][0:4]==[1,1,0,0]:
logging.warning( "Stop Loss Order Executed!")
#warn.tradingWarning(" Stop Loss Order Executed!")
pd["invested"]= False
pd["fixed"] = False #position closed, the stra type is free
pd["check"] = False
else:
pass
else:
pd = self.pairs_dict
#pd["orlis"][0] = copy.copy(pd["orlis"][1])
pd["orlis"].pop(0)
pd["orlis"].append(1)
logging.info("not empty- orlis: "+str(pd["orlis"]))
pd["invested"]= True
pd["fixed"] = True #position closed, the stra type is free
pd["check"] = True
else:
pass
def compute_slope(self,price_lis,window_length,k):
        '''Compute a weighted slope ratio for a short price series.
        Arguments:
            price_lis {np.ndarray} -- the filtered time series used for both the
                short-window (SMA) and full-window (LMA) slopes; typically newPriceLis
            window_length {int} -- length of the short window (e.g. self.short_window2)
            k {float} -- weight given to the short-window slope in the average,
                typically 0.5
        Returns:
            float -- the weighted slope ratio
        '''
amp = lambda lis: (lis-lis[0])*10000.0
pShort = amp(price_lis[-window_length:])
pLong = amp(price_lis)
#compute the slope ratio
aveSlope = k*getSlope(pShort)+ (1-k)*getSlope(pLong)
return aveSlope
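    # Worked illustration (values made up): with k=0.5 the result is
    # 0.5*slope(short window) + 0.5*slope(full window), where each window is first
    # re-based to its starting price and scaled by 1e4 (roughly pips), so a larger
    # positive aveSlope means a steeper recent up-move.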
def set_invested_check_fixed(self,pair_dict,invested_bool,check_bool,fixed_bool):
pair_dict["invested"] = invested_bool
pair_dict["check"] = check_bool
pair_dict["fixed"] = fixed_bool
time.sleep(0.0)
def get_sl_tp(self,TreRev):
return self.SL_TP[TreRev]
def insert_metric(self,collection,pair_dict):
'''
default collection: index_USD_CNH
'''
short_window,long_window = self.s_l_window[pair_dict["stra"]]
post_metric = get_indicator(self.pairs,short_window,long_window,
None,None,pair_dict["short_slope"],pair_dict["long_slope"])
collection.insert_one(post_metric)
#----------------#
def buy_send_order(self,pd,side,price0,price1,TreRev):
logging.info("price02={0}".format(price0))
self.set_invested_check_fixed(pd,True,True,True)
fixSL, fixeTP = self.get_sl_tp(TreRev)
sl_b, tp_b= round(price0 - fixSL,5),round(price1 + fixeTP,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_b, tp_b,"Trends")
self.events.put(order)
pd["longShort"] = True
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price0
def sell_send_order(self,pd,side,price0,price1,TreRev):
logging.info("price01={0}".format(price1))
self.set_invested_check_fixed(pd,True,True,True)
fixSL, fixeTP = self.get_sl_tp(TreRev)
sl_s,tp_s = round(price1 + fixSL,5),round(price0 - fixeTP,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_s, tp_s,"Trends")
self.events.put(order)
pd["longShort"] = False
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price1
def logging_invested(self,priceLis,pd,sign):
TreRev = pd["stra"]
logging.info(TreRev+" position!")
#??? TODO 23:38 Oct 5, 2017
short_window = self.s_l_window[TreRev][0]
newPriceLis = get_new_price_lis(priceLis, pd, short_window)
basePrice=pd["priceLS"]+sign*self.lam*np.std(priceLis)*np.sqrt(pd["ticks"]-pd["tick0"])
logging.info( "basePrice="+str(basePrice))
logging.info( "short_sma"+str(pd["short_sma"]))
logging.info( "long_sma"+str(pd["long_sma"]))
aveSlope = self.compute_slope(newPriceLis,short_window, 0.5)
logging.info( "aveSlope="+str(aveSlope))
return aveSlope
def put_close_order(self,pairs,num):
'''
pairs,num = self.pairs,0
'''
order_closed = CloseEvent(pairs,num)
self.events.put(order_closed)
#--------------------------------------#
def open_trends_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] > pd["long_sma"] and aveSlope > thres)
def open_trends_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] < pd["long_sma"] and aveSlope < -thres)
def open_reversion_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] < pd["long_sma"] and aveSlope< -thres)
def open_reversion_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] > pd["long_sma"] and aveSlope> thres)
#-----------------------------------------------#
def close_trends_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["longShort"] and aveSlope < thres)
def close_trends_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (not pd["longShort"] and aveSlope > -thres)
def close_reversion_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["short_sma"] > pd["long_sma"]*(1+thres/100.0) and pd["longShort"])
def close_reversion_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["short_sma"] < pd["long_sma"]*(1-thres/100.0) and not pd["longShort"])
#--------------------------------------#
def calculate_signals(self, event):
#if True:
global liqIndex
global newPriceLis
if event.type == 'TICK':
price = (event.bid+event.ask)/2.000
self.priceLis1 = np.append(self.priceLis1,price)
self.priceLis2 = np.append(self.priceLis2,price)
if len(self.priceLis1)>max([self.long_window1,self.long_window2]):
self.priceLis1=self.priceLis1[-self.long_window1:]
self.priceLis2=self.priceLis2[-self.long_window2:]
else:
pass
#liqIndex= event.liq
logging.info("liqIndex= "+str(liqIndex)+"\n")
logging.info("price= "+str(price))
pd = self.pairs_dict
logging.info("check"+str(pd["check"]))
self.check_order(pd["check"]) #check whether the SLTP order is triggered..
# Only start the strategy when we have created an accurate short window
logging.info("INVESTED= "+str(pd["invested"]))
if not pd["invested"]:
#global price0
if pd["ticks"]>max([self.long_window1, self.long_window2])+1 and liqIndex > self.idxU:
if not pd["fixed"]:
critAdf = getADF(collection).priceADF(200,1)
if critAdf > self.adf_thres:
pd["stra"] = "reversion"
newPriceLis = get_new_price_lis(self.priceLis2, pd, self.short_window2)
aveSlope = self.compute_slope(newPriceLis,self.short_window2, 0.5)
logging.info( "REVERSION+aveSlope="+str(aveSlope))
self.insert_metric(index_collect,pd)
else:
pd["stra"] = "trends"
newPriceLis = get_new_price_lis(self.priceLis1, pd, self.short_window1)
aveSlope = self.compute_slope(newPriceLis,self.short_window1, 0.5)
logging.info("TRENDS+aveSlope="+str(aveSlope))
self.insert_metric(index_collect,pd)
else:
raise ValueError("pd[fixed] should be False!")
price0, price1 = event.bid, event.ask
if pd["stra"] =="trends":
if self.open_trends_buy(pd,aveSlope):
side = "buy"
self.buy_send_order(pd,side,price0,price1,pd["stra"])
elif self.open_trends_sell(pd,aveSlope):
side = "sell"
self.sell_send_order(pd,side,price0,price1,pd["stra"])
else:
pd["fixed"] = False
elif pd["stra"] =="reversion":
if self.open_reversion_sell(pd,aveSlope):
side = "sell"
self.sell_send_order(pd,side,price0,price1,pd["stra"])
elif self.open_reversion_buy(pd,aveSlope):
side = "buy"
self.buy_send_order(pd,side,price0,price1,pd["stra"])
else:
pd["fixed"] = False
else:
pass
else:
pass
elif pd["invested"]:
                sign = 1 if pd["longShort"] else -1
if pd["stra"] =="trends":
aveSlope = self.logging_invested(self.priceLis1,pd,sign)
self.insert_metric(index_collect,pd)
if self.close_trends_sell(pd,aveSlope):
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
elif self.close_trends_buy(pd,aveSlope):
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
else: #not closing positions, just keep the pd["fixed"] as True.
pd["fixed"] = True #should we add pd["invested"]
elif pd["stra"] =="reversion":
aveSlope=self.logging_invested(self.priceLis2,pd,sign)
self.insert_metric(index_collect,pd)
if self.close_reversion_sell(pd,aveSlope):
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
elif self.close_reversion_buy(pd,aveSlope):
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
else:
pd["fixed"] = True #should we add pd["invested"]
else:
pass
pd["ticks"] += 1
logging.info("current Tick "+str(pd["ticks"])+"\n"+str(time.ctime()))
#--------------------------------------------------------------------#
class Execution(object):
def __init__(self, domain, access_token, account_id):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.conn = self.obtain_connection()
def obtain_connection(self):
return httplib.HTTPSConnection(self.domain)
def execute_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseX = oanda0.create_order(self.account_id,
instrument=event.instrument,
units= event.units,
side= event.side,
type= event.order_type,
stopLoss = event.stopLoss,
takeProfit = event.takeProfit
)
except Exception as e:
            content0 = "Caught OandaError when sending the orders\n" + str(e)
logging.warning(content0)
return
logging.info( "Execute Order ! \n {0}".format(responseX))
content0 = str(event.stra)+"Execute Order ! "+" "+str(event.side)+" "+ str(event.units)+" units of "+str(event.instrument)
#warn.tradingWarning(content0)
logging.info(content0)
def close_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
response1= oanda0.get_trades(self.account_id,instrument=event.instrument)
order_lis= response1["trades"]
if order_lis !=[]:
for order in order_lis: #close all trades
responseX = oanda0.close_trade(self.account_id,trade_id= order['id'])
logging.info( "Close Order ! \n {0}".format(responseX))
content0 = "Close Order !" + "profit: "+str(responseX['profit'])+" CLOSE "+str(responseX['instrument'])
content0 = content0 + " "+str(responseX['side'])+" at "+ str(responseX['price'])
#warn.tradingWarning(content0)
else:
logging.warning("No trade to be closed! :{0}".format(time.ctime()))
#--------------------------------------------------------------------#
def trade(events, strategy,execution,heartbeat):
    """
    Event loop: pull events off the queue and dispatch them to the strategy
    (TICK), the execution handler (ORDER/CLOSE) or the liquidity index (LIQ).
    """
global liqIndex
while True:
try:
event = events.get(False)
except Queue.Empty:
pass
else:
if event is not None:
if event.type =='LIQ':
liqIndex= event.liq
#print "current index ="+str(liqIndex)
elif event.type == 'TICK':
strategy.calculate_signals(event)
logging.info( "Tick!")
elif event.type == 'ORDER':
logging.info( "Executing order!")
execution.execute_order(event)
elif event.type == "CLOSE":
logging.info( "Close trading!")
execution.close_order(event)
time.sleep(heartbeat)
#--------------------------------------------------------------------#
if __name__ == "__main__":
logPath,logName = log_dict[pairs]["path"],log_dict[pairs]["name"]
logging.basicConfig(filename= os.path.join(logPath,logName),
format='%(levelname)s:%(message)s',level=logging.DEBUG)
global liqIndex
liqIndex=0
ct = 20
gran ='M15'
time_dict = {
"S5": 5,
"S10": 10,
"S15": 15,
"S30": 30,
"M1": 60,
"M2": 120 }
dd = 11
lam= 0.1 #0.5 basePrice tuning
units = 100 #100
#----------Parameters----------------
short_window1= MA_dict['short_window1']
long_window1 = MA_dict['long_window1']
short_window2= MA_dict['short_window2']
long_window2 = MA_dict['long_window2']
idxu = threshold_dict['idxu']
thres1= threshold_dict['thres1']
thres2= threshold_dict['thres2']
thres3 = threshold_dict['thres3']
thres4= threshold_dict['thres4']
adf_thres = threshold_dict['adf_thres']
sl1 = sltp_dict['sl1'] #10
tp1 = sltp_dict['tp1'] #10
sl2 = sltp_dict['sl2'] #10
tp2 = sltp_dict['tp2'] #10
#--------------------------------------
heartbeat= 0.2
period= 600
print 'initial'
print('MA:\n sw1 {0} lw1 {1} sw2 {2} lw2 {3}'.format(short_window1, long_window1, short_window2, long_window2))
print('parameters:\n thres1 {0} thres2 {1} thres3 {2} thres4 {3}'.format(thres1,thres2,thres3,thres4))
print('sltp_parameters:\n {0} {1} {2} {3}'.format(sl1,tp1,sl2,tp2))
events = Queue.Queue()
# initial the threads
prices = StreamingForexPrices(STREAM_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
liquidity = LiqForex(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
execution = Execution(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID)
#strategy = MovingAverageCrossStrategy(pairs, units, events, sl, tp, short_window,long_window)
strategy = LiqMAStrategy(ACCESS_TOKEN, ACCOUNT_ID, pairs, units, events, sl1, tp1, sl2, tp2, short_window1,long_window1,
short_window2,long_window2,idxu,lam,thres1,thres2,thres3,thres4,adf_thres)
# construct the thread
price_thread = threading.Thread(target=prices.stream_to_queue_old, args=[collection])
liq_thread = threading.Thread(target= liquidity.activeLiq, args=[period])
trade_thread = threading.Thread(target=trade, args=(events, strategy,execution,heartbeat))
print "Full?:",events.full()
trade_thread.start()
price_thread.start()
liq_thread.start()
|
gpl-3.0
| -6,703,210,643,960,976,000 | 40.723502 | 130 | 0.533024 | false | 3.672526 | false | false | false |
catapult-project/catapult
|
dashboard/dashboard/pinpoint/models/cas.py
|
3
|
2757
|
# Copyright 2021 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Model for storing information to look up CAS from RBE.
A CASReference instance contains metadata that allows us to use RBE-CAS
digests when starting Swarming tasks.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
from google.appengine.ext import ndb
CAS_EXPIRY_DURATION = datetime.timedelta(days=88)
def Get(builder_name, change, target):
  """Retrieve a cas reference from the Datastore.
Args:
builder_name: The name of the builder that produced the cas reference.
change: The Change the cas reference was built at.
target: The compile target the cas reference is for.
Returns:
A tuple containing the cas_instance and cas_digest as strings.
"""
entity = ndb.Key(CASReference, _Key(builder_name, change, target)).get()
if not entity:
raise KeyError('No cas reference with builder %s, '
'change %s, and target %s.' %
(builder_name, change, target))
if entity.created + CAS_EXPIRY_DURATION < datetime.datetime.utcnow():
raise KeyError('Cas reference with builder %s, '
'change %s, and target %s was '
'found, but is expired.' % (builder_name, change, target))
return entity.cas_instance, entity.cas_digest
def Put(cas_references):
"""Add CASReference to the Datastore.
  This function takes multiple entries to do a batched Datastore put.
Args:
cas_references: An iterable of tuples. Each tuple is of the form
(builder_name, change, target, cas_instance, cas_digest).
"""
ndb.put_multi(
CASReference(
cas_instance=cas_instance,
cas_digest=cas_digest,
id=_Key(builder_name, change, target),
) for builder_name, change, target, cas_instance, cas_digest
in cas_references)
class CASReference(ndb.Model):
cas_instance = ndb.StringProperty(indexed=False, required=True)
cas_digest = ndb.StringProperty(indexed=False, required=True)
created = ndb.DateTimeProperty(auto_now_add=True)
# We can afford to look directly in Datastore here since we don't expect to
# make multiple calls to this at a high rate to benefit from being in
# memcache. This lets us clear out the cache in Datastore and not have to
# clear out memcache as well.
_use_memcache = False
_use_datastore = True
_use_cache = False
def _Key(builder_name, change, target):
# The key must be stable across machines, platforms,
# Python versions, and Python invocations.
return '\n'.join((builder_name, change.id_string, target))
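# Hypothetical usage sketch (not part of the original module): the Put-then-Get
# flow described in the docstrings above. The builder, target, instance and digest
# strings are made up, and `change` is assumed to be any object exposing an
# `id_string` attribute (as used by _Key). Requires an active Datastore/ndb context.
def _example_usage(change):
  Put([('linux-perf-builder', change, 'performance_test_suite',
        'projects/example/instances/default_instance', 'deadbeef/123')])
  return Get('linux-perf-builder', change, 'performance_test_suite')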
|
bsd-3-clause
| 5,540,963,464,776,257,000 | 33.037037 | 77 | 0.700762 | false | 3.921764 | false | false | false |
Boris-Barboris/rsoi
|
lab03/local_library/app_local_library/models.py
|
1
|
2941
|
from django.db import models
import json
import logging
from .clients import *
log = logging.getLogger('app_logging')
# Create your models here.
class Book(models.Model):
isbn = models.CharField(max_length=20, blank=False)
BORROWED = 'brwed'
FREE = 'free'
state_choices = (
(BORROWED, 'borrowed'),
(FREE, 'free'),
)
state = models.CharField(max_length=20, choices=state_choices, default=FREE)
borrow_id = models.IntegerField(null=True, default=None)
def to_dict(self):
return {
'id': self.id,
'isbn': self.isbn,
'state': self.state,
'borrow_id': self.borrow_id,
}
def to_json(self):
return json.dumps(self.to_dict())
# exceptions
class BookAlreadyExists(Exception):
def __init__(self, id):
Exception.__init__(self, 'Book id={} already exists'.format(id))
self.id = id
class PrintDoesNotExist(Exception):
def __init__(self, isbn):
        Exception.__init__(self, 'Print isbn={} does not exist'.format(isbn))
self.isbn = isbn
class AlreadyBorrowed(Exception):
def __init__(self, id):
Exception.__init__(self, 'Book id={} is already borrowed'.format(id))
self.id = id
class AlreadyFree(Exception):
def __init__(self, id):
Exception.__init__(self, 'Book id={} is already free'.format(id))
self.id = id
# model operations
def get_status(isbn):
free_books = Book.objects.filter(isbn=isbn).filter(state=Book.FREE)
log.debug('free_books = ' + str(free_books))
log.debug('free_books len = ' + str(len(free_books)))
if len(free_books) > 0:
return True
else:
return False
def create_book(id, isbn, me):
try:
book = Book.objects.get(id=id)
raise BookAlreadyExists(id)
except Book.DoesNotExist:
pass
# validate isbn
token = me['token']
br = book_registry_client(token)
p = br.list_prints(isbn=isbn)
if p['total'] == 0:
raise PrintDoesNotExist(isbn)
book = Book(id=id, isbn=isbn)
book.save()
return book
def delete_book(id):
book = Book.objects.get(id=id)
book.delete()
def get_book(id):
book = Book.objects.get(id=id)
return book
def get_books_isbn(isbn):
books = Book.objects.filter(isbn=isbn)
return books
def get_books():
return Book.objects.all()
def borrow_book(id, borrow_id):
book = Book.objects.get(id=id)
if book.state == Book.FREE:
book.borrow_id = borrow_id
book.state = Book.BORROWED
book.save()
else:
raise AlreadyBorrowed(id)
def return_book(id):
book = Book.objects.get(id=id)
if book.state == Book.BORROWED:
book.borrow_id = None
book.state = Book.FREE
book.save()
else:
raise AlreadyFree(id)
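# Hypothetical usage sketch (not part of the original module): the borrow/return
# flow built from the model operations above. The id, isbn and borrow_id values
# are made up, and `me` is assumed to be a dict carrying a 'token' for the book
# registry client (as expected by create_book).
def _example_flow(me):
    book = create_book(1, '978-3-16-148410-0', me)
    if get_status(book.isbn):      # at least one free copy of this print
        borrow_book(book.id, 42)   # mark it as borrowed under borrow id 42
    return_book(book.id)           # release it again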
|
mit
| 1,966,678,895,610,190,300 | 25.035398 | 80 | 0.584495 | false | 3.368843 | false | false | false |
EderSantana/seya
|
seya/parzen.py
|
1
|
2543
|
"""
This file was copied from pylearn2.distributions.parzen.py
Their license is BSD 3-clause: https://github.com/lisa-lab/pylearn2/
"""
import numpy
import theano
T = theano.tensor
def log_mean_exp(a):
"""
    We need the log-likelihood; this calculates the logarithm
    of a Parzen window.
"""
max_ = a.max(1)
return max_ + T.log(T.exp(a - max_.dimshuffle(0, 'x')).mean(1))
def make_lpdf(mu, sigma):
"""
    Makes a Theano function that allows the evaluation of a Parzen window
    estimator (aka kernel density estimator) where the kernel is a normal
distribution with stddev sigma and with points at mu.
Parameters
-----------
mu : numpy matrix
Contains the data points over which this distribution is based.
sigma : scalar
The standard deviation of the normal distribution around each data \
point.
Returns
-------
lpdf : callable
Estimator of the log of the probability density under a point.
"""
x = T.matrix()
mu = theano.shared(mu)
a = (x.dimshuffle(0, 'x', 1) - mu.dimshuffle('x', 0, 1)) / sigma
E = log_mean_exp(-0.5*(a**2).sum(2))
Z = mu.shape[1] * T.log(sigma * numpy.sqrt(numpy.pi * 2))
return theano.function([x], E - Z)
class ParzenWindows(object):
"""
Parzen Window estimation and log-likelihood calculator.
This is usually used to test generative models as follows:
1 - Get 10k samples from the generative model
    2 - Construct a ParzenWindows object with the samples from 1
3 - Test the log-likelihood on the test set
Parameters
----------
samples : numpy matrix
See description for make_lpdf
sigma : scalar
See description for make_lpdf
"""
def __init__(self, samples, sigma):
# just keeping these for debugging/examination, not needed
self._samples = samples
self._sigma = sigma
self.lpdf = make_lpdf(samples, sigma)
def get_ll(self, x, batch_size=10):
"""
Evaluates the log likelihood of a set of datapoints with respect to the
probability distribution.
Parameters
----------
x : numpy matrix
The set of points for which you want to evaluate the log \
likelihood.
"""
inds = range(x.shape[0])
n_batches = int(numpy.ceil(float(len(inds)) / batch_size))
lls = []
for i in range(n_batches):
lls.extend(self.lpdf(x[inds[i::n_batches]]))
return numpy.array(lls).mean()
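# Hypothetical usage sketch (not part of the original module), following the three
# steps listed in the ParzenWindows docstring. The sample counts, dimensionality
# and sigma below are made up; any two float arrays of matching width would do.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    floatX = theano.config.floatX
    model_samples = rng.randn(1000, 10).astype(floatX)  # step 1: samples from a generative model
    pw = ParzenWindows(model_samples, sigma=0.2)        # step 2: build the estimator
    test_set = rng.randn(100, 10).astype(floatX)
    print(pw.get_ll(test_set))                          # step 3: mean log-likelihood on held-out data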
|
bsd-3-clause
| -7,313,181,990,569,429,000 | 27.573034 | 79 | 0.616201 | false | 3.818318 | false | false | false |
cyphactor/lifecyclemanager
|
testenv/trac-0.10.4/trac/web/modpython_frontend.py
|
1
|
2761
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2005 Edgewall Software
# Copyright (C) 2004-2005 Christopher Lenz <[email protected]>
# Copyright (C) 2005 Matthew Good <[email protected]>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Christopher Lenz <[email protected]>
# Matthew Good <[email protected]>
from mod_python import apache
from trac.web.main import dispatch_request
from trac.web.wsgi import WSGIGateway, _ErrorsWrapper
class InputWrapper(object):
def __init__(self, req):
self.req = req
def close(self):
pass
def read(self, size=-1):
return self.req.read(size)
def readline(self, size=-1):
return self.req.readline(size)
def readlines(self, hint=-1):
return self.req.readlines(hint)
class ModPythonGateway(WSGIGateway):
wsgi_multithread = apache.mpm_query(apache.AP_MPMQ_IS_THREADED) > 0
wsgi_multiprocess = apache.mpm_query(apache.AP_MPMQ_IS_FORKED) > 0
def __init__(self, req, options):
environ = {}
environ.update(apache.build_cgi_env(req))
environ['mod_python.options'] = options
environ['mod_python.subprocess_env'] = req.subprocess_env
WSGIGateway.__init__(self, environ, InputWrapper(req),
_ErrorsWrapper(lambda x: req.log_error(x)))
self.req = req
def _send_headers(self):
assert self.headers_set, 'Response not started'
if not self.headers_sent:
status, headers = self.headers_sent = self.headers_set
self.req.status = int(status[:3])
for name, value in headers:
if name.lower() == 'content-length':
self.req.set_content_length(int(value))
elif name.lower() == 'content-type':
self.req.content_type = value
else:
self.req.headers_out.add(name, value)
def _sendfile(self, fileobj):
self._send_headers()
self.req.sendfile(fileobj.name)
def _write(self, data):
self._send_headers()
try:
self.req.write(data)
except IOError, e:
if 'client closed connection' not in str(e):
raise
def handler(req):
options = req.get_options()
gateway = ModPythonGateway(req, options)
gateway.run(dispatch_request)
return apache.OK
|
gpl-3.0
| -3,080,754,147,963,335,000 | 30.375 | 72 | 0.62912 | false | 3.62336 | false | false | false |
mhbashari/machine-learning-snippets
|
Basic/01-linear_regression_tensorflow.py
|
1
|
2015
|
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from numpy.core.multiarray import ndarray
__author__ = "mhbashari"
class LinearRegression:
def __init__(self, train_X: ndarray, train_Y: ndarray, learning_rate=0.001, training_epochs=100):
self.train_X = train_X
self.train_Y = train_Y
self.learning_rate = learning_rate
self.training_epochs = training_epochs
def fit(self):
x = tf.placeholder("float")
y = tf.placeholder("float")
a = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
pred = tf.multiply(x, a) + b
cost = tf.reduce_mean(tf.abs(pred - y))
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for epoch in range(self.training_epochs):
for i, out in zip(self.train_X, self.train_Y):
sess.run(optimizer, feed_dict={x: i, y: out})
print("Epoch:", '%04d' % (epoch + 1), "cost=", "W=", sess.run(a), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={x: self.train_X, y: self.train_Y})
print("Training cost=", training_cost, "a=", sess.run(a), "b=", sess.run(b), '\n')
return sess.run(a), sess.run(b)
def visualize(a, b, train_X: ndarray, train_Y: ndarray):
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, train_Y)
plt.plot(train_X, a * train_X + b, label='Fitted line')
plt.scatter(train_X, train_Y)
plt.legend()
plt.show()
def data_maker(num=80):
X = np.arange(0, num, dtype=np.float32)
Y = np.float32(np.ceil(5 * (np.sin(X) + X / 5)))
return X, Y
if __name__ == "__main__":
    data = data_maker()
    regression = LinearRegression(*data)
    visualize(*(regression.fit() + data))
|
mit
| -6,478,785,350,685,866,000 | 32.583333 | 104 | 0.585112 | false | 3.23435 | false | false | false |
QKaiser/pynessus
|
pynessus/nessus.py
|
1
|
26073
|
"""
Copyright 2014 Quentin Kaiser
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from httplib import HTTPSConnection, CannotSendRequest, ImproperConnectionState
import os
import json
import socket
import ssl
import errno
from xml.dom.minidom import parseString
from models.scan import Scan
from models.policy import Policy
from models.plugin import Plugin, PluginFamily, PluginRule
from models.user import User
from models.folder import Folder
from models.template import Template
from models.host import Host
from models.scanner import Scanner
from models.agent import Agent
from models.agentgroup import AgentGroup
from models.mail import Mail
from models.permission import Permission
from models.proxy import Proxy
from models.group import Group
from models.vulnerability import Vulnerability
class NessusAPIError(Exception):
pass
class Nessus(object):
"""
A Nessus Server instance.
"""
def __init__(self, url="", port=8834, verify=True):
"""
Constructor.
Params:
url(string): nessus server's url
port(int): nessus server's port
verify(bool): verify server's SSL cert if True
Returns:
"""
self._url = url
self._port = port
self._verify = verify
self._uuid = 0
self._connection = None
self._product = None
self._engine = None
self._web_ui = None
self._misc_settings = []
self._loaded_plugin_set = None
self._scanner_boottime = 0
self._idle_timeout = 0
self._plugin_set = None
self._plugins_lastupdated = 0
self._plugins_expiration = 0
self._web_server_version = None
self._expiration = None
self._nessus_ui_version = None
self._ec2 = None
self._nessus_type = None
self._capabilities = None
self._plugin_set = None
self._idle_timeout = None
self._scanner_boottime = None
self._server_version = None
self._feed = None
self._mail = None
self._proxy = None
# managing multiple user sessions
self._user = None
self._agents = []
self._agentgroups = []
self._schedules = []
self._policies = []
self._templates = []
self._scans = []
self._tags = []
self._folders = []
self._users = []
self._notifications = []
self._scanners = []
self._permissions = []
self._groups = []
self._plugin_families =[]
self._plugin_rules = []
self._plugins = []
self._headers = {
"Content-type": "application/json",
"Accept": "application/json"
}
def Agent(self):
return Agent(self)
def AgentGroup(self):
return AgentGroup(self)
def Scan(self):
return Scan(self)
def Host(self):
return Host(self)
def Policy(self):
return Policy(self)
def Plugin(self):
return Plugin(self)
def PluginFamily(self):
return PluginFamily(self)
def PluginRule(self):
return PluginRule(self)
def Schedule(self):
return Schedule(self)
def Scanner(self):
return Scanner(self)
def User(self, username=None, password=None):
return User(self, username, password)
def Folder(self):
return Folder(self)
def Template(self):
return Template(self)
def Mail(self):
return Mail(self)
def Permission(self):
return Permission(self)
def Proxy(self):
return Proxy(self)
def Group(self):
return Group(self)
def Vulnerability(self):
return Vulnerability(self)
def _request(self, method, target, params, headers=None):
"""
Send an HTTP request.
Params:
method(string): HTTP method (i.e. GET, POST, PUT, DELETE, HEAD)
target(string): target path (i.e. /schedule/new)
params(string): HTTP parameters
headers(array): HTTP headers
Returns:
Response body if successful, None otherwise.
"""
try:
if self._connection is None:
if not self._verify:
ctx = ssl._create_unverified_context()
self._connection = HTTPSConnection(self._url, self._port, context=ctx)
else:
self._connection = HTTPSConnection(self._url, self._port)
self._connection.request(method, target, params, self._headers if headers is None else headers)
except CannotSendRequest:
self._connection = HTTPSConnection(self._url, self._port)
self.login(self._user)
self._request(method, target, params, self._headers)
except ImproperConnectionState:
self._connection = HTTPSConnection(self._url, self._port)
self.login(self._user)
self._request(method, target, params, self._headers)
except socket.error as serr:
if serr.errno != errno.ECONNREFUSED:
# Not the error we are looking for, re-raise
raise serr
else:
raise Exception("Can't connect to Nessus at https://%s:%s" % (self._url, self._port))
response = self._connection.getresponse()
if response.status == 200:
return response.read()
else:
raise Exception(response.read())
def _api_request(self, method, target, params=None):
"""
Send a request to the Nessus REST API.
Params:
method(string): HTTP method (i.e. GET, PUT, POST, DELETE, HEAD)
target(string): target path (i.e. /schedule/new)
params(dict): HTTP parameters
Returns:
dict: parsed dict from json answer, None if no content.
"""
if not params:
params = {}
raw_response = self._request(method, target, json.dumps(params))
if raw_response is not None and len(raw_response):
response = json.loads(raw_response)
if response is not None and "error" in response:
raise NessusAPIError(response["error"])
return response
return None
@staticmethod
def _encode(filename):
"""
Encode filename content into a multipart/form-data data string.
Params:
filename(string): filename of the file that will be encoded.
Returns:
string: multipart/form-data data string
"""
boundary = '----------bundary------'
crlf = '\r\n'
body = []
with open(filename, "rb") as f:
body.extend(
[
'--' + boundary,
'Content-Disposition: form-data; name="Filedata"; filename="%s"' % (os.path.basename(filename)),
'Content-Type: text/xml',
'',
f.read(),
]
)
body.extend(['--' + boundary + '--', ''])
return 'multipart/form-data; boundary=%s' % boundary, crlf.join(body)
def login(self, user):
"""
Log into Nessus server with provided user profile.
Args:
user (User): user instance
Returns:
bool: True if successful login, False otherwise.
Raises:
"""
if self.server_version[0] != "6":
raise Exception("This version of Nessus is not supported by pynessus. \nIf you absolutely need to use "
"pynessus with Nessus 5.x, please follow the instructions"
"available on the git repository (https://github.com/qkaiser/pynessus)")
params = {'username': user.username, 'password': user.password}
response = self._api_request("POST", "/session", params)
if response is not None:
if "status" in response:
raise Exception(response["status"])
self._user = user
self._user.token = response['token']
# Persist token value for subsequent requests
self._headers["X-Cookie"] = 'token=%s' % (response['token'])
return True
else:
return False
def logout(self):
"""
Log out of the Nessus server, invalidating the current token value.
Returns:
bool: True if successful login, False otherwise.
"""
self._request("DELETE", "/session", [])
return True
@property
def status(self):
"""
Return the Nessus server status.
Params:
Returns
"""
response = self._api_request("GET", "/server/status", "")
if response is not None:
return response["status"]
else:
return "unknown"
def load(self):
"""
Load Nessus.
Returns:
            bool: True if loaded successfully, False otherwise.
"""
success = True
success &= self.load_properties()
success &= self.load_mail()
success &= self.load_proxy()
success &= self.load_scanners()
success &= self.load_agents()
success &= self.load_agentgroups()
success &= self.load_policies()
success &= self.load_scans()
success &= self.load_folders()
success &= self.load_templates()
success &= self.load_users()
#success &= self.load_groups()
#success &= self.load_plugin_families()
#success &= self.load_plugin_rules()
return success
def load_plugin_families(self):
"""
:return:
"""
response = self._api_request("GET", "/plugins/families", "")
if response is not None and "families" in response:
for family in response["families"]:
p = self.PluginFamily()
p.id = family["id"]
p.name = family["name"]
p.plugin_count = family["count"]
p.load_plugins()
self._plugin_families.append(p)
return True
def load_plugin_rules(self):
"""
:return:
"""
response = self._api_request("GET", "/plugin-rules", "")
if "plugin_rules" in response and response["plugin_rules"] is not None:
for p in response["plugin_rules"]:
plugin_rule = self.PluginRule()
plugin_rule.id = p["id"]
plugin_rule.plugin_id = p["plugin_id"]
plugin_rule.date = p["date"]
plugin_rule.host = p["host"]
plugin_rule.type = p["type"]
plugin_rule.owner = p["owner"]
plugin_rule.owner_id = p["owner_id"]
self._plugin_rules.append(plugin_rule)
return True
def load_groups(self):
"""
:return:
"""
response = self._api_request("GET", "/groups")
if "groups" in response and response["groups"] is not None:
for g in response["groups"]:
group = self.Group()
group.id = g["id"]
group.name = g["name"]
group.user_count = g["user_count"]
group.permissions = g["permissions"]
self._groups.append(group)
return True
def load_agents(self):
"""
:return:
"""
for scanner in self._scanners:
response = self._api_request("GET", "/scanners/%d/agents" % scanner.id)
if "agents" in response and response["agents"] is not None:
for a in response["agents"]:
agent = self.Agent()
agent.distros = a["distros"]
agent.id = a["id"]
agent.ip = a["ip"]
agent.last_scanned = a["last_scanned"]
agent.name = a["name"]
agent.platform = a["platform"]
agent.token = a["token"]
agent.uuid = a["uuid"]
agent.scanner_id = scanner.id
self._agents.append(agent)
return True
def load_agentgroups(self):
"""
:return:
"""
for scanner in self._scanners:
response = self._api_request("GET", "/scanners/%d/agent-groups" % scanner.id)
if "groups" in response and response["groups"] is not None:
for g in response["groups"]:
group = self.AgentGroup()
group.id = g["id"]
group.name = g["name"]
group.owner_id = g["owner_id"]
group.owner = g["owner"]
group.shared = g["shared"]
group.user_permissions = g["user_permissions"]
group.creation_date = g["creation_date"]
group.last_modification_date = g["last_modification_date"]
self._agentgroups.append(group)
return True
def load_properties(self):
"""
Load Nessus server properties.
Returns:
            bool: True if loaded successfully, False otherwise.
"""
response = self._api_request("GET", "/server/properties?json=1", {})
if response is not None:
self._loaded_plugin_set = response["loaded_plugin_set"]
self._uuid = response["server_uuid"]
self._expiration = response["expiration"]
self._nessus_ui_version = response["nessus_ui_version"]
self._nessus_type = response["nessus_type"]
self._notifications = []
for notification in response["notifications"]:
self._notifications.append(notification)
self._capabilities = response["capabilities"]
self._plugin_set = response["plugin_set"]
self._idle_timeout = response["idle_timeout"]
self._scanner_boottime = response["scanner_boottime"]
self._server_version = response["server_version"]
return True
else:
return False
def load_mail(self):
self._mail = self.Mail()
return self._mail.load()
def load_proxy(self):
self._proxy = self.Proxy()
return self._proxy.load()
def load_templates(self):
"""
Load Nessus server's scan templates.
Params:
Returns:
            bool: True if loaded successfully, False otherwise.
"""
response = self._api_request("GET", "/editor/scan/templates", "")
self._templates = []
if "templates" in response:
for t in response["templates"]:
template = self.Template()
template.uuid = t["uuid"]
template.title = t["title"]
template.name = t["name"]
template.description = t["desc"]
template.more_info = t["more_info"] if "more_info" in t else None
template.cloud_only = t["cloud_only"]
template.subscription_only = t["subscription_only"]
self._templates.append(template)
return True
def load_scanners(self):
"""
:return:
"""
response = self._api_request("GET", "/scanners")
if "scanners" in response:
for s in response["scanners"]:
scanner = self.Scanner()
scanner.id = s["id"]
scanner.uuid = s["uuid"]
scanner.name = s["name"]
scanner.type = s["type"]
scanner.status = s["status"]
scanner.scan_count = s["scan_count"]
scanner.engine_version = s["engine_version"]
scanner.platform = s["platform"]
scanner.loaded_plugin_set = s["loaded_plugin_set"]
scanner.registration_code = s["registration_code"]
scanner.owner = s["owner"]
self._scanners.append(scanner)
return True
def load_scans(self, tag_id=None):
"""
Load Nessus server's scans. Load scans from a specific tag if tag_id is provided.
Params:
tag_id(int, optional): Tag's identification number.
Returns:
            bool: True if loaded successfully, False otherwise.
"""
response = self._api_request("GET", "/scans", "")
self._scans = []
if "scans" in response and response["scans"] is not None:
for s in response["scans"]:
scan = self.Scan()
scan.status = s["status"]
scan.name = s["name"]
scan.read = s["read"]
scan.last_modification_date = s["last_modification_date"]
scan.creation_date = s["creation_date"]
scan.user_permissions = s["user_permissions"]
scan.shared = s["shared"]
scan.id = s["id"]
scan.template = self.Template()
scan.template.uuid = s["uuid"]
scan.folder = self.Folder()
scan.folder.id = s["folder_id"]
for user in self.users:
if user.id == s["owner_id"]:
scan.owner = user
self._scans.append(scan)
return True
def load_folders(self):
"""
Params:
Returns:
"""
response = self._api_request("GET", "/folders")
if "folders" in response:
self._folders = []
for result in response["folders"]:
f = self.Folder()
f.id = result["id"]
f.type = result["type"] if "type" in result else "local"
f.custom = result["custom"]
f.default_tag = result["default_tag"]
f.name = result["name"]
f.unread_count = result["unread_count"] if "unread_count" in result else 0
self._folders.append(f)
return True
else:
return False
def load_policies(self):
"""
Load Nessus server's policies.
Params:
Returns:
            bool: True if loaded successfully, False otherwise.
"""
response = self._api_request("GET", "/policies")
if "policies" in response and response["policies"] is not None:
self._policies = []
for result in response['policies']:
policy = self.Policy()
policy.id = result["id"]
policy.template_uuid = result["template_uuid"]
policy.name = result["name"]
policy.owner = result["owner"]
policy.creation_date = result["creation_date"]
policy.no_target = result["no_target"] if "no_target" in result else False
policy.visibility = result["visibility"]
policy.shared = result["shared"]
policy.user_permissions = result["user_permissions"]
policy.last_modification_date = result["last_modification_date"]
policy.creation_date = result["creation_date"]
self._policies.append(policy)
return True
def load_users(self):
"""
Load Nessus server's users.
Params:
Returns:
            bool: True if loaded successfully, False otherwise.
"""
response = self._api_request("GET", "/users")
if "users" in response:
users = []
for result in response["users"]:
user = self.User()
user.last_login = result["lastlogin"]
user.permissions = result["permissions"]
user.type = result["type"]
user.name = result["name"]
user.username = result["username"]
user.id = result["id"]
users.append(user)
self._users = users
return True
else:
return False
def upload_file(self, filename):
"""
Upload the file identified by filename to the server.
Params:
filename(string): file path
Returns:
bool: True if successful, False otherwise.
"""
if not os.path.isfile(filename):
raise Exception("This file does not exist.")
else:
content_type, body = self._encode(filename)
headers = self._headers
headers["Content-type"] = content_type
response = json.loads(self._request("POST", "/file/upload", body, self._headers))
if "fileuploaded" in response:
return response["fileuploaded"]
else:
return False
def import_policy(self, filename):
"""
Import an existing policy uploaded using Nessus.file (.nessus format only).
Params:
Returns:
"""
uploaded_file = self.upload_file(filename)
if uploaded_file:
response = self._api_request(
"POST",
"/policies/import",
{"file": uploaded_file}
)
return True if response is None else False
else:
raise Exception("An error occured while uploading %s." % filename)
def import_scan(self, filename, folder_id=None, password=None):
"""
        Import an existing scan uploaded using Nessus.file (.nessus format only).
Params:
filename(str):
folder_id(int):
password(str):
Returns:
"""
uploaded_file = self.upload_file(filename)
if uploaded_file:
params = {"file": uploaded_file}
if folder_id is not None:
params["folder_id"] = folder_id
if password is not None:
params["password"] = password
response = self._api_request(
"POST",
"/scans/import",
params
)
return True if response is None else False
@property
def server_version(self):
if self._server_version is None:
if "404 File not found" not in self._request("GET", "/nessus6.html", ""):
self._server_version = "6.x"
elif self._request("GET", "/html5.html", "") is not None:
self._server_version = "5.x"
else:
self._server_version = "unknown"
return self._server_version
@property
def agents(self):
if self._agents is None:
self.load_agents()
return self._agents
@property
def agentgroups(self):
if self._agentgroups is None:
            self.load_agentgroups()
return self._agentgroups
@property
def scanners(self):
if not len(self._scanners):
self.load_scanners()
return self._scanners
@property
def scans(self):
if self._scans is None:
self.load_scans()
return self._scans
@property
def policies(self):
if self._policies is None:
self.load_policies()
return self._policies
@property
def users(self):
if self._users is None:
self.load_users()
return self._users
@property
def tags(self):
if self._tags is None:
self.load_tags()
return self._tags
@property
def templates(self):
if not len(self._templates):
self.load_templates()
return self._templates
@property
def mail(self):
return self._mail
@property
def proxy(self):
return self._proxy
@property
def folders(self):
if not len(self._folders):
self.load_folders()
return self._folders
@property
def groups(self):
return self._groups
@property
def user(self):
return self._user
@property
def plugin_families(self):
return self._plugin_families
@property
def plugin_rules(self):
return self._plugin_rules
@policies.setter
def policies(self, value):
self._policies = value
@scans.setter
def scans(self, value):
self._scans = value
@tags.setter
def tags(self, value):
self._tags = value
@users.setter
def users(self, value):
self._users = value
@templates.setter
def templates(self, value):
self._templates = value
@scanners.setter
def scanners(self, value):
self._scanners = value
@agents.setter
def agents(self, value):
self._agents = value
@agentgroups.setter
def agentgroups(self, value):
self._agentgroups = value
@mail.setter
def mail(self, value):
if isinstance(value, Mail):
self._mail = value
else:
raise Exception("Not a Mail instance")
@proxy.setter
def proxy(self, value):
if isinstance(value, Proxy):
self._proxy = value
else:
raise Exception("Not a Proxy instance")
@folders.setter
def folders(self, value):
self._folders = value
@groups.setter
def groups(self, value):
self._groups = value
@user.setter
def user(self, value):
if isinstance(value, User):
self._user = value
else:
raise Exception("Not a User instance")
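# Hypothetical usage sketch (not part of the original module): connect to a server,
# log in and list scans through the wrapper above. The host, port, credentials and
# verify flag are made-up placeholders.
if __name__ == "__main__":
    nessus = Nessus(url="localhost", port=8834, verify=False)
    if nessus.login(nessus.User("admin", "password")):
        nessus.load_scans()
        for scan in nessus.scans:
            print("%s [%s]" % (scan.name, scan.status))
        nessus.logout()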
|
apache-2.0
| -6,025,734,581,301,239,000 | 31.149199 | 116 | 0.536225 | false | 4.517152 | false | false | false |
sergeneren/anima
|
anima/env/mayaEnv/relax_vertices.py
|
1
|
1428
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
"""Relax Vertices by Erkan Ozgur Yilmaz
Relaxes vertices without shrinking/expanding the geometry.
Version History
---------------
v0.1.1
- script works with all kind of components
v0.1.0
- initial working version
"""
import pymel.core as pm
__version__ = "0.1.1"
def relax():
# check the selection
selection = pm.ls(sl=1)
if not selection:
return
# convert the selection to vertices
verts = pm.ls(pm.polyListComponentConversion(tv=1))
if not verts:
return
shape = verts[0].node()
# duplicate the geometry
dup = shape.duplicate()[0]
dup_shape = dup.getShape()
# now relax the selected vertices of the original shape
pm.polyAverageVertex(verts, i=1, ch=0)
# now transfer point positions using transferAttributes
ta_node = pm.transferAttributes(
dup,
verts,
transferPositions=True,
transferNormals=False,
transferUVs=False,
transferColors=False,
sampleSpace=0,
searchMethod=0,
flipUVs=False,
colorBorders=1,
)
# delete history
pm.delete(shape, ch=1)
# delete the duplicate surface
pm.delete(dup)
# reselect selection
pm.select(selection)
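# Hypothetical usage sketch (not part of the original script): run inside Maya with
# some mesh components selected; the object/component name below is made up.
def _example():
    pm.select('pSphere1.vtx[0:50]')
    relax()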
|
bsd-2-clause
| 2,163,826,030,786,291,200 | 20.636364 | 68 | 0.64916 | false | 3.578947 | false | false | false |
corbt/pypeline
|
setup.py
|
1
|
1413
|
import os
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
with open(os.path.join(os.path.dirname(__file__), 'pypeline', '_version.py')) as f:
exec(f.read(), globals(), locals())
long_description = ''
if os.path.exists('README.txt'):
long_description = open('README.txt').read()
setup(
name = "pypeline-db",
version = __version__,
author = "Kyle Corbitt",
author_email = "[email protected]",
description = "A database for processing and storing datasets based on LevelDB",
license = "MIT",
keywords = "levelDB big data data science",
url = "https://github.com/kcorbitt/pypeline",
packages=['pypeline'],
long_description=long_description,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Intended Audience :: Science/Research",
"Operating System :: POSIX",
"Topic :: Utilities",
"Topic :: Database",
"Topic :: Scientific/Engineering",
],
install_requires=['plyvel']
)
|
mit
| -2,353,462,996,388,564,500 | 32.642857 | 84 | 0.635527 | false | 3.748011 | false | false | false |