Dataset row schema (one row per source file):
repo_name (string, 6-61 chars) | path (string, 4-230 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | text (string, 1.01k-850k chars) | license (15 classes) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
cjlee112/spnet | spnet/rest.py | 1 | 6279 |
import cherrypy
import glob
import os.path
from base import IdString
import view
def request_tuple():
mimeType = 'html'
try:
accept = cherrypy.request.headers['Accept']
if 'application/json' in accept or accept == '*/*':
mimeType = 'json'
except KeyError:
pass
return cherrypy.request.method, mimeType
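# Illustrative note (added; not in the original module): with the logic above, a GET
# request sent with "Accept: application/json" makes request_tuple() return
# ('GET', 'json'), while one sent with a plain "Accept: text/html" header returns
# ('GET', 'html').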
class Response(object):
'_GET etc. methods can return this to pass back HTML output'
def __init__(self, content):
self.content = content
def __call__(self):
return self.content
class Redirect(Response):
'_GET etc. methods can return this to force redirection to a URL'
def __call__(self):
return view.redirect(self.content)
class Collection(object):
'''subclass this by adding the following kinds of methods:
1. HTTP verbs, e.g. GET, POST, DELETE, as follows
_POST(self, docID, **kwargs): create the specified document.
_search(self, **kwargs): search the collection based on kwargs.
2. representation generators for a specific verb and mimeType, e.g.
get_html(self, doc, **kwargs): for a GET request,
return HTML representation of the doc object.
This will typically be a renderer of a Jinja2 template.
'''
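    # Illustrative sketch (added comment; not part of the original class): a minimal
    # subclass following the docstring above might look like the following, where
    # PaperDocument and jinjaEnv are hypothetical stand-ins for the application's own
    # document class and Jinja2 template environment:
    #
    #   class PaperCollection(Collection):
    #       def _POST(self, docID, **kwargs):
    #           return Redirect('/papers/%s' % docID)
    #       def get_html(self, doc, **kwargs):
    #           return '<h1>%s</h1>' % getattr(doc, 'title', '')
    #
    #   papers = PaperCollection('paper', PaperDocument, templateEnv=jinjaEnv)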
def __init__(self, name, klass, templateEnv=None, templateDir='_templates',
docArgs=None, collectionArgs=None, **templateArgs):
self.name = name
self.klass = klass
if docArgs is None:
docArgs = {}
self.docArgs = docArgs
self.collectionArgs = collectionArgs
if templateEnv: # load our template files
self.bind_templates(templateEnv, templateDir, **templateArgs)
def default(self, docID=None, *args, **kwargs):
'process all requests for this collection'
try:
method, mimeType = request_tuple()
if docID: # a specific document from this collection
docID = IdString(docID) # implements proper cmp() vs. ObjectId
invalidResponse = self.check_permission(method, docID, *args,
**kwargs)
if invalidResponse:
return invalidResponse
if not args: # perform the request
return self._request(method, mimeType, docID, **kwargs)
else: # pass request on to subcollection
try:
subcoll = getattr(self, args[0])
except AttributeError:
return view.report_error('no such subcollection: %s.%s'
% (self.name, args[0]), 404)
try:
parents = kwargs['parents'].copy()
except KeyError:
parents = {}
try:
parents[self.name] = self._GET(docID, parents=parents)
except KeyError:
return view.report_error('invalid ID: %s' % docID, 404,
"""Sorry, the data ID %s that
you requested does not exist in the database.
Please check whether you have the correct ID.""" % docID)
kwargs['parents'] = parents # pass dict of parents
return subcoll.default(*args[1:], **kwargs)
elif method == 'GET': # search the collection
return self._request('search', mimeType, **kwargs)
else:
return view.report_error('REST does not permit collection-%s'
% method, 405)
except Exception:
return view.report_error('REST collection error', 500)
default.exposed = True
def _request(self, method, mimeType, *args, **kwargs):
'dispatch to proper handler method, or return appropriate error'
try: # do we support this method?
action = getattr(self, '_' + method)
except AttributeError:
return view.report_error('%s objects do not allow %s'
% (self.name, method), 405)
try: # execute the request
o = action(*args, **kwargs)
except KeyError:
return view.report_error('Not found: %s: args=%s, kwargs=%s'
% (self.name, str(args), str(kwargs)), status=404,
webMsg="""Sorry, the data that
you requested does not exist in the database.
Please check whether you have the correct ID or spelling.""")
if isinstance(o, Response):
return o() # send the redirect
try: # do we support this mimeType?
viewFunc = getattr(self, method.lower() + '_' + mimeType)
except AttributeError:
return view.report_error('%s objects cannot return %s'
% (self.name, mimeType), 406)
try:
return viewFunc(o, **kwargs)
except Exception:
return view.report_error('view function error', 500)
def _GET(self, docID, parents={}, **kwargs):
'default GET method'
kwargs.update(self.docArgs)
if not parents: # works with documents with unique ID
return self.klass(docID, **kwargs)
elif len(parents) == 1: # works with ArrayDocument
return self.klass((parents.values()[0]._id, docID), **kwargs)
else: # multiple parents
return self.klass(docID, parents=parents, **kwargs)
def bind_templates(self, env, dirpath, **kwargs):
'''load template files of the form get_paper.html, bind as
attrs of the form get_html'''
for fname in glob.glob(os.path.join(dirpath,
'*_%s.html' % self.name)):
basename = os.path.basename(fname)
template = env.get_template(basename)
methodName = basename.split('_')[0] + '_html'
v = view.TemplateView(template, self.name, **kwargs)
setattr(self, methodName, v)
def check_permission(self, method, docID, *args, **kwargs):
'this authentication stub allows all requests'
return False
| gpl-2.0 | -6,523,738,228,163,122,000 | 42.909091 | 79 | 0.555821 | false | 4.637371 | false | false | false |
Sylrob434/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/googleplus.py | 9 | 3200 |
# coding: utf-8
from __future__ import unicode_literals
import datetime
import re
from .common import InfoExtractor
from ..utils import (
ExtractorError,
)
class GooglePlusIE(InfoExtractor):
IE_DESC = 'Google Plus'
_VALID_URL = r'https://plus\.google\.com/(?:[^/]+/)*?posts/(?P<id>\w+)'
IE_NAME = 'plus.google'
_TEST = {
'url': 'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
'info_dict': {
'id': 'ZButuJc6CtH',
'ext': 'flv',
'upload_date': '20120613',
'uploader': '井上ヨシマサ',
'title': '嘆きの天使 降臨',
}
}
def _real_extract(self, url):
# Extract id from URL
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
# Step 1, Retrieve post webpage to extract further information
webpage = self._download_webpage(url, video_id, 'Downloading entry webpage')
self.report_extraction(video_id)
# Extract update date
upload_date = self._html_search_regex(
r'''(?x)<a.+?class="o-U-s\s[^"]+"\s+style="display:\s*none"\s*>
([0-9]{4}-[0-9]{2}-[0-9]{2})</a>''',
webpage, 'upload date', fatal=False, flags=re.VERBOSE)
if upload_date:
# Convert timestring to a format suitable for filename
upload_date = datetime.datetime.strptime(upload_date, "%Y-%m-%d")
upload_date = upload_date.strftime('%Y%m%d')
# Extract uploader
uploader = self._html_search_regex(r'rel\="author".*?>(.*?)</a>',
webpage, 'uploader', fatal=False)
# Extract title
# Get the first line for title
video_title = self._og_search_description(webpage).splitlines()[0]
# Step 2, Simulate clicking the image box to launch video
DOMAIN = 'https://plus.google.com/'
video_page = self._search_regex(r'<a href="((?:%s)?photos/.*?)"' % re.escape(DOMAIN),
webpage, 'video page URL')
if not video_page.startswith(DOMAIN):
video_page = DOMAIN + video_page
webpage = self._download_webpage(video_page, video_id, 'Downloading video page')
# Extract video links all sizes
pattern = r'\d+,\d+,(\d+),"(http\://redirector\.googlevideo\.com.*?)"'
mobj = re.findall(pattern, webpage)
if len(mobj) == 0:
raise ExtractorError('Unable to extract video links')
# Sort in resolution
links = sorted(mobj)
# Choose the last entry of the sort, i.e. the highest resolution
video_url = links[-1]
# Only get the url. The resolution part in the tuple has no use anymore
video_url = video_url[-1]
# Treat escaped \u0026 style hex
try:
video_url = video_url.decode("unicode_escape")
except AttributeError: # Python 3
video_url = bytes(video_url, 'ascii').decode('unicode-escape')
return {
'id': video_id,
'url': video_url,
'uploader': uploader,
'upload_date': upload_date,
'title': video_title,
'ext': 'flv',
}
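# Illustrative usage sketch (added comment; not part of the original file): this
# extractor is normally driven through youtube-dl's public API rather than called
# directly. Using the URL from the _TEST case above:
#
#   import youtube_dl
#   ydl = youtube_dl.YoutubeDL({'skip_download': True})
#   info = ydl.extract_info(
#       'https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH',
#       download=False)
#   print(info['title'])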
| gpl-3.0 | 6,297,759,568,644,632,000 | 33.5 | 93 | 0.559861 | false | 3.627429 | false | false | false |
mainakibui/kobocat | onadata/apps/main/migrations/0012_auto__add_unique_metadata_xform_data_type_data_value.py | 3 | 10615 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding unique constraint on 'MetaData', fields ['xform', 'data_type', 'data_value']
M = orm['main.metadata']
for x in M.objects.all():
dupes = M.objects.filter(xform_id=x.xform_id, data_type=x.data_type, data_value=x.data_value)
if dupes.count() > 1:
for dupe in dupes[1:]:
print 'Deleting duplicate MetaData', dupe.xform_id, dupe.data_type, dupe.data_value
dupe.delete()
partial_dupes = M.objects.filter(xform_id=x.xform_id, data_type=x.data_type)
if partial_dupes.count() > 1:
print 'Partially duplicate MetaData:\n\t{}'.format('\n\t'.join(map(str, partial_dupes.values_list('xform_id', 'data_type', 'data_value'))))
db.create_unique(u'main_metadata', ['xform_id', 'data_type', 'data_value'])
def backwards(self, orm):
# Removing unique constraint on 'MetaData', fields ['xform', 'data_type', 'data_value']
db.delete_unique(u'main_metadata', ['xform_id', 'data_type', 'data_value'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'), ('user', 'sms_id_string'))", 'object_name': 'XForm'},
'allows_sms': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bamboo_dataset': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '60'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "u''", 'null': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'encrypted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_start_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '100'}),
'instances_with_geopoints': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'json': ('django.db.models.fields.TextField', [], {'default': "u''"}),
'last_submission_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'shared_data': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sms_id_string': ('django.db.models.fields.SlugField', [], {'default': "''", 'max_length': '100'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': u"orm['auth.User']"}),
'uuid': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '32'}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'main.metadata': {
'Meta': {'unique_together': "(('xform', 'data_type', 'data_value'),)", 'object_name': 'MetaData'},
'data_file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'data_file_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'data_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'data_value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['logger.XForm']"})
},
'main.tokenstoragemodel': {
'Meta': {'object_name': 'TokenStorageModel'},
'id': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'google_id'", 'primary_key': 'True', 'to': u"orm['auth.User']"}),
'token': ('django.db.models.fields.TextField', [], {})
},
'main.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'address': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'home_page': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'num_of_submissions': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'organization': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'phonenumber': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'require_auth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'twitter': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
complete_apps = ['main']
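# Illustrative note (added comment; not part of the original migration): with South
# installed, this schema migration is applied with the standard management command,
# e.g. `python manage.py migrate main 0012`, and reversed by migrating back to 0011,
# which runs backwards() and drops the unique constraint again.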
| bsd-2-clause | -6,083,967,920,070,348,000 | 75.366906 | 187 | 0.550824 | false | 3.583727 | false | false | false |
indeedops/dd-agent | checks.d/elastic.py | 2 | 31510 |
# (C) Datadog, Inc. 2010-2016
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
# stdlib
from collections import defaultdict, namedtuple
import time
import urlparse
# 3p
import requests
# project
from checks import AgentCheck
from config import _is_affirmative
from util import headers
class NodeNotFound(Exception):
pass
ESInstanceConfig = namedtuple(
'ESInstanceConfig', [
'pshard_stats',
'cluster_stats',
'password',
'service_check_tags',
'tags',
'timeout',
'url',
'username',
'pending_task_stats',
'ssl_verify',
'ssl_cert',
'ssl_key',
])
class ESCheck(AgentCheck):
SERVICE_CHECK_CONNECT_NAME = 'elasticsearch.can_connect'
SERVICE_CHECK_CLUSTER_STATUS = 'elasticsearch.cluster_health'
DEFAULT_TIMEOUT = 5
# Clusterwise metrics, pre aggregated on ES, compatible with all ES versions
PRIMARY_SHARD_METRICS = {
"elasticsearch.primaries.docs.count": ("gauge", "_all.primaries.docs.count"),
"elasticsearch.primaries.docs.deleted": ("gauge", "_all.primaries.docs.deleted"),
"elasticsearch.primaries.store.size": ("gauge", "_all.primaries.store.size_in_bytes"),
"elasticsearch.primaries.indexing.index.total": ("gauge", "_all.primaries.indexing.index_total"),
"elasticsearch.primaries.indexing.index.time": ("gauge", "_all.primaries.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.index.current": ("gauge", "_all.primaries.indexing.index_current"),
"elasticsearch.primaries.indexing.delete.total": ("gauge", "_all.primaries.indexing.delete_total"),
"elasticsearch.primaries.indexing.delete.time": ("gauge", "_all.primaries.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.indexing.delete.current": ("gauge", "_all.primaries.indexing.delete_current"),
"elasticsearch.primaries.get.total": ("gauge", "_all.primaries.get.total"),
"elasticsearch.primaries.get.time": ("gauge", "_all.primaries.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.current": ("gauge", "_all.primaries.get.current"),
"elasticsearch.primaries.get.exists.total": ("gauge", "_all.primaries.get.exists_total"),
"elasticsearch.primaries.get.exists.time": ("gauge", "_all.primaries.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.get.missing.total": ("gauge", "_all.primaries.get.missing_total"),
"elasticsearch.primaries.get.missing.time": ("gauge", "_all.primaries.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.total": ("gauge", "_all.primaries.search.query_total"),
"elasticsearch.primaries.search.query.time": ("gauge", "_all.primaries.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.query.current": ("gauge", "_all.primaries.search.query_current"),
"elasticsearch.primaries.search.fetch.total": ("gauge", "_all.primaries.search.fetch_total"),
"elasticsearch.primaries.search.fetch.time": ("gauge", "_all.primaries.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.search.fetch.current": ("gauge", "_all.primaries.search.fetch_current")
}
PRIMARY_SHARD_METRICS_POST_1_0 = {
"elasticsearch.primaries.merges.current": ("gauge", "_all.primaries.merges.current"),
"elasticsearch.primaries.merges.current.docs": ("gauge", "_all.primaries.merges.current_docs"),
"elasticsearch.primaries.merges.current.size": ("gauge", "_all.primaries.merges.current_size_in_bytes"),
"elasticsearch.primaries.merges.total": ("gauge", "_all.primaries.merges.total"),
"elasticsearch.primaries.merges.total.time": ("gauge", "_all.primaries.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.merges.total.docs": ("gauge", "_all.primaries.merges.total_docs"),
"elasticsearch.primaries.merges.total.size": ("gauge", "_all.primaries.merges.total_size_in_bytes"),
"elasticsearch.primaries.refresh.total": ("gauge", "_all.primaries.refresh.total"),
"elasticsearch.primaries.refresh.total.time": ("gauge", "_all.primaries.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.primaries.flush.total": ("gauge", "_all.primaries.flush.total"),
"elasticsearch.primaries.flush.total.time": ("gauge", "_all.primaries.flush.total_time_in_millis", lambda v: float(v)/1000)
}
STATS_METRICS = { # Metrics that are common to all Elasticsearch versions
"elasticsearch.docs.count": ("gauge", "indices.docs.count"),
"elasticsearch.docs.deleted": ("gauge", "indices.docs.deleted"),
"elasticsearch.store.size": ("gauge", "indices.store.size_in_bytes"),
"elasticsearch.indexing.index.total": ("gauge", "indices.indexing.index_total"),
"elasticsearch.indexing.index.time": ("gauge", "indices.indexing.index_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.index.current": ("gauge", "indices.indexing.index_current"),
"elasticsearch.indexing.delete.total": ("gauge", "indices.indexing.delete_total"),
"elasticsearch.indexing.delete.time": ("gauge", "indices.indexing.delete_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.indexing.delete.current": ("gauge", "indices.indexing.delete_current"),
"elasticsearch.get.total": ("gauge", "indices.get.total"),
"elasticsearch.get.time": ("gauge", "indices.get.time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.current": ("gauge", "indices.get.current"),
"elasticsearch.get.exists.total": ("gauge", "indices.get.exists_total"),
"elasticsearch.get.exists.time": ("gauge", "indices.get.exists_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.get.missing.total": ("gauge", "indices.get.missing_total"),
"elasticsearch.get.missing.time": ("gauge", "indices.get.missing_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.total": ("gauge", "indices.search.query_total"),
"elasticsearch.search.query.time": ("gauge", "indices.search.query_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.query.current": ("gauge", "indices.search.query_current"),
"elasticsearch.search.fetch.total": ("gauge", "indices.search.fetch_total"),
"elasticsearch.search.fetch.time": ("gauge", "indices.search.fetch_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.search.fetch.current": ("gauge", "indices.search.fetch_current"),
"elasticsearch.indices.segments.count": ("gauge", "indices.segments.count"),
"elasticsearch.indices.segments.memory_in_bytes": ("gauge", "indices.segments.memory_in_bytes"),
"elasticsearch.merges.current": ("gauge", "indices.merges.current"),
"elasticsearch.merges.current.docs": ("gauge", "indices.merges.current_docs"),
"elasticsearch.merges.current.size": ("gauge", "indices.merges.current_size_in_bytes"),
"elasticsearch.merges.total": ("gauge", "indices.merges.total"),
"elasticsearch.merges.total.time": ("gauge", "indices.merges.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.merges.total.docs": ("gauge", "indices.merges.total_docs"),
"elasticsearch.merges.total.size": ("gauge", "indices.merges.total_size_in_bytes"),
"elasticsearch.refresh.total": ("gauge", "indices.refresh.total"),
"elasticsearch.refresh.total.time": ("gauge", "indices.refresh.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.flush.total": ("gauge", "indices.flush.total"),
"elasticsearch.flush.total.time": ("gauge", "indices.flush.total_time_in_millis", lambda v: float(v)/1000),
"elasticsearch.process.open_fd": ("gauge", "process.open_file_descriptors"),
"elasticsearch.transport.rx_count": ("gauge", "transport.rx_count"),
"elasticsearch.transport.tx_count": ("gauge", "transport.tx_count"),
"elasticsearch.transport.rx_size": ("gauge", "transport.rx_size_in_bytes"),
"elasticsearch.transport.tx_size": ("gauge", "transport.tx_size_in_bytes"),
"elasticsearch.transport.server_open": ("gauge", "transport.server_open"),
"elasticsearch.thread_pool.bulk.active": ("gauge", "thread_pool.bulk.active"),
"elasticsearch.thread_pool.bulk.threads": ("gauge", "thread_pool.bulk.threads"),
"elasticsearch.thread_pool.bulk.queue": ("gauge", "thread_pool.bulk.queue"),
"elasticsearch.thread_pool.bulk.rejected": ("gauge", "thread_pool.bulk.rejected"),
"elasticsearch.thread_pool.flush.active": ("gauge", "thread_pool.flush.active"),
"elasticsearch.thread_pool.flush.threads": ("gauge", "thread_pool.flush.threads"),
"elasticsearch.thread_pool.flush.queue": ("gauge", "thread_pool.flush.queue"),
"elasticsearch.thread_pool.generic.active": ("gauge", "thread_pool.generic.active"),
"elasticsearch.thread_pool.generic.threads": ("gauge", "thread_pool.generic.threads"),
"elasticsearch.thread_pool.generic.queue": ("gauge", "thread_pool.generic.queue"),
"elasticsearch.thread_pool.get.active": ("gauge", "thread_pool.get.active"),
"elasticsearch.thread_pool.get.threads": ("gauge", "thread_pool.get.threads"),
"elasticsearch.thread_pool.get.queue": ("gauge", "thread_pool.get.queue"),
"elasticsearch.thread_pool.index.active": ("gauge", "thread_pool.index.active"),
"elasticsearch.thread_pool.index.threads": ("gauge", "thread_pool.index.threads"),
"elasticsearch.thread_pool.index.queue": ("gauge", "thread_pool.index.queue"),
"elasticsearch.thread_pool.management.active": ("gauge", "thread_pool.management.active"),
"elasticsearch.thread_pool.management.threads": ("gauge", "thread_pool.management.threads"),
"elasticsearch.thread_pool.management.queue": ("gauge", "thread_pool.management.queue"),
"elasticsearch.thread_pool.percolate.active": ("gauge", "thread_pool.percolate.active"),
"elasticsearch.thread_pool.percolate.threads": ("gauge", "thread_pool.percolate.threads"),
"elasticsearch.thread_pool.percolate.queue": ("gauge", "thread_pool.percolate.queue"),
"elasticsearch.thread_pool.refresh.active": ("gauge", "thread_pool.refresh.active"),
"elasticsearch.thread_pool.refresh.threads": ("gauge", "thread_pool.refresh.threads"),
"elasticsearch.thread_pool.refresh.queue": ("gauge", "thread_pool.refresh.queue"),
"elasticsearch.thread_pool.search.active": ("gauge", "thread_pool.search.active"),
"elasticsearch.thread_pool.search.threads": ("gauge", "thread_pool.search.threads"),
"elasticsearch.thread_pool.search.queue": ("gauge", "thread_pool.search.queue"),
"elasticsearch.thread_pool.snapshot.active": ("gauge", "thread_pool.snapshot.active"),
"elasticsearch.thread_pool.snapshot.threads": ("gauge", "thread_pool.snapshot.threads"),
"elasticsearch.thread_pool.snapshot.queue": ("gauge", "thread_pool.snapshot.queue"),
"elasticsearch.http.current_open": ("gauge", "http.current_open"),
"elasticsearch.http.total_opened": ("gauge", "http.total_opened"),
"jvm.mem.heap_committed": ("gauge", "jvm.mem.heap_committed_in_bytes"),
"jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes"),
"jvm.mem.heap_in_use": ("gauge", "jvm.mem.heap_used_percent"),
"jvm.mem.heap_max": ("gauge", "jvm.mem.heap_max_in_bytes"),
"jvm.mem.non_heap_committed": ("gauge", "jvm.mem.non_heap_committed_in_bytes"),
"jvm.mem.non_heap_used": ("gauge", "jvm.mem.non_heap_used_in_bytes"),
"jvm.threads.count": ("gauge", "jvm.threads.count"),
"jvm.threads.peak_count": ("gauge", "jvm.threads.peak_count"),
"elasticsearch.fs.total.total_in_bytes": ("gauge", "fs.total.total_in_bytes"),
"elasticsearch.fs.total.free_in_bytes": ("gauge", "fs.total.free_in_bytes"),
"elasticsearch.fs.total.available_in_bytes": ("gauge", "fs.total.available_in_bytes"),
}
JVM_METRICS_POST_0_90_10 = {
"jvm.gc.collectors.young.count": ("gauge", "jvm.gc.collectors.young.collection_count"),
"jvm.gc.collectors.young.collection_time": ("gauge", "jvm.gc.collectors.young.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collectors.old.count": ("gauge", "jvm.gc.collectors.old.collection_count"),
"jvm.gc.collectors.old.collection_time": ("gauge", "jvm.gc.collectors.old.collection_time_in_millis", lambda v: float(v)/1000)
}
JVM_METRICS_PRE_0_90_10 = {
"jvm.gc.concurrent_mark_sweep.count": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_count"),
"jvm.gc.concurrent_mark_sweep.collection_time": ("gauge", "jvm.gc.collectors.ConcurrentMarkSweep.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.par_new.count": ("gauge", "jvm.gc.collectors.ParNew.collection_count"),
"jvm.gc.par_new.collection_time": ("gauge", "jvm.gc.collectors.ParNew.collection_time_in_millis", lambda v: float(v)/1000),
"jvm.gc.collection_count": ("gauge", "jvm.gc.collection_count"),
"jvm.gc.collection_time": ("gauge", "jvm.gc.collection_time_in_millis", lambda v: float(v)/1000),
}
ADDITIONAL_METRICS_POST_0_90_5 = {
"elasticsearch.search.fetch.open_contexts": ("gauge", "indices.search.open_contexts"),
"elasticsearch.fielddata.size": ("gauge", "indices.fielddata.memory_size_in_bytes"),
"elasticsearch.fielddata.evictions": ("gauge", "indices.fielddata.evictions"),
}
ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0 = {
"elasticsearch.cache.filter.evictions": ("gauge", "indices.filter_cache.evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.filter_cache.memory_size_in_bytes"),
"elasticsearch.id_cache.size": ("gauge", "indices.id_cache.memory_size_in_bytes"),
}
ADDITIONAL_METRICS_PRE_0_90_5 = {
"elasticsearch.cache.field.evictions": ("gauge", "indices.cache.field_evictions"),
"elasticsearch.cache.field.size": ("gauge", "indices.cache.field_size_in_bytes"),
"elasticsearch.cache.filter.count": ("gauge", "indices.cache.filter_count"),
"elasticsearch.cache.filter.evictions": ("gauge", "indices.cache.filter_evictions"),
"elasticsearch.cache.filter.size": ("gauge", "indices.cache.filter_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_0_0 = {
"elasticsearch.indices.translog.size_in_bytes": ("gauge", "indices.translog.size_in_bytes"),
"elasticsearch.indices.translog.operations": ("gauge", "indices.translog.operations"),
}
ADDITIONAL_METRICS_1_x = { # Stats are only valid for v1.x
"elasticsearch.fs.total.disk_reads": ("rate", "fs.total.disk_reads"),
"elasticsearch.fs.total.disk_writes": ("rate", "fs.total.disk_writes"),
"elasticsearch.fs.total.disk_io_op": ("rate", "fs.total.disk_io_op"),
"elasticsearch.fs.total.disk_read_size_in_bytes": ("gauge", "fs.total.disk_read_size_in_bytes"),
"elasticsearch.fs.total.disk_write_size_in_bytes": ("gauge", "fs.total.disk_write_size_in_bytes"),
"elasticsearch.fs.total.disk_io_size_in_bytes": ("gauge", "fs.total.disk_io_size_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_3_0 = {
"elasticsearch.indices.segments.index_writer_memory_in_bytes": ("gauge", "indices.segments.index_writer_memory_in_bytes"),
"elasticsearch.indices.segments.version_map_memory_in_bytes": ("gauge", "indices.segments.version_map_memory_in_bytes"),
}
ADDITIONAL_METRICS_POST_1_4_0 = {
"elasticsearch.indices.segments.index_writer_max_memory_in_bytes": ("gauge", "indices.segments.index_writer_max_memory_in_bytes"),
"elasticsearch.indices.segments.fixed_bit_set_memory_in_bytes": ("gauge", "indices.segments.fixed_bit_set_memory_in_bytes"),
}
ADDITIONAL_METRICS_PRE_2_0 = {
"elasticsearch.thread_pool.merge.active": ("gauge", "thread_pool.merge.active"),
"elasticsearch.thread_pool.merge.threads": ("gauge", "thread_pool.merge.threads"),
"elasticsearch.thread_pool.merge.queue": ("gauge", "thread_pool.merge.queue"),
}
CLUSTER_HEALTH_METRICS = {
"elasticsearch.number_of_nodes": ("gauge", "number_of_nodes"),
"elasticsearch.number_of_data_nodes": ("gauge", "number_of_data_nodes"),
"elasticsearch.active_primary_shards": ("gauge", "active_primary_shards"),
"elasticsearch.active_shards": ("gauge", "active_shards"),
"elasticsearch.relocating_shards": ("gauge", "relocating_shards"),
"elasticsearch.initializing_shards": ("gauge", "initializing_shards"),
"elasticsearch.unassigned_shards": ("gauge", "unassigned_shards"),
"elasticsearch.cluster_status": ("gauge", "status", lambda v: {"red": 0, "yellow": 1, "green": 2}.get(v, -1)),
}
CLUSTER_PENDING_TASKS = {
"elasticsearch.pending_tasks_total": ("gauge", "pending_task_total"),
"elasticsearch.pending_tasks_priority_high": ("gauge", "pending_tasks_priority_high"),
"elasticsearch.pending_tasks_priority_urgent": ("gauge", "pending_tasks_priority_urgent")
}
SOURCE_TYPE_NAME = 'elasticsearch'
def __init__(self, name, init_config, agentConfig, instances=None):
AgentCheck.__init__(self, name, init_config, agentConfig, instances)
# Host status needs to persist across all checks
self.cluster_status = {}
def get_instance_config(self, instance):
url = instance.get('url')
if url is None:
raise Exception("An url must be specified in the instance")
pshard_stats = _is_affirmative(instance.get('pshard_stats', False))
cluster_stats = _is_affirmative(instance.get('cluster_stats', False))
if 'is_external' in instance:
cluster_stats = _is_affirmative(instance.get('is_external', False))
pending_task_stats = _is_affirmative(instance.get('pending_task_stats', True))
# Support URLs that have a path in them from the config, for
# backwards-compatibility.
parsed = urlparse.urlparse(url)
if parsed[2] != "":
url = "%s://%s" % (parsed[0], parsed[1])
port = parsed.port
host = parsed.hostname
custom_tags = instance.get('tags', [])
service_check_tags = [
'host:%s' % host,
'port:%s' % port
]
service_check_tags.extend(custom_tags)
# Tag by URL so we can differentiate the metrics
# from multiple instances
tags = ['url:%s' % url]
tags.extend(custom_tags)
timeout = instance.get('timeout') or self.DEFAULT_TIMEOUT
config = ESInstanceConfig(
pshard_stats=pshard_stats,
cluster_stats=cluster_stats,
password=instance.get('password'),
service_check_tags=service_check_tags,
ssl_cert=instance.get('ssl_cert'),
ssl_key=instance.get('ssl_key'),
ssl_verify=instance.get('ssl_verify'),
tags=tags,
timeout=timeout,
url=url,
username=instance.get('username'),
pending_task_stats=pending_task_stats
)
return config
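# Illustrative configuration sketch (added comment; not part of the original check):
# the instance keys read above correspond to an elasticsearch.yaml entry along these
# lines (all values are placeholders):
#
#   instances:
#     - url: http://localhost:9200
#       username: admin              # optional basic auth
#       password: secret
#       ssl_verify: true             # bool, or a path to a CA bundle
#       pshard_stats: false
#       cluster_stats: false
#       pending_task_stats: true
#       timeout: 5
#       tags:
#         - cluster:example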
def check(self, instance):
config = self.get_instance_config(instance)
# Check ES version for this instance and define parameters
# (URLs and metrics) accordingly
version = self._get_es_version(config)
health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, stats_metrics, \
pshard_stats_metrics = self._define_params(version, config.cluster_stats)
# Load clusterwise data
if config.pshard_stats:
pshard_stats_url = urlparse.urljoin(config.url, pshard_stats_url)
pshard_stats_data = self._get_data(pshard_stats_url, config)
self._process_pshard_stats_data(pshard_stats_data, config, pshard_stats_metrics)
# Load stats data.
stats_url = urlparse.urljoin(config.url, stats_url)
stats_data = self._get_data(stats_url, config)
self._process_stats_data(nodes_url, stats_data, stats_metrics, config)
# Load the health data.
health_url = urlparse.urljoin(config.url, health_url)
health_data = self._get_data(health_url, config)
self._process_health_data(health_data, config)
if config.pending_task_stats:
# Load the pending_tasks data.
pending_tasks_url = urlparse.urljoin(config.url, pending_tasks_url)
pending_tasks_data = self._get_data(pending_tasks_url, config)
self._process_pending_tasks_data(pending_tasks_data, config)
# If we're here we did not have any ES conn issues
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.OK,
tags=config.service_check_tags
)
def _get_es_version(self, config):
""" Get the running version of elasticsearch.
"""
try:
data = self._get_data(config.url, config, send_sc=False)
version = map(int, data['version']['number'].split('.')[0:3])
except Exception as e:
self.warning(
"Error while trying to get Elasticsearch version "
"from %s %s"
% (config.url, str(e))
)
version = [1, 0, 0]
self.service_metadata('version', version)
self.log.debug("Elasticsearch version is %s" % version)
return version
def _define_params(self, version, cluster_stats):
""" Define the set of URLs and METRICS to use depending on the
running ES version.
"""
pshard_stats_url = "/_stats"
if version >= [0, 90, 10]:
# ES versions 0.90.10 and above
health_url = "/_cluster/health?pretty=true"
nodes_url = "/_nodes?network=true"
pending_tasks_url = "/_cluster/pending_tasks?pretty=true"
# For "external" clusters, we want to collect from all nodes.
if cluster_stats:
stats_url = "/_nodes/stats?all=true"
else:
stats_url = "/_nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_POST_0_90_10
else:
health_url = "/_cluster/health?pretty=true"
nodes_url = "/_cluster/nodes?network=true"
pending_tasks_url = None
if cluster_stats:
stats_url = "/_cluster/nodes/stats?all=true"
else:
stats_url = "/_cluster/nodes/_local/stats?all=true"
additional_metrics = self.JVM_METRICS_PRE_0_90_10
stats_metrics = dict(self.STATS_METRICS)
stats_metrics.update(additional_metrics)
### Additional Stats metrics ###
if version >= [0, 90, 5]:
# ES versions 0.90.5 and above
additional_metrics = self.ADDITIONAL_METRICS_POST_0_90_5
else:
# ES version 0.90.4 and below
additional_metrics = self.ADDITIONAL_METRICS_PRE_0_90_5
stats_metrics.update(additional_metrics)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_0_0)
if version < [2, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_PRE_2_0)
if version >= [0, 90, 5]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_0_90_5_PRE_2_0)
if version >= [1, 0, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_1_x)
if version >= [1, 3, 0]:
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_3_0)
if version >= [1, 4, 0]:
# ES versions 1.4 and above
stats_metrics.update(self.ADDITIONAL_METRICS_POST_1_4_0)
# Version specific stats metrics about the primary shards
pshard_stats_metrics = dict(self.PRIMARY_SHARD_METRICS)
if version >= [1, 0, 0]:
additional_metrics = self.PRIMARY_SHARD_METRICS_POST_1_0
pshard_stats_metrics.update(additional_metrics)
return health_url, nodes_url, stats_url, pshard_stats_url, pending_tasks_url, \
stats_metrics, pshard_stats_metrics
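# Worked example (added comment): for a node reporting version 1.4.2 with
# cluster_stats left False, the branches above resolve to
#   health_url = "/_cluster/health?pretty=true"
#   stats_url  = "/_nodes/_local/stats?all=true"
# and stats_metrics is STATS_METRICS extended with JVM_METRICS_POST_0_90_10 plus the
# ADDITIONAL_METRICS_* groups gated on >=0.90.5, >=1.0.0, <2.0.0, >=1.3.0 and >=1.4.0.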
def _get_data(self, url, config, send_sc=True):
""" Hit a given URL and return the parsed json
"""
# Load basic authentication configuration, if available.
if config.username and config.password:
auth = (config.username, config.password)
else:
auth = None
# Load SSL configuration, if available.
# ssl_verify can be a bool or a string (http://docs.python-requests.org/en/latest/user/advanced/#ssl-cert-verification)
if isinstance(config.ssl_verify, bool) or isinstance(config.ssl_verify, str):
verify = config.ssl_verify
else:
verify = None
if config.ssl_cert and config.ssl_key:
cert = (config.ssl_cert, config.ssl_key)
elif config.ssl_cert:
cert = config.ssl_cert
else:
cert = None
try:
resp = requests.get(
url,
timeout=config.timeout,
headers=headers(self.agentConfig),
auth=auth,
verify=verify,
cert=cert
)
resp.raise_for_status()
except Exception as e:
if send_sc:
self.service_check(
self.SERVICE_CHECK_CONNECT_NAME,
AgentCheck.CRITICAL,
message="Error {0} when hitting {1}".format(e, url),
tags=config.service_check_tags
)
raise
return resp.json()
def _process_pending_tasks_data(self, data, config):
p_tasks = defaultdict(int)
for task in data.get('tasks', []):
p_tasks[task.get('priority')] += 1
node_data = {
'pending_task_total': sum(p_tasks.values()),
'pending_tasks_priority_high': p_tasks['high'],
'pending_tasks_priority_urgent': p_tasks['urgent'],
}
for metric in self.CLUSTER_PENDING_TASKS:
# metric description
desc = self.CLUSTER_PENDING_TASKS[metric]
self._process_metric(node_data, metric, *desc, tags=config.tags)
def _process_stats_data(self, nodes_url, data, stats_metrics, config):
cluster_stats = config.cluster_stats
for node_data in data['nodes'].itervalues():
metric_hostname = None
metrics_tags = list(config.tags)
# Resolve the node's name
node_name = node_data.get('name')
if node_name:
metrics_tags.append(
u"node_name:{}".format(node_name)
)
# Resolve the node's hostname
if cluster_stats:
for k in ['hostname', 'host']:
if k in node_data:
metric_hostname = node_data[k]
break
for metric, desc in stats_metrics.iteritems():
self._process_metric(
node_data, metric, *desc,
tags=metrics_tags, hostname=metric_hostname
)
def _process_pshard_stats_data(self, data, config, pshard_stats_metrics):
for metric, desc in pshard_stats_metrics.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
def _process_metric(self, data, metric, xtype, path, xform=None,
tags=None, hostname=None):
"""data: dictionary containing all the stats
metric: datadog metric
path: corresponding path in data, flattened, e.g. thread_pool.bulk.queue
xform: a lambda to apply to the numerical value
"""
value = data
# Traverse the nested dictionaries
for key in path.split('.'):
if value is not None:
value = value.get(key, None)
else:
break
if value is not None:
if xform:
value = xform(value)
if xtype == "gauge":
self.gauge(metric, value, tags=tags, hostname=hostname)
else:
self.rate(metric, value, tags=tags, hostname=hostname)
else:
self._metric_not_found(metric, path)
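# Worked example (added comment): for the entry
#   "jvm.mem.heap_used": ("gauge", "jvm.mem.heap_used_in_bytes")
# the loop above walks data['jvm']['mem']['heap_used_in_bytes']; if every key in the
# chain exists the value is reported via self.gauge(), otherwise _metric_not_found()
# only logs the miss and nothing is submitted.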
def _process_health_data(self, data, config):
if self.cluster_status.get(config.url) is None:
self.cluster_status[config.url] = data['status']
if data['status'] in ["yellow", "red"]:
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
if data['status'] != self.cluster_status.get(config.url):
self.cluster_status[config.url] = data['status']
event = self._create_event(data['status'], tags=config.tags)
self.event(event)
for metric, desc in self.CLUSTER_HEALTH_METRICS.iteritems():
self._process_metric(data, metric, *desc, tags=config.tags)
# Process the service check
cluster_status = data['status']
if cluster_status == 'green':
status = AgentCheck.OK
data['tag'] = "OK"
elif cluster_status == 'yellow':
status = AgentCheck.WARNING
data['tag'] = "WARN"
else:
status = AgentCheck.CRITICAL
data['tag'] = "ALERT"
msg = "{tag} on cluster \"{cluster_name}\" "\
"| active_shards={active_shards} "\
"| initializing_shards={initializing_shards} "\
"| relocating_shards={relocating_shards} "\
"| unassigned_shards={unassigned_shards} "\
"| timed_out={timed_out}" \
.format(**data)
self.service_check(
self.SERVICE_CHECK_CLUSTER_STATUS,
status,
message=msg,
tags=config.service_check_tags
)
def _metric_not_found(self, metric, path):
self.log.debug("Metric not found: %s -> %s", path, metric)
def _create_event(self, status, tags=None):
hostname = self.hostname.decode('utf-8')
if status == "red":
alert_type = "error"
msg_title = "%s is %s" % (hostname, status)
elif status == "yellow":
alert_type = "warning"
msg_title = "%s is %s" % (hostname, status)
else:
# then it should be green
alert_type = "success"
msg_title = "%s recovered as %s" % (hostname, status)
msg = "ElasticSearch: %s just reported as %s" % (hostname, status)
return {
'timestamp': int(time.time()),
'event_type': 'elasticsearch',
'host': hostname,
'msg_text': msg,
'msg_title': msg_title,
'alert_type': alert_type,
'source_type_name': "elasticsearch",
'event_object': hostname,
'tags': tags
}
| bsd-3-clause | 1,589,628,936,024,437,800 | 48.700315 | 158 | 0.619105 | false | 3.701833 | true | false | false |
dgschwend/zynqnet | tools/convert_caffemodel/tools/bindump_to_list.py | 1 | 2171 |
#!/usr/bin/env python2.7
#######
## bindump_to_list.py
## (c) 2016 David Gschwend
##
## Usage: python2.7 bindump_to_list.py file.bin
#######
from __future__ import print_function # print without newline
import os
import argparse
import struct
import time
import numpy as np
import math
# Parse arguments
parser = argparse.ArgumentParser(description='Print a binary float32 memory dump in human-readable format')
### Positional arguments
parser.add_argument('bin_file', help='Binary dump of float32 memory region')
### Optional arguments
parser.add_argument('-x', '--hex', action='store_true', help='Use Hex Address')
parser.add_argument('-c', '--cols', type=int, help='Number of Columns', default=1)
parser.add_argument('-p', '--precision', type=int, help='Number of Places after Comma', default=3)
parser.add_argument('-i', '--intwidth', type=int, help='Number of Places before Comma', default=3)
args = vars(parser.parse_args())
filename = args["bin_file"]
addrformat = "X" if args["hex"] else "d"
cols = args["cols"]
precision = args["precision"]
intwidth = args["intwidth"]
# Read Binary Contents
print("Using input file: {}".format(filename))
binary = ""
try:
with open(filename, "rb") as f:
binary = f.read();
except:
print("Could not open file {}".format(filename))
raise
# Interpret Binary File as List of Floats
num_floats = len(binary)/struct.calcsize('f')
print("Interpreting as {} float32 values".format(num_floats))
# Convert to List of Floats
floats = []
try:
floats = struct.unpack('f'*num_floats, binary)
except:
print("Could not convert to floats!")
raise
# Print to stdout
printbase = (16 if addrformat=="X" else 10)
addrwidth = max(int(math.ceil(math.log(4*num_floats, printbase))), 6)
datawidth = intwidth + precision + 1
hdrformat = "{:%ds} {:>%ds}"%(addrwidth, datawidth)
addrformat = "\n{:0%d%s} "%(addrwidth, addrformat)
dataformat = "{:%d.%df} "%(datawidth, precision)
print("\nValue Dump:\n")
print(hdrformat.format("ADDR", "VALUES"))
addr = 0
for f in floats:
if (addr % cols == 0):
print(addrformat.format(addr*4), end="")
print(dataformat.format(f), end="")
addr += 1
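# Example invocation (illustrative; weights.bin is a hypothetical input file):
#   python2.7 bindump_to_list.py --hex --cols 4 --precision 2 weights.bin
# prints a hexadecimal ADDR column followed by four float32 values per row.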
| gpl-3.0 | -1,622,241,999,994,910,700 | 26.833333 | 107 | 0.678029 | false | 3.240299 | false | false | false |
joshuahoman/vivisect | envi/archs/arm/const.py | 3 | 10798 |
MODE_ARM = 0
MODE_THUMB = 1
MODE_JAZELLE = 2
#IFLAGS - keep bottom 8-bits for cross-platform flags like envi.IF_NOFALL and envi.IF_BRFALL
IF_PSR_S = 1<<32 # This DP instruction can update CPSR
IF_B = 1<<33 # Byte
IF_H = 1<<35 # HalfWord
IF_S = 1<<36 # Signed
IF_D = 1<<37 # Dword
IF_L = 1<<38 # Long-store (eg. Dblword Precision) for STC
IF_T = 1<<39 # Translate for strCCbt
IF_W = 1<<40 # Write Back for STM/LDM (!)
IF_UM = 1<<41 # User Mode Registers for STM/LDM (^) (obviously no R15)
IF_DAIB_SHFT = 56 # shift-bits to get DAIB bits down to 0. this chops off the "is DAIB present" bit that the following flags store.
IF_DAIB_MASK = 7<<(IF_DAIB_SHFT-1)
IF_DA = 1<<(IF_DAIB_SHFT-1) # Decrement After
IF_IA = 3<<(IF_DAIB_SHFT-1) # Increment After
IF_DB = 5<<(IF_DAIB_SHFT-1) # Decrement Before
IF_IB = 7<<(IF_DAIB_SHFT-1) # Increment Before
IF_DAIB_B = 5<<(IF_DAIB_SHFT-1) # Before mask
IF_DAIB_I = 3<<(IF_DAIB_SHFT-1) # Before mask
IF_THUMB32 = 1<<50 # thumb32
IF_VQ = 1<<51 # Adv SIMD: operation uses saturating arithmetic
IF_VR = 1<<52 # Adv SIMD: operation performs rounding
IF_VD = 1<<53 # Adv SIMD: operation doubles the result
IF_VH = 1<<54 # Adv SIMD: operation halves the result
IF_SYS_MODE = 1<<58 # instruction is encoded to be executed in SYSTEM mode, not USER mode
OF_W = 1<<8 # Write back to
OF_UM = 1<<9 # Usermode, or if r15 included set current SPSR -> CPSR
OSZFMT_BYTE = "B"
OSZFMT_HWORD = "<H" # Introduced in ARMv4
OSZFMT_WORD = "<I"
OSZ_BYTE = 1
OSZ_HWORD = 2
OSZ_WORD = 4
fmts = [None, OSZ_BYTE, OSZ_HWORD, None, OSZ_WORD]
COND_EQ = 0x0 # z==1 (equal)
COND_NE = 0x1 # z==0 (not equal)
COND_CS = 0x2 # c==1 (carry set/unsigned higher or same)
COND_CC = 0x3 # c==0 (carry clear/unsigned lower)
COND_MI = 0x4 # n==1 (minus/negative)
COND_PL = 0x5 # n==0 (plus/positive or zero)
COND_VS = 0x6 # v==1 (overflow)
COND_VC = 0x7 # v==0 (no overflow)
COND_HI = 0x8 # c==1 and z==0 (unsigned higher)
COND_LO = 0x9 # c==0 or z==1 (unsigned lower or same)
COND_GE = 0xA # n==v (signed greater than or equal) (n==1 and v==1) or (n==0 and v==0)
COND_LT = 0xB # n!=v (signed less than) (n==1 and v==0) or (n==0 and v==1)
COND_GT = 0xC # z==0 and n==v (signed greater than)
COND_LE = 0xD # z==1 and n!=v (signed less than or equal)
COND_AL = 0xE # always
COND_EXTENDED = 0xF # special case - see conditional 0b1111
cond_codes = {
COND_EQ:"eq", # Equal Z set
COND_NE:"ne", # Not equal Z clear
COND_CS:"cs", #/HS Carry set/unsigned higher or same C set
COND_CC:"cc", #/LO Carry clear/unsigned lower C clear
COND_MI:"mi", # Minus/negative N set
COND_PL:"pl", # Plus/positive or zero N clear
COND_VS:"vs", # Overflow V set
COND_VC:"vc", # No overflow V clear
COND_HI:"hi", # Unsigned higher C set and Z clear
COND_LO:"lo", # Unsigned lower or same C clear or Z set
COND_GE:"ge", # Signed greater than or equal N set and V set, or N clear and V clear (N == V)
COND_LT:"lt", # Signed less than N set and V clear, or N clear and V set (N!= V)
COND_GT:"gt", # Signed greater than Z clear, and either N set and V set, or N clear and V clear (Z == 0,N == V)
COND_LE:"le", # Signed less than or equal Z set, or N set and V clear, or N clear and V set (Z == 1 or N!= V)
COND_AL:"", # Always (unconditional) - could be "al" but "" seems better...
COND_EXTENDED:"2", # See extended opcode table
}
cond_map = {
COND_EQ:0, # Equal Z set
COND_NE:1, # Not equal Z clear
COND_CS:2, #/HS Carry set/unsigned higher or same C set
COND_CC:3, #/LO Carry clear/unsigned lower C clear
COND_MI:4, # Minus/negative N set
COND_PL:5, # Plus/positive or zero N clear
COND_VS:6, # Overflow V set
COND_VC:7, # No overflow V clear
COND_HI:8, # Unsigned higher C set and Z clear
COND_LO:9, # Unsigned lower or same C clear or Z set
COND_GE:10, # Signed greater than or equal N set and V set, or N clear and V clear (N == V)
COND_LT:11, # Signed less than N set and V clear, or N clear and V set (N!= V)
COND_GT:12, # Signed greater than Z clear, and either N set and V set, or N clear and V clear (Z == 0,N == V)
COND_LE:13, # Signed less than or equal Z set, or N set and V clear, or N clear and V set (Z == 1 or N!= V)
COND_AL:"", # Always (unconditional) - could be "al" but "" seems better...
COND_EXTENDED:"2", # See extended opcode table
}
PM_usr = 0b10000
PM_fiq = 0b10001
PM_irq = 0b10010
PM_svc = 0b10011
PM_mon = 0b10110
PM_abt = 0b10111
PM_hyp = 0b11010
PM_und = 0b11011
PM_sys = 0b11111
# reg stuff stolen from regs.py to support proc_modes
# these are in context of reg_table, not reg_data.
# ie. these are indexes into the lookup table.
REG_OFFSET_USR = 17 * (PM_usr&0xf)
REG_OFFSET_FIQ = 17 * (PM_fiq&0xf)
REG_OFFSET_IRQ = 17 * (PM_irq&0xf)
REG_OFFSET_SVC = 17 * (PM_svc&0xf)
REG_OFFSET_MON = 17 * (PM_mon&0xf)
REG_OFFSET_ABT = 17 * (PM_abt&0xf)
REG_OFFSET_HYP = 17 * (PM_hyp&0xf)
REG_OFFSET_UND = 17 * (PM_und&0xf)
REG_OFFSET_SYS = 17 * (PM_sys&0xf)
#REG_OFFSET_CPSR = 17 * 16
REG_OFFSET_CPSR = 16 # CPSR is available in every mode, and PM_usr and PM_sys don't have an SPSR.
REG_SPSR_usr = REG_OFFSET_USR + 17
REG_SPSR_fiq = REG_OFFSET_FIQ + 17
REG_SPSR_irq = REG_OFFSET_IRQ + 17
REG_SPSR_svc = REG_OFFSET_SVC + 17
REG_SPSR_mon = REG_OFFSET_MON + 17
REG_SPSR_abt = REG_OFFSET_ABT + 17
REG_SPSR_hyp = REG_OFFSET_HYP + 17
REG_SPSR_und = REG_OFFSET_UND + 17
REG_SPSR_sys = REG_OFFSET_SYS + 17
REG_PC = 0xf
REG_SP = 0xd
REG_BP = None
REG_CPSR = REG_OFFSET_CPSR
REG_FLAGS = REG_OFFSET_CPSR #same location, backward-compat name
proc_modes = { # mode_name, short_name, description, offset, mode_reg_count, PSR_offset, privilege_level
PM_usr: ("User Processor Mode", "usr", "Normal program execution mode", REG_OFFSET_USR, 15, REG_SPSR_usr, 0),
PM_fiq: ("FIQ Processor Mode", "fiq", "Supports a high-speed data transfer or channel process", REG_OFFSET_FIQ, 8, REG_SPSR_fiq, 1),
PM_irq: ("IRQ Processor Mode", "irq", "Used for general-purpose interrupt handling", REG_OFFSET_IRQ, 13, REG_SPSR_irq, 1),
PM_svc: ("Supervisor Processor Mode", "svc", "A protected mode for the operating system", REG_OFFSET_SVC, 13, REG_SPSR_svc, 1),
PM_mon: ("Monitor Processor Mode", "mon", "Secure Monitor Call exception", REG_OFFSET_MON, 13, REG_SPSR_mon, 1),
PM_abt: ("Abort Processor Mode", "abt", "Implements virtual memory and/or memory protection", REG_OFFSET_ABT, 13, REG_SPSR_abt, 1),
PM_hyp: ("Hyp Processor Mode", "hyp", "Hypervisor Mode", REG_OFFSET_HYP, 13, REG_SPSR_hyp, 2),
PM_und: ("Undefined Processor Mode", "und", "Supports software emulation of hardware coprocessor", REG_OFFSET_UND, 13, REG_SPSR_und, 1),
PM_sys: ("System Processor Mode", "sys", "Runs privileged operating system tasks (ARMv4 and above)", REG_OFFSET_SYS, 15, REG_SPSR_sys, 1),
}
PM_LNAME = 0
PM_SNAME = 1
PM_DESC = 2
PM_REGOFF = 3
PM_BANKED = 4
PM_SPSR = 5
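# Illustrative lookup (added comment; not part of the original module):
#   mode = proc_modes[PM_svc]
#   mode[PM_SNAME]   -> 'svc'
#   mode[PM_REGOFF]  -> REG_OFFSET_SVC   (start of that mode's register block)
#   mode[PM_SPSR]    -> REG_SPSR_svc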
INST_ENC_DP_IMM = 0 # Data Processing Immediate Shift
INST_ENC_MISC = 1 # Misc Instructions
# Instruction encodings in arm v5
IENC_DP_IMM_SHIFT = 0 # Data processing immediate shift
IENC_MISC = 1 # Miscellaneous instructions
IENC_MISC1 = 2 # Miscellaneous instructions again
IENC_DP_REG_SHIFT = 3 # Data processing register shift
IENC_MULT = 4 # Multiplies & Extra load/stores
IENC_UNDEF = 5 # Undefined instruction
IENC_MOV_IMM_STAT = 6 # Move immediate to status register
IENC_DP_IMM = 7 # Data processing immediate
IENC_LOAD_IMM_OFF = 8 # Load/Store immediate offset
IENC_LOAD_REG_OFF = 9 # Load/Store register offset
IENC_ARCH_UNDEF = 10 # Architecturally undefined
IENC_MEDIA = 11 # Media instructions
IENC_LOAD_MULT = 12 # Load/Store Multiple
IENC_BRANCH = 13 # Branch
IENC_COPROC_RREG_XFER = 14 # mrrc/mcrr
IENC_COPROC_LOAD = 15 # Coprocessor load/store and double reg xfers
IENC_COPROC_DP = 16 # Coprocessor data processing
IENC_COPROC_REG_XFER = 17 # Coprocessor register transfers
IENC_SWINT = 18 # Sofware interrupts
IENC_UNCOND = 19 # unconditional wacko instructions
IENC_EXTRA_LOAD = 20 # extra load/store (swp)
IENC_DP_MOVW = 21 #
IENC_DP_MOVT = 22 #
IENC_DP_MSR_IMM = 23 #
# offchutes
IENC_MEDIA_PARALLEL = ((IENC_MEDIA << 8) + 1) << 8
IENC_MEDIA_SAT = ((IENC_MEDIA << 8) + 2) << 8
IENC_MEDIA_REV = ((IENC_MEDIA << 8) + 3) << 8
IENC_MEDIA_SEL = ((IENC_MEDIA << 8) + 4) << 8
IENC_MEDIA_USAD8 = ((IENC_MEDIA << 8) + 5) << 8
IENC_MEDIA_USADA8 = ((IENC_MEDIA << 8) + 6) << 8
IENC_MEDIA_EXTEND = ((IENC_MEDIA << 8) + 7) << 8
IENC_MEDIA_PACK = ((IENC_MEDIA << 8) + 8) << 8
IENC_UNCOND_CPS = ((IENC_UNCOND << 8) + 1) << 8
IENC_UNCOND_SETEND = ((IENC_UNCOND << 8) + 2) << 8
IENC_UNCOND_PLD = ((IENC_UNCOND << 8) + 3) << 8
IENC_UNCOND_BLX = ((IENC_UNCOND << 8) + 4) << 8
IENC_UNCOND_RFE = ((IENC_UNCOND << 8) + 5) << 8
# The supported types of operand shifts (by the 2 bit field)
S_LSL = 0
S_LSR = 1
S_ASR = 2
S_ROR = 3
S_RRX = 4 # FIXME HACK XXX add this
shift_names = ("lsl", "lsr", "asr", "ror", "rrx")
SOT_REG = 0
SOT_IMM = 1
daib = ("da", "ia", "db", "ib")
def instrenc(encoding, index):
return (encoding << 16) + index
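# e.g. instrenc(IENC_BRANCH, 1) == (IENC_BRANCH << 16) + 1, which is how INS_BL is
# composed below (comment added for clarity).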
INS_AND = IENC_DP_IMM_SHIFT << 16
INS_EOR = (IENC_DP_IMM_SHIFT << 16) + 1
INS_SUB = (IENC_DP_IMM_SHIFT << 16) + 2
INS_RSB = (IENC_DP_IMM_SHIFT << 16) + 3
INS_ADD = (IENC_DP_IMM_SHIFT << 16) + 4
INS_ADC = (IENC_DP_IMM_SHIFT << 16) + 5
INS_SBC = (IENC_DP_IMM_SHIFT << 16) + 6
INS_RSC = (IENC_DP_IMM_SHIFT << 16) + 7
INS_TST = (IENC_DP_IMM_SHIFT << 16) + 8
INS_TEQ = (IENC_DP_IMM_SHIFT << 16) + 9
INS_CMP = (IENC_DP_IMM_SHIFT << 16) + 10
INS_CMN = (IENC_DP_IMM_SHIFT << 16) + 11
INS_ORR = (IENC_DP_IMM_SHIFT << 16) + 12
INS_MOV = (IENC_DP_IMM_SHIFT << 16) + 13
INS_BIC = (IENC_DP_IMM_SHIFT << 16) + 14
INS_MVN = (IENC_DP_IMM_SHIFT << 16) + 15
INS_ORN = (IENC_DP_IMM_SHIFT << 16) + 12
INS_ADR = (IENC_DP_IMM_SHIFT << 16) + 16
INS_B = instrenc(IENC_BRANCH, 0)
INS_BL = instrenc(IENC_BRANCH, 1)
INS_BCC = instrenc(IENC_BRANCH, 2)
INS_BX = instrenc(IENC_MISC, 3)
INS_BXJ = instrenc(IENC_MISC, 5)
INS_BLX = IENC_UNCOND_BLX
INS_SWI = IENC_SWINT
# FIXME: must fit these into the numbering scheme
INS_TB = 85
INS_LDREX = 85
INS_ORN = 85
INS_PKH = 85
INS_LSL = 85
INS_LSR = 85
INS_ASR = 85
INS_ROR = 85
INS_RRX = 85
INS_LDR = instrenc(IENC_LOAD_IMM_OFF, 0)
INS_STR = instrenc(IENC_LOAD_IMM_OFF, 1)
no_update_Rd = (INS_TST, INS_TEQ, INS_CMP, INS_CMN, )
| apache-2.0 | 6,058,271,233,781,974,000 | 39.291045 | 142 | 0.624004 | false | 2.476038 | false | false | false |
esonderegger/jagss | jagss/sftpsync.py | 1 | 2171 |
# -*- coding: utf-8 -*-
import os
import re
import socket
from stat import S_ISDIR
from datetime import datetime
import logging
import paramiko
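# Maximum difference (in seconds) between source and destination mtimes before a file is considered changed.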
MTIME_TOLERANCE = 3
logger = logging.getLogger(__name__)
class AuthenticationError(Exception):
pass
class TimeoutError(Exception):
pass
class SshError(Exception):
pass
class Sftp(object):
def __init__(self, host, username, password=None, port=22, timeout=10,
max_attempts=3, **kwargs):
self.host = host
self.port = port
self.username = username
self.password = password
self.client = paramiko.SSHClient()
self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
self.sftp = None
for i in range(max_attempts):
try:
self.client.connect(host, port=port, username=username,
password=password, timeout=timeout,
**kwargs)
self.sftp = self.client.open_sftp()
return
except (paramiko.BadHostKeyException,
paramiko.AuthenticationException), e:
raise AuthenticationError(str(e))
except socket.timeout, e:
raise TimeoutError(str(e))
except Exception, e:
if i == max_attempts - 1:
raise SshError(str(e))
def _walk_remote(self, path, topdown=True):
try:
res = self.sftp.listdir_attr(path)
except IOError:
res = []
for stat in res:
file = os.path.join(path, stat.filename)
if not S_ISDIR(stat.st_mode):
yield 'file', file, stat
else:
if topdown:
yield 'dir', file, stat
for res in self._walk_remote(file, topdown=topdown):
yield res
else:
for res in self._walk_remote(file, topdown=topdown):
yield res
yield 'dir', file, None
def _walk_local(self, path, topdown=True):
for path, dirs, files in os.walk(path, topdown=topdown):
for file in files:
file = os.path.join(path, file)
yield 'file', file, os.stat(file)
for dir in dirs:
dir = os.path.join(path, dir)
yield 'dir', dir, os.stat(dir)
def _walk(self, *args, **kwargs):
remote = kwargs.pop('remote', False)
if remote:
return self._walk_remote(*args, **kwargs)
else:
return self._walk_local(*args, **kwargs)
def _makedirs_dst(self, path, remote=True, dry=False):
if remote:
paths = []
while path not in ('/', ''):
paths.insert(0, path)
path = os.path.dirname(path)
for path in paths:
try:
self.sftp.lstat(path)
except Exception:
if not dry:
self.sftp.mkdir(path)
logger.debug('created destination directory %s', path)
else:
if not os.path.exists(path):
if not dry:
os.makedirs(path)
logger.debug('created destination directory %s', path)
def _validate_src(self, file, include, exclude):
for re_ in include:
if not re_.search(file):
return False
for re_ in exclude:
if re_.search(file):
return False
return True
def _validate_dst(self, file, src_stat, remote=True):
if remote:
try:
dst_stat = self.sftp.lstat(file)
except Exception:
return
else:
if not os.path.exists(file):
return
dst_stat = os.stat(file)
if abs(dst_stat.st_mtime - src_stat.st_mtime) > MTIME_TOLERANCE:
debug_string = '%s modified time mismatch '
debug_string += '(source: %s, destination: %s)'
logger.debug(debug_string,
file, datetime.utcfromtimestamp(src_stat.st_mtime),
datetime.utcfromtimestamp(dst_stat.st_mtime))
return
if dst_stat.st_size != src_stat.st_size:
return
return True
def _save(self, src, dst, src_stat, remote=True):
if remote:
logger.info('copying %s to %s@%s:%s', src, self.username,
self.host, dst)
self.sftp.put(src, dst)
self.sftp.utime(dst, (int(src_stat.st_atime),
int(src_stat.st_mtime)))
else:
logger.info('copying %s@%s:%s to %s', self.username, self.host,
src, dst)
self.sftp.get(src, dst)
os.utime(dst, (int(src_stat.st_atime), int(src_stat.st_mtime)))
def _delete_dst(self, path, files, remote=True, dry=False):
if remote:
callables = {'file': self.sftp.remove, 'dir': self.sftp.rmdir}
else:
callables = {'file': os.remove, 'dir': os.rmdir}
for type, file, stat in self._walk(path, topdown=False, remote=remote):
if file not in files[type]:
if not dry:
try:
callables[type](file)
except Exception, e:
logger.debug('failed to remove %s: %s', file, str(e))
continue
logger.debug('removed %s', file)
def _get_filters(self, filters):
if not filters:
return []
return [re.compile(f) for f in filters]
def sync(self, src, dst, download=True, include=None, exclude=None,
delete=False, dry=False):
'''Sync files and directories.
:param src: source directory
:param dst: destination directory
:param download: True to sync from a remote source to a local
destination, else sync from a local source to a remote destination
:param include: list of regex patterns the source files must match
:param exclude: list of regex patterns the source files must not match
:param delete: remove destination files and directories not present
        at source or filtered by the include/exclude patterns
'''
include = self._get_filters(include)
exclude = self._get_filters(exclude)
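        # When only one of src/dst ends with a slash, nest the source directory name inside the destination.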
if src.endswith('/') != dst.endswith('/'):
dst = os.path.join(dst, os.path.basename(src.rstrip('/')))
src = src.rstrip('/')
re_base = re.compile(r'^%s/' % re.escape(src))
if not src:
src = '/'
self._makedirs_dst(dst, remote=not download, dry=dry)
started = datetime.utcnow()
total_size = 0
dst_list = {'file': [], 'dir': []}
for type, file, stat in self._walk(src, remote=download):
file_ = re_base.sub('', file)
if not self._validate_src(file_, include, exclude):
logger.debug('filtered %s', file)
continue
dst_file = os.path.join(dst, file_)
dst_list[type].append(dst_file)
if type == 'dir':
self._makedirs_dst(dst_file, remote=not download, dry=dry)
elif type == 'file':
if not self._validate_dst(dst_file, stat, remote=not download):
if not dry:
self._save(file, dst_file, stat, remote=not download)
total_size += stat.st_size
logger.debug('copied %s to %s', file, dst_file)
if delete:
self._delete_dst(dst, dst_list, remote=not download, dry=dry)
logger.debug('transferred %s bytes in %s', total_size,
datetime.utcnow() - started)
| mit | 655,222,774,239,576,600 | 33.995614 | 79 | 0.517107 | false | 4.235138 | false | false | false |
iulian787/spack | var/spack/repos/builtin/packages/isescan/package.py | 3 | 2864 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Isescan(Package):
"""A python pipeline to identify IS (Insertion Sequence) elements in
genome and metagenome"""
homepage = "https://github.com/xiezhq/ISEScan"
url = "https://github.com/xiezhq/ISEScan/archive/v1.7.2.1.tar.gz"
version('1.7.2.1', sha256='b971a3e86a8cddaa4bcd520ba9e75425bbe93190466f81a3791ae0cb4baf5e5d')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('py-fastcluster', type='run')
depends_on('py-argparse', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
depends_on('[email protected]:', type='run')
def setup_run_environment(self, env):
env.prepend_path('PATH', self.prefix)
env.prepend_path('LD_LIBRARY_PATH',
join_path(self.prefix, 'ssw201507'))
def install(self, spec, prefix):
# build bundled SSW library
with working_dir('ssw201507'):
Executable(spack_cc)(
'-O3', '-pipe', self.compiler.cc_pic_flag, '-shared',
'-rdynamic', '-o', 'libssw.' + dso_suffix, 'ssw.c', 'ssw.h',
)
# set paths to required programs
blast_pfx = self.spec['blast-plus'].prefix.bin
blastn_path = blast_pfx.blastn
blastp_path = blast_pfx.blastp
makeblastdb_path = blast_pfx.makeblastdb
hmmer_pfx = self.spec['hmmer'].prefix.bin
phmmer_path = hmmer_pfx.phmmer
hmmsearch_path = hmmer_pfx.hmmsearch
fgs_pfx = self.spec['fraggenescan'].prefix.bin
fgs_path = join_path(fgs_pfx, 'run_FragGeneScan.pl')
constants = FileFilter('constants.py')
constants.filter('/apps/inst/FragGeneScan1.30/run_FragGeneScan.pl',
fgs_path, string=True)
constants.filter('/apps/inst/hmmer-3.3/bin/phmmer',
phmmer_path, string=True)
constants.filter('/apps/inst/hmmer-3.3/bin/hmmsearch',
hmmsearch_path, string=True)
constants.filter('/apps/inst/ncbi-blast-2.10.0+/bin/blastn',
blastn_path, string=True)
constants.filter('/apps/inst/ncbi-blast-2.10.0+/bin/blastp',
blastp_path, string=True)
constants.filter('/apps/inst/ncbi-blast-2.10.0+/bin/makeblastdb',
makeblastdb_path, string=True)
# install the whole tree
install_tree('.', prefix)
set_executable(join_path(prefix, 'isescan.py'))
| lgpl-2.1 | -1,895,937,211,816,651,000 | 39.338028 | 97 | 0.606145 | false | 3.066381 | false | false | false |
Codophile1/exide | exide/odp_element_parsers/SlideParser.py | 1 | 5109 | #!/usr/bin/python
#-*- coding: utf-8 -*-
from .TextParser import TextParser
from .utils import namespace
class SlideParser(object):
def __init__(self, XMLSlideObject, number, presentationParser):
self.presentation = presentationParser
self.text_parsers = self.parseText(XMLSlideObject)
self.title_parsers = self.parseTitle(XMLSlideObject)
self.number = number
self.layout = None
def get_style_by_id(self, style_id):
"""
Return a |StyleParser| matching the given id.
        :param style_id: ID of the style to look up.
:return: |StyleParser| object.
"""
return self.presentation.get_style_by_id(style_id)
def parseText(self, XMLSlideObject):
"""
        Create a |TextParser| object for each text of the given XML slide object.
        :param XMLSlideObject: LXML slide object
        :return: List of |TextParser| objects.
"""
text = []
for frame in XMLSlideObject.findall(".//draw:frame", XMLSlideObject.nsmap):
if frame not in XMLSlideObject.findall(".//draw:frame[@presentation:class='title']", XMLSlideObject.nsmap):
for textF in frame.findall(".//text:p", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
text.append(TextParser(textF, style, self))
for textF in frame.findall(".//text:span", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
text.append(TextParser(textF, style, self))
for textF in frame.findall(".//text:text", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
text.append(TextParser(textF, style, self))
return text
def parseTitle(self, XMLSlideObject):
"""
        Look for the XML title object within the given XML slide object and create a list of |TextParser| objects for each text within the title.
        :param XMLSlideObject: LXML slide object
        :return: List of |TextParser| objects for the title.
"""
title = []
        # Look for the text frame corresponding to the slide title
titleFrame = XMLSlideObject.find(".//draw:frame[@presentation:class='title']", XMLSlideObject.nsmap)
if titleFrame is not None:
            # Look for the paragraph that may contain the title
for textF in titleFrame.findall(".//text:p", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF) + "style-name") is not None:
style_id = textF.get(namespace(textF) + "style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
title.append(TextParser(textF, style, self))
            # Look for the span that may contain the title
for textF in titleFrame.findall(".//text:span", XMLSlideObject.nsmap):
style = None
if textF.get(namespace(textF)+"style-name") is not None:
style_id = textF.get(namespace(textF)+"style-name")
style = self.get_style_by_id(style_id)
if textF.text is not None:
title.append(TextParser(textF, style, self))
return title
    # Extract the texts that use a given style
def getTextsByStyleId(self, styleID):
"""
Return a list of |TextParser| objects whose style matches the given style ID.
:param styleID: ID of a |StyleParser|
:return: List of |TextParser| objects.
"""
texts = []
        # Go through the texts and check whether each one has the requested style
for text in self.text_parsers:
if text.style_id == styleID:
texts.append(text)
return texts
@property
def text(self):
"""
Return a string containing all the body text of the slide.
        :return: String
"""
text=""
for tp in self.text_parsers:
text+="\n"+tp.text
return text
@property
def title(self):
"""
        Return a string containing the title of the slide.
:return: String
"""
if len(self.title_parsers) > 0:
text=""
for tp in self.title_parsers:
text+=tp.text
return text
| lgpl-3.0 | -5,969,898,557,859,558,000 | 37.390977 | 149 | 0.5613 | false | 4.033175 | false | false | false |
jakeres/plugin.video.triluliluro | default.py | 1 | 10700 | import codecs,urllib,urllib2,re,xbmc,xbmcplugin,xbmcaddon,xbmcgui,os,sys,commands,HTMLParser,jsunpack,time
website = 'http://www.trilulilu.ro/';
__version__ = "1.0.4"
__plugin__ = "trilulilu.ro" + __version__
__url__ = "www.xbmc.com"
settings = xbmcaddon.Addon( id = 'plugin.video.triluliluro' )
search_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'search.png' )
movies_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'movies.png' )
movies_hd_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'movies-hd.png' )
tv_series_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'tv.png' )
next_thumb = os.path.join( settings.getAddonInfo( 'path' ), 'resources', 'media', 'next.png' )
def ROOT():
#addDir('Filme','http://www.trilulilu.ro/',1,movies_thumb)
addDir('Cauta','http://www.trilulilu.ro/',3,search_thumb)
addDir('Cauta ... dublat','http://www.trilulilu.ro/',31,search_thumb)
xbmc.executebuiltin("Container.SetViewMode(500)")
def CAUTA_LIST(url):
link = get_search(url)
match=re.compile('<a href="(http://www.trilulilu.ro/video-.+?)#ref=cauta" .+?title="(.+?)" .+?>\n.+?<div.+?>(\d+:\d+)</div><img (src|data-src)="(.+?)" width="', re.IGNORECASE|re.MULTILINE).findall(link)
if len(match) > 0:
print match
for legatura, name, length, s, img in match:
#name = HTMLParser.HTMLParser().unescape( codecs.decode(name, "unicode_escape") ) + " " + length
name = name + " " + length
the_link = legatura
image = img
sxaddLink(name,the_link,image,name,10)
match=re.compile('<link rel="next" href="\?offset=(\d+)" />', re.IGNORECASE).findall(link)
if len(match) > 0:
nexturl = re.sub('\?offset=(\d+)', '?offset='+match[0], url)
if nexturl.find("offset=") == -1:
nexturl += '?offset='+match[0]
print "NEXT " + nexturl
addNext('Next', nexturl, 2, next_thumb)
xbmc.executebuiltin("Container.SetViewMode(500)")
def CAUTA(url, autoSearch = None):
keyboard = xbmc.Keyboard( '' )
keyboard.doModal()
if ( keyboard.isConfirmed() == False ):
return
search_string = keyboard.getText()
if len( search_string ) == 0:
return
if autoSearch is None:
autoSearch = ""
CAUTA_LIST( get_search_url(search_string + "" + autoSearch) )
def SXVIDEO_GENERIC_PLAY(sxurl, seltitle, linksource="source1"):
listitem = xbmcgui.ListItem(seltitle)
listitem.setInfo('video', {'Title': seltitle})
selurl = sxurl
SXVIDEO_PLAY_THIS(selurl, listitem, None)
return
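# Try each known stream format and start playback with the first URL that answers a ranged request.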
def SXVIDEO_PLAY_THIS(selurl, listitem, source):
movie_formats = {'flv': 'flv-vp6', 'mp4': 'mp4-360p'}
sformat = ''
player = xbmc.Player( xbmc.PLAYER_CORE_MPLAYER )
for (mfn, mf) in movie_formats.items():
if SX_checkUrl(selurl + mf):
player.play(selurl + mf, listitem)
time.sleep(1)
break;
#if player.isPlaying():
# break;
try:
print "-"
#player.setSubtitles(source['subtitle'])
except:
pass
#while player.isPlaying:
# xbmc.sleep(100);
return player.isPlaying()
def SXSHOWINFO(text):
#progress = xbmcgui.DialogProgress()
#progress.create("kml browser", "downloading playlist...", "please wait.")
print ""
def SXVIDEO_FILM_PLAY(url):
SXSHOWINFO("Playing movie...")
#print url
sxurli = sxGetMovieLink(url)
#print sxurli
#return
#print sxurls
SXVIDEO_GENERIC_PLAY(sxurli['url'], sxurli['title'])
def SX_checkUrl(url):
content_range=None
try:
req = urllib2.Request(url)
#
# Here we request that bytes 18000--19000 be downloaded.
# The range is inclusive, and starts at 0.
#
req.headers['Range']='bytes=%s-%s' % (100, 200)
f = urllib2.urlopen(req)
# This shows you the actual bytes that have been downloaded.
content_range=f.headers.get('Content-Range')
except:
pass
print "URL costel " + url
#print(content_range)
return content_range != None
def sxGetMovieLink(url):
print 'url video '+url
#print 'nume video '+ name
# thumbnail
src = get_url(urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]"))
#print src
thumbnail = ''
title = ''
link_video_trilu = ""
#title
match = re.compile('<title>(.+?)<', re.IGNORECASE).findall(src)
title = HTMLParser.HTMLParser().unescape(match[0])
title = re.sub('\s+-\s*Video\s*-\s*Trilulilu', '', title);
#print "MATCH SERCH " + match[0]
#video link --- # block_flash_vars = {"userid":"andreea_popa","hash":"edee1b51b240c9","server":"65","autoplay":"true","hasAds":"true","viewf
match = re.compile('block_flash_vars = {"userid":"(.+?)","hash":"(.+?)","server":"(.+?)",', re.IGNORECASE).findall(src)
if not match:
#addLink('Could NOT generate video link ', " ", thumbnail, title)
xbmc.executebuiltin('Notification(Error,Could NOT generate video link,5000,/script.hellow.world.png)')
return False
ids = match[0]
username = ids[0]
hash = ids[1]
server = ids[2]
#print "hash = " + hash + "; username = " + username + "; server=" + server
# video id
link_video_trilu = "http://fs"+server+".trilulilu.ro/stream.php?type=video&source=site&hash=" + hash + "&username=" + username + "&format="
return {'url':link_video_trilu, 'title': title}
def get_url(url):
req = urllib2.Request(url)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
try:
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
except:
return False
def get_search_url(keyword, offset = None):
url = 'http://cauta.trilulilu.ro/video/' + urllib.quote_plus(keyword)
if offset != None:
url += "?offset="+offset
return url
def get_search(url):
params = {}
req = urllib2.Request(url, urllib.urlencode(params))
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
req.add_header('Content-type', 'application/x-www-form-urlencoded')
try:
response = urllib2.urlopen(req)
link=response.read()
response.close()
return link
except:
return False
def get_params():
param=[]
paramstring=sys.argv[2]
if len(paramstring)>=2:
params=sys.argv[2]
cleanedparams=params.replace('?','')
if (params[len(params)-1]=='/'):
params=params[0:len(params)-2]
pairsofparams=cleanedparams.split('&')
param={}
for i in range(len(pairsofparams)):
splitparams={}
splitparams=pairsofparams[i].split('=')
if (len(splitparams))==2:
param[splitparams[0]]=splitparams[1]
return param
def yt_get_all_url_maps_name(url):
conn = urllib2.urlopen(url)
encoding = conn.headers.getparam('charset')
content = conn.read().decode(encoding)
s = re.findall(r'"url_encoded_fmt_stream_map": "([^"]+)"', content)
if s:
s = s[0].split(',')
s = [a.replace('\\u0026', '&') for a in s]
s = [urllib2.parse_keqv_list(a.split('&')) for a in s]
n = re.findall(r'<title>(.+) - YouTube</title>', content)
return (s or [],
HTMLParser.HTMLParser().unescape(n[0]))
def yt_get_url(z):
#return urllib.unquote(z['url'] + '&signature=%s' % z['sig'])
return urllib.unquote(z['url'])
def youtube_video_link(url):
# 18 - mp4
fmt = '18'
s, n = yt_get_all_url_maps_name(url)
for z in s:
if z['itag'] == fmt:
if 'mp4' in z['type']:
ext = '.mp4'
elif 'flv' in z['type']:
ext = '.flv'
found = True
link = yt_get_url(z)
return link
def sxaddLink(name,url,iconimage,movie_name,mode=4):
ok=True
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": movie_name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz)
return ok
def addLink(name,url,iconimage,movie_name):
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultVideo.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": movie_name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=url,listitem=liz)
return ok
def addNext(name,page,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(page)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
def addDir(name,url,mode,iconimage):
u=sys.argv[0]+"?url="+urllib.quote_plus(url)+"&mode="+str(mode)+"&name="+urllib.quote_plus(name)
ok=True
liz=xbmcgui.ListItem(name, iconImage="DefaultFolder.png", thumbnailImage=iconimage)
liz.setInfo( type="Video", infoLabels={ "Title": name } )
ok=xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),url=u,listitem=liz,isFolder=True)
return ok
params=get_params()
url=None
name=None
mode=None
try:
url=urllib.unquote_plus(params["url"])
except:
pass
try:
name=urllib.unquote_plus(params["name"])
except:
pass
try:
mode=int(params["mode"])
except:
pass
#print "Mode: "+str(mode)
#print "URL: "+str(url)
#print "Name: "+str(name)
if mode==None or url==None or len(url)<1:
ROOT()
elif mode==2:
CAUTA_LIST(url)
elif mode==3:
CAUTA(url)
elif mode==31:
CAUTA(url, " dublat")
elif mode==4:
VIDEO(url,name)
elif mode==9:
SXVIDEO_EPISOD_PLAY(url)
elif mode==10:
SXVIDEO_FILM_PLAY(url)
xbmcplugin.endOfDirectory(int(sys.argv[1]))
| gpl-2.0 | 5,968,702,429,578,963,000 | 31.035928 | 206 | 0.583645 | false | 3.241442 | false | false | false |
GNOME/gedit-plugins | plugins/sessionsaver/sessionsaver/store/session.py | 1 | 1484 | # -*- coding: utf-8 -*-
# store.py
# This file is part of gedit Session Saver Plugin
#
# Copyright (C) 2006-2007 - Steve Frécinaux <[email protected]>
# Copyright (C) 2010 - Kenny Meyer <[email protected]>
#
# gedit Session Saver Plugin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# gedit Session Saver Plugin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gedit Session Saver Plugin; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA
from gi.repository import Gio
class Session(object):
def __init__(self, name, files = None):
super(Session, self).__init__()
self.name = name
if files is None:
files = []
self.files = files
def __lt__(self, session):
return (self.name.lower() < session.name.lower())
def __eq__(self, session):
return (self.name.lower() == session.name.lower())
def add_file(self, filename):
self.files.append(Gio.file_new_for_uri(filename))
# ex:ts=4:et:
| gpl-2.0 | -927,613,206,277,590,700 | 34.309524 | 79 | 0.686446 | false | 3.617073 | false | false | false |
JorisDeRieck/Flexget | flexget/components/managed_lists/lists/pending_list/db.py | 4 | 4844 | from __future__ import unicode_literals, division, absolute_import
import logging
from builtins import * # pylint: disable=unused-import, redefined-builtin
from datetime import datetime
from sqlalchemy import Column, Unicode, Integer, DateTime, func, Boolean
from sqlalchemy.orm import relationship
from sqlalchemy.sql.elements import and_
from sqlalchemy.sql.schema import ForeignKey
from flexget import db_schema
from flexget.db_schema import versioned_base
from flexget.utils.database import entry_synonym, with_session
plugin_name = 'pending_list'
log = logging.getLogger(plugin_name)
Base = versioned_base(plugin_name, 0)
@db_schema.upgrade(plugin_name)
def upgrade(ver, session):
ver = 0
return ver
class PendingListList(Base):
__tablename__ = 'pending_list_lists'
id = Column(Integer, primary_key=True)
name = Column(Unicode, unique=True)
added = Column(DateTime, default=datetime.now)
entries = relationship(
'PendingListEntry', backref='list', cascade='all, delete, delete-orphan', lazy='dynamic'
)
def to_dict(self):
return {'id': self.id, 'name': self.name, 'added_on': self.added}
class PendingListEntry(Base):
__tablename__ = 'wait_list_entries'
id = Column(Integer, primary_key=True)
list_id = Column(Integer, ForeignKey(PendingListList.id), nullable=False)
added = Column(DateTime, default=datetime.now)
title = Column(Unicode)
original_url = Column(Unicode)
_json = Column('json', Unicode)
entry = entry_synonym('_json')
approved = Column(Boolean)
def __init__(self, entry, pending_list_id):
self.title = entry['title']
self.original_url = entry.get('original_url') or entry['url']
self.entry = entry
self.list_id = pending_list_id
self.approved = False
def __repr__(self):
return '<PendingListEntry,title=%s,original_url=%s,approved=%s>' % (
self.title,
self.original_url,
self.approved,
)
def to_dict(self):
return {
'id': self.id,
'list_id': self.list_id,
'added_on': self.added,
'title': self.title,
'original_url': self.original_url,
'entry': dict(self.entry),
'approved': self.approved,
}
@with_session
def get_pending_lists(name=None, session=None):
log.debug('retrieving pending lists')
query = session.query(PendingListList)
if name:
log.debug('searching for pending lists with name %s', name)
query = query.filter(PendingListList.name.contains(name))
return query.all()
@with_session
def get_list_by_exact_name(name, session=None):
log.debug('returning pending list with name %s', name)
return (
session.query(PendingListList)
.filter(func.lower(PendingListList.name) == name.lower())
.one()
)
@with_session
def get_list_by_id(list_id, session=None):
log.debug('returning pending list with id %d', list_id)
return session.query(PendingListList).filter(PendingListList.id == list_id).one()
@with_session
def delete_list_by_id(list_id, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('deleting pending list with id %d', list_id)
session.delete(entry_list)
@with_session
def get_entries_by_list_id(
list_id,
start=None,
stop=None,
order_by='title',
descending=False,
approved=False,
filter=None,
session=None,
):
log.debug('querying entries from pending list with id %d', list_id)
query = session.query(PendingListEntry).filter(PendingListEntry.list_id == list_id)
if filter:
query = query.filter(func.lower(PendingListEntry.title).contains(filter.lower()))
if approved:
        query = query.filter(PendingListEntry.approved == approved)
if descending:
query = query.order_by(getattr(PendingListEntry, order_by).desc())
else:
query = query.order_by(getattr(PendingListEntry, order_by))
return query.slice(start, stop).all()
@with_session
def get_entry_by_title(list_id, title, session=None):
entry_list = get_list_by_id(list_id=list_id, session=session)
if entry_list:
log.debug('fetching entry with title `%s` from list id %d', title, list_id)
return (
session.query(PendingListEntry)
.filter(and_(PendingListEntry.title == title, PendingListEntry.list_id == list_id))
.first()
)
@with_session
def get_entry_by_id(list_id, entry_id, session=None):
log.debug('fetching entry with id %d from list id %d', entry_id, list_id)
return (
session.query(PendingListEntry)
.filter(and_(PendingListEntry.id == entry_id, PendingListEntry.list_id == list_id))
.one()
)
| mit | 8,526,603,098,097,593,000 | 30.454545 | 96 | 0.657308 | false | 3.585492 | false | false | false |
b6d/lima | test/test_dump.py | 1 | 11623 | from collections import OrderedDict
from datetime import date, datetime
import pytest
from lima import fields, schema
# model -----------------------------------------------------------------------
class Knight:
'''A knight.'''
def __init__(self, title, name, number, born):
self.title = title
self.name = name
self.number = number
self.born = born
class King(Knight):
'''A king is a knight with subjects.'''
def __init__(self, title, name, number, born, subjects=None):
super().__init__(title, name, number, born)
self.subjects = subjects if subjects is not None else []
# schemas ---------------------------------------------------------------------
class KnightSchema(schema.Schema):
title = fields.String()
name = fields.String()
number = fields.Integer()
born = fields.Date()
class KnightDictSchema(schema.Schema):
title = fields.String(key='title')
name = fields.String(key='name')
number = fields.Integer(key='number')
born = fields.Date(key='born')
class KnightListSchema(schema.Schema):
title = fields.String(key=0)
name = fields.String(key=1)
number = fields.Integer(key=2)
born = fields.Date(key=3)
class FieldWithAttrArgSchema(schema.Schema):
date_of_birth = fields.Date(attr='born')
class FieldWithGetterArgSchema(schema.Schema):
full_name = fields.String(
get=lambda obj: '{} {}'.format(obj.title, obj.name)
)
class FieldWithValArgSchema(schema.Schema):
constant_date = fields.Date(val=date(2014, 10, 20))
class KingWithEmbeddedSubjectsObjSchema(KnightSchema):
subjects = fields.Embed(schema=KnightSchema(many=True))
class KingWithEmbeddedSubjectsClassSchema(KnightSchema):
subjects = fields.Embed(schema=KnightSchema, many=True)
class KingWithEmbeddedSubjectsStrSchema(KnightSchema):
subjects = fields.Embed(schema=__name__ + '.KnightSchema', many=True)
class KingWithReferencedSubjectsObjSchema(KnightSchema):
subjects = fields.Reference(schema=KnightSchema(many=True), field='name')
class KingWithReferencedSubjectsClassSchema(KnightSchema):
subjects = fields.Reference(schema=KnightSchema, field='name', many=True)
class KingWithReferencedSubjectsStrSchema(KnightSchema):
subjects = fields.Reference(schema=__name__ + '.KnightSchema',
field='name', many=True)
class KingSchemaEmbedSelf(KnightSchema):
boss = fields.Embed(schema=__name__ + '.KingSchemaEmbedSelf',
exclude='boss')
class KingSchemaReferenceSelf(KnightSchema):
boss = fields.Reference(schema=__name__ + '.KingSchemaEmbedSelf',
field='name')
# fixtures --------------------------------------------------------------------
@pytest.fixture
def bedevere():
return Knight('Sir', 'Bedevere', 2, date(502, 2, 2))
@pytest.fixture
def lancelot():
return Knight('Sir', 'Lancelot', 3, date(503, 3, 3))
@pytest.fixture
def galahad():
return Knight('Sir', 'Galahad', 4, date(504, 4, 4))
@pytest.fixture
def knights(bedevere, lancelot, galahad):
return [bedevere, lancelot, galahad]
@pytest.fixture
def arthur(knights):
return King('King', 'Arthur', 1, date(501, 1, 1), knights)
@pytest.fixture
def lancelot_dict():
return {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': date(503, 3, 3),
}
@pytest.fixture
def lancelot_list():
return [
'Sir',
'Lancelot',
3,
date(503, 3, 3),
]
# tests -----------------------------------------------------------------------
def test_dump_single_dict_unordered(lancelot_dict):
knight_dict_schema = KnightDictSchema(many=False, ordered=False)
result = knight_dict_schema.dump(lancelot_dict)
expected = {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': '0503-03-03'
}
assert type(result) == dict
assert result == expected
def test_dump_single_list_unordered(lancelot_list):
knight_list_schema = KnightListSchema(many=False, ordered=False)
result = knight_list_schema.dump(lancelot_list)
expected = {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': '0503-03-03'
}
assert type(result) == dict
assert result == expected
def test_dump_single_unordered(lancelot):
knight_schema = KnightSchema(many=False, ordered=False)
result = knight_schema.dump(lancelot)
expected = {
'title': 'Sir',
'name': 'Lancelot',
'number': 3,
'born': '0503-03-03'
}
assert type(result) == dict
assert result == expected
def test_dump_single_ordered(lancelot):
knight_schema = KnightSchema(many=False, ordered=True)
result = knight_schema.dump(lancelot)
expected = OrderedDict([
('title', 'Sir'),
('name', 'Lancelot'),
('number', 3),
('born', '0503-03-03'),
])
assert type(result) == OrderedDict
assert result == expected
def test_dump_many_unordered(knights):
knight_schema = KnightSchema(many=True, ordered=False)
result = knight_schema.dump(knights)
expected = [
dict(title='Sir', name='Bedevere', number=2, born='0502-02-02'),
dict(title='Sir', name='Lancelot', number=3, born='0503-03-03'),
dict(title='Sir', name='Galahad', number=4, born='0504-04-04'),
]
assert all(type(x) == dict for x in result)
assert result == expected
def test_dump_many_ordered(knights):
knight_schema = KnightSchema(many=True, ordered=True)
result = knight_schema.dump(knights)
expected = [
OrderedDict([('title', 'Sir'), ('name', 'Bedevere'),
('number', 2), ('born', '0502-02-02')]),
OrderedDict([('title', 'Sir'), ('name', 'Lancelot'),
('number', 3), ('born', '0503-03-03')]),
OrderedDict([('title', 'Sir'), ('name', 'Galahad'),
('number', 4), ('born', '0504-04-04')]),
]
assert all(type(x) == OrderedDict for x in result)
assert result == expected
def test_field_exclude_dump(lancelot):
knight_schema = KnightSchema(exclude=['born', 'number'])
result = knight_schema.dump(lancelot)
expected = {
'title': 'Sir',
'name': 'Lancelot',
}
assert result == expected
def test_field_only_dump(lancelot):
knight_schema = KnightSchema(only=['name', 'number'])
result = knight_schema.dump(lancelot)
expected = {
'name': 'Lancelot',
'number': 3,
}
assert result == expected
def test_dump_field_with_attr_arg(lancelot):
attr_schema = FieldWithAttrArgSchema()
result = attr_schema.dump(lancelot)
expected = {
'date_of_birth': '0503-03-03'
}
assert result == expected
def test_dump_field_with_getter_arg(lancelot):
getter_schema = FieldWithGetterArgSchema()
result = getter_schema.dump(lancelot)
expected = {
'full_name': 'Sir Lancelot'
}
assert result == expected
def test_dump_field_with_val_arg(lancelot):
val_schema = FieldWithValArgSchema()
result = val_schema.dump(lancelot)
expected = {
'constant_date': '2014-10-20'
}
assert result == expected
def test_fail_on_unexpected_collection(knights):
knight_schema = KnightSchema(many=False)
with pytest.raises(AttributeError):
knight_schema.dump(knights)
@pytest.mark.parametrize(
'king_schema_cls',
[KingWithEmbeddedSubjectsObjSchema,
KingWithEmbeddedSubjectsClassSchema,
KingWithEmbeddedSubjectsStrSchema]
)
def test_dump_embedding_schema(king_schema_cls, arthur):
king_schema = king_schema_cls()
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'subjects': [
dict(title='Sir', name='Bedevere', number=2, born='0502-02-02'),
dict(title='Sir', name='Lancelot', number=3, born='0503-03-03'),
dict(title='Sir', name='Galahad', number=4, born='0504-04-04'),
]
}
assert king_schema.dump(arthur) == expected
@pytest.mark.parametrize(
'king_schema_cls',
[KingWithReferencedSubjectsObjSchema,
KingWithReferencedSubjectsClassSchema,
KingWithReferencedSubjectsStrSchema]
)
def test_dump_referencing_schema(king_schema_cls, arthur):
king_schema = king_schema_cls()
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'subjects': ['Bedevere', 'Lancelot', 'Galahad']
}
assert king_schema.dump(arthur) == expected
def test_embed_self_schema(arthur):
# a king is his own boss
arthur.boss = arthur
king_schema = KingSchemaEmbedSelf()
result = king_schema.dump(arthur)
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'boss': {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
}
}
assert result == expected
def test_reference_self_schema(arthur):
# a king is his own boss
arthur.boss = arthur
king_schema = KingSchemaReferenceSelf()
result = king_schema.dump(arthur)
expected = {
'title': 'King',
'name': 'Arthur',
'number': 1,
'born': '0501-01-01',
'boss': 'Arthur',
}
assert result == expected
def test_fail_on_unnecessary_keywords():
class EmbedSchema(schema.Schema):
some_field = fields.String()
embed_schema = EmbedSchema(many=True)
class EmbeddingSchema(schema.Schema):
another_field = fields.String()
# here we provide a schema _instance_. the kwarg "many" is unnecessary
incorrect_embed_field = fields.Embed(schema=embed_schema, many=True)
# the incorrect field is constructed lazily. we'll have to access it
with pytest.raises(ValueError):
EmbeddingSchema.__fields__['incorrect_embed_field']._schema_inst
def test_fail_on_unnecessary_arg():
class EmbedSchema(schema.Schema):
some_field = fields.String()
embed_schema = EmbedSchema(many=True)
class EmbeddingSchema(schema.Schema):
another_field = fields.String()
# here we provide a schema _instance_. the kwarg "many" is unnecessary
incorrect_embed_field = fields.Embed(schema=embed_schema, many=True)
# the incorrect field is constructed lazily. we'll have to access it
with pytest.raises(ValueError):
EmbeddingSchema.__fields__['incorrect_embed_field']._schema_inst
def test_dump_exotic_field_names():
exotic_names = [
'', # empty string
'"', # single quote
"'", # double quote
'\u2665', # unicode heart symbol
'print(123)', # valid python code
'print("123\'', # invalid python code
]
class ExoticFieldNamesSchema(schema.Schema):
__lima_args__ = {
'include': {name: fields.String(attr='foo')
for name in exotic_names}
}
class Foo:
def __init__(self):
self.foo = 'foobar'
obj = Foo()
exotic_field_names_schema = ExoticFieldNamesSchema()
result = exotic_field_names_schema.dump(obj)
expected = {name: 'foobar' for name in exotic_names}
assert result == expected
for name in exotic_names:
dump_field_func = exotic_field_names_schema._dump_field_func(name)
result = dump_field_func(obj)
expected = 'foobar'
assert result == expected
| mit | -2,044,180,541,219,316,000 | 26.739857 | 79 | 0.604491 | false | 3.426592 | true | false | false |
g3rd/Expo | expo/slides/templatetags/utils.py | 1 | 3334 | from __future__ import unicode_literals
from django.template import Library, Node, TemplateSyntaxError
from django.conf import settings
from django.template.loader import select_template
from django.template.base import token_kwargs, compile_string
register = Library()
@register.tag
def multi_include(parser, token):
bits = token.split_contents()
if len(bits) < 2:
raise TemplateSyntaxError(
"%r tag takes at least one argument: the name of the template to be included." % bits[0])
template_expressions, extra_index = _template_expressions(bits)
template_expressions = [compile_quote_string(path) for path in template_expressions]
options = {}
remaining_bits = bits[extra_index:]
while remaining_bits:
option = remaining_bits.pop(0)
if option in options:
raise TemplateSyntaxError('The %r option was specified more '
'than once.' % option)
if option == 'with':
value = token_kwargs(remaining_bits, parser, support_legacy=False)
if not value:
raise TemplateSyntaxError('"with" in %r tag needs at least '
'one keyword argument.' % bits[0])
elif option == 'only':
value = True
else:
raise TemplateSyntaxError('Unknown argument for %r tag: %r.' %
(bits[0], option))
options[option] = value
isolated_context = options.get('only', False)
namemap = options.get('with', {})
return MultiIncludeNode(template_expressions, extra_context=namemap,
isolated_context=isolated_context)
class MultiIncludeNode(Node):
def __init__(self, template_name_list, *args, **kwargs):
self.template_name_list = template_name_list
self.extra_context = kwargs.pop('extra_context', {})
self.isolated_context = kwargs.pop('isolated_context', False)
super(MultiIncludeNode, self).__init__(*args, **kwargs)
def render_template(self, template, context):
values = dict([(name, var.resolve(context)) for name, var
in self.extra_context.iteritems()])
if self.isolated_context:
return template.render(context.new(values))
context.update(values)
output = template.render(context)
context.pop()
return output
def render(self, context):
try:
template_names = [exp.render(context) for exp in self.template_name_list]
template = select_template(template_names)
return self.render_template(template, context)
except:
if settings.TEMPLATE_DEBUG:
raise
return ''
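# Collect the template-name expressions that appear before the first 'with'/'only' option keyword.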
def _template_expressions(bits):
extra_index = len(bits)
keyword_indexes = []
for keyword in ['with', 'only']:
try:
keyword_indexes.append(bits.index(keyword))
except ValueError:
pass
if keyword_indexes:
extra_index = min(keyword_indexes)
return bits[1:extra_index], extra_index
def compile_quote_string(path):
if path[0] in ('"', "'") and path[-1] == path[0]:
return compile_string(path[1:-1], "")
else:
raise TemplateSyntaxError('String must contain quotes')
| apache-2.0 | -8,034,546,822,722,200,000 | 35.637363 | 101 | 0.608278 | false | 4.404227 | false | false | false |
simeksgol/BellmanWin_szlim | catbellman.py | 1 | 3272 | import os, glob
import golly as g
def convbellman (text, stx, sty):
textln = text.split ('\n')
gen = -1
glcnt = -1
y = sty;
for ln in textln:
if not ln:
break
if ln [0] == '#':
if ln [0:35] == "#C Solution accepted at generation ":
gen = int (ln [35:])
elif ln [0:26] == "#C Glider count at accept ":
glcnt = int (ln [26:])
else:
x = stx;
for c in ln:
if c == '.':
g.setcell (x, y, 0)
x += 1;
elif c == '?':
g.setcell (x, y, 5)
x += 1;
elif c == '*':
g.setcell (x, y, 3)
x += 1;
elif c == '@':
g.setcell (x, y, 1)
x += 1;
y += 1
return (gen, glcnt)
def clean (rect):
for y in xrange (rect [1], rect [1] + rect [3]):
for x in xrange (rect [0], rect [0] + rect [2]):
if g.getcell (x, y) != 1:
g.setcell (x, y, 0)
def addmarkers (rect):
g.setcell (rect [0], rect [1], 1)
g.setcell (rect [0] + rect [2] - 1, rect [1], 1)
g.setcell (rect [0], rect [1] + rect [3] - 1, 1)
g.setcell (rect [0] + rect [2] - 1, rect [1] + rect [3] - 1, 1)
def analyse (gogen, glcnt, minpop, maxpop, mingl):
if glcnt < mingl:
return (False, 0)
g.run (gogen)
inrect = g.getrect ()
clean (inrect)
endpop = int (g.getpop ())
if endpop < minpop or endpop > maxpop:
return (False, 0)
rect = g.getrect ()
if rect == []:
return (True, 0)
else:
addmarkers (inrect)
return (True, g.hash (inrect))
def main ():
g.update ()
g.check (False)
path = g.getstring ("Output directory:")
files = glob.glob (os.path.join (path, "*.out"))
mingls = g.getstring ("Min number of gliders at accept:")
if mingls == "":
mingl = 0
minpop = 0
maxpop = 1024
else:
mingl = int (mingls)
minpops = g.getstring ("Min population except catalyzers:")
if minpops == "":
minpop = 0
maxpop = 1024
else:
minpop = int (minpops)
maxpop = int (g.getstring ("Max population except catalyzers:"))
if g.getname () != "catbellman_temp":
g.addlayer ()
hashdir = {}
catlist = []
catix = 0
g.new ("catbellman_temp")
g.setrule ("LifeBellman")
for fix, filename in enumerate (files):
patt = g.getrect ()
if patt != []:
g.select (patt)
g.clear (0)
g.setgen ("0")
with open(filename, 'r') as f:
filetext = f.read ()
if fix % 16 == 0:
g.show ("Analysing " + str (fix) + "/" + str (len (files)))
(gogen, glcnt) = convbellman (filetext, 0, 0)
if gogen == -1:
gogen = 128
(use, hash) = analyse (gogen, glcnt, minpop, maxpop, mingl)
if use:
if not hash in hashdir:
catlist.append ([])
hashdir [hash] = catix
catix += 1
cat = hashdir [hash]
catlist [cat].append (filetext)
g.new ("catbellman_temp")
g.setrule ("LifeBellman")
fix = 0
y = 0
for cat in catlist:
x = 96 * (len (cat) - 1)
for filetext in cat:
convbellman (filetext, x, y)
x -= 96
fix += 1
if fix % 32 == 0:
g.show ("Rendering " + str (fix) + "/" + str (len (files)))
g.fit ()
g.check (True)
g.update ()
g.check (False)
y += 96
g.show ("Done")
g.fit ()
g.setstep (-1)
g.check (True)
main ()
| bsd-2-clause | 2,742,611,353,457,835,000 | 20.526316 | 70 | 0.51467 | false | 2.751892 | false | false | false |
sixuanwang/SAMSaaS | wirecloud-develop/src/wirecloud/oauth2provider/pyoauth2.py | 2 | 15938 | import json
from wirecloud.oauth2provider import pyoauth2_utils as utils
class Provider(object):
"""Base provider class for different types of OAuth 2.0 providers."""
def _handle_exception(self, exc):
"""Handle an internal exception that was caught and suppressed.
:param exc: Exception to process.
:type exc: Exception
"""
pass
def _make_response(self, body='', headers=None, status_code=200):
"""Return a response object from the given parameters.
:param body: Buffer/string containing the response body.
:type body: str
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
raise NotImplementedError('Subclasses must implement ' \
'_make_response.')
def _make_redirect_error_response(self, redirect_uri, err):
"""Return a HTTP 302 redirect response object containing the error.
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param err: OAuth error message.
:type err: str
:rtype: requests.Response
"""
params = {
'error': err,
'response_type': None,
'client_id': None,
'redirect_uri': None
}
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={'Location': redirect},
status_code=302)
def _make_json_response(self, data, headers=None, status_code=200):
"""Return a response object from the given JSON data.
:param data: Data to JSON-encode.
:type data: mixed
:param headers: Dict of headers to include in the requests.
:type headers: dict
:param status_code: HTTP status code.
:type status_code: int
:rtype: requests.Response
"""
response_headers = {}
if headers is not None:
response_headers.update(headers)
response_headers['Content-Type'] = 'application/json;charset=UTF-8'
response_headers['Cache-Control'] = 'no-store'
response_headers['Pragma'] = 'no-cache'
return self._make_response(json.dumps(data),
response_headers,
status_code)
class AuthorizationProvider(Provider):
"""OAuth 2.0 authorization provider. This class manages authorization
codes and access tokens. Certain methods MUST be overridden in a
subclass, thus this class cannot be directly used as a provider.
These are the methods that must be implemented in a subclass:
get_client(self, client_id)
# Return a Client instance. Exception if not found
validate_client_secret(self, client, client_secret)
# Return True or False
validate_scope(self, client, scope)
# Return True or False
validate_redirect_uri(self, client_id, redirect_uri)
# Return True or False
validate_access(self) # Use this to validate your app session user
# Return True or False
from_authorization_code(self, client_id, code, scope)
# Return mixed data or None on invalid
from_refresh_token(self, client_id, refresh_token, scope)
# Return mixed data or None on invalid
persist_authorization_code(self, user, client, code, scope)
# Return value ignored
persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data)
# Return value ignored
discard_authorization_code(self, client_id, code)
# Return value ignored
discard_refresh_token(self, client_id, refresh_token)
# Return value ignored
    Optionally, the following may be overridden to achieve desired behavior:
@property
token_length(self)
@property
token_type(self)
@property
token_expires_in(self)
generate_authorization_code(self)
generate_access_token(self)
generate_refresh_token(self)
"""
@property
def token_length(self):
"""Property method to get the length used to generate tokens.
:rtype: int
"""
return 40
@property
def token_type(self):
"""Property method to get the access token type.
:rtype: str
"""
return 'Bearer'
@property
def token_expires_in(self):
"""Property method to get the token expiration time in seconds.
:rtype: int
"""
return 3600
def generate_authorization_code(self):
"""Generate a random authorization code.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_access_token(self):
"""Generate a random access token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def generate_refresh_token(self):
"""Generate a random refresh token.
:rtype: str
"""
return utils.random_ascii_string(self.token_length)
def validate_authorization_code_request(self, request, user, response_type, client, redirect_uri, scope='', **params):
# Check client
if client is None:
return self._make_error_response(request, 'unauthorized_client')
# Check redirect URI
if not self.validate_redirect_uri(client, redirect_uri):
return self._make_error_response(request, 'invalid_request')
# Ensure proper response_type
if response_type != 'code':
return self._make_redirect_error_response(redirect_uri, 'unsupported_response_type')
# Check conditions
# Return proper error responses on invalid conditions
if not self.validate_access():
err = 'access_denied'
return self._make_redirect_error_response(redirect_uri, err)
if not self.validate_scope(client, scope):
err = 'invalid_scope'
return self._make_redirect_error_response(redirect_uri, err)
def get_authorization_code(self, request, user, response_type, client_id, redirect_uri, **params):
"""Generate authorization code HTTP response.
:param response_type: Desired response type. Must be exactly "code".
:type response_type: str
:param client_id: Client ID.
:type client_id: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:rtype: requests.Response
"""
scope = params.get('scope', '')
client = self.get_client(client_id)
error_response = self.validate_authorization_code_request(request, user, response_type, client, redirect_uri, scope)
if error_response is not None:
return error_response
# Generate authorization code
code = self.generate_authorization_code()
# Save information to be used to validate later requests
self.persist_authorization_code(user=user, client=client, code=code, scope=scope)
# Return redirection response
params.update({
'code': code,
'response_type': None,
'client_id': None,
'redirect_uri': None
})
redirect = utils.build_url(redirect_uri, params)
return self._make_response(headers={'Location': redirect}, status_code=302)
def refresh_token(self, request, client_id, client_secret, refresh_token, **params):
"""Generate access token HTTP response from a refresh token.
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param refresh_token: Refresh token.
:type refresh_token: str
:rtype: requests.Response
"""
scope = params.get('scope', '')
# Check conditions
try:
client = self.get_client(client_id)
except:
return self._make_error_response(request, 'invalid_client')
# Validate grant info
is_valid_client_secret = self.validate_client_secret(client, client_secret)
data = self.from_refresh_token(client_id, refresh_token, scope)
is_valid_grant = data is not None
if not is_valid_client_secret or not is_valid_grant:
return self._make_error_response(request, 'invalid_grant')
# Validate scope
if not self.validate_scope(client, scope):
return self._make_error_response(request, 'invalid_scope')
# Discard original refresh token
self.discard_refresh_token(client_id, refresh_token)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data)
# Return json response
return self._make_json_response({
'access_token': access_token,
'token_type': token_type,
'expires_in': expires_in,
'refresh_token': refresh_token
})
def get_token(self, request, client_id, client_secret, redirect_uri, code, **params):
"""Generate access token HTTP response.
:param client_id: Client ID.
:type client_id: str
:param client_secret: Client secret.
:type client_secret: str
:param redirect_uri: Client redirect URI.
:type redirect_uri: str
:param code: Authorization code.
:type code: str
:rtype: requests.Response
"""
scope = params.get('scope', '')
# Check conditions
try:
client = self.get_client(client_id)
except:
return self._make_error_response(request, 'invalid_client')
# Validate grant info
is_valid_redirect_uri = self.validate_redirect_uri(client, redirect_uri)
is_valid_client_secret = self.validate_client_secret(client, client_secret)
data = self.from_authorization_code(client_id, code, scope)
is_valid_grant = data is not None
if not is_valid_client_secret or not is_valid_grant or not is_valid_redirect_uri:
return self._make_error_response(request, 'invalid_grant')
# Validate scope
if not self.validate_scope(client, scope):
return self._make_error_response(request, 'invalid_scope')
# Discard original authorization code
self.discard_authorization_code(client_id, code)
# Generate access tokens once all conditions have been met
access_token = self.generate_access_token()
token_type = self.token_type
expires_in = self.token_expires_in
refresh_token = self.generate_refresh_token()
# Save information to be used to validate later requests
self.persist_token_information(client_id=client_id,
scope=scope,
access_token=access_token,
token_type=token_type,
expires_in=expires_in,
refresh_token=refresh_token,
data=data)
# Return json response
return self._make_json_response({
'access_token': access_token,
'token_type': token_type,
'expires_in': expires_in,
'refresh_token': refresh_token
})
def get_token_from_post_data(self, request, data):
"""Get a token response from POST data.
:param data: POST data containing authorization information.
:type data: dict
:rtype: requests.Response
"""
try:
# Verify OAuth 2.0 Parameters
for x in ['grant_type', 'client_id', 'client_secret']:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {}".format(x))
# Handle get token from refresh_token
if data['grant_type'] == 'refresh_token':
if 'refresh_token' not in data:
raise TypeError("Missing required OAuth 2.0 POST param: refresh_token")
return self.refresh_token(request, **data)
elif data['grant_type'] == 'authorization_code':
# Handle get token from authorization code
for x in ['redirect_uri', 'code']:
if not data.get(x):
raise TypeError("Missing required OAuth 2.0 POST param: {}".format(x))
return self.get_token(request, **data)
else:
return self._make_error_response(request, 'unsupported_grant_type')
except TypeError as exc:
self._handle_exception(exc)
# Catch missing parameters in request
return self._make_error_response(request, 'invalid_request')
except StandardError as exc:
self._handle_exception(exc)
# Catch all other server errors
return self._make_error_response(request, 'server_error')
def get_client(self, client_id): # pragma: no cover
raise NotImplementedError('Subclasses must implement get_client.')
def validate_client_secret(self, client, client_secret): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_client_secret.')
def validate_redirect_uri(self, client, redirect_uri): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_redirect_uri.')
def validate_scope(self, client, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_scope.')
def validate_access(self): # pragma: no cover
raise NotImplementedError('Subclasses must implement validate_access.')
def from_authorization_code(self, client_id, code, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement from_authorization_code.')
def from_refresh_token(self, client_id, refresh_token, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement from_refresh_token.')
def persist_authorization_code(self, client, code, scope): # pragma: no cover
raise NotImplementedError('Subclasses must implement persist_authorization_code.')
def persist_token_information(self, client_id, scope, access_token,
token_type, expires_in, refresh_token,
data): # pragma: no cover
raise NotImplementedError('Subclasses must implement persist_token_information.')
def discard_authorization_code(self, client_id, code): # pragma: no cover
raise NotImplementedError('Subclasses must implement discard_authorization_code.')
def discard_refresh_token(self, client_id, refresh_token): # pragma: no cover
raise NotImplementedError('Subclasses must implement discard_refresh_token.')
| gpl-2.0 | -3,620,813,159,037,716,500 | 36.325527 | 124 | 0.600891 | false | 4.598384 | false | false | false |
disturbedfood/timeplan | structures.py | 1 | 1055 | class Course:
def __init__(self, name, hashcode, code):
self.subjects = set()
self.name = name
self.hashcode = hashcode
self.code = code
@classmethod
def from_db(cls, row):
name = row[0]
hashcode = row[1]
return cls(name, hashcode, "")
def add_subject(self, subject_code):
self.subjects.add(subject_code)
def response_json(self):
return {"name": self.name, "code": self.hashcode, "subjects": list(self.subjects)}
class DataRow:
def __init__(self):
self.week = ""
self.day = ""
self.date = ""
self.start = ""
self.end = ""
self.code = ""
self.type = ""
self.info = ""
self.campus = ""
self.rooms = ""
def get_data_tuple(self):
return (self.week, self.day, self.date, self.start, self.end, self.code, self.type, self.info, self.campus, self.rooms)
def get_csv_data(self):
return ";".join(self.get_data_tuple) + ";\n"
| apache-2.0 | -2,402,610,393,383,240,700 | 27.513514 | 126 | 0.522275 | false | 3.552189 | false | false | false |
ancafarcas/superdesk-core | apps/preferences.py | 2 | 12825 | # -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from flask import request
from eve.validation import ValidationError
from eve.utils import config
import logging
import superdesk
from superdesk.resource import Resource
from superdesk.services import BaseService
from superdesk.utc import utcnow
from superdesk import get_backend
from superdesk import get_resource_service
from superdesk.workflow import get_privileged_actions
_preferences_key = 'preferences'
_user_preferences_key = 'user_preferences'
_session_preferences_key = 'session_preferences'
_privileges_key = 'active_privileges'
_action_key = 'allowed_actions'
logger = logging.getLogger(__name__)
def init_app(app):
endpoint_name = 'preferences'
service = PreferencesService(endpoint_name, backend=get_backend())
PreferencesResource(endpoint_name, app=app, service=service)
app.on_session_end -= service.on_session_end
app.on_session_end += service.on_session_end
app.on_role_privileges_revoked -= service.on_role_privileges_revoked
app.on_role_privileges_revoked += service.on_role_privileges_revoked
superdesk.intrinsic_privilege(resource_name=endpoint_name, method=['PATCH'])
def enhance_document_with_default_prefs(doc):
user_prefs = doc.get(_user_preferences_key, {})
available = dict(superdesk.default_user_preferences)
available.update(user_prefs)
def sync_field(field, dest, default):
if not isinstance(dest, dict) or not isinstance(default, dict):
return
if default.get(field):
dest[field] = default[field]
elif dest.get(field):
dest.pop(field, None)
# make sure label and category are up-to-date
for k, v in available.items():
default = superdesk.default_user_preferences.get(k)
if default:
sync_field('label', v, default)
sync_field('category', v, default)
doc[_user_preferences_key] = available
class PreferencesResource(Resource):
datasource = {
'source': 'users',
'projection': {
_session_preferences_key: 1,
_user_preferences_key: 1,
_privileges_key: 1,
_action_key: 1,
'_etag': 1
}
}
schema = {
_session_preferences_key: {'type': 'dict', 'required': True},
_user_preferences_key: {'type': 'dict', 'required': True},
_privileges_key: {'type': 'dict'},
_action_key: {'type': 'list'}
}
resource_methods = []
item_methods = ['GET', 'PATCH']
superdesk.register_default_user_preference('feature:preview', {
'type': 'bool',
'enabled': False,
'default': False,
'label': 'Enable Feature Preview',
'category': 'feature',
'privileges': ['feature_preview']
})
superdesk.register_default_user_preference('archive:view', {
'type': 'string',
'allowed': ['mgrid', 'compact'],
'view': 'mgrid',
'default': 'mgrid',
'label': 'Users archive view format',
'category': 'archive'
})
superdesk.register_default_user_preference('singleline:view', {
'type': 'bool',
'enabled': None,
'default': False,
'label': 'Enable Single Line View',
'category': 'rows'
})
superdesk.register_default_user_preference('editor:theme', {
'type': 'string',
'theme': '',
})
superdesk.register_default_user_preference('workqueue:items', {
'items': []
})
superdesk.register_default_user_preference('dashboard:ingest', {
'providers': []
})
superdesk.register_default_user_preference('agg:view', {
'active': {},
})
superdesk.register_default_user_preference('templates:recent', {})
superdesk.register_default_user_preference('dateline:located', {
'type': 'dict',
'label': 'Located',
'category': 'article_defaults'
})
superdesk.register_default_user_preference('categories:preferred', {
'type': 'dict',
'category': 'categories',
'label': 'Preferred Categories',
'selected': {},
})
superdesk.register_default_user_preference('desks:preferred', {
'type': 'dict',
'category': 'desks',
'label': 'Preferred Desks',
'selected': {},
})
superdesk.register_default_user_preference('article:default:place', {
'type': 'list',
'label': 'Place',
'category': 'article_defaults',
'place': []
})
superdesk.register_default_user_preference('spellchecker:status', {
'type': 'bool',
'enabled': True,
'default': True
})
superdesk.register_default_user_preference('destination:active', {})
superdesk.register_default_session_preference('scratchpad:items', [])
superdesk.register_default_session_preference('desk:last_worked', '')
superdesk.register_default_session_preference('desk:items', [])
superdesk.register_default_session_preference('stage:items', [])
superdesk.register_default_session_preference('pinned:items', [])
class PreferencesService(BaseService):
def on_session_end(self, user_id, session_id):
service = get_resource_service('users')
user_doc = service.find_one(req=None, _id=user_id)
session_prefs = user_doc.get(_session_preferences_key, {}).copy()
if not isinstance(session_id, str):
session_id = str(session_id)
if session_id in session_prefs:
del session_prefs[session_id]
service.system_update(user_id, {_session_preferences_key: session_prefs}, user_doc)
def set_session_based_prefs(self, session_id, user_id):
service = get_resource_service('users')
user_doc = service.find_one(req=None, _id=user_id)
session_prefs = user_doc.get(_session_preferences_key, {})
available = dict(superdesk.default_session_preferences)
if available.get('desk:last_worked') == '' and user_doc.get('desk'):
available['desk:last_worked'] = user_doc.get('desk')
session_prefs.setdefault(str(session_id), available)
service.system_update(user_id, {_session_preferences_key: session_prefs}, user_doc)
def set_user_initial_prefs(self, user_doc):
if _user_preferences_key not in user_doc:
orig_user_prefs = user_doc.get(_preferences_key, {})
available = dict(superdesk.default_user_preferences)
available.update(orig_user_prefs)
user_doc[_user_preferences_key] = available
def find_one(self, req, **lookup):
session = get_resource_service('sessions').find_one(req=None, _id=lookup['_id'])
_id = session['user'] if session else lookup['_id']
doc = get_resource_service('users').find_one(req, _id=_id)
if doc:
doc['_id'] = session['_id'] if session else _id
return doc
def on_fetched_item(self, doc):
session_id = request.view_args['_id']
session_prefs = doc.get(_session_preferences_key, {}).get(session_id, {})
doc[_session_preferences_key] = session_prefs
self.enhance_document_with_user_privileges(doc)
enhance_document_with_default_prefs(doc)
self._filter_preferences_by_privileges(doc)
def on_update(self, updates, original):
existing_user_preferences = original.get(_user_preferences_key, {}).copy()
existing_session_preferences = original.get(_session_preferences_key, {}).copy()
self.update_user_prefs(updates, existing_user_preferences)
session_id = request.view_args['_id']
self.update_session_prefs(updates, existing_session_preferences, session_id)
def update_session_prefs(self, updates, existing_session_preferences, session_id):
session_prefs = updates.get(_session_preferences_key)
if session_prefs is not None:
for k in (k for k, v in session_prefs.items() if k not in superdesk.default_session_preferences):
raise ValidationError('Invalid preference: %s' % k)
existing = existing_session_preferences.get(session_id, {})
existing.update(session_prefs)
existing_session_preferences[session_id] = existing
updates[_session_preferences_key] = existing_session_preferences
def update_user_prefs(self, updates, existing_user_preferences):
user_prefs = updates.get(_user_preferences_key)
if user_prefs is not None:
            # reject any submitted preference key that is not among the registered defaults
            for k in (k for k, v in user_prefs.items() if k not in superdesk.default_user_preferences):
raise ValidationError('Invalid preference: %s' % k)
existing_user_preferences.update(user_prefs)
updates[_user_preferences_key] = existing_user_preferences
def update(self, id, updates, original):
session = get_resource_service('sessions').find_one(req=None, _id=original['_id'])
original_unpatched = self.backend.find_one(self.datasource, req=None, _id=session['user'])
updated = original_unpatched.copy()
updated.update(updates)
del updated['_id']
res = self.backend.update(self.datasource, original_unpatched['_id'], updated, original_unpatched)
updates.update(updated)
# Return only the patched session prefs
session_prefs = updates.get(_session_preferences_key, {}).get(str(original['_id']), {})
updates[_session_preferences_key] = session_prefs
self.enhance_document_with_user_privileges(updates)
enhance_document_with_default_prefs(updates)
return res
def enhance_document_with_user_privileges(self, user_doc):
role_doc = get_resource_service('users').get_role(user_doc)
get_resource_service('users').set_privileges(user_doc, role_doc)
user_doc[_action_key] = get_privileged_actions(user_doc[_privileges_key])
def get_user_preference(self, user_id):
"""
This function returns preferences for the user.
"""
doc = get_resource_service('users').find_one(req=None, _id=user_id)
prefs = doc.get(_user_preferences_key, {})
return prefs
def email_notification_is_enabled(self, user_id=None, preferences=None):
"""
This function checks if email notification is enabled or not based on the preferences.
"""
if user_id:
preferences = self.get_user_preference(user_id)
send_email = preferences.get('email:notification', {}) if isinstance(preferences, dict) else {}
return send_email and send_email.get('enabled', False)
def is_authorized(self, **kwargs):
"""
Returns False if logged-in user is trying to update other user's or session's privileges.
:param kwargs:
:return: True if authorized, False otherwise
"""
if not kwargs.get('_id') or not kwargs.get('user_id'):
return False
session = get_resource_service('sessions').find_one(req=None, _id=kwargs.get('_id'))
if not session:
return False
return str(kwargs.get('user_id')) == str(session.get('user'))
def on_role_privileges_revoked(self, role, role_users):
"""Runs when user privilage has been revoked.
Update the session for active user so that preferences can be reloaded.
:param dict role: role getting updated
:param list role_users: list of user belonging to the role.
"""
if not role_users or not role:
return
logger.info('On_Role_Privileges_Revoked: Updating Users for Role:{}.'.format(role.get(config.ID_FIELD)))
for user in role_users:
try:
self.system_update(user[config.ID_FIELD], {config.LAST_UPDATED: utcnow()}, user)
            except Exception:
                logger.warning('On_Role_Privileges_Revoked: Failed to update user:{} with role:{}.'.
                               format(user.get(config.ID_FIELD), role.get(config.ID_FIELD)), exc_info=True)
def _filter_preferences_by_privileges(self, doc):
privileges = doc[_privileges_key]
preferences = doc[_user_preferences_key]
def has_missing_privileges(prefs):
prefs = prefs if isinstance(prefs, list) else [prefs]
return [priv for pref in prefs for priv in pref.get('privileges', []) if not privileges.get(priv)]
doc[_user_preferences_key] = {k: v for k, v in preferences.items() if not has_missing_privileges(v)}
| agpl-3.0 | -8,090,301,490,491,624,000 | 37.863636 | 112 | 0.635789 | false | 3.905298 | true | false | false |
Hawk94/coin_tracker | backend/settings.py | 1 | 4864 | """
Django settings for backend project.
Generated by 'django-admin startproject' using Django 1.11.1.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
import environ
import dj_database_url
from configurations import Configuration, values
class BaseConfiguration(Configuration):
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'mnj9$1z4d$yllwa^6(&*&@*_ksz&$!ya-7-!*-hu^419be=+&a'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# THIRD PARTY APPS
'rest_framework',
'rest_framework_swagger',
'corsheaders',
'storages',
'django_extensions',
# LOCAL APPS
'main.bitfinex',
'main.coins',
'main.rates',
'main.stocks',
'core',
]
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticatedOrReadOnly',
)
}
CORS_ORIGIN_ALLOW_ALL = True
MIDDLEWARE = [
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'backend.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'backend.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASE_ENV = values.Value(environ_prefix=None, environ_name='DATABASE_ENV', default='DATABASE_URL')
DATABASE_URL = values.SecretValue(environ_prefix=None, environ_name=str(DATABASE_ENV))
DATABASES = {
'default': dj_database_url.parse(str(DATABASE_URL)),
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Storages
DEFAULT_FILE_STORAGE = 'storages.backends.s3boto3.S3Boto3Storage'
AWS_STORAGE_BUCKET_NAME = 'www.dustapp.io'
AWS_S3_REGION_NAME = 'eu-west-2'
AWS_S3_HOST = 's3.eu-west-2.amazonaws.com'
AWS_S3_SIGNATURE_VERSION = 's3v4'
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATICFILES_STORAGE = 'whitenoise.storage.CompressedManifestStaticFilesStorage'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = [
]
OPEN_EXCHANGE_APP_ID = values.SecretValue(environ_prefix=None)
ALPHA_VANTAGE_API_KEY = values.SecretValue(environ_prefix=None)
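    # --- Editor's note (illustrative, not part of the original settings) ---
    # With django-configurations' values.SecretValue, the settings above are
    # resolved from environment variables at startup. A typical environment for
    # this configuration would therefore need something like:
    #
    #   DATABASE_URL=postgres://user:pass@localhost:5432/coins
    #       (or whatever variable name DATABASE_ENV points at)
    #   OPEN_EXCHANGE_APP_ID=<your openexchangerates.org app id>
    #   ALPHA_VANTAGE_API_KEY=<your Alpha Vantage key>
    #
    # The example values are placeholders, not real credentials.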
| mit | -5,219,796,573,422,314,000 | 28.478788 | 105 | 0.63898 | false | 3.817896 | false | false | false |
AiOO/oi | oi/user.py | 1 | 1542 | from flask import Blueprint
from flask import redirect, session, url_for
from functools import partial, wraps
from oi.model import User
from oi.util import timestamp, TIME_MINUTES
def get_user(db_session, user_id):
user = db_session.query(User).filter(User.id == user_id).first()
return user
def get_user_by_google_id(db_session, google_id):
user = db_session.query(User).filter(User.google_id == google_id).first()
return user
def get_user_in_session(db_session):
return get_user(db_session, session['user_id'])
def set_expire():
session['expires'] = timestamp() + 30 * TIME_MINUTES
def sign_in(user):
session['user_id'] = user.id
session['github_access_token'] = user.github_access_token
set_expire()
def sign_out():
session.pop('user_id', None)
session.pop('github_access_token', None)
session.pop('expires', None)
def check_sign_in(need_github=False):
if 'expires' not in session:
return False
if need_github and session['github_access_token'] is None:
return False
if session['expires'] < timestamp():
sign_out()
return False
set_expire()
return True
def require_sign_in(func=None, need_github=False):
if func is None:
return partial(require_sign_in, need_github=need_github)
@wraps(func)
def new_function(*args, **kwargs):
        if check_sign_in(need_github=need_github):
return func(*args, **kwargs)
else:
return redirect(url_for('index'))
return new_function
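# --- Editor's example (illustrative, not part of the original module) ---
# How the decorator above is typically applied to a Flask view. The blueprint
# name "bp" and the view functions are assumptions; only require_sign_in comes
# from this module.
#
# bp = Blueprint('dashboard', __name__)
#
# @bp.route('/settings')
# @require_sign_in
# def settings_view():
#     ...   # only reachable with a fresh, signed-in session
#
# @bp.route('/import')
# @require_sign_in(need_github=True)
# def import_view():
#     ...   # additionally requires a stored GitHub access token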
| mit | 6,469,918,013,855,115,000 | 28.653846 | 77 | 0.66083 | false | 3.426667 | false | false | false |
Flavsditz/projects | eyeTracking/findPupil.py | 1 | 2290 |
import pygtk
pygtk.require('2.0')
import gtk
import sys
import threading
import time
from SimpleCV import VirtualCamera, DrawingLayer, Color, Camera
binarizationValue = 30
class gui:
def __init__(self):
self.gladefile = "binControl.glade"
self.glade = gtk.Builder()
self.glade.add_from_file(self.gladefile)
self.glade.connect_signals(self)
self.glade.get_object("windowMain").show_all()
self.scale = self.glade.get_object("binValue")
#self.scale.connect("value-changed", self.on_binValue_value_changed)
def on_MainWindow_delete_event(self, widget, event):
gtk.main_quit()
def on_binValue_value_changed(self, widget):
print "At change value"
try:
global binarizationValue
binarizationValue = self.glade.get_object("binValue").get_value()
print binarizationValue
except ValueError:
return 0
def on_windowMain_destroy(self, widget):
sys.exit(0)
def startGUI():
gui()
gtk.main()
def startCAM():
global binarizationValue
cam = Camera()
#cam = VirtualCamera("pupilTest.mp4", "video", 300)
while True:
img = cam.getImage().binarize(binarizationValue)
blobs = img.findBlobs()
if blobs is None:
img.show()
else:
blobs[-1].draw(color=(0, 0, 0))
img.drawCircle((blobs[-1].x,blobs[-1].y),6, thickness=-1,color=Color.RED)
img.drawCircle((blobs[-1].centroid()),5, thickness=-1,color=Color.GREEN)
sTmp = "Center of Mass: "+str(blobs[-1].x)+", "+str(blobs[-1].y)
img.drawText(sTmp, x=10, y=30, color=Color.RED, fontsize=20)
sTmp = blobs[-1].centroid()
sTmp = " Bounding Box: "+str(int(sTmp[0]))+", "+ str(int(sTmp[1]))
img.drawText(sTmp, x=10, y=10, color=Color.GREEN, fontsize=20)
img.show()
#time.sleep(10)
def main():
print "First Thread"
guiThread = threading.Thread(target=startGUI)
guiThread.start()
startCAM()
print "Got Here!"
'''
startGUI()
'''
if __name__ == "__main__":
main()
| gpl-2.0 | 4,624,577,015,599,548,000 | 21.367347 | 85 | 0.561135 | false | 3.443609 | false | false | false |
gundramleifert/exp_tf | util/LoaderUtil.py | 1 | 4488 | from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scipy import misc
import STR2CTC
import os
import codecs
def read_image_list(pathToList):
"""Reads a .txt file containing paths to the images
Args:
image_list_file: a .txt file with one /path/to/image per line
label: optionally, if set label will be pasted after each line
Returns:
List with all filenames in file image_list_file
"""
f = open(pathToList, 'r')
filenames = []
for line in f:
if line[-1] == '\n':
filenames.append(line[:-1])
else:
filenames.append(line)
f.close()
return filenames
def get_batch_labels(bList, cm):
u_labels = []
for path in bList:
labelFile = path[:] + ".txt"
tmp = codecs.open(labelFile, 'r', encoding='utf-8')
u_str = tmp.readline()
u_labels.append(u_str)
# print(str)
if tmp is not None:
tmp.close()
idx, val, shape = STR2CTC.target_string_list_to_ctc_tensor_repr(u_labels, cm)
return idx, val, shape
def get_batch_imgs(bList, imgW, mvn):
imgs = []
seqL = []
# print("\n")
for path in bList:
# print(path)
aImg = misc.imread(path)
width = aImg.shape[1]
hei = aImg.shape[0]
# aSeqL = min(width, imgW)
# aSeqL = max(aSeqL, imgW / 2)
aSeqL = width
seqL.append(aSeqL)
# aImg = aImg.astype('float32')
aImg = aImg / 255.0
if mvn:
std = np.std(aImg)
mean = np.mean(aImg)
tImg = (aImg - mean) / std
aImg = tImg
if width < imgW:
padW = imgW - width
npad = ((0, 0), (0, padW))
tImg = np.pad(aImg, npad, mode='constant', constant_values=0)
aImg = tImg
# if width > imgW:
# tImg = aImg[:, :imgW]
# aImg = tImg
# plt.imshow(aImg, cmap=plt.cm.gray)
# plt.show()
imgs.append(aImg)
bSize = len(bList)
imgBatched = np.zeros((bSize, hei, imgW, 1), dtype='float32')
# batch the image list
for idx, img in enumerate(imgs):
imgBatched[idx, :, :, 0] = img
return imgBatched, seqL
def get_list_vals(bList, cm, imgW, mvn=False):
tgtIdx, tgtVal, tgtShape = get_batch_labels(bList, cm)
inpBatch, inpSeqL = get_batch_imgs(bList, imgW, mvn)
return inpBatch, inpSeqL, tgtIdx, tgtVal, tgtShape
def clean_list(list, imgW, cm, subsampling=-1):
res = []
# Count the skipped Images (width too big)
countW = 0
# Count the skipped Images (char not in charMap)
countC = 0
# Count the skipped Images (subsampling too much)
countS = 0
for path in list:
aImg = misc.imread(path)
width = aImg.shape[1]
# Skip image if image width is bigger than imgW
if width > imgW:
countW += 1
continue
# Skip image if a character is not in charMap
skip = False
labelFile = path[:] + ".txt"
tmp = codecs.open(labelFile, 'r', encoding='utf-8')
u_str = tmp.readline()
if subsampling > 0:
if subsampling * len(u_str) > width:
countS += 1
continue
if tmp is not None:
tmp.close()
count = 0
lastCh = -1
for c in u_str:
try:
ch = cm.get_channel(c)
if lastCh == ch:
count += 1
lastCh = ch
except KeyError:
# print('Character \'{}\' not in charMap, skipping Image...'.format(c))
skip = True
countC += 1
break
if not skip:
if subsampling * (count + len(u_str)) > width:
countS += 1
continue
res.append(path)
print("Skipped {} out of {} images...".format(countC + countW + countS, len(list)))
print("...{} too big images, {} images where subsampling is too much and additionally {} images with unknown characters.".format(countW, countS, countC))
return res
if __name__ == '__main__':
os.chdir("..")
list = read_image_list('./resources/lp_only_train.lst')
imgBatches, seqL = get_list_vals(list, STR2CTC.get_charmap_lp(), 100)
# print(seqL)
print(imgBatches.shape)
print(imgBatches.dtype)
plt.imshow(imgBatches[129], cmap=plt.cm.gray)
plt.show()
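    # Editor's note (illustrative): clean_list() can be used before batching to
    # drop images that are too wide, contain characters missing from the charMap,
    # or would be subsampled below the label length, e.g.:
    #     list = clean_list(list, 100, STR2CTC.get_charmap_lp(), subsampling=4)
    # The subsampling factor 4 is only an example value, not taken from this repo.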
| apache-2.0 | -4,205,013,080,744,168,000 | 28.92 | 157 | 0.541889 | false | 3.447005 | false | false | false |
ercius/openNCEM | ncempy/eval/multicorr.py | 1 | 2952 | import numpy as np
from ..algo.multicorr_funcs import *
def multicorr(g1, g2, method='cross', upsample_factor=1, verbose=False):
"""Align a reference to an image by cross correlation. The template
and the image must have the same size.
The function takes in FFTs so that any FFT algorithm can be used to
transform the image and template (fft2, mkl, scipack, etc.)
Parameters
----------
g1 : complex ndarray
Fourier transform of reference image.
g2 : complex ndarray
Fourier transform of the image to register (the kernel).
method : str, optional
The correlation method to use. Must be 'phase' or 'cross' or 'hybrid' (default = 'cross')
upsample_factor : int
Upsample factor for subpixel precision of cross correlation. (default = 1)
verbose : bool, default is False
Print output.
Returns
-------
xyShift : list of floats
The shift between G1 and G2 in pixels.
Example
-------
Cross correlate two images already stored as ndarrays. You must input the FFT
of the images.
>>> import ncempy.algo as neval
>>> import numpy as np
>>> im0FFT = np.fft.fft2(im0)
>>> im1FFT = np.fft.fft2(im1)
>>> shifts = neval.multicorr(im0FFT, im1FFT)
"""
# Check to make sure both G1 and G2 are arrays
if type(g1) is not np.ndarray:
raise TypeError('G1 must be an ndarray')
elif type(g2) is not np.ndarray:
raise TypeError('G2 must be an ndarray')
# Check that the inputs are complex FFTs (common error)
if not np.iscomplexobj(g1) or not np.iscomplexobj(g2):
raise TypeError('G1 and G2 must be complex FFTs.')
# Check to make sure method and upsample factor are the correct values
if method not in ['phase', 'cross', 'hybrid']:
print('Unknown method used, setting to cross.')
method = 'cross'
if type(upsample_factor) is not int and type(upsample_factor) is not float:
print('Upsample factor is not an integer or float, setting to 1')
upsample_factor = 1
elif type(upsample_factor) is not int:
print('Upsample factor is not an integer, rounding down')
upsample_factor = int(upsample_factor)
if upsample_factor < 1:
print('Upsample factor is < 1, setting to 1')
upsample_factor = 1
if verbose:
print('upsample factor = {}'.format(upsample_factor))
# Verify images are the same size.
if g1.shape != g2.shape:
raise TypeError('G1 and G2 are not the same size, G1 is {0} and G2 is {1}'.format(g1.shape, g2.shape))
imageCorr = initial_correlation_image(g1, g2, method, verbose=verbose)
xyShift = upsampled_correlation(imageCorr, upsample_factor, verbose=verbose)
return xyShift
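# --- Editor's example (illustrative, not part of the original module) ---
# Recovering a known integer shift between an image and a rolled copy of it.
# The import path follows this file's location (ncempy/eval/multicorr.py);
# adjust it if the package layout differs.
#
# import numpy as np
# from ncempy.eval.multicorr import multicorr
#
# rng = np.random.RandomState(0)
# im0 = rng.rand(64, 64)
# im1 = np.roll(np.roll(im0, 3, axis=0), -5, axis=1)   # shift by (+3, -5) pixels
# shift = multicorr(np.fft.fft2(im0), np.fft.fft2(im1),
#                   method='phase', upsample_factor=4)
# print('recovered (row, col) shift:', shift)  # roughly (3, -5), up to sign convention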
| gpl-3.0 | 1,564,005,175,779,663,400 | 35.9 | 110 | 0.635163 | false | 3.838752 | false | false | false |
citrix-openstack-build/glance | glance/cmd/scrubber.py | 4 | 2498 | #!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011-2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Glance Scrub Service
"""
import os
import sys
# If ../glance/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'glance', '__init__.py')):
sys.path.insert(0, possible_topdir)
from oslo.config import cfg
from glance.common import config
from glance.openstack.common import log
import glance.store
import glance.store.scrubber
CONF = cfg.CONF
def main():
CONF.register_cli_opt(
cfg.BoolOpt('daemon',
short='D',
default=False,
help='Run as a long-running process. When not '
'specified (the default) run the scrub operation '
'once and then exits. When specified do not exit '
'and run scrub on wakeup_time interval as '
'specified in the config.'))
CONF.register_opt(cfg.IntOpt('wakeup_time', default=300))
try:
config.parse_args()
log.setup('glance')
glance.store.create_stores()
glance.store.verify_default_store()
app = glance.store.scrubber.Scrubber(glance.store)
if CONF.daemon:
server = glance.store.scrubber.Daemon(CONF.wakeup_time)
server.start(app)
server.wait()
else:
import eventlet
pool = eventlet.greenpool.GreenPool(1000)
scrubber = app.run(pool)
except RuntimeError as e:
sys.exit("ERROR: %s" % e)
if __name__ == '__main__':
main()
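# --- Editor's note (illustrative) ---
# Typical invocations, assuming the usual 'glance-scrubber' console script and
# the standard oslo.config --config-file option (both assumptions; check your
# installation):
#
#   glance-scrubber --config-file /etc/glance/glance-scrubber.conf       # run once
#   glance-scrubber --config-file /etc/glance/glance-scrubber.conf -D    # run as a daemon,
#                                                                        # waking every wakeup_time seconds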
| apache-2.0 | 7,744,330,308,532,266,000 | 30.620253 | 78 | 0.614091 | false | 3.952532 | false | false | false |
katajakasa/aiohttp-spyne | setup.py | 1 | 1196 | from setuptools import setup
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, "README.rst"), encoding="utf-8") as f:
long_description = f.read()
setup(
name="aiohttp-spyne",
version="1.2.0",
description="Aiohttp transport for Spyne RPC library",
long_description=long_description,
url="https://github.com/katajakasa/aiohttp-spyne",
author="Tuomas Virtanen",
author_email="[email protected]",
license="LGPLv2.1",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Lesser General Public License v2 (LGPLv2)",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: HTTP Servers",
"Operating System :: OS Independent",
"Framework :: AsyncIO",
],
packages=["aiohttp_spyne"],
install_requires=["aiohttp>=3.0.0,<4.0.0", "spyne>=2.13.16"],
)
| lgpl-2.1 | 4,266,790,089,964,007,000 | 34.176471 | 83 | 0.626254 | false | 3.613293 | false | false | false |
apache/airflow | tests/models/test_connection.py | 2 | 26256 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import re
import unittest
from collections import namedtuple
from unittest import mock
import pytest
import sqlalchemy
from cryptography.fernet import Fernet
from parameterized import parameterized
from airflow import AirflowException
from airflow.hooks.base import BaseHook
from airflow.models import Connection, crypto
from airflow.providers.sqlite.hooks.sqlite import SqliteHook
from tests.test_utils.config import conf_vars
ConnectionParts = namedtuple("ConnectionParts", ["conn_type", "login", "password", "host", "port", "schema"])
class UriTestCaseConfig:
def __init__(
self,
test_conn_uri: str,
test_conn_attributes: dict,
description: str,
):
"""
:param test_conn_uri: URI that we use to create connection
:param test_conn_attributes: we expect a connection object created with `test_uri` to have these
attributes
:param description: human-friendly name appended to parameterized test
"""
self.test_uri = test_conn_uri
self.test_conn_attributes = test_conn_attributes
self.description = description
@staticmethod
def uri_test_name(func, num, param):
return f"{func.__name__}_{num}_{param.args[0].description.replace(' ', '_')}"
class TestConnection(unittest.TestCase):
def setUp(self):
crypto._fernet = None
patcher = mock.patch('airflow.models.connection.mask_secret', autospec=True)
self.mask_secret = patcher.start()
self.addCleanup(patcher.stop)
def tearDown(self):
crypto._fernet = None
@conf_vars({('core', 'fernet_key'): ''})
def test_connection_extra_no_encryption(self):
"""
Tests extras on a new connection without encryption. The fernet key
is set to a non-base64-encoded string and the extra is stored without
encryption.
"""
test_connection = Connection(extra='testextra')
assert not test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
@conf_vars({('core', 'fernet_key'): Fernet.generate_key().decode()})
def test_connection_extra_with_encryption(self):
"""
Tests extras on a new connection with encryption.
"""
test_connection = Connection(extra='testextra')
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
def test_connection_extra_with_encryption_rotate_fernet_key(self):
"""
Tests rotating encrypted extras.
"""
key1 = Fernet.generate_key()
key2 = Fernet.generate_key()
with conf_vars({('core', 'fernet_key'): key1.decode()}):
test_connection = Connection(extra='testextra')
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
assert Fernet(key1).decrypt(test_connection._extra.encode()) == b'testextra'
# Test decrypt of old value with new key
with conf_vars({('core', 'fernet_key'): ','.join([key2.decode(), key1.decode()])}):
crypto._fernet = None
assert test_connection.extra == 'testextra'
# Test decrypt of new value with new key
test_connection.rotate_fernet_key()
assert test_connection.is_extra_encrypted
assert test_connection.extra == 'testextra'
assert Fernet(key2).decrypt(test_connection._extra.encode()) == b'testextra'
test_from_uri_params = [
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra=None,
),
description='without extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'extra1=a%20value&extra2=%2Fpath%2F',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
),
description='with extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?' '__extra__=single+value',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra='single value',
),
description='with extras single value',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=arbitrary+string+%2A%29%2A%24',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra='arbitrary string *)*$',
),
description='with extra non-json',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=%5B%22list%22%2C+%22of%22%2C+%22values%22%5D',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson=['list', 'of', 'values'],
),
description='with extras list',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?'
'__extra__=%7B%22my_val%22%3A+%5B%22list%22%2C+%22of%22%2C+%22values%22%5D%2C+%22extra%22%3A+%7B%22nested%22%3A+%7B%22json%22%3A+%22val%22%7D%7D%7D', # noqa: E501
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'my_val': ['list', 'of', 'values'], 'extra': {'nested': {'json': 'val'}}},
),
description='with nested json',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation:1234/schema?extra1=a%20value&extra2=',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': ''},
),
description='with empty extras',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password@host%2Flocation%3Ax%3Ay:1234/schema?'
'extra1=a%20value&extra2=%2Fpath%2F',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='user',
password='password',
port=1234,
extra_dejson={'extra1': 'a value', 'extra2': '/path/'},
),
description='with colon in hostname',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host%2Flocation%3Ax%3Ay:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='user',
password='password with space',
port=1234,
),
description='with encoded password',
),
UriTestCaseConfig(
test_conn_uri='scheme://domain%2Fuser:password@host%2Flocation%3Ax%3Ay:1234/schema',
test_conn_attributes=dict(
conn_type='scheme',
host='host/location:x:y',
schema='schema',
login='domain/user',
password='password',
port=1234,
),
description='with encoded user',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host:1234/schema%2Ftest',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='schema/test',
login='user',
password='password with space',
port=1234,
),
description='with encoded schema',
),
UriTestCaseConfig(
test_conn_uri='scheme://user:password%20with%20space@host:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='',
login='user',
password='password with space',
port=1234,
),
description='no schema',
),
UriTestCaseConfig(
test_conn_uri='google-cloud-platform://?extra__google_cloud_platform__key_'
'path=%2Fkeys%2Fkey.json&extra__google_cloud_platform__scope='
'https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform&extra'
'__google_cloud_platform__project=airflow',
test_conn_attributes=dict(
conn_type='google_cloud_platform',
host='',
schema='',
login=None,
password=None,
port=None,
extra_dejson=dict(
extra__google_cloud_platform__key_path='/keys/key.json',
extra__google_cloud_platform__scope='https://www.googleapis.com/auth/cloud-platform',
extra__google_cloud_platform__project='airflow',
),
),
description='with underscore',
),
UriTestCaseConfig(
test_conn_uri='scheme://host:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='host',
schema='',
login=None,
password=None,
port=1234,
),
description='without auth info',
),
UriTestCaseConfig(
test_conn_uri='scheme://%2FTmP%2F:1234',
test_conn_attributes=dict(
conn_type='scheme',
host='/TmP/',
schema='',
login=None,
password=None,
port=1234,
),
description='with path',
),
UriTestCaseConfig(
test_conn_uri='scheme:///airflow',
test_conn_attributes=dict(
conn_type='scheme',
schema='airflow',
),
description='schema only',
),
UriTestCaseConfig(
test_conn_uri='scheme://@:1234',
test_conn_attributes=dict(
conn_type='scheme',
port=1234,
),
description='port only',
),
UriTestCaseConfig(
test_conn_uri='scheme://:password%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
test_conn_attributes=dict(
conn_type='scheme',
password='password/!@#$%^&*(){}',
),
description='password only',
),
UriTestCaseConfig(
test_conn_uri='scheme://login%2F%21%40%23%24%25%5E%26%2A%28%29%7B%7D@',
test_conn_attributes=dict(
conn_type='scheme',
login='login/!@#$%^&*(){}',
),
description='login only',
),
]
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_from_uri(self, test_config: UriTestCaseConfig):
connection = Connection(uri=test_config.test_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(connection, conn_attr)
if expected_val is None:
assert expected_val is None
if isinstance(expected_val, dict):
assert expected_val == actual_val
else:
assert expected_val == actual_val
expected_calls = []
if test_config.test_conn_attributes.get('password'):
expected_calls.append(mock.call(test_config.test_conn_attributes['password']))
if test_config.test_conn_attributes.get('extra_dejson'):
expected_calls.append(mock.call(test_config.test_conn_attributes['extra_dejson']))
self.mask_secret.assert_has_calls(expected_calls)
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_get_uri_from_uri(self, test_config: UriTestCaseConfig):
"""
This test verifies that when we create a conn_1 from URI, and we generate a URI from that conn, that
when we create a conn_2 from the generated URI, we get an equivalent conn.
1. Parse URI to create `Connection` object, `connection`.
        2. Using this connection, generate URI `generated_uri`.
        3. Using this `generated_uri`, parse and create new Connection `new_conn`.
4. Verify that `new_conn` has same attributes as `connection`.
"""
connection = Connection(uri=test_config.test_uri)
generated_uri = connection.get_uri()
new_conn = Connection(uri=generated_uri)
assert connection.conn_type == new_conn.conn_type
assert connection.login == new_conn.login
assert connection.password == new_conn.password
assert connection.host == new_conn.host
assert connection.port == new_conn.port
assert connection.schema == new_conn.schema
assert connection.extra_dejson == new_conn.extra_dejson
@parameterized.expand([(x,) for x in test_from_uri_params], UriTestCaseConfig.uri_test_name)
def test_connection_get_uri_from_conn(self, test_config: UriTestCaseConfig):
"""
This test verifies that if we create conn_1 from attributes (rather than from URI), and we generate a
URI, that when we create conn_2 from this URI, we get an equivalent conn.
1. Build conn init params using `test_conn_attributes` and store in `conn_kwargs`
2. Instantiate conn `connection` from `conn_kwargs`.
3. Generate uri `get_uri` from this conn.
4. Create conn `new_conn` from this uri.
5. Verify `new_conn` has same attributes as `connection`.
"""
conn_kwargs = {}
for k, v in test_config.test_conn_attributes.items():
if k == 'extra_dejson':
conn_kwargs.update({'extra': json.dumps(v)})
else:
conn_kwargs.update({k: v})
connection = Connection(conn_id='test_conn', **conn_kwargs) # type: ignore
gen_uri = connection.get_uri()
new_conn = Connection(conn_id='test_conn', uri=gen_uri)
for conn_attr, expected_val in test_config.test_conn_attributes.items():
actual_val = getattr(new_conn, conn_attr)
if expected_val is None:
assert actual_val is None
else:
assert actual_val == expected_val
@parameterized.expand(
[
(
"http://:password@host:80/database",
ConnectionParts(
conn_type="http", login='', password="password", host="host", port=80, schema="database"
),
),
(
"http://user:@host:80/database",
ConnectionParts(
conn_type="http", login="user", password=None, host="host", port=80, schema="database"
),
),
(
"http://user:password@/database",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema="database"
),
),
(
"http://user:password@host:80/",
ConnectionParts(
conn_type="http", login="user", password="password", host="host", port=80, schema=""
),
),
(
"http://user:password@/",
ConnectionParts(
conn_type="http", login="user", password="password", host="", port=None, schema=""
),
),
(
"postgresql://user:password@%2Ftmp%2Fz6rqdzqh%2Fexample%3Awest1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password="password",
host="/tmp/z6rqdzqh/example:west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://user@%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb/testdb",
ConnectionParts(
conn_type="postgres",
login="user",
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="testdb",
),
),
(
"postgresql://%2Ftmp%2Fz6rqdzqh%2Fexample%3Aeurope-west1%3Atestdb",
ConnectionParts(
conn_type="postgres",
login=None,
password=None,
host="/tmp/z6rqdzqh/example:europe-west1:testdb",
port=None,
schema="",
),
),
]
)
def test_connection_from_with_auth_info(self, uri, uri_parts):
connection = Connection(uri=uri)
assert connection.conn_type == uri_parts.conn_type
assert connection.login == uri_parts.login
assert connection.password == uri_parts.password
assert connection.host == uri_parts.host
assert connection.port == uri_parts.port
assert connection.schema == uri_parts.schema
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
},
)
def test_using_env_var(self):
conn = SqliteHook.get_connection(conn_id='test_uri')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert 'username' == conn.login
assert 'password' == conn.password
assert 5432 == conn.port
self.mask_secret.assert_called_once_with('password')
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_using_unix_socket_env_var(self):
conn = SqliteHook.get_connection(conn_id='test_uri_no_creds')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert conn.login is None
assert conn.password is None
assert conn.port is None
def test_param_setup(self):
conn = Connection(
conn_id='local_mysql',
conn_type='mysql',
host='localhost',
login='airflow',
password='airflow',
schema='airflow',
)
assert 'localhost' == conn.host
assert 'airflow' == conn.schema
assert 'airflow' == conn.login
assert 'airflow' == conn.password
assert conn.port is None
def test_env_var_priority(self):
conn = SqliteHook.get_connection(conn_id='airflow_db')
assert 'ec2.compute.com' != conn.host
with mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_AIRFLOW_DB': 'postgres://username:[email protected]:5432/the_database',
},
):
conn = SqliteHook.get_connection(conn_id='airflow_db')
assert 'ec2.compute.com' == conn.host
assert 'the_database' == conn.schema
assert 'username' == conn.login
assert 'password' == conn.password
assert 5432 == conn.port
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
assert 'postgres://username:[email protected]:5432/the_database' == hook.get_uri()
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
assert 'postgres://ec2.compute.com/the_database' == hook2.get_uri()
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
assert isinstance(engine, sqlalchemy.engine.Engine)
assert 'postgres://username:[email protected]:5432/the_database' == str(engine.url)
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'postgres://username:[email protected]:5432/the_database',
'AIRFLOW_CONN_TEST_URI_NO_CREDS': 'postgres://ec2.compute.com/the_database',
},
)
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
def test_connection_mixed(self):
with pytest.raises(
AirflowException,
match=re.escape(
"You must create an object using the URI or individual values (conn_type, host, login, "
"password, schema, port or extra).You can't mix these two ways to create this object."
),
):
Connection(conn_id="TEST_ID", uri="mysql://", schema="AAA")
def test_masking_from_db(self):
"""Test secrets are masked when loaded directly from the DB"""
from airflow.settings import Session
session = Session()
try:
conn = Connection(
conn_id=f"test-{os.getpid()}",
conn_type="http",
password="s3cr3t",
extra='{"apikey":"masked too"}',
)
session.add(conn)
session.flush()
# Make sure we re-load it, not just get the cached object back
session.expunge(conn)
self.mask_secret.reset_mock()
from_db = session.query(Connection).get(conn.id)
from_db.extra_dejson
assert self.mask_secret.mock_calls == [
# We should have called it _again_ when loading from the DB
mock.call("s3cr3t"),
mock.call({"apikey": "masked too"}),
]
finally:
session.rollback()
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI': 'sqlite://',
},
)
def test_connection_test_success(self):
conn = Connection(conn_id='test_uri', conn_type='sqlite')
res = conn.test_connection()
assert res[0] is True
assert res[1] == 'Connection successfully tested'
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_NO_HOOK': 'fs://',
},
)
def test_connection_test_no_hook(self):
conn = Connection(conn_id='test_uri_no_hook', conn_type='fs')
res = conn.test_connection()
assert res[0] is False
assert res[1] == 'Unknown hook type "fs"'
@mock.patch.dict(
'os.environ',
{
'AIRFLOW_CONN_TEST_URI_HOOK_METHOD_MISSING': 'ftp://',
},
)
def test_connection_test_hook_method_missing(self):
        conn = Connection(conn_id='test_uri_hook_method_missing', conn_type='ftp')
res = conn.test_connection()
assert res[0] is False
assert res[1] == "Hook FTPHook doesn't implement or inherit test_connection method"
| apache-2.0 | 2,956,835,774,849,428,000 | 37.385965 | 175 | 0.547151 | false | 4.10635 | true | false | false |
fariias/icanfinance | backend/test/account_tests.py | 3 | 1201 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from base import GAETestCase
from gaepermission import facade
from routes import account
import settings
from tekton.gae.middleware.redirect import RedirectResponse
class AccountTests(GAETestCase):
def test_index(self):
email = '[email protected]'
user = facade.save_user_cmd(email)()
response = account.index(user)
self.assert_can_render(response)
def test_edit(self):
email = '[email protected]'
initial_name = 'initial_name'
user = facade.save_user_cmd(email, initial_name)()
self.assertEqual(initial_name, user.name)
self.assertEqual(settings.DEFAULT_LOCALE, user.locale)
self.assertEqual(settings.DEFAULT_TIMEZONE, user.timezone)
edited_name = 'edited_name'
locale = 'pt_BR'
timezone = 'America/Sao_Paulo'
response = account.edit(user, edited_name, locale, timezone)
user = user.key.get()
self.assertIsInstance(response, RedirectResponse)
self.assertEqual(edited_name, user.name)
self.assertEqual(locale, user.locale)
self.assertEqual(timezone, user.timezone)
| mit | -3,730,230,932,860,824,000 | 36.53125 | 68 | 0.676936 | false | 3.824841 | false | false | false |
labsanmartin/Bika-LIMS | bika/lims/content/analysiscategory.py | 3 | 3474 | """Analysis Category - the category of the analysis service
"""
from AccessControl import ClassSecurityInfo
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.interfaces import IAnalysisCategory
from plone.indexer import indexer
from Products.Archetypes.public import *
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.utils import getToolByName
from Products.CMFCore.WorkflowCore import WorkflowException
from zope.interface import implements
import sys
import transaction
@indexer(IAnalysisCategory)
def sortable_title_with_sort_key(instance):
sort_key = instance.getSortKey()
if sort_key:
return "{:010.3f}{}".format(sort_key, instance.Title())
return instance.Title()
schema = BikaSchema.copy() + Schema((
TextField('Comments',
default_output_type = 'text/plain',
allowable_content_types = ('text/plain',),
widget=TextAreaWidget (
description = _("To be displayed below each Analysis "
"Category section on results reports."),
label = _("Comments")),
),
ReferenceField('Department',
required=1,
vocabulary='getDepartments',
vocabulary_display_path_bound=sys.maxsize,
allowed_types=('Department',),
relationship='AnalysisCategoryDepartment',
referenceClass=HoldingReference,
widget=ReferenceWidget(
checkbox_bound=0,
label = _("Department"),
description = _("The laboratory department"),
),
),
ComputedField('DepartmentTitle',
expression="context.getDepartment() and context.getDepartment().Title() or ''",
widget=ComputedWidget(
visible=False,
),
),
FloatField('SortKey',
validators=('SortKeyValidator',),
widget=DecimalWidget(
label = _("Sort Key"),
description = _("Float value from 0.0 - 1000.0 indicating the sort order. Duplicate values are ordered alphabetically."),
),
),
))
schema['description'].widget.visible = True
schema['description'].schemata = 'default'
class AnalysisCategory(BaseContent):
implements(IAnalysisCategory)
security = ClassSecurityInfo()
displayContentsTab = False
schema = schema
_at_rename_after_creation = True
def _renameAfterCreation(self, check_auto_id=False):
from bika.lims.idserver import renameAfterCreation
renameAfterCreation(self)
def getDepartments(self):
bsc = getToolByName(self, 'bika_setup_catalog')
deps = []
for d in bsc(portal_type='Department',
inactive_state='active'):
deps.append((d.UID, d.Title))
return DisplayList(deps)
def workflow_script_deactivat(self):
        # An instance cannot be deactivated if it contains services
pu = getToolByName(self, 'plone_utils')
bsc = getToolByName(self, 'bika_setup_catalog')
ars = bsc(portal_type='AnalysisService', getCategoryUID=self.UID())
if ars:
message = _("Category cannot be deactivated because "
"it contains Analysis Services")
pu.addPortalMessage(message, 'error')
transaction.get().abort()
raise WorkflowException
registerType(AnalysisCategory, PROJECTNAME)
| agpl-3.0 | 3,410,657,442,674,726,400 | 34.090909 | 133 | 0.662349 | false | 4.315528 | false | false | false |
crowd-course/scholars | scholars/courses/migrations/0017_auto_20170812_0545.py | 1 | 1669 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-12 05:45
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0016_coursemember_dri'),
]
operations = [
migrations.AddField(
model_name='course',
name='parent',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='courses.Course'),
),
migrations.AlterField(
model_name='course',
name='num_audio',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='course',
name='num_dri',
field=models.PositiveIntegerField(default=2),
),
migrations.AlterField(
model_name='course',
name='num_graphics',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='course',
name='num_presentation',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
migrations.AlterField(
model_name='course',
name='num_scripting',
field=models.PositiveIntegerField(default=2, validators=[django.core.validators.MinValueValidator(1)]),
),
]
| mit | -5,473,503,350,215,734,000 | 34.510638 | 150 | 0.617735 | false | 4.474531 | false | false | false |
brefsdal/sherpa | sherpa/data.py | 1 | 20062 | #_PYTHON_INSERT_SAO_COPYRIGHT_HERE_(2008)_
#_PYTHON_INSERT_GPL_LICENSE_HERE_
"""
Tools for creating, storing, inspecting, and manipulating data sets
"""
import sys
import inspect
from itertools import izip
import numpy
from sherpa.utils.err import DataErr, NotImplementedErr
from sherpa.utils import SherpaFloat, NoNewAttributesAfterInit, \
print_fields, create_expr, calc_total_error, bool_cast, \
filter_bins
__all__ = ('Data', 'DataSimulFit', 'Data1D', 'Data1DInt', 'Data2D', 'Data2DInt')
class BaseData(NoNewAttributesAfterInit):
"Base class for all data set types"
def _get_filter(self):
return self._filter
def _set_filter(self, val):
self._filter = val
self._mask = True
filter = property(_get_filter, _set_filter,
doc='Filter for dependent variable')
def _get_mask(self):
return self._mask
def _set_mask(self, val):
if (val is True) or (val is False):
self._mask = val
elif (val is None) or numpy.isscalar(val):
raise DataErr('ismask')
else:
self._mask = numpy.asarray(val, numpy.bool_)
self._filter = None
mask = property(_get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self):
"""
Initialize a data object. This method can only be called from
a derived class constructor. Attempts to create a BaseData
instance will raise NotImplementedErr.
Derived class constructors must call this method directly (and
not indirectly through a superclass constructor). When thus
invoked, this method will extract the argument names and
values from the derived class constructor invocation and set
corresponding attributes on the instance (thereby eliminating
the need for the derived class constructor to do its own
attribute setting). If the name of an argument matches the
name of a DataProperty of the derived class, then the
corresponding attribute name will have an underscore prepended
(meaning the property will use the value directly instead of
relying on _get_*/_set_* methods).
"""
if type(self) is BaseData:
raise NotImplementedErr('noinstanceallowed', 'BaseData')
frame = sys._getframe().f_back
cond = (frame.f_code is self.__init__.im_func.func_code)
assert cond, (('%s constructor must call BaseData constructor ' +
'directly') % type(self).__name__)
args = inspect.getargvalues(frame)
self._fields = tuple(args[0][1:])
for f in self._fields:
cond = (f not in vars(self))
assert cond, (("'%s' object already has attribute '%s'") %
(type(self).__name__, f))
setattr(self, f, args[3][f])
self.filter = None
self.mask = True
NoNewAttributesAfterInit.__init__(self)
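    # --- Editor's note (illustrative, not part of the original class) ---
    # Because of the frame inspection above, a derived constructor only has to
    # declare its arguments and call BaseData.__init__ directly; the matching
    # attributes are then set automatically from the call (this is exactly what
    # the Data class below does). A hypothetical 1-D subclass, shown only to
    # illustrate the mechanism:
    #
    #     class MyData1D(BaseData):
    #         def __init__(self, name, x, y, staterror=None, syserror=None):
    #             BaseData.__init__(self)   # picks up name, x, y, staterror, syserror
    #
    #     d = MyData1D('example', [1, 2, 3], [4, 5, 6])
    #     d.x   # -> [1, 2, 3]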
def __str__(self):
"""
Return a listing of the attributes listed in self._fields and,
if present, self._extra_fields.
"""
fields = self._fields + getattr(self, '_extra_fields', ())
fdict = dict(izip(fields, [getattr(self, f) for f in fields]))
return print_fields(fields, fdict)
def apply_filter(self, data):
if data is not None:
if self.filter is not None:
if callable(self.filter):
data = self.filter(data)
else:
data = data[self.filter]
elif self.mask is not True:
if self.mask is False:
raise DataErr('notmask')
data = numpy.asarray(data)
if data.shape != self.mask.shape:
raise DataErr('mismatch', 'mask', 'data array')
data = data[self.mask]
return data
def ignore(self, *args, **kwargs):
kwargs['ignore'] = True
self.notice(*args, **kwargs)
def notice(self, mins, maxes, axislist, ignore=False):
ignore = bool_cast(ignore)
if( str in [type(min) for min in mins] ):
raise DataErr('typecheck', 'lower bound')
elif( str in [type(max) for max in maxes] ):
raise DataErr('typecheck', 'upper bound')
elif( str in [type(axis) for axis in axislist] ):
raise DataErr('typecheck', 'grid')
mask = filter_bins(mins, maxes, axislist)
if mask is None:
self.mask = not ignore
elif not ignore:
if self.mask is True:
self.mask = mask
else:
self.mask |= mask
else:
mask = ~mask
if self.mask is False:
self.mask = mask
else:
self.mask &= mask
class Data(BaseData):
"Generic data set"
def __init__(self, name, indep, dep, staterror=None, syserror=None):
"""
Initialize a Data instance. indep should be a tuple of
independent axis arrays, dep should be an array of dependent
variable values, and staterror and syserror should be arrays
of statistical and systematic errors, respectively, in the
dependent variable (or None).
"""
BaseData.__init__(self)
def __repr__(self):
r = '<%s data set instance' % type(self).__name__
if hasattr(self, 'name'):
r += " '%s'" % self.name
r += '>'
return r
def eval_model(self, modelfunc):
return modelfunc(*self.get_indep())
def eval_model_to_fit(self, modelfunc):
return modelfunc(*self.get_indep(filter=True))
#
# Primary properties. These can depend only on normal attributes (and not
# other properties).
#
def get_indep(self, filter=False):
"Return a tuple containing the independent variables/axes"
indep = getattr(self, 'indep', None)
filter=bool_cast(filter)
if filter:
indep = tuple([self.apply_filter(x) for x in indep])
return indep
def get_dep(self, filter=False):
"Return an array of dependent variable values"
dep = getattr(self, 'dep', None)
filter=bool_cast(filter)
if filter:
dep = self.apply_filter(dep)
return dep
def get_staterror(self, filter=False, staterrfunc=None):
"Return the statistical error array"
staterror = getattr(self, 'staterror', None)
filter=bool_cast(filter)
if filter:
staterror = self.apply_filter(staterror)
if (staterror is None) and (staterrfunc is not None):
dep = self.get_dep()
if filter:
dep = self.apply_filter(dep)
staterror = staterrfunc(dep)
return staterror
def get_syserror(self, filter=False):
"Return the systematic error array"
syserr = getattr(self, 'syserror', None)
filter=bool_cast(filter)
if filter:
syserr = self.apply_filter(syserr)
return syserr
#
# Utility methods
#
def _wrong_dim_error(self, baddim):
raise DataErr('wrongdim', self.name, baddim)
def _no_image_error(self):
raise DataErr('notimage', self.name)
def _no_dim_error(self):
raise DataErr('nodim', self.name)
#
# Secondary properties. To best support subclasses, these should depend
# only on the primary properties whenever possible, though there may be
# instances when they depend on normal attributes.
#
def get_dims(self):
self._no_dim_error()
def get_error(self, filter=False, staterrfunc=None):
"Return total error in dependent variable"
return calc_total_error(self.get_staterror(filter, staterrfunc),
self.get_syserror(filter))
def get_x(self, filter=False):
"Return linear view of independent axis/axes"
self._wrong_dim_error(1)
def get_xerr(self, filter=False):
"Return linear view of bin size in independent axis/axes"
return None
def get_xlabel(self):
"Return label for linear view ofindependent axis/axes"
return 'x'
def get_y(self, filter=False, yfunc=None):
"Return dependent axis in N-D view of dependent variable"
y = self.get_dep(filter)
if yfunc is not None:
if filter:
yfunc = self.eval_model_to_fit(yfunc)
else:
yfunc = self.eval_model(yfunc)
y = (y, yfunc)
return y
def get_yerr(self, filter=False, staterrfunc=None):
"Return errors in dependent axis in N-D view of dependent variable"
return self.get_error(filter, staterrfunc)
def get_ylabel(self, yfunc=None):
"Return label for dependent axis in N-D view of dependent variable"
return 'y'
def get_x0(self, filter=False):
"Return first dimension in 2-D view of independent axis/axes"
self._wrong_dim_error(2)
def get_x0label(self):
"Return label for first dimension in 2-D view of independent axis/axes"
return 'x0'
def get_x1(self, filter=False):
"Return second dimension in 2-D view of independent axis/axes"
self._wrong_dim_error(2)
def get_x1label(self):
"""
Return label for second dimension in 2-D view of independent axis/axes
"""
return 'x1'
# For images, only need y-array
# Also, we do not filter, as imager needs M x N (or
# L x M x N) array
def get_img(self, yfunc=None):
"Return dependent variable as an image"
self._no_image_error()
def get_imgerr(self, yfunc=None):
"Return total error in dependent variable as an image"
self._no_image_error()
def to_guess(self):
arrays = [self.get_y(True)]
arrays.extend(self.get_indep(True))
return tuple(arrays)
def to_fit(self, staterrfunc=None):
return (self.get_dep(True),
self.get_staterror(True, staterrfunc),
self.get_syserror(True))
def to_plot(self, yfunc=None, staterrfunc=None):
return (self.get_x(True),
self.get_y(True, yfunc),
self.get_yerr(True, staterrfunc),
self.get_xerr(True),
self.get_xlabel(),
self.get_ylabel())
def to_contour(self, yfunc=None):
return (self.get_x0(True),
self.get_x1(True),
self.get_y(True, yfunc),
self.get_x0label(),
self.get_x1label())
class DataSimulFit(Data):
def __init__(self, name, datasets):
if len(datasets) == 0:
raise DataErr('zerodatasimulfit', type(self).__name__)
datasets = tuple(datasets)
BaseData.__init__(self)
def eval_model_to_fit(self, modelfuncs):
total_model = []
for func, data in izip(modelfuncs, self.datasets):
total_model.append(data.eval_model_to_fit(func))
return numpy.concatenate(total_model)
def to_fit(self, staterrfunc=None):
total_dep = []
total_staterror = []
total_syserror = []
no_staterror = True
no_syserror = True
for data in self.datasets:
dep, staterror, syserror = data.to_fit(staterrfunc)
total_dep.append(dep)
if staterror is not None:
no_staterror = False
total_staterror.append(staterror)
if syserror is not None:
no_syserror = False
else:
syserror = numpy.zeros_like(dep)
total_syserror.append(syserror)
total_dep = numpy.concatenate(total_dep)
if no_staterror:
total_staterror = None
elif None in total_staterror:
raise DataErr('staterrsimulfit')
else:
total_staterror = numpy.concatenate(total_staterror)
if no_syserror:
total_syserror = None
else:
total_syserror = numpy.concatenate(total_syserror)
return (total_dep, total_staterror, total_syserror)
def to_plot(self, yfunc=None, staterrfunc=None):
return self.datasets[0].to_plot(yfunc.parts[0], staterrfunc)
class DataND(Data):
"Base class for Data1D, Data2D, etc."
def get_dep(self, filter=False):
y = self.y
filter=bool_cast(filter)
if filter:
y = self.apply_filter(y)
return y
class Data1D(DataND):
"1-D data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._x = self.apply_filter(self.x)
except DataErr:
self._x = self.x
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, x, y, staterror=None, syserror=None):
self._x = x
BaseData.__init__(self)
def get_indep(self, filter=False):
filter=bool_cast(filter)
if filter:
return (self._x,)
return (self.x,)
def get_x(self, filter=False):
return self.get_indep(filter)[0]
def get_dims(self, filter=False):
return (len(self.get_x(filter)),)
def get_filter(self, format='%.4f', delim=':'):
        # for derived integrated classes, this will return values at the center of
        # each bin.
x = self.get_x(filter=True)
mask = numpy.ones(len(x), dtype=bool)
if numpy.iterable(self.mask):
mask = self.mask
return create_expr(x, mask, format, delim)
def get_filter_expr(self):
return (self.get_filter(delim='-') + ' ' + self.get_xlabel())
def get_bounding_mask(self):
mask = self.mask
size = None
if numpy.iterable(self.mask):
# create bounding box around noticed image regions
mask = numpy.array(self.mask)
# xi = numpy.where(mask == True)[0]
# xlo = xi.min()
# xhi = xi.max()
# size = (mask[xlo:xhi+1].size,)
# mask = mask[xlo:xhi+1]
size = (mask.size,)
return mask, size
def get_img(self, yfunc=None):
"Return 1D dependent variable as a 1 x N image"
y_img = self.get_y(False, yfunc)
if yfunc is not None:
y_img = (y_img[0].reshape(1,y_img[0].size),
y_img[1].reshape(1,y_img[1].size))
else:
y_img = y_img.reshape(1,y_img.size)
return y_img
def get_imgerr(self):
err = self.get_error()
if err is not None:
err = err.reshape(1,err.size)
return err
def notice(self, xlo=None, xhi=None, ignore=False):
BaseData.notice(self, (xlo,), (xhi,), self.get_indep(), ignore)
class Data1DInt(Data1D):
"1-D integrated data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._lo = self.apply_filter(self.xlo)
self._hi = self.apply_filter(self.xhi)
except DataErr:
self._lo = self.xlo
self._hi = self.xhi
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, xlo, xhi, y, staterror=None, syserror=None):
self._lo = xlo
self._hi = xhi
BaseData.__init__(self)
def get_indep(self, filter=False):
filter=bool_cast(filter)
if filter:
return (self._lo, self._hi)
return (self.xlo, self.xhi)
def get_x(self, filter=False):
indep = self.get_indep(filter)
return (indep[0] + indep[1]) / 2.0
def get_xerr(self, filter=False):
xlo,xhi = self.get_indep(filter)
return xhi-xlo
def notice(self, xlo=None, xhi=None, ignore=False):
BaseData.notice(self, (None, xlo), (xhi, None), self.get_indep(),
ignore)
class Data2D(DataND):
"2-D data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._x0 = self.apply_filter(self.x0)
self._x1 = self.apply_filter(self.x1)
except DataErr:
self._x0 = self.x0
self._x1 = self.x1
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, x0, x1, y, shape=None, staterror=None,
syserror=None):
self._x0 = x0
self._x1 = x1
BaseData.__init__(self)
def get_indep(self, filter=False):
filter=bool_cast(filter)
if filter:
return (self._x0, self._x1)
return (self.x0, self.x1)
def get_x0(self, filter=False):
return self.get_indep(filter)[0]
def get_x1(self, filter=False):
return self.get_indep(filter)[1]
def get_axes(self):
self._check_shape()
# FIXME: how to filter an axis when self.mask is size of self.y?
return (numpy.arange(self.shape[1])+1, numpy.arange(self.shape[0])+1)
def get_dims(self, filter=False):
#self._check_shape()
if self.shape is not None:
return self.shape[::-1]
return (len(self.get_x0(filter)), len(self.get_x1(filter)))
def get_filter_expr(self):
return ''
get_filter = get_filter_expr
def _check_shape(self):
if self.shape is None:
raise DataErr('shape',self.name)
def get_max_pos(self, dep=None):
if dep is None:
dep = self.get_dep(True)
x0 = self.get_x0(True)
x1 = self.get_x1(True)
pos = numpy.asarray(numpy.where(dep == dep.max())).squeeze()
if pos.ndim == 0:
pos = int(pos)
return (x0[pos], x1[pos])
return [(x0[index], x1[index]) for index in pos]
def get_img(self, yfunc=None):
self._check_shape()
y_img = self.get_y(False, yfunc)
if yfunc is not None:
y_img = (y_img[0].reshape(*self.shape),
y_img[1].reshape(*self.shape))
else:
y_img = y_img.reshape(*self.shape)
return y_img
def get_imgerr(self):
self._check_shape()
err = self.get_error()
if err is not None:
err = err.reshape(*self.shape)
return err
def notice(self, x0lo=None, x0hi=None, x1lo=None, x1hi=None, ignore=False):
BaseData.notice(self, (x0lo, x1lo), (x0hi, x1hi), self.get_indep(),
ignore)
class Data2DInt(Data2D):
"2-D integrated data set"
def _set_mask(self, val):
DataND._set_mask(self, val)
try:
self._x0lo = self.apply_filter(self.x0lo)
self._x0hi = self.apply_filter(self.x0hi)
self._x1lo = self.apply_filter(self.x1lo)
self._x1hi = self.apply_filter(self.x1hi)
except DataErr:
self._x0lo = self.x0lo
self._x1lo = self.x1lo
self._x0hi = self.x0hi
self._x1hi = self.x1hi
mask = property(DataND._get_mask, _set_mask,
doc='Mask array for dependent variable')
def __init__(self, name, x0lo, x1lo, x0hi, x1hi, y, shape=None,
staterror=None, syserror=None):
self._x0lo = x0lo
self._x1lo = x1lo
self._x0hi = x0hi
self._x1hi = x1hi
BaseData.__init__(self)
def get_indep(self, filter=False):
filter=bool_cast(filter)
if filter:
return (self._x0lo, self._x1lo, self._x0hi, self._x1hi)
return (self.x0lo, self.x1lo, self.x0hi, self.x1hi)
def get_x0(self, filter=False):
indep = self.get_indep(filter)
return (indep[0] + indep[2]) / 2.0
def get_x1(self, filter=False):
indep = self.get_indep(filter)
return (indep[1] + indep[3]) / 2.0
def notice(self, x0lo=None, x0hi=None, x1lo=None, x1hi=None, ignore=False):
BaseData.notice(self, (None, None, x0lo, x1lo),
(x0hi, x1hi, None, None), self.get_indep(), ignore)
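if __name__ == '__main__':
    # Illustrative sketch only -- not part of the original Sherpa source. It shows how
    # BaseData.__init__ turns the Data1D constructor arguments into attributes via frame
    # introspection, and how notice() builds a mask that apply_filter() then applies.
    # Assumes Python 2 (this module relies on itertools.izip and im_func) and that
    # sherpa.utils.filter_bins selects points within the given bounds; the variable
    # names below are made up for the demo.
    demo_x = numpy.linspace(0.0, 10.0, 11)
    demo_y = demo_x ** 2
    demo = Data1D('demo', demo_x, demo_y)
    print(demo.x is demo_x)           # True: attribute set by BaseData.__init__
    demo.notice(2.0, 5.0)             # keep only points with x roughly in [2, 5]
    print(demo.get_x(filter=True))    # filtered independent axis
    print(demo.get_dep(filter=True))  # dependent values at the noticed points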
| gpl-2.0 | 752,074,171,594,710,800 | 29.769939 | 79 | 0.571977 | false | 3.534531 | false | false | false |
rhyolight/nupic.son | app/soc/modules/gci/models/task.py | 1 | 9890 | # Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GCI Task Model."""
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.modules.gci.models.comment import GCIComment
from soc.modules.gci.models.work_submission import GCIWorkSubmission
import soc.models.profile
import soc.modules.gci.models.organization
import soc.modules.gci.models.program
# state that the task is in when it is not yet available
UNPUBLISHED = 'Unpublished'
# state that the task is in when it is open
OPEN = 'Open'
# state that the task is in when it is claimed
CLAIMED = 'Claimed'
# state that task has been claimed but never finished and has been reopened
REOPENED = 'Reopened'
# state that the task has not been approved by org admin
UNAPPROVED = 'Unapproved'
# state that task has been successfully closed
CLOSED = 'Closed'
# TODO(piyush.devel): Define constants for the rest of the statuses.
# states in which a task does not show up publicly
UNAVAILABLE = [UNPUBLISHED, UNAPPROVED]
# states in which a student can claim a task
CLAIMABLE = [OPEN, REOPENED]
# States in which we consider the task to count towards the task quota of
# the student.
ACTIVE_CLAIMED_TASK = [
'ClaimRequested', CLAIMED, 'ActionNeeded', 'NeedsWork', 'NeedsReview']
# States in which we consider that the student can work on a task as long
# as the deadline has not passed.
TASK_IN_PROGRESS = [CLAIMED, 'ActionNeeded', 'NeedsWork', 'NeedsReview']
# states in which the student is allowed to transition the task to NeedsReview
SEND_FOR_REVIEW_ALLOWED = [CLAIMED, 'ActionNeeded', 'NeedsWork']
class DifficultyLevel(object):
"""Enumerates all difficulty levels for GCI Tasks.
"""
EASY = 'Easy'
MEDIUM = 'Medium'
HARD = 'Hard'
UNKNOWN = 'Unknown'
DIFFICULTIES = [
DifficultyLevel.EASY, DifficultyLevel.MEDIUM, DifficultyLevel.HARD,
DifficultyLevel.UNKNOWN]
POINTS = {
DifficultyLevel.EASY: 1,
DifficultyLevel.MEDIUM: 2,
DifficultyLevel.HARD: 4,
DifficultyLevel.UNKNOWN: 0
}
class GCITask(db.Model):
"""Model for a task used in GCI workflow.
"""
#: Required field indicating the "title" of the task
title = db.StringProperty(required=True,
verbose_name=ugettext('Task Title'))
title.help_text = ugettext('Title of the task')
#: Required field containing the description of the task
description = db.TextProperty(required=True,
verbose_name=ugettext('Description'))
description.help_text = ugettext('Complete description of the task')
#: Field indicating the difficulty level of the Task.
difficulty_level = db.StringProperty(required=False,
verbose_name=ugettext('Difficulty'), choices=DIFFICULTIES)
#: Field indicating the types of the Task
types = db.StringListProperty(verbose_name=ugettext('Type'))
#: Field which contains the arbitrary tags for the task. These tags can
#: be assigned by org admins and mentors.
tags = db.StringListProperty(verbose_name=ugettext('Tags'))
#: A field which contains time allowed for completing the task (in hours)
#: from the moment that this task has been assigned to a Student
time_to_complete = db.IntegerProperty(required=True,
verbose_name=('Time to Complete'))
time_to_complete.help_text = ugettext(
'Time allowed to complete the task, in hours, once it is claimed')
#: List of Mentors assigned to this task. A Mentor who creates this
#: task is assigned as the Mentor by default. An Org Admin will have
#: to assign a Mentor upon task creation.
mentors = db.ListProperty(item_type=db.Key, default=[])
#: Student profile to whom this task is currently assigned to.
student = db.ReferenceProperty(reference_class=soc.models.profile.Profile,
required=False,
collection_name='assigned_tasks')
#: Program in which this Task has been created
program = db.ReferenceProperty(
reference_class=soc.modules.gci.models.program.GCIProgram,
required=True, collection_name='tasks')
  #: Organization in which this Task has been created
org = db.ReferenceProperty(
reference_class=soc.modules.gci.models.organization.GCIOrganization,
required=True, collection_name='org_tasks')
#: Required property which holds the state, the Task is currently in.
#: This is a hidden field not shown on forms. Handled by logic internally.
#: The state can be one of the following:
#: Unapproved: If Task is created by a Mentor, this is the automatically
#: assigned state.
#: Unpublished: This Task is not published yet.
#: OPEN: This Task is open and ready to be claimed.
#: Reopened: This Task has been claimed but never finished and has been
#: reopened.
#: ClaimRequested: A Student has requested to claim this task.
#: CLAIMED: This Task has been claimed and someone is working on it.
#: ActionNeeded: Work on this Task must be submitted for review within
#: 24 hours.
  #: Closed: Work on this Task has been completed to the org's content.
  #: AwaitingRegistration: Student has completed work on this task, but
  #: needs to complete Student registration before this task is closed.
  #: This status is now deprecated since we register before any interaction.
#: NeedsWork: This work on this Tasks needs a bit more brushing up. This
#: state is followed by a Mentor review.
#: NeedsReview: Student has submitted work for this task and it should
#: be reviewed by a Mentor.
#: Invalid: The Task is deleted either by an Org Admin/Mentor
status = db.StringProperty(
required=True, verbose_name=ugettext('Status'),
choices=[UNAPPROVED, UNPUBLISHED, OPEN, REOPENED,
'ClaimRequested', CLAIMED, 'ActionNeeded',
CLOSED, 'NeedsWork', 'NeedsReview', 'Invalid'],
default=UNAPPROVED)
#: Indicates when the Task was closed. Its value is None before it is
#: completed.
closed_on = db.DateTimeProperty(required=False,
verbose_name=ugettext('Closed on'))
#: This field is set to the next deadline that will have consequences for
#: this Task. For instance this will store a DateTime property which will
#: tell when this Task should be completed.
deadline = db.DateTimeProperty(required=False,
verbose_name=ugettext('Deadline'))
# Property holding the list of GCIProfiles who are subscribed to the task.
subscribers = db.ListProperty(item_type=db.Key, default=[])
#: Required field containing the Mentor/Org Admin who created this task.
#: If site developer has created the task, it is empty.
created_by = db.ReferenceProperty(reference_class=soc.models.profile.Profile,
required=False,
collection_name='created_tasks',
verbose_name=ugettext('Created by'))
#: Date when the proposal was created
created_on = db.DateTimeProperty(required=True, auto_now_add=True,
verbose_name=ugettext('Created on'))
#: Required field containing the Mentor/Org Admin who last edited this
#: task. It changes only when Mentor/Org Admin changes title, description,
#: difficulty, task_type, time_to_complete. If site developer has modified
#: the task, it is empty.
modified_by = db.ReferenceProperty(reference_class=soc.models.profile.Profile,
required=False,
collection_name='edited_tasks',
verbose_name=ugettext('Modified by'))
#: Date when the proposal was last modified, should be set manually on edit
modified_on = db.DateTimeProperty(required=True, auto_now_add=True,
verbose_name=ugettext('Modified on'))
#: The task can be marked to be featured on program home page.
is_featured = db.BooleanProperty(default=False, required=True,
verbose_name=ugettext('Featured'))
is_featured.help_text = ugettext(
'Should this task be featured on the program homepage.')
#: Determines whether the student who completed this task should
#: receive points for it
points_invalidated = db.BooleanProperty(default=False,
verbose_name=ugettext('Points invalidated.'))
def taskTimeToComplete(self):
days = self.time_to_complete / 24
hours = self.time_to_complete % 24
result = []
if days == 1:
result.append("1 day")
if days > 1:
result.append("%d days" % days)
if days and hours:
result.append(" and ")
if hours == 1:
result.append("1 hour")
if hours > 1:
result.append("%d hours" % hours)
return "".join(result)
def isAvailable(self):
"""Returns True if the task is published."""
return self.status not in UNAVAILABLE
def workSubmissions(self):
"""Returns the GCIWorksubmissions that have the given task as parent."""
q = GCIWorkSubmission.all()
q.ancestor(self)
return q.fetch(1000)
def comments(self):
"""Returns the GCIComments that have the given task as parent.
The results are sorted by the date on which they have been created.
"""
q = GCIComment.all()
q.ancestor(self)
q.order('created_on')
return q.fetch(1000)
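# Illustrative worked example (not part of the original Melange source): for a task
# with time_to_complete = 30, taskTimeToComplete() renders "1 day and 6 hours"
# (30 / 24 = 1 day, 30 % 24 = 6 hours), and a task with
# difficulty_level = DifficultyLevel.MEDIUM is worth POINTS[DifficultyLevel.MEDIUM] == 2
# points once closed.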
| apache-2.0 | -4,247,628,573,120,247,000 | 39.203252 | 80 | 0.688777 | false | 4.058268 | false | false | false |
tianz/MyInventory | inventory/models.py | 1 | 5035 | from django.db import models
from django.db.models import Sum, F
from django.db.models.functions import Coalesce
from django.contrib.auth.models import User
from django.utils.functional import cached_property
from .managers import ProductManager, PurchaseOrderManager, PurchaseItemManager, SalesOrderManager, SalesItemManager, AdjustmentManager
class MyUser(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
date_format = models.CharField(max_length=32, default='%m/%d/%Y')
date_format_ym = models.CharField(max_length=32, default='%m/%Y')
date_format_ym_short = models.CharField(max_length=32, default='%m/%y')
date_format_md = models.CharField(max_length=32, default='%m/%d')
class Currency(models.Model):
name = models.CharField(max_length=128, unique=True)
symbol = models.CharField(max_length=8, unique=True)
class Meta:
verbose_name_plural = 'currencies'
def __str__(self):
return self.name
class Product(models.Model):
sku = models.CharField(max_length=128, unique=True)
name = models.CharField(max_length=255)
price = models.DecimalField(max_digits=10, decimal_places=2)
objects = ProductManager()
@cached_property
def purchased(self):
return PurchaseItem.objects.filter(product=self).aggregate(value=Coalesce(Sum('quantity'), 0))['value']
def get_sold(self, year=None, month=None):
objects = SalesItem.objects.filter(product=self).exclude(sales_order__status='RE')
if year is not None and month is not None:
objects = objects.filter(sales_order__date__year=year, sales_order__date__month=month)
return objects.aggregate(value=Coalesce(Sum('quantity'), 0))['value']
sold = cached_property(get_sold)
@cached_property
def pending(self):
return SalesItem.objects.filter(product=self, sales_order__status='PR').aggregate(value=Coalesce(Sum('quantity'), 0))['value']
@cached_property
def adjustment(self):
return Adjustment.objects.filter(product=self).aggregate(value=Coalesce(Sum('quantity'), 0))['value']
def available(self):
return self.purchased - self.sold + self.adjustment
def available_str(self):
return str(self.available() + self.pending) + ('' if self.pending == 0 else ' (-' + str(self.pending) + ')')
def avg_price(self):
revenue = SalesItem.objects.filter(product=self) \
.aggregate(value=Sum(F('price')*F('quantity')*(100-F('sales_order__discount'))/100, output_field=models.DecimalField()))['value']
if revenue:
return '${0:,.2f}'.format(revenue / self.sold)
else:
return "N/A"
def last_sold(self):
try:
last_so = SalesItem.objects.filter(product=self).exclude(sales_order__status='RE').values('sales_order__date').latest('sales_order__date')
return last_so['sales_order__date']
except SalesItem.DoesNotExist:
return None
def __str__(self):
return self.sku
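# Illustrative worked example (not part of the original project): for a Product with
# 10 units purchased, 4 units sold on non-returned orders (2 of them still on
# "Processing" orders) and an adjustment of -1, available() is 10 - 4 + (-1) = 5,
# and available_str() renders "7 (-2)" because the 2 pending units are added back
# and shown separately.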
class PurchaseOrder(models.Model):
date = models.DateField()
order_id = models.CharField(max_length=128, unique=True)
currency = models.ForeignKey(Currency)
objects = PurchaseOrderManager()
def __str__(self):
return str(self.id)
class PurchaseItem(models.Model):
purchase_order = models.ForeignKey(PurchaseOrder)
product = models.ForeignKey(Product)
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.IntegerField()
objects = PurchaseItemManager()
def __str__(self):
return str(self.id)
class Platform(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
class SalesOrder(models.Model):
STATUS_CHOICES = (
('PR', 'Processing'),
('CO', 'Complete'),
('RE', 'Returned'),
)
date = models.DateField()
platform = models.ForeignKey(Platform)
order_id = models.CharField(max_length=128)
customer = models.CharField(max_length=255)
currency = models.ForeignKey(Currency)
discount = models.DecimalField(max_digits=10, decimal_places=2)
tax = models.DecimalField(max_digits=10, decimal_places=3)
status = models.CharField(max_length=2, choices=STATUS_CHOICES, default='PR', blank=False)
objects = SalesOrderManager()
def __str__(self):
return str(self.id)
class SalesItem(models.Model):
sales_order = models.ForeignKey(SalesOrder)
product = models.ForeignKey(Product)
price = models.DecimalField(max_digits=10, decimal_places=2)
quantity = models.IntegerField()
objects = SalesItemManager()
def __str__(self):
return str(self.id)
class Adjustment(models.Model):
date = models.DateField()
product = models.ForeignKey(Product)
quantity = models.IntegerField()
comment = models.CharField(max_length=255, blank=True)
objects = AdjustmentManager()
def __str__(self):
return str(self.id)
| mit | 6,578,966,080,557,096,000 | 31.070064 | 150 | 0.672095 | false | 3.688645 | false | false | false |
pavel-paulau/perfrunner | perfrunner/utils/verify_logs.py | 1 | 1604 | import glob
import zipfile
from collections import defaultdict
from typing import List
from logger import logger
from perfrunner.helpers.misc import pretty_dict
GOLANG_LOG_FILES = ("eventing.log",
"fts.log",
"goxdcr.log",
"indexer.log",
"projector.log",
"query.log")
def check_for_golang_panic(file_name: str) -> List[str]:
zf = zipfile.ZipFile(file_name)
panic_files = []
for name in zf.namelist():
if any(log_file in name for log_file in GOLANG_LOG_FILES):
data = zf.read(name)
if "panic" in str(data):
panic_files.append(name)
return panic_files
def check_for_crash_files(file_name: str) -> List[str]:
zf = zipfile.ZipFile(file_name)
crash_files = []
for name in zf.namelist():
if name.endswith('.dmp'):
crash_files.append(name)
return crash_files
def validate_logs(file_name: str):
panic_files = check_for_golang_panic(file_name)
crash_files = check_for_crash_files(file_name)
return panic_files, crash_files
def main():
failures = defaultdict(dict)
for file_name in glob.iglob('./*.zip'):
panic_files, crash_files = validate_logs(file_name)
if panic_files:
failures['panics'][file_name] = panic_files
if crash_files:
failures['crashes'][file_name] = crash_files
if failures:
logger.interrupt(
"Following failures found: {}".format(pretty_dict(failures)))
if __name__ == '__main__':
main()
| apache-2.0 | 6,336,424,920,664,982,000 | 26.186441 | 73 | 0.592269 | false | 3.588367 | false | false | false |
jorgeas80/cercanias-api | cercanias/settings.py | 1 | 2404 | # -*- coding: utf-8 -*-
"""
Django settings for cercanias project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
try:
from local_settings import *
except ImportError:
import sys
sys.stderr.write("Warning: Can't find the file 'local_settings.py")
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e38u=_m)^5e#-zt_n4uiei9%d@5(wz&ab11$q==3)y$)qva^^$'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'cercanias_api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'cercanias.urls'
WSGI_APPLICATION = 'cercanias.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Madrid'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True
#CORS_ORIGIN_WHITELIST = (
# 'jorgeas80.github.com',
#)
| gpl-2.0 | 9,208,013,409,117,161,000 | 23.282828 | 71 | 0.71173 | false | 3.179894 | false | false | false |
ryfeus/lambda-packs | Keras_tensorflow_nightly/source2.7/tensorflow/contrib/factorization/python/ops/gen_clustering_ops.py | 1 | 12595 | """Python wrappers around TensorFlow ops.
This file is MACHINE GENERATED! Do not edit.
Original C++ source file: gen_clustering_ops.cc
"""
import collections as _collections
import six as _six
from tensorflow.python import pywrap_tensorflow as _pywrap_tensorflow
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import errors as _errors
from tensorflow.python.framework import tensor_shape as _tensor_shape
from tensorflow.core.framework import op_def_pb2 as _op_def_pb2
# Needed to trigger the call to _set_call_cpp_shape_fn.
from tensorflow.python.framework import common_shapes as _common_shapes
from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.tf_export import tf_export
@tf_export('kmc2_chain_initialization')
def kmc2_chain_initialization(distances, seed, name=None):
r"""Returns the index of a data point that should be added to the seed set.
Entries in distances are assumed to be squared distances of candidate points to
the already sampled centers in the seed set. The op constructs one Markov chain
of the k-MC^2 algorithm and returns the index of one candidate point to be added
as an additional cluster center.
Args:
distances: A `Tensor` of type `float32`.
Vector with squared distances to the closest previously sampled
cluster center for each candidate point.
seed: A `Tensor` of type `int64`.
Scalar. Seed for initializing the random number generator.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `int64`. Scalar with the index of the sampled point.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"KMC2ChainInitialization", distances=distances, seed=seed, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"KMC2ChainInitialization", name, _ctx._post_execution_callbacks,
distances, seed)
return _result
except _core._FallbackException:
return kmc2_chain_initialization_eager_fallback(
distances, seed, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def kmc2_chain_initialization_eager_fallback(distances, seed, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function kmc2_chain_initialization
"""
_ctx = ctx if ctx else _context.context()
distances = _ops.convert_to_tensor(distances, _dtypes.float32)
seed = _ops.convert_to_tensor(seed, _dtypes.int64)
_inputs_flat = [distances, seed]
_attrs = None
_result = _execute.execute(b"KMC2ChainInitialization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KMC2ChainInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("KMC2ChainInitialization")(None)
@tf_export('kmeans_plus_plus_initialization')
def kmeans_plus_plus_initialization(points, num_to_sample, seed, num_retries_per_sample, name=None):
r"""Selects num_to_sample rows of input using the KMeans++ criterion.
Rows of points are assumed to be input points. One row is selected at random.
Subsequent rows are sampled with probability proportional to the squared L2
distance from the nearest row selected thus far till num_to_sample rows have
been sampled.
Args:
points: A `Tensor` of type `float32`.
Matrix of shape (n, d). Rows are assumed to be input points.
num_to_sample: A `Tensor` of type `int64`.
Scalar. The number of rows to sample. This value must not be
larger than n.
seed: A `Tensor` of type `int64`.
Scalar. Seed for initializing the random number generator.
num_retries_per_sample: A `Tensor` of type `int64`.
Scalar. For each row that is sampled, this parameter
specifies the number of additional points to draw from the current
distribution before selecting the best. If a negative value is specified, a
heuristic is used to sample O(log(num_to_sample)) additional points.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `float32`.
Matrix of shape (num_to_sample, d). The sampled rows.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"KmeansPlusPlusInitialization", points=points,
num_to_sample=num_to_sample, seed=seed,
num_retries_per_sample=num_retries_per_sample, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"KmeansPlusPlusInitialization", name, _ctx._post_execution_callbacks,
points, num_to_sample, seed, num_retries_per_sample)
return _result
except _core._FallbackException:
return kmeans_plus_plus_initialization_eager_fallback(
points, num_to_sample, seed, num_retries_per_sample, name=name,
ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def kmeans_plus_plus_initialization_eager_fallback(points, num_to_sample, seed, num_retries_per_sample, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function kmeans_plus_plus_initialization
"""
_ctx = ctx if ctx else _context.context()
points = _ops.convert_to_tensor(points, _dtypes.float32)
num_to_sample = _ops.convert_to_tensor(num_to_sample, _dtypes.int64)
seed = _ops.convert_to_tensor(seed, _dtypes.int64)
num_retries_per_sample = _ops.convert_to_tensor(num_retries_per_sample, _dtypes.int64)
_inputs_flat = [points, num_to_sample, seed, num_retries_per_sample]
_attrs = None
_result = _execute.execute(b"KmeansPlusPlusInitialization", 1,
inputs=_inputs_flat, attrs=_attrs, ctx=_ctx,
name=name)
_execute.record_gradient(
"KmeansPlusPlusInitialization", _inputs_flat, _attrs, _result, name)
_result, = _result
return _result
_ops.RegisterShape("KmeansPlusPlusInitialization")(None)
_nearest_neighbors_outputs = ["nearest_center_indices",
"nearest_center_distances"]
_NearestNeighborsOutput = _collections.namedtuple(
"NearestNeighbors", _nearest_neighbors_outputs)
@tf_export('nearest_neighbors')
def nearest_neighbors(points, centers, k, name=None):
r"""Selects the k nearest centers for each point.
Rows of points are assumed to be input points. Rows of centers are assumed to be
the list of candidate centers. For each point, the k centers that have least L2
distance to it are computed.
Args:
points: A `Tensor` of type `float32`.
Matrix of shape (n, d). Rows are assumed to be input points.
centers: A `Tensor` of type `float32`.
Matrix of shape (m, d). Rows are assumed to be centers.
k: A `Tensor` of type `int64`.
Scalar. Number of nearest centers to return for each point. If k is larger
than m, then only m centers are returned.
name: A name for the operation (optional).
Returns:
A tuple of `Tensor` objects (nearest_center_indices, nearest_center_distances).
nearest_center_indices: A `Tensor` of type `int64`. Matrix of shape (n, min(m, k)). Each row contains the
indices of the centers closest to the corresponding point, ordered by
increasing distance.
nearest_center_distances: A `Tensor` of type `float32`. Matrix of shape (n, min(m, k)). Each row contains the
squared L2 distance to the corresponding center in nearest_center_indices.
"""
_ctx = _context._context
if _ctx is None or not _ctx._eager_context.is_eager:
_, _, _op = _op_def_lib._apply_op_helper(
"NearestNeighbors", points=points, centers=centers, k=k, name=name)
_result = _op.outputs[:]
_inputs_flat = _op.inputs
_attrs = None
_execute.record_gradient(
"NearestNeighbors", _inputs_flat, _attrs, _result, name)
_result = _NearestNeighborsOutput._make(_result)
return _result
else:
try:
_result = _pywrap_tensorflow.TFE_Py_FastPathExecute(
_ctx._context_handle, _ctx._eager_context.device_name,
"NearestNeighbors", name, _ctx._post_execution_callbacks, points,
centers, k)
_result = _NearestNeighborsOutput._make(_result)
return _result
except _core._FallbackException:
return nearest_neighbors_eager_fallback(
points, centers, k, name=name, ctx=_ctx)
except _core._NotOkStatusException as e:
if name is not None:
message = e.message + " name: " + name
else:
message = e.message
_six.raise_from(_core._status_to_exception(e.code, message), None)
def nearest_neighbors_eager_fallback(points, centers, k, name=None, ctx=None):
r"""This is the slowpath function for Eager mode.
This is for function nearest_neighbors
"""
_ctx = ctx if ctx else _context.context()
points = _ops.convert_to_tensor(points, _dtypes.float32)
centers = _ops.convert_to_tensor(centers, _dtypes.float32)
k = _ops.convert_to_tensor(k, _dtypes.int64)
_inputs_flat = [points, centers, k]
_attrs = None
_result = _execute.execute(b"NearestNeighbors", 2, inputs=_inputs_flat,
attrs=_attrs, ctx=_ctx, name=name)
_execute.record_gradient(
"NearestNeighbors", _inputs_flat, _attrs, _result, name)
_result = _NearestNeighborsOutput._make(_result)
return _result
_ops.RegisterShape("NearestNeighbors")(None)
def _InitOpDefLibrary(op_list_proto_bytes):
op_list = _op_def_pb2.OpList()
op_list.ParseFromString(op_list_proto_bytes)
_op_def_registry.register_op_list(op_list)
op_def_lib = _op_def_library.OpDefLibrary()
op_def_lib.add_op_list(op_list)
return op_def_lib
# op {
# name: "KMC2ChainInitialization"
# input_arg {
# name: "distances"
# type: DT_FLOAT
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# output_arg {
# name: "index"
# type: DT_INT64
# }
# }
# op {
# name: "KmeansPlusPlusInitialization"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "num_to_sample"
# type: DT_INT64
# }
# input_arg {
# name: "seed"
# type: DT_INT64
# }
# input_arg {
# name: "num_retries_per_sample"
# type: DT_INT64
# }
# output_arg {
# name: "samples"
# type: DT_FLOAT
# }
# }
# op {
# name: "NearestNeighbors"
# input_arg {
# name: "points"
# type: DT_FLOAT
# }
# input_arg {
# name: "centers"
# type: DT_FLOAT
# }
# input_arg {
# name: "k"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_indices"
# type: DT_INT64
# }
# output_arg {
# name: "nearest_center_distances"
# type: DT_FLOAT
# }
# }
_op_def_lib = _InitOpDefLibrary(b"\n=\n\027KMC2ChainInitialization\022\r\n\tdistances\030\001\022\010\n\004seed\030\t\032\t\n\005index\030\t\np\n\034KmeansPlusPlusInitialization\022\n\n\006points\030\001\022\021\n\rnum_to_sample\030\t\022\010\n\004seed\030\t\022\032\n\026num_retries_per_sample\030\t\032\013\n\007samples\030\001\nl\n\020NearestNeighbors\022\n\n\006points\030\001\022\013\n\007centers\030\001\022\005\n\001k\030\t\032\032\n\026nearest_center_indices\030\t\032\034\n\030nearest_center_distances\030\001")
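if __name__ == "__main__":
  # Illustrative sketch only -- not part of the machine-generated wrapper above. It
  # shows one way the nearest_neighbors op could be driven in TF 1.x graph mode,
  # assuming the clustering kernels are available in this TensorFlow build (they are
  # normally loaded through tf.contrib.factorization); the tensors below are made up.
  import numpy as np
  import tensorflow as tf
  points = tf.constant(np.random.rand(10, 2).astype(np.float32))
  centers = tf.constant(np.random.rand(3, 2).astype(np.float32))
  neighbors = nearest_neighbors(points, centers, 1)  # k = 1 nearest center per point
  with tf.Session() as sess:
    indices, distances = sess.run(neighbors)
    print(indices.shape, distances.shape)  # expected: (10, 1) and (10, 1)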
| mit | 958,460,742,814,141,800 | 37.05136 | 520 | 0.679635 | false | 3.376676 | false | false | false |
numericalalgorithmsgroup/pybobyqa | pybobyqa/tests/test_model.py | 1 | 37410 | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
The development of this software was sponsored by NAG Ltd. (http://www.nag.co.uk)
and the EPSRC Centre For Doctoral Training in Industrially Focused Mathematical
Modelling (EP/L015803/1) at the University of Oxford. Please contact NAG for
alternative licensing.
"""
# Ensure compatibility with Python 2
from __future__ import absolute_import, division, print_function, unicode_literals
from math import sqrt, sin
import numpy as np
import unittest
from pybobyqa.model import Model
from pybobyqa.util import sumsq, model_value
def array_compare(x, y, thresh=1e-14):
return np.max(np.abs(x - y)) < thresh
def rosenbrock_residuals(x):
return np.array([10.0 * (x[1] - x[0] ** 2), 1.0 - x[0]])
def rosenbrock(x):
return sumsq(rosenbrock_residuals(x))
def objfun(x):
# An arbitrary-dimension objective function
return sin(np.dot(x, np.arange(1,len(x)+1,dtype=float))) # f(x1,...,xn) = sin(x1 + 2*x2 + ... + n*xn)
class TestAddValues(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e20 * np.ones((n,))
xu = 1e20 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
self.assertEqual(model.npt(), npt, 'Wrong npt after initialisation')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x0), 'Wrong xopt after initialisation')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x0)), 'Wrong fopt after initialisation')
# Now add better point
x1 = np.array([1.0, 0.9])
rvec = rosenbrock(x1)
model.change_point(1, x1 - model.xbase, rvec, allow_kopt_update=True)
self.assertEqual(model.npt(), npt, 'Wrong npt after x1')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after x1')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x1)), 'Wrong fopt after x1')
# Now add worse point
x2 = np.array([2.0, 0.9])
rvec = rosenbrock(x2)
model.change_point(2, x2 - model.xbase, rvec, allow_kopt_update=True)
self.assertEqual(model.npt(), npt, 'Wrong npt after x2')
self.assertTrue(array_compare(model.xpt(0, abs_coordinates=True), x0), 'Wrong xpt(0) after x2')
self.assertTrue(array_compare(model.xpt(1, abs_coordinates=True), x1), 'Wrong xpt(1) after x2')
self.assertTrue(array_compare(model.xpt(2, abs_coordinates=True), x2), 'Wrong xpt(2) after x2')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after x2')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x1)), 'Wrong fopt after x2')
# Now add best point (but don't update kopt)
x3 = np.array([1.0, 1.0])
rvec = rosenbrock(x3)
model.change_point(0, x3 - model.xbase, rvec, allow_kopt_update=False) # full: overwrite x0
self.assertEqual(model.npt(), npt, 'Wrong npt after x3')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after x3')
self.assertTrue(array_compare(model.fopt(), rosenbrock(x1)), 'Wrong fopt after x3')
self.assertAlmostEqual(model.fopt(), rosenbrock(x1), msg='Wrong fopt after x3')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), model.as_absolute_coordinates(model.xopt())),
'Comparison wrong after x3')
dirns = model.xpt_directions(include_kopt=True)
self.assertTrue(array_compare(x3 - x1, dirns[0, :]), 'Wrong dirn 0')
self.assertTrue(array_compare(x1 - x1, dirns[1, :]), 'Wrong dirn 1')
self.assertTrue(array_compare(x2 - x1, dirns[2, :]), 'Wrong dirn 2')
dirns = model.xpt_directions(include_kopt=False)
self.assertTrue(array_compare(x3 - x1, dirns[0, :]), 'Wrong dirn 0 (no kopt)')
# self.assertTrue(array_compare(x1 - x1, dirns[1, :]), 'Wrong dirn 1')
self.assertTrue(array_compare(x2 - x1, dirns[1, :]), 'Wrong dirn 1 (no kopt)')
class TestSwap(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e20 * np.ones((n,))
xu = 1e20 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
# Now add better point
x1 = np.array([1.0, 0.9])
f1 = rosenbrock(x1)
model.change_point(1, x1 - model.xbase, f1, allow_kopt_update=True)
# Now add worse point
x2 = np.array([2.0, 0.9])
f2 = rosenbrock(x2)
model.change_point(2, x2 - model.xbase, f2, allow_kopt_update=True)
model.swap_points(0, 2)
self.assertTrue(array_compare(model.xpt(0, abs_coordinates=True), x2), 'Wrong xpt(0) after swap 1')
self.assertTrue(array_compare(model.xpt(1, abs_coordinates=True), x1), 'Wrong xpt(1) after swap 1')
self.assertTrue(array_compare(model.xpt(2, abs_coordinates=True), x0), 'Wrong xpt(2) after swap 1')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after swap 1')
model.swap_points(1, 2)
self.assertTrue(array_compare(model.xpt(0, abs_coordinates=True), x2), 'Wrong xpt(0) after swap 2')
self.assertTrue(array_compare(model.xpt(1, abs_coordinates=True), x0), 'Wrong xpt(1) after swap 2')
self.assertTrue(array_compare(model.xpt(2, abs_coordinates=True), x1), 'Wrong xpt(2) after swap 2')
self.assertTrue(array_compare(model.xopt(abs_coordinates=True), x1), 'Wrong xopt after swap 2')
class TestBasicManipulation(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
self.assertTrue(array_compare(model.sl, xl - x0), 'Wrong sl after initialisation')
self.assertTrue(array_compare(model.su, xu - x0), 'Wrong su after initialisation')
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
self.assertTrue(array_compare(model.as_absolute_coordinates(x1 - x0), x1), 'Wrong abs coords')
self.assertTrue(array_compare(model.as_absolute_coordinates(np.array([-1e3, 1e3])-x0), np.array([-1e2, 1e2])),
'Bad abs coords with bounds')
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
sqdists = model.distances_to_xopt()
self.assertAlmostEqual(sqdists[0], sumsq(x0 - x1), msg='Wrong distance 0')
self.assertAlmostEqual(sqdists[1], sumsq(x1 - x1), msg='Wrong distance 1')
self.assertAlmostEqual(sqdists[2], sumsq(x2 - x1), msg='Wrong distance 2')
model.add_new_sample(0, rosenbrock(x0))
self.assertEqual(model.nsamples[0], 2, 'Wrong number of samples 0')
self.assertEqual(model.nsamples[1], 1, 'Wrong number of samples 1')
self.assertEqual(model.nsamples[2], 1, 'Wrong number of samples 2')
for i in range(50):
model.add_new_sample(0, 0.0)
self.assertEqual(model.kopt, 0, 'Wrong kopt after bad resampling')
self.assertTrue(array_compare(model.fopt(), 2*rosenbrock(x0)/52), 'Wrong fopt after bad resampling')
d = np.array([10.0, 10.0])
dirns_old = model.xpt_directions(include_kopt=True)
model.shift_base(d)
dirns_new = model.xpt_directions(include_kopt=True)
self.assertTrue(array_compare(model.xbase, x0 + d), 'Wrong new base')
self.assertEqual(model.kopt, 0, 'Wrong kopt after shift base')
for i in range(3):
self.assertTrue(array_compare(dirns_old[i, :], dirns_new[i, :]), 'Wrong dirn %i after shift base' % i)
self.assertTrue(array_compare(model.sl, xl - x0 - d), 'Wrong sl after shift base')
self.assertTrue(array_compare(model.su, xu - x0 - d), 'Wrong su after shift base')
# save_point and get_final_results
model.change_point(0, x0 - model.xbase, rosenbrock(x0)) # revert after resampling
model.change_point(1, x1 - model.xbase, rosenbrock(x1)) # revert after resampling
x, f, gradmin, hessmin, nsamples = model.get_final_results()
self.assertTrue(array_compare(x, x1), 'Wrong final x')
self.assertAlmostEqual(rosenbrock(x1), f, msg='Wrong final f')
self.assertTrue(array_compare(np.zeros((2,)), gradmin), 'Wrong final gradmin')
self.assertTrue(array_compare(np.zeros((2,2)), hessmin), 'Wrong final hessmin')
self.assertEqual(1, nsamples, 'Wrong final nsamples')
self.assertIsNone(model.xsave, 'xsave not none after initialisation')
self.assertIsNone(model.fsave, 'fsave not none after initialisation')
self.assertIsNone(model.nsamples_save, 'nsamples_save not none after initialisation')
model.save_point(x0, rosenbrock(x0), 1, x_in_abs_coords=True)
self.assertTrue(array_compare(model.xsave, x0), 'Wrong xsave after saving')
self.assertAlmostEqual(model.fsave, rosenbrock(x0), msg='Wrong fsave after saving')
self.assertEqual(model.nsamples_save, 1, 'Wrong nsamples_save after saving')
x, f, gradmin, hessmin, nsamples = model.get_final_results()
self.assertTrue(array_compare(x, x1), 'Wrong final x after saving')
self.assertAlmostEqual(rosenbrock(x1), f, msg='Wrong final f after saving')
self.assertEqual(1, nsamples, 'Wrong final nsamples after saving')
model.save_point(x2 - model.xbase, 0.0, 2, x_in_abs_coords=False)
self.assertTrue(array_compare(model.xsave, x2), 'Wrong xsave after saving 2')
self.assertAlmostEqual(model.fsave, 0.0, msg='Wrong fsave after saving 2')
self.assertEqual(model.nsamples_save, 2, 'Wrong nsamples_save after saving 2')
x, f, gradmin, hessmin, nsamples = model.get_final_results()
self.assertTrue(array_compare(x, x2), 'Wrong final x after saving 2')
self.assertAlmostEqual(f, 0.0, msg='Wrong final f after saving 2')
self.assertEqual(2, nsamples, 'Wrong final nsamples after saving 2')
model.save_point(x0, rosenbrock(x0), 3, x_in_abs_coords=True) # try to re-save a worse value
self.assertTrue(array_compare(model.xsave, x2), 'Wrong xsave after saving 3')
self.assertAlmostEqual(model.fsave, 0.0, msg='Wrong fsave after saving 3')
self.assertEqual(model.nsamples_save, 2, 'Wrong nsamples_save after saving 3')
class TestAveraging(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([1.0, 1.0])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
self.assertEqual(model.kopt, 2, 'Wrong kopt before resampling')
# Originally, x2 is the ideal point
# Here, testing that kopt moves back to x1 after adding heaps of bad x2 samples
for i in range(10):
model.add_new_sample(2, 5.0)
self.assertEqual(model.kopt, 1, 'Wrong kopt after resampling')
class TestMinObjValue(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
self.assertAlmostEqual(model.min_objective_value(), -1e20, msg='Wrong min obj value')
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1, abs_tol=1.0)
self.assertAlmostEqual(model.min_objective_value(), 1.0, msg='Wrong min obj value 3')
class TestInterpMatrixLinear(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1, precondition=False)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
A, left_scaling, right_scaling = model.interpolation_matrix()
A_expect = np.zeros((2, 2))
A_expect[0, :] = x0 - x1 # x1 is xopt in this situation
A_expect[1, :] = x2 - x1
self.assertTrue(array_compare(A, A_expect), 'Interp matrix 1')
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model()
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model.model_const, rosenbrock(model.xbase), msg='Wrong constant term')
self.assertTrue(array_compare(model.model_value(x1 - model.xbase, d_based_at_xopt=False, with_const_term=True),
rosenbrock(x1), thresh=1e-10), 'Wrong x1') # allow some inexactness
self.assertTrue(array_compare(model.model_value(x2 - model.xbase, d_based_at_xopt=False, with_const_term=True),
rosenbrock(x2), thresh=1e-10), 'Wrong x2')
# Test some other parameter settings for model.model_value()
self.assertTrue(array_compare(model.model_value(x2 - x1, d_based_at_xopt=True, with_const_term=True),
rosenbrock(x2), thresh=1e-10), 'Wrong x2 (from xopt)')
self.assertTrue(array_compare(model.model_value(x2 - x1, d_based_at_xopt=True, with_const_term=False),
rosenbrock(x2)-rosenbrock(model.xbase), thresh=1e-10), 'Wrong x2 (no constant)')
self.assertTrue(array_compare(model.model_value(x2 - model.xbase, d_based_at_xopt=False, with_const_term=False),
rosenbrock(x2) - rosenbrock(model.xbase), thresh=1e-10), 'Wrong x2 (no constant v2)')
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
class TestInterpMatrixUnderdeterminedQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = n+2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
# x2 is xopt in this situation
self.assertTrue(model.kopt == 2, 'Wrong xopt')
xs = [x0, x1, x3]
xopt = x2
nxs = len(xs)
A = np.zeros((nxs+n,nxs+n))
for i in range(nxs):
for j in range(nxs):
A[i,j] = 0.5 * np.dot(xs[i]-xopt, xs[j]-xopt)**2
A[i,nxs:] = xs[i] - xopt
A[nxs:,i] = xs[i] - xopt
A2, left_scaling, right_scaling = model.interpolation_matrix()
# print("Expect", A)
# print("Got", A2)
self.assertTrue(np.allclose(A, A2), 'Interp matrix 1')
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model(verbose=True)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(norm_chg_grad, np.linalg.norm(model.model_grad))
self.assertAlmostEqual(norm_chg_hess, np.linalg.norm(model.model_hess, ord='fro'))
self.assertAlmostEqual(model.model_const, objfun(model.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3]:
self.assertAlmostEqual(model.model_value(xi - model.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Test some other parameter settings for model.model_value()
# print("Ignore after here")
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
# Build a new model
model2 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model2.change_point(1, x1 - model.xbase, objfun(x1))
model2.change_point(2, x2 - model.xbase, objfun(x2))
model2.change_point(3, x3 - model.xbase, objfun(x3))
# Force Hessian to be something else
model2.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model2.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 2')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model2.interpolate_model()
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model2.model_const, objfun(model2.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3]:
self.assertAlmostEqual(model2.model_value(xi - model2.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Compare distance of hessians
h1 = np.zeros((n,n))
h2 = np.eye(n)
self.assertLessEqual(np.linalg.norm(model.model_hess-h1, ord='fro'),
np.linalg.norm(model2.model_hess-h1, ord='fro'), 'Not min frob Hess 1')
self.assertLessEqual(np.linalg.norm(model2.model_hess - h2, ord='fro'),
np.linalg.norm(model.model_hess - h2, ord='fro'), 'Not min frob Hess 2')
# print(model.model_hess)
# print(model2.model_hess)
# Build a new model
model3 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model3.change_point(1, x1 - model.xbase, objfun(x1))
model3.change_point(2, x2 - model.xbase, objfun(x2))
model3.change_point(3, x3 - model.xbase, objfun(x3))
# Force Hessian to be something else
model3.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model3.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 3')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model3.interpolate_model(min_chg_hess=False)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model3.model_const, objfun(model3.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3]:
self.assertAlmostEqual(model3.model_value(xi - model3.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
self.assertTrue(np.allclose(model.model_hess, model3.model_hess),
'min_chg_hess=False not working')
class TestInterpMatrixUnderdeterminedQuadratic2(unittest.TestCase):
def runTest(self):
n = 2
npt = 2*n+1
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
# x2 is xopt in this situation
self.assertTrue(model.kopt == 2, 'Wrong xopt')
xs = [x0, x1, x3, x4]
xopt = x2
nxs = len(xs)
A = np.zeros((nxs+n,nxs+n))
for i in range(nxs):
for j in range(nxs):
A[i,j] = 0.5 * np.dot(xs[i]-xopt, xs[j]-xopt)**2
A[i,nxs:] = xs[i] - xopt
A[nxs:,i] = xs[i] - xopt
A2, left_scaling, right_scaling = model.interpolation_matrix()
# print("Expect", A)
# print("Got", A2)
self.assertTrue(np.allclose(A, A2), 'Interp matrix 1')
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model(verbose=True)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(norm_chg_grad, np.linalg.norm(model.model_grad))
self.assertAlmostEqual(norm_chg_hess, np.linalg.norm(model.model_hess, ord='fro'))
self.assertAlmostEqual(model.model_const, objfun(model.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4]:
self.assertAlmostEqual(model.model_value(xi - model.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Test some other parameter settings for model.model_value()
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
# Build a new model
model2 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model2.change_point(1, x1 - model.xbase, objfun(x1))
model2.change_point(2, x2 - model.xbase, objfun(x2))
model2.change_point(3, x3 - model.xbase, objfun(x3))
model2.change_point(4, x4 - model.xbase, objfun(x4))
# Force Hessian to be something else
model2.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model2.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 2')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model2.interpolate_model()
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model2.model_const, objfun(model2.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4]:
self.assertAlmostEqual(model2.model_value(xi - model2.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Compare distance of hessians
h1 = np.zeros((n,n))
h2 = np.eye(n)
self.assertLessEqual(np.linalg.norm(model.model_hess-h1, ord='fro'),
np.linalg.norm(model2.model_hess-h1, ord='fro'), 'Not min frob Hess 1')
self.assertLessEqual(np.linalg.norm(model2.model_hess - h2, ord='fro'),
np.linalg.norm(model.model_hess - h2, ord='fro'), 'Not min frob Hess 2')
# print(model.model_hess)
# print(model2.model_hess)
# Build a new model
model3 = Model(npt, x0, objfun(x0), xl, xu, 1, precondition=False)
model3.change_point(1, x1 - model.xbase, objfun(x1))
model3.change_point(2, x2 - model.xbase, objfun(x2))
model3.change_point(3, x3 - model.xbase, objfun(x3))
model3.change_point(4, x4 - model.xbase, objfun(x4))
# Force Hessian to be something else
model3.model_hess = np.eye(n)
A2, left_scaling, right_scaling = model3.interpolation_matrix()
self.assertTrue(np.allclose(A, A2), 'Interp matrix 3')
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model3.interpolate_model(min_chg_hess=False)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(model3.model_const, objfun(model3.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4]:
self.assertAlmostEqual(model3.model_value(xi - model3.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
self.assertTrue(np.allclose(model.model_hess, model3.model_hess),
'min_chg_hess=False not working')
class TestInterpMatrixFullQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = (n+1) * (n+2) // 2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
x5 = x0 + np.array([-1.1, 1.0])
model.change_point(5, x5 - model.xbase, objfun(x5))
# For reference: model based around model.xbase
interp_ok, interp_cond_num, norm_chg_grad, norm_chg_hess, interp_error = model.interpolate_model(verbose=True)
self.assertTrue(interp_ok, 'Interpolation failed')
self.assertAlmostEqual(interp_error, 0.0, msg='Expect exact interpolation')
self.assertAlmostEqual(norm_chg_grad, np.linalg.norm(model.model_grad))
self.assertAlmostEqual(norm_chg_hess, np.linalg.norm(model.model_hess, ord='fro'))
self.assertAlmostEqual(model.model_const, objfun(model.xbase), msg='Wrong constant term')
for xi in [x0, x1, x2, x3, x4, x5]:
self.assertAlmostEqual(model.model_value(xi - model.xbase, d_based_at_xopt=False, with_const_term=True),
objfun(xi), msg='Wrong interp value at %s' % str(xi))
# Test some other parameter settings for model.model_value()
g, hess = model.build_full_model()
self.assertTrue(np.allclose(g, model.model_grad + model.model_hess.dot(model.xopt(abs_coordinates=False))),
'Bad gradient')
self.assertTrue(np.allclose(hess, model.model_hess), 'Bad Hessian')
class TestLagrangePolyLinear(unittest.TestCase):
def runTest(self):
n = 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i==j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestLagrangePolyUnderdeterminedQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = n + 2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i == j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestLagrangePolyUnderdeterminedQuadratic2(unittest.TestCase):
def runTest(self):
n = 2
npt = 2 * n + 1
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i == j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestLagrangePolyFullQuadratic(unittest.TestCase):
def runTest(self):
n = 2
npt = (n + 1) * (n + 2) // 2
x0 = np.array([1.0, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = x0 + np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = x0 + np.array([0.1, 0.9])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = x0 + np.array([-0.1, 0.0])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = x0 + np.array([-0.1, 2.0])
model.change_point(4, x4 - model.xbase, objfun(x4))
x5 = x0 + np.array([-1.1, 1.0])
model.change_point(5, x5 - model.xbase, objfun(x5))
xopt = model.xopt()
for i in range(npt):
c, g, hess = model.lagrange_polynomial(i) # based at xopt
for j in range(npt):
dx = model.xpt(j) - xopt
lag_value = c + model_value(g, hess, dx)
expected_value = 1.0 if i == j else 0.0
self.assertAlmostEqual(lag_value, expected_value, msg="Lagrange for x%g has bad value at x%g" % (i, j))
class TestPoisednessLinear(unittest.TestCase):
def runTest(self):
n = 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
delta = 0.5
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
model.add_new_sample(0, rosenbrock(x0))
x1 = x0 + delta * np.array([1.0, 0.0])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = x0 + delta * np.array([0.0, 1.0])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
model.kopt = 0 # force this
        # Here (in coordinates scaled so delta=1) the Lagrange polynomials are
        # (1-x-y), x and y. The largest absolute value over the unit ball is taken
        # by (1-x-y) at (x,y) = (-1/sqrt2, -1/sqrt2), since max |1-x-y| over
        # ||(x,y)|| <= 1 is 1 + sqrt(2); the other two polynomials peak at 1.
self.assertAlmostEqual(model.poisedness_constant(delta), 1.0 + sqrt(2.0), places=6, msg="Poisedness wrong")
class TestPoisednessFullQuadratic(unittest.TestCase):
def runTest(self):
# DFO book, Figure 3.1 (note errata) - solution from Mathematica
n = 2
npt = (n + 1) * (n + 2) // 2
x0 = np.array([0.5, 0.5])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = np.array([0.05, 0.1])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = np.array([0.1, 0.05])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = np.array([0.95, 0.9])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = np.array([0.9, 0.95])
model.change_point(4, x4 - model.xbase, objfun(x4))
x5 = np.array([0.85, 0.85])
model.change_point(5, x5 - model.xbase, objfun(x5))
delta = 0.5
model.kopt = 0 # force base point
self.assertLessEqual(model.poisedness_constant(delta), 294.898, msg="Poisedness wrong")
class TestPoisednessUnderdeterminedQuadratic(unittest.TestCase):
def runTest(self):
# Based originally on DFO book, Figure 3.3 - solution from Mathematica
n = 2
npt = 2*n + 1
x0 = np.array([0.5, 0.5])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, objfun(x0), xl, xu, 1)
x1 = np.array([0.524, 0.0006])
model.change_point(1, x1 - model.xbase, objfun(x1))
x2 = np.array([0.032, 0.323])
model.change_point(2, x2 - model.xbase, objfun(x2))
x3 = np.array([0.187, 0.89])
model.change_point(3, x3 - model.xbase, objfun(x3))
x4 = np.array([0.982, 0.368])
model.change_point(4, x4 - model.xbase, objfun(x4))
delta = 0.5
model.kopt = 0 # force base point
self.assertAlmostEqual(model.poisedness_constant(delta), 1.10018, places=3, msg="Poisedness wrong")
class TestAddPoint(unittest.TestCase):
def runTest(self):
n, m = 2, 2
npt = n + 1
x0 = np.array([-1.2, 1.0])
xl = -1e2 * np.ones((n,))
xu = 1e2 * np.ones((n,))
model = Model(npt, x0, rosenbrock(x0), xl, xu, 1)
x1 = np.array([1.0, 0.9])
model.change_point(1, x1 - model.xbase, rosenbrock(x1))
x2 = np.array([2.0, 0.9])
model.change_point(2, x2 - model.xbase, rosenbrock(x2))
# Now add a new point
x3 = np.array([1.0, 1.0]) # good point
add_ok = model.add_new_point(x3 - model.xbase, rosenbrock(x3))
self.assertTrue(add_ok, "Adding x3 failed")
self.assertEqual(model.npt(), 4, "Wrong number of points after x3")
self.assertTrue(array_compare(model.xpt(3, abs_coordinates=True), x3), "Wrong new point after x3")
self.assertTrue(array_compare(model.fval(3), rosenbrock(x3)), "Wrong fval after x3")
self.assertEqual(model.kopt, 3, "Wrong kopt after x3")
self.assertEqual(len(model.nsamples), 4, "Wrong nsamples length after x3")
self.assertEqual(model.nsamples[-1], 1, "Wrong nsample value after x3")
x4 = np.array([-1.8, 1.8]) # bad point
add_ok = model.add_new_point(x4 - model.xbase, rosenbrock(x4))
self.assertTrue(add_ok, "Adding x4 failed")
self.assertEqual(model.npt(), 5, "Wrong number of points after x4")
self.assertTrue(array_compare(model.xpt(4, abs_coordinates=True), x4), "Wrong new point after x4")
self.assertTrue(array_compare(model.fval(4), rosenbrock(x4)), "Wrong fval after x4")
self.assertEqual(model.kopt, 3, "Wrong kopt after x4")
x5 = np.array([-1.0, 1.0])
add_ok = model.add_new_point(x5 - model.xbase, rosenbrock(x5))
self.assertTrue(add_ok, "Adding x5 failed")
self.assertEqual(model.npt(), 6, "Wrong number of points after x5")
x6 = np.array([-1.5, 1.5])
add_ok = model.add_new_point(x6 - model.xbase, rosenbrock(x6))
self.assertFalse(add_ok, "Adding x6 should have failed")
self.assertEqual(model.npt(), 6, "Wrong number of points after x6")
self.assertTrue(array_compare(model.xpt(5, abs_coordinates=True), x5), "Wrong new point after x6")
self.assertTrue(array_compare(model.fval(5), rosenbrock(x5)), "Wrong fval after x6")
self.assertEqual(model.kopt, 3, "Wrong kopt after x6")
| gpl-3.0 | 3,865,291,634,601,302,000 | 51.248603 | 125 | 0.603796 | false | 3.03185 | true | false | false |
mbuesch/toprammer | libtoprammer/chips/microchip8/pic18f2320dip28.py | 1 | 5199 | """
# TOP2049 Open Source programming suite
#
# Microchip PIC18F2320 DIP28
#
# Copyright (c) 2013 Pavel Stemberk <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from .microchip8_18f1220family import *
class Chip_PIC18F2320dip28(microchip8_18f1220family):
hasEEPROM = True
writeBufferSize = 8
eraseBufferSize = 64
def __init__(self):
microchip8_18f1220family.__init__(self,
chipPackage="DIP28",
chipPinVCC=20,
chipPinsVPP=1,
chipPinGND=19,
signature=b"\x80\x05",
flashPageSize=0x2000,
flashPages=1,
eepromPageSize=0x100,
eepromPages=1,
fuseBytes=14
)
fuseDesc = (
BitDescription(0o00, "NA"),
BitDescription(0o01, "NA"),
BitDescription(0o02, "NA"),
BitDescription(0o03, "NA"),
BitDescription(0o04, "NA"),
BitDescription(0o05, "NA"),
BitDescription(0o06, "NA"),
BitDescription(0o07, "NA"),
BitDescription(0o10, "FOSC[0], 0=LP, 100=INTOSC"),
BitDescription(0o11, "FOSC[1]"),
BitDescription(0o12, "FOSC[2]"),
BitDescription(0o13, "FOSC[3]"),
BitDescription(0o14, "NA"),
BitDescription(0o15, "NA"),
BitDescription(0o16, "FSCM, 0=Fail-Safe Clock Monitor is disabled"),
BitDescription(0o17, "IESO, 0=Internal/External Switchover mode is disabled"),
BitDescription(0o20, "nPWRT"),
BitDescription(0o21, "BOR"),
BitDescription(0o22, "BORV[0]"),
BitDescription(0o23, "BORV[1]"),
BitDescription(0o24, "NA"),
BitDescription(0o25, "NA"),
BitDescription(0o26, "NA"),
BitDescription(0o27, "NA"),
BitDescription(0o30, "WDT, 0=WDT disabled, 1=WDT enabled"),
BitDescription(0o31, "WDTPS[0]"),
BitDescription(0o32, "WDTPS[1]"),
BitDescription(0o33, "WDTPS[2]"),
BitDescription(0o34, "WDTPS[3]"),
BitDescription(0o35, "NA"),
BitDescription(0o36, "NA"),
BitDescription(0o37, "NA"),
BitDescription(0o40, "NA"),
BitDescription(0o41, "NA"),
BitDescription(0o42, "NA"),
BitDescription(0o43, "NA"),
BitDescription(0o44, "NA"),
BitDescription(0o45, "NA"),
BitDescription(0o46, "NA"),
BitDescription(0o47, "NA"),
BitDescription(0o50, "NA"),
BitDescription(0o51, "NA"),
BitDescription(0o52, "NA"),
BitDescription(0o53, "NA"),
BitDescription(0o54, "NA"),
BitDescription(0o55, "NA"),
BitDescription(0o56, "NA"),
BitDescription(0o57, "MCLRE"),
BitDescription(0o60, "STVR"),
BitDescription(0o61, "NA"),
BitDescription(0o62, "LVP"),
BitDescription(0o63, "NA"),
BitDescription(0o64, "NA"),
BitDescription(0o65, "NA"),
BitDescription(0o66, "NA"),
BitDescription(0o67, "nDEBUG"),
BitDescription(0o70, "NA"),
BitDescription(0o71, "NA"),
BitDescription(0o72, "NA"),
BitDescription(0o73, "NA"),
BitDescription(0o74, "NA"),
BitDescription(0o75, "NA"),
BitDescription(0o76, "NA"),
BitDescription(0o77, "NA"),
BitDescription(0o100, "CP[0]"),
BitDescription(0o101, "CP[1]"),
BitDescription(0o102, "CP[2]"),
BitDescription(0o103, "CP[3]"),
BitDescription(0o104, "NA"),
BitDescription(0o105, "NA"),
BitDescription(0o106, "NA"),
BitDescription(0o107, "NA"),
BitDescription(0o110, "NA"),
BitDescription(0o111, "NA"),
BitDescription(0o112, "NA"),
BitDescription(0o113, "NA"),
BitDescription(0o114, "NA"),
BitDescription(0o115, "NA"),
BitDescription(0o116, "CPB"),
BitDescription(0o117, "CPD"),
BitDescription(0o120, "WRT[0]"),
BitDescription(0o121, "WRT[1]"),
BitDescription(0o122, "NA"),
BitDescription(0o123, "NA"),
BitDescription(0o124, "NA"),
BitDescription(0o125, "NA"),
BitDescription(0o126, "NA"),
BitDescription(0o127, "NA"),
BitDescription(0o130, "NA"),
BitDescription(0o131, "NA"),
BitDescription(0o132, "NA"),
BitDescription(0o133, "NA"),
BitDescription(0o134, "NA"),
BitDescription(0o135, "WRTC"),
BitDescription(0o136, "WRTB"),
BitDescription(0o137, "WRTD"),
BitDescription(0o140, "EBTR[0]"),
BitDescription(0o141, "EBTR[1]"),
BitDescription(0o142, "NA"),
BitDescription(0o143, "NA"),
BitDescription(0o144, "NA"),
BitDescription(0o145, "NA"),
BitDescription(0o146, "NA"),
BitDescription(0o147, "NA"),
BitDescription(0o150, "NA"),
BitDescription(0o151, "NA"),
BitDescription(0o152, "NA"),
BitDescription(0o153, "NA"),
BitDescription(0o154, "NA"),
BitDescription(0o155, "NA"),
BitDescription(0o156, "EBTRB"),
BitDescription(0o157, "NA"),
)
ChipDescription(
Chip_PIC18F2320dip28,
bitfile="microchip01dip28",
chipID="PIC18F2320dip28",
runtimeID=(0xDE07, 0x01),
chipVendors="Microchip",
description="PIC18F2320",
packages=(("DIP18", ""),),
fuseDesc=fuseDesc,
maintainer="Pavel Stemberk <[email protected]>",
)
| gpl-2.0 | -1,197,358,248,590,329,300 | 28.207865 | 79 | 0.69975 | false | 2.591725 | false | false | false |
metal-remco/afstuderen | blockly/build.py | 5 | 5154 | #!/usr/bin/python
# Compresses the core Blockly files into a single JavaScript file.
#
# Copyright 2012 Google Inc.
# http://blockly.googlecode.com/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two files:
# demos/blockly_compressed.js
# demos/blockly_uncompressed.js
# The compressed file is a concatenation of all of Blockly's core files which
# have been run through Google's Closure Compiler. This is done using the
# online API (which takes a few seconds and requires an Internet connection).
# The uncompressed file is a script that loads in each of Blockly's core files
# one by one. This takes much longer for a browser to load, but may be useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
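# Usage note (added for clarity, not part of the original header): the script
# is normally run from the Blockly root directory, e.g. "python build.py",
# since it reads from core/ and writes into demos/; the compressed build step
# needs an Internet connection to reach the Closure Compiler web API.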
import httplib, json, urllib, sys
filenames = [
'blockly.js',
'block.js',
'block_svg.js',
'bubble.js',
'comment.js',
'connection.js',
'contextmenu.js',
'field.js',
'field_dropdown.js',
'field_checkbox.js',
'field_colour.js',
'field_image.js',
'field_label.js',
'field_textinput.js',
'field_variable.js',
'flyout.js',
'generator.js',
'inject.js',
'input.js',
'mutator.js',
'names.js',
'procedures.js',
'scrollbar.js',
'toolbox.js',
'tooltip.js',
'trashcan.js',
'utils.js',
'variables.js',
'warning.js',
'workspace.js',
'xml.js']
header = ('// Do not edit this file; automatically generated by build.py.\n'
'"use strict";')
def gen_uncompressed():
target_filename = 'demos/blockly_uncompressed.js'
inc = '''%s
document.write('<script type="text/javascript" src="../../../closure-library-read-only/closure/goog/base.js"></script>');
(function() {
var filenames = %s;
for (var x = 0; x < filenames.length; x++) {
document.write('<script type="text/javascript" src="../../core/' + filenames[x] + '"></script>');
}
})();
''' % (header, filenames)
f = open(target_filename, 'w')
f.write(inc)
f.close()
print 'SUCCESS: ' + target_filename
def gen_compressed():
target_filename = 'demos/blockly_compressed.js'
# Define the parameters for the POST request.
params = [
('compilation_level', 'SIMPLE_OPTIMIZATIONS'),
('use_closure_library', 'true'),
('output_format', 'json'),
('output_info', 'compiled_code'),
('output_info', 'warnings'),
('output_info', 'errors'),
('output_info', 'statistics'),
]
# Read in all the source files.
for filename in filenames:
f = open('core/' + filename)
params.append(('js_code', ''.join(f.readlines())))
f.close()
# Send the request to Google.
headers = { "Content-type": "application/x-www-form-urlencoded" }
conn = httplib.HTTPConnection('closure-compiler.appspot.com')
conn.request('POST', '/compile', urllib.urlencode(params), headers)
response = conn.getresponse()
json_str = response.read()
  conn.close()
# Parse the JSON response.
json_data = json.loads(json_str)
def file_lookup(name):
if not name.startswith('Input_'):
return '???'
n = int(name[6:])
return filenames[n]
if json_data.has_key('errors'):
errors = json_data['errors']
for error in errors:
print 'FATAL ERROR'
print error['error']
print '%s at line %d:' % (
file_lookup(error['file']), error['lineno'])
print error['line']
print (' ' * error['charno']) + '^'
else:
if json_data.has_key('warnings'):
warnings = json_data['warnings']
for warning in warnings:
print 'WARNING'
print warning['warning']
print '%s at line %d:' % (
file_lookup(warning['file']), warning['lineno'])
print warning['line']
print (' ' * warning['charno']) + '^'
print
code = header + '\n' + json_data['compiledCode']
stats = json_data['statistics']
original_b = stats['originalSize']
compressed_b = stats['compressedSize']
if original_b > 0 and compressed_b > 0:
f = open(target_filename, 'w')
f.write(code)
f.close()
original_kb = int(original_b / 1024 + 0.5)
compressed_kb = int(compressed_b / 1024 + 0.5)
ratio = int(float(compressed_b) / float(original_b) * 100 + 0.5)
print 'SUCCESS: ' + target_filename
print 'Size changed from %d KB to %d KB (%d%%).' % (
original_kb, compressed_kb, ratio)
else:
print 'UNKNOWN ERROR'
if __name__ == '__main__':
gen_uncompressed()
gen_compressed()
| apache-2.0 | -3,536,292,354,067,038,700 | 30.426829 | 121 | 0.635817 | false | 3.559392 | false | false | false |
hvasbath/beat | test/test_ffi_gfstacking.py | 1 | 7603 | from pyrocko import gf
from pyrocko import model, util
from pyrocko import orthodrome as otd
from pyrocko import moment_tensor as mt
from pyrocko import trace
from beat.sources import RectangularSource
from beat import ffi, models
import numpy as num
from beat import inputf, utility, heart, config
import os
km = 1000.
util.setup_logging('test_ffi_stacking', 'info')
# set random seed for reproducible station locations
num.random.seed(10)
nuc_dip = 5.
nuc_strike = 2.
time_shift = -10. # from previous inversion
# general
project_dir = '/home/vasyurhm/BEATS/LaquilaJointPonlyUPDATE_wide_kin3'
store_superdirs = ['/home/vasyurhm/GF/Laquila']
white_noise_perc_max = 0.025 # white noise to disturb the synthetic data, as a fraction of the maximum amplitude [Hallo et al. 2016 use 0.01]
problem = models.load_model(project_dir, mode='ffi', build=False)
event = problem.config.event
components = ['uparr'] #, 'uperp']
starttime_sampling = 0.5
arrival_taper = heart.ArrivalTaper(
a=-15.,
b=-10.,
c=50.,
d=55.)
sc = problem.composites['seismic']
fault = sc.load_fault_geometry()
# get number of patches in dip and strike direction
npdip, npstrike = fault.ordering.get_subfault_discretization(0)
# do fast sweeping to get rupture onset times for patches with respect to hypocenter
velocities = num.ones((npdip, npstrike)) * 3.5
nuc_dip_idx, nuc_strike_idx = fault.fault_locations2idxs(
0, nuc_dip, nuc_strike, backend='numpy')
starttimes = fault.get_subfault_starttimes(
0, velocities, nuc_dip_idx, nuc_strike_idx).ravel() + time_shift
print(starttimes)
# defining distributed slip values for slip parallel and perpendicular directions
uparr = num.ones((npdip, npstrike)) * 2.
#uparr[1:3, 3:7] = 1.5
uperp = num.zeros((npdip, npstrike))
#uperp[0,0] = 1.
#uperp[3,9] = 1.
uperp[1:3, 3:7] = 1.0
# define rupture durations on each patch
durations = num.ones((npdip, npstrike)) * 0.5
slips = {
components[0]: uparr.ravel(),
# components[1]: uperp.ravel(),
'durations': durations.ravel(),
'velocities': velocities.ravel()
}
print('fault parameters', slips)
# update patches with distributed slip and STF values
for comp in components:
patches = fault.get_subfault_patches(0, datatype='seismic', component=comp)
for patch, starttime, duration, slip in zip(
patches, starttimes, durations.ravel(), slips[comp]):
#stf = gf.HalfSinusoidSTF(anchor=-1., duration=float(duration))
patch.stf.duration = float(duration)
#stime = num.round(starttime / starttime_sampling) * starttime_sampling
patch.update(slip=float(slip), time=event.time + float(starttime))
# print(patch)
# synthetics generation
engine = gf.LocalEngine(store_superdirs=store_superdirs)
patchidx = fault.patchmap(
index=0, dipidx=nuc_dip_idx, strikeidx=nuc_strike_idx)
targets = sc.wavemaps[0].targets
filterer = sc.wavemaps[0].config.filterer
ntargets = len(targets)
gfs = ffi.load_gf_library(
directory=project_dir + '/ffi/linear_gfs/',
filename='seismic_uparr_any_P_0')
ats = gfs.reference_times - arrival_taper.b
traces, tmins = heart.seis_synthetics(
engine, patches, targets, arrival_times=ats,
wavename='any_P', arrival_taper=arrival_taper,
filterer=filterer, outmode='stacked_traces')
targetidxs = num.lib.index_tricks.s_[:]
if False:
# for station corrections maybe in the future?
station_corrections = num.zeros(len(traces))
starttimes = (num.tile(starttimes, ntargets) + num.repeat(
station_corrections, fault.npatches)).reshape(
ntargets, fault.npatches)
targetidxs = num.atleast_2d(num.arange(ntargets)).T
gfs.set_stack_mode('numpy')
synthetics_nn = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='nearest_neighbor')
synthetics_ml = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='multilinear')
gfs.init_optimization()
synthetics_nn_t = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='nearest_neighbor').eval()
synthetics_ml_t = gfs.stack_all(
targetidxs=targetidxs,
starttimes=starttimes,
durations=durations.ravel(),
slips=slips[components[0]],
interpolation='multilinear').eval()
synth_traces_nn = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_nn[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print('trace tmin synthst', tr.tmin)
tr.set_codes(*target.codes)
tr.set_location('nn')
synth_traces_nn.append(tr)
synth_traces_ml = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_ml[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print 'trace tmin synthst', tr.tmin
tr.set_codes(*target.codes)
tr.set_location('ml')
synth_traces_ml.append(tr)
synth_traces_nn_t = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_nn_t[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print('trace tmin synthst', tr.tmin)
tr.set_codes(*target.codes)
tr.set_location('nn_t')
synth_traces_nn_t.append(tr)
synth_traces_ml_t = []
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_ml_t[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
#print 'trace tmin synthst', tr.tmin
tr.set_codes(*target.codes)
tr.set_location('ml_t')
synth_traces_ml_t.append(tr)
# display to check
trace.snuffle(
traces + synth_traces_nn + synth_traces_ml + synth_traces_nn_t + synth_traces_ml_t,
stations=sc.wavemaps[0].stations, events=[event])
traces1, tmins = heart.seis_synthetics(
engine, [patches[0]], targets, arrival_times=ats,
wavename='any_P', arrival_taper=arrival_taper,
filterer=filterer, outmode='stacked_traces')
gfs.set_stack_mode('numpy')
synth_traces_ml1 = []
for i in range(1):
synthetics_ml1 = gfs.stack_all(
targetidxs=targetidxs,
patchidxs=[i],
starttimes=starttimes[0],
durations=durations.ravel()[0],
slips=num.atleast_1d(slips[components[0]][0]),
interpolation='multilinear')
for i, target in enumerate(targets):
tr = trace.Trace(
ydata=synthetics_ml1[i, :],
tmin=gfs.reference_times[i],
deltat=gfs.deltat)
print('trace tmin synthst', tr.tmin)
#print(target.codes)
tr.set_codes(*target.codes)
tr.set_location('ml%i' % i)
synth_traces_ml1.append(tr)
trace.snuffle(
traces1 + synth_traces_ml1,
stations=sc.wavemaps[0].stations, events=[event])
# convert pyrocko traces to beat traces
beat_traces = []
for tr in traces:
#print tr
btrc = heart.SeismicDataset.from_pyrocko_trace(tr)
seis_err_std = num.abs(btrc.ydata).max() * white_noise_perc_max
noise = num.random.normal(0, seis_err_std, btrc.ydata.shape[0])
btrc.ydata += noise
btrc.set_location('0')
beat_traces.append(btrc)
# display to check noisy traces
#trace.snuffle(beat_traces, stations=stations, events=[event])
# save data to project folder
seismic_outpath = os.path.join(project_dir, 'seismic_data.pkl')
#util.ensuredir(project_dir)
#print 'saving synthetic data to: ', seismic_outpath
#utility.dump_objects(seismic_outpath, outlist=[stations, beat_traces])
| gpl-3.0 | 4,783,027,602,398,209,000 | 29.170635 | 140 | 0.685519 | false | 2.886484 | false | false | false |
arokem/nipype | nipype/interfaces/mne/base.py | 9 | 4469 | from nipype.interfaces.base import (traits, File, Directory, TraitedSpec,
OutputMultiPath)
import os.path as op
import glob
from nipype.interfaces.freesurfer.base import FSCommand, FSTraitedSpec
from nipype.utils.filemanip import list_to_filename
from nipype.external import six
import logging
logging.basicConfig()
iflogger = logging.getLogger('interface')
class WatershedBEMInputSpec(FSTraitedSpec):
subject_id = traits.Str(argstr='--subject %s', mandatory=True,
desc='Subject ID (must have a complete Freesurfer directory)')
subjects_dir = Directory(exists=True, mandatory=True, usedefault=True,
desc='Path to Freesurfer subjects directory')
volume = traits.Enum('T1', 'aparc+aseg', 'aseg', 'brain', 'orig', 'brainmask', 'ribbon',
argstr='--volume %s', usedefault=True,
desc='The volume from the "mri" directory to use (defaults to T1)')
overwrite = traits.Bool(True, usedefault=True, argstr='--overwrite',
desc='Overwrites the existing files')
atlas_mode = traits.Bool(argstr='--atlas',
desc='Use atlas mode for registration (default: no rigid alignment)')
class WatershedBEMOutputSpec(TraitedSpec):
mesh_files = OutputMultiPath(File(exists=True),
desc=('Paths to the output meshes (brain, inner '
'skull, outer skull, outer skin)'))
brain_surface = File(exists=True, loc='bem/watershed',
desc='Brain surface (in Freesurfer format)')
inner_skull_surface = File(exists=True, loc='bem/watershed',
desc='Inner skull surface (in Freesurfer format)')
outer_skull_surface = File(exists=True, loc='bem/watershed',
desc='Outer skull surface (in Freesurfer format)')
outer_skin_surface = File(exists=True, loc='bem/watershed',
desc='Outer skin surface (in Freesurfer format)')
fif_file = File(exists=True, loc='bem', altkey='fif',
desc='"fif" format file for EEG processing in MNE')
cor_files = OutputMultiPath(File(exists=True), loc='bem/watershed/ws',
altkey='COR', desc='"COR" format files')
class WatershedBEM(FSCommand):
"""Uses mne_watershed_bem to get information from dicom directories
Examples
--------
>>> from nipype.interfaces.mne import WatershedBEM
>>> bem = WatershedBEM()
>>> bem.inputs.subject_id = 'subj1'
>>> bem.inputs.subjects_dir = '.'
>>> bem.cmdline
'mne_watershed_bem --overwrite --subject subj1 --volume T1'
>>> bem.run() # doctest: +SKIP
"""
_cmd = 'mne_watershed_bem'
input_spec = WatershedBEMInputSpec
output_spec = WatershedBEMOutputSpec
_additional_metadata = ['loc', 'altkey']
def _get_files(self, path, key, dirval, altkey=None):
globsuffix = '*'
globprefix = '*'
keydir = op.join(path, dirval)
if altkey:
key = altkey
globpattern = op.join(keydir, ''.join((globprefix, key, globsuffix)))
return glob.glob(globpattern)
def _list_outputs(self):
outputs = self.output_spec().get()
subjects_dir = self.inputs.subjects_dir
subject_path = op.join(subjects_dir, self.inputs.subject_id)
output_traits = self._outputs()
mesh_paths = []
for k in outputs.keys():
if k != 'mesh_files':
val = self._get_files(subject_path, k,
output_traits.traits()[k].loc,
output_traits.traits()[k].altkey)
if val:
value_list = list_to_filename(val)
if isinstance(value_list, list):
out_files = []
for value in value_list:
out_files.append(op.abspath(value))
elif isinstance(value_list, six.string_types):
out_files = op.abspath(value_list)
else:
raise TypeError
outputs[k] = out_files
if not k.rfind('surface') == -1:
mesh_paths.append(out_files)
outputs['mesh_files'] = mesh_paths
return outputs
| bsd-3-clause | -5,784,901,698,516,482,000 | 43.247525 | 97 | 0.564556 | false | 3.95137 | false | false | false |
eeg/PieTree | src/PieClasses.py | 1 | 14053 | #! /usr/bin/env python
#--------------------------------------------------
# Copyright 2008 Emma Goldberg
#
# This file is part of PieTree.
#
# PieTree is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PieTree is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PieTree. If not, see <http://www.gnu.org/licenses/>.
#--------------------------------------------------
######################################################
# Module: PieClasses.py
# Author: Emma Goldberg
# Date: Nov, 2011 (orig Apr 2008)
######################################################
from math import cos, sin, pi
import cairo
import TreeStruct
#--------------------------------------------------
# For drawing a tree of any shape
#--------------------------------------------------
class PieTree():
'''
This class contains:
* tree root
* tree attributes: ntips, nstates
* node/tip drawing functions
further fleshed out in the rectangular and radial subclasses
* cairo surface to be drawn to
* plotting variables
'''
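    # Minimal usage sketch (added for illustration; not part of the original
    # source). It assumes `root` is an already-built TreeStruct tree, `cr` is a
    # cairo drawing context, and `cfg` is a settings object providing the plot
    # attributes used below (pieradius, tipspacing, colors, scalebar, ...):
    #
    #   tree = PieTreeRect(root, ntips=8, nstates=2, surface=cr, plot_values=cfg)
    #   tree.CalcXY(tree.MaxTipNameSize())
    #   tree.PlotTree()
    #   tree.DrawScalebar()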
def __init__(self, root, ntips, nstates, surface, plot_values):
self.root = root
self.ntips = ntips
self.nstates = nstates
self.surface = surface
self.plot_vars = plot_values
def MaxTipNameSize(self):
'''Find the longest (widest) tip name in this tree.'''
# todo: extract answer by using return rather than list
def MTNS(node, cr, tipsize):
if node.daughters == None:
thistipsize = cr.text_extents(node.label)[2]
if thistipsize > tipsize[0]:
tipsize[0] = thistipsize
else:
for d in node.daughters:
MTNS(d, cr, tipsize)
tipsize = [-1]
MTNS(self.root, self.surface, tipsize)
return tipsize[0]
def PlotTree(self):
'''Calls the drawing functions for the whole tree.'''
def PT(tree, node):
if node.daughters == None:
tree.DrawTip(node)
else:
tree.DrawFork(node)
if tree.plot_vars.pieradius > 0:
if node.state != None:
tree.DrawPie(node)
else:
print "NOTE: state not specified for %s" \
% (node.label)
if tree.plot_vars.nodenamesize > 0:
tree.DrawNodeLabel(node)
for d in node.daughters:
PT(tree, d)
self.DrawRoot()
PT(self, self.root)
def DrawTipMore(self, node, (x,y), delta):
'''Finish the work of DrawTip.'''
c = self.plot_vars
cr = self.surface
# box border
if c.rimthick > 0 and c.boxsize > 0:
cr.set_line_width(c.rimthick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], \
c.linecolor[2])
cr.stroke_preserve()
# tip color
if node.state in range(self.nstates):
i = node.state
cr.set_source_rgb(c.color[i][0], c.color[i][1], c.color[i][2])
else:
cr.set_source_rgb(0.5, 0.5, 0.5)
print "WARNING: check the state of %s" % node.label
cr.fill()
# tip label
if c.tipnamesize > 0:
if c.tipnamestatecolor != "yes":
cr.set_source_rgb(c.textcolor[0], c.textcolor[1], \
c.textcolor[2])
cr.set_font_size(c.tipnamesize)
textheight = cr.text_extents(node.label)[3]
cr.move_to(x + delta/2. + c.tipspacing/4., y + textheight/3.)
if c.underscorespace == "yes":
cr.show_text((node.label).replace("_", " "))
else:
cr.show_text(node.label)
def DrawPieMore(self, node, (x,y)):
'''Finish the work of DrawPie.'''
c = self.plot_vars
cr = self.surface
R = c.pieradius
# the outer circle of the pie
if c.rimthick > 0:
cr.set_line_width(c.rimthick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], \
c.linecolor[2])
cr.move_to(x, y)
cr.arc(x, y, R, 0, 2*pi)
cr.stroke()
# the pie pieces
angle_start = -pi/2
for i in range(self.nstates):
angle_stop = node.state[i] * 2 * pi + angle_start
cr.set_source_rgb(c.color[i][0], c.color[i][1], c.color[i][2])
cr.move_to(x, y)
cr.arc(x, y, R, angle_start, angle_stop)
cr.fill()
angle_start = angle_stop
def DrawNodeLabelMore(self, node, (x,y)):
'''Finish the work of DrawNodeLabel.'''
c = self.plot_vars
cr = self.surface
cr.set_source_rgb(c.textcolor[0], c.textcolor[1], c.textcolor[2])
cr.set_font_size(c.nodenamesize)
if node.label != None:
textheight = cr.text_extents(node.label)[3]
cr.move_to(x + c.pieradius + c.tipspacing/5., y + textheight/2.)
if c.underscorespace == "yes":
cr.show_text((node.label).replace("_", " "))
else:
cr.show_text(node.label)
def DrawScalebar(self):
'''Display the time scale.'''
c = self.plot_vars
cr = self.surface
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
# size of the label
showme = str(c.scalebar["length"])
tw = (cr.text_extents(showme)[2], cr.text_extents(showme)[3])
# note: "%.*e" % (n-1, x) rounds to n digits
x0 = self.Xform( (self.root.x, 0) )[0]
x1 = self.Xform( (self.root.x + c.scalebar["length"], 0) )[0]
y = c.height - c.ymargin/2
y0 = y - tw[1]
y1 = y + tw[1]
# actual scalebar
cr.move_to(x0, y)
cr.line_to(x1, y)
cr.stroke()
# whiskers
cr.move_to(x0, y0)
cr.line_to(x0, y1)
cr.stroke()
cr.move_to(x1, y0)
cr.line_to(x1, y1)
cr.stroke()
# label
cr.move_to((x0 + x1) / 2. - tw[0], y0)
cr.set_font_size(c.scalebar["textsize"])
cr.show_text(showme)
#--------------------------------------------------
# For drawing a rectangular tree
#--------------------------------------------------
class PieTreeRect(PieTree):
'''For plotting a rectangularly-oriented tree.'''
def CalcXY(self, tipsize):
'''Compute the (x, y) coordinate for each tip and node.
These are stored as .x and .y node attributes.
Also store horizontal scaling info as .xmax and .xscale.'''
# todo: extract answer by using return rather than list
def CXY(node, x, i, xmax):
if node.length != None:
x += node.length
node.x = x
if x > xmax[0]:
xmax[0] = x
if node.daughters != None:
for d in node.daughters:
i = CXY(d, x, i, xmax)
if node.daughters == None:
node.y = i
i += 1
else:
sum_y = 0.0
for d in node.daughters:
sum_y += d.y
node.y = sum_y / len(node.daughters)
return i
c = self.plot_vars
xmax = [-1]
CXY(self.root, 0, 0.5, xmax)
c.xmax = xmax[0]
c.xscale = (c.width - 2*c.xmargin - c.tipspacing - tipsize - \
c.pieradius) / c.xmax
def Xform(self, (x,y)):
'''Transform (x, y) coordinates from tree to canvas.'''
c = self.plot_vars
return(c.xmargin + c.pieradius + c.linethick + x * c.xscale, \
c.ymargin + y * c.tipspacing)
def DrawTip(self, node):
'''Draw the tip box, border, and label.'''
c = self.plot_vars
cr = self.surface
# the tip box
(x, y) = self.Xform( (node.x, node.y) )
delta = c.boxsize
cr.rectangle(x - delta/2., y-delta/2., delta, delta)
# everything else
self.DrawTipMore(node, (x,y), delta)
def DrawPie(self, node):
'''Draw the pie chart at this node.'''
xy = self.Xform( (node.x, node.y) )
self.DrawPieMore(node, xy)
def DrawFork(self, node):
'''Draw the fork to this node's daughters.'''
c = self.plot_vars
cr = self.surface
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
(x0, y0) = self.Xform( (node.x, node.y) )
for d in node.daughters:
(x, y) = self.Xform( (d.x, d.y) )
cr.move_to(x0, y0)
cr.line_to(x0, y)
cr.line_to(x, y)
cr.stroke()
def DrawNodeLabel(self, node):
'''Put the text label by this node.'''
xy = self.Xform( (node.x, node.y) )
self.DrawNodeLabelMore(node, xy)
def DrawRoot(self):
'''Draw the branch leading to the root.'''
c = self.plot_vars
cr = self.surface
(x0, y) = self.Xform( (0, self.root.y) )
(x, y) = self.Xform( (self.root.x, self.root.y) )
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
cr.move_to(x, y)
cr.line_to(x0, y)
cr.stroke()
#--------------------------------------------------
# For drawing a circular tree
#--------------------------------------------------
class PieTreeRadial(PieTree):
'''For plotting a radially-oriented tree.'''
def CalcXY(self, tipsize):
'''Compute the (x, y) and (r, theta) coordinate for each tip
and node. These are stored as node attributes .x .y .r .t.
Also store horizontal scaling info as .xmax and .xscale.'''
def CalcRT(node, r, i, rmax, ntips):
'''Compute the (r, theta) coordinates for each tip and node.
These are stored as .r and .t attributes.'''
if node.length != None:
r += node.length
node.r = r
if r > rmax[0]:
rmax[0] = r
if node.daughters != None:
for d in node.daughters:
i = CalcRT(d, r, i, rmax, ntips)
if node.daughters == None:
node.t = 2 * pi * i / ntips
i += 1
else:
sum_t = 0.0
for d in node.daughters:
sum_t += d.t
node.t = sum_t / len(node.daughters)
return i
def RTtoXY(node):
'''Convert polar to Cartesian coordinates.'''
if node.daughters != None:
for d in node.daughters:
RTtoXY(d)
node.x = node.r * cos(node.t)
node.y = node.r * sin(node.t)
c = self.plot_vars
rmax = [-1]
CalcRT(self.root, 0, 0, rmax, self.ntips)
RTtoXY(self.root)
c.xmax = rmax[0] * 2
c.xscale = (c.width - 2*c.xmargin - 2*c.tipspacing - 2*tipsize - \
2*c.pieradius) / c.xmax
def Xform(self, (x,y)):
'''transform (x, y) coordinates from tree to canvas'''
c = self.plot_vars
return (x * c.xscale + c.width/2., y * c.xscale + c.height/2.)
def DrawTip(self, node):
'''Draw the tip box, border, and label.'''
c = self.plot_vars
cr = self.surface
# the tip box
(x, y) = self.Xform( (node.x, node.y) )
delta = c.boxsize
m = cr.get_matrix() # for rotation, with set_matrix below
cr.translate(x, y)
cr.rotate(node.t)
cr.rectangle(0, -delta/2., delta, delta)
# everything else
self.DrawTipMore(node, (0,0), delta*2)
cr.set_matrix(m)
def DrawPie(self, node):
'''Draw the pie chart at this node.'''
cr = self.surface
(x, y) = self.Xform( (node.x, node.y) )
m = cr.get_matrix() # for rotation, with set_matrix below
cr.translate(x, y)
cr.rotate(node.t)
self.DrawPieMore(node, (0,0))
cr.set_matrix(m)
def DrawFork(self, node):
'''Draw the fork to this node's daughters.'''
c = self.plot_vars
cr = self.surface
cr.set_line_width(c.linethick)
cr.set_source_rgb(c.linecolor[0], c.linecolor[1], c.linecolor[2])
cr.set_line_cap(cairo.LINE_CAP_ROUND)
(x0, y0) = self.Xform( (node.x, node.y) )
(mint, maxt) = (2*pi, 0)
for d in node.daughters:
if d.t < mint:
mint = d.t
if d.t > maxt:
maxt = d.t
(xd, yd) = self.Xform( (d.x, d.y) )
xa = node.r * cos(d.t)
ya = node.r * sin(d.t)
(xb, yb) = self.Xform( (xa, ya) )
cr.move_to(xd, yd)
cr.line_to(xb, yb)
cr.stroke()
cr.arc(c.width/2., c.height/2., node.r*c.xscale, mint, maxt)
cr.stroke()
def DrawNodeLabel(self, node):
'''Put the text label by this node.'''
cr = self.surface
(x, y) = self.Xform( (node.x, node.y) )
m = cr.get_matrix() # for rotation, with set_matrix below
cr.translate(x, y)
cr.rotate(node.t)
self.DrawNodeLabelMore(node, (0, 0) )
cr.set_matrix(m)
def DrawRoot(self):
'''Draw the branch leading to the root.'''
pass
| gpl-3.0 | -7,510,339,363,670,532,000 | 28.647679 | 76 | 0.493418 | false | 3.405137 | false | false | false |
svleeuwen/dont-be-late-appengine | src/generic/handlers/admin/admin.py | 1 | 6689 | from google.appengine.api import urlfetch
from google.appengine.ext import ndb
from base import handlers
from base.handlers import AdminAjaxHandler
from generic import utils
from generic.utils import valid_slug
from dontbelate import settings
class AdminBaseDetailHandler(handlers.AdminHandler):
"""
Base object detail handler.
Usage:
Add the following class attrs to your handler.
model: model class
id_url_kwarg: url kwarg used in route
template_name: template name
"""
slug_fields = [('slug_en', 'Slug EN'), ('slug_pt', 'Slug PT')]
template_name = None
id_url_kwarg = 'obj_id'
def get_object(self, *args, **kwargs):
object_id = kwargs.get(self.id_url_kwarg)
try:
object_id = int(object_id)
except ValueError:
return self.abort(404)
obj = self.model.get_by_id(object_id)
if not obj:
return self.abort(404)
return obj
def get(self, *args, **kwargs):
self.render(self.template_name,
self.get_context_data(object=self.get_object(*args, **kwargs)))
def get_context_data(self, **kwargs):
return kwargs
def render_with_errors(self, obj, errors):
self.render(self.template_name,
self.get_context_data(object=obj, errors=errors))
def save_slugs(self, obj, errors):
"""
Call this method when saving form data
When calling this, it assumes the properties in self.slug_fields
are available on self.model
"""
for slug_name, label in self.slug_fields:
slug_value = self.request.get(slug_name)
slug_value = slug_value.strip()
setattr(obj, slug_name, slug_value)
if not slug_value:
errors.append('{} is required'.format(label))
return
if len(slug_value) < settings.MIN_SLUG_LENGTH:
errors.append('{} needs to be at least {} characters long.'.format(label, settings.MIN_SLUG_LENGTH))
if not valid_slug(slug_value):
errors.append('Enter a valid {} consisting of letters, numbers, underscores or hyphens.'.format(label))
else:
# check if obj with provided slug already exists
query = self.model.query(getattr(self.model, slug_name) == slug_value)
query = [item for item in query if not item == obj]
if query:
errors.append('{} with {} \'{}\' already exists'.format(self.model.__name__, label, slug_value))
class AdminImageUploadHandler(AdminAjaxHandler):
"""
Handles image upload from Croppic
"""
def post(self):
image_file = self.request.get('img')
thumbnail = utils.handle_image_upload(image_file)
self.render_json({
'status': 'success',
'url': thumbnail.url,
'width': thumbnail.width,
'height': thumbnail.height,
})
def DenyAccess(self):
self.render_json({'status': 'error', 'message': 'No access granted'})
def XsrfFail(self):
self.render_json({'status': 'error', 'message': 'XSRF token error'})
class AdminImageCropHandler(AdminAjaxHandler):
"""
Handles image crop from Croppic
"""
def post(self):
# handle image upload here
image_url = self.request.get('imgUrl')
image_w = int(float(self.request.get('imgW')))
image_h = int(float(self.request.get('imgH')))
image_x1 = float(self.request.get('imgX1'))
image_y1 = float(self.request.get('imgY1'))
crop_width = float(self.request.get('cropW'))
crop_height = float(self.request.get('cropH'))
image_file = urlfetch.fetch(image_url).content
thumbnail = utils.handle_image_crop(image_file, image_w, image_h, image_x1, image_y1, crop_width, crop_height)
self.render_json({
'status': 'success',
'url': thumbnail.url,
})
def DenyAccess(self):
self.render_json({'status': 'error', 'message': 'No access granted'})
def XsrfFail(self):
self.render_json({'status': 'error', 'message': 'XSRF token error'})
class AdminActionMixin(object):
"""
Adds action handling to admin change list handler.
Currently handles delete, publish and unpublish action.
Could hold more in the future.
Usage:
- Add a class attribute `model` to your handler
which should be set to the model class
- If `post` is implemented in the AdminHandler,
call `self.handle_action` in it. See implementation in `post` below.
- Make sure the change list html is wrapped in a <form>
"""
DELETE = 'delete'
PUBLISH = 'publish'
UNPUBLISH = 'unpublish'
actions = [
(DELETE, 'Delete selected items'),
]
def get_actions(self):
return self.render_to_string('admin/includes/actions.tpl', {
'actions': self.actions,
})
def handle_action(self, **kwargs):
action = self.request.get('_action')
if action not in [item[0] for item in self.actions]:
return
ids = self.request.get_all('_selected_action')
if not ids:
self.add_message('No items selected')
return
keys = [ndb.Key(urlsafe=_id) for _id in ids]
# update with extra keys
extra_keys = kwargs.get('extra_keys', [])
keys.extend(extra_keys)
if action in [self.PUBLISH, self.UNPUBLISH]:
objects = ndb.get_multi(keys)
for obj in objects:
obj.published = action == self.PUBLISH
ndb.put_multi(objects)
count = len(objects)
self.add_message('Successfully {}ed {} items'.format(action, count))
# after delete redirect to current path (prevents replaying the post)
return self.redirect(self.request.path)
# we're dealing with delete
# check if user confirmed, otherwise show confirmation page
if self.request.get('_confirm'):
# already confirmed, delete objects
deleted = ndb.delete_multi(keys)
self.add_message('Deleted {} items'.format(len(deleted)))
return self.redirect(self.request.path)
# show confirmation page
context = {
'object_list': ndb.get_multi(keys),
'cancel_url': self.request.path,
}
self.render('admin/confirm_delete.tpl', context)
return True
def post(self, **kwargs):
if not self.handle_action(**kwargs):
self.get(**kwargs) | mit | 8,832,483,235,327,074,000 | 33.484536 | 119 | 0.59411 | false | 4.015006 | false | false | false |
fivethreeo/django-netaxept | djnetaxept/models.py | 1 | 2600 | from django.db import models
from djnetaxept.managers import NetaxeptPaymentManager, NetaxeptTransactionManager
STATUS_CHOICES = (
('AUTHORIZED', 'AUTHORIZED'),
('SALE', 'SALE'),
('CAPTURE', 'CAPTURE'),
('CREDIT', 'CREDIT'),
('ANNUL', 'ANNUL')
)
class NetaxeptPayment(models.Model):
transaction_id = models.CharField(max_length=32)
amount = models.IntegerField(null=True, blank=True)
currencycode = models.CharField(max_length=3)
description = models.CharField(max_length=255)
ordernumber = models.CharField(max_length=32)
flagged = models.BooleanField(default=False)
responsecode = models.CharField(max_length=3, null=True, blank=True)
responsesource = models.CharField(max_length=20, null=True, blank=True)
responsetext = models.CharField(max_length=255, null=True, blank=True)
objects = NetaxeptPaymentManager()
def auth(self):
return NetaxeptTransaction.objects.auth_payment(self)
def sale(self):
return NetaxeptTransaction.objects.sale_payment(self)
def completed(self):
return not self.flagged
"""
RECURRING_CHOICES = (
('S', 'S'),
('R', 'R')
)
class NetaxeptRecurringPayment(NetaxeptPayment):
recurring_type = models.CharField(max_length=1, choices=RECURRING_CHOICES)
minimum_frequency = models.PositiveIntegerField(null=True, blank=True)
expiry_date = models.DateField(null=True, blank=True)
"""
OPERATION_CHOICES = (
('AUTH', 'AUTH'),
('SALE', 'SALE'),
('CAPTURE', 'CAPTURE'),
('CREDIT', 'CREDIT'),
('ANNUL', 'ANNUL')
)
class NetaxeptTransaction(models.Model):
payment = models.ForeignKey(NetaxeptPayment)
transaction_id = models.CharField(max_length=32)
operation = models.CharField(max_length=7, choices=OPERATION_CHOICES)
amount = models.PositiveIntegerField(null=True, blank=True)
flagged = models.BooleanField(default=False)
responsecode = models.CharField(max_length=3, null=True, blank=True)
responsesource = models.CharField(max_length=20, null=True, blank=True)
responsetext = models.CharField(max_length=255, null=True, blank=True)
objects = NetaxeptTransactionManager()
def capture(self, amount):
return NetaxeptTransaction.objects.capture_payment(self.payment, amount)
def credit(self, amount):
return NetaxeptTransaction.objects.credit_payment(self.payment, amount)
def annul(self):
return NetaxeptTransaction.objects.annul_payment(self.payment)
def completed(self):
return not self.flagged
| bsd-3-clause | -3,634,597,276,225,729,500 | 33.210526 | 82 | 0.692692 | false | 3.551913 | false | false | false |
startling/cytoplasm | cytoplasm/server.py | 1 | 3054 | # -*- coding: utf-8 -*-
''' These are the things that are used when you `cytoplasm serve`.
'''
import os
import sys
import cytoplasm
# make this work in either Python 2.x or 3.x
if sys.version_info.major >= 3:
from http.server import SimpleHTTPRequestHandler, HTTPServer
else:
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
# keep track of when things were last built in this global variable
most_recent_time = 0
# keep track of the site in this global variable
site = None
def serve(port, rebuild, event=None):
"Serve the Cytoplasm site."
# keep track of the most recently modified time in global variable
# most_recent_time
global most_recent_time
global site
# create a site and rebuild it first.
site = cytoplasm.Site(".")
site.build()
# set the most recent time.
most_recent_time = most_recent()
# change to the build directory, where things are to be served from.
os.chdir(site.config.build_dir)
# use either SimpleHTTPRequestHandler or RebuildHandler, depending on
# whether rebuild is True.
if rebuild:
handler = RebuildHandler
else:
handler = SimpleHTTPRequestHandler
# make a server with the handler and the port
httpd = HTTPServer(('', port), handler)
# serve!
httpd.serve_forever()
def most_recent():
"""Determine the most recent modified time in the source directory,
ignoring dotfiles and _build.
"""
directory = site.source
build_dir = site.config.build_dir
# get the candidate files:
files = [f for f in os.listdir(directory) if f != build_dir and not
f.startswith(".")]
# append files in additional watch directories
for dir in site.config.watch_dirs:
files += [os.path.join(dir, f) for f in
os.listdir(os.path.join(directory, dir)) if not f.startswith(".")]
# get each of their times
times = [os.stat(os.path.join(directory, f)).st_mtime for f in files]
# the highest time here is the most recent; return that.
return max(times)
class RebuildHandler(SimpleHTTPRequestHandler):
def handle(self):
"Handle a request and, if anything has changed, rebuild the site."
# overwrite the handle method in SimpleHTTPRequestHandler with this.
# declare most_recent_time global; we'll be changing it later.
global most_recent_time
# figure out the most recent time edited in the source directory
new_recent = most_recent()
# only build the site if the new most recent is more recent than the
# old one, i.e. if one or more of the files has been edited.
if new_recent > most_recent_time:
# update most_recent_time
most_recent_time = new_recent
# Build the site from the source directory
print("Rebuilding your Cytoplasm site...")
site.build()
# Call SimpleHTTPRequestHandler.handle(), so it can do stuff there too.
SimpleHTTPRequestHandler.handle(self)
| mit | -7,692,092,576,646,564,000 | 34.929412 | 79 | 0.67518 | false | 4.155102 | false | false | false |
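A minimal sketch of how serve() might be invoked (the port number and rebuild flag are assumptions; the real entry point is the `cytoplasm serve` command mentioned above):

if __name__ == "__main__":
    # Build once, then serve on port 8000, rebuilding whenever a source file changes.
    serve(8000, rebuild=True)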
saurabh6790/omnit-lib | webnotes/modules/__init__.py | 32 | 1699 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
"""
Utilities for using modules
"""
import webnotes, os
from webnotes import conf
import webnotes.utils
lower_case_files_for = ['DocType', 'Page', 'Report',
"Workflow", 'Module Def', 'Desktop Item', 'Workflow State', 'Workflow Action']
def scrub(txt):
return txt.replace(' ','_').replace('-', '_').replace('/', '_').lower()
def scrub_dt_dn(dt, dn):
"""Returns in lowercase and code friendly names of doctype and name for certain types"""
ndt, ndn = dt, dn
if dt in lower_case_files_for:
ndt, ndn = scrub(dt), scrub(dn)
return ndt, ndn
def get_module_path(module):
"""Returns path of the given module"""
m = scrub(module)
app_path = webnotes.utils.get_base_path()
if m in ('core', 'website'):
return os.path.join(app_path, 'lib', m)
else:
return os.path.join(app_path, 'app', m)
def get_doc_path(module, doctype, name):
dt, dn = scrub_dt_dn(doctype, name)
return os.path.join(get_module_path(module), dt, dn)
def reload_doc(module, dt=None, dn=None, plugin=None, force=True):
from webnotes.modules.import_file import import_files
return import_files(module, dt, dn, plugin=plugin, force=force)
def export_doc(doctype, name, module=None, plugin=None):
"""write out a doc"""
from webnotes.modules.export_file import write_document_file
import webnotes.model.doc
if not module: module = webnotes.conn.get_value(doctype, name, 'module')
write_document_file(webnotes.model.doc.get(doctype, name), module, plugin=plugin)
def get_doctype_module(doctype):
return webnotes.conn.get_value('DocType', doctype, 'module')
| mit | 7,569,147,823,721,121,000 | 30.462963 | 89 | 0.705121 | false | 3.077899 | false | false | false |
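Illustrative calls to the helpers above (the module and doctype names are hypothetical, and get_doc_path() requires a configured webnotes base path):

ndt, ndn = scrub_dt_dn("DocType", "Sales Invoice")
# -> ("doctype", "sales_invoice"), since "DocType" is listed in lower_case_files_for
doc_path = get_doc_path("Accounts", "DocType", "Sales Invoice")
# -> <base_path>/app/accounts/doctype/sales_invoice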
Yobretaw/Qrawler | src/scheduler.py | 1 | 1641 | from spider import Spider
import time
import sqlite3
class Scheduler:
# userList acts as a seed to spider
userList = ()
    # userDict contains all users that
    # have been processed
    userDict = {}
    # tempList is the list that contains the most recent names
    # crawled by the spider
tempList = []
# tempListCount represent the number of users
# that have been crawled
tempListCount = 0
def __init__(self, inputfile):
# open given file and read from it
self.userList = [line.strip() for line in open(inputfile)]
self.preTime = time.time()
self.storeUnit = 10000
# return true if given username has been processed, otherwise
# add it to the userDict and return false
def hasProcessed(self, username):
if username in self.userDict:
return True
self.userDict[username] = '1'
self.tempList.append(username)
self.tempListCount += 1
if self.tempListCount > self.storeUnit:
self.storeData()
return False
def startCrawl(self):
spider = Spider(self.userList)
spider.crawl(self.hasProcessed)
def storeData(self):
#timeDiff is time(measured in minutes) that used to crawl 10000
timeDiff = (time.time() - self.preTime) / 60
self.preTime = time.time()
# filename will be in format like "Thu,28,2013-06:50:07=2.56"
# where 2.56 is the first 4 digits of timeDiff
        filename = time.strftime("%a, %d, %b, %Y-%H:%M:%S", time.gmtime()) + "=" + str(timeDiff)[:4]
        # write data into a text file, one username per line
        f = open(filename + '.txt', 'w')
f.write('\n'.join(self.tempList))
f.close()
# reset tempList to empty and set count to 0
self.tempList = []
self.tempListCount = 0
| apache-2.0 | 6,562,375,032,907,024,000 | 24.246154 | 94 | 0.697745 | false | 3.090395 | false | false | false |
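A minimal driver for the Scheduler above ("seeds.txt" is a hypothetical file with one username per line):

if __name__ == '__main__':
    scheduler = Scheduler('seeds.txt')
    # Spider calls back into hasProcessed() to de-duplicate and batch results.
    scheduler.startCrawl()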
Hillshum/gPodder-tagging | src/gpodder/gtkui/draw.py | 1 | 10580 | # -*- coding: utf-8 -*-
#
# gPodder - A media aggregator and podcast client
# Copyright (c) 2005-2010 Thomas Perl and the gPodder Team
#
# gPodder is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# gPodder is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#
# draw.py -- Draw routines for gPodder-specific graphics
# Thomas Perl <[email protected]>, 2007-11-25
#
import gpodder
import gtk
import pango
import pangocairo
import cairo
import StringIO
import math
class TextExtents(object):
def __init__(self, ctx, text):
tuple = ctx.text_extents(text)
(self.x_bearing, self.y_bearing, self.width, self.height, self.x_advance, self.y_advance) = tuple
RRECT_LEFT_SIDE = 1
RRECT_RIGHT_SIDE = 2
def draw_rounded_rectangle(ctx, x, y, w, h, r=10, left_side_width = None, sides_to_draw=0, close=False):
if left_side_width is None:
        left_side_width = w/2
x = int(x)
offset = 0
if close: offset = 0.5
if sides_to_draw & RRECT_LEFT_SIDE:
ctx.move_to(x+int(left_side_width)-offset, y+h)
ctx.line_to(x+r, y+h)
ctx.curve_to(x, y+h, x, y+h, x, y+h-r)
ctx.line_to(x, y+r)
ctx.curve_to(x, y, x, y, x+r, y)
ctx.line_to(x+int(left_side_width)-offset, y)
if close:
ctx.line_to(x+int(left_side_width)-offset, y+h)
if sides_to_draw & RRECT_RIGHT_SIDE:
ctx.move_to(x+int(left_side_width)+offset, y)
ctx.line_to(x+w-r, y)
ctx.curve_to(x+w, y, x+w, y, x+w, y+r)
ctx.line_to(x+w, y+h-r)
ctx.curve_to(x+w, y+h, x+w, y+h, x+w-r, y+h)
ctx.line_to(x+int(left_side_width)+offset, y+h)
if close:
ctx.line_to(x+int(left_side_width)+offset, y)
def rounded_rectangle(ctx, x, y, width, height, radius=4.):
"""Simple rounded rectangle algorithmn
http://www.cairographics.org/samples/rounded_rectangle/
"""
degrees = math.pi / 180.
ctx.new_sub_path()
if width > radius:
ctx.arc(x + width - radius, y + radius, radius, -90. * degrees, 0)
ctx.arc(x + width - radius, y + height - radius, radius, 0, 90. * degrees)
ctx.arc(x + radius, y + height - radius, radius, 90. * degrees, 180. * degrees)
ctx.arc(x + radius, y + radius, radius, 180. * degrees, 270. * degrees)
ctx.close_path()
def draw_text_box_centered(ctx, widget, w_width, w_height, text, font_desc=None, add_progress=None):
style = widget.rc_get_style()
text_color = style.text[gtk.STATE_PRELIGHT]
red, green, blue = text_color.red, text_color.green, text_color.blue
text_color = [float(x)/65535. for x in (red, green, blue)]
text_color.append(.5)
if font_desc is None:
font_desc = style.font_desc
font_desc.set_size(14*pango.SCALE)
pango_context = widget.create_pango_context()
layout = pango.Layout(pango_context)
layout.set_font_description(font_desc)
layout.set_text(text)
width, height = layout.get_pixel_size()
ctx.move_to(w_width/2-width/2, w_height/2-height/2)
ctx.set_source_rgba(*text_color)
ctx.show_layout(layout)
# Draw an optional progress bar below the text (same width)
if add_progress is not None:
bar_height = 10
ctx.set_source_rgba(*text_color)
ctx.set_line_width(1.)
rounded_rectangle(ctx, w_width/2-width/2-.5, w_height/2+height-.5, width+1, bar_height+1)
ctx.stroke()
rounded_rectangle(ctx, w_width/2-width/2, w_height/2+height, int(width*add_progress)+.5, bar_height)
ctx.fill()
def draw_text_pill(left_text, right_text, x=0, y=0, border=2, radius=14, font_desc=None):
if gpodder.ui.fremantle:
border += 3
# Create temporary context to calculate the text size
ctx = cairo.Context(cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1))
# Use GTK+ style of a normal Button
widget = gtk.Label()
style = widget.rc_get_style()
x_border = border*2
if font_desc is None:
font_desc = style.font_desc
font_desc.set_weight(pango.WEIGHT_BOLD)
pango_context = widget.create_pango_context()
layout_left = pango.Layout(pango_context)
layout_left.set_font_description(font_desc)
layout_left.set_text(left_text)
layout_right = pango.Layout(pango_context)
layout_right.set_font_description(font_desc)
layout_right.set_text(right_text)
width_left, height_left = layout_left.get_pixel_size()
width_right, height_right = layout_right.get_pixel_size()
text_height = max(height_left, height_right)
image_height = int(y+text_height+border*2)
image_width = int(x+width_left+width_right+x_border*4)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, image_width, image_height)
ctx = pangocairo.CairoContext(cairo.Context(surface))
if left_text == '0':
left_text = None
if right_text == '0':
right_text = None
left_side_width = width_left + x_border*2
right_side_width = width_right + x_border*2
rect_width = left_side_width + right_side_width
rect_height = text_height + border*2
if left_text is not None:
draw_rounded_rectangle(ctx,x,y,rect_width,rect_height,radius, left_side_width, RRECT_LEFT_SIDE, right_text is None)
linear = cairo.LinearGradient(x, y, x+left_side_width/2, y+rect_height/2)
linear.add_color_stop_rgba(0, .8, .8, .8, .5)
linear.add_color_stop_rgba(.4, .8, .8, .8, .7)
linear.add_color_stop_rgba(.6, .8, .8, .8, .6)
linear.add_color_stop_rgba(.9, .8, .8, .8, .8)
linear.add_color_stop_rgba(1, .8, .8, .8, .9)
ctx.set_source(linear)
ctx.fill()
xpos, ypos, width_left, height = x+1, y+1, left_side_width, rect_height-2
if right_text is None:
width_left -= 2
draw_rounded_rectangle(ctx, xpos, ypos, rect_width, height, radius, width_left, RRECT_LEFT_SIDE, right_text is None)
ctx.set_source_rgba(1., 1., 1., .3)
ctx.set_line_width(1)
ctx.stroke()
draw_rounded_rectangle(ctx,x,y,rect_width,rect_height,radius, left_side_width, RRECT_LEFT_SIDE, right_text is None)
ctx.set_source_rgba(.2, .2, .2, .6)
ctx.set_line_width(1)
ctx.stroke()
ctx.move_to(x+x_border, y+1+border)
ctx.set_source_rgba( 0, 0, 0, 1)
ctx.show_layout(layout_left)
ctx.move_to(x-1+x_border, y+border)
ctx.set_source_rgba( 1, 1, 1, 1)
ctx.show_layout(layout_left)
if right_text is not None:
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
linear = cairo.LinearGradient(x+left_side_width, y, x+left_side_width+right_side_width/2, y+rect_height)
linear.add_color_stop_rgba(0, .2, .2, .2, .9)
linear.add_color_stop_rgba(.4, .2, .2, .2, .8)
linear.add_color_stop_rgba(.6, .2, .2, .2, .6)
linear.add_color_stop_rgba(.9, .2, .2, .2, .7)
linear.add_color_stop_rgba(1, .2, .2, .2, .5)
ctx.set_source(linear)
ctx.fill()
xpos, ypos, width, height = x, y+1, rect_width-1, rect_height-2
if left_text is None:
xpos, width = x+1, rect_width-2
draw_rounded_rectangle(ctx, xpos, ypos, width, height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
ctx.set_source_rgba(1., 1., 1., .3)
ctx.set_line_width(1)
ctx.stroke()
draw_rounded_rectangle(ctx, x, y, rect_width, rect_height, radius, left_side_width, RRECT_RIGHT_SIDE, left_text is None)
ctx.set_source_rgba(.1, .1, .1, .6)
ctx.set_line_width(1)
ctx.stroke()
ctx.move_to(x+left_side_width+x_border, y+1+border)
ctx.set_source_rgba( 0, 0, 0, 1)
ctx.show_layout(layout_right)
ctx.move_to(x-1+left_side_width+x_border, y+border)
ctx.set_source_rgba( 1, 1, 1, 1)
ctx.show_layout(layout_right)
return surface
def draw_pill_pixbuf(left_text, right_text):
return cairo_surface_to_pixbuf(draw_text_pill(left_text, right_text))
def cairo_surface_to_pixbuf(s):
"""
Converts a Cairo surface to a Gtk Pixbuf by
encoding it as PNG and using the PixbufLoader.
"""
sio = StringIO.StringIO()
try:
s.write_to_png(sio)
except:
# Write an empty PNG file to the StringIO, so
# in case of an error we have "something" to
# load. This happens in PyCairo < 1.1.6, see:
# http://webcvs.cairographics.org/pycairo/NEWS?view=markup
# Thanks to Chris Arnold for reporting this bug
sio.write('iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAAAXNSR0IArs4c6QAAAAZiS0dEAP8A\n/wD/oL2nkwAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB9cMEQkqIyxn3RkAAAAZdEVYdENv\nbW1lbnQAQ3JlYXRlZCB3aXRoIEdJTVBXgQ4XAAAADUlEQVQI12NgYGBgAAAABQABXvMqOgAAAABJ\nRU5ErkJggg==\n'.decode('base64'))
pbl = gtk.gdk.PixbufLoader()
pbl.write(sio.getvalue())
pbl.close()
pixbuf = pbl.get_pixbuf()
return pixbuf
def progressbar_pixbuf(width, height, percentage):
COLOR_BG = (.4, .4, .4, .4)
COLOR_FG = (.2, .9, .2, 1.)
COLOR_FG_HIGH = (1., 1., 1., .5)
COLOR_BORDER = (0., 0., 0., 1.)
surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
ctx = cairo.Context(surface)
padding = int(float(width)/8.0)
bar_width = 2*padding
bar_height = height - 2*padding
bar_height_fill = bar_height*percentage
# Background
ctx.rectangle(padding, padding, bar_width, bar_height)
ctx.set_source_rgba(*COLOR_BG)
ctx.fill()
# Foreground
ctx.rectangle(padding, padding+bar_height-bar_height_fill, bar_width, bar_height_fill)
ctx.set_source_rgba(*COLOR_FG)
ctx.fill()
ctx.rectangle(padding+bar_width/3, padding+bar_height-bar_height_fill, bar_width/4, bar_height_fill)
ctx.set_source_rgba(*COLOR_FG_HIGH)
ctx.fill()
# Border
ctx.rectangle(padding-.5, padding-.5, bar_width+1, bar_height+1)
ctx.set_source_rgba(*COLOR_BORDER)
ctx.set_line_width(1.)
ctx.stroke()
return cairo_surface_to_pixbuf(surface)
| gpl-3.0 | 5,748,209,790,360,395,000 | 35.736111 | 286 | 0.638091 | false | 2.861006 | false | false | false |
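Illustrative use of the two public helpers above inside a PyGTK UI (the counts, size and percentage are arbitrary):

pill = draw_pill_pixbuf('3', '7')            # e.g. 3 new / 7 downloaded episodes
image = gtk.Image()
image.set_from_pixbuf(pill)

progress = progressbar_pixbuf(16, 16, 0.5)   # a half-full 16x16 progress column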
mrachinskiy/blender-addon-jewelcraft | op_prongs/prongs_mesh.py | 1 | 3120 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# JewelCraft jewelry design toolkit for Blender.
# Copyright (C) 2015-2019 Mikhail Rachinskiy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
# ##### END GPL LICENSE BLOCK #####
from math import pi, tau, sin, cos
import bmesh
from mathutils import Matrix
def create_prongs(self):
# Prong
# ---------------------------
prong_rad = self.diameter / 2
taper = self.taper + 1
if self.bump_scale:
curve_resolution = int(self.detalization / 4) + 1
angle = (pi / 2) / (curve_resolution - 1)
v_cos = []
v_co_app = v_cos.append
x = 0.0
for i in range(curve_resolution):
y = sin(i * angle) * prong_rad
z = cos(i * angle) * prong_rad * self.bump_scale + self.z_top
v_co_app((x, y, z))
v_co_app((x, prong_rad * taper, -self.z_btm))
else:
v_cos = (
(0.0, 0.0, self.z_top),
(0.0, prong_rad, self.z_top),
(0.0, prong_rad * taper, -self.z_btm),
)
bm = bmesh.new()
v_profile = [bm.verts.new(v) for v in v_cos]
for i in range(len(v_profile) - 1):
bm.edges.new((v_profile[i], v_profile[i + 1]))
bmesh.ops.spin(bm, geom=bm.edges, angle=tau, steps=self.detalization, axis=(0.0, 0.0, 1.0), cent=(0.0, 0.0, 0.0))
bmesh.ops.remove_doubles(bm, verts=bm.verts, dist=0.00001)
v_boundary = [x for x in bm.verts if x.is_boundary]
bm.faces.new(reversed(v_boundary))
# Transforms
# ---------------------------
pos_offset = (self.gem_l / 2 + prong_rad) - (self.diameter * (self.intersection / 100))
spin_steps = self.number - 1
if self.alignment:
bmesh.ops.rotate(bm, verts=bm.verts, cent=(0.0, 0.0, 0.0), matrix=Matrix.Rotation(-self.alignment, 4, "X"))
bmesh.ops.translate(bm, verts=bm.verts, vec=(0.0, pos_offset, 0.0))
if spin_steps:
spin_angle = tau - tau / self.number
bmesh.ops.spin(bm, geom=bm.faces, angle=spin_angle, steps=spin_steps, axis=(0.0, 0.0, 1.0), cent=(0.0, 0.0, 0.0), use_duplicate=True)
bmesh.ops.rotate(bm, verts=bm.verts, cent=(0.0, 0.0, 0.0), matrix=Matrix.Rotation(-self.position, 4, "Z"))
if self.symmetry:
bmesh.ops.mirror(bm, geom=bm.faces, merge_dist=0, axis="Y")
bmesh.ops.recalc_face_normals(bm, faces=bm.faces)
bmesh.ops.rotate(bm, verts=bm.verts, cent=(0.0, 0.0, 0.0), matrix=Matrix.Rotation(-self.symmetry_pivot, 4, "Z"))
return bm
| mit | -2,640,801,898,690,367,000 | 32.191489 | 141 | 0.6 | false | 2.965779 | false | false | false |
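A sketch of how the returned bmesh might be turned into a scene object from the operator's execute() method (Blender 2.7x-style API shown; newer versions link objects through collections, and `self` is assumed to carry the prong settings read by create_prongs):

import bpy

bm = create_prongs(self)
me = bpy.data.meshes.new("Prongs")
bm.to_mesh(me)
bm.free()
ob = bpy.data.objects.new("Prongs", me)
bpy.context.scene.objects.link(ob)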
misgeatgit/atomspace | tests/cython/bindlink/test_bindlink.py | 1 | 7006 | from unittest import TestCase
import os
from opencog.atomspace import AtomSpace, TruthValue, Atom, types
from opencog.bindlink import stub_bindlink, bindlink, single_bindlink,\
first_n_bindlink, af_bindlink, \
satisfaction_link, satisfying_set, \
satisfying_element, first_n_satisfying_set, \
execute_atom, evaluate_atom
from opencog.type_constructors import *
from opencog.utilities import initialize_opencog, finalize_opencog
from test_functions import green_count, red_count
__author__ = 'Curtis Faith'
class BindlinkTest(TestCase):
bindlink_atom = None
getlink_atom = None
atomspace = AtomSpace()
starting_size = 0
def setUp(self):
print ("setUp - atomspace = ", self.atomspace)
# Clear atoms from previous test
self.atomspace.clear()
# Initialize Python
initialize_opencog(self.atomspace)
set_type_ctor_atomspace(self.atomspace)
# Define several animals and something of a different type as well
InheritanceLink( ConceptNode("Frog"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Zebra"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Deer"), ConceptNode("animal"))
InheritanceLink( ConceptNode("Spaceship"), ConceptNode("machine"))
# Define a graph search query
self.bindlink_atom = \
BindLink(
# The variable node to be grounded.
VariableNode("$var"),
# The pattern to be grounded.
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
),
# The grounding to be returned.
VariableNode("$var")
# bindlink needs a handle
)
# Define a pattern to be grounded
self.getlink_atom = \
GetLink(
InheritanceLink(
VariableNode("$var"),
ConceptNode("animal")
)
)
# Remember the starting atomspace size.
self.starting_size = self.atomspace.size()
def tearDown(self):
print ("tearDown - atomspace = ", self.atomspace)
# Can't do this; finalize can be called only once, ever, and
# then never again. The second call will never follow through.
# Also, cannot create and delete atomspaces here; this will
# confuse the PythonEval singletonInstance.
# finalize_opencog()
# del self.atomspace
def test_stub_bindlink(self):
# Remember the starting atomspace size. This test should not
# change the atomspace.
starting_size = self.atomspace.size()
# Run bindlink.
atom = stub_bindlink(self.atomspace, self.bindlink_atom)
self.assertTrue(atom is not None)
# Check the ending atomspace size, it should be the same.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, starting_size)
def _check_result_setlink(self, atom, expected_arity):
# Check if the atom is a SetLink
self.assertTrue(atom is not None)
self.assertEquals(atom.type, types.SetLink)
# Check the ending atomspace size, it should have added one SetLink.
ending_size = self.atomspace.size()
self.assertEquals(ending_size, self.starting_size + 1)
# The SetLink should have expected_arity items in it.
self.assertEquals(atom.arity, expected_arity)
def test_bindlink(self):
atom = bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 3)
def test_single_bindlink(self):
atom = single_bindlink(self.atomspace, self.bindlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_bindlink(self):
atom = first_n_bindlink(self.atomspace, self.bindlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_af_bindlink(self):
atom = af_bindlink(self.atomspace, self.bindlink_atom)
# The SetLink is empty. ??? Should it be.
self._check_result_setlink(atom, 0)
def test_satisfying_set(self):
atom = satisfying_set(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 3)
def test_satisfying_element(self):
atom = satisfying_element(self.atomspace, self.getlink_atom)
self._check_result_setlink(atom, 1)
def test_first_n_satisfying_set(self):
atom = first_n_satisfying_set(self.atomspace, self.getlink_atom, 5)
self._check_result_setlink(atom, 3)
def test_satisfy(self):
satisfaction_atom = SatisfactionLink(
VariableList(), # no variables
SequentialAndLink(
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("green light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("red light")
)
),
EvaluationLink(
GroundedPredicateNode("py: test_functions.stop_go"),
ListLink(
ConceptNode("traffic ticket")
)
)
)
)
atom = satisfaction_link(self.atomspace, satisfaction_atom)
self.assertTrue(atom is not None and atom.mean <= 0.5)
self.assertEquals(green_count(), 2)
self.assertEquals(red_count(), 1)
def test_execute_atom(self):
result = execute_atom(self.atomspace,
ExecutionOutputLink(
GroundedSchemaNode("py: test_functions.add_link"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
list_link = ListLink(
ConceptNode("one"),
ConceptNode("two")
)
self.assertEquals(result, list_link)
def test_evaluate_atom(self):
result = evaluate_atom(self.atomspace,
EvaluationLink(
GroundedPredicateNode("py: test_functions.bogus_tv"),
ListLink(
ConceptNode("one"),
ConceptNode("two")
)
)
)
self.assertEquals(result, TruthValue(0.6, 0.234))
| agpl-3.0 | -9,066,683,654,534,083,000 | 34.03 | 76 | 0.553954 | false | 4.327363 | true | false | false |
yzl0083/orange | Orange/testing/regression/tests_20/reference_transformvalue-domain.py | 6 | 1671 | # Description: Shows how to use value transformers
# Category: preprocessing
# Classes: TransformValue, Continuous2Discrete, Discrete2Continuous, MapIntValue
# Uses:
# Referenced:
import orange
print
def printExample(ex):
for val in ex:
print "%16s: %s" % (val.variable.name, val)
data = orange.ExampleTable("bridges")
for attr in data.domain:
if attr.varType == orange.VarTypes.Continuous:
print "%20s: continuous" % attr.name
else:
print "%20s: %s" % (attr.name, attr.values)
print
print "Original 15th example:"
printExample(data[15])
continuizer = orange.DomainContinuizer()
continuizer.multinomialTreatment = continuizer.LowestIsBase
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "Lowest is base"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.FrequentIsBase
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "Frequent is base"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.NValues
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "NValues"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.Ignore
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "Ignore"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.AsOrdinal
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "As ordinal"
printExample(data0[15])
continuizer.multinomialTreatment = continuizer.AsNormalizedOrdinal
domain0 = continuizer(data)
data0 = data.translate(domain0)
print
print "As normalized ordinal"
printExample(data0[15])
| gpl-3.0 | -1,618,650,396,070,907,400 | 21.581081 | 84 | 0.767205 | false | 3.146893 | false | false | false |
wpjesus/codematch | ietf/iesg/urls.py | 1 | 3187 | # Copyright The IETF Trust 2007, All Rights Reserved
# Portion Copyright (C) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
# All rights reserved. Contact: Pasi Eronen <[email protected]>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Nokia Corporation and/or its
# subsidiary(-ies) nor the names of its contributors may be used
# to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from django.conf.urls import patterns
from django.views.generic import RedirectView
urlpatterns = patterns('',
(r'^telechat/.*$', RedirectView.as_view(url='https://www.ietf.org/iesg/minutes.html')),
(r'^ann/(?:ind|new|prev)/$', RedirectView.as_view(url="/iesg/decisions/", permanent=True )),
(r'^telechatdates/$', RedirectView.as_view(url='/admin/iesg/telechatdate/')),
(r'^decisions/(?:(?P<year>[0-9]{4})/)?$', "ietf.iesg.views.review_decisions"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?$', "ietf.iesg.views.agenda"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?agenda.txt$', "ietf.iesg.views.agenda_txt"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?agenda.json$', "ietf.iesg.views.agenda_json"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?scribe_template.html$', "ietf.iesg.views.agenda_scribe_template"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?moderator_package.html$', "ietf.iesg.views.agenda_moderator_package"),
(r'^agenda/(?:(?P<date>\d{4}-\d{2}-\d{2})/)?agenda_package.txt$', "ietf.iesg.views.agenda_package"),
(r'^agenda/documents.txt$', "ietf.iesg.views.agenda_documents_txt"),
(r'^agenda/documents/$', "ietf.iesg.views.agenda_documents"),
(r'^agenda/telechat-(?:(?P<date>\d{4}-\d{2}-\d{2})-)?docs.tgz', "ietf.iesg.views.telechat_docs_tarfile"),
(r'^discusses/$', "ietf.iesg.views.discusses"),
(r'^milestones/$', "ietf.iesg.views.milestones_needing_review"),
)
| bsd-3-clause | -624,618,232,841,359,700 | 55.910714 | 118 | 0.701286 | false | 3.245418 | false | false | false |
rxcomm/nymserv | setup.py | 2 | 1272 | #!/usr/bin/python
#
# vim: tabstop=4 expandtab shiftwidth=4 noautoindent
#
# nymserv.py - A Basic Nymserver for delivering messages to a shared mailbox
# such as alt.anonymous.messages.
#
# Copyright (C) 2012 Steve Crook <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
from distutils.core import setup
setup(
name='nymserv',
author='Steve Crook',
author_email='[email protected]',
version='0.4',
packages=['nymserv', ],
scripts=['nymserv/nymserv', ],
license='GPLv3',
url='https://github.com/crooks/nymserv',
long_description=open('README').read(),
install_requires=['pyaxo>=0.4.1', ],
#data_files=[('man/man1', ['man/nymserv.1'])],
)
| gpl-3.0 | -8,858,795,147,376,805,000 | 33.378378 | 78 | 0.711478 | false | 3.437838 | false | false | false |
libvirt/libvirt-test-API | libvirttestapi/repos/migration/migrate_tls.py | 1 | 3454 | # Copyright (C) 2010-2012 Red Hat, Inc.
# This work is licensed under the GNU GPLv2 or later.
import libvirt
from libvirt import libvirtError
from libvirttestapi.repos.domain import domain_common
from libvirttestapi.utils import utils
required_params = ('transport',
'target_machine',
'username',
'password',
'guestname',
'poststate')
optional_params = {}
def get_state(state):
dom_state = ''
if state == libvirt.VIR_DOMAIN_NOSTATE:
dom_state = 'nostate'
elif state == libvirt.VIR_DOMAIN_RUNNING:
dom_state = 'running'
elif state == libvirt.VIR_DOMAIN_BLOCKED:
dom_state = 'blocked'
elif state == libvirt.VIR_DOMAIN_PAUSED:
dom_state = 'paused'
elif state == libvirt.VIR_DOMAIN_SHUTDOWN:
dom_state = 'shutdown'
elif state == libvirt.VIR_DOMAIN_SHUTOFF:
dom_state = 'shutoff'
elif state == libvirt.VIR_DOMAIN_CRASHED:
dom_state = 'crashed'
else:
        dom_state = 'not sure'
return dom_state
def clean_guest(conn, guestname, logger):
running_guests = []
ids = conn.listDomainsID()
for id in ids:
obj = conn.lookupByID(id)
running_guests.append(obj.name())
if guestname in running_guests:
logger.info("Destroy guest: %s" % guestname)
domobj = conn.lookupByName(guestname)
domobj.destroy()
define_guests = conn.listDefinedDomains()
if guestname in define_guests:
logger.info("Undefine guest: %s" % guestname)
domobj = conn.lookupByName(guestname)
domobj.undefine()
def env_clean(srcconn, dstconn, guestname, logger):
logger.info("destroy and undefine %s on both side if it exsits", guestname)
clean_guest(srcconn, guestname, logger)
clean_guest(dstconn, guestname, logger)
def migrate_tls(params):
""" migrate a guest back and forth between two machines"""
logger = params['logger']
transport = params['transport']
target_machine = params['target_machine']
username = params['username']
password = params['password']
guestname = params['guestname']
poststate = params['poststate']
domain_common.config_ssh(target_machine, username, password, logger)
target_hostname = utils.get_target_hostname(target_machine, username, password, logger)
dsturi = "qemu+%s://%s/system" % (transport, target_hostname)
try:
# Connect to local hypervisor connection URI
srcconn = libvirt.open()
srcdom = srcconn.lookupByName(guestname)
dstconn = libvirt.open(dsturi)
logger.info("use migrate() to migrate")
srcdom.migrate(dstconn, libvirt.VIR_MIGRATE_TLS | libvirt.VIR_MIGRATE_UNSAFE, None, None, 0)
except libvirtError as err:
logger.error("API error message: %s, error code is %s"
% (err.get_error_message(), err.get_error_code()))
logger.error("Migration Failed")
env_clean(srcconn, dstconn, guestname, logger)
return 1
dstdom = dstconn.lookupByName(guestname)
dstdom_state = dstdom.info()[0]
if get_state(dstdom_state) != poststate:
logger.error("Dst VM wrong state %s, should be %s", get_state(dstdom_state), poststate)
env_clean(srcconn, dstconn, guestname, logger)
return 1
logger.info("Migration PASS")
env_clean(srcconn, dstconn, guestname, logger)
return 0
| gpl-2.0 | -2,749,031,390,518,975,000 | 32.862745 | 100 | 0.642154 | false | 3.698073 | false | false | false |
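An illustrative parameter dict for migrate_tls(); the host name, credentials and guest name are placeholders:

import logging

params = {
    'logger': logging.getLogger('libvirttestapi'),
    'transport': 'tls',
    'target_machine': 'dst.example.com',
    'username': 'root',
    'password': 'secret',
    'guestname': 'guest1',
    'poststate': 'running',
}
ret = migrate_tls(params)   # 0 on success, 1 on failure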
dcmorton/MalwareTools | mal_to_db.py | 1 | 3619 | #!/usr/bin/python
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Add malware files and their hashes to a MySQL database, saving them as LONGBLOBs in the database structure
import sys
import os
import re
import hashlib
from optparse import OptionParser
try:
import MySQLdb
except ImportError:
print "Cannot import MySQLdb, fix it."
sys.exit()
#MySQL Connection Info
host = ''
username = ''
password = ''
database = ''
def initdb():
conn = MySQLdb.connect(host=host, user=username, passwd=password, db=database)
curs = conn.cursor()
curs.execute("""
CREATE TABLE files (
id INTEGER PRIMARY KEY AUTO_INCREMENT,
md5 TEXT,
sha1 TEXT,
sha256 TEXT,
malware LONGBLOB,
time DATETIME
) ENGINE=INNODB, ROW_FORMAT=DYNAMIC;
""")
curs.close()
conn.commit()
conn.close()
def savetodb(filename, force):
conn = MySQLdb.connect(host=host, user=username, passwd=password, db=database)
curs = conn.cursor()
md5 = hashlib.md5(open(filename, 'rb').read()).hexdigest()
sha1 = hashlib.sha1(open(filename, 'rb').read()).hexdigest()
    sha256 = hashlib.sha256(open(filename, 'rb').read()).hexdigest()
file = open(filename, 'rb').read()
curs.execute("SELECT id FROM files WHERE md5=%s", (md5,))
ids = curs.fetchall()
if len(ids):
if not force:
ids = ["%d" % id[0] for id in ids]
print "The sample exists in the database with ID %s" % (','.join(ids))
print "Use the -o or --overwrite option to force"
return
else:
curs.execute("DELETE FROM files WHERE md5=%s", (md5,))
curs.execute("INSERT INTO files VALUES (NULL,%s,%s,%s,%s,NOW())", (md5,sha1,sha256,file))
curs.close()
conn.commit()
conn.close()
def main():
parser = OptionParser()
parser.add_option("-i", "--init", action="store_true",
dest="init", default=False, help="initialize database")
parser.add_option("-o", "--overwrite", action="store_true",
dest="force", default=False,
help="overwrite existing DB entry")
parser.add_option("-f", "--file", action="store", dest="filename",
type="string", help="save FILENAME")
parser.add_option("-u", "--upload", action="store_true",
dest="savetodb", default=False,
help="Save file to database")
(opts, args) = parser.parse_args()
if opts.init:
initdb()
sys.exit()
if opts.filename == None:
parser.print_help()
parser.error("You must supply a filename!")
if not os.path.isfile(opts.filename):
parser.error("%s does not exist" % opts.filename)
if opts.savetodb:
print "Saving " + opts.filename + " to the database"
savetodb(opts.filename, opts.force)
print "Done"
print
if __name__ == '__main__':
main() | gpl-3.0 | -1,676,382,926,745,943,600 | 30.754386 | 108 | 0.610113 | false | 3.845909 | false | false | false |
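Illustrative usage (file names are placeholders; the MySQL connection constants at the top of the script must be filled in first):

#   python mal_to_db.py -i                  # create the `files` table once
#   python mal_to_db.py -u -f sample.exe    # hash and store a sample
#   python mal_to_db.py -u -o -f sample.exe # overwrite an existing entry
# or programmatically:
initdb()
savetodb('sample.exe', force=False)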
Juniper/tempest | tempest/tests/lib/fake_credentials.py | 8 | 2480 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import auth
class FakeCredentials(auth.Credentials):
def is_valid(self):
return True
class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
tenant_name='fake_tenant_name'
)
super(FakeKeystoneV2Credentials, self).__init__(**creds)
class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
"""Fake credentials suitable for the Keystone Identity V3 API"""
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
user_domain_name='fake_domain_name',
project_name='fake_tenant_name',
project_domain_name='fake_domain_name'
)
super(FakeKeystoneV3Credentials, self).__init__(**creds)
class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
"""Fake credentials for the Keystone Identity V3 API, with no scope"""
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
user_domain_name='fake_domain_name'
)
super(FakeKeystoneV3DomainCredentials, self).__init__(**creds)
class FakeKeystoneV3AllCredentials(auth.KeystoneV3Credentials):
"""Fake credentials for the Keystone Identity V3 API, with no scope"""
def __init__(self):
creds = dict(
username='fake_username',
password='fake_password',
user_domain_name='fake_domain_name',
project_name='fake_tenant_name',
project_domain_name='fake_domain_name',
domain_name='fake_domain_name'
)
super(FakeKeystoneV3AllCredentials, self).__init__(**creds)
| apache-2.0 | 1,720,746,403,775,689,500 | 32.513514 | 78 | 0.650403 | false | 4.154104 | false | false | false |
MotherNatureNetwork/django-dynamic-forms | dynamic_forms/admin.py | 1 | 8490 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import six
from django import forms
from django.contrib import admin
from django.contrib.admin import SimpleListFilter
from django.forms.utils import flatatt
from django.utils.encoding import force_text
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.utils.text import slugify
from django.utils.translation import ugettext_lazy as _
from suit.admin import SortableStackedInline
from dynamic_forms.formfields import formfield_registry
from dynamic_forms.models import FormFieldModel, FormModel, FormModelData
from dynamic_forms.utils import export_as_csv_action
class ReadOnlyWidget(forms.Widget):
def __init__(self, attrs=None, **kwargs):
self.show_text = kwargs.pop('show_text', None)
super(ReadOnlyWidget, self).__init__(attrs)
def render(self, name, value, attrs=None):
content = ''
if value is not None:
content = value
if self.show_text is not None:
content = self.show_text
final_attrs = self.build_attrs(attrs)
return format_html('<span{0}>{1}</span>',
flatatt(final_attrs),
force_text(content),
)
class OptionsWidget(forms.MultiWidget):
def __init__(self, option_names, widgets, attrs=None):
self.option_names = option_names
super(OptionsWidget, self).__init__(widgets, attrs)
def decompress(self, value):
mapping = json.loads(value) if value else {}
return [mapping.get(key, None) for key in self.option_names]
def format_output(self, rendered_widgets, id_):
output = []
i = 0
for n, (r, w) in six.moves.zip(self.option_names, rendered_widgets):
output.append(
format_html(
'<label for="{0}_{1}">{2}:</label>{3}',
w.id_for_label(id_), i, n, r
)
)
i += 1
return mark_safe('<div style="display:inline-block;">' +
('<br />\n'.join(output)) + '</div>')
def render(self, name, value, attrs=None):
if self.is_localized:
for widget in self.widgets:
widget.is_localized = self.is_localized
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
rendered = widget.render(name + '_%s' % i, widget_value,
final_attrs)
output.append((rendered, widget))
return mark_safe(self.format_output(output, id_))
class OptionsField(forms.MultiValueField):
def __init__(self, meta, *args, **kwargs):
self.option_names = []
self.option_fields = []
self.option_widgets = []
initial = {}
for name, option in sorted(meta.items()):
self.option_names.append(name)
initial[name] = option[1]
formfield = option[2]
if isinstance(formfield, forms.Field):
self.option_fields.append(formfield)
self.option_widgets.append(formfield.widget)
elif isinstance(formfield, (tuple, list)):
if isinstance(formfield[0], forms.Field):
self.option_fields.append(formfield[0])
else:
self.option_fields.append(formfield[0]())
if isinstance(formfield[1], forms.Widget):
self.option_widgets.append(formfield[1])
else:
self.option_widgets.append(formfield[1]())
elif isinstance(formfield, type):
self.option_fields.append(formfield())
self.option_widgets.append(formfield.widget)
kwargs['widget'] = OptionsWidget(self.option_names,
self.option_widgets)
if 'initial' in kwargs:
kwargs['initial'].update(initial)
else:
kwargs['initial'] = initial
super(OptionsField, self).__init__(self.option_fields, *args, **kwargs)
def compress(self, data_list):
data = {}
for name, value in six.moves.zip(self.option_names, data_list):
if value is not None:
data[name] = value
return json.dumps(data)
class AdminFormModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(AdminFormModelForm, self).__init__(*args, **kwargs)
choices = self.fields['actions'].choices
self.fields['actions'].choices = sorted(choices, key=lambda x: x[1])
class AdminFormFieldInlineForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
instance = kwargs.get('instance', None)
meta = None
if instance:
df = formfield_registry.get(instance.field_type)
if df:
meta = df._meta
super(AdminFormFieldInlineForm, self).__init__(*args, **kwargs)
choices = self.fields['field_type'].choices
self.fields['field_type'].choices = sorted(choices, key=lambda x: x[1])
if meta is not None:
self.fields['_options'] = OptionsField(meta, required=False,
label=_('Options'))
else:
self.fields['_options'].widget = ReadOnlyWidget(show_text=_(
'The options for this field will be available once it has '
'been stored the first time.'
))
class FormFieldModelInlineAdmin(SortableStackedInline):
extra = 0
form = AdminFormFieldInlineForm
list_display = ('field_type', 'name', 'label')
model = FormFieldModel
prepopulated_fields = {"name": ("label",)}
sortable = 'position'
class FormModelAdmin(admin.ModelAdmin):
form = AdminFormModelForm
inlines = (FormFieldModelInlineAdmin,)
list_display = ('name', 'allow_display')
list_filter = ('name',)
model = FormModel
actions = [export_as_csv_action("Export form submissions as CSV")]
admin.site.register(FormModel, FormModelAdmin)
class FormFilter(SimpleListFilter):
title = 'Selected Form'
parameter_name = 'form'
def lookups(self, request, model_admin):
forms = set([f for f in FormModel.objects.all()])
return [(f.id, f.name) for f in forms]
def queryset(self, request, queryset):
if self.value():
return FormModelData.objects.filter(form__id__exact=self.value())
else:
return queryset
class FormModelDataAdmin(admin.ModelAdmin):
fields = ('form', 'value', 'submitted', 'show_url_link')
model = FormModelData
readonly_fields = ('submitted', 'show_url_link',)
list_filter = (FormFilter,)
actions_on_top = False
actions_on_bottom = True
date_hierarchy = 'submitted'
def get_list_display(self, request):
if not request.GET.get('form', None):
return ('form', 'submitted')
else:
list_display_tuple = ['form', 'submitted']
form_obj = FormModel.objects.get(pk=int(request.GET.get('form')))
self.form_obj = form_obj
fields = form_obj.fields.all()
for field in fields:
if field.field_type in ('dynamic_forms.formfields.StartGroupField',
'dynamic_forms.formfields.EndGroupField'):
continue
field_slug = slugify(field.name).replace('-', '_')
list_display_tuple.append("get_form_data_value_for_%s" % field_slug)
self.add_form_value_display(field.label, field_slug)
return list_display_tuple
def add_form_value_display(self, label, name):
def inner_add_form_value_display(obj):
json_value = json.loads(obj.value)
return json_value[label]
inner_add_form_value_display.short_description = name
inner_add_form_value_display.name = name
setattr(self, "get_form_data_value_for_%s" % name, inner_add_form_value_display)
admin.site.register(FormModelData, FormModelDataAdmin)
| bsd-3-clause | -2,820,495,002,670,992,000 | 35.594828 | 88 | 0.595053 | false | 4.046711 | false | false | false |
cshields/satnogs-client | satnogsclient/upsat/gnuradio_handler.py | 1 | 2800 | import logging
import cPickle
import subprocess
from satnogsclient.upsat import packet_settings
from satnogsclient import settings as client_settings
from satnogsclient.observer.udpsocket import Udpsocket
from satnogsclient.upsat import packet
logger = logging.getLogger('satnogsclient')
backend_listener_sock = Udpsocket(('0.0.0.0', client_settings.BACKEND_LISTENER_PORT)) # Port in which client listens for frames from gnuradio
ui_listener_sock = Udpsocket(('127.0.0.1', client_settings.BACKEND_FEEDER_PORT))
ecss_feeder_sock = Udpsocket([]) # The socket with which we communicate with the ecss feeder thread
backend_feeder_sock = Udpsocket([])
ld_socket = Udpsocket([])
ld_uplink_socket = Udpsocket([])
ld_downlink_socket = Udpsocket([])
def write_to_gnuradio(buf):
backend_feeder_sock.sendto(buf, (client_settings.GNURADIO_IP, client_settings.GNURADIO_UDP_PORT))
def read_from_gnuradio():
logger.info('Started gnuradio listener process')
while True:
conn = backend_listener_sock.recv()
buf_in = bytearray(conn[0])
ecss_dict = {}
ret = packet.deconstruct_packet(buf_in, ecss_dict, "gnuradio")
ecss_dict = ret[0]
pickled = cPickle.dumps(ecss_dict)
if len(ecss_dict) == 0:
logger.error('Ecss Dictionary not properly constructed. Error occured')
continue
try:
if ecss_dict['ser_type'] == packet_settings.TC_LARGE_DATA_SERVICE:
if ecss_dict['ser_subtype'] <= 8: # 8 is sthe maximum service subtype corresponding to Large Data downlink
ld_downlink_socket.sendto(pickled, ('127.0.0.1', client_settings.LD_DOWNLINK_LISTEN_PORT))
else:
ld_uplink_socket.sendto(pickled, ('127.0.0.1', client_settings.LD_UPLINK_LISTEN_PORT))
else:
ecss_feeder_sock.sendto(pickled, ('127.0.0.1', client_settings.ECSS_FEEDER_UDP_PORT))
except KeyError:
logger.error('Ecss Dictionary not properly constructed. Error occured. Key \'ser_type\' not in dictionary')
def exec_gnuradio(observation_file, waterfall_file, freq):
arguments = {'filename': observation_file,
'waterfall': waterfall_file,
'rx_device': client_settings.SATNOGS_RX_DEVICE,
'center_freq': str(freq)}
arg_string = ' '
arg_string += '--rx-sdr-device=' + arguments['rx_device'] + ' '
arg_string += '--file-path=' + arguments['filename'] + ' '
arg_string += '--waterfall-file-path=' + arguments['waterfall'] + ' '
arg_string += '--rx-freq=' + arguments['center_freq'] + ' '
logger.info('Starting GNUradio python script')
proc = subprocess.Popen([client_settings.GNURADIO_SCRIPT_FILENAME + " " + arg_string], shell=True)
return proc
| agpl-3.0 | -1,079,943,015,096,485,200 | 44.16129 | 142 | 0.659643 | false | 3.473945 | false | false | false |
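A hypothetical call sequence for exec_gnuradio(); the file paths and frequency (437.365 MHz, given in Hz) are placeholders:

proc = exec_gnuradio('/tmp/observation.ogg', '/tmp/waterfall.dat', 437365000)
# ... the observation runs for its scheduled duration ...
proc.terminate()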
tst-ahernandez/earthenterprise | earth_enterprise/src/fusion/tools/gee_test/tests/common/utils.py | 3 | 2962 | #!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Install Fusion and Earth Server, build tutorial databases, and test."""
import os
import subprocess
BASE_DIR = os.getcwd()
LOG_SHELL_CMDS = True
GEE_TESTS_LOG = "%s/gee_tests.log" % BASE_DIR
BYTES_PER_MEGABYTE = 1024.0 * 1024.0
class OsCommandError(Exception):
"""Thrown if os command fails."""
pass
def BaseDir():
"""Returns the directory that contains the application that is running."""
return BASE_DIR
def ClearLog():
"""Clear content of log file."""
fp = open(GEE_TESTS_LOG, "w")
fp.close()
def Log(message):
"""If logging is on, log the message."""
if LOG_SHELL_CMDS:
fp = open(GEE_TESTS_LOG, "a")
fp.write(message + "\n")
fp.close()
def ExecuteCmd(os_cmd, do_log=False, err2out=False):
"""Execute and log os command.
If the shell command fails, an exception is thrown.
Args:
os_cmd: (string) linux shell command to run.
do_log: whether to do logging.
err2out: whether to send stderr to the same file handle as for stdout.
Returns:
results of the linux shell command.
Raises:
OsCommandError: if error from shell command is not None.
"""
print "Executing: %s" % os_cmd
if do_log:
Log(os_cmd)
try:
p = subprocess.Popen(
os_cmd,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT if err2out else subprocess.PIPE)
result, error = p.communicate()
if (not err2out) and error:
print "ERROR: %s" % error
return "Unable to execute %s" % os_cmd
return result
except Exception, e:
print "FAILED: %s" % e.__str__()
raise OsCommandError()
def DiskSpace(path):
"""Returns remaining disk space in Megabytes."""
mount_info = os.statvfs(path)
return mount_info.f_bsize * mount_info.f_bavail / BYTES_PER_MEGABYTE
def ChDir(path):
"""Changes directory so that it is logged."""
Log("cd %s" % path)
os.chdir(path)
def GetFileWithReplace(path, replace):
"""Return content of file after replacing any keys in replace with values."""
fp = open(path)
content = fp.read()
fp.close()
for key in replace.iterkeys():
content = content.replace(key, replace[key])
return content
def main():
# Quick test of module.
ClearLog()
print ExecuteCmd("pwd")
ChDir("/tmp")
print ExecuteCmd("pwd")
ChDir(BASE_DIR)
print ExecuteCmd("pwd")
if __name__ == "__main__":
main()
| apache-2.0 | 1,928,320,609,421,841,700 | 23.278689 | 79 | 0.674544 | false | 3.46028 | true | false | false |
RatulSaha/leetcode | 101-150/107-binary-tree-level-traversal-II.py | 1 | 1180 | """
STATEMENT
Given a binary tree, return the bottom-up level order traversal of its nodes' values.
(ie, from left to right, level by level from leaf to root).
CLARIFICATIONS
- Do we print all 'None' values that a level may have? Yes.
EXAMPLES
[3,9,20,null,null,15,7] -> [[15,7],[9,20],[3]]
COMMENTS
- The usual tree level traversal would work
(https://github.com/RatulSaha/leetcode/blob/master/101-150/102-binary-tree-level-order-reversal.py).
- We can use deque from collections module to do appendleft instead of append.
"""
def levelOrderBottom(root):
"""
:type root: TreeNode
:rtype: List[List[int]]
"""
to_return = []
if not root:
return to_return
level = [root]
while level:
current_level_val = [node.val for node in level]
to_return = [current_level_val] + to_return
next_level = []
for node in level:
if node.left is not None:
next_level.append(node.left)
if node.right is not None:
next_level.append(node.right)
level = next_level
return to_return
| mit | 7,317,192,898,427,964,000 | 30.891892 | 102 | 0.594068 | false | 3.676012 | false | false | false |
kfoltman/calfbox | sampler_api_example.py | 1 | 1432 | import os
import sys
import struct
import time
import unittest
sys.path = ["./py"] + sys.path
import cbox
global Document
Document = cbox.Document
scene = Document.get_scene()
scene.clear()
instrument = scene.add_new_instrument_layer("test_sampler", "sampler").get_instrument()
npfs = instrument.engine.load_patch_from_string(0, '.', '', 'new_patch')
instrument.engine.set_patch(1, 0)
mgrp = npfs.get_global().get_children()[0]
g1 = mgrp.new_child()
g1.set_param("cutoff", "100")
g1.set_param("resonance", "6")
g1.set_param("fil_type", "lpf_4p")
g1.set_param("fileg_start", "50")
g1.set_param("fileg_attack", "0.01")
g1.set_param("fileg_decay", "0.2")
g1.set_param("fileg_sustain", "20")
g1.set_param("fileg_depth", "5400")
g1.set_param("fileg_release", "10")
g1.set_param("ampeg_release", "0.1")
g1.set_param("amp_veltrack", "0")
g1.set_param("volume", "-12")
g1.set_param("fileg_depthcc14", "-5400")
#g1.set_param("cutoff", "1000")
#g1.set_param("fillfo_freq", "4")
#g1.set_param("fillfo_depth", "2400")
#g1.set_param("fillfo_wave", "12")
#g1.set_param("fillfo_freqcc2", "4")
r1 = g1.new_child()
r1.set_param("sample", "*saw")
r1.set_param("transpose", "0")
r1.set_param("tune", "5")
r1.set_param("gain_cc17", "12")
r2 = g1.new_child()
r2.set_param("sample", "*sqr")
r2.set_param("transpose", "12")
r2.set_param("gain_cc17", "-12")
print(instrument.engine.status())
print("Ready!")
while True:
cbox.call_on_idle()
| gpl-3.0 | -2,428,959,159,386,839,600 | 23.271186 | 87 | 0.664106 | false | 2.336052 | false | false | false |
cangencer/hazelcast-python-client | hazelcast/proxy/base.py | 1 | 6744 | import logging
from hazelcast.future import make_blocking
from hazelcast.partition import string_partition_strategy
from hazelcast.util import enum, thread_id
def default_response_handler(future, codec, to_object):
response = future.result()
if response:
try:
codec.decode_response
except AttributeError:
return
decoded_response = codec.decode_response(response, to_object)
try:
return decoded_response['response']
except AttributeError:
pass
class Proxy(object):
def __init__(self, client, service_name, name):
self.service_name = service_name
self.name = name
self.partition_key = string_partition_strategy(self.name)
self._client = client
self.logger = logging.getLogger("%s(%s)" % (type(self).__name__, name))
self._to_object = client.serializer.to_object
self._to_data = client.serializer.to_data
self._start_listening = client.listener.start_listening
self._stop_listening = client.listener.stop_listening
def destroy(self):
self._on_destroy()
return self._client.proxy.destroy_proxy(self.service_name, self.name)
def _on_destroy(self):
pass
def __repr__(self):
return '%s(name="%s")' % (type(self).__name__, self.name)
def _encode_invoke(self, codec, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, **kwargs)
return self._client.invoker.invoke_on_random_target(request).continue_with(response_handler, codec,
self._to_object)
def _encode_invoke_on_target(self, codec, _address, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, **kwargs)
return self._client.invoker.invoke_on_target(request, _address).continue_with(response_handler, codec,
self._to_object)
def _encode_invoke_on_key(self, codec, key_data, **kwargs):
partition_id = self._client.partition_service.get_partition_id(key_data)
return self._encode_invoke_on_partition(codec, partition_id, **kwargs)
def _encode_invoke_on_partition(self, codec, _partition_id, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, **kwargs)
return self._client.invoker.invoke_on_partition(request, _partition_id).continue_with(response_handler,
codec, self._to_object)
def blocking(self):
"""
:return: Return a version of this proxy with only blocking method calls
"""
return make_blocking(self)
class PartitionSpecificProxy(Proxy):
def __init__(self, client, service_name, name):
super(PartitionSpecificProxy, self).__init__(client, service_name, name)
self._partition_id = self._client.partition_service.get_partition_id(self.partition_key)
def _encode_invoke(self, codec, response_handler=default_response_handler, **kwargs):
return super(PartitionSpecificProxy, self)._encode_invoke_on_partition(codec, self._partition_id,
response_handler=response_handler,
**kwargs)
class TransactionalProxy(object):
def __init__(self, name, transaction):
self.name = name
self.transaction = transaction
self._to_object = transaction.client.serializer.to_object
self._to_data = transaction.client.serializer.to_data
def _encode_invoke(self, codec, response_handler=default_response_handler, **kwargs):
request = codec.encode_request(name=self.name, txn_id=self.transaction.id, thread_id=thread_id(), **kwargs)
return self.transaction.client.invoker.invoke_on_connection(request, self.transaction.connection).continue_with(
response_handler, codec, self._to_object)
def __repr__(self):
return '%s(name="%s")' % (type(self).__name__, self.name)
ItemEventType = enum(added=1, removed=2)
EntryEventType = enum(added=1,
removed=1 << 1,
updated=1 << 2,
evicted=1 << 3,
evict_all=1 << 4,
clear_all=1 << 5,
merged=1 << 6,
expired=1 << 7)
class ItemEvent(object):
def __init__(self, name, item_data, event_type, member, to_object):
self.name = name
self._item_data = item_data
self.event_type = event_type
self.member = member
self._to_object = to_object
@property
def item(self):
return self._to_object(self._item_data)
class EntryEvent(object):
def __init__(self, to_object, key, old_value, value, merging_value, event_type, uuid,
number_of_affected_entries):
self._key_data = key
self._value_data = value
self._old_value_data = old_value
self._merging_value_data = merging_value
self.event_type = event_type
self.uuid = uuid
self.number_of_affected_entries = number_of_affected_entries
self._to_object = to_object
@property
def key(self):
return self._to_object(self._key_data)
@property
def old_value(self):
return self._to_object(self._old_value_data)
@property
def value(self):
return self._to_object(self._value_data)
@property
def merging_value(self):
return self._to_object(self._merging_value_data)
def __repr__(self):
return "EntryEvent(key=%s, old_value=%s, value=%s, merging_value=%s, event_type=%s, uuid=%s, " \
"number_of_affected_entries=%s)" % (
self.key, self.old_value, self.value, self.merging_value, self.event_type, self.uuid,
self.number_of_affected_entries)
class TopicMessage(object):
def __init__(self, name, message_data, publish_time, member, to_object):
self.name = name
self._message_data = message_data
self.publish_time = publish_time
self.member = member
self._to_object = to_object
@property
def message(self):
return self._to_object(self._message_data)
def get_entry_listener_flags(**kwargs):
flags = 0
for (key, value) in kwargs.iteritems():
if value:
flags |= getattr(EntryEventType, key)
return flags
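# A usage sketch (not part of the original API docs): flags are combined by
# OR-ing the EntryEventType values defined above, e.g.
# get_entry_listener_flags(added=True, removed=True) returns
# EntryEventType.added | EntryEventType.removed == 3.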
| apache-2.0 | 33,862,116,476,375,604 | 37.758621 | 120 | 0.595344 | false | 4.023866 | false | false | false |
davidmontgom/pyvmomi-community-samples | samples/list_datastore_cluster.py | 14 | 2776 | #!/usr/bin/env python
# William Lam
# www.virtuallyghetto.com
"""
vSphere Python SDK program for listing Datastores in Datastore Cluster
"""
import argparse
import atexit
from pyVmomi import vim
from pyVmomi import vmodl
from pyVim import connect
def get_args():
"""
Supports the command-line arguments listed below.
"""
parser = argparse.ArgumentParser(
description='Process args for retrieving all the Virtual Machines')
parser.add_argument('-s', '--host',
required=True, action='store',
help='Remote host to connect to')
parser.add_argument('-o', '--port',
type=int, default=443,
action='store', help='Port to connect on')
parser.add_argument('-u', '--user', required=True,
action='store',
help='User name to use when connecting to host')
parser.add_argument('-p', '--password',
required=True, action='store',
help='Password to use when connecting to host')
parser.add_argument('-d', '--dscluster', required=True, action='store',
help='Name of vSphere Datastore Cluster')
args = parser.parse_args()
return args
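# Example invocation (host, credentials and cluster name below are placeholders):
#   python list_datastore_cluster.py -s vcenter.example.local -u administrator \
#       -p 'MyPassword' -d Datastore-Cluster-01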
def main():
"""
Simple command-line program for listing Datastores in Datastore Cluster
"""
args = get_args()
try:
service_instance = connect.SmartConnect(host=args.host,
user=args.user,
pwd=args.password,
port=int(args.port))
if not service_instance:
print("Could not connect to the specified host using "
"specified username and password")
return -1
atexit.register(connect.Disconnect, service_instance)
content = service_instance.RetrieveContent()
# Search for all Datastore Clusters aka StoragePod
obj_view = content.viewManager.CreateContainerView(content.rootFolder,
[vim.StoragePod],
True)
ds_cluster_list = obj_view.view
obj_view.Destroy()
for ds_cluster in ds_cluster_list:
if ds_cluster.name == args.dscluster:
datastores = ds_cluster.childEntity
                print("Datastores: ")
                for datastore in datastores:
                    print(datastore.name)
except vmodl.MethodFault as error:
        print("Caught vmodl fault : " + error.msg)
return -1
return 0
# Start program
if __name__ == "__main__":
main()
| apache-2.0 | 2,424,465,620,236,208,600 | 30.545455 | 78 | 0.541066 | false | 4.802768 | false | false | false |
peczony/chgksuite | chgksuite.py | 1 | 1277 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
import os
from chgk_parser import gui_parse
from chgk_composer import gui_compose
try:
from Tkinter import *
except ImportError:
from tkinter import *
import tkFileDialog
import tkFont
debug = False
def gui_choose_action():
def parsereturn():
root.ret = 'parse'
root.quit()
root.destroy()
def composereturn():
root.ret = 'compose'
root.quit()
root.destroy()
root = Tk()
root.ret = 'None'
frame = Frame(root)
frame.pack()
bottomframe = Frame(root)
bottomframe.pack(side = 'bottom')
Button(frame, command=
parsereturn, text = 'Parse').pack(side = 'left',
padx = 20, pady = 20,
ipadx = 20, ipady = 20,)
Button(frame, command=
composereturn, text = 'Compose').pack(side = 'left',
padx = 20, pady = 20,
ipadx = 20, ipady = 20,)
root.mainloop()
return root.ret
def main():
action = gui_choose_action()
if action == 'parse':
gui_parse()
if action == 'compose':
gui_compose()
if __name__ == "__main__":
main() | mit | -805,545,435,855,413,500 | 23.576923 | 64 | 0.53798 | false | 3.81194 | false | false | false |
maximtrp/posthocs | scikit_posthocs/_posthocs.py | 1 | 80919 | # -*- coding: utf-8 -*-
import numpy as np
import scipy.stats as ss
import itertools as it
from statsmodels.sandbox.stats.multicomp import multipletests
from statsmodels.stats.multicomp import pairwise_tukeyhsd
from statsmodels.stats.libqsturng import psturng
from pandas import DataFrame
def __convert_to_df(a, val_col: str = 'vals', group_col: str = 'groups',
val_id: int = None, group_id: int = None) -> DataFrame:
'''Hidden helper method to create a DataFrame with input data for further
processing.
Parameters
----------
a : array_like or pandas DataFrame object
        An array, any object exposing the array interface, or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary, i.e. groups
may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values
(test or response variable). Values should have a non-nominal scale.
Must be specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
val_id : int, optional
Index of a column that contains dependent variable values (test or
response variable). Should be specified if a NumPy ndarray is used as an
input. It will be inferred from data, if not specified.
group_id : int, optional
Index of a column that contains independent variable values (grouping or
predictor variable). Should be specified if a NumPy ndarray is used as
an input. It will be inferred from data, if not specified.
Returns
-------
x : pandas DataFrame
DataFrame with input data, `val_col` column contains numerical values and
`group_col` column contains categorical values.
val_col : str
Name of a DataFrame column that contains dependent variable values (test
or response variable).
group_col : str
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable).
Notes
-----
    The inference algorithm for determining the `val_id` and `group_id` args is
    rather simple, so it is better to specify them explicitly to prevent errors.
'''
if not group_col:
group_col = 'groups'
if not val_col:
val_col = 'vals'
if isinstance(a, DataFrame):
x = a.copy()
if not {group_col, val_col}.issubset(a.columns):
raise ValueError('Specify correct column names using `group_col` and `val_col` args')
return x, val_col, group_col
elif isinstance(a, list) or (isinstance(a, np.ndarray) and not a.shape.count(2)):
grps_len = map(len, a)
grps = list(it.chain(*[[i+1] * l for i, l in enumerate(grps_len)]))
vals = list(it.chain(*a))
return DataFrame({val_col: vals, group_col: grps}), val_col, group_col
elif isinstance(a, np.ndarray):
# cols ids not defined
# trying to infer
if not(all([val_id, group_id])):
if np.argmax(a.shape):
a = a.T
ax = [np.unique(a[:, 0]).size, np.unique(a[:, 1]).size]
if np.diff(ax).item():
__val_col = np.argmax(ax)
__group_col = np.argmin(ax)
else:
raise ValueError('Cannot infer input format.\nPlease specify `val_id` and `group_id` args')
cols = {__val_col: val_col,
__group_col: group_col}
else:
cols = {val_id: val_col,
group_id: group_col}
cols_vals = dict(sorted(cols.items())).values()
return DataFrame(a, columns=cols_vals), val_col, group_col
def __convert_to_block_df(a, y_col=None, group_col=None, block_col=None, melted=False):
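    '''Hidden helper method to melt block-design data into a long-format
    DataFrame with 'y', 'groups' and 'blocks' columns.
    `a` may be a block-design matrix (rows are blocks, columns are groups) or
    already melted data, in which case `y_col`, `group_col` and `block_col`
    must be specified. Returns the melted DataFrame along with the resulting
    column names.
    '''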
# TODO: refactor conversion of block data to DataFrame
if melted and not all([i is not None for i in [block_col, group_col, y_col]]):
raise ValueError('`block_col`, `group_col`, `y_col` should be explicitly specified if using melted data')
if isinstance(a, DataFrame) and not melted:
x = a.copy(deep=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
elif isinstance(a, DataFrame) and melted:
x = DataFrame.from_dict({'groups': a[group_col],
'blocks': a[block_col],
'y': a[y_col]})
elif not isinstance(a, DataFrame):
x = np.array(a)
x = DataFrame(x, index=np.arange(x.shape[0]), columns=np.arange(x.shape[1]))
if not melted:
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
x.columns.name = group_col
x.index.name = block_col
x = x.reset_index().melt(id_vars=block_col, var_name=group_col, value_name=y_col)
else:
x.rename(columns={group_col: 'groups', block_col: 'blocks', y_col: 'y'}, inplace=True)
group_col = 'groups'
block_col = 'blocks'
y_col = 'y'
return x, y_col, group_col, block_col
def posthoc_conover(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Conover's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by `group_col` or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    A tie correction is employed according to Conover [1]_.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_conover(x, p_adjust = 'holm')
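    >>> # A DataFrame may be passed instead; the column names below are
    >>> # illustrative and pandas is assumed to be imported as pd
    >>> df = pd.DataFrame({'vals': [1, 2, 3, 12, 31, 54, 10, 12, 6],
    ...                    'groups': ['a'] * 3 + ['b'] * 3 + ['c'] * 3})
    >>> sp.posthoc_conover(df, val_col='vals', group_col='groups', p_adjust='holm')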
'''
def compare_conover(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
D = (n - 1. - h_cor) / (n - x_len)
t_value = diff / np.sqrt(S2 * B * D)
p_value = 2. * ss.t.sf(np.abs(t_value), df=n-x_len)
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
n = len(x.index)
x_groups_unique = x[_group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
x_ranks_sum = x.groupby(_group_col)['ranks'].sum()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
h = (12. / (n * (n + 1.))) * np.sum(x_ranks_sum**2 / x_lens) - 3. * (n + 1.)
h_cor = h / x_ties
if x_ties == 1:
S2 = n * (n + 1.) / 12.
else:
S2 = (1. / (n - 1.)) * (np.sum(x['ranks'] ** 2.) - (n * (((n + 1.)**2.) / 4.)))
vs = np.zeros((x_len, x_len))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = compare_conover(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_dunn(a, val_col=None, group_col=None, p_adjust=None, sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Dunn's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas DataFrame.
Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] O.J. Dunn (1964). Multiple comparisons using rank sums.
Technometrics, 6, 241-252.
.. [2] S.A. Glantz (2012), Primer of Biostatistics. New York: McGraw Hill.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_dunn(x, p_adjust = 'holm')
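    >>> # The result is a symmetric DataFrame of p values indexed by group
    >>> # labels (1, 2, 3 for list input), so a single pair can be read via .loc
    >>> sp.posthoc_dunn(x, p_adjust = 'holm').loc[1, 2]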
'''
def compare_dunn(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
z_value = diff / np.sqrt((A - x_ties) * B)
p_value = 2. * ss.norm.sf(np.abs(z_value))
return p_value
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
n = len(x.index)
x_groups_unique = x[_group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = tie_sum / (12. * (n - 1))
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_dunn(x_groups_unique[i], x_groups_unique[j])
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi(a, val_col=None, group_col=None, dist='chi', sort=True):
'''Post hoc pairwise test for multiple comparisons of mean rank sums
(Nemenyi's test). May be used after Kruskal-Wallis one-way analysis of
variance by ranks to do pairwise comparisons [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional. Second dimension may vary,
i.e. groups may have different lengths.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
dist : str, optional
        Method for determining the p value. The default distribution is "chi"
        (chi-squared); use "tukey" for the studentized range distribution.
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
A tie correction will be employed according to Glantz (2012).
References
----------
.. [1] Lothar Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 395-397, 662-664.
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
>>> sp.posthoc_nemenyi(x)
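    >>> # The studentized range distribution may be used instead of chi-squared
    >>> sp.posthoc_nemenyi(x, dist = 'tukey')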
'''
def compare_stats_chi(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
A = n * (n + 1.) / 12.
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
chi = diff ** 2. / (A * B)
return chi
def compare_stats_tukey(i, j):
diff = np.abs(x_ranks_avg.loc[i] - x_ranks_avg.loc[j])
B = (1. / x_lens.loc[i] + 1. / x_lens.loc[j])
q = diff / np.sqrt((n * (n + 1.) / 12.) * B)
return q
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
n = len(x.index)
x_groups_unique = x[_group_col].unique()
x_len = x_groups_unique.size
x_lens = x.groupby(_group_col)[_val_col].count()
x['ranks'] = x[_val_col].rank()
x_ranks_avg = x.groupby(_group_col)['ranks'].mean()
# ties
vals = x.groupby('ranks').count()[_val_col].values
tie_sum = np.sum(vals[vals != 1] ** 3 - vals[vals != 1])
tie_sum = 0 if not tie_sum else tie_sum
x_ties = np.min([1., 1. - tie_sum / (n ** 3. - n)])
vs = np.zeros((x_len, x_len))
combs = it.combinations(range(x_len), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
if dist == 'chi':
for i, j in combs:
vs[i, j] = compare_stats_chi(x_groups_unique[i], x_groups_unique[j]) / x_ties
vs[tri_upper] = ss.chi2.sf(vs[tri_upper], x_len - 1)
elif dist == 'tukey':
for i, j in combs:
vs[i, j] = compare_stats_tukey(x_groups_unique[i], x_groups_unique[j]) * np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], x_len, np.inf)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=x_groups_unique, columns=x_groups_unique)
def posthoc_nemenyi_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
'''Calculate pairwise comparisons using Nemenyi post hoc test for
unreplicated blocked data. This test is usually conducted post hoc if
significant results of the Friedman's test are obtained. The statistics
refer to upper quantiles of the studentized range distribution (Tukey) [1]_,
[2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    A one-way ANOVA with repeated measures, also referred to as ANOVA with
    unreplicated block design, can be conducted via Friedman's test. The
    consequent post hoc pairwise multiple comparison test according to
    Nemenyi is conducted with this function.
This function does not test for ties.
References
----------
.. [1] J. Demsar (2006), Statistical comparisons of classifiers over
multiple data sets, Journal of Machine Learning Research, 7, 1-30.
.. [2] P. Nemenyi (1963) Distribution-free Multiple Comparisons. Ph.D.
thesis, Princeton University.
.. [3] L. Sachs (1997), Angewandte Statistik. Berlin: Springer.
Pages: 668-675.
Examples
--------
>>> # Non-melted case, x is a block design matrix, i.e. rows are blocks
>>> # and columns are groups.
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_nemenyi_friedman(x)
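    >>> # Melted (long-format) input; column names below are illustrative and
    >>> # pandas is assumed to be imported as pd
    >>> df = pd.DataFrame({'y': x.ravel(),
    ...                    'blocks': np.repeat(np.arange(6), 3),
    ...                    'groups': np.tile(np.arange(3), 6)})
    >>> sp.posthoc_nemenyi_friedman(df, y_col='y', block_col='blocks', group_col='groups', melted=True)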
'''
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[_group_col, _block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[_group_col].unique()
k = groups.size
n = x[_block_col].unique().size
x['mat'] = x.groupby(_block_col)[_y_col].rank()
R = x.groupby(_group_col)['mat'].mean()
vs = np.zeros((k, k))
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs *= np.sqrt(2.)
vs[tri_upper] = psturng(vs[tri_upper], k, np.inf)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_conover_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Conover post hoc test for unreplicated
blocked data. This test is usually conducted post hoc after
significant results of the Friedman test. The statistics refer to
the Student t distribution [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
'single-step' : uses Tukey distribution for multiple comparisons
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
    A one-way ANOVA with repeated measures, also referred to as ANOVA with
    unreplicated block design, can be conducted via Friedman's test. The
    consequent post hoc pairwise multiple comparison test according to
    Conover is conducted with this function.
    If y is a matrix, then the columns refer to the treatments and the rows
    indicate the blocks.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_conover_friedman(x)
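    >>> # 'single-step' adjusts p values via the studentized range (Tukey) distribution
    >>> sp.posthoc_conover_friedman(x, p_adjust = 'single-step')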
'''
def compare_stats(i, j):
dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
tval = dif / np.sqrt(A) / np.sqrt(B)
pval = 2. * ss.t.sf(np.abs(tval), df=(m*n*k - k - n + 1))
return pval
def compare_tukey(i, j):
dif = np.abs(R.loc[groups[i]] - R.loc[groups[j]])
qval = np.sqrt(2.) * dif / (np.sqrt(A) * np.sqrt(B))
pval = psturng(qval, k, np.inf)
return pval
x, _y_col, _group_col, _block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[_group_col, _block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[_group_col].unique()
k = groups.size
n = x[_block_col].unique().size
x['mat'] = x.groupby(_block_col)[_y_col].rank()
R = x.groupby(_group_col)['mat'].sum()
A1 = (x['mat'] ** 2).sum()
m = 1
S2 = m/(m*k - 1.) * (A1 - m*k*n*(m*k + 1.)**2./4.)
T2 = 1. / S2 * (np.sum(R) - n * m * ((m * k + 1.) / 2.)**2.)
A = S2 * (2. * n * (m * k - 1.)) / (m * n * k - k - n + 1.)
B = 1. - T2 / (n * (m * k - 1.))
vs = np.zeros((k, k))
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
if p_adjust == 'single-step':
for i, j in combs:
vs[i, j] = compare_tukey(i, j)
else:
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust is not None:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_npm_test(a, val_col=None, group_col=None, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Nashimoto and Wright's all-pairs
comparison procedure (NPM test) for simply ordered mean ranksums.
    The NPM test is basically an extension of Nemenyi's procedure for testing
increasingly ordered alternatives [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See `statsmodels.sandbox.stats.multicomp`
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are estimated from the studentized range distribution. If
    the medians are already increasingly ordered, then the NPM test
    simplifies to the ordinary Nemenyi test.
References
----------
.. [1] Nashimoto, K., Wright, F.T., (2005), Multiple comparison procedures for
detecting differences in simply ordered means. Comput. Statist. Data
Anal. 48, 291--306.
Examples
--------
>>> x = np.array([[102,109,114,120,124],
[110,112,123,130,145],
[132,141,156,160,172]])
>>> sp.posthoc_npm_test(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x['ranks'] = x[_val_col].rank()
ri = x.groupby(_group_col)['ranks'].mean()
ni = x.groupby(_group_col)[_val_col].count()
k = groups.size
n = x.shape[0]
sigma = np.sqrt(n * (n + 1) / 12.)
df = np.inf
def compare(m, u):
a = [(ri.loc[groups[u]]-ri.loc[groups[_mi]])/(sigma/np.sqrt(2)*np.sqrt(1./ni.loc[groups[_mi]] + 1./ni.loc[groups[u]])) for _mi in m]
return np.array(a)
stat = np.zeros((k, k))
for i in range(k-1):
for j in range(i+1, k):
u = j
m = np.arange(i, u)
tmp = compare(m, u)
stat[j, i] = np.max(tmp)
stat[stat < 0] = 0
p_values = psturng(stat, k, df)
tri_lower = np.tril_indices(p_values.shape[0], -1)
p_values[tri_lower] = p_values.T[tri_lower]
np.fill_diagonal(p_values, 1)
return DataFrame(p_values, index=groups, columns=groups)
def posthoc_siegel_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Siegel and Castellan's All-Pairs Comparisons Test for Unreplicated Blocked
Data. See authors' paper for additional information [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
For all-pairs comparisons in a two factorial unreplicated complete block design
with non-normally distributed residuals, Siegel and Castellan's test can be
performed on Friedman-type ranked data.
References
----------
.. [1] S. Siegel, N. J. Castellan Jr. (1988), Nonparametric Statistics for the
Behavioral Sciences. 2nd ed. New York: McGraw-Hill.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_siegel_friedman(x)
'''
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
zval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return zval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[group_col, block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs = 2. * ss.norm.sf(np.abs(vs))
vs[vs > 1] = 1.
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_miller_friedman(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False):
'''Miller's All-Pairs Comparisons Test for Unreplicated Blocked Data.
The p-values are computed from the chi-square distribution [1]_, [2]_,
[3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (strings).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
For all-pairs comparisons in a two factorial unreplicated complete block
design with non-normally distributed residuals, Miller's test can be
performed on Friedman-type ranked data.
References
----------
    .. [1] J. Bortz, G. A. Lienert, K. Boehnke (1990), Verteilungsfreie
        Methoden in der Biostatistik. Berlin: Springer.
.. [2] R. G. Miller Jr. (1996), Simultaneous statistical inference. New
York: McGraw-Hill.
.. [3] E. L. Wike (2006), Data Analysis. A Statistical Primer for Psychology
Students. New Brunswick: Aldine Transaction.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_miller_friedman(x)
'''
def compare_stats(i, j):
dif = np.abs(R[groups[i]] - R[groups[j]])
qval = dif / np.sqrt(k * (k + 1.) / (6. * n))
return qval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[group_col, block_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
k = groups.size
n = x[block_col].unique().size
x['mat'] = x.groupby(block_col)[y_col].rank()
R = x.groupby(group_col)['mat'].mean()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
vs = vs ** 2
vs = ss.chi2.sf(vs, k - 1)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_durbin(a, y_col=None, block_col=None, group_col=None, melted=False, sort=False, p_adjust=None):
'''Pairwise post hoc test for multiple comparisons of rank sums according to
Durbin and Conover for a two-way balanced incomplete block design (BIBD). See
references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of block design,
i.e. rows are blocks, and columns are groups. In this case you do
not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp
for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] W. J. Conover (1999), Practical nonparametric Statistics,
3rd. edition, Wiley.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_durbin(x)
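    >>> # A block-design DataFrame (rows are blocks, columns are groups) is also
    >>> # accepted; labels below are illustrative, pandas assumed imported as pd
    >>> df = pd.DataFrame(x, index = ['b1', 'b2', 'b3', 'b4', 'b5', 'b6'],
    ...                   columns = ['g1', 'g2', 'g3'])
    >>> sp.posthoc_durbin(df, p_adjust = 'holm')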
'''
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
def compare_stats(i, j):
dif = np.abs(rj[groups[i]] - rj[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df=df)
return pval
x = x.sort_values(by=[block_col, group_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
t = len(groups)
b = x[block_col].unique().size
r = b
k = t
x['y_ranked'] = x.groupby(block_col)[y_col].rank()
rj = x.groupby(group_col)['y_ranked'].sum()
A = (x['y_ranked'] ** 2).sum()
C = (b * k * (k + 1) ** 2) / 4.
D = (rj ** 2).sum() - r * C
T1 = (t - 1) / (A - C) * D
denom = np.sqrt(((A - C) * 2 * r) / (b * k - b - t + 1) * (1 - T1 / (b * (k - 1))))
df = b * k - b - t + 1
vs = np.zeros((t, t), dtype=float)
combs = it.combinations(range(t), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_anderson(a, val_col=None, group_col=None, midrank=True, sort=False, p_adjust=None):
'''Anderson-Darling Pairwise Test for k-samples. Tests the null hypothesis
that k-samples are drawn from the same population without having to specify
the distribution function of that population [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
midrank : bool, optional
Type of Anderson-Darling test which is computed. If set to True (default), the
midrank test applicable to continuous and discrete populations is performed. If
False, the right side empirical distribution is used.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values. See statsmodels.sandbox.stats.multicomp for details. Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
References
----------
.. [1] F.W. Scholz, M.A. Stephens (1987), K-Sample Anderson-Darling Tests,
Journal of the American Statistical Association, Vol. 82, pp. 918-924.
Examples
--------
>>> x = np.array([[2.9, 3.0, 2.5, 2.6, 3.2], [3.8, 2.7, 4.0, 2.4], [2.8, 3.4, 3.7, 2.2, 2.0]])
>>> sp.posthoc_anderson(x)
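    >>> # The right-side empirical distribution variant with Holm adjustment
    >>> sp.posthoc_anderson(x, midrank = False, p_adjust = 'holm')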
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
k = groups.size
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
for i, j in combs:
vs[i, j] = ss.anderson_ksamp([x.loc[x[_group_col] == groups[i], _val_col], x.loc[x[_group_col] == groups[j], _val_col]], midrank=midrank)[2]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_quade(a, y_col=None, block_col=None, group_col=None, dist='t', melted=False, sort=False, p_adjust=None):
'''Calculate pairwise comparisons using Quade's post hoc test for
unreplicated blocked data. This test is usually conducted if significant
results were obtained by the omnibus test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
If `melted` is set to False (default), `a` is a typical matrix of
block design, i.e. rows are blocks, and columns are groups. In this
case you do not need to specify col arguments.
If `a` is an array and `melted` is set to True,
y_col, block_col and group_col must specify the indices of columns
        containing elements of the corresponding type.
If `a` is a Pandas DataFrame and `melted` is set to True,
y_col, block_col and group_col must specify columns names (string).
y_col : str or int, optional
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains y data.
block_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains blocking factor values.
group_col : str or int
Must be specified if `a` is a pandas DataFrame object.
Name of the column that contains treatment (group) factor values.
dist : str, optional
Method for determining p values.
        The default distribution is "t"; use "normal" for the standard normal distribution.
melted : bool, optional
Specifies if data are given as melted columns "y", "blocks", and
"groups".
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
Pandas DataFrame containing p values.
References
----------
.. [1] W. J. Conover (1999), Practical nonparametric Statistics, 3rd. Edition,
Wiley.
.. [2] N. A. Heckert and J. J. Filliben (2003). NIST Handbook 148: Dataplot
Reference Manual, Volume 2: Let Subcommands and Library Functions.
National Institute of Standards and Technology Handbook Series, June 2003.
.. [3] D. Quade (1979), Using weighted rankings in the analysis of complete
blocks with additive block effects. Journal of the American Statistical
Association, 74, 680-683.
Examples
--------
>>> x = np.array([[31,27,24],[31,28,31],[45,29,46],[21,18,48],[42,36,46],[32,17,40]])
>>> sp.posthoc_quade(x)
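    >>> # The standard normal distribution may be used instead of the t distribution
    >>> sp.posthoc_quade(x, dist = 'normal')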
'''
def compare_stats_t(i, j):
dif = np.abs(S[groups[i]] - S[groups[j]])
tval = dif / denom
pval = 2. * ss.t.sf(np.abs(tval), df=(b - 1) * (k - 1))
return pval
def compare_stats_norm(i, j):
dif = np.abs(W[groups[i]] * ff - W[groups[j]] * ff)
zval = dif / denom
pval = 2. * ss.norm.sf(np.abs(zval))
return pval
x, y_col, group_col, block_col = __convert_to_block_df(a, y_col, group_col, block_col, melted)
x = x.sort_values(by=[block_col, group_col], ascending=True) if sort else x
x.dropna(inplace=True)
groups = x[group_col].unique()
k = len(groups)
b = x[block_col].unique().size
x['r'] = x.groupby(block_col)[y_col].rank()
q = (x.groupby(block_col)[y_col].max() - x.groupby(block_col)[y_col].min()).rank()
x['rr'] = x['r'] - (k + 1)/2
x['s'] = x.apply(lambda row: row['rr'] * q[row[block_col]], axis=1)
x['w'] = x.apply(lambda row: row['r'] * q[row[block_col]], axis=1)
A = (x['s'] ** 2).sum()
S = x.groupby(group_col)['s'].sum()
B = np.sum(S ** 2) / b
W = x.groupby(group_col)['w'].sum()
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
if dist == 't':
denom = np.sqrt((2 * b * (A - B)) / ((b - 1) * (k - 1)))
for i, j in combs:
vs[i, j] = compare_stats_t(i, j)
else:
n = b * k
denom = np.sqrt((k * (k + 1.) * (2. * n + 1.) * (k-1.)) / (18. * n * (n + 1.)))
ff = 1. / (b * (b + 1.)/2.)
for i, j in combs:
vs[i, j] = compare_stats_norm(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_vanwaerden(a, val_col=None, group_col=None, sort=False, p_adjust=None):
'''Van der Waerden's test for pairwise multiple comparisons between group
levels. See references for additional information [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
For one-factorial designs with samples that do not meet the assumptions for
one-way-ANOVA and subsequent post hoc tests, the van der Waerden test using
normal scores can be employed. Provided that significant differences were
detected by this global test, one may be interested in applying post hoc
tests according to van der Waerden for pairwise multiple comparisons of the
group levels.
There is no tie correction applied in this function.
References
----------
.. [1] W. J. Conover and R. L. Iman (1979), On multiple-comparisons procedures,
Tech. Rep. LA-7677-MS, Los Alamos Scientific Laboratory.
.. [2] B. L. van der Waerden (1952) Order tests for the two-sample problem and
their power, Indagationes Mathematicae, 14, 453-458.
Examples
--------
>>> x = np.array([[10,'a'], [59,'a'], [76,'b'], [10, 'b']])
>>> sp.posthoc_vanwaerden(x, val_col = 0, group_col = 1)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
n = x[_val_col].size
k = groups.size
r = ss.rankdata(x[_val_col])
x['z_scores'] = ss.norm.ppf(r / (n + 1))
aj = x.groupby(_group_col)['z_scores'].sum()
nj = x.groupby(_group_col)['z_scores'].count()
s2 = (1. / (n - 1.)) * (x['z_scores'] ** 2.).sum()
sts = (1. / s2) * np.sum(aj ** 2. / nj)
A = aj / nj
vs = np.zeros((k, k), dtype=float)
combs = it.combinations(range(k), 2)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
def compare_stats(i, j):
dif = np.abs(A[groups[i]] - A[groups[j]])
B = 1. / nj[groups[i]] + 1. / nj[groups[j]]
tval = dif / np.sqrt(s2 * (n - 1. - sts)/(n - k) * B)
pval = 2. * ss.t.sf(np.abs(tval), df=n - k)
return pval
for i, j in combs:
vs[i, j] = compare_stats(i, j)
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_ttest(a, val_col=None, group_col=None, pool_sd=False, equal_var=True, p_adjust=None, sort=False):
'''Pairwise T test for multiple comparisons of independent groups. May be
used after a parametric ANOVA to do pairwise comparisons.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
equal_var : bool, optional
        If True (default), perform a standard independent two-sample t-test
that assumes equal population variances [1]_.
If False, perform Welch's t-test, which does not assume equal
population variance [2]_.
pool_sd : bool, optional
Calculate a common SD for all groups and use that for all
comparisons (this can be useful if some groups are small).
        This method does not actually call scipy's ttest_ind() function,
so extra arguments are ignored. Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
    result : pandas DataFrame
        P values.
References
----------
.. [1] http://en.wikipedia.org/wiki/T-test#Independent_two-sample_t-test
.. [2] http://en.wikipedia.org/wiki/Welch%27s_t_test
Examples
--------
>>> x = [[1,2,3,5,1], [12,31,54, np.nan], [10,12,6,74,11]]
    >>> sp.posthoc_ttest(x, p_adjust = 'holm')
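    >>> # Pooled-SD and Welch variants (illustrative calls)
    >>> sp.posthoc_ttest(x, pool_sd = True, p_adjust = 'holm')
    >>> sp.posthoc_ttest(x, equal_var = False, p_adjust = 'holm')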
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
k = groups.size
xg = x.groupby(by=_group_col)[_val_col]
vs = np.zeros((k, k), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(k), 2)
if pool_sd:
ni = xg.count()
m = xg.mean()
sd = xg.std(ddof=1)
deg_f = ni - 1.
total_deg_f = np.sum(deg_f)
pooled_sd = np.sqrt(np.sum(sd ** 2. * deg_f) / total_deg_f)
def compare_pooled(i, j):
diff = m.iloc[i] - m.iloc[j]
se_diff = pooled_sd * np.sqrt(1. / ni.iloc[i] + 1. / ni.iloc[j])
t_value = diff / se_diff
return 2. * ss.t.cdf(-np.abs(t_value), total_deg_f)
for i, j in combs:
vs[i, j] = compare_pooled(i, j)
else:
for i, j in combs:
vs[i, j] = ss.ttest_ind(xg.get_group(groups[i]), xg.get_group(groups[j]), equal_var=equal_var)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey_hsd(x, g, alpha=0.05):
'''Pairwise comparisons with TukeyHSD confidence intervals. This is a
convenience function to make statsmodels `pairwise_tukeyhsd` method more
applicable for further use.
Parameters
----------
x : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing dependent
variable values (test or response variable). Values should have a
non-nominal scale. NaN values will cause an error (please handle
manually).
g : array_like or pandas Series object, 1d
An array, any object exposing the array interface, containing
independent variable values (grouping or predictor variable). Values
should have a nominal scale (categorical).
alpha : float, optional
Significance level for the test. Default is 0.05.
Returns
-------
result : pandas DataFrame
        DataFrame with 0 and 1 values, where 0 is False (not significant) and
        1 is True (significant). Diagonal elements are set to 1.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> g = [['a'] * 5, ['b'] * 5, ['c'] * 5]
>>> sp.posthoc_tukey_hsd(np.concatenate(x), np.concatenate(g))
'''
    result = pairwise_tukeyhsd(x, g, alpha=alpha)
groups = np.array(result.groupsunique, dtype=str)
groups_len = len(groups)
vs = np.zeros((groups_len, groups_len), dtype=int)
for a in result.summary()[1:]:
a0 = str(a[0])
a1 = str(a[1])
a0i = np.where(groups == a0)[0][0]
a1i = np.where(groups == a1)[0][0]
vs[a0i, a1i] = 1 if str(a[-1]) == 'True' else 0
vsu = np.triu(vs)
np.fill_diagonal(vsu, 1)
tri_lower = np.tril_indices(vsu.shape[0], -1)
vsu[tri_lower] = np.transpose(vsu)[tri_lower]
return DataFrame(vsu, index=groups, columns=groups)
def posthoc_mannwhitney(a, val_col=None, group_col=None, use_continuity=True, alternative='two-sided', p_adjust=None, sort=True):
'''Pairwise comparisons with Mann-Whitney rank test.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
use_continuity : bool, optional
Whether a continuity correction (1/2.) should be taken into account.
Default is True.
alternative : ['two-sided', 'less', or 'greater'], optional
Whether to get the p-value for the one-sided hypothesis
('less' or 'greater') or for the two-sided hypothesis ('two-sided').
Defaults to 'two-sided'.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col or not. Recommended
unless you sort your data manually.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.mannwhitneyu` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_mannwhitney(x, p_adjust = 'holm')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_len = groups.size
vs = np.zeros((x_len, x_len))
xg = x.groupby(_group_col)[_val_col]
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = ss.mannwhitneyu(
xg.get_group(groups[i]),
xg.get_group(groups[j]),
use_continuity=use_continuity,
alternative=alternative)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_wilcoxon(a, val_col=None, group_col=None, zero_method='wilcox', correction=False, p_adjust=None, sort=False):
'''Pairwise comparisons with Wilcoxon signed-rank test. It is a non-parametric
version of the paired T-test for use with non-parametric ANOVA.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame. Array must be two-dimensional.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
zero_method : string, {"pratt", "wilcox", "zsplit"}, optional
"pratt": Pratt treatment, includes zero-differences in the ranking
process (more conservative)
"wilcox": Wilcox treatment, discards all zero-differences
"zsplit": Zero rank split, just like Pratt, but spliting the zero rank
between positive and negative ones
correction : bool, optional
If True, apply continuity correction by adjusting the Wilcoxon rank
statistic by 0.5 towards the mean value when computing the z-statistic.
Default is False.
p_adjust : str, optional
Method for adjusting p values.
See statsmodels.sandbox.stats.multicomp for details.
Available methods are:
'bonferroni' : one-step correction
'sidak' : one-step correction
'holm-sidak' : step-down method using Sidak adjustments
'holm' : step-down method using Bonferroni adjustments
'simes-hochberg' : step-up method (independent)
'hommel' : closed method based on Simes tests (non-negative)
'fdr_bh' : Benjamini/Hochberg (non-negative)
'fdr_by' : Benjamini/Yekutieli (negative)
'fdr_tsbh' : two stage fdr correction (non-negative)
'fdr_tsbky' : two stage fdr correction (non-negative)
sort : bool, optional
Specifies whether to sort DataFrame by group_col and val_col or not.
Default is False.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
Refer to `scipy.stats.wilcoxon` reference page for further details.
Examples
--------
>>> x = [[1,2,3,4,5], [35,31,75,40,21], [10,6,9,6,1]]
>>> sp.posthoc_wilcoxon(x)
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col, _val_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_len = groups.size
vs = np.zeros((x_len, x_len))
xg = x.groupby(_group_col)[_val_col]
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(x_len), 2)
for i, j in combs:
vs[i, j] = ss.wilcoxon(
xg.get_group(groups[i]),
xg.get_group(groups[j]),
zero_method=zero_method,
correction=correction)[1]
if p_adjust:
vs[tri_upper] = multipletests(vs[tri_upper], method=p_adjust)[1]
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_scheffe(a, val_col=None, group_col=None, sort=False, p_adjust=None):
'''Scheffe's all-pairs comparisons test for normally distributed data with equal
group variances. For all-pairs comparisons in an one-factorial layout with
normally distributed residuals and equal variances Scheffe's test can be
performed with parametric ANOVA [1]_, [2]_, [3]_.
A total of m = k(k-1)/2 hypotheses can be tested.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the F-distribution.
References
----------
.. [1] J. Bortz (1993) Statistik für Sozialwissenschaftler. 4. Aufl., Berlin:
Springer.
.. [2] L. Sachs (1997) Angewandte Statistik, New York: Springer.
.. [3] H. Scheffe (1953) A Method for Judging all Contrasts in the Analysis
of Variance. Biometrika 40, 87-110.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_scheffe(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
xi = x_grouped.mean()
si = x_grouped.var()
n = ni.sum()
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1.))
def compare(i, j):
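        # Scheffe statistic: F = (mean_i - mean_j)^2 / (s_in * (1/n_i + 1/n_j) * (k - 1)),
        # where s_in is the pooled within-group variance; it is referred to an
        # F(k - 1, N - k) distribution further below.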
dif = xi.loc[i] - xi.loc[j]
A = sin * (1. / ni.loc[i] + 1. / ni.loc[j]) * (groups.size - 1.)
f_val = dif ** 2. / A
return f_val
vs = np.zeros((groups.size, groups.size), dtype=float)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(groups.size), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_lower] = np.transpose(vs)[tri_lower]
p_values = ss.f.sf(vs, groups.size - 1., n - groups.size)
np.fill_diagonal(p_values, 1)
return DataFrame(p_values, index=groups, columns=groups)
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False):
'''Tamhane's T2 all-pairs comparison test for normally distributed data with
unequal variances. Tamhane's T2 test can be performed for all-pairs
comparisons in an one-factorial layout with normally distributed residuals
but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against the
alternative hypothesis [1]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
welch : bool, optional
If True, use Welch's approximate solution for calculating the degree of
freedom. T2 test uses the usual df = N - 2 approximation.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the t-distribution and adjusted according to
Dunn-Sidak.
References
----------
.. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of
Means with Unequal Variances. Journal of the American Statistical Association,
74, 471-480.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
xi = x_grouped.mean()
si = x_grouped.var()
def compare(i, j):
dif = xi[i] - xi[j]
A = si[i] / ni[i] + si[j] / ni[j]
t_val = dif / np.sqrt(A)
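        # Degrees of freedom: Welch-Satterthwaite approximation when welch=True,
        # otherwise the plain df = n_i + n_j - 2 used by the original T2 test.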
if welch:
df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.)))
else:
# checks according to Tamhane (1979, p. 474)
ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.)
ok2 = (9./10. <= (si[i] / ni[i]) / (si[j] / ni[j])) and\
((si[i] / ni[i]) / (si[j] / ni[j]) <= 10./9.)
ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and\
(1./2. <= (si[i] / ni[i]) / (si[j] / ni[j])) and\
((si[i] / ni[i]) / (si[j] / ni[j]) <= 2.)
ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and\
(3./4. <= (si[i] / ni[i]) / (si[j] / ni[j]))\
and ((si[i] / ni[i]) / (si[j] / ni[j]) <= 4./3.)
OK = any([ok1, ok2, ok3, ok4])
if not OK:
print("Sample sizes or standard errors are not balanced. T2 test is recommended.")
df = ni[i] + ni[j] - 2.
p_val = 2. * ss.t.sf(np.abs(t_val), df=df)
return p_val
vs = np.zeros((groups.size, groups.size), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(groups.size), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size
vs[tri_lower] = np.transpose(vs)[tri_lower]
vs[vs > 1] = 1
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_tukey(a, val_col: str = None,
group_col: str = None, sort: bool = False) -> DataFrame:
'''Performs Tukey's all-pairs comparisons test for normally distributed data
with equal group variances. For all-pairs comparisons in an
one-factorial layout with normally distributed residuals and equal variances
Tukey's test can be performed. A total of m = k(k-1)/2 hypotheses can be
tested. The null hypothesis is tested in the two-tailed test against
the alternative hypothesis [1]_, [2]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
result : pandas DataFrame
P values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] L. Sachs (1997) Angewandte Statistik, New York: Springer.
.. [2] J. Tukey (1949) Comparing Individual Means in the Analysis of Variance,
Biometrics 5, 99-114.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_tukey(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
ni = x_grouped.count()
n = ni.sum()
xi = x_grouped.mean()
si = x_grouped.var()
sin = 1. / (n - groups.size) * np.sum(si * (ni - 1))
def compare(i, j):
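        # Studentized range statistic: q = (mean_i - mean_j) /
        # sqrt(s_in / 2 * (1/n_i + 1/n_j)); p values are looked up with psturng.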
dif = xi[i] - xi[j]
A = sin * 0.5 * (1. / ni.loc[i] + 1. / ni.loc[j])
q_val = dif / np.sqrt(A)
return q_val
vs = np.zeros((groups.size, groups.size), dtype=float)
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:, :] = 0
combs = it.combinations(range(groups.size), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), groups.size, n - groups.size)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
def posthoc_dscf(a, val_col=None, group_col=None, sort=False):
'''Dwass, Steel, Critchlow and Fligner all-pairs comparison test for a
one-factorial layout with non-normally distributed residuals. As opposed to
the all-pairs comparison procedures that depend on Kruskal ranks, the DSCF
test is basically an extension of the U-test as re-ranking is conducted for
each pairwise test [1]_, [2]_, [3]_.
Parameters
----------
a : array_like or pandas DataFrame object
An array, any object exposing the array interface or a pandas
DataFrame.
val_col : str, optional
Name of a DataFrame column that contains dependent variable values (test
or response variable). Values should have a non-nominal scale. Must be
specified if `a` is a pandas DataFrame object.
group_col : str, optional
Name of a DataFrame column that contains independent variable values
(grouping or predictor variable). Values should have a nominal scale
(categorical). Must be specified if `a` is a pandas DataFrame object.
sort : bool, optional
If True, sort data by block and group columns.
Returns
-------
Pandas DataFrame containing p values.
Notes
-----
The p values are computed from the Tukey-distribution.
References
----------
.. [1] Douglas, C. E., Fligner, A. M. (1991) On distribution-free multiple
comparisons in the one-way analysis of variance, Communications in
Statistics - Theory and Methods, 20, 127-139.
.. [2] Dwass, M. (1960) Some k-sample rank-order tests. In Contributions to
Probability and Statistics, Edited by: I. Olkin, Stanford: Stanford
University Press.
.. [3] Steel, R. G. D. (1960) A rank sum test for comparing all pairs of
treatments, Technometrics, 2, 197-207.
Examples
--------
>>> import scikit_posthocs as sp
>>> import pandas as pd
>>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]})
>>> x = x.melt(var_name='groups', value_name='values')
>>> sp.posthoc_dscf(x, val_col='values', group_col='groups')
'''
x, _val_col, _group_col = __convert_to_df(a, val_col, group_col)
x = x.sort_values(by=[_group_col], ascending=True) if sort else x
groups = x[_group_col].unique()
x_grouped = x.groupby(_group_col)[_val_col]
n = x_grouped.count()
k = groups.size
def get_ties(x):
t = x.value_counts().values
c = np.sum((t ** 3 - t) / 12.)
return c
def compare(i, j):
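        # Each pair of groups is re-ranked together; the smaller Mann-Whitney U
        # is standardized with a tie-corrected variance and scaled by sqrt(2)
        # so it can be referred to the studentized range distribution.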
ni = n.loc[i]
nj = n.loc[j]
x_raw = x.loc[(x[_group_col] == i) | (x[_group_col] == j)].copy()
x_raw['ranks'] = x_raw.loc[:, _val_col].rank()
r = x_raw.groupby(_group_col)['ranks'].sum().loc[[j, i]]
u = np.array([nj * ni + (nj * (nj + 1) / 2),
nj * ni + (ni * (ni + 1) / 2)]) - r
u_min = np.min(u)
s = ni + nj
var = (nj*ni/(s*(s - 1.))) * ((s**3 - s)/12. - get_ties(x_raw['ranks']))
p = np.sqrt(2.) * (u_min - nj * ni / 2.) / np.sqrt(var)
return p
vs = np.zeros((k, k))
tri_upper = np.triu_indices(vs.shape[0], 1)
tri_lower = np.tril_indices(vs.shape[0], -1)
vs[:,:] = 0
combs = it.combinations(range(k), 2)
for i, j in combs:
vs[i, j] = compare(groups[i], groups[j])
vs[tri_upper] = psturng(np.abs(vs[tri_upper]), k, np.inf)
vs[tri_lower] = np.transpose(vs)[tri_lower]
np.fill_diagonal(vs, 1)
return DataFrame(vs, index=groups, columns=groups)
| gpl-3.0 | -6,722,613,776,829,086,000 | 35.764198 | 148 | 0.610618 | false | 3.315089 | true | false | false |
Canule/SwitchMouseSensitivity | Source/SwitchMouseSensitivity.py | 1 | 4196 | #v1.0.1
import win32gui, win32con, sys, os
import dataIO
sp_json = "setpoints.json"
sens = 0
def set_spi(spi, value):
value = int(value)
try:
cset=getattr(win32con,spi)
set = win32gui.SystemParametersInfo(cset, Param=value, WinIni=1)
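        # Note (assumption): WinIni=1 corresponds to SPIF_UPDATEINIFILE, i.e. the
        # new pointer speed is written to the user profile and not just applied
        # for the current session.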
return True
except WindowsError:
return False
def get_spi(spi):
try:
cget=getattr(win32con,spi)
value=win32gui.SystemParametersInfo(cget)
return value
except WindowsError:
return None
def checkValue(sens):
if sens.isdigit():
if int(sens) in range(1,21):
return True
else:
print ('setpoint out of range, 1...20')
return False
else:
print('Numbers only, 1...20')
return False
def writeSense(sens):
if set_spi('SPI_SETMOUSESPEED', str(sens)) == True: #MouseSensitivity = 1/20
print ('\nAdjusting sensitivity to {} went successful.'.format(sens))
else:
print ('Something went wrong while writing.')
if __name__ == '__main__':
#Appdata/Roaming
global ROAMING
setpoints = {}
APP_DIRNAME = 'SwitchMouseSensitivity'
if not os.path.exists(os.path.join(os.environ['APPDATA'],APP_DIRNAME)):
try:
os.mkdir(os.path.join(os.environ['APPDATA'],APP_DIRNAME))
ROAMING = os.path.join(os.environ['APPDATA'],APP_DIRNAME)
except:
print('Error creating settings folder')
ROAMING = os.environ['APPDATA']+'\\'+APP_DIRNAME+'\\'
dataIO.loadAndCheckSettings(ROAMING, True)
setpoints = dataIO.fileIO(ROAMING+sp_json, "load")
#Get current sensitivity and save to json
setpoints["curReg"] = setpoints["curReg"] = get_spi('SPI_GETMOUSESPEED')
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
print ('Current mouse sensitivity : '+str(setpoints["curReg"]))
#Check for run parameter
if len(sys.argv) > 1: #arguments is given
sens = sys.argv[1]
if checkValue(sens) == True:
print ('Changing the sensitivity by run parameter to: '+sens)
writeSense(sens)
os.system('pause')
exit(1)
#Read previous setpoint
if setpoints["lastSp"] == "sp2":
sens = setpoints["sp1"]
elif setpoints["lastSp"] == "sp1":
sens = setpoints["sp2"]
#Confirm switch setpoint
    if sens != 0:
print ('The setpoint before the previous one was {}, apply this value? y/n: '.format(sens), end='')
confirmInput = input()
else:
confirmInput = 'n'
if confirmInput == 'y' or confirmInput == '':
if setpoints["lastSp"] == "sp2":
sens = setpoints["sp1"]
setpoints["lastSp"] = setpoints["lastSp"] ='sp1'
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
elif setpoints["lastSp"] == "sp1":
sens = setpoints["sp2"]
setpoints["lastSp"] = setpoints["lastSp"] ='sp2'
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
elif confirmInput == 'n':
sens = input("Enter the preferred sensitivity : ")
if checkValue(sens) == True:
if setpoints["lastSp"] == "sp2" and setpoints["sp2"] is not '0':
setpoints["sp1"] = setpoints["sp1"] = sens
sens = setpoints["sp1"]
setpoints["lastSp"] = setpoints["lastSp"] ='sp1'
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
print('Setpoint {} is now registered'.format(sens))
elif setpoints["lastSp"] == "sp1" and setpoints["sp1"] is not '0':
setpoints["sp2"] = setpoints["sp2"] = sens
sens = setpoints["sp2"]
setpoints["lastSp"] = setpoints["lastSp"] ='sp2'
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
print('Setpoint {} is now registered'.format(sens))
else:#should be displayed only the first two sp changes
print('Setpoint {} is now registered'.format(sens))
writeSense(sens)
dataIO.fileIO(ROAMING+sp_json, "save", setpoints)
os.system('pause')
| mit | -4,711,427,286,756,214,000 | 35.807018 | 107 | 0.572927 | false | 3.546915 | false | false | false |
thejumono/ResumeGen | generate.py | 1 | 5082 | import json, os, sys, shutil
toVerifiy = ["config.json", "src/", "src/imgs/", "src/section.html", "src/style.css", "src/template.html"]
print("Verifying everything is good...\n")
# Making sure that every file needed is here
for v in toVerify:
if os.path.exists(v):
print("File " + v + " found!")
else:
print("File " + v + " is missing!")
print("Please refer to https://github.com/thejumono/ResumeGen for more information.")
sys.exit(1)
# Removing an old build directory if there's one.
if os.path.exists("build/"):
print("\nRemoving the last build.")
shutil.rmtree("build/")
print("Creating temporary folder...")
try:
os.mkdir("build")
print("Created successfully!")
except:
print("Couldn't create temporary folder!")
print("Check the folder permission...")
sys.exit(1)
print("\nGenerating your resume page...\n")
json_file = open("config.json", "r")
parsed_json = json.loads(json_file.read())
json_file.close()
template_html = open("src/template.html", "r").read()
# Basically generate the whole HTML file.
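# The template files use simple string placeholders such as {title}, {name},
# {section-container} and {about-me-title}; each one is filled in below with a
# plain str.replace() call.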
try:
print("\tRenaming some lines...")
template_html = template_html.replace("{title}", parsed_json["title"])
template_html = template_html.replace("{name}", parsed_json["name"])
section_container = "";
sections = 0;
print("\tCreating the sections:\n")
for p in parsed_json["projects"]:
print("\t\tGenerating the " + p + " section!")
section_file = open("src/section.html", "r")
section_template = section_file.read()
section_file.close()
print("\t\tSetting the section name.")
section_template = section_template.replace("{section-name}", p)
print("\t\tAdding some color to it.")
project_data = parsed_json["projects"][p]
section_template = section_template.replace("{section-background-color}", project_data["color"])
if sections % 2 == 1:
section_template = section_template.replace("{class}", 'class="reversed"')
else:
section_template = section_template.replace("{class}", '')
print("\t\tGenerating some text and links.")
section_about = "";
for a in project_data["about"]:
section_about += "<p>" + a + "</p>\n\t\t\t\t\t"
for l in project_data["links"]:
section_about = section_about.replace("<" + l + ">", '<a href="' + project_data["links"][l] + '">' + l + "</a>")
print("\t\tAdding the text and links.")
section_template = section_template.replace("{section-text}", section_about)
print("\t\tSetting the picture...")
if os.path.exists("src/imgs/" + project_data["filename"]):
print("\t\tThe " + project_data["filename"] + "file was found, setting it.")
else:
print("\t\tThe file " + project_data["filename"] + " wasn't found!")
print("\t\tThis won't cause any errors, but you should fix it manually.")
section_template = section_template.replace("{section-image-name}", project_data["filename"])
print("\t\tThe section " + p + " was generated succesfully!\n")
section_container += section_template + "\n"
sections += 1
print("\tAdding the sections to the template!")
template_html = template_html.replace("{section-container}", section_container)
print("\tEditing the \"about me\" section.\n")
about_info = parsed_json["about"]
print("\t\tAdding a title.")
template_html = template_html.replace("{about-me-title}", about_info["title"])
print("\t\tAdding some text.")
about_text = ""
for t in about_info["text"]:
about_text += "<p>" + t + "</p>\n\t\t\t\t\t"
print("\t\tAdding the contact links.")
contact_links = ""
for c in about_info["contact"]:
contact_links += "<p><a href=\"" + about_info["contact"][c]["link"] + "\">" + about_info["contact"][c]["account"] + "</a> (" + c + ")</p>\n\t\t\t\t\t"
print("\t\tAdding the text and links to the template.\n")
template_html = template_html.replace("{about-me-text}", about_text)
template_html = template_html.replace("{contact-links}", contact_links)
except:
print("Something went horribly wrong!")
print("Make sure that your config.json is correct.")
sys.exit(1)
print("\tHTML Generation is done!\n")
print("Exporting everything to the build folder.")
try:
index_html = open("build/index.html", "w+")
index_html.write(template_html)
except:
print("An error happened when writing the index.html file!")
print("Check if you have the correct permissions.")
sys.exit(1)
print("index.html generated...")
print("copying style.css")
try:
shutil.copy2("src/style.css", "build/")
except:
print("An error happened when copying the style.css file!")
print("Check if you have the correct permissions.")
sys.exit(1)
print("style.css copied!")
print("Copying images...")
try:
os.mkdir("build/imgs")
for img in os.listdir("src/imgs/"):
shutil.copy2("src/imgs/" + img, "build/imgs/")
except:
print("An error happened when copying images!")
print("Check if you have the correct permissions.")
sys.exit(1)
print("Images copied!\n")
print("Your new resume has been exported at " + os.getcwd() + "/build/\n")
print("Simply copy its content to your web server and it should work without any problem.")
print("Thank you for using ResumeGen!")
print("By @BestGirlZoey") | mit | 8,455,513,281,173,522,000 | 30.571429 | 152 | 0.671192 | false | 3.168329 | false | false | false |
COSMOGRAIL/PyCS | pycs/gen/util.py | 1 | 11221 | """
Various useful stuff.
For now there are some wrappers to pickle objects.
"""
import sys
import os
import glob
import gzip
import cPickle as pickle
import numpy as np
import math, datetime
import pycs.gen.lc
tracei = 1 # global variable, filename to write trace pkl.
def writepickle(obj, filepath, verbose=True, protocol = -1):
"""
I write your python object obj into a pickle file at filepath.
If filepath ends with .gz, I'll use gzip to compress the pickle.
Leave protocol = -1 : I'll use the latest binary protocol of pickle.
"""
if os.path.splitext(filepath)[1] == ".gz":
pkl_file = gzip.open(filepath, 'wb')
else:
pkl_file = open(filepath, 'wb')
pickle.dump(obj, pkl_file, protocol)
pkl_file.close()
if verbose: print "Wrote %s" % filepath
def readpickle(filepath, verbose=True):
"""
I read a pickle file and return whatever object it contains.
If the filepath ends with .gz, I'll unzip the pickle file.
"""
if os.path.splitext(filepath)[1] == ".gz":
pkl_file = gzip.open(filepath,'rb')
else:
pkl_file = open(filepath, 'rb')
obj = pickle.load(pkl_file)
pkl_file.close()
if verbose: print "Read %s" % filepath
return obj
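# Usage sketch (hypothetical file name): a ".gz" suffix switches both helpers
# to gzip-compressed pickles.
#   writepickle({"a": 1}, "obj.pkl.gz")
#   obj = readpickle("obj.pkl.gz")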
def oldwritepickle(obj, filepath):
"""
DO NOT USE ME ANYMORE
Simplistic wrapper around pickle, to writes an object into a file, using cpickle.
@type obj: object
@param obj: e.g. a dict of lightcurves
@type filepath: string
@param filepath: filename or path to write
"""
output = open(filepath, 'wb')
pickle.dump(obj, output)
output.close()
print "Wrote %s" % filepath
def oldreadpickle(filepath):
"""
DO NOT USE ME ANYMORE
Reads a pickle and returns it.
@type filepath: string
@param filepath: filename or path to read
@rtype: object
@return: whatever was in that pickle
"""
pkl_file = open(filepath, 'rb')
obj = pickle.load(pkl_file)
pkl_file.close()
print "Read %s" % filepath
return obj
def readidlist(filepath, verbose=True):
"""
Reads a textfile with "one point per line", probably a skiplists.
Accepts blank lines, and lines starting with # will not be read.
Format of the lines is : id [comment]
If this is a skiplist, id is a MJD.
"""
if not os.path.exists(filepath):
raise RuntimeError("File does not exist : %s" % (filepath))
myfile = open(filepath, "r")
lines = myfile.readlines()
	myfile.close()
table=[]
for line in lines:
if line[0] == '#' or len(line) < 4:
continue
if len(line.split()) > 1:
id = line.split()[0]
comment = line.split(None, 1)[1:][0].rstrip('\n')
else:
id = line.split()[0]
comment = ""
table.append([id, comment])
if verbose:
print "I've read %i lines from %s" % (len(table), os.path.basename(filepath))
return table
def trace(lclist=[], splist=[], tracedir = "trace", level="Full"):
"""
Function to save a "trace" of processes modifying lightcurves and splines, like optimizers do.
Just call this from inside your loop etc, I will save your current lightcurves and splines into a pickle inside the tracedir.
I increment the filenames.
The argument "level" is about what should be saved.
level = "Full" : Everything you give me is saved in the pickle. Now this is large ...
level = "Light" : I try to reduce filesize of the pickle, by removing the splines datapoints etc. You can still plot these objects.
"""
if not os.path.exists(tracedir):
os.mkdir(tracedir)
global tracei
filepath = os.path.join(tracedir, "%06i.pkl" % (tracei))
if os.path.exists(filepath):
raise RuntimeError("Sorry, I don't want to overwrite the existing trace inside '%s'." % (tracedir))
now = datetime.datetime.now()
writepickle({"lclist":lclist, "splist":splist, "datetime":now}, filepath, verbose=True, protocol = -1)
tracei += 1
def plottrace(tracedir = "trace", reset=False, showspl=True, **kwargs):
"""
Turns a trace into plots ...
reset = True : I will remove all shifts/ML etc, just show the real "observations".
kwargs are passed to the display function.
"""
tracepkls = glob.glob(os.path.join(tracedir, "??????.pkl"))
def plot(tracepkl):
pkl = readpickle(tracepkl, verbose=True)
if reset:
for l in pkl["lclist"]:
l.timeshift = 0.0
l.magshift = 0.0
l.fluxshift = 0.0
l.ml = None
if not showspl:
pkl["splist"] = []
#pycs.gen.lc.display(pkl["lclist"], pkl["splist"], title = pkl["datetime"], filename=tracepkl+".png", **kwargs)
# hmm, ugly datetime ...
shiftstxt = "(%s)" % "/".join(["%+.1f" % (getattr(l, "truetimeshift", 0.0)) for l in pkl["lclist"]])
titletxt = "%s %s %s" % (tracedir, "", shiftstxt)
pycs.gen.lc.display(pkl["lclist"], pkl["splist"], title = titletxt, filename=tracepkl+".png", **kwargs)
map(plot, tracepkls)
"""
if ncpu == 1:
map(plot, tracepkls)
else:
if ncpu == None:
ncpu = multiprocessing.cpu_count()
print "I will use %i CPUs." % (ncpu)
pool = multiprocessing.Pool(processes=ncpu)
answers = pool.map(plot, tracepkls)
"""
def multilcsexport(lclist, filepath, separator="\t", rdbunderline=True, verbose=True, properties=None):
"""
Writes the lightcurves as flat acscii files into one single file.
Normally you should prefer writing each lightcurve into a single file, using
:py:meth:`pycs.gen.lc.lightcurve.rdbexport`.
Note that only lightcurves of same length and sampling can be written with this function !
:param lclist: A list of lightcurve objects to write
:type lclist: list
:param filepath: where to write
:type filepath: string
:param separator: how to separate the collumns
:type separator: string
:param rdbunderline: do you want the "=====" underlining ?
:type rdbunderline: boolean
:param properties: properties of the lightcurves to include in the file.
:type properties: list of strings
.. todo:: We might need here an extra argument that specifies how to format the properties.
"""
import csv
# We start with a few tests to see if it is possible to write these lcs into a single file ...
commonjds = lclist[0].getjds()
lenlc = len(lclist[0])
for thislc in lclist:
thislc.validate() # Good idea to keep this here, as the code below is so ugly ...
if len(thislc) != lenlc:
print "First lightcurve has %i points" % len(commonjds)
raise RuntimeError, "Lightcurve %s has not the same length !" % str(thislc)
if not np.allclose(thislc.getjds(), commonjds, rtol=0.0, atol=1e-5):
raise RuntimeError, "Lightcurve %s has different epochs !" % str(thislc)
# Now we check the properties. At least a minimal check : they should be available for all the
# lightcurves.
if properties == None:
properties = []
for property in properties:
for l in lclist:
if not property in l.commonproperties():
raise RuntimeError, "Lightcurve %s has no property %s" % (l, property)
# We also have to check that all those properties are identical for all lcs !
firstprops = [p[property] for p in lclist[0].properties]
for l in lclist:
if not firstprops == [p[property] for p in l.properties]:
raise RuntimeError("Properties not identical !")
# Ok, now we prepare the data to write into that file.
colnames = []
data = []
colnames.append("mhjd")
data.append(["%.5f" % commonjd for commonjd in commonjds])
for thislc in lclist:
print str(thislc)
colnames.append("mag_" + thislc.object)
#data.append(["%09.5f" % mag for mag in thislc.getmags()])
data.append(["%.5f" % mag for mag in thislc.getmags()])
colnames.append("magerr_" + thislc.object)
data.append(["%.5f" % magerr for magerr in thislc.magerrs])
# And now the properties
for property in properties:
values = [p[property] for p in lclist[0].properties]
colnames.append(property)
data.append(values)
# We put all this together :
datatransposed = zip(*data) # Yep !
rdbunderlines = ["="*len(colname) for colname in colnames]
if rdbunderline:
biglist = [colnames, rdbunderlines]
else:
biglist = [colnames]
biglist.extend(datatransposed)
# biglist now contains the file items line by line.
# we write the file
csvwriter = csv.writer(open(filepath, 'w'), delimiter=separator, quotechar='"', quoting=csv.QUOTE_MINIMAL)
csvwriter.writerows(biglist)
if verbose:
print "Wrote the lightcurves into %s" % filepath
def datetimefromjd(JD):
"""
Copy and past from cosmouline.
Can be of use here to plot lightcurves with nice dates.
Returns the Gregorian calendar (i.e. our "normal" calendar)
Based on wikipedia:de and the interweb :-)
:type JD: float
:param JD: julian date
:rtype: datetime object
:returns: corresponding datetime
"""
if JD < 0:
raise ValueError, 'Julian Day must be positive'
dayofwk = int(math.fmod(int(JD + 1.5),7))
(F, Z) = math.modf(JD + 0.5)
Z = int(Z)
if JD < 2299160.5:
A = Z
else:
alpha = int((Z - 1867216.25)/36524.25)
A = Z + 1 + alpha - int(alpha/4)
B = A + 1524
C = int((B - 122.1)/365.25)
D = int(365.25 * C)
E = int((B - D)/30.6001)
day = B - D - int(30.6001 * E) + F
nday = B-D-123
if nday <= 305:
dayofyr = nday+60
else:
dayofyr = nday-305
if E < 14:
month = E - 1
else:
month = E - 13
if month > 2:
year = C - 4716
else:
year = C - 4715
# a leap year?
leap = 0
if year % 4 == 0:
leap = 1
if year % 100 == 0 and year % 400 != 0:
print year % 100, year % 400
leap = 0
if leap and month > 2:
dayofyr = dayofyr + leap
# Convert fractions of a day to time
(dfrac, days) = math.modf(day/1.0)
(hfrac, hours) = math.modf(dfrac * 24.0)
(mfrac, minutes) = math.modf(hfrac * 60.0)
seconds = round(mfrac * 60.0) # seconds are rounded
if seconds > 59:
seconds = 0
minutes = minutes + 1
if minutes > 59:
minutes = 0
hours = hours + 1
if hours > 23:
hours = 0
days = days + 1
return datetime.datetime(year,month,int(days),int(hours),int(minutes),int(seconds))
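# Example (standard JD convention): datetimefromjd(2451545.0) gives the
# J2000.0 epoch, i.e. datetime(2000, 1, 1, 12, 0, 0).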
def flatten(x):
"""
Source : http://kogs-www.informatik.uni-hamburg.de/~meine/python_tricks
flatten(sequence) -> list
Returns a single, flat list which contains all elements retrieved
from the sequence and all recursively contained sub-sequences
(iterables).
Examples:
::
>>> [1, 2, [3,4], (5,6)]
[1, 2, [3, 4], (5, 6)]
>>> flatten([[[1,2,3], (42,None)], [4,5], [6], 7, MyVector(8,9,10)])
[1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]
"""
result = []
for el in x:
#if isinstance(el, (list, tuple)):
if hasattr(el, "__iter__") and not isinstance(el, basestring):
result.extend(flatten(el))
else:
result.append(el)
return result
def strtd(td):
"""
To print out time differences ...
Could be improved a bit :-)
"""
strdiff = str(td) # looks like 0:02:04.43353
return strdiff.split(".")[0]
def zipdirs(pattern = "rrs_*"):
"""
I will tgz all directories matching the pattern, except if the tgz already exists.
(handy to transfert multirun output to another computer ...)
"""
matches = sorted(glob.glob(pattern))
for match in matches:
if not os.path.isdir(match):
continue
if os.path.islink(match):
continue
zipdirpath = match + ".tgz"
if os.path.exists(zipdirpath):
continue
nfiles = len(glob.glob(os.path.join(match, "*")))
print "%s (%i) -> %s" % (match, nfiles, zipdirpath)
cmd = "tar zcvf %s %s" % (zipdirpath, match)
os.system(cmd)
| gpl-3.0 | 8,631,004,929,809,384,000 | 25.21729 | 132 | 0.667409 | false | 2.938973 | false | false | false |
GeosoftInc/gxapi | spec/ps/NGRD.py | 1 | 4092 | from .. import Availability, Class, Constant, Define, Method, Parameter, Type
gx_class = Class('NGRD',
doc="Neargrid")
gx_defines = [
]
gx_methods = {
'': [
Method('_Clear_NGRD', module='geocslib', version='6.0.0',
availability=Availability.EXTENSION,
doc="Clears all the parameters in a NGRD object",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="NGRD",
doc="Handle to NGRD object (stores control parameters)")
]),
Method('Create_NGRD', module='geocslib', version='6.0.0',
availability=Availability.EXTENSION,
doc="Create a NGRD.",
return_type="NGRD",
return_doc="NGRD if OK (NULL if error)"),
Method('Destroy_NGRD', module='geocslib', version='6.0.0',
availability=Availability.EXTENSION,
doc="Destroy a NGRD.",
return_type=Type.VOID,
return_doc="Nothing",
parameters = [
Parameter('param0', type="NGRD",
doc="NGRD to destroy.")
]),
Method('iLoadParms_NGRD', module='geocslib', version='6.3.0',
availability=Availability.EXTENSION,
notes="""
If the control file name passed into this function is a file
which does not exist, then the defaults for a Neargrid control
file will be generated and put into the NGRD object.
Otherwise, the control file's settings are retrieved from
the file and loaded into the NGRD object.
""",
doc="Retrieves a Neargrid object's control parameters from a file.",
return_type=Type.INT32_T,
return_doc="0 OK, 1 Error.",
parameters = [
Parameter('param0', type="NGRD",
doc="NGRD to load parameter settings into"),
Parameter('param1', type=Type.STRING,
doc="Name of file to get the parameter settings from")
]),
Method('iRun_NGRD', module='geocslib', version='6.3.0',
availability=Availability.EXTENSION,
doc="""
Executes the Neargrid program, using the input channel and
output file parameters.
""",
return_type=Type.INT32_T,
return_doc="0 OK, 1 Error.",
parameters = [
Parameter('param0', type="NGRD",
doc="Handle to NGRD object (stores control parameters)"),
Parameter('param1', type=Type.STRING,
doc="Name of Z Channel to perfrom gridding on"),
Parameter('param2', type="DAT",
doc="Handle to source DAT object (from database)"),
Parameter('param3', type="DAT",
doc="Handle to output grid file DAT")
]),
Method('iSaveParms_NGRD', module='geocslib', version='6.3.0',
availability=Availability.EXTENSION,
notes="""
If the control file did not previously exist, it will be
created. Otherwise, the old file will be overwritten.
""",
doc="""
Puts the Neargrid object's control parameters back into
its control file.
""",
return_type=Type.INT32_T,
return_doc="0 OK, 1 Error.",
parameters = [
Parameter('param0', type="NGRD",
doc="NGRD object to get parameters from and put into the control file"),
Parameter('param1', type=Type.STRING,
doc="Name of file to put the parameter settings into")
])
]
}
| bsd-2-clause | -6,933,728,253,875,308,000 | 41.185567 | 101 | 0.495357 | false | 4.848341 | false | false | false |
wxgeo/geophar | wxgeometrie/sympy/printing/tests/test_octave.py | 3 | 13617 | from sympy.core import (S, pi, oo, symbols, Function, Rational, Integer,
Tuple, Symbol)
from sympy.core import EulerGamma, GoldenRatio, Catalan, Lambda, Mul, Pow
from sympy.functions import (Piecewise, sqrt, ceiling, exp, sin, cos, LambertW,
sinc, Max, Min, arg, im, re)
from sympy.utilities.pytest import raises
from sympy.utilities.lambdify import implemented_function
from sympy.matrices import (eye, Matrix, MatrixSymbol, Identity,
HadamardProduct, SparseMatrix)
from sympy.functions.special.bessel import (jn, yn, besselj, bessely, besseli,
besselk, hankel1, hankel2, airyai,
airybi, airyaiprime, airybiprime)
from sympy.functions.special.gamma_functions import (lowergamma, uppergamma)
from sympy.utilities.pytest import XFAIL
from sympy.core.compatibility import range
from sympy import octave_code
from sympy import octave_code as mcode
x, y, z = symbols('x,y,z')
def test_Integer():
assert mcode(Integer(67)) == "67"
assert mcode(Integer(-1)) == "-1"
def test_Rational():
assert mcode(Rational(3, 7)) == "3/7"
assert mcode(Rational(18, 9)) == "2"
assert mcode(Rational(3, -7)) == "-3/7"
assert mcode(Rational(-3, -7)) == "3/7"
assert mcode(x + Rational(3, 7)) == "x + 3/7"
assert mcode(Rational(3, 7)*x) == "3*x/7"
def test_Function():
assert mcode(sin(x) ** cos(x)) == "sin(x).^cos(x)"
assert mcode(abs(x)) == "abs(x)"
assert mcode(ceiling(x)) == "ceil(x)"
assert mcode(arg(x)) == "angle(x)"
assert mcode(im(x)) == "imag(x)"
assert mcode(re(x)) == "real(x)"
assert mcode(Max(x, y) + Min(x, y)) == "max(x, y) + min(x, y)"
assert mcode(Max(x, y, z)) == "max(x, max(y, z))"
assert mcode(Min(x, y, z)) == "min(x, min(y, z))"
def test_Pow():
assert mcode(x**3) == "x.^3"
assert mcode(x**(y**3)) == "x.^(y.^3)"
assert mcode(x**Rational(2, 3)) == 'x.^(2/3)'
g = implemented_function('g', Lambda(x, 2*x))
assert mcode(1/(g(x)*3.5)**(x - y**x)/(x**2 + y)) == \
"(3.5*2*x).^(-x + y.^x)./(x.^2 + y)"
# For issue 14160
assert mcode(Mul(-2, x, Pow(Mul(y,y,evaluate=False), -1, evaluate=False),
evaluate=False)) == '-2*x./(y.*y)'
def test_basic_ops():
assert mcode(x*y) == "x.*y"
assert mcode(x + y) == "x + y"
assert mcode(x - y) == "x - y"
assert mcode(-x) == "-x"
def test_1_over_x_and_sqrt():
# 1.0 and 0.5 would do something different in regular StrPrinter,
# but these are exact in IEEE floating point so no different here.
assert mcode(1/x) == '1./x'
assert mcode(x**-1) == mcode(x**-1.0) == '1./x'
assert mcode(1/sqrt(x)) == '1./sqrt(x)'
assert mcode(x**-S.Half) == mcode(x**-0.5) == '1./sqrt(x)'
assert mcode(sqrt(x)) == 'sqrt(x)'
assert mcode(x**S.Half) == mcode(x**0.5) == 'sqrt(x)'
assert mcode(1/pi) == '1/pi'
assert mcode(pi**-1) == mcode(pi**-1.0) == '1/pi'
assert mcode(pi**-0.5) == '1/sqrt(pi)'
def test_mix_number_mult_symbols():
assert mcode(3*x) == "3*x"
assert mcode(pi*x) == "pi*x"
assert mcode(3/x) == "3./x"
assert mcode(pi/x) == "pi./x"
assert mcode(x/3) == "x/3"
assert mcode(x/pi) == "x/pi"
assert mcode(x*y) == "x.*y"
assert mcode(3*x*y) == "3*x.*y"
assert mcode(3*pi*x*y) == "3*pi*x.*y"
assert mcode(x/y) == "x./y"
assert mcode(3*x/y) == "3*x./y"
assert mcode(x*y/z) == "x.*y./z"
assert mcode(x/y*z) == "x.*z./y"
assert mcode(1/x/y) == "1./(x.*y)"
assert mcode(2*pi*x/y/z) == "2*pi*x./(y.*z)"
assert mcode(3*pi/x) == "3*pi./x"
assert mcode(S(3)/5) == "3/5"
assert mcode(S(3)/5*x) == "3*x/5"
assert mcode(x/y/z) == "x./(y.*z)"
assert mcode((x+y)/z) == "(x + y)./z"
assert mcode((x+y)/(z+x)) == "(x + y)./(x + z)"
assert mcode((x+y)/EulerGamma) == "(x + y)/%s" % EulerGamma.evalf(17)
assert mcode(x/3/pi) == "x/(3*pi)"
assert mcode(S(3)/5*x*y/pi) == "3*x.*y/(5*pi)"
def test_mix_number_pow_symbols():
assert mcode(pi**3) == 'pi^3'
assert mcode(x**2) == 'x.^2'
assert mcode(x**(pi**3)) == 'x.^(pi^3)'
assert mcode(x**y) == 'x.^y'
assert mcode(x**(y**z)) == 'x.^(y.^z)'
assert mcode((x**y)**z) == '(x.^y).^z'
def test_imag():
I = S('I')
assert mcode(I) == "1i"
assert mcode(5*I) == "5i"
assert mcode((S(3)/2)*I) == "3*1i/2"
assert mcode(3+4*I) == "3 + 4i"
assert mcode(sqrt(3)*I) == "sqrt(3)*1i"
def test_constants():
assert mcode(pi) == "pi"
assert mcode(oo) == "inf"
assert mcode(-oo) == "-inf"
assert mcode(S.NegativeInfinity) == "-inf"
assert mcode(S.NaN) == "NaN"
assert mcode(S.Exp1) == "exp(1)"
assert mcode(exp(1)) == "exp(1)"
def test_constants_other():
assert mcode(2*GoldenRatio) == "2*(1+sqrt(5))/2"
assert mcode(2*Catalan) == "2*%s" % Catalan.evalf(17)
assert mcode(2*EulerGamma) == "2*%s" % EulerGamma.evalf(17)
def test_boolean():
assert mcode(x & y) == "x & y"
assert mcode(x | y) == "x | y"
assert mcode(~x) == "~x"
assert mcode(x & y & z) == "x & y & z"
assert mcode(x | y | z) == "x | y | z"
assert mcode((x & y) | z) == "z | x & y"
assert mcode((x | y) & z) == "z & (x | y)"
def test_Matrices():
assert mcode(Matrix(1, 1, [10])) == "10"
A = Matrix([[1, sin(x/2), abs(x)],
[0, 1, pi],
[0, exp(1), ceiling(x)]]);
expected = "[1 sin(x/2) abs(x); 0 1 pi; 0 exp(1) ceil(x)]"
assert mcode(A) == expected
# row and columns
assert mcode(A[:,0]) == "[1; 0; 0]"
assert mcode(A[0,:]) == "[1 sin(x/2) abs(x)]"
# empty matrices
assert mcode(Matrix(0, 0, [])) == '[]'
assert mcode(Matrix(0, 3, [])) == 'zeros(0, 3)'
# annoying to read but correct
assert mcode(Matrix([[x, x - y, -y]])) == "[x x - y -y]"
def test_vector_entries_hadamard():
    # For a row or column, the user might want to use the other dimension
A = Matrix([[1, sin(2/x), 3*pi/x/5]])
assert mcode(A) == "[1 sin(2./x) 3*pi./(5*x)]"
assert mcode(A.T) == "[1; sin(2./x); 3*pi./(5*x)]"
@XFAIL
def test_Matrices_entries_not_hadamard():
# For Matrix with col >= 2, row >= 2, they need to be scalars
    # FIXME: is it worth worrying about this? It's not wrong; just
    # leave it as the user's responsibility to put scalar data for x.
A = Matrix([[1, sin(2/x), 3*pi/x/5], [1, 2, x*y]])
expected = ("[1 sin(2/x) 3*pi/(5*x);\n"
"1 2 x*y]") # <- we give x.*y
assert mcode(A) == expected
def test_MatrixSymbol():
n = Symbol('n', integer=True)
A = MatrixSymbol('A', n, n)
B = MatrixSymbol('B', n, n)
assert mcode(A*B) == "A*B"
assert mcode(B*A) == "B*A"
assert mcode(2*A*B) == "2*A*B"
assert mcode(B*2*A) == "2*B*A"
assert mcode(A*(B + 3*Identity(n))) == "A*(3*eye(n) + B)"
assert mcode(A**(x**2)) == "A^(x.^2)"
assert mcode(A**3) == "A^3"
assert mcode(A**(S.Half)) == "A^(1/2)"
def test_special_matrices():
assert mcode(6*Identity(3)) == "6*eye(3)"
def test_containers():
assert mcode([1, 2, 3, [4, 5, [6, 7]], 8, [9, 10], 11]) == \
"{1, 2, 3, {4, 5, {6, 7}}, 8, {9, 10}, 11}"
assert mcode((1, 2, (3, 4))) == "{1, 2, {3, 4}}"
assert mcode([1]) == "{1}"
assert mcode((1,)) == "{1}"
assert mcode(Tuple(*[1, 2, 3])) == "{1, 2, 3}"
assert mcode((1, x*y, (3, x**2))) == "{1, x.*y, {3, x.^2}}"
# scalar, matrix, empty matrix and empty list
assert mcode((1, eye(3), Matrix(0, 0, []), [])) == "{1, [1 0 0; 0 1 0; 0 0 1], [], {}}"
def test_octave_noninline():
source = mcode((x+y)/Catalan, assign_to='me', inline=False)
expected = (
"Catalan = %s;\n"
"me = (x + y)/Catalan;"
) % Catalan.evalf(17)
assert source == expected
def test_octave_piecewise():
expr = Piecewise((x, x < 1), (x**2, True))
assert mcode(expr) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))"
assert mcode(expr, assign_to="r") == (
"r = ((x < 1).*(x) + (~(x < 1)).*(x.^2));")
assert mcode(expr, assign_to="r", inline=False) == (
"if (x < 1)\n"
" r = x;\n"
"else\n"
" r = x.^2;\n"
"end")
expr = Piecewise((x**2, x < 1), (x**3, x < 2), (x**4, x < 3), (x**5, True))
expected = ("((x < 1).*(x.^2) + (~(x < 1)).*( ...\n"
"(x < 2).*(x.^3) + (~(x < 2)).*( ...\n"
"(x < 3).*(x.^4) + (~(x < 3)).*(x.^5))))")
assert mcode(expr) == expected
assert mcode(expr, assign_to="r") == "r = " + expected + ";"
assert mcode(expr, assign_to="r", inline=False) == (
"if (x < 1)\n"
" r = x.^2;\n"
"elseif (x < 2)\n"
" r = x.^3;\n"
"elseif (x < 3)\n"
" r = x.^4;\n"
"else\n"
" r = x.^5;\n"
"end")
# Check that Piecewise without a True (default) condition error
expr = Piecewise((x, x < 1), (x**2, x > 1), (sin(x), x > 0))
raises(ValueError, lambda: mcode(expr))
def test_octave_piecewise_times_const():
pw = Piecewise((x, x < 1), (x**2, True))
assert mcode(2*pw) == "2*((x < 1).*(x) + (~(x < 1)).*(x.^2))"
assert mcode(pw/x) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))./x"
assert mcode(pw/(x*y)) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))./(x.*y)"
assert mcode(pw/3) == "((x < 1).*(x) + (~(x < 1)).*(x.^2))/3"
def test_octave_matrix_assign_to():
A = Matrix([[1, 2, 3]])
assert mcode(A, assign_to='a') == "a = [1 2 3];"
A = Matrix([[1, 2], [3, 4]])
assert mcode(A, assign_to='A') == "A = [1 2; 3 4];"
def test_octave_matrix_assign_to_more():
# assigning to Symbol or MatrixSymbol requires lhs/rhs match
A = Matrix([[1, 2, 3]])
B = MatrixSymbol('B', 1, 3)
C = MatrixSymbol('C', 2, 3)
assert mcode(A, assign_to=B) == "B = [1 2 3];"
raises(ValueError, lambda: mcode(A, assign_to=x))
raises(ValueError, lambda: mcode(A, assign_to=C))
def test_octave_matrix_1x1():
A = Matrix([[3]])
B = MatrixSymbol('B', 1, 1)
C = MatrixSymbol('C', 1, 2)
assert mcode(A, assign_to=B) == "B = 3;"
# FIXME?
#assert mcode(A, assign_to=x) == "x = 3;"
raises(ValueError, lambda: mcode(A, assign_to=C))
def test_octave_matrix_elements():
A = Matrix([[x, 2, x*y]])
assert mcode(A[0, 0]**2 + A[0, 1] + A[0, 2]) == "x.^2 + x.*y + 2"
A = MatrixSymbol('AA', 1, 3)
assert mcode(A) == "AA"
assert mcode(A[0, 0]**2 + sin(A[0,1]) + A[0,2]) == \
"sin(AA(1, 2)) + AA(1, 1).^2 + AA(1, 3)"
assert mcode(sum(A)) == "AA(1, 1) + AA(1, 2) + AA(1, 3)"
def test_octave_boolean():
assert mcode(True) == "true"
assert mcode(S.true) == "true"
assert mcode(False) == "false"
assert mcode(S.false) == "false"
def test_octave_not_supported():
assert mcode(S.ComplexInfinity) == (
"% Not supported in Octave:\n"
"% ComplexInfinity\n"
"zoo"
)
f = Function('f')
assert mcode(f(x).diff(x)) == (
"% Not supported in Octave:\n"
"% Derivative\n"
"Derivative(f(x), x)"
)
def test_trick_indent_with_end_else_words():
# words starting with "end" or "else" do not confuse the indenter
t1 = S('endless');
t2 = S('elsewhere');
pw = Piecewise((t1, x < 0), (t2, x <= 1), (1, True))
assert mcode(pw, inline=False) == (
"if (x < 0)\n"
" endless\n"
"elseif (x <= 1)\n"
" elsewhere\n"
"else\n"
" 1\n"
"end")
def test_hadamard():
A = MatrixSymbol('A', 3, 3)
B = MatrixSymbol('B', 3, 3)
v = MatrixSymbol('v', 3, 1)
h = MatrixSymbol('h', 1, 3)
C = HadamardProduct(A, B)
assert mcode(C) == "A.*B"
assert mcode(C*v) == "(A.*B)*v"
assert mcode(h*C*v) == "h*(A.*B)*v"
assert mcode(C*A) == "(A.*B)*A"
# mixing Hadamard and scalar strange b/c we vectorize scalars
assert mcode(C*x*y) == "(x.*y)*(A.*B)"
def test_sparse():
M = SparseMatrix(5, 6, {})
M[2, 2] = 10;
M[1, 2] = 20;
M[1, 3] = 22;
M[0, 3] = 30;
M[3, 0] = x*y;
assert mcode(M) == (
"sparse([4 2 3 1 2], [1 3 3 4 4], [x.*y 20 10 30 22], 5, 6)"
)
def test_sinc():
assert mcode(sinc(x)) == 'sinc(x/pi)'
assert mcode(sinc((x + 3))) == 'sinc((x + 3)/pi)'
assert mcode(sinc(pi*(x + 3))) == 'sinc(x + 3)'
def test_specfun():
n = Symbol('n')
for f in [besselj, bessely, besseli, besselk]:
assert octave_code(f(n, x)) == f.__name__ + '(n, x)'
assert octave_code(hankel1(n, x)) == 'besselh(n, 1, x)'
assert octave_code(hankel2(n, x)) == 'besselh(n, 2, x)'
assert octave_code(airyai(x)) == 'airy(0, x)'
assert octave_code(airyaiprime(x)) == 'airy(1, x)'
assert octave_code(airybi(x)) == 'airy(2, x)'
assert octave_code(airybiprime(x)) == 'airy(3, x)'
assert octave_code(uppergamma(n, x)) == 'gammainc(x, n, \'upper\')'
assert octave_code(lowergamma(n, x)) == 'gammainc(x, n, \'lower\')'
assert octave_code(jn(n, x)) == 'sqrt(2)*sqrt(pi)*sqrt(1./x).*besselj(n + 1/2, x)/2'
assert octave_code(yn(n, x)) == 'sqrt(2)*sqrt(pi)*sqrt(1./x).*bessely(n + 1/2, x)/2'
assert octave_code(LambertW(x)) == 'lambertw(x)'
assert octave_code(LambertW(x, n)) == 'lambertw(n, x)'
def test_MatrixElement_printing():
# test cases for issue #11821
A = MatrixSymbol("A", 1, 3)
B = MatrixSymbol("B", 1, 3)
C = MatrixSymbol("C", 1, 3)
assert mcode(A[0, 0]) == "A(1, 1)"
assert mcode(3 * A[0, 0]) == "3*A(1, 1)"
F = C[0, 0].subs(C, A - B)
assert mcode(F) == "(-B + A)(1, 1)"
| gpl-2.0 | 5,848,072,156,838,027,000 | 33.0425 | 91 | 0.507895 | false | 2.484401 | true | false | false |
alviproject/alvi | alvi/tests/test_client/test_graph.py | 1 | 3376 | import logging
import unittest
from selenium.webdriver.common.by import By
import alvi.tests.pages as pages
from alvi.tests.test_client.base import TestContainer
logger = logging.getLogger(__name__)
class TestGraph(TestContainer):
def test_create_node(self):
graph_page = pages.Graph(self._browser.driver, "GraphCreateNode")
graph_page.run(options=dict(n=4))
self.assertEqual(4, len(graph_page.svg.nodes), "create_node does not work properly")
self.assertEqual(4, len(graph_page.svg.edges), "create_edge does not work properly")
node_values = [int(element.find_element(By.CSS_SELECTOR, "text").text) for element in graph_page.svg.nodes]
node_values.sort()
created = node_values[:3]
self.assertEqual([0, 1, 2], created, "create_node does not work properly")
@unittest.skip("graph container does not support updating nodes at the moment")
def test_update_node(self):
graph_page = pages.Graph(self._browser.driver, "GraphUpdateNode")
graph_page.run()
updated = list(graph_page.svg.node_values)[3]
self.assertEqual(10, updated, "update_node does not work properly")
@unittest.skip("graph container does not support removing nodes at the moment")
def test_remove_node(self):
graph_page = pages.Graph(self._browser.driver, "GraphRemoveNode")
graph_page.run()
self.assertEqual(3, len(graph_page.svg.nodes), "remove_node does not work properly")
node_values = list(graph_page.svg.node_values)
node_values.sort()
self.assertEqual([0, 1, 2], node_values, "remove_node does not work properly")
def test_multi_marker(self):
graph_page = pages.Graph(self._browser.driver, "GraphAddMultiMarker")
graph_page.run(options=dict(n=4))
marker = [e for e in graph_page.svg.nodes if e.find_element(By.CSS_SELECTOR, "text").text == "multi marker"]
self.assertEquals(1, len(marker), "multi_marker was not created successfully")
        # marked nodes have a different color
marker = marker[0]
color = marker.value_of_css_property("stroke")
colors = map(lambda e: e.value_of_css_property("stroke"), graph_page.svg.nodes)
marked = [c for c in colors if c == color]
#expect 2 marked nodes + 1 node of multi_marker itself
self.assertEquals(3, len(marked), "nodes were not successfully added to multi_marker")
def test_marker(self):
graph_page = pages.Graph(self._browser.driver, "GraphMarker")
graph_page.run(options=dict(n=4))
marker0 = [e for e in graph_page.svg.nodes if e.find_element(By.CSS_SELECTOR, "text").text == "marker 0"]
marker1 = [e for e in graph_page.svg.nodes if e.find_element(By.CSS_SELECTOR, "text").text == "marker 1"]
self.assertEquals(1, len(marker0), "marker 0 was not created successfully")
self.assertEquals(1, len(marker0), "marker 1 was not created successfully")
        # marked nodes have a different color
marker = marker0[0]
color = marker.value_of_css_property("stroke")
colors = map(lambda e: e.value_of_css_property("stroke"), graph_page.svg.nodes)
marked = [c for c in colors if c == color]
#expect 1 marked nodes + 1 node of marker itself
self.assertEquals(2, len(marked), "node was not successfully marked")
| mit | 5,794,634,751,119,835,000 | 45.246575 | 116 | 0.668246 | false | 3.673558 | true | false | false |
qsnake/abinit | util/users/xmlTagger.py | 4 | 4494 | #=================================
# xmlTagger.py
version = '1.0'
#=================================
# last modified : january 17 2006
# written by Benjamin Tardif
# [email protected]
#=================================
header = '\n#==============\n# xmlTagger.py\n# version %s\n#==============' %version
#====================================================================================================
#IMPORTS
import os
import sys
#====================================================================================================
#====================================================================================================
#METHODS
def detectfile(filename,path): # type(filename) = type(path) = string
# method detectfile returns True if the specified file is found in the specified path
return filename in os.listdir(path)
def clean(list): # type(list) = list of strings
# method clean removes character strings '\n' and '\r' and empty lines from a string list
# (the string list is usually obtained with the ".readlines()" method)
L = len(list)
for i in range(L):
list[L-1-i] = list[L-1-i].replace('\n','')
list[L-1-i] = list[L-1-i].replace('\r','')
if list[L-1-i].split() == []:
list.pop(L-1-i)
#====================================================================================================
#----------------------------------------------------------------------------------------------------
#MAIN
print header
#====================================================================================================
#COMMAND LINE
#get xmlfilename
if len(sys.argv) > 2:
# user entered too many arguments in the command line
print '\n- ERROR -\ntoo many arguments in the command line'
sys.exit()
elif len(sys.argv) == 2:
# user entered the xmlfilename in the command line
xmlfilename = sys.argv[1]
else:
# user entered no xmlfilename in the command line
xmlfilename = raw_input('\nEnter the name of the xml file to tag :\n')
#abort if file not found
if detectfile(xmlfilename,'.') == False:
print '\n- ERROR -\nfile not found\n'
sys.exit()
#abort if the file is not a xml file
if xmlfilename[-4:] != '.xml':
print '\n- ERROR -\nyou must enter a xml file (*.xml)\n'
sys.exit()
#abort if the file is already a tagged xml file
if xmlfilename[-8:] == '_tag.xml':
print '\n- ERROR -\nthis file is already tagged\n'
sys.exit()
#====================================================================================================
#====================================================================================================
#READ AND TREAT THE FILE
#read the file
reader = open(xmlfilename,'r')
filedata = reader.readlines()
reader.close()
clean(filedata)
#for each line, remove all characters before '<' and after '>'
for i in range(len(filedata)):
while filedata[i][0] != '<':
filedata[i] = filedata[i][1:]
while filedata[i][-1] != '>':
filedata[i] = filedata[i][:-1]
#compute len_max (number of digits of the number of the last line of the xml file)
len_max = len(str(len(filedata)))
#compute tagxmlfilename (name of the tagged xml file)
tagxmlfilename = xmlfilename[:-4]+'_tag.xml'
#====================================================================================================
#====================================================================================================
#WRITE THE TAGGED XML FILE
writer = open(tagxmlfilename,'w')
tag=0
for line in filedata:
if line.split()[0][1] == '/':
# </Element>
tag-=0
len_tag = len(str(tag))
writer.write((len_max+7)*' '+'%s\n' %line)
elif line.split()[-1][-2] == '/':
# <Element/>
tag+=1
len_tag = len(str(tag))
writer.write((len_max-len_tag)*' '+'<!--%i-->'%tag+line[:-2]+" tag='%i'/>\n"%tag)
else:
# <Element>
tag+=1
len_tag = len(str(tag))
writer.write((len_max-len_tag)*' '+'<!--%i-->'%tag+line[:-1]+" tag='%i'>\n"%tag)
writer.close()
print '\n"%s" file created successfully\n' %tagxmlfilename
#====================================================================================================
#----------------------------------------------------------------------------------------------------
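# Example invocation (illustrative): "python xmlTagger.py notes.xml" produces
# notes_tag.xml in which each opening or self-closing element is preceded by a
# right-aligned <!--N--> comment and receives a tag='N' attribute; closing tags
# are only re-indented.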
| gpl-3.0 | 3,014,264,779,602,103,000 | 33.666667 | 101 | 0.414553 | false | 4.126722 | false | false | false |
thisismedium/md | md/stm/journal.py | 1 | 5664 | from __future__ import absolute_import
import copy, threading
from ..prelude import *
from .interfaces import Cursor, Journal, Memory, Change, CannotCommit
from .log import log, weaklog
__all__ = (
'memory', 'journal',
'readable_state', 'original_state', 'writable_state',
'change_state', 'copy_state', 'commit_transaction',
'change', 'Deleted', 'Inserted',
'good', 'verify_read', 'verify_write', 'unverified_write'
)
### Generic Operations
def readable_state(journal, cursor, *default):
return good(journal.readable_state, cursor, *default)
def original_state(journal, cursor, *default):
return good(journal.original_state, cursor, *default)
def writable_state(journal, cursor):
return good(journal.writable_state, cursor)
def change_state(method, what, *args, **kwargs):
if isinstance(what, Cursor):
method(what, *args, **kwargs)
else:
## cache cursors in a list so the log can be modified.
for cursor in list(what):
method(cursor, *args, **kwargs)
def commit_transaction(source, nested):
if source is nested:
raise RuntimeError("A journal can't be committed to itself.")
source.commit_transaction(nested)
### Journals
class change(namedtuple('changes', 'cursor orig state'), Change):
pass
class journal(Journal):
LogType = log
name = None
source = None
def __init__(self, name, source):
self.name = name
self.source = source
self.read_log = self.LogType()
self.write_log = self.LogType()
def __repr__(self):
return '<%s %s>' % (type(self).__name__, str(self))
def __str__(self):
return self.name
def make_journal(self, name):
return type(self)(name, self)
def allocate(self, cursor, state):
self.write_log.allocate(cursor, state)
return cursor
def readable_state(self, cursor):
try:
return self.write_log[cursor]
except KeyError:
return self.original_state(cursor)
def original_state(self, cursor):
try:
return self.read_log[cursor]
except KeyError:
state = good(self.source.readable_state, cursor, Inserted)
self.read_log[cursor] = state
return state
def writable_state(self, cursor):
try:
return self.write_log[cursor]
except KeyError:
state = copy_state(self.original_state(cursor))
self.write_log[cursor] = state
return state
def delete_state(self, cursor):
self.write_log[cursor] = Deleted
def rollback_state(self, cursor):
self.write_log.pop(cursor, None)
def commit_transaction(self, trans):
## A journal is single-threaded; state can be blindly copied
## in.
for (cursor, orig, state) in trans.changed():
            self.write_log[cursor] = state
def original(self):
return iter(self.read_log)
def changed(self):
return (
change(k, get_state(self.read_log, k), v)
for (k, v) in self.write_log
)
class memory(Memory):
JournalType = journal
LogType = weaklog
name = None
def __init__(self, name='*memory*', check_read=True, check_write=True):
self.name = name
self.write_lock = threading.RLock()
self.check_read = check_read
self.check_write = check_write
self.mem = self.LogType()
def __repr__(self):
return '<%s %s>' % (type(self).__name__, str(self))
def __str__(self):
return self.name
def make_journal(self, name):
return self.JournalType(name, self)
def allocate(self, cursor, state):
self.mem.allocate(cursor, state)
return cursor
def readable_state(self, cursor):
return self.mem[cursor]
def commit_transaction(self, trans):
with self.write_lock:
self._read(trans.original())
self._commit(self._write(trans.changed()))
def _read(self, read):
if self.check_read:
verify_read(self.mem, read)
def _write(self, changed):
if self.check_write:
return verify_write(self.mem, changed)
else:
return unverified_write(changed)
def _commit(self, changed):
for (cursor, state) in changed:
if state is Deleted:
self.mem.pop(cursor, None)
else:
self.mem[cursor] = state
### State
copy_state = copy.deepcopy
Inserted = sentinal('<inserted>')
Deleted = sentinal('<deleted>')
def good(method, cursor, *default):
try:
value = method(cursor)
if not isinstance(value, Sentinal):
return value
except KeyError:
value = Undefined
if default:
return default[0]
raise ValueError(
'%s object %s %r.' % (type(cursor).__name__, id(cursor), value)
)
### Operations on Logs
def get_state(log, cursor):
return log.get(cursor, Inserted)
def verify_read(log, read):
conflicts = [(c, s) for (c, s) in read if log.get(c) != s]
if conflicts:
raise CannotCommit(conflicts)
def verify_write(log, changed):
changed, conflicts = partition_conflicts(log, changed)
if conflicts:
raise CannotCommit(conflicts)
return changed
def unverified_write(changed):
return list((c, s) for (c, o, s) in changed)
def partition_conflicts(log, changed):
good = []; bad = []
for (cursor, orig, state) in changed:
current = get_state(log, cursor)
(good if current is orig else bad).append((cursor, state))
return good, bad
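# Illustrative sketch (not part of the original module; assumes plain objects can
# serve as cursors, whereas real callers pass Cursor implementations from
# .interfaces):
#     mem = memory('demo')
#     txn = mem.make_journal('txn-1')
#     txn.allocate(cursor, initial_state)   # stage a new object in the journal
#     writable_state(txn, cursor)           # copy-on-write view of its state
#     commit_transaction(mem, txn)          # verify read/write logs, then apply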
| bsd-2-clause | 8,051,461,610,319,959,000 | 25.344186 | 75 | 0.603814 | false | 3.819285 | false | false | false |
inveniosoftware-contrib/invenio-workflows | tests/alembic/test_alembic.py | 2 | 2121 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
from __future__ import absolute_import, print_function
import pytest
from sqlalchemy import inspect
from invenio_db.utils import drop_alembic_version_table
def test_alembic_revision_a26f133d42a9(app, db):
ext = app.extensions['invenio-db']
if db.engine.name == 'sqlite':
raise pytest.skip('Upgrades are not supported on SQLite.')
db.drop_all()
drop_alembic_version_table()
with app.app_context():
inspector = inspect(db.engine)
assert 'workflows_workflow' not in inspector.get_table_names()
assert 'workflows_object' not in inspector.get_table_names()
ext.alembic.upgrade(target='a26f133d42a9')
with app.app_context():
inspector = inspect(db.engine)
assert 'workflows_workflow' in inspector.get_table_names()
assert 'workflows_object' in inspector.get_table_names()
ext.alembic.downgrade(target='720ddf51e24b')
with app.app_context():
inspector = inspect(db.engine)
assert 'workflows_workflow' not in inspector.get_table_names()
assert 'workflows_object' not in inspector.get_table_names()
drop_alembic_version_table()
| gpl-2.0 | -188,392,670,502,221,000 | 34.949153 | 76 | 0.721829 | false | 3.760638 | false | false | false |
m-nez/tetroll | blocks.py | 1 | 5019 | # -*- coding: utf-8 -*-
# Copyright 2015 Michał Nieznański
#
# This file is part of Tetroll.
#
# Tetroll is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Tetroll is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Tetroll. If not, see <http://www.gnu.org/licenses/>.
class block:
def __init__(self):
self.sqares = []
self.cur_sqares = [[0,0],[0,0],[0,0],[0,0]]
self.rotation = 0
self.colour = 1
self.identifier = 0
def I(self):
self.sqares = [
[[0, 0], [0, 1], [0, 2], [0, 3]],
[[0,1], [-1, 1], [1, 1], [2, 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 0
def T(self):
self.sqares = [
[[-1, 0], [0, 0], [1, 0], [0, 1]],
[[0, -1], [0, 0], [1, 0], [0, 1]],
[[-1, 0], [0, 0], [1, 0], [0, -1]],
[[-1, 0], [0, 0], [0, -1], [0, 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 1
def O(self):
self.sqares = [
[[0,0], [1, 0], [0, 1], [1 , 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 2
def S(self):
self.sqares = [
[[-1, 0], [0, 0], [0, 1], [1, 1]],
[[-1, 1], [0, 1], [-1, 2], [0, 0]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 3
def Z(self):
self.sqares = [
[[0, 0], [1, 0], [-1, 1], [0, 1]],
[[0, 0], [1, 1], [1, 2], [0, 1]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 4
def L(self):
self.sqares = [
[[0, 0], [1, 0], [0, 1], [0, 2]],
[[0, 0], [0, 1], [1, 1], [2, 1]],
[[0, 2], [1, 0], [1, 1], [1, 2]],
[[0, 0], [1, 0], [1, 1], [-1,0]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 5
def J(self):
self.sqares = [
[[0, 0], [1, 0], [1, 1], [1, 2]],
[[0, 0], [0, 1], [2, 0], [1, 0]],
[[0, 0], [0, 1], [0, 2], [1, 2]],
[[0, 1], [1, 1], [2, 1], [2, 0]],
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 6
def X(self):
self.sqares = [
[[-1, 0], [1, 0], [0, 1], [-1, 2], [1,2]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 7
def H(self):
self.sqares = [
[[-1, 0], [1, 0], [0, 1], [-1, 2], [1,2], [-1,1], [1,1]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 8
def U(self):
self.sqares = [
[[0, 0], [-1, 2], [1,2], [-1,1], [1,1]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 9
def Tbig(self):
self.sqares = [
[[0, 0], [-1, 2], [1,2], [0,1], [0,2]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 10
def t(self):
self.sqares = [
[[0, 0], [-1, 1], [1,1], [0,1], [0,2]]
]
self.init_cur_sqares()
self.colour = 2
self.identifier = 11
def dot(self):
self.sqares = [
[[0, 0]]
]
self.init_cur_sqares()
self.colour = 1
self.identifier = 16
def from_int(self, x):
if x == 0:
self.I();
elif x == 1:
self.T()
elif x == 2:
self.O()
elif x == 3:
self.S()
elif x == 4:
self.Z()
elif x == 5:
self.L()
elif x == 6:
self.J()
elif x == 7:
self.X()
elif x == 8:
self.H()
elif x == 9:
self.U()
elif x == 10:
self.Tbig()
elif x == 11:
self.t()
elif x == 16:
self.dot()
def rotate(self, r_amount):
self.rotation = (self.rotation + r_amount) % len(self.sqares)
self.cur_sqares = self.sqares[self.rotation]
def current_sqares(self):
return self.sqares[self.rotation]
def init_cur_sqares(self):
self.cur_sqares = []
for i in self.sqares[0]:
self.cur_sqares.append(i)
self.rotation = 0
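# Minimal usage sketch (not part of the original module): exercise the block API.
if __name__ == "__main__":
    b = block()
    b.from_int(1)                  # build the T piece
    b.rotate(1)                    # advance one rotation state
    print(b.current_sqares())      # -> [[0, -1], [0, 0], [1, 0], [0, 1]]
    print(b.identifier)            # -> 1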
| gpl-3.0 | 3,810,933,165,882,452,000 | 29.222892 | 72 | 0.404624 | false | 3.199617 | false | false | false |
jgarcial/star_migration | star_export.py | 1 | 10732 | # -*- coding: utf-8 -*-
import sys
import codecs
from win32com.client import Dispatch
from collections import defaultdict
from mapping import fields, tables
from utils import dump_to_json_file, data_dir, log_dir
sub = '\n ' #'|'
sep = '\t|'
sep_short = '|'
tag_start = '\n'
tag_end = '' #'%'
record_start = '='*100 + '\n'
record_end = '\x1d\n\n'
MAX_RECS = 500000
BATCH_SIZE = 1024
table = "T.CATADO"
def get_field(node):
if not node:
return ""
if node.NodesCount == 0:
# If a field has one occurrence...
return node.Value
# If field has two or more occurrences
# Loop through each occurrence
s = ""
node_type = node.Nodes(1)[0].NodeType if isinstance(node.Nodes(1), tuple) else node.Nodes(1).NodeType
if node_type == 3:
# First Child node is Occurrence, so display occurrences
for i in range(1, node.NodesCount+1):
n = node.Nodes(i)
if isinstance(n, tuple):
n = n[0]
if n != None and n.NodesCount == 0 and n.Value != None:
s += sub + (n.Label if n.Label else ' ') + sep_short + n.Value.strip()
else:
for j in range(1, n.NodesCount+1):
nj = n.Nodes(j)
if isinstance(nj, tuple):
nj = nj[0]
if nj != None and nj.NodesCount == 0 and nj.Value != None:
s += sub + nj.Label + sep_short + nj.Value.strip()
else:
# Iterate through Subfields
for i in range(1, node.NodesCount+1):
n = node.Nodes(i)
if isinstance(n, tuple):
n = n[0]
if n != None and n.Value != None:
s += sub + n.Label + sep_short + n.Value.strip()
return s
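# get_field flattens a STAR field node into a single string: each occurrence or
# subfield is appended on its own line as "Label|value" using the sub/sep_short
# separators defined above; scalar fields return their value directly.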
def update_progress(progress):
sys.stdout.write('\r[{0}{1}] {2}% '.format('#'*(int(progress)),
' '*(100-int(progress)),
round(progress, 2)))
sys.stdout.flush()
def dump_to_text_file(d, filename):
print(">>> Writing to text file...")
total = float(len(d.items()))
f = open(data_dir + filename, 'w')
out = ''
counter = 0
for r in sorted(d.items()):
out = record_start + 'RECNO' + sep + str(r[0]) + tag_end
for tag in sorted(r[1].items()):
if tag[1] != '' and tag[0] != 'RECNO':
out += tag_start + tag[0] + sep + tag[1] + tag_end
out += record_end
f.write(out.encode('utf-8'))
counter = counter + 1
update_progress(counter*100/total)
f.close()
def format_record(d):
    out = record_start + 'RECNO' + sep + (str(d['RECNO']) if 'RECNO' in d else '') + tag_end
if d.items():
# out += record_start
for tag in sorted(d.items()):
if tag[0] != 'RECNO':
out += tag_start + tag[0] + sep + tag[1] + tag_end
out += record_end
return out
def main():
item_type = sys.argv[1]
MAX_RECS = 2500000 #int(sys.argv[2])
tables = {
'vendors': ['T.LIBVENDORS'],
'orders': ['T.ACQPOS', 'T.ACQITEMS'], #'T.ACQITEMS2'], #['T.ACQFUNDS', 'T.ACQINVOICE', 'T.ACQISELRCI', 'T.ACQISELRCV', 'T.ORDERSTAT', 'T.ORDERTYPES'],
'patrons': ['T.APPUSERS'], # 'T.IAEAUSERS'],
'serials': ['T.SERHOLD', 'T.SERIALS', ], # 'T.SERROUTE', 'T.SFREQS', ], # ['T.SERSELEOYR', 'T.SERSELHOLD', 'T.SERSELVUP'],
'bibliographic': ['T.CATADO'], # Items
'checkouts': ['T.CATCIRC'],
'holds': ['T.RESPICK'],
}
with codecs.open(log_dir + 'log_export.txt', 'w', encoding='utf8') as logfile:
logfile.close()
tables_to_download = []
if item_type in tables:
tables_to_download = tables[item_type]
else:
tables_to_download = [item_type]
for table in tables_to_download:
print(">>> Extracting from %s..." % table)
try:
i = 1
batch = 10000
last_record = 110000
last_fetched = 0
recs = {}
errors = []
get_me_out_of_here = False
offset = 0
while last_fetched < last_record:
print("\n>>> Connecting (%d)..." % i)
conn = Dispatch("STARADO.Connection")
conn.Login("libstar", "11002", "jaime", "dedalus")
res = conn.OpenRecordset(table, "R>%d AND R<=%d" % (last_fetched, min(last_fetched+batch, last_record)), " ".join(fields[table]))
rs = res[0]
if rs:
rec_no = rs.RecordCount
print(">>> Downloading %s records. [ %d to %d ]" % (min(rec_no, MAX_RECS), last_fetched+1, min(last_fetched+batch, last_record)))
total = float(min(rec_no, MAX_RECS))
if rs.RecordCount > 0:
f = open(data_dir+table+'_'+str(i)+'.txt', 'w')
rs.BatchSize = BATCH_SIZE # Set number of records to be retrieved in cached batches.
rs.MoveFirst() # Move focus to first record of retrieved set.
field_list = rs.FieldList.split(' ')
# Loop through each record until end of set is reached.
counter = 1
while not rs.EOF:
d = defaultdict(lambda : None)
for field in field_list:
value = None
try:
value = get_field(rs.Record(field))
if value != None and value != '':
d[field] = value
except:
with codecs.open(log_dir + 'log_export.txt', 'a', encoding='utf8') as logfile:
logfile.write(("==================\n%d\n%s:\t%s\n\n" % (counter+offset, field, value if value else 'No value set yet')).replace('\n', '\r\n'))
logfile.close()
# try:
f.write(format_record(d).encode('utf-8'))
recs[counter+offset] = d
# print counter*i
rs.MoveNext() # Go to the next record in the set
update_progress(counter*100/total)
counter = counter + 1
# if counter >= MAX_RECS:
# break
offset = offset + rs.RecordCount
rs.MoveLast()
f.close()
else:
get_me_out_of_here = True
last_fetched = last_fetched + rs.RecordCount #batch #int(get_field(rs.Record("RECNO")))
i = i + 1
# else:
print("\n>>> Closing connection. This may take a while...")
rs.CloseRecordset()
conn.Logout()
conn.CloseConnection()
rs = None
if get_me_out_of_here:
break
else:
conn.Logout()
conn.CloseConnection()
break
# print("\n>>> Saving json...")
# dump_to_json_file(recs, table+'.json')
# dump_to_text_file(recs, table+'.txt')
except Exception as e:
print e
print("\n>>> There was an ERROR: saving everything...")
# rs.Cancel()
f.close()
# rs.CloseRecordset()
conn.Logout()
conn.CloseConnection()
rs = None
if len(recs):
dump_to_json_file(recs, table+'.json')
# dump_to_json_file(errors, table+'-errors.json')
# print("\n>>> There were %d errors") % len(errors)
print(">>> %d records fetched") % len(recs)
# sys.exit(0)
if __name__ == '__main__':
main()
#
# ['T.ACQFUNDS', 'T.ACQFUNDSED', 'T.ACQFUNDSEO', 'T.ACQFUNDSOL', 'T.ACQINVOICE',
# 'T.ACQISELRCI', 'T.ACQISELRCV', 'T.ACQITEMS', 'T.ACQITEMSO1', 'T.ACQITEMSO2',
# 'T.ACQITEMSW', 'T.ACQPOS', 'T.ACQPOS1', 'T.ACQPOSBAK', 'T.ACQPOSED',
# 'T.ACQTEST', 'T.APPUPDATE', 'T.APPUSERS', 'T.APPUSERSN', 'T.APPUSERSSN',
# 'T.APPUSERSSV', 'T.AV', 'T.AVUSE', 'T.BOILERPLAT', 'T.CAT', 'T.CATADDITEM',
# 'T.CATBAK', 'T.CATCIRC', 'T.CATIN', 'T.CATINS', 'T.CATLINK', 'T.CATMASTER',
# 'T.CATOUT', 'T.CATOUTS', 'T.CATOVERDUE', 'T.CATPARTS', 'T.CATPRO',
# 'T.CATPROADIT', 'T.CATPROANAL', 'T.CATPROBKS', 'T.CATPROSER', 'T.CATPROSP',
# 'T.CATPROTOC', 'T.CATRENEW', 'T.CATRENEWS', 'T.CATRES', 'T.CATSELREQ',
# 'T.CATSELREQA', 'T.CATSELREQC', 'T.CATSELREQW', 'T.CATSP', 'T.CATTITLES',
# 'T.CATTOC', 'T.CATWEB', 'T.CATWEB4', 'T.CATWEBDES', 'T.CATWEBSEL',
# 'T.CATWEBSRV', 'T.CLAIMINT', 'T.CLSAMPTHES', 'T.COLLECTION', 'T.COUNTERS',
# 'T.CURRENCY', 'T.DIVISION', 'T.EMAIL', 'T.EMAILDB', 'T.GLOBAL', 'T.GLOBAL1',
# 'T.GLOBAL2', 'T.GLOBAL3', 'T.GLOBAL4', 'T.HNPU', 'T.IAEACSBC', 'T.IAEAUSERS',
# 'T.INISCAT', 'T.INISCATV17', 'T.INISCATWEB', 'T.INISTHES', 'T.INVENTORY',
# 'T.ISSCODE', 'T.ISSDATES', 'T.ISSDATESEL', 'T.ISSN', 'T.LDOCKWS', 'T.LDOCKWSN',
# 'T.LIBSCFORMS', 'T.LIBSGLOBAL', 'T.LIBSSERVER', 'T.LIBTRACK', 'T.LIBTYPES',
# 'T.LIBVENDORS', 'T.LNUMTYPES', 'T.LREFREQS', 'T.LREFREQSC', 'T.LSELRESCAN',
# 'T.LSERVICES', 'T.LSERVICESP', 'T.LSERVICESR', 'T.LSERVICESW', 'T.LSERVNUMS',
# 'T.MARCTEMP', 'T.ORDERSTAT', 'T.ORDERTYPES', 'T.PICKCTRY', 'T.PICKCTRYED',
# 'T.PICKLANG', 'T.PICKLANGED', 'T.PICKSTEP', 'T.PROJECTS', 'T.PWSELECTS',
# 'T.RAINBOW', 'T.REGLIBUSE', 'T.REQTYPE', 'T.RESERVE', 'T.RESERVEC', 'T.RESPICK',
# 'T.SERHOLD', 'T.SERHOLD2', 'T.SERIALS', 'T.SERIALS2', 'T.SERIALSBK',
# 'T.SERIALSC1', 'T.SERIALSC2', 'T.SERIALSC3', 'T.SERIALSDM', 'T.SERIALSID',
# 'T.SERIALSID2', 'T.SERIALSIN', 'T.SERIALSNE', 'T.SERIALSNE2', 'T.SERIALSNT',
# 'T.SERIALSNT2', 'T.SERIALSUNX', 'T.SERROUTE', 'T.SERROUTE2', 'T.SERSELEOYR',
# 'T.SERSELHOLD', 'T.SERSELVUP', 'T.SFREQS', 'T.SP', 'T.STAFPUB', 'T.STATISTIC',
# 'T.STATS', 'T.SUBINFO', 'T.SUBSNO', 'T.TOPIC', 'T.TUNCAT', 'T.TUNCATTITL',
# 'T.TUNCATWEB4', 'T.TUNTITLES', 'T.VALDB', 'T.VALIDATION', 'T.WAITCODES',
# 'T.WEBDES', 'T.WEBPROFILE', 'T.WEBSAMPTHS', 'T.WEBSERVER', 'T.WEBTYPES',
# 'T.WHENX', 'T.WHENXSUPER',]
| gpl-3.0 | 9,014,909,907,211,440,000 | 44.2827 | 182 | 0.486303 | false | 3.12704 | false | false | false |
richlewis42/scikit-chem | skchem/pandas_ext/structure_methods.py | 1 | 2405 | #! /usr/bin/env python
#
# Copyright (C) 2015-2016 Rich Lewis <[email protected]>
# License: 3-clause BSD
""" # skchem.pandas.structure_methods
Tools for adding a default attribute to pandas objects."""
from sklearn.manifold import TSNE, MDS
from sklearn.decomposition import PCA
import pandas as pd
from pandas.core.base import NoNewAttributesMixin, AccessorProperty
from pandas.core.series import Series
from pandas.core.index import Index
from .. import core
from .. import features
DIM_RED = {
'tsne': TSNE,
'pca': PCA,
'mds': MDS
}
class StructureMethods(NoNewAttributesMixin):
""" Accessor for calling chemical methods on series of molecules. """
def __init__(self, data):
self._data = data
def add_hs(self, **kwargs):
return self._data.apply(lambda m: m.add_hs(**kwargs))
def remove_hs(self, **kwargs):
return self._data.apply(lambda m: m.remove_hs(**kwargs))
def visualize(self, fper='morgan', dim_red='tsne', dim_red_kw=None,
**kwargs):
if dim_red_kw is None:
dim_red_kw = {}
if isinstance(dim_red, str):
dim_red = DIM_RED.get(dim_red.lower())(**dim_red_kw)
fper = features.get(fper)
fper.verbose = False
feats = fper.transform(self._data)
feats = feats.fillna(feats.mean())
twod = pd.DataFrame(dim_red.fit_transform(feats))
ax = twod.plot.scatter(x=0, y=1, **kwargs)
ax.set_xticklabels([])
ax.set_xlabel('')
ax.set_yticklabels([])
ax.set_ylabel('')
@property
def atoms(self):
return self._data.apply(lambda m: m.atoms)
def only_contains_mols(ser):
return ser.apply(lambda s: isinstance(s, core.Mol)).all()
class StructureAccessorMixin(object):
""" Mixin to bind chemical methods to objects. """
def _make_structure_accessor(self):
if isinstance(self, Index):
raise AttributeError('Can only use .mol accessor with molecules,'
'which use np.object_ in scikit-chem.')
if not only_contains_mols(self):
raise AttributeError('Can only use .mol accessor with '
'Series that only contain mols.')
return StructureMethods(self)
mol = AccessorProperty(StructureMethods, _make_structure_accessor)
Series.__bases__ += StructureAccessorMixin,
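# Illustrative usage (assumption, not part of the module): with the mixin bound to
# pandas Series above, a Series of skchem Mol objects exposes the .mol accessor:
#     mols.mol.add_hs()                                 # Mol.add_hs applied element-wise
#     mols.mol.visualize(fper='morgan', dim_red='pca')  # fingerprint + 2-D embedding plot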
| bsd-3-clause | 7,766,510,744,667,272,000 | 26.643678 | 77 | 0.62578 | false | 3.632931 | false | false | false |
dsparrow27/zoocore | zoo/libs/pyqt/extended/pythoneditor.py | 1 | 8958 | import os
import sys
import traceback
from qt import QtWidgets, QtGui, QtCore
from zoo.libs.pyqt.syntaxhighlighter import highlighter
from zoo.libs.pyqt.widgets import layouts
class NumberBar(QtWidgets.QWidget):
def __init__(self, edit):
super(NumberBar, self).__init__(edit)
self.edit = edit
self.adjustWidth(1)
def paintEvent(self, event):
self.edit.numberbarPaint(self, event)
super(NumberBar, self).paintEvent(event)
def adjustWidth(self, count):
width = self.fontMetrics().width(unicode(count))
if self.width() != width:
self.setFixedWidth(width)
def updateContents(self, rect, scroll):
if scroll:
self.scroll(0, scroll)
else:
self.update()
class TextEditor(QtWidgets.QPlainTextEdit):
def __init__(self, parent=None):
super(TextEditor, self).__init__(parent=parent)
self.setTextInteractionFlags(QtCore.Qt.TextEditorInteraction)
self.setWordWrapMode(QtGui.QTextOption.NoWrap)
self.setFrameStyle(QtWidgets.QFrame.NoFrame)
self.centerOnScroll()
self.highlight()
self.cursorPositionChanged.connect(self.highlight)
metrics = QtGui.QFontMetrics(self.document().defaultFont())
self.setTabStopWidth(4 * metrics.width(' '))
font = QtGui.QFont("Courier")
font.setStyleHint(QtGui.QFont.Monospace)
font.setFixedPitch(True)
self.setFont(font)
def highlight(self):
hi_selection = QtWidgets.QTextEdit.ExtraSelection()
# hi_selection.format.setBackground(self.palette().dark()) # temp
hi_selection.format.setProperty(QtGui.QTextFormat.FullWidthSelection, True)
hi_selection.cursor = self.textCursor()
hi_selection.cursor.clearSelection()
self.setExtraSelections([hi_selection])
def numberbarPaint(self, number_bar, event):
font_metrics = self.fontMetrics()
current_line = self.document().findBlock(self.textCursor().position()).blockNumber() + 1
block = self.firstVisibleBlock()
line_count = block.blockNumber()
painter = QtGui.QPainter(number_bar)
painter.fillRect(event.rect(), self.palette().base())
# Iterate over all visible text blocks in the document.
while block.isValid():
line_count += 1
block_top = self.blockBoundingGeometry(block).translated(self.contentOffset()).top()
# Check if the position of the block is out side of the visible
# area.
if not block.isVisible() or block_top >= event.rect().bottom():
break
# We want the line number for the selected line to be bold.
if line_count == current_line:
font = painter.font()
font.setBold(True)
painter.setFont(font)
else:
font = painter.font()
font.setBold(False)
painter.setFont(font)
# Draw the line number right justified at the position of the line.
paint_rect = QtCore.QRect(0, block_top, number_bar.width(), font_metrics.height())
painter.drawText(paint_rect, QtCore.Qt.AlignRight, unicode(line_count))
block = block.next()
painter.end()
def wheelEvent(self, event):
"""
Handles zoom in/out of the text.
"""
if event.modifiers() & QtCore.Qt.ControlModifier:
delta = event.delta()
if delta < 0:
self.zoom(-1)
elif delta > 0:
self.zoom(1)
return True
return super(TextEditor, self).wheelEvent(event)
def zoom(self, direction):
"""
Zoom in on the text.
"""
font = self.font()
size = font.pointSize()
if size == -1:
size = font.pixelSize()
size += direction
if size < 7:
size = 7
if size > 50:
return
style = """
QWidget {
font-size: %spt;
}
""" % (size,)
self.setStyleSheet(style)
def keyPressEvent(self, event):
if (event.modifiers() & QtCore.Qt.ShiftModifier and
event.key() in [QtCore.Qt.Key_Enter, QtCore.Qt.Key_Return]):
self.insertPlainText("\n")
event.accept()
elif event.key() == QtCore.Qt.Key_Tab:
# intercept the tab key and insert 4 spaces
self.insertPlainText(" ")
event.accept()
else:
super(TextEditor, self).keyPressEvent(event)
if event.key() in (QtCore.Qt.Key_Return, QtCore.Qt.Key_Enter) and event.modifiers() == QtCore.Qt.ControlModifier:
self.parent().execute()
class Editor(QtWidgets.QFrame):
outputText = QtCore.Signal(str)
def __init__(self, parent=None):
super(Editor, self).__init__(parent=parent)
self.setFrameStyle(QtWidgets.QFrame.StyledPanel | QtWidgets.QFrame.Sunken)
self._locals = {}
self.textEdit = TextEditor(parent=self)
self.numberBar = NumberBar(self.textEdit)
hbox = layouts.hBoxLayout(parent=self)
hbox.addWidget(self.numberBar)
hbox.addWidget(self.textEdit)
self.textEdit.blockCountChanged.connect(self.numberBar.adjustWidth)
self.textEdit.updateRequest.connect(self.numberBar.updateContents)
self.pythonHighlighter = highlighter.highlighterFromJson(os.path.join(os.path.dirname(highlighter.__file__),
"highlightdata.json"),
self.textEdit.document())
def text(self):
return self.textEdit.toPlainText()
def setText(self, text):
self.textEdit.setPlainText(text)
    def isModified(self):
        return self.textEdit.document().isModified()
    def setModified(self, modified):
        self.textEdit.document().setModified(modified)
    def setLineWrapMode(self, mode):
        self.textEdit.setLineWrapMode(mode)
def execute(self):
original_stdout = sys.stdout
class stdoutProxy():
def __init__(self, write_func):
self.write_func = write_func
self.skip = False
def write(self, text):
if not self.skip:
stripped_text = text.rstrip('\n')
self.write_func(stripped_text)
self.skip = not self.skip
def flush(self):
pass
sys.stdout = stdoutProxy(self.outputText.emit)
cursor = self.textEdit.textCursor()
script = cursor.selectedText()
script = script.replace(u"\u2029", "\n")
if not script:
            script = str(self.textEdit.toPlainText().strip())
if not script:
return
self.outputText.emit(script)
evalCode = True
        try:
            try:
                try:
                    outputCode = compile(script, "<string>", "eval")
                except SyntaxError:
                    evalCode = False
                    outputCode = compile(script, "<string>", "exec")
            except Exception:
                trace = traceback.format_exc()
                self.outputText.emit(trace)
                return
            # ok we've compiled the code now exec
            if evalCode:
                try:
                    results = eval(outputCode, globals(), self._locals)
                    self.outputText.emit(str(results))
                except Exception:
                    trace = traceback.format_exc()
                    self.outputText.emit(trace)
            else:
                try:
                    exec (outputCode, globals(), self._locals)
                except Exception:
                    trace = traceback.format_exc()
                    self.outputText.emit(trace)
        finally:
            sys.stdout = original_stdout
class TabbedEditor(QtWidgets.QTabWidget):
outputText = QtCore.Signal(str)
def __init__(self, parent):
super(TabbedEditor, self).__init__(parent=parent)
self.setTabsClosable(True)
self.setMovable(True)
self.newTabBtn = QtWidgets.QPushButton("+", parent=self)
self.newTabBtn.setMaximumWidth(40)
self.newTabBtn.setToolTip("Add New Tab")
self.setCornerWidget(self.newTabBtn, QtCore.Qt.TopLeftCorner)
self.newTabBtn.clicked.connect(self.addNewEditor)
self.tabCloseRequested.connect(self.closeCurrentTab)
def addNewEditor(self, name=None):
name = name or "New tab"
edit = Editor(parent=self)
self.addTab(edit, name)
edit.outputText.connect(self.outputText.emit)
edit.textEdit.moveCursor(QtGui.QTextCursor.Start)
self.setCurrentIndex(self.count() - 1)
def closeCurrentTab(self, index):
self.removeTab(index)
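# Minimal usage sketch (assumption, not part of the original module): host the
# tabbed editor in a standalone Qt application.
if __name__ == "__main__":
    app = QtWidgets.QApplication([])
    editor = TabbedEditor(parent=None)
    editor.addNewEditor("scratch")
    editor.outputText.connect(lambda text: sys.stdout.write(text + "\n"))
    editor.show()
    app.exec_()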
| gpl-3.0 | -568,334,776,551,056,200 | 32.803774 | 121 | 0.579035 | false | 4.201689 | false | false | false |
aloverso/loanbot | conversationHandler.py | 1 | 14630 | import time
import messengerClient
import sendEmailToLibrarian
'''
A class that deals with the messages we receive from users
'''
class ConversationHandler():
'''
create a new conversation handler with a given database client
'''
def __init__(self, database_client):
self.database_client = database_client
self.checkout_words = ['check', 'checking', 'checked', 'check out', 'checkout', 'checking out', 'take', 'took', 'taking', 'grabbing', 'grab', 'grabbed', 'checked out', 'borrow', 'borrowed', 'want']
self.return_words = ['return', 'returned','returning','brought', 'bring', 'bringing', 'dropping', 'dropped', 'took back', 'left', 'done', 'done with', 'finished']
self.closing_words = ['thanks', 'thank', 'ok', 'bye', 'goodbye', 'good-bye', 'okay', 'cancel', 'stop', 'fuck', 'yay']
self.available_words = ['available', 'there']
self.help_words = ['how do i', 'help', 'manual', 'documentation', 'how to', 'trouble', 'confused', 'what do i do with', 'what should i do', "i don't know"]
self.NO_CONTACT = 0
self.SENT_GREETING = 1
self.WANT_CHECKOUT = 2
self.CONFIRM_TOOL = 4
self.HOW_LONG = 5
self.CLOSING = 6
self.WANT_RETURN = 7
self.CONFIRM_TOOL_RETURN = 8
self.AVAILABILITY_QUESTION = 9
self.SEND_LIST = 10
'''
searches through a message looking for names of tools from the tools database
returns a list of tool names found, empty if none found
'''
def find_tools_in_message(self, message):
found_tools = []
tools_list = self.database_client.get_all_tools()
#loop through list looking for tool names in message
for tool in tools_list:
if tool['name'] in message:
found_tools.append(tool)
else:
for alt_name in tool['alternate_names']:
if alt_name in message:
found_tools.append(tool)
return found_tools
'''
creates a string of all tools a user is attempting to check out
'''
def make_tool_string(self, tool_list):
tool_string = ''
print('temp_tools', tool_list)
for tool in tool_list:
tool_string = tool_string + tool['name'] + " and " # allow for a list of tools
# remove final and from string
tool_string = tool_string[:-5]
print('tool string:', tool_string)
return tool_string
'''
Parses the loan time quick reply message to store a due_date
for the tool/s the user wants to check out. uses import time
TODO: handle the case when we somehow get a different message
than the quick reply options were expecting in a way other than
making the due date "0"
'''
def parse_due_date(self, message):
due_date = 0
SECONDS_IN_DAY = 3600*24
# they want a 24 hour loan
if message == 'yes':
due_date = int(time.time()) + 120 # !!!!!! CHANGE THIS BACK TO SECONDS_IN_DAY!!!!!!
# they want a 12 hour loan
elif message == '12 hours instead':
due_date = int(time.time()) + (SECONDS_IN_DAY/2)
#they want a 3 day loan
elif message == '3 days instead':
due_date = int(time.time()) + (SECONDS_IN_DAY*3)
return due_date
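    # Worked example (hypothetical clock): if time.time() returned 1000000, then
    # parse_due_date('12 hours instead') -> 1000000 + 43200 = 1043200, while an
    # unrecognised reply falls through and returns a due date of 0.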
'''
Uses the user's stage to parse the message and determine how to reply
takes the message text string and a user (in dictionary format)
returns a tuple:
updated_user, response_text, quickreply
updated_user is the user dictionary, possibly changed or updated_user
response_text is the bot's response message
quickreply is a field indicating whether this should be a quickreply response
it either has the None value (not a quickreply message)
or a list of quickreply options
'''
def determine_response_for_user(self, message, user):
print('determine_response_for_user')
if any(word in message for word in self.closing_words):
response = "Glad to help!"
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, response, None
if any(word in message for word in self.help_words):
response = ''
tool_help_wanted = self.find_tools_in_message(message)
if len(tool_help_wanted) >0:
resource_links = ''
for tool in tool_help_wanted:
resource_links += ' ' + tool['resource_link']
response ="The Library gave me some resources that might be helpful, see if this is useful:" + resource_links
else:
response ="😵 I have no clue how to help you with this one! I've passed your question along to the librarians. Hopefully they know what to do and will contact you soon. 😅"
#TODO: send email to librarian here
return user, response, None
# this needs to be located above the NO_CONTACT check
# because if they said anything that's NOT "view more", then
# it needs to be treated like a NO_CONTACT message context
if user['stage'] == self.SEND_LIST:
user['stage'] = self.NO_CONTACT
print(user['stage'])
if message == 'view more':
response = "Check The Library's online database for the full tool list: https://olin.tind.io/"
return user, response, None
#if the user is initiating contact
if user['stage'] == self.NO_CONTACT:
# trying to return
if any(word in message for word in self.return_words):
user['stage'] = self.WANT_RETURN
print(user['stage'])
# checking availability status
elif any(word in message for word in self.available_words):
tools_wanted = self.find_tools_in_message(message)
response_string = ''
quickreply = None
if len(tools_wanted) >0:
unavailable_tools = []
for tool in tools_wanted:
available_modifier = ''
if tool['current_user'] != None:
available_modifier = 'not '
unavailable_tools.append(tool)
response_string += 'the {} is {}available and '.format(tool['name'], available_modifier)
response_string = response_string[:-5]
if len(unavailable_tools) > 0:
question = 'Would you like me to ask the tool borrowers to return them?'
response_string = response_string + '. ' + question
user['temp_tools'] = unavailable_tools
user['stage'] = self.AVAILABILITY_QUESTION
print(user['stage'])
quickreply = ['yes', 'no']
else:
response_string = "SEND_LIST"
user['stage'] = self.SEND_LIST
print(user['stage'])
return user, response_string, quickreply
# checking out
elif any(word in message for word in self.checkout_words):
user['stage'] = self.WANT_CHECKOUT
print(user['stage'])
else:
# send greeting and ask what tool
response = "😄 Hi there! I'm Loan Wrangler, what can I help you with?"
# user['stage'] = self.SENT_GREETING
return user, response, None
# if the user has asked about availability and we're finding out if we should
# send a reminder to the borrowers or not
if user['stage'] == self.AVAILABILITY_QUESTION:
if message == 'yes':
for tool in user['temp_tools']:
borrower_id = tool['current_user']
borrower_sender_id = self.database_client.find_user('_id', borrower_id)['sender_id']
# this is not the best code structure
# because we have this weird situation where the user we want to send a message to
# is not the user who sent us a message
messenger_client = messengerClient.MessengerClient()
reminder = "Hey, someone's interested in borrowing the {} that you have checked out. If you're done with it, could you bring it back?".format(tool['name'])
messenger_client.send_message(borrower_sender_id, reminder, None)
user['stage'] = self.NO_CONTACT
print(user['stage'])
user['temp_tools'] = []
return user, "Alright, I let them know someone's looking for it! 🔎", None
else:
user['stage'] = self.NO_CONTACT
print(user['stage'])
user['temp_tools'] = []
return user, "☺️ Alrighty. Is there something else I can help with?", None
#if the user wants to check out something
if user['stage'] == self.WANT_CHECKOUT or user['stage'] == self.SENT_GREETING:
tools_wanted = self.find_tools_in_message(message)
user['temp_tools'] = tools_wanted
#if we found a tool name/s in the message
if len(tools_wanted) > 0:
tool_string = self.make_tool_string(user['temp_tools'])
print('tool string in line:', tool_string)
response = "Sounds like you want to check out a {}, is that correct?".format(tool_string)
user['stage'] = self.CONFIRM_TOOL
print(user['stage'])
return user, response, ['yes','no']
#if we could not identify a tool name/s in the message
else:
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, "What can I do for ya?", None
#we check that we parsed the correct tool/s...
if user['stage'] == self.CONFIRM_TOOL:
#...if so, we find out how long the loan will be
if message == 'yes':
available = True
tools_out = []
# check if those tools are in right now
for tool in user['temp_tools']:
if tool['current_user'] != None:
available = False
tools_out.append(tool)
if available:
response = "Great! Is a loan time of 1 day okay?"
user['stage'] = self.HOW_LONG
print(user['stage'])
return user, response, ['yes', '12 hours instead', '3 days instead']
else:
response = "😓 Sorry, the following tools are not available right now: {}".format(self.make_tool_string(tools_out))
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, response, None
#...if not, we try again
else:
user['temp_tools'] = []
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, "😵 Sorry I misunderstood. What do you want to do?", None
#update user and tool db based on the loan time
if user['stage'] == self.HOW_LONG:
tool_string = self.make_tool_string(user['temp_tools'])
for tool in user['temp_tools']:
tool['current_user'] = user['_id']
tool['current_due_date'] = self.parse_due_date(message)
self.database_client.update_tool(tool)
user['tools'].append(tool['_id'])
# TODO: how to handle loan time if they are checking out more than one tool
#finish the interaction and reset the conversation stage
response = "😎 You're all set! I'll remind you to return the {} before it's due.".format(tool_string)
user['temp_tools'] = []
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, response, None
if user['stage'] == self.CONFIRM_TOOL_RETURN:
#...if so, we find out how long the loan will be
if message == 'yes':
tool_string = self.make_tool_string(user['temp_tools'])
# TODO: tell them if they're trying to return something they don't have
#update tool
for tool in user['temp_tools']:
if tool['current_user'] == user['_id']:
tool['current_user'] = None
tool['current_due_date'] = None
self.database_client.update_tool(tool)
# update user tool list
for checked_out_tool_id in user['tools']:
if checked_out_tool_id == tool['_id']:
user['tools'].remove(checked_out_tool_id)
user['temp_tools'] = []
user['stage'] = self.NO_CONTACT
print(user['stage'])
return user, "✨🏆✨ Thanks!!!! I'll let The Library know the {} has returned.".format(tool_string), None
#...if not, we try again
else:
user['temp_tools'] = []
user['stage'] = self.WANT_RETURN
print(user['stage'])
return user, "😓 Sorry I misunderstood. What tool do you want to return?", None
if user['stage'] == self.WANT_RETURN:
tools_returning = self.find_tools_in_message(message)
user['temp_tools'] = tools_returning
#if we found a tool name/s in the message
if len(tools_returning) > 0:
tool_string = self.make_tool_string(user['temp_tools'])
print('tool string in line:', tool_string)
response = "You're returning a {}, is that right?".format(tool_string)
user['stage'] = self.CONFIRM_TOOL_RETURN
print(user['stage'])
return user, response, ['yes','no']
#if we could not identify a tool name/s in the message
else:
user['stage'] = self.WANT_RETURN
print(user['stage'])
return user, "Which tool did you want to return?", None
print('I GOT TO THE END, OH NO')
return user
## TODO: check for cancelling
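# Minimal usage sketch (assumption, not part of the original module; names are
# illustrative and the user dict shape follows determine_response_for_user):
#     handler = ConversationHandler(database_client)
#     user, reply, quickreply = handler.determine_response_for_user(
#         'can i check out the drill', user)
#     # reply is the bot's text; quickreply is None or a list of button labels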
| mit | 7,896,362,138,672,726,000 | 43.907692 | 205 | 0.54135 | false | 4.275044 | false | false | false |
HTTP-APIs/hydrus | hydrus/conf.py | 1 | 3125 | """
Global variables are loaded or set here:
DEBUG
PORT
API_NAME
DB_URL
APIDOC_OBJ
HYDRUS_SERVER_URL
FOUND_DOC
"""
import os
import json
import yaml
import logging
from os.path import abspath, dirname
from pathlib import Path
from importlib.machinery import SourceFileLoader
from hydra_openapi_parser.openapi_parser import parse
logger = logging.getLogger(__file__)
try:
DEBUG = bool(os.environ["DEBUG"])
except KeyError:
DEBUG = False
# load form environment (as many globals as possible shall be in
# environment configuration)
PORT = int(os.environ["PORT"]) if "PORT" in dict(os.environ).keys() else 8080
API_NAME = os.environ["API_NAME"] if "API_NAME" in dict(os.environ).keys() else "api"
DB_URL = (
os.environ["DB_URL"]
if "DB_URL" in dict(os.environ).keys()
else "sqlite:///database.db"
)
def get_apidoc_path():
"""
Get the path of the apidoc.
:return - Tuple (path, boolean). path denotes path of the apidoc.
If apidoc is not present at specified path then it falls back at sample apidoc.
boolean is true if the apidoc is present at the specified path.
boolean is false if sample apidoc is being used.
"""
cwd_path = Path(dirname(dirname(abspath(__file__))))
try:
apidoc_env = os.environ["APIDOC_REL_PATH"]
apidoc_path = cwd_path / Path(apidoc_env)
found_doc = True
except KeyError:
found_doc = False
apidoc_path = cwd_path / "hydrus" / "samples" / "hydra_doc_sample.py"
return (apidoc_path, found_doc)
def load_apidoc(path):
"""
Parses docs of .jsonld, .py, .yaml format and loads apidoc from the given path.
:param path - Path for the apidoc to be loaded
:return - apidoc
:Raises:
FileNotFoundError: If the wrong path of hydradoc is specified.
BaseException: If hydradoc is specified in wrong format.
"""
path = str(path)
try:
apidoc_format = path.split(".")[-1]
if apidoc_format == "jsonld":
with open(path, "r") as f:
api_doc = json.load(f)
elif apidoc_format == "py":
api_doc = SourceFileLoader("doc", path).load_module().doc
elif apidoc_format == "yaml":
with open(path, "r") as stream:
api_doc = parse(yaml.load(stream))
else:
            raise ValueError(
                "Error - hydradoc format not supported. "
                "The supported formats are .py, .jsonld and .yaml"
            )
logger.info(f"APIDOC path loaded from: {path}")
return api_doc
except FileNotFoundError:
logger.critical(
f"No Hydra ApiDoc file to load has been found"
f" at {path}. Cannot set APIDOC_OBJ"
)
raise
except BaseException:
logger.critical("Problem parsing specified hydradoc file")
raise
def get_host_domain():
"""
Returns host domain.
"""
HOST_DOMAIN = f"http://localhost:{PORT}"
return HOST_DOMAIN
(path, FOUND_DOC) = get_apidoc_path()
APIDOC_OBJ = load_apidoc(path)
HYDRUS_SERVER_URL = f"http://localhost:{PORT}/"
| mit | -6,645,050,183,180,705,000 | 27.669725 | 85 | 0.62176 | false | 3.535068 | false | false | false |
alephobjects/Cura2 | cura/Settings/QualityAndUserProfilesModel.py | 1 | 2957 | # Copyright (c) 2016 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from UM.Application import Application
from UM.Settings.ContainerRegistry import ContainerRegistry
from cura.QualityManager import QualityManager
from cura.Settings.ProfilesModel import ProfilesModel
from cura.Settings.ExtruderManager import ExtruderManager
## QML Model for listing the current list of valid quality and quality changes profiles.
#
class QualityAndUserProfilesModel(ProfilesModel):
def __init__(self, parent = None):
super().__init__(parent)
self._empty_quality = ContainerRegistry.getInstance().findInstanceContainers(id = "empty_quality")[0]
## Fetch the list of containers to display.
#
# See UM.Settings.Models.InstanceContainersModel._fetchInstanceContainers().
def _fetchInstanceContainers(self):
global_container_stack = Application.getInstance().getGlobalContainerStack()
if not global_container_stack:
return {}, {}
# Fetch the list of quality changes.
quality_manager = QualityManager.getInstance()
machine_definition = quality_manager.getParentMachineDefinition(global_container_stack.definition)
quality_changes_list = quality_manager.findAllQualityChangesForMachine(machine_definition)
extruder_manager = ExtruderManager.getInstance()
active_extruder = extruder_manager.getActiveExtruderStack()
extruder_stacks = self._getOrderedExtruderStacksList()
# Fetch the list of usable qualities across all extruders.
# The actual list of quality profiles come from the first extruder in the extruder list.
quality_list = quality_manager.findAllUsableQualitiesForMachineAndExtruders(global_container_stack, extruder_stacks)
# Filter the quality_change by the list of available quality_types
quality_type_set = set([x.getMetaDataEntry("quality_type") for x in quality_list])
# Also show custom profiles based on "Not Supported" quality profile
quality_type_set.add(self._empty_quality.getMetaDataEntry("quality_type"))
filtered_quality_changes = {qc.getId(): qc for qc in quality_changes_list if
qc.getMetaDataEntry("quality_type") in quality_type_set and
((qc.getMetaDataEntry("extruder") == active_extruder.definition.getMetaDataEntry("quality_definition") or
qc.getMetaDataEntry("extruder") == active_extruder.definition.getId()) if qc.getMetaDataEntry("extruder") is not None else True)}
result = filtered_quality_changes
for q in quality_list:
if q.getId() != "empty_quality":
result[q.getId()] = q
return result, {} #Only return true profiles for now, no metadata. The quality manager is not able to get only metadata yet.
| lgpl-3.0 | -6,902,799,699,902,808,000 | 53.792453 | 166 | 0.692256 | false | 4.248563 | false | false | false |
itsmeolivia/NOT-a-trivial-pursuit | steps/the_gender_binary.py | 1 | 1258 | import sqlite3 as sql
import os
def _url_for_id(id):
return 'http://www.j-archive.com/showplayer.php?player_id=' + str(id)
def the_gender_binary():
print 'Manually classifying genders...'
database_path = os.path.abspath('../archive.db')
con = sql.connect(database_path)
cur = con.cursor()
cur.execute('SELECT * FROM Players WHERE Gender = "andy" or (Gender <> "female" and Gender <> "male")')
unknown_genders = cur.fetchall()
for player in unknown_genders:
id, name, classification = player
check = classification.split('_')
if check[0] == 'mostly':
classification = check[1]
while ((classification != 'male') and
(classification != 'female') and
(classification != 'skip')):
print name,
print 'was classified as',
print classification
print _url_for_id(id)
classification = raw_input('What gender? ')
if classification != 'skip':
cur.execute('UPDATE Players SET Gender=? WHERE Id=?',
(classification, id))
con.commit()
con.close()
print 'Genders classified.'
if __name__ == '__main__':
the_gender_binary()
| mit | -6,991,509,410,899,145,000 | 24.673469 | 107 | 0.566773 | false | 4.019169 | false | false | false |
TheChymera/E2att | analysis/d_prime.py | 1 | 4383 | #!/usr/bin/env python
from __future__ import division
__author__ = 'Horea Christian'
from os import listdir, path
from lefunctions import get_dataframes_for_dp
from scipy.stats import ttest_ind, norm, sem, f_oneway
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import axis
from matplotlib.font_manager import FontProperties
from pylab import figure, show, errorbar, setp, legend
#from statsmodels.stats.anova import anova_lm
globalpath = '~/Data/shared/2att/' #root of results
bh_results = 'bh/' # behavioural test results
cq_results = 'cq/' # questionnaire results
globalpath = path.expanduser(globalpath)
bhpath = globalpath + bh_results
cqpath = globalpath + cq_results
files = [lefile for lefile in listdir(bhpath) if lefile.endswith('.csv')]
ids = [t.split('_',2)[0]+'_'+t.split('_',2)[1] for t in files]
ids = np.unique(ids)
spec = ['6245247_f']
h_dist = ['1236345_f','6779353_f','7310001_f','7714775_m','7816097_m','7865828_m','7922847_m']
l_dist = ['1975801_m','4724273_f','6268973_m','8963557_f','8286497_m','8963557_m','9651558_m','8240877_m','6887665_m','5559429_f','8582941_f','8582941_m','9302438_f','4276763_f','3878418_m','3537898_f','1247497_f','8717741_m','4744495_f','7117377_m']
test = ['chr1_f','chr2_f']
id_list = l_dist
isspec=False
t_cr_au,t_fa_au,t_ht_au,t_ms_au,t_cr_aa,t_fa_aa,t_ht_aa,t_ms_aa,t_cr_uu,t_fa_uu,t_ht_uu,t_ms_uu,all_au_dp,all_aa_dp,all_uu_dp = get_dataframes_for_dp(id_list, bhpath)
if isspec:
_,_,_,_,_,_,_,_,_,_,_,_,s_dp_au,s_dp_aa,s_dp_uu = get_dataframes_for_dp(spec, bhpath)
t_hr_au = t_ht_au / (t_ht_au+t_ms_au)
t_far_au = t_fa_au / (t_cr_au+t_fa_au)
t_zhr_au = norm.ppf(t_hr_au)
t_zfar_au = norm.ppf(t_far_au)
t_dp_au = t_zhr_au-t_zfar_au
t_hr_aa = t_ht_aa / (t_ht_aa+t_ms_aa)
t_far_aa = t_fa_aa / (t_cr_aa+t_fa_aa)
t_zhr_aa = norm.ppf(t_hr_aa)
t_zfar_aa = norm.ppf(t_far_aa)
t_dp_aa = t_zhr_aa-t_zfar_aa
t_hr_uu = t_ht_uu / (t_ht_uu+t_ms_uu)
t_far_uu = t_fa_uu / (t_cr_uu+t_fa_uu)
t_zhr_uu = norm.ppf(t_hr_uu)
t_zfar_uu = norm.ppf(t_far_uu)
t_dp_uu = t_zhr_uu-t_zfar_uu
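# The three blocks above compute the sensitivity index d' = Z(hit rate) - Z(false
# alarm rate), with Z the inverse normal CDF (norm.ppf), separately for the mixed
# (au), attractive-only (aa) and unattractive-only (uu) conditions.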
ids = sorted(id_list)
pos_ids = np.arange(len(ids))
fig = figure(figsize=(pos_ids.max(), 5), dpi=80,facecolor='#eeeeee',tight_layout=True)
ax=fig.add_subplot(1,1,1)
width = 0.7
ax.yaxis.grid(True, linestyle='-', which='major', color='#dddddd',alpha=0.5, zorder = 1)
au_bars = plt.bar(pos_ids, all_au_dp, width/3 ,color='m', alpha=0.4, zorder = 1)
aa_bars = plt.bar(pos_ids+width/3, all_aa_dp, width/3 ,color='#488C0F', alpha=0.4, zorder = 1)
uu_bars = plt.bar(pos_ids+width*2/3, all_uu_dp, width/3 ,color='#0F8C2F', alpha=0.4, zorder = 1)
au_t_bar = plt.bar(pos_ids[-1]+1, np.mean(all_au_dp), width/3 ,color='m', alpha=0.8, zorder = 1)
au_t_err = errorbar(pos_ids[-1]+1+(width/6), np.mean(all_au_dp), yerr=sem(all_au_dp), ecolor='0.1', elinewidth='3', capsize=0, linestyle='None', zorder = 2)
aa_t_bar = plt.bar(pos_ids[-1]+1+width/3, np.mean(all_aa_dp), width/3 ,color='#488C0F', alpha=0.8, zorder = 1)
aa_t_err = errorbar(pos_ids[-1]+1+(width*3/6), np.mean(all_aa_dp), yerr=sem(all_aa_dp), ecolor='0.1', elinewidth='3', capsize=0, linestyle='None', zorder = 2)
uu_t_bar = plt.bar(pos_ids[-1]+1+width*2/3, np.mean(all_uu_dp), width/3,color='#0F8C2F', alpha=0.8, zorder = 1)
uu_t_err = errorbar(pos_ids[-1]+1+(width*5/6), np.mean(all_uu_dp), yerr=sem(all_uu_dp), ecolor='0.1', elinewidth='3', capsize=0, linestyle='None', zorder = 2)
if isspec:
s_au_bars = plt.bar(pos_ids[-1]+2, s_dp_au, width/3 ,color='m', alpha=0.4, zorder = 1)
s_aa_bars = plt.bar(pos_ids[-1]+2+width/3, s_dp_aa, width/3 ,color='#488C0F', alpha=0.4, zorder = 1)
s_uu_bars = plt.bar(pos_ids[-1]+2+width*2/3, s_dp_uu, width/3 ,color='#0F8C2F', alpha=0.4, zorder = 1)
print(f_oneway(all_au_dp,all_aa_dp,all_uu_dp))
if isspec:
ids=ids+['total',spec]
else:ids=ids+['total']
pos_ids = np.arange(len(ids))
ax.set_xlim(0, pos_ids.max()+0.7)
ax.set_ylim(0,9)
ax.set_ylabel('Sensitivity Index (d\')')
ax.set_xlabel('Participant ID')
ax.set_xticks(pos_ids + width/2)
ax.set_xticklabels(ids,fontsize=9,rotation=30)
#setp(ax.set_xticklabels, 'rotation', 'vertical')
for tick in ax.axes.get_xticklines():
tick.set_visible(False)
axis.Axis.zoom(ax.xaxis, -0.3)
legend((au_t_bar,aa_t_bar,uu_t_bar),('Mixed faces','Attractive faces only','Unattractive faces only'), 'upper right', shadow=False, frameon=False, prop= FontProperties(size='11'))
show()
| gpl-3.0 | 6,385,148,462,085,533,000 | 44.185567 | 250 | 0.663244 | false | 2.164444 | false | false | false |
sean-obrien/rpotter | rpotter.py | 1 | 8391 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
_\
\
O O-O
O O
O
Raspberry Potter
Version 0.1.5
Use your own wand or your interactive Harry Potter wands to control the IoT.
Updated for OpenCV 3.2
If you have an older version of OpenCV installed, please uninstall fully (check your cv2 version in python) and then install OpenCV following the guide here (but using version 3.2):
https://imaginghub.com/projects/144-installing-opencv-3-on-raspberry-pi-3/documentation
Copyright (c) 2015-2017 Sean O'Brien. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
import io
import sys
sys.path.insert(1, '/usr/lib/python2.7/dist-packages/picamera')
import picamera
import numpy as np
import cv2
import threading
import math
import time
import pigpio
GPIOS = 32
MODES = ["INPUT", "OUTPUT", "ALT5", "ALT4", "ALT0", "ALT1", "ALT2", "ALT3"]
pi = pigpio.pi()
#pin for Powerswitch (Lumos,Nox)
switch_pin = 16
pi.set_mode(switch_pin,pigpio.OUTPUT)
#pin for Trinket (Colovario)
trinket_pin = 12
pi.set_mode(trinket_pin,pigpio.OUTPUT)
# Parameters for image processing
lk_params = dict( winSize = (15,15),
maxLevel = 2,
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
dilation_params = (5, 5)
movment_threshold = 80
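# movment_threshold caps how far (in pixels) a tracked point may move between
# frames and still have its track drawn (see TrackWand below).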
# Scan starts camera input and runs FindNewPoints
def Scan():
    # cam and stream are shared as module globals with FindNewPoints/TrackWand
    global cam, stream
    cv2.namedWindow("Raspberry Potter")
    stream = io.BytesIO()
    cam = picamera.PiCamera()
cam.resolution = (640, 480)
cam.framerate = 24
try:
while True:
FindNewPoints()
except KeyboardInterrupt:
End()
exit
#FindWand is called to find all potential wands in a scene. These are then tracked as points for movement. The scene is reset every 3 seconds.
def FindNewPoints():
global old_frame,old_gray,p0,mask,color,ig,img,frame
try:
try:
old_frame = cam.capture(stream, format='jpeg')
except:
print("resetting points")
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
old_frame = cv2.imdecode(data, 1)
cv2.flip(old_frame,1,old_frame)
old_gray = cv2.cvtColor(old_frame,cv2.COLOR_BGR2GRAY)
#cv2.equalizeHist(old_gray,old_gray)
#old_gray = cv2.GaussianBlur(old_gray,(9,9),1.5)
#dilate_kernel = np.ones(dilation_params, np.uint8)
#old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
#TODO: trained image recognition
p0 = cv2.HoughCircles(old_gray,cv2.HOUGH_GRADIENT,3,100,param1=100,param2=30,minRadius=4,maxRadius=15)
p0.shape = (p0.shape[1], 1, p0.shape[2])
p0 = p0[:,:,0:2]
mask = np.zeros_like(old_frame)
ig = [[0] for x in range(20)]
print("finding...")
TrackWand()
#This resets the scene every three seconds
threading.Timer(3, FindNewPoints).start()
except:
e = sys.exc_info()[1]
print("FindWand Error: %s" % e )
End()
exit
def TrackWand():
global old_frame,old_gray,p0,mask,color,ig,img,frame
color = (0,0,255)
try:
old_frame = cam.capture(stream, format='jpeg')
except:
print("resetting points")
data = np.fromstring(stream.getvalue(), dtype=np.uint8)
old_frame = cv2.imdecode(data, 1)
cv2.flip(old_frame,1,old_frame)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
#cv2.equalizeHist(old_gray,old_gray)
#old_gray = cv2.GaussianBlur(old_gray,(9,9),1.5)
#dilate_kernel = np.ones(dilation_params, np.uint8)
#old_gray = cv2.dilate(old_gray, dilate_kernel, iterations=1)
# Take first frame and find circles in it
p0 = cv2.HoughCircles(old_gray,cv2.HOUGH_GRADIENT,3,100,param1=100,param2=30,minRadius=4,maxRadius=15)
try:
p0.shape = (p0.shape[1], 1, p0.shape[2])
p0 = p0[:,:,0:2]
except:
print("No points found")
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while True:
frame = cam.capture(stream, format='jpeg')
data2 = np.fromstring(stream.getvalue(), dtype=np.uint8)
frame = cv2.imdecode(data2, 1)
cv2.flip(frame,1,frame)
frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#equalizeHist(frame_gray,frame_gray)
#frame_gray = GaussianBlur(frame_gray,(9,9),1.5)
#dilate_kernel = np.ones(dilation_params, np.uint8)
#frame_gray = cv2.dilate(frame_gray, dilate_kernel, iterations=1)
try:
# calculate optical flow
p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
# Select good points
good_new = p1[st==1]
good_old = p0[st==1]
# draw the tracks
for i,(new,old) in enumerate(zip(good_new,good_old)):
a,b = new.ravel()
c,d = old.ravel()
# only try to detect gesture on highly-rated points (below 15)
if (i<15):
IsGesture(a,b,c,d,i)
dist = math.hypot(a - c, b - d)
if (dist<movment_threshold):
cv2.line(mask, (a,b),(c,d),(0,255,0), 2)
cv2.circle(frame,(a,b),5,color,-1)
cv2.putText(frame, str(i), (a,b), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0,0,255))
except IndexError:
print("Index error")
End()
break
except:
e = sys.exc_info()[0]
print("TrackWand Error: %s" % e )
End()
break
img = cv2.add(frame,mask)
cv2.putText(img, "Press ESC to close.", (5, 25),
cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,255,255))
cv2.imshow("Raspberry Potter", frame)
# get next frame
frame = cam.capture(stream, format='jpeg')
data3 = np.fromstring(stream.getvalue(), dtype=np.uint8)
frame = cv2.imdecode(data3, 1)
# Now update the previous frame and previous points
old_gray = frame_gray.copy()
p0 = good_new.reshape(-1,1,2)
#Spell is called to translate a named spell into GPIO or other actions
def Spell(spell):
    #clear all checks (ig is the shared gesture history)
    global ig
    ig = [[0] for x in range(15)]
#Invoke IoT (or any other) actions here
cv2.putText(mask, spell, (5, 25),cv2.FONT_HERSHEY_SIMPLEX, 1.0, (255,0,0))
if (spell=="Colovaria"):
print("GPIO trinket")
pi.write(trinket_pin,0)
time.sleep(1)
pi.write(trinket_pin,1)
elif (spell=="Lumos"):
print("GPIO ON")
pi.write(switch_pin,1)
elif (spell=="Nox"):
print("GPIO OFF")
pi.write(switch_pin,0)
print("CAST: %s" %spell)
#IsGesture is called to determine whether a gesture is found within tracked points
def IsGesture(a,b,c,d,i):
print("point: %s" % i)
#record basic movements - TODO: trained gestures
if ((a<(c-5))&(abs(b-d)<1)):
ig[i].append("left")
elif ((c<(a-5))&(abs(b-d)<1)):
ig[i].append("right")
elif ((b<(d-5))&(abs(a-c)<5)):
ig[i].append("up")
elif ((d<(b-5))&(abs(a-c)<5)):
ig[i].append("down")
#check for gesture patterns in array
astr = ''.join(map(str, ig[i]))
if "rightup" in astr:
Spell("Lumos")
elif "rightdown" in astr:
Spell("Nox")
elif "leftdown" in astr:
Spell("Colovaria")
print(astr)
def End():
cam.close()
cv2.destroyAllWindows()
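# Entry point: start the camera scan loop now that all functions are defined
Scan()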
| mit | -5,147,636,503,961,173,000 | 36.627803 | 471 | 0.621618 | false | 3.117013 | false | false | false |
jjongbloets/julesTk | julesTk/view/window.py | 1 | 1675 | """Provides TopLevel views; i.e. Windows"""
from julesTk.view import tk, BaseView
from julesTk.view.viewset import BaseViewSet
__author__ = "Joeri Jongbloets <joeri@jongbloets>"
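# Window wraps tk.Toplevel as a julesTk view; concrete subclasses are expected
# to build their widgets in _prepare(), which is left unimplemented below.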
class Window(tk.Toplevel, BaseView):
def __init__(self, parent, controller):
tk.Toplevel.__init__(self, parent)
BaseView.__init__(self, parent, controller)
self.protocol("WM_DELETE_WINDOW", self.exit)
@property
def root(self):
"""Return the root view
:rtype: Tkinter.Tk or tkinter.Tk
"""
result = self.parent
if self.controller is not None:
result = self.controller.root
elif isinstance(result, BaseView):
result = self.parent.root
return result
@property
def application(self):
result = self.parent
if self.controller is not None:
result = self.controller.application
elif isinstance(result, BaseView):
result = self.parent.application
return result
def _prepare(self):
raise NotImplementedError
def _show(self):
self.deiconify()
def _hide(self):
self.withdraw()
return True
def _close(self):
if self.controller is not None and not self.controller.is_stopped():
self.controller.stop()
self.destroy()
return True
def exit(self):
self.close()
class WindowViewSet(Window, BaseViewSet):
"""A window that can contain multiple views"""
def _prepare(self):
raise NotImplementedError
def _close(self):
BaseViewSet.close_views(self)
return super(WindowViewSet, self)._close()
| mit | -6,477,295,053,180,432,000 | 24.378788 | 76 | 0.614925 | false | 4.177057 | false | false | false |
foxbunny/seagull | seagull/gallery/index.py | 1 | 5989 | #
# Seagull photo gallery app
# Copyright (C) 2016 Hajime Yamasaki Vukelic
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
import os
import locale
import hashlib
import logging
import functools
import urllib.request
from pathlib import PosixPath, WindowsPath
class Entry:
"""
This class encapsulates a single gallery entry. It contains information
about the file path, extension, modification timestamp, and file size.
    Entries are created from ``os.DirEntry`` objects returned by calls such as
    ``os.scandir()``.
    Instantiating this class doubles as path validation. Passing objects that
    have any of the following characteristics results in a ``ValueError``:
- path is a directory
- path has no extension
    - path has an extension that is not supported
- path starts with an underscore
- file at the path has 0 size
"""
#: Supported extensions
EXTENSIONS = ('.jpg', '.png', '.gif')
def __init__(self, dentry):
self.validate(dentry)
self.path = dentry.path
self.name = dentry.name
self.ext = os.path.splitext(self.name)[1]
self.size = dentry.stat().st_size
self.mtime = dentry.stat().st_mtime
self._hash = None
@property
def hash(self):
"""
MD5 hash of the path
"""
if not self._hash:
md5 = hashlib.md5()
md5.update(self.path.encode('utf8'))
self._hash = md5.hexdigest()
return self._hash
@classmethod
def from_path(cls, path):
"""
Instantiate an entry from a path (string)
"""
try:
pentry = WindowsPath(path)
except NotImplementedError:
pentry = PosixPath(path)
return cls(pentry)
def validate(self, dentry):
"""
Validate the ``os.DirEntry`` object for use with ``Entry`` class
"""
path = dentry.path
# Is a directory
if dentry.is_dir():
raise ValueError('{} is a directory'.format(path))
if dentry.name.startswith('_'):
            raise ValueError('{} starts with an underscore'.format(path))
if dentry.stat().st_size <= 0:
raise ValueError('{} is an empty file'.format(path))
if '.' not in dentry.name:
raise ValueError('{} has no extension'.format(path))
if os.path.splitext(dentry.name)[1].lower() not in self.EXTENSIONS:
raise ValueError('{} has unsupported extension'.format(path))
@staticmethod
def cmp(entry):
"""
Comparison function to be used in ``key`` arguments when sorting
"""
collated_cmp = functools.cmp_to_key(locale.strcoll)
return collated_cmp(entry.path)
def __hash__(self):
return int(self.hash, 16)
def __str__(self):
return self.path
class Index:
"""
This class encapsulates the gallery index information (file list) and the
related methods. This object should be instantiated once and then used as
the authoritative source on the state of the gallery folder.
The Index also behaves as a container for the ``Entry`` objects and can be
iterated over, tested for inclusion, reversed, etc.
The entries in the gallery index are sorted alphabetically with full
support for Unicode collation according to currently active system locale.
"""
def __init__(self, path):
if not os.path.isdir(path):
raise ValueError('{} is missing or not a directory'.format(path))
self.path = path
self.entries = []
self.last_update = None
logging.debug('Setting up index for %s', self.path)
def check_last_update(self, entry):
"""
Update ``last_update`` property if ``entry`` is newer.
"""
if not self.last_update:
self.last_update = entry.mtime
if entry.mtime > self.last_update:
self.last_update = entry.mtime
def rescan(self):
"""
Perform full rescan of the gallery directory.
"""
entries = []
for dentry in os.scandir(self.path):
try:
entry = Entry(dentry)
except ValueError:
logging.debug('Omitted %s from gallery', dentry.path)
continue
self.check_last_update(entry)
entries.append(entry)
self.entries = self.sort(entries)
logging.debug('Added %s items to the index', len(self.entries))
def sort(self, entries=None):
"""
Sort the entries alphabetically
"""
entries = entries or self.entries
logging.debug('Sorting items')
entries.sort(key=Entry.cmp)
return entries
def get_relpath(self, entry):
"""
Return path of an entry relative to the gallery base path.
"""
# FIXME: This needs to guard against directory traversal
return os.path.relpath(entry.path, self.path)
def get_urlpath(self, entry):
"""
Return path of an entry relative to the gallery base path as posix url
"""
rpath = self.get_relpath(entry)
return urllib.request.pathname2url(rpath)
def __len__(self):
return len(self.entries)
def __getitem__(self, key):
return self.entries[key]
def __reversed__(self):
return reversed(self.entries)
def __contains__(self, item):
return item in self.entries
def __iter__(self):
return iter(self.entries)
| gpl-3.0 | -8,195,022,967,904,559,000 | 30.521053 | 79 | 0.615963 | false | 4.262633 | false | false | false |
glenjarvis/trestle | src/blog_project/blog/routes.py | 1 | 2863 | #!/usr/bin/env python
"""Additional URL/View routing for URL patterns and views
Django matches URL patterns based solely on pattern (not on pattern and HTTP
Verb). Other frameworks match on both.
In order to follow a more traditional convention as seen in other frameworks,
these methods will additionally route via the HTTP Verb.
For example, the index() and the create() 'actions' that are convention in
other frameworks have the same Django URL pattern. However, the GET requests
are routed to index() and POST requests are routed to create().
"""
from django import http
from blog import views
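# Example wiring from a urlconf (illustrative sketch only; the real patterns
# live in the project's urls.py and may differ):
#
#   url(r'^blog/$', routes.index_create),
#   url(r'^blog/new/$', routes.new),
#   url(r'^blog/(?P<slug>[-\w]+)/$', routes.show_update_destroy),
#   url(r'^blog/(?P<slug>[-\w]+)/edit/$', routes.edit),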
def index_create(request):
"""This meta-view handles index/create second level routing
GET /blog -> blog.views.index
POST /blog -> blog.views.create
See also the blog.routes module documentation (above)
"""
if request.method == "GET":
return views.index(request)
if request.method == "POST":
return views.create(request)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
def show_update_destroy(request, slug):
"""This meta-view handles show/update/destroy second level routing
GET /blog/<slug> -> blog.views.show
PUT /blog/<slug> -> blog.views.update
DELETE /blog/<slug> -> blog.views.destroy
See also the blog.routes module documentation (above)
"""
# Look for overloaded POSTS first
if request.method == "POST":
_method = request.POST.get("_method", None)
if _method is not None:
setattr(request, _method, request.POST)
request.method = _method
if request.method == "GET":
return views.show(request, slug)
if request.method == "PUT":
return views.update(request, slug)
if request.method == "DELETE":
return views.destroy(request, slug)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
def new(request):
"""This meta-view handles new second level routing
GET /blog/new -> blog.views.new
See the blog.routes module documentation (above)
"""
if request.method == "GET":
return views.new(request)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
def edit(request, slug):
"""This meta-view handles new second level routing
GET /blog/<slug>/edit -> blog.views.edit
See the blog.routes module documentation (above)
"""
if request.method == "GET":
return views.edit(request, slug)
return http.HttpResponseNotFound(
'<h1>This is not a route for HTTP verb {0}</h1>'.format(
request.method))
| bsd-3-clause | -7,224,021,736,548,871,000 | 26.528846 | 77 | 0.645477 | false | 4.078348 | false | false | false |
hanya/MRI | pythonpath/mytools_Mri/__init__.py | 1 | 27043 | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import traceback
import uno
from mytools_Mri import engine, node, values
from mytools_Mri.type import ExtType2, ExtAnyType2
from mytools_Mri.unovalues import MethodConcept, PropertyConcept, \
PropertyAttribute, ParamMode, TypeClass, TypeClassGroups
from mytools_Mri.config import Config
from com.sun.star.beans import UnknownPropertyException, PropertyVetoException
from com.sun.star.lang import WrappedTargetException, IllegalArgumentException
from com.sun.star.reflection import InvocationTargetException
class CancelException(Exception):
pass
Entry = engine.Entry
class RootEntry(node.Root):
pass
import mytools_Mri.web
import mytools_Mri.macros
from mytools_Mri.cg import CGMode, CGType, CodeEntry, CodeGenerator
class MRI(object):
def __init__(self, ctx, ui_class):
self.ctx = ctx
if values.MRI_DIR is None:
values.set_mri_dir(ctx)
self.config = Config(ctx)
self.config.property_only = False
self.web = mytools_Mri.web.create_IDL_opener(self, self.config, self.config.ref_by_doxygen)
self.engine = engine.MRIEngine(ctx)
self.history = RootEntry()
self.current = self.history
self.cg = CodeGenerator(self.config.code_type, False, True)
self.mode = True
self.open_new = False
self.macros = mytools_Mri.macros.Macros(self)
self.ui = ui_class(ctx, self)
def inspect(self, name, target):
try:
self.history.code_entry = None
entry = self.engine.create(self, name, target)
entry.code_entry = self.code(
type=CGType.NONE, key=name,
value_type=entry.type, args="", parent="ROOT", idl=None)
self.action_by_type(entry)
except Exception as e:
print(e)
traceback.print_exc()
def code(self, *args, **kwds):
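        """ Register a code generation entry and notify the UI when broadcasting is enabled. """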
try:
if not "parent" in kwds:
kwds["parent"] = self.current.code_entry
code_entry = self.cg.add(**kwds)
if self.mode:
self.ui.code_updated()
return code_entry
except Exception as e:
print(e)
traceback.print_exc()
return None
def set_mode(self, state):
""" Set mode which broadcast to ui or not. """
self.mode = not not state
def message(self, message, title=''):
"""shows message."""
if self.mode:
self.ui.message(message, title)
def error(self, message, title='Error'):
"""shows error."""
if self.mode:
self.ui.error(message, title)
def status(self, message):
"""status message."""
if self.mode:
self.ui.status(message)
def update_config(self, store=False):
"""change config."""
config = self.config
self.macros.set_user_dir(config.macros)
self.web.set_browser(config.browser)
self.web.set_sdk_path(config.sdk_path)
if store:
self.config.write()
def change_entry(self, entry):
if self.open_new:
self.open_new = False
self.create_service(
'mytools.Mri', nocode=True).inspect(entry.target)
else:
self.current.append_child(entry)
self.current = entry
self.ui.entry_changed(history=True, update=self.mode)
return entry
def set_current(self, entry):
self.current = entry
def change_history(self, index=None, entry=None):
if entry is None:
entry = self.history.get_history_entry(index)
#self.set_current(entry)
if entry != self.history:
self.current = entry
self.ui.entry_changed(history=False)
return True
def get_property_value(self, name):
entry = self.current
target = entry.target
inspected = entry.inspected
# normal property
if entry.has_interface("com.sun.star.beans.XPropertySet"):
psinfo = target.getPropertySetInfo()
if psinfo and psinfo.hasPropertyByName(name):
try:
value = target.getPropertyValue(name)
temp_type = psinfo.getPropertyByName(name).Type
if temp_type is None:
temp_type = uno.Type("any", TypeClass.ANY)
entry = self.engine.create(self, name, value)
idl = entry.type
ext_type = ExtType2(entry, self.engine,
temp_type.typeName, temp_type.typeClass)
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.PROP ,mode=CGMode.GET, key=name, value_type=entry.type, idl=idl)
return self.action_by_type(entry)
except Exception as e:
self.error("Exception, to get property: %s, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return
else:
raise
# pseud property
if inspected.hasMethod("get%s" % name, MethodConcept.ALL):
return self.call_method("get%s" % name, pseud=True)
elif inspected.hasMethod("is%s" % name, MethodConcept.ALL):
return self.call_method("is%s" % name, pseud=True)
elif inspected.hasMethod("set%s" % name, MethodConcept.ALL):
return self.status("Write only pseud property: %s" % name)
# interface attributes
if inspected.hasProperty(name, PropertyConcept.ATTRIBUTES):
psinfo = inspected.getProperty(name, PropertyConcept.ATTRIBUTES)
try:
value = getattr(target, name)
entry = self.engine.create(self, name, value)
#temp_type = entry.type
#if temp_type.getTypeClass() == TypeClass.SEQUENCE:
ext_type = ExtType2(entry, self.engine,
psinfo.Type.typeName, psinfo.Type.typeClass)
entry.type = ext_type
attr_def = self.engine.find_attribute_interface(
self.current, name)
if attr_def is False: attr_def = ""
entry.code_entry = self.code(
type=CGType.ATTR, mode=CGMode.GET, key=name, value_type=entry.type, idl=attr_def)
return self.action_by_type(entry)
except Exception as e:
self.error("Exception, to get attribute: %s, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return
else:
raise
# XVclWindowPeer
if entry.has_interface("com.sun.star.awt.XVclWindowPeer"):
try:
value = target.getProperty(name)
temp_type = inspected.getProperty(name, PropertyConcept.ALL).Type
if temp_type is None:
temp_type = uno.Type("any", TypeClass.ANY)
entry = self.engine.create(self, name, value)
# ToDo code
return self.action_by_type(entry)
except Exception as e:
self.error("Exception, to get %s, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return
else:
raise
def set_property_value(self, name, get_value=None, arg=None, get_args=None):
entry = self.current
target = entry.target
# normal property
if entry.has_interface("com.sun.star.beans.XPropertySet"):
psinfo = target.getPropertySetInfo()
if psinfo.hasPropertyByName(name):
pinfo = psinfo.getPropertyByName(name)
if pinfo.Attributes & PropertyAttribute.READONLY:
raise Exception("%s read-only property." % name)
if self.mode:
try:
old_value = target.getPropertyValue(name)
arg = get_value(name, pinfo.Type.typeName, pinfo.Type.typeClass,
("", ""), "current: " + str(old_value))
except CancelException:
return
except Exception as e:
self.status(str(e))
return
try:
if self.mode:
_arg, _any = self.extract_args(arg)
target.setPropertyValue(name, _arg)
entry = self.engine.create(self, name, _arg)
else:
# ToDo any
_arg, _any = self.extract_args(arg)
target.setPropertyValue(name, _arg)
entry = self.engine.create(self, name, _arg)
p_type = pinfo.Type
ext_type = ExtType2(entry, self.engine, p_type.typeName, p_type.typeClass)
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.PROP, mode=CGMode.SET, key=name, value_type=entry.type, args=arg, idl=entry.type)
except WrappedTargetException as e:
te = e.TargetException
self.error("Exception: %s" % te.Message)
            except IllegalArgumentException as e:
                self.error("Illegal value for %s property." % name)
            except PropertyVetoException as e:
                self.error("Veto to set the %s property value." % name)
except UnknownPropertyException as e:
self.error("Unknown property! %s" % e)
except Exception as e:
self.error("Exception, to set %s property, %s" % (name, str(e)))
traceback.print_exc()
if self.mode:
return True
else:
return None
elif entry.inspected.hasProperty(name, PropertyConcept.ATTRIBUTES):
pinfo = entry.inspected.getProperty(name, PropertyConcept.ATTRIBUTES)
if pinfo.Attributes & PropertyAttribute.READONLY:
self.status("Attribute %s is readonly." % name)
raise Exception("%s read-only property." % name)
if self.mode:
try:
old_value = getattr(target, name)
arg = get_value(name, pinfo.Type.typeName, pinfo.Type.typeClass,
("", ""), "current: " + str(old_value))
except Exception as e:
return
try:
if self.mode:
setattr(target, name, arg)
entry = self.engine.create(self, name, arg)
else:
_arg, _any = self.extract_args(arg)
setattr(target, name, _arg)
entry = self.engine.create(self, name, _arg)
p_type = pinfo.Type
ext_type = ExtType2(entry, self.engine,
p_type.typeName, p_type.typeClass)
entry.type = ext_type
attr_def = self.engine.find_attribute_interface(
self.current, name)
if attr_def is False: attr_def = ""
entry.code_entry = self.code(
type=CGType.ATTR, mode=CGMode.SET, key=name, value_type=entry.type, args=arg, idl=attr_def)
except Exception as e:
print(("Error to set attribute: " + str(e)))
traceback.print_exc()
return None
method_name = "set%s" % name
if not entry.inspected.hasMethod(method_name, MethodConcept.ALL):
self.status("Property %s is readonly." % name)
if self.mode:
return
else:
raise AttributeError("Unknown method %s" % name)
return self.call_method(method_name, get_args=get_args, args=(arg,), pseud=True)
def call_method(self, name, get_args=None, args=None, pseud=False):
""" Frontend to invoke method. """
method = self.engine.get_method_info(self.current, name, raw=True)
if method is None: return
param_infos = method.getParameterInfos()
if self.mode:
if 0 < len(param_infos):
try:
if get_args:
args = tuple(get_args(method))
except CancelException:
return
except:
traceback.print_exc()
return
else:
args = ()
try:
return self.invoke_method(method, args, pseud=pseud)
except Exception as e:
self.status(str(e))
traceback.print_exc()
if self.mode:
return
else:
raise
def extract_args(self, args):
""" Extract value from Entry instance. """
_any = False
if isinstance(args, tuple) or isinstance(args, list):
a = []
for arg in args:
v, __any = self.extract_args(arg)
a.append(v)
if __any:
_any = True
return tuple(a), _any
else:
if isinstance(args, Entry):
target = args.get_target()
extracted, __any = self.extract_args(target)
if isinstance(target, uno.Any) or __any:
_any = True
return extracted, _any
else:
return args, _any
def get_out_param_index(self, idl):
""" Returns list of out/inout param indexes. """
params = idl.getParameterInfos()
if params:
return [i for i, info in enumerate(params)
if info.aMode == ParamMode.OUT or info.aMode == ParamMode.INOUT]
else:
return None
def invoke_method(self, method, args, name=None, pseud=False):
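        """ Invoke the given idl method on the current target and wrap the result (and any out-parameters) as Entry objects. """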
try:
if not name:
if args:
name = "%s(%s)" % (method.getName(),
", ".join([str(a) for a in args]))
else:
name = "%s()" % method.getName()
out_params = self.get_out_param_index(method)
if self.mode:
_args, _any = self.extract_args(args)
value, d = method.invoke(self.current.target, _args)
else:
_args, _any = self.extract_args(args)
if _any:
value, d = uno.invoke(method, "invoke", (self.current.target, _args))
else:
value, d = method.invoke(self.current.target, _args)
ret_type = method.getReturnType()
entry = self.engine.create(self, name, value)
if ret_type.getTypeClass() == TypeClass.ANY:
# check the method from container
if self.engine.check_method_from_container(method):
_type = self.current.target.getElementType()
ret_type = self.engine.for_name(_type.typeName)
# added to solve problem on new configuration
if ret_type.getTypeClass() == TypeClass.VOID:
ret_type = self.engine.get_type(entry)
entry.type = ret_type
value_type = ExtAnyType2(entry, self.engine, ret_type.getName(), ret_type.getTypeClass())
entry.type = value_type
if pseud:
code_type = CGType.PSEUD_PROP
else:
code_type = CGType.METHOD
entry.code_entry = self.code(
type=code_type, key=method.getName(),
value_type=value_type, args=args, idl=method)
if out_params:
param_infos = method.getParameterInfos()
_d = []
for i, v in zip(out_params, d):
_key = "%s_%s" % (name, i)
_entry = self.engine.create(self, _key, v)
_entry.type = param_infos[i]
type = _entry.type.aType
_entry.type = ExtType2(_entry, self.engine, type.getName(), type.getTypeClass())
_d.append(_entry)
_entry.code_entry = args[i].code_entry
ret = self.action_by_type(entry)
return (ret,) + tuple(_d)
else:
return self.action_by_type(entry)
except InvocationTargetException as e:
te = e.TargetException
self.error("Method: %s invocation exception.\nError Message: \n%s" % (
method.getName(), te.Message))
traceback.print_exc()
except Exception as e:
self.error("Method: %s unknown exception.\nError Message: \n%s" % (
name, str(e)))
traceback.print_exc()
def get_struct_element(self, name):
""" Get field value from current struct. """
entry = self.current
target = entry.target
try:
found = self.engine.find_field(name, self.engine.get_type(entry))
except:
return
try:
value = getattr(target, name)
field_type = found.getType()
if field_type == None:
field_type = self.engine.reflection.getType(value)
entry = self.engine.create(self, name, value)
# ToDo
ext_type = ExtAnyType2(entry, self.engine,
field_type.getName(), field_type.getTypeClass())
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.FIELD, mode=CGMode.GET, key=name, value_type=entry.type, idl=self.engine.get_type(self.current))
return self.action_by_type(entry)
except Exception as e:
print(("Error: get_struct_element, " + str(e)))
traceback.print_exc()
def set_struct_element(self, name, value=None, get_value=None):
entry = self.current
target = entry.target
try:
found = self.engine.find_field(name, self.engine.get_type(entry))
except:
return
if self.mode:
try:
if get_value:
old_value = getattr(target, name)
value = get_value(name, found.getType().getName(), found.getType().getTypeClass(),
("", ""), "current: " + str(old_value))
except Exception as e:
print(e)
return
try:
if self.mode:
_arg, _any = self.extract_args(value)
setattr(target, name, _arg)
entry = self.engine.create(self, name, _arg)
else:
_arg, _any = self.extract_args(value)
setattr(target, name, _arg)
entry = self.engine.create(self, name, _arg)
field_type = found.getType()
if field_type == None:
field_type = self.engine.reflection.getType(value)
ext_type = ExtType2(entry, self.engine,
field_type.getName(), field_type.getTypeClass())
entry.type = ext_type
entry.code_entry = self.code(
type=CGType.FIELD, mode=CGMode.SET, key=name, value_type=entry.type, args=value, idl=self.engine.get_type(self.current))
except Exception as e:
print(("Error: get_struct_element, " + str(e)))
traceback.print_exc()
def action_by_type(self, entry):
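        """ Show simple values directly; descend into interfaces, structs and sequences by making them the current entry. """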
if entry.target is None:
if self.mode:
return self.message("void")
else:
return None
type_name = entry.type.getName()
type_class = entry.type.getTypeClass()
if not self.mode and type_name in values.IGNORED_INTERFACES:
return self.error(
"You can not inspect \n%s \ntype value, sorry." % type_name,
"Listed in the IGNORED_INTERFACES list.")
try:
if type_class == TypeClass.ANY:
value_type = ExtAnyType2(entry, self.engine)
type_name = value_type.getName()
type_class = value_type.getTypeClass()
if type_class in TypeClassGroups.NUMERIC:
self.message(str(entry.target), type_name)
elif type_class == TypeClass.STRING:
if entry.target:
value = entry.target
else:
value = ""
self.message(value, type_name)
elif type_class == TypeClass.BOOLEAN:
self.message(str(entry.target), type_name)
elif type_class == TypeClass.INTERFACE:
self.change_entry(entry)
elif type_class == TypeClass.STRUCT:
self.change_entry(entry)
elif type_class == TypeClass.SEQUENCE:
self.change_entry(entry) # ToDo
elif type_class == TypeClass.ENUM:
self.message("%s.%s" % (type_name, entry.target.value), type_name)
elif type_class == TypeClass.BYTE:
self.message("%s" % entry.target, type_name)
elif type_class == TypeClass.TYPE:
self.message(entry.target.typeName, type_name)
elif type_class == TypeClass.VOID:
self.message("void", type_name)
elif type_class == TypeClass.CHAR:
self.message(entry.target.value, type_name)
else:
try:
self.message(str(entry.target), "unknown type")
except:
self.error("Error: value to string conversion.")
except Exception as e:
print(e)
print(("%s, %s" % (type_name, type_class)))
traceback.print_exc()
return entry
def manage_sequence(self, entry, k=None):
if len(entry.target) == 0:
self.message("empty sequence")
return None
value_type = entry.type
try:
c_type = value_type.getComponentType()
except:
value_type = self.engine.get_type(entry)
c_type = value_type.getComponentType()
comp_type = None
if c_type.getTypeClass() == TypeClass.SEQUENCE:
comp_type = self.engine.get_component_base_type(value_type)
type_class = c_type.getTypeClass()
#if not self.mode:
value = entry.target[k]
new_entry = self.engine.create(self, "[%s]" % k, value)
new_entry.type = c_type
new_entry.code_entry = self.code(
type=CGType.ELEMENT, mode=CGMode.GET, key=k, value_type=new_entry.type, idl=new_entry.type)
if type_class == TypeClass.INTERFACE or type_class == TypeClass.ANY:
self.change_entry(new_entry)
elif type_class == TypeClass.STRUCT:
self.change_entry(new_entry)
elif type_class == TypeClass.SEQUENCE:
self.change_entry(new_entry)
else:
self.action_by_type(new_entry)
return new_entry
def _get_value(self, args):
if isinstance(args, tuple):
return tuple([self._get_value(arg) for arg in args])
else:
return args.target if isinstance(args, Entry) else args
# for macros
def get_component_context(self):
entry = self.engine.create(self, "XComponentContext", self.ctx)
entry.code_entry = self.code(
type=CGType.CONTEXT, key="XComponentContext", value_type=entry.type, idl=entry.type)
return entry
def assign_element(self, k, value, append=False):
entry = self.current
self.code(
type=CGType.ELEMENT, mode=CGMode.SET, key=k, value_type=entry.type.getComponentType(), idl=entry.type.getComponentType(), args=value, misc=append)
def create_service(self, name, *args, **kwds):
"""
if args:
_args, _any = self.extract_args(args)
if _any:
obj, d = uno.invoke(self.ctx.getServiceManager(), "createInstanceWithArgumentsAndContext", (name, _args, self.ctx))
else:
obj = self.ctx.getServiceManager().\
createInstanceWithArgumentsAndContext(name, _args, self.ctx)
else:
"""
obj = self.ctx.getServiceManager().\
createInstanceWithContext(name, self.ctx)
if "nocode" in kwds: return obj
entry = self.engine.create(self, name, obj)
entry.code_entry = self.code(
type=CGType.SERVICE, key=name, value_type=entry.type,
args=args, idl=entry.type)
return entry
# ToDo initial arguments
def create_struct(self, type_name, *args, **kwds):
_args, _any = self.extract_args(args)
struct = uno.createUnoStruct(type_name, *_args)
if "nocode" in kwds: return struct
entry = self.engine.create(self, type_name, struct)
entry.code_entry = self.code(
type=CGType.STRUCT, key=type_name, value_type=entry.type, idl=entry.type, args=args)
return entry
# ToDo allows to pass initial values?
def create_sequence(self, type_name, length, var=None):
entry = self.engine.create(self, type_name, ())
entry.target = []
entry.type = self.engine.for_name(type_name)
entry.code_entry = self.code(
type=CGType.SEQ, key=type_name, value_type=entry.type, idl=entry.type, args=length, misc=var)
return entry
def declare_variable(self, type_name, value):
entry = self.engine.create(self, type_name, value)
entry.type = self.engine.for_name(type_name)
entry.code_entry = self.code(
type=CGType.VARIABLE, args=value, key=type_name, value_type=entry.type, idl=entry.type)
return entry
| apache-2.0 | 5,764,995,281,496,107,000 | 41.188768 | 158 | 0.531598 | false | 4.161102 | true | false | false |
ritstudentgovernment/chargeflask | app/invitations/controllers.py | 1 | 8705 | """
filename: controllers.py
description: Controllers for email invitations.
created by: Omar De La Hoz ([email protected])
created on: 10/12/17
"""
from flask_socketio import emit
from app.decorators import ensure_dict, get_user
from app import app, db, socketio
from app.users.models import Users
from app.invitations.models import Invitations
from app.invitations.invitations_response import Response
from flask import render_template
from sqlalchemy import and_
from app.email.models import huey
from app.email.controllers import send_email
import time
##
## @brief Sends an invitation to join a committee
## when user doesn't exist in ChargeTracker.
##
## @param committee The committee to join.
## @param new_user The user to be invited.
##
## @return True if email sent, False if not.
##
def send_invite(new_user, committee):
invite = and_(
Invitations.user_name == new_user,
Invitations.committee_id == committee.id,
Invitations.isInvite == True
)
if Invitations.query.filter(invite).first() is not None:
return Response.InviteExists
invitation = Invitations(
user_name= new_user,
committee= committee,
committee_id = committee.id,
charge_id = None,
isInvite= True
)
try:
db.session.add(invitation)
db.session.commit()
email = {}
email["title"] = "You're Invited"
email["sender"]=("SG TigerTracker", "[email protected]")
email["recipients"] = [new_user + "@rit.edu"]
email["subtype"] = "related"
email["html"] = render_template(
'committee_invitation.html',
user_name= new_user,
committee_name= committee.title,
committee_head= committee.head,
time_stamp= time.time(),
app_url= app.config['CLIENT_URL'] + str(invitation.id)
)
if not app.config['TESTING']:
send_email(email)
return Response.InviteSent
except Exception as e:
db.session.rollback()
return Response.InviteError
##
## @brief Sends a request email to join a committee
## to the committee head.
##
## @param new_user The user to be added.
## @param committee The committee to join.
##
## @return True if email sent, False if not.
##
def send_request(new_user, committee):
invite = and_(
Invitations.user_name == new_user.id,
Invitations.committee_id == committee.id,
Invitations.isInvite == False
)
if Invitations.query.filter(invite).first() is not None:
return Response.RequestExists
invitation = Invitations(
user_name= new_user.id,
committee= committee,
committee_id = committee.id,
charge_id = None,
isInvite= False
)
try:
db.session.add(invitation)
db.session.commit()
email = {}
email["title"] = "Great news, " + new_user.id + " wants to join!"
email["sender"] = ("SG TigerTracker", "[email protected]")
email["recipients"] = [committee.head + "@rit.edu"]
email["subtype"] = "related"
email["html"] = render_template(
'committee_request.html',
user_name= new_user.id,
committee_head= committee.head,
committee_name= committee.title,
time_stamp= time.time(),
request_url= app.config['CLIENT_URL'] + str(invitation.id)
)
if not app.config['TESTING']:
send_email(email)
return Response.RequestSent
except Exception as e:
db.session.rollback()
return Response.RequestError
##
## @brief      Sends a charge-close request to the site administrators.
##
## @param      user       The user requesting that the charge be closed.
## @param      committee  The committee the charge belongs to.
## @param      chargeID   The charge to be closed.
##
## @return True if email sent, False if not.
##
def send_close_request(user, committee, chargeID):
admins = db.session.query(Users).filter(Users.is_admin == True).all()
admin_emails = []
    for admin in admins:
        admin_emails.append(admin.id + "@rit.edu")
invite = and_(
Invitations.user_name == committee.head,
Invitations.committee_id == committee.id,
Invitations.isInvite == False
)
invitation = Invitations (
user_name= user.id,
committee= committee,
committee_id = committee.id,
charge_id = chargeID,
isInvite=False
)
try:
db.session.add(invitation)
db.session.commit()
email = {}
email["title"] = "Close Charge Request"
email["sender"]=("SG TigerTracker", "[email protected]")
email["recipients"] = admin_emails
email["subtype"] = "related"
email["html"] = render_template(
'close_charge_request.html',
user_name= committee.head,
charge_name= chargeID,
time_stamp= time.time(),
request_url= app.config['CLIENT_URL'] + str(invitation.id)
)
if not app.config['TESTING']:
send_email(email)
return Response.RequestSent
except Exception as e:
db.session.rollback()
return Response.RequestError
##
## @brief Gets the data for a specific invitation/request.
##
## @param user_data The data to display a specific invitation,
## contains the keys (all required):
## - token: The token of the authenticated user
## - invitation_id: Id of invitation/request.
##
## @emit Data of a specific invitation or errors.
##
@socketio.on('get_invitation')
@ensure_dict
@get_user
def get_invitation(user, user_data):
invitation = Invitations.query.filter_by(id = user_data.get("invitation_id","")).first()
if invitation is None:
emit("get_invitation", Response.InviteDoesntExist)
return
if user is None:
emit("get_invitation", Response.NotAuthenticated)
return
committee = invitation.committee
# Check if user should be able to view
# invitation.
if (committee.head == user.id or
user.is_admin or
user.id == invitation.user_name):
invitation_data = {
"committee_id": committee.id,
"committee_head": committee.head,
"committee_title": committee.title,
"current_user": user.id,
"invite_user": invitation.user_name,
"is_invite": invitation.isInvite
}
emit("get_invitation", invitation_data)
else:
emit("get_invitation", Response.IncorrectPerms)
##
## @brief Changes the status of an invitation/request.
##
## @param user_data The data to modify a specific invitation,
## contains the keys (all required):
## - token: The token of the authenticated user
## - invitation_id: Id of invitation/request.
## - status: True to accept, false otherwise.
##
## @emit UserAdded, InviteDeleted or errors.
##
@socketio.on('set_invitation')
@ensure_dict
@get_user
def set_invitation(user, user_data):
invitation = Invitations.query.filter_by(id = user_data.get("invitation_id","")).first()
if invitation is None:
emit("set_invitation", Response.InviteDoesntExist)
return
if user is None:
emit("set_invitation", Response.NotAuthenticated)
return
if "status" not in user_data:
emit("set_invitation", Response.InvalidStatus)
return
if type(user_data.get("status","")) != type(True):
emit("set_invitation", Response.InvalidStatus)
return
com_head = Users.query.filter_by(id= invitation.committee.head).first()
com_id = invitation.committee.id
token = user.generate_auth()
# If invitation, use the committe heads token.
if invitation.isInvite:
token = com_head.generate_auth()
else:
if com_head != user or not user.is_admin:
emit("set_invitation", Response.IncorrectPerms)
return
if user_data["status"] == True:
add_data = {
"token": token,
"committee_id": com_id,
"user_id": invitation.user_name
}
from app.members.controllers import add_to_committee
returnValue = add_to_committee(add_data)
emit("set_invitation", Response.InviteAccept)
else:
emit("set_invitation", Response.InviteDeny)
# Remove the invitation.
db.session.delete(invitation)
| mit | -3,525,684,910,734,482,000 | 28.709898 | 92 | 0.597932 | false | 3.720085 | true | false | false |
torchingloom/edx-platform | cms/djangoapps/contentstore/features/problem-editor.py | 1 | 9464 | # disable missing docstring
# pylint: disable=C0111
import json
from lettuce import world, step
from nose.tools import assert_equal, assert_true # pylint: disable=E0611
from common import type_in_codemirror, open_new_course
from advanced_settings import change_value
from course_import import import_file, go_to_import
DISPLAY_NAME = "Display Name"
MAXIMUM_ATTEMPTS = "Maximum Attempts"
PROBLEM_WEIGHT = "Problem Weight"
RANDOMIZATION = 'Randomization'
SHOW_ANSWER = "Show Answer"
TIMER_BETWEEN_ATTEMPTS = "Timer Between Attempts"
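# The constants above are the display names of the problem's advanced settings
# fields; the steps below look up editor fields by these names.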
@step('I have created a Blank Common Problem$')
def i_created_blank_common_problem(step):
world.create_course_with_unit()
step.given("I have created another Blank Common Problem")
@step('I have created another Blank Common Problem$')
def i_create_new_common_problem(step):
world.create_component_instance(
step=step,
category='problem',
component_type='Blank Common Problem'
)
@step('I edit and select Settings$')
def i_edit_and_select_settings(_step):
world.edit_component_and_select_settings()
@step('I see the advanced settings and their expected values$')
def i_see_advanced_settings_with_values(step):
world.verify_all_setting_entries(
[
[DISPLAY_NAME, "Blank Common Problem", True],
[MAXIMUM_ATTEMPTS, "", False],
[PROBLEM_WEIGHT, "", False],
[RANDOMIZATION, "Never", False],
[SHOW_ANSWER, "Finished", False],
[TIMER_BETWEEN_ATTEMPTS, "0", False]
])
@step('I can modify the display name')
def i_can_modify_the_display_name(_step):
# Verifying that the display name can be a string containing a floating point value
# (to confirm that we don't throw an error because it is of the wrong type).
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, '3.4')
verify_modified_display_name()
@step('my display name change is persisted on save')
def my_display_name_change_is_persisted_on_save(step):
world.save_component_and_reopen(step)
verify_modified_display_name()
@step('the problem display name is "(.*)"$')
def verify_problem_display_name(step, name):
assert_equal(name.upper(), world.browser.find_by_css('.problem-header').text)
@step('I can specify special characters in the display name')
def i_can_modify_the_display_name_with_special_chars(_step):
index = world.get_setting_entry_index(DISPLAY_NAME)
world.set_field_value(index, "updated ' \" &")
verify_modified_display_name_with_special_chars()
@step('my special characters and persisted on save')
def special_chars_persisted_on_save(step):
world.save_component_and_reopen(step)
verify_modified_display_name_with_special_chars()
@step('I can revert the display name to unset')
def can_revert_display_name_to_unset(_step):
world.revert_setting_entry(DISPLAY_NAME)
verify_unset_display_name()
@step('my display name is unset on save')
def my_display_name_is_persisted_on_save(step):
world.save_component_and_reopen(step)
verify_unset_display_name()
@step('I can select Per Student for Randomization')
def i_can_select_per_student_for_randomization(_step):
world.browser.select(RANDOMIZATION, "Per Student")
verify_modified_randomization()
@step('my change to randomization is persisted')
def my_change_to_randomization_is_persisted(step):
world.save_component_and_reopen(step)
verify_modified_randomization()
@step('I can revert to the default value for randomization')
def i_can_revert_to_default_for_randomization(step):
world.revert_setting_entry(RANDOMIZATION)
world.save_component_and_reopen(step)
world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Never", False)
@step('I can set the weight to "(.*)"?')
def i_can_set_weight(_step, weight):
set_weight(weight)
verify_modified_weight()
@step('my change to weight is persisted')
def my_change_to_weight_is_persisted(step):
world.save_component_and_reopen(step)
verify_modified_weight()
@step('I can revert to the default value of unset for weight')
def i_can_revert_to_default_for_unset_weight(step):
world.revert_setting_entry(PROBLEM_WEIGHT)
world.save_component_and_reopen(step)
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the weight to "(.*)", it remains unset')
def set_the_weight_to_abc(step, bad_weight):
set_weight(bad_weight)
# We show the clear button immediately on type, hence the "True" here.
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", True)
world.save_component_and_reopen(step)
# But no change was actually ever sent to the model, so on reopen, explicitly_set is False
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "", False)
@step('if I set the max attempts to "(.*)", it will persist as a valid integer$')
def set_the_max_attempts(step, max_attempts_set):
# on firefox with selenium, the behavior is different.
# eg 2.34 displays as 2.34 and is persisted as 2
index = world.get_setting_entry_index(MAXIMUM_ATTEMPTS)
world.set_field_value(index, max_attempts_set)
world.save_component_and_reopen(step)
value = world.css_value('input.setting-input', index=index)
assert value != "", "max attempts is blank"
assert int(value) >= 0
@step('Edit High Level Source is not visible')
def edit_high_level_source_not_visible(step):
verify_high_level_source_links(step, False)
@step('Edit High Level Source is visible')
def edit_high_level_source_links_visible(step):
verify_high_level_source_links(step, True)
@step('If I press Cancel my changes are not persisted')
def cancel_does_not_save_changes(step):
world.cancel_component(step)
step.given("I edit and select Settings")
step.given("I see the advanced settings and their expected values")
@step('I have enabled latex compiler')
def enable_latex_compiler(step):
url = world.browser.url
step.given("I select the Advanced Settings")
change_value(step, 'use_latex_compiler', 'true')
world.visit(url)
world.wait_for_xmodule()
@step('I have created a LaTeX Problem')
def create_latex_problem(step):
world.create_course_with_unit()
step.given('I have enabled latex compiler')
world.create_component_instance(
step=step,
category='problem',
component_type='Problem Written in LaTeX',
is_advanced=True
)
@step('I edit and compile the High Level Source')
def edit_latex_source(_step):
open_high_level_source()
type_in_codemirror(1, "hi")
world.css_click('.hls-compile')
@step('my change to the High Level Source is persisted')
def high_level_source_persisted(_step):
def verify_text(driver):
css_sel = '.problem div>span'
return world.css_text(css_sel) == 'hi'
world.wait_for(verify_text, timeout=10)
@step('I view the High Level Source I see my changes')
def high_level_source_in_editor(_step):
open_high_level_source()
assert_equal('hi', world.css_value('.source-edit-box'))
@step(u'I have an empty course')
def i_have_empty_course(step):
open_new_course()
@step(u'I go to the import page')
def i_go_to_import(_step):
go_to_import()
@step(u'I import the file "([^"]*)"$')
def i_import_the_file(_step, filename):
import_file(filename)
@step(u'I go to the vertical "([^"]*)"$')
def i_go_to_vertical(_step, vertical):
world.css_click("span:contains('{0}')".format(vertical))
@step(u'I go to the unit "([^"]*)"$')
def i_go_to_unit(_step, unit):
loc = "window.location = $(\"span:contains('{0}')\").closest('a').attr('href')".format(unit)
world.browser.execute_script(loc)
@step(u'I see a message that says "([^"]*)"$')
def i_can_see_message(_step, msg):
msg = json.dumps(msg) # escape quotes
world.css_has_text("h2.title", msg)
@step(u'I can edit the problem$')
def i_can_edit_problem(_step):
world.edit_component()
def verify_high_level_source_links(step, visible):
if visible:
assert_true(world.is_css_present('.launch-latex-compiler'),
msg="Expected to find the latex button but it is not present.")
else:
assert_true(world.is_css_not_present('.launch-latex-compiler'),
msg="Expected not to find the latex button but it is present.")
world.cancel_component(step)
def verify_modified_weight():
world.verify_setting_entry(world.get_setting_entry(PROBLEM_WEIGHT), PROBLEM_WEIGHT, "3.5", True)
def verify_modified_randomization():
world.verify_setting_entry(world.get_setting_entry(RANDOMIZATION), RANDOMIZATION, "Per Student", True)
def verify_modified_display_name():
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, '3.4', True)
def verify_modified_display_name_with_special_chars():
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, "updated ' \" &", True)
def verify_unset_display_name():
world.verify_setting_entry(world.get_setting_entry(DISPLAY_NAME), DISPLAY_NAME, 'Blank Advanced Problem', False)
def set_weight(weight):
index = world.get_setting_entry_index(PROBLEM_WEIGHT)
world.set_field_value(index, weight)
def open_high_level_source():
world.edit_component()
world.css_click('.launch-latex-compiler > a')
| agpl-3.0 | 6,597,404,825,452,531,000 | 31.522337 | 116 | 0.700127 | false | 3.350088 | false | false | false |
magudev17/gifsta | gifspool/migrations/0001_initial.py | 1 | 5240 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-05-13 21:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import gifspool.models
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, default='', max_length=30)),
('num_gifs', models.IntegerField(default=0)),
('num_likes', models.IntegerField(default=0)),
('post_to', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='Gif',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('tags', models.CharField(blank=True, max_length=300)),
('upload_date', models.DateTimeField(auto_now_add=True)),
('likes_by', models.TextField(blank=True, default='')),
('likes', models.IntegerField(default=0)),
('shocked', models.IntegerField(default=0)),
('loved', models.IntegerField(default=0)),
('laugh', models.IntegerField(default=0)),
('post_to', models.BooleanField(default=None)),
('gif_file', models.FileField(upload_to=gifspool.models.user_directory_path)),
('jpg_path', models.CharField(blank=True, default='', max_length=60, null=True)),
('jpg_url', models.CharField(blank=True, default='', max_length=60, null=True)),
('views', models.IntegerField(blank=True, default=0, null=True)),
('prev_gif', models.IntegerField(blank=True, default=None, null=True)),
('next_gif', models.IntegerField(blank=True, default=None, null=True)),
('creator', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='GifHashtagLinker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('gif', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Gif')),
],
),
migrations.CreateModel(
name='GifView',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip_address', models.CharField(max_length=30)),
('view_date', models.DateTimeField(auto_now_add=True, null=True)),
('gif', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Gif')),
('user', models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Hashtag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hashtag', models.CharField(max_length=60, unique=True)),
('count', models.IntegerField(default=1)),
],
),
migrations.CreateModel(
name='Like',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('shocked', models.BooleanField(default=False)),
('loved', models.BooleanField(default=False)),
('laugh', models.BooleanField(default=False)),
('like_date', models.DateTimeField(auto_now_add=True, null=True)),
('gif_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Gif')),
('user_id', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='gifhashtaglinker',
name='hashtag',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='gifspool.Hashtag'),
),
migrations.AddField(
model_name='gif',
name='hashtags',
field=models.ManyToManyField(blank=True, related_name='gifs_hashtag', through='gifspool.GifHashtagLinker', to='gifspool.Hashtag'),
),
migrations.AddField(
model_name='gif',
name='liked_by',
field=models.ManyToManyField(blank=True, related_name='liked_by_user', through='gifspool.Like', to=settings.AUTH_USER_MODEL),
),
]
| mit | 7,252,843,298,969,034,000 | 49.384615 | 144 | 0.580534 | false | 4.077821 | false | false | false |
flavour/eden | modules/templates/SAFIRE/config.py | 5 | 54753 | # -*- coding: utf-8 -*-
from collections import OrderedDict
from gluon import current, URL
from gluon.storage import Storage
def config(settings):
"""
Template settings for SaFiRe: Sahana First Response
http://eden.sahanafoundation.org/wiki/BluePrint/SAFIRE
"""
T = current.T
settings.base.system_name = T("Sahana First Response")
settings.base.system_name_short = T("SAFIRE")
# PrePopulate data
settings.base.prepopulate.append("SAFIRE")
settings.base.prepopulate_demo.append("SAFIRE/Demo")
# Theme (folder to use for views/layout.html)
#settings.base.theme = "SAFIRE"
# Authentication settings
# Should users be allowed to register themselves?
#settings.security.self_registration = False
# Do new users need to verify their email address?
#settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
#settings.auth.registration_requires_approval = True
settings.auth.registration_requests_organisation = True
# Approval emails get sent to all admins
settings.mail.approver = "ADMIN"
settings.auth.registration_link_user_to = {"staff": T("Staff"),
}
settings.auth.registration_link_user_to_default = ["staff"]
# Uncomment to display the Map Legend as a floating DIV
settings.gis.legend = "float"
# Uncomment to Disable the Postcode selector in the LocationSelector
#settings.gis.postcode_selector = False # @ToDo: Vary by country (include in the gis_config!)
# Uncomment to show the Print control:
# http://eden.sahanafoundation.org/wiki/UserGuidelines/Admin/MapPrinting
#settings.gis.print_button = True
# L10n settings
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Security Policy
# http://eden.sahanafoundation.org/wiki/S3AAA#System-widePolicy
# 1: Simple (default): Global as Reader, Authenticated as Editor
# 2: Editor role required for Update/Delete, unless record owned by session
# 3: Apply Controller ACLs
# 4: Apply both Controller & Function ACLs
# 5: Apply Controller, Function & Table ACLs
# 6: Apply Controller, Function, Table ACLs and Entity Realm
# 7: Apply Controller, Function, Table ACLs and Entity Realm + Hierarchy
# 8: Apply Controller, Function, Table ACLs, Entity Realm + Hierarchy and Delegations
settings.security.policy = 5 # Controller, Function & Table ACLs
# -------------------------------------------------------------------------
# Comment/uncomment modules here to disable/enable them
# Modules menu is defined in modules/eden/menu.py
settings.modules = OrderedDict([
# Core modules which shouldn't be disabled
("default", Storage(
name_nice = "Home",
restricted = False, # Use ACLs to control access to this module
#access = None, # All Users (inc Anonymous) can see this module in the default menu & access the controller
module_type = None # This item is not shown in the menu
)),
("admin", Storage(
name_nice = "Administration",
#description = "Site Administration",
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
("appadmin", Storage(
name_nice = "Administration",
#description = "Site Administration",
module_type = None # No Menu
)),
("errors", Storage(
name_nice = "Ticket Viewer",
#description = "Needed for Breadcrumbs",
restricted = False,
module_type = None # No Menu
)),
("sync", Storage(
name_nice = "Synchronization",
#description = "Synchronization",
access = "|1|", # Only Administrators can see this module in the default menu & access the controller
module_type = None # This item is handled separately for the menu
)),
#("tour", Storage(
# name_nice = T("Guided Tour Functionality"),
# module_type = None,
#)),
#("translate", Storage(
# name_nice = T("Translation Functionality"),
# #description = "Selective translation of strings based on module.",
# module_type = None,
#)),
("gis", Storage(
name_nice = "Map",
#description = "Situation Awareness & Geospatial Analysis",
module_type = 6, # 6th item in the menu
)),
("pr", Storage(
name_nice = "Person Registry",
#description = "Central point to record details on People",
access = "|1|", # Only Administrators can see this module in the default menu (access to controller is possible to all still)
module_type = 10
)),
("org", Storage(
name_nice = "Organizations",
#description = 'Lists "who is doing what & where". Allows relief agencies to coordinate their activities',
module_type = 1
)),
("hrm", Storage(
name_nice = "Staff",
#description = "Human Resources Management",
module_type = 2,
)),
("vol", Storage(
name_nice = T("Volunteers"),
#description = "Human Resources Management",
module_type = 2,
)),
("cms", Storage(
name_nice = "Content Management",
#description = "Content Management System",
module_type = 10,
)),
("doc", Storage(
name_nice = "Documents",
#description = "A library of digital resources, such as photos, documents and reports",
module_type = 10,
)),
("msg", Storage(
name_nice = "Messaging",
#description = "Sends & Receives Alerts via Email & SMS",
            # The user-visible functionality of this module isn't normally required. Rather, its main purpose is to be accessed from other modules.
module_type = None,
)),
("supply", Storage(
name_nice = "Supply Chain Management",
#description = "Used within Inventory Management, Request Management and Asset Management",
module_type = None, # Not displayed
)),
("inv", Storage(
name_nice = T("Warehouses"),
#description = "Receiving and Sending Items",
module_type = 4
)),
("asset", Storage(
name_nice = "Assets",
#description = "Recording and Assigning Assets",
module_type = 5,
)),
# Vehicle depends on Assets
("vehicle", Storage(
name_nice = "Vehicles",
#description = "Manage Vehicles",
module_type = 10,
)),
#("budget", Storage(
# name_nice = T("Budgets"),
# #description = "Tracks the location, capacity and breakdown of victims in Shelters",
# module_type = 10
#)),
("fin", Storage(
name_nice = T("Finance"),
module_type = 10
)),
("cr", Storage(
name_nice = T("Shelters"),
#description = "Tracks the location, capacity and breakdown of victims in Shelters",
module_type = 10
)),
("project", Storage(
name_nice = "Tasks",
#description = "Tracking of Projects, Activities and Tasks",
module_type = 2
)),
("req", Storage(
name_nice = "Requests",
#description = "Manage requests for supplies, assets, staff or other resources. Matches against Inventories where supplies are requested.",
module_type = 10,
)),
("hms", Storage(
name_nice = T("Hospitals"),
#description = "Helps to monitor status of hospitals",
module_type = 10
)),
#("dvr", Storage(
# name_nice = T("Disaster Victim Registry"),
# #description = "Allow affected individuals & households to register to receive compensation and distributions",
# module_type = 10,
#)),
("event", Storage(
name_nice = "Events",
#description = "Activate Events (e.g. from Scenario templates) for allocation of appropriate Resources (Human, Assets & Facilities).",
module_type = 10,
)),
#("transport", Storage(
# name_nice = T("Transport"),
# module_type = 10,
#)),
#("stats", Storage(
# name_nice = T("Statistics"),
# #description = "Manages statistics",
# module_type = None,
#)),
])
# -------------------------------------------------------------------------
# CMS
# -------------------------------------------------------------------------
settings.cms.richtext = True
# -------------------------------------------------------------------------
# Organisations
# -------------------------------------------------------------------------
settings.org.documents_tab = True
settings.org.projects_tab = False
# -------------------------------------------------------------------------
# Shelters
# -------------------------------------------------------------------------
settings.cr.people_registration = False
# -------------------------------------------------------------------------
def customise_cr_shelter_resource(r, tablename):
#table = current.s3db.cr_shelter
f = current.s3db.cr_shelter.shelter_service_id
f.readable = f.writable = False
settings.customise_cr_shelter_resource = customise_cr_shelter_resource
# -------------------------------------------------------------------------
# Events
# -------------------------------------------------------------------------
def event_rheader(r):
rheader = None
record = r.record
if record and r.representation == "html":
from gluon import A, DIV, TABLE, TR, TH
from s3 import s3_rheader_tabs
name = r.name
if name == "incident":
if settings.get_incident_label(): # == "Ticket"
label = T("Ticket Details")
else:
label = T("Incident Details")
tabs = [(label, None),
#(T("Tasks"), "task"),
#(T("Human Resources"), "human_resource"),
#(T("Equipment"), "asset"),
(T("Action Plan"), "plan"),
(T("Incident Reports"), "incident_report"),
(T("Logs"), "log"),
(T("Expenses"), "expense"),
(T("Situation Reports"), "sitrep"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
record_id = r.id
incident_type_id = record.incident_type_id
editable = current.auth.s3_has_permission("UPDATE", "event_incident", record_id)
if editable:
# Dropdown of Scenarios to select
stable = current.s3db.event_scenario
query = (stable.incident_type_id == incident_type_id) & \
(stable.deleted == False)
scenarios = current.db(query).select(stable.id,
stable.name,
)
if len(scenarios) and r.method != "event":
from gluon import SELECT, OPTION
dropdown = SELECT(_id="scenarios")
dropdown["_data-incident_id"] = record_id
dappend = dropdown.append
dappend(OPTION(T("Select Scenario")))
for s in scenarios:
dappend(OPTION(s.name, _value=s.id))
scenarios = TR(TH("%s: " % T("Scenario")),
dropdown,
)
s3 = current.response.s3
script = "/%s/static/themes/SAFIRE/js/incident_profile.js" % r.application
if script not in s3.scripts:
s3.scripts.append(script)
s3.js_global.append('''i18n.scenarioConfirm="%s"''' % T("Populate Incident with Tasks, Organizations, Positions and Equipment from the Scenario?"))
else:
scenarios = ""
else:
scenarios = ""
if record.exercise:
exercise = TH(T("EXERCISE"))
else:
exercise = TH()
if record.closed:
closed = TH(T("CLOSED"))
else:
closed = TH()
if record.event_id or r.method == "event" or not editable:
event = ""
else:
if settings.get_event_label(): # == "Disaster"
label = T("Assign to Disaster")
else:
label = T("Assign to Event")
event = A(label,
_href = URL(c = "event",
f = "incident",
args = [record_id, "event"],
),
_class = "action-btn"
)
table = r.table
rheader = DIV(TABLE(TR(exercise),
TR(TH("%s: " % table.name.label),
record.name,
),
TR(TH("%s: " % table.incident_type_id.label),
table.incident_type_id.represent(incident_type_id),
),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id),
),
# @ToDo: Add Zone
TR(TH("%s: " % table.severity.label),
table.severity.represent(record.severity),
),
TR(TH("%s: " % table.level.label),
table.level.represent(record.level),
),
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id),
),
TR(TH("%s: " % table.person_id.label),
table.person_id.represent(record.person_id),
),
scenarios,
TR(TH("%s: " % table.comments.label),
record.comments,
),
TR(TH("%s: " % table.date.label),
table.date.represent(record.date),
),
TR(closed),
event,
), rheader_tabs)
elif name == "incident_report":
record_id = r.id
ltable = current.s3db.event_incident_report_incident
query = (ltable.incident_report_id == record_id)
link = current.db(query).select(ltable.incident_id,
limitby = (0, 1)
).first()
if link:
from s3 import S3Represent
represent = S3Represent(lookup="event_incident", show_link=True)
rheader = DIV(TABLE(TR(TH("%s: " % ltable.incident_id.label),
represent(link.incident_id),
),
))
else:
if settings.get_incident_label(): # == "Ticket"
label = T("Assign to Ticket")
else:
label = T("Assign to Incident")
rheader = DIV(A(label,
_href = URL(c = "event",
f = "incident_report",
args = [record_id, "assign"],
),
_class = "action-btn"
))
elif name == "event":
if settings.get_event_label(): # == "Disaster"
label = T("Disaster Details")
else:
label = T("Event Details")
if settings.get_incident_label(): # == "Ticket"
INCIDENTS = T("Tickets")
else:
INCIDENTS = T("Incidents")
tabs = [(label, None),
(INCIDENTS, "incident"),
(T("Documents"), "document"),
(T("Photos"), "image"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.event_type_id.label),
table.event_type_id.represent(record.event_type_id),
),
TR(TH("%s: " % table.name.label),
record.name,
),
TR(TH("%s: " % table.start_date.label),
table.start_date.represent(record.start_date),
),
TR(TH("%s: " % table.comments.label),
record.comments,
),
), rheader_tabs)
elif name == "scenario":
tabs = [(T("Scenario Details"), None),
#(T("Tasks"), "task"),
#(T("Human Resources"), "human_resource"),
#(T("Equipment"), "asset"),
(T("Action Plan"), "plan"),
(T("Incident Reports"), "incident_report"),
]
rheader_tabs = s3_rheader_tabs(r, tabs)
table = r.table
rheader = DIV(TABLE(TR(TH("%s: " % table.incident_type_id.label),
table.incident_type_id.represent(record.incident_type_id),
),
TR(TH("%s: " % table.organisation_id.label),
table.organisation_id.represent(record.organisation_id),
),
TR(TH("%s: " % table.location_id.label),
table.location_id.represent(record.location_id),
),
TR(TH("%s: " % table.name.label),
record.name,
),
TR(TH("%s: " % table.comments.label),
record.comments,
),
), rheader_tabs)
return rheader
# -------------------------------------------------------------------------
def customise_event_event_controller(**attr):
#s3 = current.response.s3
# No sidebar menu
#current.menu.options = None
attr["rheader"] = event_rheader
return attr
settings.customise_event_event_controller = customise_event_event_controller
# -------------------------------------------------------------------------
def customise_event_incident_report_resource(r, tablename):
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Log Call"),
title_display = T("Call Log Details"),
title_list = T("Call Logs"),
title_update = T("Edit Call Log"),
label_list_button = T("List Call Logs"),
label_delete_button = T("Delete Call Log"),
msg_record_created = T("Call Log added"),
msg_record_modified = T("Call Log updated"),
msg_record_deleted = T("Call Log removed"),
msg_list_empty = T("No Calls currently logged"),
)
settings.customise_event_incident_report_resource = customise_event_incident_report_resource
# -------------------------------------------------------------------------
def customise_event_incident_report_controller(**attr):
from gluon import A
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
            # Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
method = r.method
if method in (None, "create"):
current.s3db.gis_location.addr_street.label = T("Street Address or Location Details")
from s3 import S3SQLCustomForm
crud_form = S3SQLCustomForm((T("What is it?"), "name"),
"incident_type_id",
(T("Who am I speaking with?"), "reported_by"),
(T("How can we contact you?"), "contact"),
(T("Where did this Incident take place?"), "location_id"),
(T("Explain the Situation?"), "description"),
(T("What are your immediate needs?"), "needs"),
)
r.resource.configure(create_next = URL(args=["[id]", "assign"]),
crud_form = crud_form,
)
return True
s3.prep = custom_prep
# No sidebar menu
current.menu.options = None
req_args = current.request.args
if len(req_args) > 1 and req_args[1] == "assign":
if settings.get_incident_label(): # == "Ticket"
label = T("New Ticket")
else:
label = T("New Incident")
attr["rheader"] = A(label,
_class = "action-btn",
_href = URL(c="event", f="incident",
args = ["create"],
vars = {"incident_report_id": req_args[0]},
),
)
else:
attr["rheader"] = event_rheader
return attr
settings.customise_event_incident_report_controller = customise_event_incident_report_controller
# -------------------------------------------------------------------------
def event_incident_create_onaccept(form):
"""
Automate Level based on Type, Zone (intersect from Location) & Severity
@ToDo: Move this to SAFIRE/SC
"""
db = current.db
s3db = current.s3db
form_vars_get = form.vars.get
incident_id = form_vars_get("id")
# If Incident Type is Chemical then level must be > 2
level = form_vars_get("level")
if level and int(level) < 3:
incident_type_id = form_vars_get("incident_type_id")
ittable = s3db.event_incident_type
incident_type = db(ittable.id == incident_type_id).select(ittable.name,
limitby = (0,1)
).first().name
if incident_type == "Chemical Hazard":
itable = s3db.event_incident
db(itable.id == incident_id).update(level = 3)
current.response.warning = T("Chemical Hazard Incident so Level raised to 3")
# Alert Lead Agency
organisation_id = form_vars_get("organisation_id")
if organisation_id:
otable = s3db.org_organisation_tag
query = (otable.organisation_id == organisation_id) & \
(otable.tag == "duty")
duty = db(query).select(otable.value,
limitby = (0, 1)
).first()
if duty:
current.msg.send_sms_via_api(duty.value,
"You have been assigned an Incident: %s%s" % (settings.get_base_public_url(),
URL(c="event", f= "incident",
args = incident_id),
))
# -------------------------------------------------------------------------
def customise_event_incident_resource(r, tablename):
from s3 import S3LocationSelector
s3db = current.s3db
table = s3db.event_incident
f = table.severity
f.readable = f.writable = True
f = table.level
f.readable = f.writable = True
table.location_id.widget = S3LocationSelector(polygons = True,
show_address = True,
)
f = table.organisation_id
f.readable = f.writable = True
f.label = T("Lead Response Organization")
if r.method == "plan":
table.action_plan.label = T("Event Action Plan")
else:
f = table.action_plan
f.readable = f.writable = False
if r.interactive:
s3db.add_custom_callback(tablename,
"create_onaccept",
event_incident_create_onaccept,
)
settings.customise_event_incident_resource = customise_event_incident_resource
# -------------------------------------------------------------------------
def customise_event_incident_controller(**attr):
s3db = current.s3db
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
            # Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
resource = r.resource
# Redirect to action plan after create
resource.configure(create_next = URL(c="event", f="incident",
args = ["[id]", "plan"]),
)
method = r.method
if method == "create":
incident_report_id = r.get_vars.get("incident_report_id")
if incident_report_id:
# Got here from incident report assign => "New Incident"
# - prepopulate incident name from report title
# - copy incident type and location from report
# - onaccept: link the incident report to the incident
if r.http == "GET":
from s3 import s3_truncate
rtable = s3db.event_incident_report
incident_report = current.db(rtable.id == incident_report_id).select(rtable.name,
rtable.incident_type_id,
rtable.location_id,
limitby = (0, 1),
).first()
table = r.table
table.name.default = s3_truncate(incident_report.name, 64)
table.incident_type_id.default = incident_report.incident_type_id
table.location_id.default = incident_report.location_id
elif r.http == "POST":
def create_onaccept(form):
s3db.event_incident_report_incident.insert(incident_id = form.vars.id,
incident_report_id = incident_report_id,
)
s3db.add_custom_callback("event_incident",
"create_onaccept",
create_onaccept,
)
elif method == "plan" and settings.get_incident_label(): # == "Ticket"
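                # Accessing these lazily-defined tables ensures their default CRUD strings exist before we override them below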
s3db.event_task
s3db.event_organisation
crud_strings = s3.crud_strings
crud_strings.event_task.msg_list_empty = T("No Tasks currently registered for this ticket")
crud_strings.event_organisation.msg_list_empty = T("No Organizations currently registered in this ticket")
return True
s3.prep = custom_prep
# No sidebar menu
current.menu.options = None
attr["rheader"] = event_rheader
return attr
settings.customise_event_incident_controller = customise_event_incident_controller
# -------------------------------------------------------------------------
def customise_event_asset_resource(r, tablename):
table = current.s3db.event_asset
table.item_id.label = T("Item Type")
table.asset_id.label = T("Specific Item")
# DateTime
from gluon import IS_EMPTY_OR
from s3 import IS_UTC_DATETIME, S3CalendarWidget, S3DateTime
for f in (table.start_date, table.end_date):
f.requires = IS_EMPTY_OR(IS_UTC_DATETIME())
f.represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
f.widget = S3CalendarWidget(timepicker = True)
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this ticket"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this incident"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this incident"))
settings.customise_event_asset_resource = customise_event_asset_resource
# -------------------------------------------------------------------------
def event_human_resource_onaccept(form, create=True):
"""
When a Position is assigned to an Incident:
- set_event_from_incident
- add Log Entry
- send Notification
"""
db = current.db
s3db = current.s3db
s3db.event_set_event_from_incident(form, "event_human_resource")
table = s3db.event_human_resource
form_vars = form.vars
form_vars_get = form_vars.get
link_id = form_vars_get("id")
incident_id = form_vars_get("incident_id")
if not incident_id:
link = db(table.id == link_id).select(table.incident_id,
limitby = (0, 1)
).first()
incident_id = link.incident_id
pe_id = None
if create:
person_id = form_vars_get("person_id")
if person_id:
ptable = s3db.pr_person
person = db(ptable.id == person_id).select(ptable.pe_id,
limitby = (0, 1)
).first()
pe_id = person.pe_id
job_title_id = form_vars_get("job_title_id")
if job_title_id:
s3db.event_incident_log.insert(incident_id = incident_id,
name = "Person Requested",
comments = s3db.event_human_resource.job_title_id.represent(job_title_id),
)
else:
# Update
record = form.record
if record: # Not True for a record merger
from s3dal import Field
changed = {}
for var in form_vars:
vvar = form_vars[var]
if isinstance(vvar, Field):
# modified_by/modified_on
continue
rvar = record.get(var, "NOT_PRESENT")
if rvar != "NOT_PRESENT" and vvar != rvar:
f = table[var]
if var == "pe_id":
pe_id = vvar
type_ = f.type
if type_ == "integer" or \
type_.startswith("reference"):
if vvar:
vvar = int(vvar)
if vvar == rvar:
continue
represent = table[var].represent
if represent:
if hasattr(represent, "show_link"):
represent.show_link = False
else:
represent = lambda o: o
if rvar:
changed[var] = "%s changed from %s to %s" % \
(f.label, represent(rvar), represent(vvar))
else:
changed[var] = "%s changed to %s" % \
(f.label, represent(vvar))
if changed:
table = s3db.event_incident_log
text = []
for var in changed:
text.append(changed[var])
text = "\n".join(text)
table.insert(incident_id = incident_id,
#name = "Person Assigned",
name = "Person Request Updated",
comments = text,
)
if pe_id:
# Notify Assignee
if settings.get_incident_label(): # == "Ticket"
label = T("Ticket")
else:
label = T("Incident")
current.msg.send_by_pe_id(pe_id,
subject = "",
message = "You have been assigned to an %s: %s%s" % \
(label,
settings.get_base_public_url(),
URL(c="event", f= "incident",
args = [incident_id, "human_resource", link_id]),
),
contact_method = "SMS")
# -------------------------------------------------------------------------
def customise_event_human_resource_resource(r, tablename):
s3db = current.s3db
table = s3db.event_human_resource
# DateTime
from gluon import IS_EMPTY_OR
from s3 import IS_UTC_DATETIME, S3CalendarWidget, S3DateTime
for f in (table.start_date, table.end_date):
f.requires = IS_EMPTY_OR(IS_UTC_DATETIME())
f.represent = lambda dt: S3DateTime.datetime_represent(dt, utc=True)
f.widget = S3CalendarWidget(timepicker = True)
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this ticket"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this incident"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this incident"))
s3db.configure(tablename,
# Deliberately over-rides
create_onaccept = event_human_resource_onaccept,
update_onaccept = lambda form:
event_human_resource_onaccept(form, create=False),
)
settings.customise_event_human_resource_resource = customise_event_human_resource_resource
# -------------------------------------------------------------------------
def customise_event_scenario_controller(**attr):
s3 = current.response.s3
# Custom prep
standard_prep = s3.prep
def custom_prep(r):
            # Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
if r.method != "plan":
f = r.table.action_plan
f.readable = f.writable = False
            if r.method == "create" and r.http == "POST":
r.resource.configure(create_next = URL(c="event", f="scenario",
args = ["[id]", "plan"]),
)
return True
s3.prep = custom_prep
# No sidebar menu
current.menu.options = None
attr["rheader"] = event_rheader
return attr
settings.customise_event_scenario_controller = customise_event_scenario_controller
# -------------------------------------------------------------------------
def customise_event_scenario_asset_resource(r, tablename):
table = current.s3db.event_scenario_asset
table.item_id.label = T("Item Type")
table.asset_id.label = T("Specific Item")
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this ticket"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Equipment"),
title_display = T("Equipment Details"),
title_list = T("Equipment"),
title_update = T("Edit Equipment"),
label_list_button = T("List Equipment"),
label_delete_button = T("Remove Equipment from this incident"),
msg_record_created = T("Equipment added"),
msg_record_modified = T("Equipment updated"),
msg_record_deleted = T("Equipment removed"),
msg_list_empty = T("No Equipment currently registered for this incident"))
settings.customise_event_scenario_asset_resource = customise_event_scenario_asset_resource
# -------------------------------------------------------------------------
def customise_event_scenario_human_resource_resource(r, tablename):
if settings.get_incident_label(): # == "Ticket"
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this ticket"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this ticket"))
else:
current.response.s3.crud_strings[tablename] = Storage(
label_create = T("Add Person"),
title_display = T("Person Details"),
title_list = T("Personnel"),
title_update = T("Edit Person"),
label_list_button = T("List Personnel"),
label_delete_button = T("Remove Person from this incident"),
msg_record_created = T("Person added"),
msg_record_modified = T("Person updated"),
msg_record_deleted = T("Person removed"),
msg_list_empty = T("No Persons currently registered for this incident"))
settings.customise_event_scenario_human_resource_resource = customise_event_scenario_human_resource_resource
# -------------------------------------------------------------------------
# HRM
# -------------------------------------------------------------------------
settings.hrm.job_title_deploy = True
settings.hrm.org_dependent_job_titles = True
# -------------------------------------------------------------------------
# Organisations
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def customise_org_organisation_resource(r, tablename):
s3db = current.s3db
# Custom Components
s3db.add_components(tablename,
org_organisation_tag = (# On-call Duty Number
{"name": "duty",
"joinby": "organisation_id",
"filterby": {"tag": "duty",
},
"multiple": False,
},
),
)
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink, \
IS_EMPTY_OR, IS_PHONE_NUMBER_MULTI, S3PhoneWidget, s3_phone_represent
# Individual settings for specific tag components
components_get = s3db.resource(tablename).components.get
duty = components_get("duty")
f = duty.table.value
        f.represent = s3_phone_represent
f.requires = IS_EMPTY_OR(IS_PHONE_NUMBER_MULTI())
f.widget = S3PhoneWidget()
crud_form = S3SQLCustomForm("name",
"acronym",
S3SQLInlineLink("organisation_type",
field = "organisation_type_id",
                                                    # The default search (triggered once there are 10+ options) just adds unnecessary complexity to a commonly-used form & commonly an early one (create Org when registering)
search = False,
label = T("Type"),
multiple = False,
widget = "multiselect",
),
"country",
(T("Reception Phone #"), "phone"),
S3SQLInlineComponent("duty",
label = T("On-call Duty Number"),
fields = [("", "value")],
multiple = False,
),
"website",
"logo",
"comments",
)
s3db.configure(tablename,
crud_form = crud_form,
)
settings.customise_org_organisation_resource = customise_org_organisation_resource
# -------------------------------------------------------------------------
# Projects
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
def project_task_onaccept(form, create=True):
"""
Send Person a Notification when they are assigned to a Task
Log changes in Incident Log
"""
if current.request.function == "scenario":
# Must be a Scenario
# - don't Log
# - don't send Notification
return
db = current.db
s3db = current.s3db
ltable = s3db.event_task
form_vars = form.vars
form_vars_get = form_vars.get
task_id = form_vars_get("id")
link = db(ltable.task_id == task_id).select(ltable.incident_id,
limitby = (0, 1)
).first()
if not link:
# Not attached to an Incident
# - don't Log
# - don't send Notification
return
incident_id = link.incident_id
if create:
pe_id = form_vars_get("pe_id")
# Log
name = form_vars_get("name")
if name:
s3db.event_incident_log.insert(incident_id = incident_id,
name = "Task Created",
comments = name,
)
else:
# Update
pe_id = None
record = form.record
if record: # Not True for a record merger
from s3dal import Field
table = s3db.project_task
changed = {}
for var in form_vars:
vvar = form_vars[var]
if isinstance(vvar, Field):
# modified_by/modified_on
continue
if var == "pe_id":
pe_id = vvar
rvar = record.get(var, "NOT_PRESENT")
if rvar != "NOT_PRESENT" and vvar != rvar:
f = table[var]
type_ = f.type
if type_ == "integer" or \
type_.startswith("reference"):
if vvar:
vvar = int(vvar)
if vvar == rvar:
continue
represent = table[var].represent
if represent:
if hasattr(represent, "show_link"):
represent.show_link = False
else:
represent = lambda o: o
if rvar:
changed[var] = "%s changed from %s to %s" % \
(f.label, represent(rvar), represent(vvar))
else:
changed[var] = "%s changed to %s" % \
(f.label, represent(vvar))
if changed:
table = s3db.event_incident_log
text = []
for var in changed:
text.append(changed[var])
text = "\n".join(text)
table.insert(incident_id = incident_id,
name = "Task Updated",
comments = text,
)
if pe_id:
# Notify Assignee
message = "You have been assigned a Task: %s%s" % \
(settings.get_base_public_url(),
URL(c="event", f= "incident",
args = [incident_id, "task", task_id]),
)
instance_type = s3db.pr_instance_type(pe_id)
if instance_type == "org_organisation":
# Notify the Duty Number for the Organisation, not everyone in the Organisation!
otable = s3db.org_organisation
ottable = s3db.org_organisation_tag
query = (otable.pe_id == pe_id) & \
(ottable.organisation_id == otable.id) & \
(ottable.tag == "duty")
duty = db(query).select(ottable.value,
limitby = (0, 1)
).first()
if duty:
current.msg.send_sms_via_api(duty.value,
message)
else:
task_notification = settings.get_event_task_notification()
if task_notification:
current.msg.send_by_pe_id(pe_id,
subject = "%s: Task assigned to you" % settings.get_system_name_short(),
message = message,
contact_method = task_notification)
# -------------------------------------------------------------------------
def customise_project_task_resource(r, tablename):
s3db = current.s3db
f = s3db.project_task.source
f.readable = f.writable = False
s3db.configure(tablename,
# No need to see time log: KISS
crud_form = None,
                       # NB We deliberately over-ride the default one
create_onaccept = project_task_onaccept,
# In event_ActionPlan()
#list_fields = ["priority",
# "name",
# "pe_id",
# "status_id",
# "date_due",
# ],
update_onaccept = lambda form:
project_task_onaccept(form, create=False),
)
settings.customise_project_task_resource = customise_project_task_resource
# END =========================================================================
| mit | -8,412,422,623,254,072,000 | 44.287841 | 198 | 0.430059 | false | 5.09425 | false | false | false |
mcneela/Retina | demos/dim_reduction/pca_tester.py | 1 | 2344 | """
Executable code for the PCA user story.
Run disc() to explore randomly generated flat disc data. Run hypersphere() to explore a high-dimensional ball
of randomly generated data.
"""
import pca_disc
from pca_disc import *
from PyDSTool.Toolbox import synthetic_data as sd
import random
import numpy as np
import __future__
DOI = [(-10,10),(-10,10)]
def disc():
pts = sd.generate_ball(100, 2, 10)
pts = np.concatenate((pts, np.zeros((100, 1))), axis=1)
trans_am = 12
trans_ax = 1
X = [[],[],[]]
for i in range(3):
X[i] = rotate_z(rotate_y(rotate_x(translate(pts, trans_ax, trans_am),
random.uniform(0, 2*np.pi)),
random.uniform(0, 2*np.pi)),
random.uniform(0, 2*np.pi))
X[i] = noise(X[i], 2, 0.3, 0, 10)
rot_layers = ['rot1', 'rot2', 'rot3']
rot_styles = ['r', 'g', 'b']
fig, [before, after, variance] = pca_disc.setupDisplay(rot_layers, rot_styles, DOI)
layer_obj = before.get_layer('orig_data')
layer_obj.add_data(pts[:,0], pts[:,1], pts[:,2])
layer_obj.set_style('y.')
before.build_layers()
after.build_layers()
variance.build_layers()
return ControlSys(fig, X, rot_layers, rot_styles, 2, before, after, variance)
def hypersphere(dim):
pts = sd.generate_ball(100, dim, 10)
#Create and stretch different hypersphere "clusters":
X1 = translate(stretch(stretch(sd.generate_ball(133, dim, 10), 0, 1.4), 1, 1.4), 0, 25)
X2 = translate(sd.generate_ball(110, dim, 10), 1, 20)
X3 = translate(noise(sd.generate_ball(95, dim, 10), 2, 0.6, 0, 2), 2, 15)
X = [X1, X2, X3]
clus_layers = ['clus1', 'clus2', 'clus3']
clus_styles = ['r', 'g', 'b']
fig, [before, after, variance] = pca_disc.setupDisplay(clus_layers, clus_styles, DOI)
proj_vecsHI = pca_disc.ortho_proj_mat(len(X[0][0]), 3)
proj_vecsLO = pca_disc.ortho_proj_mat(len(X[0][0]), 2)
    #Plot the entire dataset in yellow.
X_all = np.concatenate((X1,X2,X3))
layer_obj = before.get_layer('orig_data')
layer_obj.add_data(np.dot(X_all, proj_vecsHI).transpose())
layer_obj.set_style('y.')
return ControlSys(gui.masterWin, X, clus_layers, clus_styles, 2, proj_vecsLO, proj_vecsHI)
ctrl_sys = disc()
#ctrl_sys = hypersphere(6)
halt = True
| bsd-3-clause | -5,339,967,546,326,205,000 | 29.441558 | 108 | 0.601962 | false | 2.879607 | false | false | false |
inveniosoftware/invenio-oauthclient | tests/test_app.py | 1 | 4284 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test helpers."""
import os
from copy import deepcopy
import pytest
from flask import Flask
from flask_oauthlib.client import OAuth as FlaskOAuth
from flask_oauthlib.client import OAuthRemoteApp
from invenio_db import InvenioDB, db
from sqlalchemy_utils.functions import create_database, database_exists
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib.orcid import REMOTE_APP
def test_version():
"""Test version import."""
from invenio_oauthclient import __version__
assert __version__
def test_init():
"""Test extension initialization."""
app = Flask('testapp')
FlaskOAuth(app)
ext = InvenioOAuthClient(app)
assert 'invenio-oauthclient' in app.extensions
app = Flask('testapp')
ext = InvenioOAuthClient(app)
assert 'invenio-oauthclient' in app.extensions
app = Flask('testapp')
FlaskOAuth(app)
ext = InvenioOAuthClient()
assert 'invenio-oauthclient' not in app.extensions
ext.init_app(app)
assert 'invenio-oauthclient' in app.extensions
class _CustomOAuthRemoteApp(OAuthRemoteApp):
"""Custom OAuthRemoteApp used for testing."""
def test_standard_remote_app_factory(base_app):
"""Test standard remote_app class."""
base_app.config.update(
OAUTHCLIENT_REMOTE_APPS=dict(
custom_app=REMOTE_APP
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
assert isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
OAuthRemoteApp)
assert not isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
_CustomOAuthRemoteApp)
def test_remote_app_factory_global_customization(base_app):
"""Test remote_app override with global variable."""
base_app.config.update(
OAUTHCLIENT_REMOTE_APP=_CustomOAuthRemoteApp,
OAUTHCLIENT_REMOTE_APPS=dict(
custom_app=REMOTE_APP
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
assert isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
_CustomOAuthRemoteApp)
def test_remote_app_factory_local_customization(base_app):
"""Test custom remote_app for one app only."""
config_for_one_app = deepcopy(REMOTE_APP)
config_for_one_app['remote_app'] = _CustomOAuthRemoteApp
base_app.config.update(
OAUTHCLIENT_REMOTE_APPS=dict(
custom_app=config_for_one_app
)
)
FlaskOAuth(base_app)
InvenioOAuthClient(base_app)
assert isinstance(
base_app.extensions['oauthlib.client'].remote_apps['custom_app'],
_CustomOAuthRemoteApp)
def test_db(request):
"""Test database backend."""
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
'SQLALCHEMY_DATABASE_URI', 'sqlite://'
)
InvenioDB(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
def teardown():
with app.app_context():
db.drop_all()
request.addfinalizer(teardown)
with app.app_context():
        is_sqlite = str(db.engine.url) == 'sqlite://'
        db_exists = database_exists(str(db.engine.url))
        if not is_sqlite and not db_exists:
create_database(str(db.engine.url))
db.create_all()
tables = list(filter(lambda table: table.startswith('oauthclient'),
db.metadata.tables.keys()))
assert len(tables) == 3
def test_alembic(app):
"""Test alembic recipes."""
ext = app.extensions['invenio-db']
with app.app_context():
if db.engine.name == 'sqlite':
raise pytest.skip('Upgrades are not supported on SQLite.')
assert not ext.alembic.compare_metadata()
db.drop_all()
ext.alembic.upgrade()
assert not ext.alembic.compare_metadata()
ext.alembic.downgrade(target='96e796392533')
ext.alembic.upgrade()
assert not ext.alembic.compare_metadata()
ext.alembic.downgrade(target='96e796392533')
| mit | -2,229,394,456,313,082,400 | 28.342466 | 75 | 0.665033 | false | 3.781112 | true | false | false |
mgiugliano/iNeuron | iNeuron.py | 1 | 5988 | # iNeuron
#
#
# Michele Giugliano, 18-19/10/2014, Antwerpen
# http://www.ua.ac.be/michele.giugliano
#
# pythonista://MG/iNeuron?action=run
#
import string
import sound
from scene import *
from random import *
from time import localtime
from itertools import chain
from math import sin, exp, fmod, sqrt
ipad = False #will be set in the setup method
# Our class inherits from Scene, so that its draw method
# is automatically called 60 times per second when we run it.
# This implies approximately once every 16.6666... ms
class iNeuron (Scene):
def setup(self):
global ipad, yy, tt
ipad = self.size.w > 700
#Render all the digits as individual images:
self.numbers = {}
self.numbers_small = {}
font_size = 150 if self.size.w > 700 else 60
for s in chain(string.digits, [':', '.']):
#render_text returns a tuple of
#an image name and its size.
self.numbers[s] = render_text(s, 'Helvetica-Light', font_size)
self.numbers_small[s] = render_text(s, 'Courier', 20)
#--------------------------------------------------------------------------
# Simulation definition and control, general parameters
self.umin = -100.; # Minimal voltage to be displayed
self.umax = 100.; # Maximal voltage to be displayed
self.t = 0; # Current sim. time [ms]
self.mdt = .1; # Integration time step [ms]
self.u = -70.6; # Membrane potential state variable
self.w = 0.; # Adaptation state variable
self.t0 =-9999.; # Last firing time [ms], for refractoryness
self.In = 0.; # Synaptic fluctuating background current
self.Iext = 0.; # External current, when the touch screen is touched
#--------------------------------------------------------------------------
# (1) Model neuron parameters (i.e. exponential I&F)
self.th = 20; #[mV] - peak value for a spike
self.C = 281; #[pF] - membrane capacitance
self.g_L = 30; #[nS] - leak conductance
self.E_L = -70.6; #[mV] - leak reversal potential (or resting potential)
self.V_T = -50.4; #[mV] - excitability threshold
self.Delta_T = 2; #[mV] - excitability slope
self.Tarp = 2.; #[ms] - absolute refractory period
#--------------------------------------------------------------------------
self.tau_w = 144; #[ms] - decay time constant for adaptation variable
self.a = 4; #[nS] - voltage-dependence of adaptation variable
self.b = 0.0805; #[nA] - spike-dependence of adaptation variable
#--------------------------------------------------------------------------
self.mu = 200.; # Mean of the synaptic background current
self.sigma = 400.; # Stddev of the syn. backgr. current (e.g., 2 * mu)
self.taux = 5.; # Autocorrelation time length [ms]
self.t1 = self.mdt / self.taux; # Temp. var.for convenience - refer to eqs.
self.t2 = sqrt(2.*self.t1); # Temp. var.for convenience - refer to eqs.
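  # Note: t1 and t2 make the In update in draw() a discretised Ornstein-Uhlenbeck
  # (coloured noise) process: In += (mu - In)*(mdt/taux) + sigma*sqrt(2*mdt/taux)*N(0,1),
  # so the background current fluctuates around mu with stddev ~sigma and
  # autocorrelation time taux.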
#--------------------------------------------------------------------------
def should_rotate(self, orientation):
return True
def draw(self):
global yy, tt
background(0., 0., 0.)
fill(0.6,0,0)
stroke(0.6,0,0)
stroke_weight(3)
#---------------------------------------------------------------------------------------
# Main simulation cycle, repeated as many are the horizontal points on scren
for kk in range(int(self.size.w)):
   # Iteratively update the equation for the noisy external current
self.In += (self.mu - self.In) * self.t1 + self.sigma * self.t2 * gauss(0,1);
if self.u==self.th: # threshold
self.u = self.E_L;
self.w += self.b;
self.t0 = self.t;
line(kk,0.1 * self.size.h,kk,self.size.h)
tmp = sound.play_effect('Drums_02', 100, 20)
#sound.stop_effect(tmp)
else:
if (abs(self.t-self.t0) >= self.Tarp):
udot = self.mdt/self.C*(-self.g_L*(self.u-self.E_L) + self.g_L*self.Delta_T*exp((self.u-self.V_T)/self.Delta_T) - self.w + self.In + self.Iext);
if ((self.u + udot) > self.th):
self.u = self.th
else:
self.u += udot
else:
self.u = self.E_L;
wdot = self.mdt/self.tau_w*(self.a*(self.u-self.E_L) - self.w);
self.w += wdot;
self.t += self.mdt;
ud = (self.u - self.umin)/(self.umax - self.umin) * self.size.h * 0.9 + 0.1 * self.size.h
if (fmod(kk,2)==0):
ellipse(kk, ud, 2, 2)
#------------------------------------------------------------------------------------------
t = localtime() # current time probed, in the structure "t"
minute = t.tm_min # minutes
second = t.tm_sec # seconds
hour = t.tm_hour # hours
#Format the elapsed time (dt):
s = '%02d:%02d.%02d' % (hour, minute, second)
#Determine overall size for centering:
w, h = 0.0, self.numbers['0'][1].h
for c in s:
size = self.numbers[c][1]
w += size.w
#Draw the digits:
x = int(self.size.w * 0.5 - w * 0.5)
y = int(self.size.h * 0.5 - h * 0.5)
for c in s:
img, size = self.numbers[c]
image(img, x, y, size.w, size.h)
x += size.w
#Format the real-time index:
# self.dt : time in seconds elapsed since the last "draw" operation
tmp1 = (0.001 * self.mdt * self.size.w) # simulated seconds per frame
tmp2 = tmp1 / self.dt
s = '%02f' % tmp2
#Determine overall size for centering:
w, h = 0.0, self.numbers_small['0'][1].h
for c in s:
size = self.numbers_small[c][1]
w += size.w
#Draw the digits:
x = int(self.size.w * 0.5 - w * 0.5)
y = int(self.size.h * 0.75 - h * 0.5)
for c in s:
img, size = self.numbers_small[c]
image(img, x, y, size.w, size.h)
x += size.w
def touch_began(self, touch):
self.Iext = 200.;
def touch_ended(self, touch):
self.Iext = 0.;
#Run the scene that we just defined (10 frames/sec --> "6")
run(iNeuron(),orientation=DEFAULT_ORIENTATION, frame_interval=6, anti_alias=False)
# 1: 60
# 2: 30
# 3: 20
# 4: 15
# 5: 12
# 6: 10
# 7: 60/7
# 8: 60/8
| gpl-2.0 | -2,123,712,184,735,349,500 | 35.072289 | 149 | 0.550434 | false | 2.825861 | false | false | false |
BirkbeckCTP/janeway | src/core/homepage_elements/featured/plugin_settings.py | 1 | 1922 | from django.db.utils import OperationalError
from django.contrib.contenttypes.models import ContentType
PLUGIN_NAME = 'Featured Articles'
DESCRIPTION = 'This is a homepage element that renders featured articles.'
AUTHOR = 'Martin Paul Eve'
def install():
import core.models as core_models
import journal.models as journal_models
import press.models as press_models
# check whether this homepage element has already been installed for all journals
journals = journal_models.Journal.objects.all()
for journal in journals:
content_type = ContentType.objects.get_for_model(journal)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='featured_articles_setup',
template_path='journal/homepage_elements/featured.html',
content_type=content_type,
object_id=journal.pk,
has_config=True)
element.save()
presses = press_models.Press.objects.all()
for press in presses:
content_type = ContentType.objects.get_for_model(press)
element, created = core_models.HomepageElement.objects.get_or_create(
name=PLUGIN_NAME,
configure_url='featured_articles_setup',
template_path='journal/homepage_elements/featured.html',
content_type=content_type,
object_id=press.pk,
has_config=True)
element.save()
def hook_registry():
try:
install()
return {
'yield_homepage_element_context': {
'module': 'core.homepage_elements.featured.hooks',
'function': 'yield_homepage_element_context',
'name': PLUGIN_NAME,
}
}
except OperationalError:
# if we get here the database hasn't yet been created
return {}
except BaseException:
return {}
| agpl-3.0 | -9,114,365,655,021,099,000 | 32.137931 | 85 | 0.640999 | false | 4.280624 | false | false | false |
jcaraballo17/secret-webpage | woodypage/settings/base.py | 1 | 2559 | """
Django settings for woodypage project.
To set up the settings json file:
1. rename settings_template.json to settings.json
2. write all the necessary data in it
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import json
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
try:
with open(os.path.join(BASE_DIR, 'settings', 'settings.json')) as data_file:
data = json.load(data_file)
except IOError:
print("You need to setup the settings data file (see instructions in base.py file.)")
SECRET_KEY = data["secret_key"]
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
DEPLOYED = False
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_unused_media',
'adminsortable2',
'imagekit',
'paintings',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'woodypage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'woodypage.wsgi.application'
# Database
DATABASES = {}
for database in data['databases']:
DATABASES[database['name']] = {
'ENGINE': database['engine'],
'NAME': database['schema'],
'USER': database['user'],
'PASSWORD': database['password'],
'HOST': database['host'],
'PORT': database['port'],
'OPTIONS': database['options']
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
MEDIA_ROOT = ''
MEDIA_URL = ''
| apache-2.0 | 8,229,514,066,246,645,000 | 24.088235 | 89 | 0.65338 | false | 3.671449 | false | false | false |
systers/postorius | src/postorius/templatetags/nav_helpers.py | 1 | 1532 | # -*- coding: utf-8 -*-
# Copyright (C) 1998-2018 by the Free Software Foundation, Inc.
#
# This file is part of Postorius.
#
# Postorius is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# Postorius is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# Postorius. If not, see <http://www.gnu.org/licenses/>.
from django import template
register = template.Library()
@register.inclusion_tag('postorius/menu/list_nav.html', takes_context=True)
def list_nav(context, current, title='', subtitle=''):
return dict(list=context['list'],
current=current,
user=context['request'].user,
title=title, subtitle=subtitle)
@register.inclusion_tag('postorius/menu/user_nav.html', takes_context=True)
def user_nav(context, current, title='', subtitle=''):
return dict(current=current,
user=context['request'].user,
title=title, subtitle=subtitle)
@register.simple_tag(takes_context=True)
def nav_active_class(context, current, view_name):
if current == view_name:
return 'active'
return ''
| gpl-3.0 | -1,302,206,280,047,807,700 | 33.044444 | 78 | 0.698433 | false | 3.878481 | false | false | false |
michaelpacer/scikit-image | doc/examples/plot_equalize.py | 18 | 2786 | """
======================
Histogram Equalization
======================
This examples enhances an image with low contrast, using a method called
*histogram equalization*, which "spreads out the most frequent intensity
values" in an image [1]_. The equalized image has a roughly linear cumulative
distribution function.
While histogram equalization has the advantage that it requires no parameters,
it sometimes yields unnatural looking images. An alternative method is
*contrast stretching*, where the image is rescaled to include all intensities
that fall within the 2nd and 98th percentiles [2]_.
.. [1] http://en.wikipedia.org/wiki/Histogram_equalization
.. [2] http://homepages.inf.ed.ac.uk/rbf/HIPR2/stretch.htm
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from skimage import data, img_as_float
from skimage import exposure
matplotlib.rcParams['font.size'] = 8
def plot_img_and_hist(img, axes, bins=256):
"""Plot an image along with its histogram and cumulative histogram.
"""
img = img_as_float(img)
ax_img, ax_hist = axes
ax_cdf = ax_hist.twinx()
# Display image
ax_img.imshow(img, cmap=plt.cm.gray)
ax_img.set_axis_off()
# Display histogram
ax_hist.hist(img.ravel(), bins=bins, histtype='step', color='black')
ax_hist.ticklabel_format(axis='y', style='scientific', scilimits=(0, 0))
ax_hist.set_xlabel('Pixel intensity')
ax_hist.set_xlim(0, 1)
ax_hist.set_yticks([])
# Display cumulative distribution
img_cdf, bins = exposure.cumulative_distribution(img, bins)
ax_cdf.plot(bins, img_cdf, 'r')
ax_cdf.set_yticks([])
return ax_img, ax_hist, ax_cdf
# Load an example image
img = data.moon()
# Contrast stretching
p2, p98 = np.percentile(img, (2, 98))
img_rescale = exposure.rescale_intensity(img, in_range=(p2, p98))
# Equalization
img_eq = exposure.equalize_hist(img)
# Adaptive Equalization
img_adapteq = exposure.equalize_adapthist(img, clip_limit=0.03)
# Display results
fig, axes = plt.subplots(nrows=2, ncols=4, figsize=(8, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img, axes[:, 0])
ax_img.set_title('Low contrast image')
y_min, y_max = ax_hist.get_ylim()
ax_hist.set_ylabel('Number of pixels')
ax_hist.set_yticks(np.linspace(0, y_max, 5))
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_rescale, axes[:, 1])
ax_img.set_title('Contrast stretching')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_eq, axes[:, 2])
ax_img.set_title('Histogram equalization')
ax_img, ax_hist, ax_cdf = plot_img_and_hist(img_adapteq, axes[:, 3])
ax_img.set_title('Adaptive equalization')
ax_cdf.set_ylabel('Fraction of total intensity')
ax_cdf.set_yticks(np.linspace(0, 1, 5))
# prevent overlap of y-axis labels
fig.subplots_adjust(wspace=0.4)
plt.show()
| bsd-3-clause | -7,633,942,980,547,687,000 | 28.020833 | 78 | 0.700646 | false | 3.002155 | false | false | false |
ardekantur/pyglet | experimental/swigtypes/parse.py | 28 | 12636 | #!/usr/bin/env python
'''
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import gzip
import cPickle as marshal
import optparse
import os
import sys
import xml.sax
def parse_type(type_string):
'''Get a tuple of the type components for a SWIG-formatted type.
For example, given the type "p.f(p.struct _XExtData).int",
    return ('int', ('f', ('struct _XExtData', 'p'))); one level of indirection
    on the function type is removed, since CFUNCTYPE is already a pointer.
Qualifiers are ignored (removed).
'''
# Scan the type string left-to-right
buf = ''
stack = [()]
def flush(): # buf = flush()
if buf:
stack[-1] = stack[-1] + (buf,)
return ''
def push():
stack.append(())
def pop():
item = finalize(stack.pop())
if item is not None:
stack[-1] = stack[-1] + (item,)
def finalize(item):
assert type(item) is tuple
if not item:
# Empty tuple is dropped (empty param list)
return
elif item[0] == 'q':
# Discard qualifiers
return
# Reverse (puts pointers at end)
item = item[::-1]
# Reverse arguments of function
if item[-1] == 'f':
item = item[::-1]
# Empty out (void) param list
if item == ('f', ('void',)):
item = ('f',)
# Varargs encoding
elif item[-1] == 'v':
item = '...'
# Array encoding
elif item[-1] == 'a':
try:
item = ('a',) + tuple(int(j[0]) for j in item[-2::-1])
except (TypeError, ValueError):
# TODO arrays of dimension given by sizeof expression
item = ('a', 0)
# Remove one level of indirection for function types (CFUNCTYPE is
# already a pointer)
off = 0
for i, j in enumerate(item):
if type(j) is tuple and j and j[0] == 'f':
item = item[:i+1+off] + item[i+2+off:]
off -= 1
return item
for c in type_string:
if c == '.':
buf = flush()
elif c == '(':
push() # Push param list
buf = flush()
push() # Push item
elif c == ',':
buf = flush()
pop() # Pop item
push() # Push item
elif c == ')':
buf = flush()
pop() # Pop item
pop() # Pop param list
else:
buf += c
flush()
type_tuple = finalize(stack[0])
return type_tuple
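# A few illustrative results of parse_type() on simple SWIG type strings,
# hand-traced against the scanner above (for reference only):
#   parse_type('int')      -> ('int',)
#   parse_type('p.int')    -> ('int', 'p')
#   parse_type('p.p.char') -> ('char', 'p', 'p')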
class SwigInterfaceHandler(object):
def __init__(self):
self.name = None
self.cdecls = []
self.constants = []
def attribute(self, attrs):
if attrs['name'] == 'name':
self.name = str(attrs['value'])
def typemap(self, attrs):
return IgnoreElementHandler()
def cdecl(self, attrs):
handler = CDeclHandler(attrs)
self.cdecls.append(handler)
return handler
def constant(self, attrs):
handler = ConstantHandler(attrs)
self.constants.append(handler)
return handler
def class_(self, attrs):
handler = ClassHandler(attrs)
self.cdecls.append(handler)
return handler
def classforward(self, attrs):
handler = ClassForwardHandler(attrs)
self.cdecls.append(handler)
return handler
def enum(self, attrs):
handler = EnumHandler(attrs)
self.cdecls.append(handler)
return handler
def get_map(self):
map = {}
for cdecl in self.cdecls:
# ('typedef', type)
if cdecl.kind == 'typedef':
map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
# ('enum', items)
elif cdecl.kind == 'enum':
enum = (cdecl.kind, cdecl.get_items())
map[cdecl.kind + ' ' + cdecl.name] = enum
map[cdecl.get_tdname()] = enum
# ('struct', variables)
# ('union', variables)
elif cdecl.kind in ('struct', 'union'):
class_ = (cdecl.kind, cdecl.get_variables())
map[cdecl.kind + ' ' + cdecl.name] = class_
map[cdecl.get_tdname()] = class_
# ('function', type)
elif cdecl.kind == 'function':
map[cdecl.name] = (cdecl.kind, cdecl.get_type(with_decl=True))
# ('variable', type)
elif cdecl.kind == 'variable':
map[cdecl.name] = (cdecl.kind, cdecl.get_type())
else:
assert False, (cdecl.kind, cdecl.type, cdecl.name)
# Constants: ('constant', value)
for constant in self.constants:
map[constant.name] = ('constant', constant.get_value())
import pprint
pprint.pprint(map)
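        # Illustrative shapes of the resulting entries (added note; names such
        # as 'XOpenDisplay' are hypothetical examples, not taken from this file):
        #     map['XOpenDisplay']     -> ('function', <parsed type tuple>)
        #     map['struct _XExtData'] -> ('struct', ((field_name, field_type), ...))
        #     map['Bool']             -> ('typedef', <parsed type tuple>)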
return map
class IgnoreElementHandler(object):
pass
class ConstantHandler(object):
name = None
value = None
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'value':
self.value = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_value(self):
if self.type in ('int', 'long'):
# Yes, ugly and bad -- most C int constants can also be
# parsed as Python expressions; e.g. "1L << 8".
return int(eval(self.value))
return self.value
class EnumHandler(object):
name = None
tdname = None
kind = 'enum'
unnamed = False
def __init__(self, attrs):
self.items = []
def attribute(self, attrs):
name = attrs['name']
if name == 'name' and not self.unnamed:
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
self.unnamed = True
elif name == 'tdname':
self.tdname = str(attrs['value'])
def enumitem(self, attrs):
handler = EnumItemHandler(attrs)
self.items.append(handler)
return handler
def get_items(self):
items = []
index = 0
for item in self.items:
try:
# TODO parse enumvalueex properly
index = int(item.value)
except ValueError:
index += 1
items.append((item.name, index))
return tuple(items)
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class EnumItemHandler(object):
name = None
value = None
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
elif name == 'enumvalueex':
self.value = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_value(self):
if self.type in ('int', 'long'):
# Yes, ugly and bad -- most C int constants can also be
# parsed as Python expressions; e.g. "1L << 8".
return int(eval(self.value))
return self.value
class CDeclHandler(object):
name = None
kind = None
type = None
decl = ''
params = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'kind':
self.kind = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
elif name == 'decl':
self.decl = str(attrs['value'])
def parmlist(self, attrs):
self.params = []
handler = ParmListHandler(attrs, self.params)
return handler
def get_params(self):
# (type, ...)
if self.params is None:
return None
return tuple(p.get_type() for p in self.params)
def get_type(self, with_decl=False):
if with_decl:
return parse_type(self.decl + self.type)
else:
return parse_type(self.type)
def __str__(self):
if self.params:
return self.name + \
'(' + ', '.join(map(str, self.params)) + ') : ' + self.type
else:
return self.name + ' : ' + self.type
class ParmListHandler(object):
def __init__(self, attrs, params):
self.params = params
def parm(self, attrs):
param = ParmHandler(attrs)
self.params.append(param)
return param
class ParmHandler(object):
name = ''
type = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'type':
self.type = str(attrs['value'])
def get_type(self):
return parse_type(self.type)
def __str__(self):
return self.name + ' : ' + self.type
class ClassHandler(object):
name = ''
kind = None
tdname = None
unnamed = False
def __init__(self, attrs):
self.cdecls = []
def attribute(self, attrs):
name = attrs['name']
if name == 'name' and not self.unnamed:
self.name = str(attrs['value'])
elif name == 'unnamed':
self.name = str(attrs['value'])
self.unnamed = True
elif name == 'kind':
self.kind = str(attrs['value'])
assert self.kind in ('struct', 'union'), self.kind
elif name == 'tdname':
self.tdname = str(attrs['value'])
def cdecl(self, attrs):
handler = CDeclHandler(attrs)
self.cdecls.append(handler)
return handler
def get_variables(self):
# ((name, type), ...)
return tuple((cdecl.name, cdecl.get_type(with_decl=True))
for cdecl in self.cdecls if cdecl.kind == 'variable')
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class ClassForwardHandler(object):
name = ''
kind = None
tdname = None
def __init__(self, attrs):
pass
def attribute(self, attrs):
name = attrs['name']
if name == 'name':
self.name = str(attrs['value'])
elif name == 'kind':
self.kind = str(attrs['value'])
assert self.kind in ('struct', 'union'), self.kind
elif name == 'tdname':
self.tdname = str(attrs['value'])
def get_variables(self):
return ()
def get_tdname(self):
if self.tdname:
return self.tdname
else:
return self.name
class FFIContentHandler(xml.sax.handler.ContentHandler):
def __init__(self):
self.swig_interface_handler = SwigInterfaceHandler()
self.stack = [self.swig_interface_handler]
def startElement(self, name, attrs):
if name == 'class':
name = 'class_'
top = self.stack[-1]
func = getattr(top, name, None)
if func:
self.stack.append(func(attrs))
else:
self.stack.append(top)
def endElement(self, name):
del self.stack[-1]
class KeepGoingErrorHandler(xml.sax.handler.ErrorHandler):
def error(self, exception):
print exception
def fatalError(self, exception):
print exception
def parse(xml_filename, output_filename):
handler = FFIContentHandler()
error_handler = KeepGoingErrorHandler()
xml.sax.parse(xml_filename, handler, error_handler)
map = handler.swig_interface_handler.get_map()
data = marshal.dumps(map)
output_file = gzip.open(output_filename, 'w')
output_file.write(data)
output_file.close()
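# Reading a generated .ffi file back is the mirror of the write above.  A
# hedged sketch (not part of the original module; note that 'marshal' here is
# the cPickle module imported under that name at the top of this file):
#
#     def load_ffi(filename):
#         return marshal.loads(gzip.open(filename).read())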
if __name__ == '__main__':
usage = 'usage: %prog [options] <module.xml>'
op = optparse.OptionParser(usage=usage)
op.add_option('-o', '--output')
(options, args) = op.parse_args(sys.argv[1:])
if len(args) < 1:
print >> sys.stderr, 'No input file given'
sys.exit(1)
xml_filename = args[0]
module_name, _ = os.path.splitext(os.path.basename(xml_filename))
ffi_filename = module_name + '.ffi'
parse(xml_filename, ffi_filename)
| bsd-3-clause | 9,217,239,663,594,359,000 | 26.649891 | 78 | 0.517965 | false | 3.95493 | false | false | false |
mbr0wn/gnuradio | gr-uhd/python/uhd/__init__.py | 6 | 3117 | #
# Copyright 2010-2012 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
'''
Provides source and sink blocks to interface with the UHD library.
Used to send data to and receive data from the Ettus Research, LLC
product line of devices.
'''
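# Typical downstream usage, shown here only as a hedged illustration (block
# names and constructor signatures come from the generated bindings, and the
# empty device-address string simply selects the first device found):
#
#     from gnuradio import uhd
#     usrp = uhd.usrp_source("",
#                            uhd.stream_args(cpu_format="fc32", channels=[0]))
#     usrp.set_center_freq(uhd.tune_request_t(100e6), 0)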
########################################################################
# Prepare uhd swig module to make it more pythonic
########################################################################
def _prepare_uhd_python():
try:
from . import uhd_python
except ImportError:
import os
dirname, filename = os.path.split(os.path.abspath(__file__))
__path__.append(os.path.join(dirname, "bindings"))
from . import uhd_python
#some useful typedefs for the user
setattr(uhd_python, 'freq_range_t', uhd_python.meta_range_t)
setattr(uhd_python, 'gain_range_t', uhd_python.meta_range_t)
#Make the python tune request object inherit from float
#so that it can be passed in GRC as a frequency parameter.
#The type checking in GRC will accept the tune request.
#Also use kwargs to construct individual struct elements.
class tune_request_t(uhd_python.tune_request_t):
# def __new__(self, *args, **kwargs): return float.__new__(self)
def __float__(self): return self.target_freq
def __init__(self, *args, **kwargs):
super().__init__(*args)
for key, val in list(kwargs.items()): setattr(self, key, val)
setattr(uhd_python, 'tune_request_t', tune_request_t)
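    # What the wrapper above enables (hedged, illustrative only): a tune
    # request can stand in for a plain frequency because float() falls through
    # to its target_freq field, e.g.
    #
    #     tr = tune_request_t(100e6)
    #     assert float(tr) == tr.target_freq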
#handle general things on all uhd_python attributes
#Install the __str__ and __repr__ handlers if applicable
#Create aliases for uhd swig attributes to avoid the "_t"
for attr in dir(uhd_python):
myobj = getattr(uhd_python, attr)
if hasattr(myobj, 'to_string'): myobj.__repr__ = lambda o: o.to_string().strip()
if hasattr(myobj, 'to_pp_string'): myobj.__str__ = lambda o: o.to_pp_string().strip()
if hasattr(myobj, 'to_bool'): myobj.__nonzero__ = lambda o: o.to_bool()
if hasattr(myobj, 'to_int'): myobj.__int__ = lambda o: o.to_int()
if hasattr(myobj, 'to_real'): myobj.__float__ = lambda o: o.to_real()
if attr.endswith('_t'): setattr(uhd_python, attr[:-2], myobj)
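    # Net effect of the loop above (added note): bound objects gain readable
    # printing via their to_*string() helpers where available, and every
    # '*_t' name is also exposed without the suffix, e.g. uhd.tune_request
    # alongside uhd.tune_request_t.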
#make a new find devices that casts everything with the pythonized device_addr_t which has __str__
def find_devices(*args, **kwargs):
def to_pythonized_dev_addr(dev_addr):
new_dev_addr = uhd_python.device_addr_t()
for key in list(dev_addr.keys()): new_dev_addr[key] = dev_addr.get(key)
return new_dev_addr
return __builtins__['map'](to_pythonized_dev_addr, uhd_python.find_devices_raw(*args, **kwargs))
setattr(uhd_python, 'find_devices', find_devices)
########################################################################
# Initialize this module with the contents of uhd pybind
########################################################################
_prepare_uhd_python()
from .uhd_python import *
| gpl-3.0 | 3,382,835,436,989,460,500 | 44.173913 | 104 | 0.580045 | false | 3.724014 | false | false | false |