repo_name (stringlengths 6-61) | path (stringlengths 4-230) | copies (stringlengths 1-3) | size (stringlengths 4-6) | text (stringlengths 1.01k-850k) | license (stringclasses, 15 values) | hash (int64, -9,220,477,234,079,998,000 to 9,219,060,020B) | line_mean (float64, 11.6-96.6) | line_max (int64, 32-939) | alpha_frac (float64, 0.26-0.9) | autogenerated (bool, 1 class) | ratio (float64, 1.62-6.1) | config_test (bool, 2 classes) | has_no_keywords (bool, 2 classes) | few_assignments (bool, 1 class)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
vrutkovs/github-review-dashboard | github_review_dashboard/github_reviews.py | 1 | 8004 | # -*- coding: utf-8 -*-
import dateutil.parser
import datetime
import os
import logging
import sys
from dateutil.tz import tzutc
from github_client import GithubClient
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
'%(name)-25s: %(filename)s:%(lineno)-3d: %(message)s')
ch.setFormatter(formatter)
root.addHandler(ch)
logger = logging.getLogger('github_reviews')
NEVER = datetime.datetime(1970, 1, 1, 0, 0, 0, tzinfo=tzutc())
TOKEN = None
if 'TOKEN' in os.environ:
TOKEN = os.environ['TOKEN']
else:
print("Auth token not found, "
"please create a new token at Settings - 'Personal access tokens' "
"and set TOKEN env var")
def get_prs(client, user):
logger.debug("get_prs for user {}".format(user))
raw_prs = client.get_involved_pull_requests(user)
# Sort PRs by date - most likely the newest were not reviewed
sorted_prs = sorted(raw_prs, key=lambda x: dateutil.parser.parse(x['updated_at']), reverse=True)
pr_links = [x['html_url'] for x in sorted_prs]
logger.debug("pr_links: {}".format(pr_links))
for pr_link in pr_links:
owner, repo, number = GithubClient.get_pr_info_from_link(pr_link)
logger.debug("pr_links {}, owner {}, repo {}, number {}".format(
pr_link, owner, repo, number))
pr_reviews_raw = client.get_pr_reviews(owner, repo, number)
yield (pr_link, owner, repo, number, pr_reviews_raw)
def get_pr_reviews(pr_reviews_raw):
logger.debug("get_pr_reviews")
review_results = {}
pr_reviews_sorted = sorted(pr_reviews_raw,
key=lambda x: dateutil.parser.parse(x['submitted_at']))
for pr_review in pr_reviews_sorted:
user = pr_review['user']['login']
logger.debug("pr for user {} with state {}".format(
user, pr_review['state']))
# Don't replace approved/changes_required with 'commented'
# Github API quirk probably
existing_review = review_results.get(user, {}).get('state')
logger.debug("pr state {}".format(existing_review))
if existing_review in ['APPROVED', 'CHANGES_REQUESTED'] and \
pr_review['state'] == 'COMMENTED':
continue
review_results[user] = {
'state': pr_review['state'],
'date': dateutil.parser.parse(pr_review['submitted_at'])
}
logger.debug(review_results)
return review_results
def get_pr_review_requests(client, owner, repo, number):
requests_raw = client.get_pr_review_requests(owner, repo, number)
return [x['login'] for x in requests_raw]
def get_pr_comments(client, owner, repo, number):
comments = []
comments_raw = client.get_pr_comments(owner, repo, number)
for comment in comments_raw:
comments.append({
'user': comment['user']['login'],
'text': comment['body'],
'date': dateutil.parser.parse(comment['created_at'])
})
return comments
def get_pr_commits(client, owner, repo, number):
commits = []
commits_raw = client.get_pr_commits(owner, repo, number)
for commit in commits_raw:
commits.append({
'hash': commit['sha'][:8],
'message': commit['commit']['message'].split('\n')[0],
'user': commit['commit']['author']['name'],
'user_email': commit['commit']['author']['email'],
'date': dateutil.parser.parse(commit['commit']['author']['date'])
})
return commits
def prepare_report(user):
client = GithubClient(token=TOKEN)
return (client, get_prs(client, user))
def make_report(user, client, prs_with_reviews):
total_prs = None
for i, pr_data in enumerate(prs_with_reviews):
if not total_prs:
total_prs = client.total_count
progress = int(((i+1) / total_prs) * 100)
yield {'progress': progress}
pr_link, owner, repo, number, pr_reviews_raw = pr_data
logger.debug("PR {}".format(pr_link))
pr_info_raw = client.get_pr(owner, repo, number)
review_requested_from_users = get_pr_review_requests(client, owner, repo, number)
review_results = get_pr_reviews(pr_reviews_raw)
comments = get_pr_comments(client, owner, repo, number)
commits = get_pr_commits(client, owner, repo, number)
report_entry = {
'pr_link': pr_link,
'owner': owner,
'repo': repo,
'pr_number': number,
'pr_title': pr_info_raw['title'],
'pr_owner': pr_info_raw['user']['login'],
'pr_reviews': {},
'new_comments': [],
'new_commits': []
}
        # If user was explicitly requested to review it - show it
user_was_requested_to_review = user in review_requested_from_users
# Print others review state
for pr_reviewer in review_results:
pr_review_result = review_results[pr_reviewer]['state']
report_entry['pr_reviews'][pr_reviewer] = pr_review_result
# Add requests from other users unless there is a review set by them there already
for pr_review_request in review_requested_from_users:
if pr_review_request not in report_entry['pr_reviews'].keys():
report_entry['pr_reviews'][pr_review_request] = 'REVIEW_REQUESTED'
last_user_review_date = review_results.get(user, {}).get('date') or NEVER
# Find last user comment
user_comments = filter(lambda x: x['user'] == user, comments)
sorted_user_comments = sorted(user_comments, key=lambda x: x['date'])
last_user_comment_date = sorted_user_comments[-1]['date'] if sorted_user_comments else NEVER
logger.debug("last_user_comment_date {}".format(last_user_comment_date))
# Get user email so we could filter out new commits by this user
user_info_raw = client.get_user_info(user)
user_email = user_info_raw['email']
user_commits = filter(lambda x: x['user_email'] == user_email, commits)
sorted_user_commits = sorted(user_commits, key=lambda x: x['date'])
last_user_commit_date = sorted_user_commits[-1]['date'] if sorted_user_commits else NEVER
logger.debug("last_user_commit_date {}".format(last_user_commit_date))
# If last activity date cannot be found the PR should be skipped
if not user_was_requested_to_review and \
last_user_comment_date == NEVER and \
last_user_review_date == NEVER and \
last_user_commit_date == NEVER:
continue
last_user_activity = max([
last_user_comment_date,
last_user_review_date,
last_user_commit_date
])
logger.debug("last_user_activity {}".format(last_user_activity))
# Collect new comments since last user activity
new_comments = [x for x in comments if x['date'] > last_user_activity]
for comment in new_comments:
report_entry['new_comments'].append({
'date': comment['date'],
'user': comment['user'],
'text': comment['text']
})
logger.debug("new_comments {}".format(new_comments))
# Collect new commits since last activity
new_commits = [x for x in commits
if x['date'] > last_user_activity]
for commit in new_commits:
report_entry['new_commits'].append({
'hash': commit['hash'],
'user': commit['user'],
'message': commit['message'],
'date': commit['date']
})
logger.debug("new_commits {}".format(new_commits))
# Skip PR if no new comments/commits available
if user_was_requested_to_review or \
report_entry['new_comments'] or \
report_entry['new_commits']:
yield report_entry
| mit | 9,067,402,440,644,792,000 | 36.227907 | 100 | 0.600575 | false | 3.775472 | false | false | false |
barry-scott/git-workbench | WebSite/build-web-site.py | 2 | 3322 | #!/usr/bin/env python3
import sys
import os
import pathlib
import shutil
def copyFile( src, dst_dir ):
dst = dst_dir / src.name
shutil.copy( str( src ), str( dst ) )
if len(sys.argv) < 3:
print( 'Usage: %s <version> <kits-folder> [--test] [--install]' % (sys.argv[0],) )
    print( '       %s 0.9.3 /shared/Downloads/ScmWorkbench/0.9.3' % (sys.argv[0],) )
    sys.exit( 1 )
version = sys.argv[1]
built_kits_dir = pathlib.Path( sys.argv[2] )
testing = '--test' in sys.argv[3:]
install = '--install' in sys.argv[3:]
# source paths
builder_top_dir = pathlib.Path( os.environ['BUILDER_TOP_DIR'] )
src_dir = builder_top_dir / 'Source'
docs_dir = builder_top_dir / 'Docs'
web_site_dir = builder_top_dir / 'WebSite'
root_dir = web_site_dir / 'root'
docs_files_dir = docs_dir / 'scm-workbench_files'
# output paths
output_dir = pathlib.Path( 'tmp' )
output_kits_dir = output_dir / 'kits'
output_user_guide_dir = output_dir / 'user-guide'
output_user_guide_files_dir = output_dir / 'user-guide' / 'scm-workbench_files'
shutil.rmtree( str( output_dir ), ignore_errors=True )
output_dir.mkdir( parents=True, exist_ok=True )
output_kits_dir.mkdir( parents=True, exist_ok=True )
for src in root_dir.glob( '*.html' ):
copyFile( src, output_dir )
# use the user guide's CSS
copyFile( docs_dir / 'scm-workbench.css', output_dir )
rc = os.system( '"%s/build-docs.py" "%s"' % (docs_dir, output_user_guide_dir) )
if rc != 0:
print( 'build docs failed' )
sys.exit( 1 )
# copy doc images
output_user_guide_files_dir.mkdir( parents=True, exist_ok=True )
for src in docs_files_dir.glob( '*.png' ):
copyFile( src, output_user_guide_files_dir )
kit_values = {
'VERSION': version,
}
for kit_fmt in ('SCM-Workbench-%(VERSION)s-setup.exe',
'SCM-Workbench-%(VERSION)s.dmg'):
copyFile( built_kits_dir / (kit_fmt % kit_values), output_kits_dir )
with open( str( output_dir / 'index.html' ), encoding='utf-8' ) as f:
index = f.read()
with open( str( output_dir / 'index.html' ), 'w', encoding='utf-8' ) as f:
f.write( index % kit_values )
if testing:
index = output_dir / 'index.html'
if sys.platform == 'win32':
import ctypes
SW_SHOWNORMAL = 1
ShellExecuteW = ctypes.windll.shell32.ShellExecuteW
rc = ShellExecuteW( None, 'open', str(index), None, None, SW_SHOWNORMAL )
elif sys.platform == 'darwin':
cmd = '/usr/bin/open'
os.spawnvp( os.P_NOWAIT, cmd, [cmd, str(index)] )
else:
cmd = '/usr/bin/xdg-open'
os.spawnvp( os.P_NOWAIT, cmd, [cmd, str(index)] )
print( 'Web Site created in %s for version %s' % (output_dir, version) )
if install:
web_root = '/var/www/scm-workbench.barrys-emacs.org'
os.system( 'ssh [email protected] mkdir -p %s/kits' % (web_root,) )
os.system( 'ssh [email protected] mkdir -p %s/user-guide/scm-workbench_files' % (web_root,) )
os.system( 'scp tmp/index.html tmp/scm-workbench.css [email protected]:%s/' % (web_root,) )
os.system( 'scp tmp/kits/* [email protected]:%s/kits/' % (web_root,) )
os.system( 'scp tmp/user-guide/* [email protected]:%s/user-guide/' % (web_root,) )
os.system( 'scp tmp/user-guide/scm-workbench_files/* [email protected]:%s/user-guide/scm-workbench_files' % (web_root,) )
os.system( 'ssh [email protected] chmod -R -v a+r %s' % (web_root,) )
sys.exit( 0 )
| apache-2.0 | -2,889,871,900,023,997,000 | 33.247423 | 123 | 0.631547 | false | 2.661859 | false | false | false |
kamillus/MediaServer | client/settings.py | 1 | 3678 | import os
import json
import socket
class Settings(object):
protocol = "http"
host = "0.0.0.0" #socket.gethostbyname(socket.gethostname())
port = "1345"
user_settings_directory = "~/.MediaServer"
settings_file = "settings"
library_file = "library"
file_paths = []
library = {}
settings = {}
icon_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "..", "icon.png"))
def __init__(self):
if not os.path.exists(self.icon_path):
self.icon_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "../..", "icon.png"))
if not os.path.exists(self.icon_path):
self.icon_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "icon.png"))
self.create_user_dirs()
self.get_media_library()
self.get_config_from_files()
def get_server_address(self):
return self.protocol + "://" + self.host + ":" + self.port
def create_user_dirs(self):
if not os.path.exists(self.get_settings_directory()):
os.makedirs(self.get_settings_directory())
def get_settings_directory(self):
return os.path.abspath(os.path.expanduser(self.user_settings_directory))
def get_settings_file_path(self):
return os.path.abspath(os.path.expanduser(os.path.join(self.user_settings_directory, self.settings_file)))
def get_library_file_path(self):
return os.path.abspath(os.path.expanduser(os.path.join(self.user_settings_directory, self.library_file)))
def get_config_from_files(self):
try:
f = open(self.get_settings_file_path(), 'r')
self.settings = json.loads(f.read())
f.close()
except Exception as e:
print e
self.settings = {"file_paths": [os.path.expanduser("~")]}
f = open(self.get_settings_file_path(), 'w')
f.write(json.dumps(self.settings))
f.close()
self.file_paths = self.settings["file_paths"]
def get_placeholder_image(self):
placeholder = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "..", "static", "images", "lorempixel.jpg"))
if not os.path.exists(placeholder):
placeholder = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "../..", "static", "images", "lorempixel.jpg"))
if not os.path.exists(placeholder):
placeholder = os.path.abspath(os.path.join(os.path.dirname( __file__ ), "static", "images", "lorempixel.jpg"))
return placeholder
def write_config_settings(self):
try:
print self.settings["file_paths"]
self.settings = {"file_paths": self.settings["file_paths"]}
f = open(self.get_settings_file_path(), 'w')
f.write(json.dumps(self.settings))
f.close()
except Exception as e:
print e
def get_media_library(self):
try:
f = open(self.get_library_file_path(), 'r')
self.library = json.loads(f.read())
f.close()
except Exception as e:
print e
self.library = {}
f = open(self.get_library_file_path(), 'w')
f.write(json.dumps(self.library))
f.close()
def write_library(self, library, index):
print "Writing to: " + self.get_library_file_path()
print index
self.library[index] = {"library": library}
f = open(self.get_library_file_path(), 'w')
f.write(json.dumps(self.library))
f.close()
| gpl-3.0 | -7,362,011,661,599,930,000 | 36.161616 | 135 | 0.56335 | false | 3.6308 | false | false | false |
vadyur/script.media.aggregator | plugin.py | 1 | 3404 | import urllib, sys
import log
def make_url(params):
url = 'plugin://script.media.aggregator/?' + urllib.urlencode(params)
return url
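# Illustrative note (not in the original): make_url() builds the plugin URL that
# Kodi later hands back via sys.argv, and get_params() parses that query string
# into a dict again, e.g.
#   make_url({'action': 'play', 'item': '3'})
#     -> 'plugin://script.media.aggregator/?action=play&item=3'   (parameter order may vary)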
def get_params():
if len(sys.argv) < 3:
return None
param = dict()
paramstring = sys.argv[2]
if len(paramstring) >= 2:
params = sys.argv[2]
cleanedparams = params.replace('?', '')
if (params[len(params) - 1] == '/'):
params = params[0:len(params) - 2]
pairsofparams = cleanedparams.split('&')
param = {}
for i in range(len(pairsofparams)):
splitparams = {}
splitparams = pairsofparams[i].split('=')
if (len(splitparams)) == 2:
param[splitparams[0]] = splitparams[1]
# debug(param)
return param
def ScanMonitor():
import xbmc
class _ScanMonitor(xbmc.Monitor):
def __init__(self):
log.debug('ScanMonitor - __init__')
xbmc.Monitor.__init__(self)
self.do_exit = False
self.do_start = xbmc.getCondVisibility('Library.IsScanningVideo')
def onScanStarted(self, library):
log.debug('ScanMonitor - onScanFinished')
if library == 'video':
self.do_start = True
def onScanFinished(self, library):
log.debug('ScanMonitor - onScanFinished')
if library == 'video':
self.do_exit = True
return _ScanMonitor()
def wait_for_update(timeout=1000, monitor=None):
try:
import xbmc
log.debug('wait_for_update')
count = timeout
if not monitor:
monitor = ScanMonitor()
if not monitor.do_start:
log.debug('wait_for_update: no scan now')
del monitor
return
while not monitor.abortRequested() and count:
for i in xrange(10):
if monitor.waitForAbort(0.1) or monitor.do_exit:
log.debug('wait_for_update - Stop scan detected')
del monitor
return
if count % 10 == 1:
if not xbmc.getCondVisibility('Library.IsScanningVideo'):
log.debug('wait_for_update - Library.IsScanningVideo is False')
break
count -= 1
log.debug('wait_for_update - Library Scanning Video - wait ({}s)'.format(timeout-count))
del monitor
except BaseException:
log.print_tb()
import time
time.sleep(1)
def UpdateVideoLibrary(path=None, wait=False):
import xbmc, log
if path:
if isinstance(path,unicode):
path = path.encode('utf-8')
log.debug('UpdateLibrary: {}'.format(path))
command = 'UpdateLibrary(video, {})'.format(path)
else:
command = 'UpdateLibrary(video)'
if wait:
monitor = ScanMonitor()
while not monitor.abortRequested():
if not monitor.do_start or monitor.do_exit:
break
xbmc.sleep(100)
monitor.do_start = False
xbmc.executebuiltin(command, wait)
if wait:
log.debug('UpdateLibrary: wait for start')
while not monitor.abortRequested():
if monitor.do_start:
break
xbmc.sleep(100)
wait_for_update(monitor=monitor)
def string_to_ver(s):
import re
m = re.search(r'(\d+)\.(\d+)', s)
if m:
return ( m.group(1), m.group(2) )
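# Illustrative examples (not in the original):
#   string_to_ver('18.0 Git:20190128-d81c34c465') -> ('18', '0')   (strings, not ints)
#   string_to_ver('no version here')               -> None
# kodi_ver() below converts these pieces to ints for easy comparison.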
def kodi_ver():
import xbmc
bv = xbmc.getInfoLabel("System.BuildVersion")
BuildVersions = string_to_ver(bv)
# import log
# log.debug(BuildVersions)
res = {}
res['major'] = int(BuildVersions[0])
res['minor'] = int(BuildVersions[1])
return res
def RunPlugin(**kwargs):
import xbmc
url = make_url(kwargs)
xbmc.executebuiltin('RunPlugin("%s")' % url)
def RunPluginSync(**kwargs):
import xbmc
url = make_url(kwargs)
xbmc.executebuiltin('RunPlugin("%s")' % url, wait=True)
if __name__ == "__main__":
r = string_to_ver('18.0 Git:20190128-d81c34c465') | gpl-3.0 | -2,530,008,572,893,542,000 | 21.401316 | 91 | 0.668919 | false | 2.96 | false | false | false |
akampjes/p0rk-crackling | p0rk/porkweb/migrations/0003_auto__del_task__add_jobtask.py | 1 | 7331 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Task'
db.delete_table(u'porkweb_task')
# Adding model 'JobTask'
db.create_table(u'porkweb_jobtask', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.Job'])),
('taskid', self.gf('django.db.models.fields.CharField')(max_length=36)),
('taskstatus', self.gf('django.db.models.fields.CharField')(default='New', max_length=16)),
('taskresults', self.gf('django.db.models.fields.TextField')(blank=True)),
))
db.send_create_signal(u'porkweb', ['JobTask'])
def backwards(self, orm):
# Adding model 'Task'
db.create_table(u'porkweb_task', (
('taskresults', self.gf('django.db.models.fields.TextField')(blank=True)),
('taskstatus', self.gf('django.db.models.fields.CharField')(default='New', max_length=16)),
('job', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['porkweb.Job'])),
('taskid', self.gf('django.db.models.fields.CharField')(max_length=36)),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'porkweb', ['Task'])
# Deleting model 'JobTask'
db.delete_table(u'porkweb_jobtask')
models = {
u'porkweb.attackparam': {
'Meta': {'object_name': 'AttackParam', '_ormbases': [u'porkweb.Param']},
'attack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.AttackType']"}),
u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
},
u'porkweb.attacktype': {
'Meta': {'object_name': 'AttackType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
u'porkweb.cracked': {
'Meta': {'object_name': 'Cracked'},
'hash': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'porkweb.hashtype': {
'Meta': {'object_name': 'HashType'},
'hashcat': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hashcatType': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'ocllite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'oclplus': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'porkweb.job': {
'Meta': {'object_name': 'Job'},
'attackType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.AttackType']"}),
'eta': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'hashType': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.HashType']"}),
'hashes': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jobServer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.JobServer']", 'null': 'True', 'blank': 'True'}),
'progress': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'results': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'speed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
},
u'porkweb.jobparam': {
'Meta': {'object_name': 'JobParam', '_ormbases': [u'porkweb.Param']},
'job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'params'", 'to': u"orm['porkweb.Job']"}),
u'param_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['porkweb.Param']", 'unique': 'True', 'primary_key': 'True'})
},
u'porkweb.jobserver': {
'Meta': {'object_name': 'JobServer'},
'details': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipaddr': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
'os': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'port': ('django.db.models.fields.IntegerField', [], {'default': '8117'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'Offline'", 'max_length': '16'})
},
u'porkweb.jobtask': {
'Meta': {'object_name': 'JobTask'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['porkweb.Job']"}),
'taskid': ('django.db.models.fields.CharField', [], {'max_length': '36'}),
'taskresults': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'taskstatus': ('django.db.models.fields.CharField', [], {'default': "'New'", 'max_length': '16'})
},
u'porkweb.log': {
'Meta': {'object_name': 'Log'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'line': ('django.db.models.fields.TextField', [], {}),
'when': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
},
u'porkweb.param': {
'Meta': {'object_name': 'Param'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '64'})
}
}
complete_apps = ['porkweb'] | bsd-3-clause | -3,612,208,758,449,123,300 | 59.595041 | 153 | 0.544128 | false | 3.502628 | false | false | false |
Samneetsingh/OutlierDetection | odt/clustering/dbscan.py | 1 | 1993 | ###############################################
##  DBSCAN Outlier Detection Implementation  ##
###############################################
### Import Python Libraries ###
import pandas as pd
from pandas import DataFrame
from numpy import array, matrix
### Import R Libraries ###
import rpy2.robjects as R
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
pandas2ri.activate()
base = importr("base")
utils = importr("utils")
odtpackage = importr("dbscan")
#########################
## Global DBSCAN Class ##
#########################
class DBSCAN(object):
### DBSCAN Class Constructor ###
def __init__(self, xdf, epsilon, minPts):
self.xdf = xdf
self.epsilon = epsilon
self.minPts = minPts
self.label = []
self.cluster = []
    ### [TODO:] Implement Normalization functionality ###
def normalizeData(self):
pass
### DBSCAN clustering estimation Function ###
def DBSCAN(self, xdf):
if len(xdf) > 100000:
print "Warning! DBSCAN might fail for large dataset."
rdf = pandas2ri.py2ri(xdf)
return odtpackage.dbscan(base.as_matrix(rdf), self.epsilon, self.minPts)
### DBSCAN Execution Function ###
def getOutlier(self):
cls = self.DBSCAN(self.xdf)
print cls
for i in array(cls.rx2('cluster')):
self.cluster.append(i)
if i == 0:
self.label.append('outlier')
else:
self.label.append('normal')
return DataFrame({'Cluster': self.cluster, 'Label': self.label})
if __name__ == "__main__":
url = '/Users/warchief/Documents/Projects/DataRepository/AnomalyDetection/test.csv'
df = DataFrame.from_csv(path=url, header=0, sep=',', index_col=False)
X = df['SL_RRC_CONN_AVG_PER_CELL'].values
Y = df['I_DL_DRB_CELL_TPUT_MBPS'].values
d = {'x': X, 'y': Y}
pdf = DataFrame(data=d)
db = DBSCAN(pdf, 0.5, 50)
print db.getOutlier()
| gpl-3.0 | -1,624,652,984,021,545,700 | 25.932432 | 87 | 0.569995 | false | 3.650183 | false | false | false |
astrocatalogs/supernovae | tasks/nedd.py | 1 | 4007 | '''Import tasks for NED-D, the galactic distances catalog.
'''
import csv
import os
from collections import OrderedDict
from html import unescape
from astrocats.catalog.utils import (get_sig_digits, is_number, pbar,
pretty_num, uniq_cdl)
from astropy import units as un
from astropy.cosmology import Planck15 as cosmo
from astropy.cosmology import z_at_value
from decimal import Decimal
from ..supernova import SUPERNOVA
from ..utils import host_clean, name_clean
def do_nedd(catalog):
task_str = catalog.get_current_task_str()
nedd_path = os.path.join(
catalog.get_current_task_repo(), 'NED26.10.1-D-13.1.0-20160930.csv')
f = open(nedd_path, 'r')
data = sorted(list(csv.reader(f, delimiter=',', quotechar='"'))[
13:], key=lambda x: (x[9], x[3]))
reference = "NED-D v" + nedd_path.split('-')[-2]
refurl = "http://ned.ipac.caltech.edu/Library/Distances/"
nedbib = "1991ASSL..171...89H"
olddistname = ''
loopcnt = 0
for r, row in enumerate(pbar(data, task_str)):
if r <= 12:
continue
distname = row[3]
name = name_clean(distname)
# distmod = row[4]
# moderr = row[5]
dist = row[6]
bibcode = unescape(row[8])
snname = name_clean(row[9])
redshift = row[10]
cleanhost = ''
if name != snname and (name + ' HOST' != snname):
cleanhost = host_clean(distname)
if cleanhost.endswith(' HOST') or cleanhost.startswith('SN'):
cleanhost = ''
if not is_number(dist):
print(dist)
if dist and cleanhost:
catalog.nedd_dict.setdefault(
cleanhost, []).append(Decimal(dist))
if snname and 'HOST' not in snname:
snname, secondarysource = catalog.new_entry(
snname, srcname=reference, bibcode=nedbib, url=refurl,
secondary=True)
if bibcode:
source = catalog.entries[snname].add_source(bibcode=bibcode)
sources = uniq_cdl([source, secondarysource])
else:
sources = secondarysource
if name == snname:
if redshift:
catalog.entries[snname].add_quantity(
SUPERNOVA.REDSHIFT, redshift, sources)
if dist:
catalog.entries[snname].add_quantity(
SUPERNOVA.COMOVING_DIST, dist, sources)
if not redshift:
try:
zatval = z_at_value(cosmo.comoving_distance,
float(dist) * un.Mpc, zmax=5.0)
sigd = get_sig_digits(str(dist))
redshift = pretty_num(zatval, sig=sigd)
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
pass
else:
cosmosource = catalog.entries[name].add_source(
bibcode='2016A&A...594A..13P')
combsources = uniq_cdl(sources.split(',') +
[cosmosource])
catalog.entries[snname].add_quantity(
SUPERNOVA.REDSHIFT, redshift, combsources,
derived=True)
if cleanhost:
catalog.entries[snname].add_quantity(
SUPERNOVA.HOST, cleanhost, sources)
if catalog.args.update and olddistname != distname:
catalog.journal_entries()
olddistname = distname
loopcnt = loopcnt + 1
if catalog.args.travis and loopcnt % catalog.TRAVIS_QUERY_LIMIT == 0:
break
catalog.journal_entries()
f.close()
return
| mit | -142,725,370,561,388,260 | 37.902913 | 79 | 0.512603 | false | 4.06802 | false | false | false |
FEniCS/uflacs | uflacs/backends/ufc/templates.py | 1 | 1188 |
import re
from ffc.backends.ufc import *
# TODO: Make cell_orientation a double +1.0|-1.0 instead of the int flag in ffc/ufc/dolfin
# TODO: Simplify ufc templates by introducing 'preamble' keyword in place of members, constructor, destructor
domain_background = """
/// This is just here to document the memory layout of the geometry data arrays
struct geometry_data
{
// Example dimensions
std::size_t gdim = 3;
std::size_t tdim = 2;
std::size_t num_points = 1;
// Memory layout of geometry data arrays
double x[num_points * gdim]; // x[i] -> x[ip*gdim + i]
double X[num_points * tdim]; // X[j] -> X[ip*tdim + j]
double J[num_points * gdim * tdim]; // J[i,j] -> J[ip*gdim*tdim + i*tdim + j]
double detJ[num_points]; // detJ -> detJ[ip]
double K[num_points * tdim * gdim]; // K[j,i] -> K[ip*tdim*gdim + j*gdim + i]
double n[num_points * gdim]; // n[i] -> n[ip*gdim + i]
// In the affine case we have the relation:
// x[i] = x0[i] + sum_j J[i,j] X[j]
// X[j] = sum_i K[j,i] (x[i] - x0[i])
};
"""
def extract_keywords(template):
r = re.compile(r"%\(([a-zA-Z0-9_]*)\)")
return set(r.findall(template))
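# Illustrative example (not in the original): the %(...)s placeholders used by
# the ufc/ffc templates are collected into a set, e.g.
#   extract_keywords("class %(classname)s\n{\n%(members)s\n};") -> {'classname', 'members'}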
| gpl-3.0 | 7,236,381,945,068,393,000 | 33.941176 | 109 | 0.599327 | false | 2.687783 | false | false | false |
gazally/chatbot-reply | chatbot_reply/script.py | 2 | 9930 | #! /usr/bin/env python
# Copyright (c) 2016 Gemini Lasswell
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
""" chatbot_reply.script, defines decorators and superclass for chatbot scripts
"""
from __future__ import unicode_literals
import collections
from functools import wraps
import random
import re
from chatbot_reply.six import with_metaclass
from chatbot_reply.constants import _HISTORY, _PREFIX
def rule(pattern_text, previous_reply="", weight=1):
""" decorator for rules in subclasses of Script """
def rule_decorator(func):
@wraps(func)
def func_wrapper(self, pattern=pattern_text,
previous_reply=previous_reply, weight=weight):
result = func(self)
try:
return self.process_reply(self.choose(result))
except Exception as e:
name = (func.__module__[len(_PREFIX):] + "." +
self.__class__.__name__ + "." + func.__name__)
msg = (" in @rule while processing return value "
"from {0}".format(name))
e.args = (e.args[0] + msg,) + e.args[1:]
raise
return func_wrapper
return rule_decorator
class ScriptRegistrar(type):
""" Metaclass of Script which keeps track of newly imported Script
subclasses in a list.
Public class attribute:
registry - a list of classes
Public class method:
clear - empty the registry list
"""
registry = []
def __new__(cls, name, bases, attributes):
new_cls = type.__new__(cls, name, bases, attributes)
if new_cls.__module__ != cls.__module__:
cls.registry.append(new_cls)
return new_cls
@classmethod
def clear(cls):
cls.registry = []
class Script(with_metaclass(ScriptRegistrar, object)):
"""Base class for Chatbot Engine Scripts
Classes derived from this one can be loaded by the ChatbotEngine.
Subclasses may define:
topic - This must be a class attribute, not an instance variable.
Contains a string that is inspected when the class is
imported. All rules and substitution functions in a class are
associated with a topic, and will only be used to process a user's
message when the user's topic is equal to the class
topic. Changing this value for a class after import will have no
effect. If topic is set to None, the class will not be instantiated,
so __init__, setup and setup_user will not be run. If you
want to share a lot of rules between two Script subclasses with
different topics, have them inherit them from a base class with
its topic set to None.
setup(self) - a method that may be used to define alternates (see below)
and to initialize bot variables. It will be called after each instance
of a script object is created, and may be called again if the engine
wants to reset bot variables.
setup_user(self, user) - a method that is called the first time the engine
is processing a message from a given user. This is a good place to
initialize user variables used by a script.
alternates - a dictionary of patterns. Key names must be alphanumeric and
may not begin with an underscore or number. The patterns must be simple
in that they can't contain references to variables or wildcards or
the memorization character _. When the patterns for the rules are
imported the alternates will be substituted in at import time, as
opposed to user and bot variables, which are substituted in every time
a rule is matched with a new message. If you have 20,000 rules, this
might make a performance difference, but if you have 20, it won't.
Changing self.alternates after import will have no effect on pattern
matching.
substitute(self, text, list_of_lists) - Any method name defined by a
subclass that begins with substitute will be called with the raw text
of every message (within its topic) and a list of list of words that
have been split on whitespace. It must return a list of lists of words
where the outer list is the same length. Use this to do things like
expand contractions, interpret ascii smileys such as >:| and otherwise
mess with the tokenizations. If there is more than one substitute method
for a topic, they will all be called in an unpredictable order, each
passed the output of the one before.
@rule(pattern, previous="", weight=1)
rule(self) - Methods decorated by @rule and beginning with "rule" are
the gears of the script engine. The engine will select one rule method
that matches a message and call it. The @rule decorator will run the
method's return value through first self.choose then self.process_reply.
Child classes may redefine self.choose and self.process_reply if they would
like different behavior.
choose(self, retval) - A method that returns a string. The @rule decorator
will call self.choose on the return values of all rules.
process_reply(self, string) - A method that takes a string and returns a
string. The @rule decorator will call this on the return value it gets
from self.choose.
Public instance variables that are meant to be used by child classes,
but not modified (with the exception that it's ok to change, add and
delete things in the variable dictionaries, just not the dictionary
objects themselves):
botvars - dictionary of variable names and values that are global for
all users of the chatbot engine
uservars - dictionary of variable names and values for the current user
userinfo - UserInfo object containing info about the sender
match - a Match object (see rules.py) representing the relationship between
the matched user input (and previous reply, if applicable) and the
rule's patterns
Public instance variable, ok to change in child classes:
current_topic - string giving current conversation topic, which
will limit the rule search for the next message
"""
topic = "all"
def __init__(self):
self.botvars = None
self.userinfo = None
self.match = None
@property
def uservars(self):
return self.userinfo.vars
def ct_get(self):
return self.userinfo.topic_name
def ct_set(self, newtopic):
self.userinfo.topic_name = newtopic
current_topic = property(ct_get, ct_set)
def setup(self):
""" placeholder """
pass
def setup_user(self, user):
""" placeholder """
pass
def choose(self, args):
""" Select a response from a list of possible responses. For increased
flexibility, since this is used to process all return values from all
rules, this can also be passed None or an empty string or list, in which
case it will return the empty string, or it may be passed a string,
which it will simply return.
If the argument is a list of strings, select one randomly and return it.
If the argument is a list of tuples containing a string and an integer
weight, select a string randomly with the probability of its selection
being proportional to the weight.
"""
if args is None or not args:
reply = ""
else:
reply = args
if isinstance(args, list) and args:
reply = random.choice(args)
if isinstance(args[0], tuple):
args = [(string, max(1, weight)) for string, weight in args]
total = sum([weight for string, weight in args])
choice = random.randrange(total)
for string, weight in args:
if choice < abs(weight):
reply = string
break
else:
choice -= abs(weight)
return reply
def process_reply(self, string):
""" Process a reply before returning it to the chatbot engine. The only
thing this does is use built-in string formatting to substitute in the
match results.
"""
return string.format(*[], **self.match)
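# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal Script
# subclass showing how the pieces documented above fit together -- the
# class-level topic, setup()/setup_user() initialization, and a @rule method
# whose return value is run through choose() and process_reply() by the
# decorator.  The pattern text, variable names and replies are assumptions
# made for demonstration only; the engine fills in botvars/userinfo before
# calling any of these methods.
class _ExampleGreetingScript(Script):
    topic = "all"

    def setup(self):
        # bot variables are shared by every user of the engine
        self.botvars["bot_name"] = "ExampleBot"

    def setup_user(self, user):
        # user variables hold per-user state
        self.uservars["greet_count"] = 0

    @rule("hello")
    def rule_hello(self):
        self.uservars["greet_count"] += 1
        # a list of (reply, weight) tuples: choose() picks one at random,
        # with probability proportional to the weight
        return [("Hello!", 2), ("Hi there.", 1)]
# ---------------------------------------------------------------------------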
class UserInfo(object):
""" A class for stashing per-user information. Public instance variables:
vars: a dictionary of variable names and values
info: a dictionary of information about the user
topic_name: the name of the topic the user is currently in
msg_history: a deque containing Targets for a few recent messages
repl_history: a deque containing Targets for a few recent replies
"""
def __init__(self, info):
self.vars = {}
self.info = info
self.topic_name = "all"
self.msg_history = collections.deque(maxlen=_HISTORY)
self.repl_history = collections.deque(maxlen=_HISTORY)
# ----- a couple of useful utility functions for writers of substitute methods
def split_on_whitespace(text):
""" Because this has to work in Py 2.6, and re.split doesn't do UNICODE
in 2.6. Return text broken into words by whitespace. """
matches = [m for m in re.finditer("[\S]+", text, flags=re.UNICODE)]
results = [text[m.span()[0]:m.span()[1]] for m in matches]
return results
def kill_non_alphanumerics(text):
"""remove any non-alphanumeric characters from a string and return the
result. re.sub doesn't do UNICODE in python 2.6.
"""
matches = [m for m in re.finditer("[\w]+", text, flags=re.UNICODE)]
result = "".join([text[m.span()[0]:m.span()[1]] for m in matches])
return result
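# Illustrative examples (not in the original):
#   split_on_whitespace(u"Hello,  world!")   -> [u"Hello,", u"world!"]
#   kill_non_alphanumerics(u"Hello, world!") -> u"Helloworld"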
| mpl-2.0 | 2,177,815,255,354,238,500 | 39.864198 | 80 | 0.65428 | false | 4.493213 | false | false | false |
brainstorm-ai/DIGITS | digits/model/images/generic/views.py | 1 | 21335 | # Copyright (c) 2015-2016, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
import re
import tempfile
import flask
import werkzeug.exceptions
from .forms import GenericImageModelForm
from .job import GenericImageModelJob
from digits import frameworks
from digits import utils
from digits.config import config_value
from digits.dataset import GenericImageDatasetJob
from digits.inference import ImageInferenceJob
from digits.status import Status
from digits.utils import filesystem as fs
from digits.utils.forms import fill_form_if_cloned, save_form_to_job
from digits.utils.routing import request_wants_json, job_from_request
from digits.webapp import app, scheduler
blueprint = flask.Blueprint(__name__, __name__)
@blueprint.route('/new', methods=['GET'])
@utils.auth.requires_login
def new():
"""
Return a form for a new GenericImageModelJob
"""
form = GenericImageModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = []
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
return flask.render_template('models/images/generic/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
)
@blueprint.route('.json', methods=['POST'])
@blueprint.route('', methods=['POST'], strict_slashes=False)
@utils.auth.requires_login(redirect=False)
def create():
"""
Create a new GenericImageModelJob
Returns JSON when requested: {job_id,name,status} or {errors:[]}
"""
form = GenericImageModelForm()
form.dataset.choices = get_datasets()
form.standard_networks.choices = []
form.previous_networks.choices = get_previous_networks()
prev_network_snapshots = get_previous_network_snapshots()
## Is there a request to clone a job with ?clone=<job_id>
fill_form_if_cloned(form)
if not form.validate_on_submit():
if request_wants_json():
return flask.jsonify({'errors': form.errors}), 400
else:
return flask.render_template('models/images/generic/new.html',
form = form,
frameworks = frameworks.get_frameworks(),
previous_network_snapshots = prev_network_snapshots,
previous_networks_fullinfo = get_previous_networks_fulldetails(),
multi_gpu = config_value('caffe_root')['multi_gpu'],
), 400
datasetJob = scheduler.get_job(form.dataset.data)
if not datasetJob:
raise werkzeug.exceptions.BadRequest(
'Unknown dataset job_id "%s"' % form.dataset.data)
    # sweeps will be a list of the permutations of swept fields
# Get swept learning_rate
sweeps = [{'learning_rate': v} for v in form.learning_rate.data]
add_learning_rate = len(form.learning_rate.data) > 1
# Add swept batch_size
sweeps = [dict(s.items() + [('batch_size', bs)]) for bs in form.batch_size.data for s in sweeps[:]]
add_batch_size = len(form.batch_size.data) > 1
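    # Worked example (comment added for clarity, not in the original):
    #   learning_rate = [0.01, 0.001] and batch_size = [16, 32] produce 4 sweeps:
    #     [{'learning_rate': 0.01, 'batch_size': 16}, {'learning_rate': 0.001, 'batch_size': 16},
    #      {'learning_rate': 0.01, 'batch_size': 32}, {'learning_rate': 0.001, 'batch_size': 32}]
    #   i.e. one training job per (learning_rate, batch_size) combination.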
n_jobs = len(sweeps)
jobs = []
for sweep in sweeps:
# Populate the form with swept data to be used in saving and
# launching jobs.
form.learning_rate.data = sweep['learning_rate']
form.batch_size.data = sweep['batch_size']
# Augment Job Name
extra = ''
if add_learning_rate:
extra += ' learning_rate:%s' % str(form.learning_rate.data[0])
if add_batch_size:
extra += ' batch_size:%d' % form.batch_size.data[0]
job = None
try:
job = GenericImageModelJob(
username = utils.auth.get_username(),
name = form.model_name.data + extra,
dataset_id = datasetJob.id(),
)
# get framework (hard-coded to caffe for now)
fw = frameworks.get_framework_by_id(form.framework.data)
pretrained_model = None
#if form.method.data == 'standard':
if form.method.data == 'previous':
old_job = scheduler.get_job(form.previous_networks.data)
if not old_job:
raise werkzeug.exceptions.BadRequest(
'Job not found: %s' % form.previous_networks.data)
use_same_dataset = (old_job.dataset_id == job.dataset_id)
network = fw.get_network_from_previous(old_job.train_task().network, use_same_dataset)
for choice in form.previous_networks.choices:
if choice[0] == form.previous_networks.data:
epoch = float(flask.request.form['%s-snapshot' % form.previous_networks.data])
if epoch == 0:
pass
elif epoch == -1:
pretrained_model = old_job.train_task().pretrained_model
else:
for filename, e in old_job.train_task().snapshots:
if e == epoch:
pretrained_model = filename
break
if pretrained_model is None:
raise werkzeug.exceptions.BadRequest(
"For the job %s, selected pretrained_model for epoch %d is invalid!"
% (form.previous_networks.data, epoch))
if not (os.path.exists(pretrained_model)):
raise werkzeug.exceptions.BadRequest(
"Pretrained_model for the selected epoch doesn't exists. May be deleted by another user/process. Please restart the server to load the correct pretrained_model details")
break
elif form.method.data == 'custom':
network = fw.get_network_from_desc(form.custom_network.data)
pretrained_model = form.custom_network_snapshot.data.strip()
else:
raise werkzeug.exceptions.BadRequest(
'Unrecognized method: "%s"' % form.method.data)
policy = {'policy': form.lr_policy.data}
if form.lr_policy.data == 'fixed':
pass
elif form.lr_policy.data == 'step':
policy['stepsize'] = form.lr_step_size.data
policy['gamma'] = form.lr_step_gamma.data
elif form.lr_policy.data == 'multistep':
policy['stepvalue'] = form.lr_multistep_values.data
policy['gamma'] = form.lr_multistep_gamma.data
elif form.lr_policy.data == 'exp':
policy['gamma'] = form.lr_exp_gamma.data
elif form.lr_policy.data == 'inv':
policy['gamma'] = form.lr_inv_gamma.data
policy['power'] = form.lr_inv_power.data
elif form.lr_policy.data == 'poly':
policy['power'] = form.lr_poly_power.data
elif form.lr_policy.data == 'sigmoid':
policy['stepsize'] = form.lr_sigmoid_step.data
policy['gamma'] = form.lr_sigmoid_gamma.data
else:
raise werkzeug.exceptions.BadRequest(
'Invalid learning rate policy')
if config_value('caffe_root')['multi_gpu']:
if form.select_gpu_count.data:
gpu_count = form.select_gpu_count.data
selected_gpus = None
else:
selected_gpus = [str(gpu) for gpu in form.select_gpus.data]
gpu_count = None
else:
if form.select_gpu.data == 'next':
gpu_count = 1
selected_gpus = None
else:
selected_gpus = [str(form.select_gpu.data)]
gpu_count = None
# Set up augmentation structure
data_aug = {}
data_aug['flip'] = form.aug_flip.data
data_aug['quad_rot'] = form.aug_quad_rot.data
data_aug['rot_use'] = form.aug_rot_use.data
data_aug['rot'] = form.aug_rot.data
data_aug['scale_use'] = form.aug_scale_use.data
data_aug['scale'] = form.aug_scale.data
data_aug['hsv_use'] = form.aug_hsv_use.data
data_aug['hsv_h'] = form.aug_hsv_h.data
data_aug['hsv_s'] = form.aug_hsv_s.data
data_aug['hsv_v'] = form.aug_hsv_v.data
data_aug['conv_color'] = form.aug_conv_color.data
# Python Layer File may be on the server or copied from the client.
fs.copy_python_layer_file(
bool(form.python_layer_from_client.data),
job.dir(),
(flask.request.files[form.python_layer_client_file.name]
if form.python_layer_client_file.name in flask.request.files
else ''), form.python_layer_server_file.data)
job.tasks.append(fw.create_train_task(
job_dir = job.dir(),
dataset = datasetJob,
train_epochs = form.train_epochs.data,
snapshot_interval = form.snapshot_interval.data,
learning_rate = form.learning_rate.data[0],
lr_policy = policy,
gpu_count = gpu_count,
selected_gpus = selected_gpus,
batch_size = form.batch_size.data[0],
val_interval = form.val_interval.data,
pretrained_model= pretrained_model,
crop_size = form.crop_size.data,
use_mean = form.use_mean.data,
network = network,
random_seed = form.random_seed.data,
solver_type = form.solver_type.data,
shuffle = form.shuffle.data,
data_aug = data_aug,
)
)
## Save form data with the job so we can easily clone it later.
save_form_to_job(job, form)
jobs.append(job)
scheduler.add_job(job)
if n_jobs == 1:
if request_wants_json():
return flask.jsonify(job.json_dict())
else:
return flask.redirect(flask.url_for('digits.model.views.show', job_id=job.id()))
except:
if job:
scheduler.delete_job(job)
raise
if request_wants_json():
return flask.jsonify(jobs=[job.json_dict() for job in jobs])
# If there are multiple jobs launched, go to the home page.
return flask.redirect('/')
def show(job, related_jobs=None):
"""
Called from digits.model.views.models_show()
"""
return flask.render_template('models/images/generic/show.html', job=job, related_jobs=related_jobs)
@blueprint.route('/large_graph', methods=['GET'])
def large_graph():
"""
Show the loss/accuracy graph, but bigger
"""
job = job_from_request()
return flask.render_template('models/images/generic/large_graph.html', job=job)
@blueprint.route('/infer_one.json', methods=['POST'])
@blueprint.route('/infer_one', methods=['POST', 'GET'])
def infer_one():
"""
Infer one image
"""
model_job = job_from_request()
remove_image_path = False
if 'image_path' in flask.request.form and flask.request.form['image_path']:
image_path = flask.request.form['image_path']
elif 'image_file' in flask.request.files and flask.request.files['image_file']:
outfile = tempfile.mkstemp(suffix='.bin')
flask.request.files['image_file'].save(outfile[1])
image_path = outfile[1]
        os.close(outfile[0])
        remove_image_path = True
else:
raise werkzeug.exceptions.BadRequest('must provide image_path or image_file')
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
layers = 'none'
if 'show_visualizations' in flask.request.form and flask.request.form['show_visualizations']:
layers = 'all'
resize_override = ''
if 'dont_resize' in flask.request.form and flask.request.form['dont_resize']:
resize_override = 'none'
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Infer One Image",
model = model_job,
images = [image_path],
epoch = epoch,
layers = layers,
resize_override = resize_override
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, visualizations = inference_job.get_data()
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if remove_image_path:
os.remove(image_path)
image = None
if inputs is not None and len(inputs['data']) == 1:
image = utils.image.embed_image_html(inputs['data'][0])
# print_image = 0
# if print_image == 1:
# import numpy as np
# import PIL.Image
# file_directory = '/home/brainstorm/srodrigues/nnworker/datasets/text/'
# # file_name = '/home/brainstorm/srodrigues/nnworker/datasets/text/' + 'output_predictions.txt'
# # f = open(file_name, 'w')
# res = dict((name, blob.tolist()) for name,blob in outputs.iteritems())['output']
# image = (np.array(res).reshape((32, 32, 3))).astype(np.uint8) # it should be already uint8, but...
# result = PIL.Image.fromarray(image)
# result.save(file_directory+'image.jpg')
# # f.write(str(res))
# # f.close()
if request_wants_json():
return flask.jsonify({'outputs': dict((name, blob.tolist()) for name,blob in outputs.iteritems())})
else:
return flask.render_template('models/images/generic/infer_one.html',
model_job = model_job,
job = inference_job,
image_src = image,
network_outputs = outputs,
visualizations = visualizations,
total_parameters= sum(v['param_count'] for v in visualizations if v['vis_type'] == 'Weights'),
)
@blueprint.route('/infer_db.json', methods=['POST'])
@blueprint.route('/infer_db', methods=['POST', 'GET'])
def infer_db():
"""
Infer a database
"""
model_job = job_from_request()
if not 'db_path' in flask.request.form or flask.request.form['db_path'] is None:
raise werkzeug.exceptions.BadRequest('db_path is a required field')
db_path = flask.request.form['db_path']
if not os.path.exists(db_path):
        raise werkzeug.exceptions.BadRequest('DB "%s" does not exist' % db_path)
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Infer Many Images",
model = model_job,
images = db_path,
epoch = epoch,
layers = 'none',
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
keys = [str(idx) for idx in inputs['ids']]
else:
keys = None
if request_wants_json():
result = {}
for i, key in enumerate(keys):
result[key] = dict((name, blob[i].tolist()) for name,blob in outputs.iteritems())
return flask.jsonify({'outputs': result})
else:
return flask.render_template('models/images/generic/infer_db.html',
model_job = model_job,
job = inference_job,
keys = keys,
network_outputs = outputs,
)
@blueprint.route('/infer_many.json', methods=['POST'])
@blueprint.route('/infer_many', methods=['POST', 'GET'])
def infer_many():
"""
Infer many images
"""
model_job = job_from_request()
image_list = flask.request.files.get('image_list')
if not image_list:
raise werkzeug.exceptions.BadRequest('image_list is a required field')
if 'image_folder' in flask.request.form and flask.request.form['image_folder'].strip():
image_folder = flask.request.form['image_folder']
if not os.path.exists(image_folder):
            raise werkzeug.exceptions.BadRequest('image_folder "%s" does not exist' % image_folder)
else:
image_folder = None
if 'num_test_images' in flask.request.form and flask.request.form['num_test_images'].strip():
num_test_images = int(flask.request.form['num_test_images'])
else:
num_test_images = None
epoch = None
if 'snapshot_epoch' in flask.request.form:
epoch = float(flask.request.form['snapshot_epoch'])
paths = []
for line in image_list.readlines():
line = line.strip()
if not line:
continue
path = None
# might contain a numerical label at the end
match = re.match(r'(.*\S)\s+\d+$', line)
if match:
path = match.group(1)
else:
path = line
if not utils.is_url(path) and image_folder and not os.path.isabs(path):
path = os.path.join(image_folder, path)
paths.append(path)
if num_test_images is not None and len(paths) >= num_test_images:
break
# create inference job
inference_job = ImageInferenceJob(
username = utils.auth.get_username(),
name = "Infer Many Images",
model = model_job,
images = paths,
epoch = epoch,
layers = 'none',
resize_override = ''
)
# schedule tasks
scheduler.add_job(inference_job)
# wait for job to complete
inference_job.wait_completion()
# retrieve inference data
inputs, outputs, _ = inference_job.get_data()
# delete job folder and remove from scheduler list
scheduler.delete_job(inference_job)
if outputs is not None and len(outputs) < 1:
# an error occurred
outputs = None
if inputs is not None:
paths = [paths[idx] for idx in inputs['ids']]
if request_wants_json():
result = {}
for i, path in enumerate(paths):
result[path] = dict((name, blob[i].tolist()) for name,blob in outputs.iteritems())
return flask.jsonify({'outputs': result})
else:
return flask.render_template('models/images/generic/infer_many.html',
model_job = model_job,
job = inference_job,
paths = paths,
network_outputs = outputs,
)
def get_datasets():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, GenericImageDatasetJob) and (j.status.is_running() or j.status == Status.DONE)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_networks():
return [(j.id(), j.name()) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_networks_fulldetails():
return [(j) for j in sorted(
[j for j in scheduler.jobs.values() if isinstance(j, GenericImageModelJob)],
cmp=lambda x,y: cmp(y.id(), x.id())
)
]
def get_previous_network_snapshots():
prev_network_snapshots = []
for job_id, _ in get_previous_networks():
job = scheduler.get_job(job_id)
e = [(0, 'None')] + [(epoch, 'Epoch #%s' % epoch)
for _, epoch in reversed(job.train_task().snapshots)]
if job.train_task().pretrained_model:
e.insert(0, (-1, 'Previous pretrained model'))
prev_network_snapshots.append(e)
return prev_network_snapshots
| bsd-3-clause | 3,276,499,681,111,370,000 | 37.098214 | 209 | 0.559784 | false | 4.054542 | true | false | false |
Squeegee/vidis | convert_feeds.py | 1 | 56481 | #!/usr/bin/env python3
# -*- mode: python -*-
"""
Simple script for helping rename TV shows
"""
# TODO: Need support for custom in/out
# TODO: Need support for callable rename objects
# TODO: Use regex to get a list of all words in filename
# then make sure only ours exist in it
# If needed, use spellchecker or such to make sure extra words are valid
# TODO: Need way to specify alternatives for words, like USA or U.S.A. or the like.
# May be easiest to just allow refex for match, like its done for extraction.
# TODO: To fix duplicate names with appended extras (CSI, CSI Miami, CSI New York, etc)
# Search through all the regexs keeping all the match results. Whichever match result
# has the most words assoicated with it is the correct one. If there is more than one
# of them with the same number of words and matches, print error and continue.
# TODO: A new idea just occurred to me ... If I made a dictionary or regex that contained
# all the known words (all word: lists), then I could use it as a sort of sanity check in
# which to make sure that a show is really the show if it matches it's words: and didn't
# contain any of the rest of them ...
# TODO: Cannot search for a word twice (e.g. Brooklyn Nine Nine).
# TODO: Investigate '\w+' as re.findall() as it may be better/faster/etc.
import glob, sys, os
import re
from pprint import pprint
from vidis.models import db, FeedModel, RegexModel
DATEFMT = '{year:04d}-{month:02d}-{day:02d}'
DATES = [
# [yy]yy_mm_dd
'(?P<year>(?:19|20)[0-9]{2})[- /.](?P<month>(?:0[1-9]|1[012]))[- /.](?P<day>(?:0[1-9]|[12][0-9]|3[01]))',
# dd_mm_[yy]yy
'(?P<day>(?:0[1-9]|[12][0-9]|3[01]))[- /.](?P<month>(?:0[1-9]|1[012]))[- /.](?P<year>(?:19|20)[0-9]{2})',
# mm_dd_[yy]yy
'(?P<month>(?:0[1-9]|1[012]))[- /.](?P<day>(?:0[1-9]|[12][0-9]|3[01]))[- /.](?P<year>(?:19|20)[0-9]{2})'
]
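# Illustrative example (not in the original): matching a filename against one
# of the DATES patterns and formatting the result with DATEFMT, e.g.
#   m = re.search(DATES[0], 'Show.2015.03.14.Title.mp4')
#   if m:
#       print(DATEFMT.format(**{k: int(v) for k, v in m.groupdict().items()}))
#       # -> 2015-03-14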
# New (easier) way to add to RenameList
SimpleRenameList = {
'The Odd Couple': {},
'Eye Candy': {},
'Wolf Hall': {},
'X Company': {},
'Hindsight': {},
'Girlfriends Guide To Divorce': {},
'Catastrophe': { 'words': [ 'catastrophe' ] },
'Chasing Life': { 'words': [ 'chasing', 'life' ] },
'Better Call Saul': { 'words': [ 'better', 'call', 'saul' ] },
'Allegiance': { 'words': [ 'allegiance' ] },
'Fresh Off The Boat': { 'words': [ 'fresh', 'off', 'the', 'boat' ] },
'Schitts Creek': { 'words': [ 'schitts', 'creek' ] },
'Cockroaches': { 'words': [ 'cockroaches' ] },
'Backstrom': { 'words': [ 'backstrom' ] },
'12 Monkeys': { 'words': [ '12', 'monkeys' ] },
'Man Seeking Woman': { 'words': [ 'man', 'seeking', 'woman' ] },
'State Of Affairs': { 'words': [ 'state', 'of', 'affairs' ] },
'Galavant': { 'words': [ 'galavant' ] },
'Spy World': { 'words': [ 'spy', 'world' ] },
'Togetherness': { 'words': [ 'togetherness' ] },
'1600 Penn': { 'words': [ '1600', 'penn' ] },
'18 To Life': { 'words': [ '18', 'to', 'life' ] },
'2 Broke Girls': { 'words': [ '2', 'broke', 'girls' ] },
'24': { 'last': True, 'words': [ '24' ] },
'30 Rock': { 'words': [ '30', 'rock' ] },
'666 Park Avenue': { 'words': [ '666', 'park', 'avenue' ] },
'90210': { 'words': [ '90210' ] },
'A Gifted Man': { 'words': [ 'a', 'gifted', 'man' ] },
'A To Z': { 'words': [ 'a', 'to', 'z' ] },
'About A Boy': { 'words': [ 'about', 'a', 'boy' ] },
'Accidentally On Purpose': { 'words': [ 'accidentally', 'on', 'purpose' ] },
'Against The Wall': { 'words': [ 'against', 'the', 'wall' ] },
'Alaska State Troopers': { 'words': [ 'alaska', 'state', 'troopers' ] },
'Alcatraz': { 'words': [ 'alcatraz' ] },
'All About Aubrey': { 'words': [ 'all', 'about', 'aubrey' ] },
'All Worked Up': { 'words': [ 'all', 'worked', 'up' ] },
'Almost Human': { 'words': [ 'almost', 'human' ] },
'Almost Royal': { 'words': [ 'almost', 'royal' ] },
'Alphas': { 'words': [ 'alphas' ] },
'American Dad': { 'words': [ 'american', 'dad' ] },
'American Horror Story': { 'words': [ 'american', 'horror', 'story' ] },
'American Idol': { 'words': [ 'american', 'idol' ] },
'American Restoration': { 'words': [ 'american', 'restoration' ] },
'Animal Practice': { 'words': [ 'animal', 'practice' ] },
'Archer': { 'words': [ 'archer' ] },
'Are You There Chelsea': { 'words': [ 'are', 'you', 'there', 'chelsea' ] },
'Army Wives': { 'words': [ 'army', 'wives' ] },
'Arrow': { 'words': [ 'arrow' ] },
'Atlantis': { 'words': [ 'atlantis' ] },
'Awake': { 'words': [ 'awake' ] },
'Awkward': { 'words': [ 'awkward' ] },
'Ax Men': { 'words': [ 'ax', 'men' ] },
'Babylon': { 'words': [ 'babylon' ] },
'Back In The Game': { 'words': [ 'back', 'in', 'the', 'game' ] },
'Bad Judge': { 'words': [ 'bad', 'judge' ] },
'Bad Teacher': { 'words': [ 'bad', 'teacher' ] },
'Bang Goes The Theory': { 'words': [ 'bang', 'goes', 'the', 'theory' ] },
'Banshee': { 'words': [ 'banshee' ] },
'Bates Motel': { 'words': [ 'bates', 'motel' ] },
'Beauty And The Beast': { 'words': [ 'beauty', 'and', 'the', 'beast' ] },
'Becoming Human': { 'words': [ 'becoming', 'human' ] },
'Bedlam': { 'words': [ 'bedlam' ] },
'Being Eileen': { 'words': [ 'being', 'eileen' ] },
'Being Human (US)': { 'words': [ 'being', 'human', 'us' ] },
'Being Human': { 'last': True, 'words': [ 'being', 'human' ] },
'Being Mary Jane': { 'words': [ 'being', 'mary', 'jane' ] },
'Believe': { 'words': [ 'believe' ] },
'Ben And Kate': { 'words': [ 'ben', 'and', 'kate' ] },
'Bering Sea Gold Under The Ice': { 'words': [ 'bering', 'sea', 'gold', 'under', 'the', 'ice' ] },
'Bering Sea Gold': { 'words': [ 'bering', 'sea', 'gold' ] },
'Best Friends Forever': { 'words': [ 'best', 'friends', 'forever' ] },
'Betrayal': { 'words': [ 'betrayal' ] },
    'Better Off Ted': { 'words': [ 'better', 'off', 'ted' ] },
'Better With You': { 'words': [ 'better', 'with', 'you' ] },
'Big Love': { 'words': [ 'big', 'love' ] },
'Big Tips Texas': { 'words': [ 'big', 'tips', 'texas' ] },
'Black Sails': { 'words': [ 'black', 'sails' ] },
'Black-ish': { 'words': [ 'black', 'ish' ] },
'Blue Bloods': { 'words': [ 'blue', 'bloods' ] },
'Boardwalk Empire': { 'words': [ 'boardwalk', 'empire' ] },
'Bobs Burgers': { 'words': [ 'bob', 'burger'] },
'Body Of Proof': { 'words': [ 'body' ] },
'Bones': { 'words': [ 'bones' ] },
'Bored To Death': { 'words': [ 'bored', 'to.death' ] },
'Boss': { 'words': [ 'boss' ] },
'Boston Med': { 'words': [ 'boston', 'med' ] },
'Bostons Finest': { 'words': [ 'boston', 'finest' ] },
'Breaking Bad': { 'words': [ 'breaking', 'bad' ] },
'Breaking In': { 'words': [ 'breaking', 'in' ] },
'Breakout Kings': { 'words': [ 'breakout', 'kings' ] },
'Breakout': { 'last': True, 'words': [ 'breakout' ] },
'Britain\'s Really Disgusting Food': { 'words': [ 'brdf' ] },
'Broadchurch': { 'words': [ 'broadchurch' ] },
'Brooklyn Nine Nine': { 'words': [ 'brooklyn', 'nine' ] }, # TODO: No way to search for two of a single word
'Brothers And Sisters': { 'words': [ 'brothers', 'and', 'sisters' ] },
'Brothers': { 'last': True, 'words': [ 'brothers' ] },
'Burn Notice': { 'words': [ 'burn', 'notice' ] },
'CSI Miami': { 'words': [ 'csi', 'miami' ] },
'CSI New York': { 'words': [ 'csi', 'ny' ] },
'CSI': { 'last': True, 'words': [ 'csi' ] },
'Californication': { 'words': [ 'californication' ] },
'Camelot': { 'words': [ 'camelot' ] },
'Camp': { 'words': [ 'camp' ] },
'Caprica': { 'words': [ 'caprica' ] },
'Castle': { 'words': [ 'castl' ] },
'Celebrity Rehab With Dr. Drew': { 'words': [ 'celebrity', 'rehab', 'drew' ] },
'Chaos': { 'words': [ 'chaos' ] },
'Charlies Angels': { 'words': [ 'charlies', 'angels' ] },
'Chase': { 'words': [ 'chase' ] },
'Chasing Mummies': { 'words': [ 'chasing', 'mummies' ] },
'Chicago Fire': { 'words': [ 'chicago', 'fire' ] },
'Chicago PD': { 'last': True, 'words': [ 'chicago', 'p', 'd' ] },
'Childrens Hospital': { 'words': [ 'childrens', 'hospital' ] },
'Chuck': { 'words': [ 'chuck' ] },
'City Homicide': { 'words': [ 'city', 'homicide' ] },
'Cold Case': { 'words': [ 'cold', 'case' ] },
'Collision': { 'words': [ 'collision' ] },
'Combat Hospital': { 'words': [ 'combat', 'hospital' ] },
'Community': { 'words': [ 'community' ] },
'Constantine': { 'words': [ 'constantine' ] },
'Continuum': { 'words': [ 'continuum' ] },
'Copper': { 'words': [ 'copper' ] },
'Cosmos - A Space Time Odyssey': { 'words': [ 'cosmos', 'a', 'space', 'time', 'odyssey' ] },
'Cougar Town': { 'words': [ 'cougar', 'town' ] },
'Covert Affairs': { 'words': [ 'covert', 'affairs' ] },
'Cracker': { 'words': [ 'cracker' ] },
'Creature Shop Challenge': { 'words': [ 'creature', 'shop', 'challenge' ] },
'Criminal Minds Suspect Behavior': { 'words': [ 'criminal', 'minds', 'suspect', 'behav' ] },
'Criminal Minds': { 'last': True, 'words': [ 'criminal', 'minds' ] },
'Crisis Control': { 'words': [ 'crisis', 'control' ] },
'Crisis': { 'words': [ 'crisis' ] },
'Cristela': { 'words': [ 'cristela' ] },
'Crossing Lines': { 'words': [ 'crossing', 'lines' ] },
'Cuckoo': { 'words': [ 'cuckoo' ] },
'Cult': { 'words': [ 'cult' ] },
'Curb Your Enthusiasm': { 'words': [ 'curb', 'your', 'enthusiasm' ] },
    'Dads': { 'last': True, 'words': [ 'dads' ] },
'Damages': { 'words': [ 'damages' ] },
'Dancing On The Edge': { 'words': [ 'dancing', 'on', 'the', 'edge' ] },
'Dancing With The Stars': { 'words': [ 'dancing', 'with', 'the', 'stars' ] },
'Dark Blue': { 'words': [ 'dark', 'blue' ] },
'Deadliest Catch': { 'words': [ 'deadliest', 'catch' ] },
'Defiance': { 'words': [ 'defiance' ] },
'Desperate Housewives': { 'words': [ 'desperate', 'housewives' ] },
'Detroit 187': { 'words': [ 'det' ] },
'Devious Maids': { 'words': [ 'devious', 'maids' ] },
'Dexter': { 'words': [ 'dexter' ] },
'Do No Harm': { 'words': [ 'do', 'no', 'harm' ] },
'Doctor Who': { 'words': [ 'doctor', 'who', '2005' ] },
'Dollhouse': { 'words': [ 'dollhouse' ] },
'Dominion': { 'words': [ 'dominion' ] },
'Downfall': { 'words': [ 'downfall' ] },
'Dracula': { 'words': [ 'dracula', '2013' ] },
'Dream House': { 'words': [ 'dream', 'house' ] },
'Dream Machines': { 'words': [ 'dream', 'machines' ] },
'Drop Dead Diva': { 'words': [ 'drop', 'dead', 'diva' ] },
'Eagleheart': { 'words': [ 'eagleheart' ] },
'Eastwick': { 'words': [ 'eastwick' ] },
'Elementary': { 'words': [ 'elementary' ] },
'Emily Owens M.D.': { 'words': [ 'emily', 'owens' ] },
'Empire': { 'last': True, 'words': [ 'empire' ] },
'Enlightened': { 'words': [ 'enlightened' ] },
'Episodes': { 'words': [ 'episodes' ] },
'Eureka': { 'words': [ 'eureka' ] },
'Extant': { 'words': [ 'extant' ] },
'Fairly Legal': { 'words': [ 'fairly', 'legal' ] },
'Falling Skies': { 'words': [ 'falling', 'skies' ] },
'Family Guy': { 'words': [ 'family', 'guy' ] },
'Family Tools': { 'words': [ 'family', 'tools' ] },
'Family Tree': { 'words': [ 'family', 'tree' ] },
'Fargo': { 'words': [ 'fargo' ] },
'Fear Factor': { 'words': [ 'fear', 'factor' ] },
'Finding Bigfoot': { 'words': [ 'finding', 'bigfoot' ] },
'Finding Carter': { 'words': [ 'finding', 'carter' ] },
'Flash Forward': { 'words': [ 'flash', 'forward' ] },
'Flashpoint': { 'words': [ 'flash', 'point' ] },
'Forever': { 'words': [ 'forever' ] },
'Franklin And Bash': { 'words': [ 'franklin', 'and', 'bash' ] },
'Freakshow': { 'words': [ 'freakshow' ] },
'Free Agents': { 'words': [ 'free', 'agents' ] },
'Friends With Better Lives': { 'words': [ 'friends', 'with', 'better', 'lives' ] },
'Fringe': { 'words': [ 'fringe' ] },
'Funny As Hell': { 'words': [ 'funny', 'as', 'hell' ] },
'Futurama': { 'words': [ 'futurama' ] },
'Game Of Thrones': { 'words': [ 'game', 'of', 'thrones' ] },
'Gang World': { 'words': [ 'gang', 'world' ] },
'Gary Unmarried': { 'words': [ 'gary', 'unmarried' ] },
'Ghost Lab': { 'words': [ 'ghost', 'lab' ] },
'Ghost Whisperer': { 'words': [ 'ghost', 'whisperer' ] },
'Girl Meets World': { 'words': [ 'girl', 'meets', 'world' ] },
'Girls': { 'last': True, 'words':[ 'girls' ] },
'Glee': { 'words': [ 'glee' ] },
'Glue': { 'words': [ 'glue' ] },
'Good Vibes': { 'words': [ 'good', 'vibes' ] },
'Gossip Girl': { 'words': [ 'gossip', 'girl' ] },
'Gotham': { 'words': [ 'gotham' ] },
'Graceland': { 'words': [ 'graceland' ] },
'Gracepoint': { 'words': [ 'gracepoint' ] },
'Greek': { 'words': [ 'greek' ] },
'Greys Anatomy': { 'words': [ 'greys', 'anatomy' ] },
'Grimm': { 'words': [ 'grimm' ] },
'Ground Floor': { 'words': [ 'ground', 'floor' ] },
'Growing Up Fisher': { 'words': [ 'growing', 'up', 'fisher' ] },
'Hank': { 'words': [ 'hank' ] },
'Hannibal': { 'words': [ 'hannibal' ] },
'Happily Divorced': { 'words': [ 'happily', 'divorced' ] },
'Happy Endings': { 'words': [ 'happy', 'endings' ] },
'Happy Town': { 'words': [ 'happy', 'town' ] },
'Happy Valley': { 'words': [ 'happy', 'valley' ] },
'Hard Time': { 'words': [ 'hard', 'time' ] },
'Harrys Law': { 'words': [ 'harrys', 'law' ] },
'Haven': { 'words': [ 'haven' ] },
'Hawaii Five\'0': { 'words': [ 'hawaii', 'five', '0' ] },
'Hawthorne': { 'words': [ 'hawthorne' ] },
'Heavy': { 'words': [ 'heavy' ] },
'Helix': { 'words': [ 'helix' ] },
'Hellcats': { 'words': [ 'hellcats' ] },
'Hello Ladies': { 'words': [ 'hello', 'ladies' ] },
'Hello Ross': { 'words': [ 'hello', 'ross' ] },
'Hells Kitchen': { 'words': [ 'hells', 'kitchen' ] },
'Hens Behaving Badly': { 'words': [ 'hens', 'behaving', 'badly' ] },
'Heroes': { 'words': [ 'heroes' ] },
'High School USA': { 'words': [ 'high', 'school', 'usa' ] },
'Highway Patrol': { 'words': [ 'highway', 'patrol' ] },
'Hoarders': { 'words': [ 'hoarders' ] },
'Homeland': { 'words': [ 'homeland' ] },
'Hostages': { 'words': [ 'hostages' ] },
'Hot In Cleveland': { 'words': [ 'hot', 'in', 'cleveland' ] },
'Hotel Stephanie': { 'words': [ 'hotel', 'stephanie' ] },
'House Of Lies': { 'words': [ 'house', 'of', 'lies' ] },
'House': { 'last': True, 'words': [ 'house' ] },
'How I Met Your Mother': { 'words': [ 'how', 'i', 'met', 'your', 'mother' ] },
'How To Be A Gentleman': { 'words': [ 'how', 'to', 'be', 'a', 'gentleman' ] },
'How To Get Away With Murder': { 'words': [ 'how', 'to', 'get', 'away', 'with', 'murder' ] },
'How To Live With Your Parents': { 'words': [ 'how', 'to', 'live', 'with', 'your', 'parents' ] },
'Huge': { 'words': [ 'huge' ] },
'Human Target': { 'words': [ 'human', 'targe' ] },
'Hung': { 'words': [ 'hung' ] },
'Ice Road Truckers': { 'words': [ 'ice', 'road', 'truckers' ] },
'In Plain Sight': { 'words': [ 'in', 'plain', 'sight' ] },
'In The Flesh': { 'words': [ 'in', 'the', 'flesh' ] },
'In Treatment': { 'words': [ 'in', 'treatment' ] },
'Injustice': { 'words': [ 'injustice' ] },
'Inside Amy Schumer': { 'words': [ 'inside', 'amy', 'schumer' ] },
'Instant Mom': { 'words': [ 'instant', 'mom' ] },
'Intelligence': { 'last': True, 'words': [ 'intelligence' ] },
'Jane By Design': { 'words': [ 'jane', 'by', 'design' ] },
'Jane The Virgin': { 'words': [ 'jane', 'the', 'virgin' ] },
'Jennifer Falls': { 'words': [ 'jennifer', 'falls' ] },
'Justified': { 'words': [ 'justified' ] },
'Kingdom': { 'words': [ 'kingdom' ] },
'Kirstie': { 'words': [ 'kirstie' ] },
'Kitchen Nightmares': { 'words': [ 'kitchen', 'nightmares' ] },
'LA Ink': { 'words': [ 'la', 'ink' ] },
'Last Comic Standing': { 'words': [ 'last', 'comic', 'standing' ] },
'Last Man Standing': { 'words': [ 'last', 'man', 'standing' ] },
'Last Resort': { 'words': [ 'last', 'resort' ] },
'Law And Order CI': { 'words': [ 'law', 'and', 'order', 'criminal', 'intent' ] },
'Law And Order LA': { 'words': [ 'law', 'and', 'order', 'la' ] },
'Law And Order SVU': { 'words': [ 'law', 'and', 'order', 's', 'v', 'u' ] },
'Law And Order UK': { 'words': [ 'law', 'and', 'order', 'uk' ] },
'Law And Order': { 'last': True, 'words': [ 'law', 'and', 'order' ] },
'Legit': { 'words': [ 'legit' ] },
'Leverage': { 'words': [ 'leverage' ] },
'Lie To Me': { 'words': [ 'lie', 'to', 'me' ] },
'Life On A Wire': { 'words': [ 'life', 'on', 'a', 'wire' ] },
'Life Unexpected': { 'words': [ 'life', 'unexpected' ] },
'Lights Out': { 'words': [ 'lights', 'out' ] },
'Live To Dance': { 'words': [ 'live', 'to', 'dance' ] },
'Lone Star': { 'words': [ 'lone', 'star' ] },
'Long Island Medium': { 'words': [ 'long', 'island', 'medium' ] },
'Lost': { 'words': [ 'lost' ] },
'Love Bites': { 'words': [ 'love', 'bites' ] },
'Mad Dogs': { 'words': [ 'mad', 'dogs' ] },
'Mad Love': { 'words': [ 'mad', 'love' ] },
'Mad Men': { 'words': [ 'mad', 'men' ] },
'Madam Secretary': { 'words': [ 'madam', 'secretary' ] },
'Made In Jersey': { 'words': [ 'made', 'in', 'jersey' ] },
'Magic City': { 'words': [ 'magic', 'city' ] },
'Major Crimes': { 'words': [ 'major', 'crimes' ] },
'Man Up': { 'words': [ 'man', 'up' ] },
'Marry Me': { 'words': [ 'marry', 'me' ] },
'Marvels Agent Carter': { 'words': [ 'marvels', 'agent', 'carter' ] },
'Marvels Agents Of S.H.I.E.L.D.': { 'words': [ 'marvels', 'agents', 'of', 's.h.i.e.l.d' ] },
'Masters Of Sex': { 'words': [ 'masters', 'of', 'sex' ] },
'Matador (US)': { 'words': [ 'matador' ] },
'Medium': { 'words': [ 'medium' ] },
'Melissa And Joey': { 'words': [ 'melissa', 'and', 'joey' ] },
'Melrose Place': { 'words': [ 'melrose', 'place' ] },
'Memphis Beat': { 'words': [ 'memphis', 'beat' ] },
'Men Of A Certain Age': { 'words': [ 'men', 'of', 'a', 'certain', 'age' ] },
'Mercy': { 'words': [ 'mercy' ] },
'Merlin': { 'words': [ 'merli' ] },
'Miami Medical': { 'words': [ 'miami', 'medical' ] },
'Mike And Molly': { 'words': [ 'mike', 'and', 'molly' ] },
'Mind Games': { 'words': [ 'mind', 'games' ] },
'Miranda': { 'words': [ 'miranda' ] },
'Missing': { 'words': [ 'missing', '2012' ] },
'Mistresses': { 'words': [ 'mistresses' ] },
'Mixed Britannia': { 'words': [ 'mixed', 'britannia' ] },
'Mob City': { 'words': [ 'mob', 'city' ] },
'Mockingbird Lane': { 'words': [ 'mockingbird', 'lane' ] },
'Modern Family': { 'words': [ 'modern', 'family' ] },
'Mom': { 'words': [ 'mom' ] },
'Monday Mornings': { 'words': [ 'monday', 'mornings' ] },
'Mongrels': { 'words': [ 'mongrels' ] },
'Monk': { 'words': [ 'monk' ] },
'Motive': { 'words': [ 'motive' ] },
'Mr. Pickles': { 'words': [ 'mr', 'pickles' ] },
'Mr. Sunshine': { 'words': [ 'mr', 'sunshine' ] },
'Mulaney': { 'words': [ 'mulaney' ] },
'Murderland': { 'words': [ 'murderland' ] },
'My Babysitters A Vampire': { 'words': [ 'my', 'babysitters', 'a', 'vampire' ] },
'My Boys': { 'words': [ 'my', 'boys' ] },
'My Generation': { 'words': [ 'my', 'generatio' ] },
'NCIS Los Angeles': { 'words': [ 'ncis', 'los', 'angeles' ] },
'NCIS New Orleans': { 'words': [ 'ncis', 'new', 'orleans' ] },
'NCIS': { 'last': True, 'words': [ 'ncis' ] },
'NTSF-SD-SUV': { 'words': [ 'ntsf', 'sd', 'suv' ] },
'NYC 22': { 'words': [ 'nyc', '22' ] },
    'Nashville': { 'words': [ 'nashville' ] },
'Necessary Roughness': { 'words': [ 'necessary', 'roughness' ] },
'New Girl': { 'words': [ 'new', 'girl' ] },
'Nikita': { 'words': [ 'nikita' ] },
'Nip Tuck': { 'words': [ 'nip', 'tuck' ] },
'No Ordinary Family': { 'words': [ 'no', 'ordinary', 'family' ] },
'Numb3rs': { 'words': [ 'numb3rs' ] },
'Nurse Jackie': { 'words': [ 'nurse', 'jackie' ] },
'Off The Map': { 'words': [ 'off', 'the', 'map' ] },
'Offspring': { 'words': [ 'offspring' ] },
'Once Upon A Time In Wonderland': { 'words': [ 'once', 'upon', 'a', 'time', 'in', 'wonderland' ] },
'Once Upon A Time': { 'last': True, 'words': [ 'once', 'upon', 'a', 'time' ] },
'One Tree Hill': { 'words': [ 'one', 'tree', 'hill' ] },
'Orphan Black': { 'words': [ 'orphan', 'black' ] },
'Outlaw': { 'words': [ 'outlaw' ] },
'Outsourced': { 'words': [ 'outsourced' ] },
'Package Deal': { 'words': [ 'package', 'deal' ] },
'Pan Am': { 'words': [ 'pan', 'am' ] },
'Parenthood': { 'words': [ 'parenthood' ] },
'Parks And Recreation': { 'words': [ 'parks', 'and', 'recreation' ] },
'Party Down': { 'words': [ 'party', 'down' ] },
'Past Life': { 'words': [ 'past', 'life' ] },
'Perception': { 'words': [ 'perception' ] },
'Perfect Couples': { 'words': [ 'perfect', 'couples' ] },
'Person Of Interest': { 'words': [ 'person', 'of', 'interest' ] },
'Persons Unknown': { 'words': [ 'person', 'unknown' ] },
'Plain Jane': { 'words': [ 'plain', 'jane' ] },
'Political Animals': { 'words': [ 'political', 'animals' ] },
'Portlandia': { 'words': [ 'portlandia' ] },
'Pretty Little Liars': { 'words': [ 'pretty', 'little', 'liars' ] },
'Prey (UK)': { 'words': [ 'prey', 'uk' ] },
'Prime Suspect': { 'words': [ 'prime', 'suspect' ] },
'Primeval New World': { 'words': [ 'primeval', 'new', 'world' ] },
'Primeval': { 'last': True, 'words': [ 'primeval' ] },
'Private Practice': { 'words': [ 'private', 'practice' ] },
'Psych': { 'words': [ 'psych' ] },
'Raising Hope': { 'words': [ 'raising', 'hope' ] },
'Rake': { 'words': [ 'rake' ] },
'Ravenswood': { 'words': [ 'ravenswood' ] },
'Ray Donovan': { 'words': [ 'ray', 'donovan' ] },
    'Reckless': { 'words': [ 'reckless' ] },
'Red Band Society': { 'words': [ 'red', 'band', 'society' ] },
'Redwood Kings': { 'words': [ 'redwood', 'kings' ] },
'Reign': { 'words': [ 'reign' ] },
'Remember Me': { 'words': [ 'remember', 'me' ] },
'Rescue Me': { 'words': [ 'rescue', 'me' ] },
'Rescue Special Ops': { 'words': [ 'rescue', 'special', 'ops' ] },
'Resurrection': { 'words': [ 'resurrection' ] },
'Retired At 35': { 'words': [ 'retired', 'at', '35' ] },
'Revenge': { 'words': [ 'revenge' ] },
'Revolution': { 'words': [ 'revolution' ] },
'Ringer': { 'words': [ 'ringer' ] },
'Rizzoli And Isles': { 'words': [ 'rizzoli', 'and', 'isles' ] },
'Rob': { 'words': [ 'rob' ] },
'Romantically Challenged': { 'words': [ 'romantically', 'challenged' ] },
'Rookie Blue': { 'words': [ 'rookie', 'blue' ] },
'Royal Navy Caribbean Patrol': { 'words': [ 'royal', 'navy', 'caribbean', 'patrol' ] },
'Royal Pains': { 'words': [ 'royal', 'pains' ] },
'Rubicon': { 'words': [ 'rubicon' ] },
'Rules Of Engagement': { 'words': [ 'rules', 'of', 'engagement' ] },
'Running Wilde': { 'words': [ 'running', 'wilde' ] },
'Rush': { 'last': True, 'words': [ 'rush' ] },
'Salem': { 'words': [ 'salem' ] },
'Same Name': { 'words': [ 'same', 'name' ] },
'Sanctuary': { 'words': [ 'sanctuary' ] },
'Satisfaction': { 'words': [ 'satisfaction' ] },
'Saturday Night Live': { 'words': [ 'saturday', 'night', 'live' ] },
'Saving Grace': { 'words': [ 'saving', 'grace' ] },
'Saving Hope': { 'words': [ 'saving', 'hope' ] },
'Scandal': { 'words': [ 'scandal' ] },
'Scorpion': { 'words': [ 'scorpion' ] },
'Scoundrels': { 'words': [ 'scoundrels' ] },
'Scrubs': { 'words': [ 'scrubs' ] },
'Sea Patrol UK': { 'words': [ 'sea', 'patrol', 'uk' ] },
'Sea Patrol': { 'words': [ 'sea', 'patrol' ] },
'Sean Saves The World': { 'words': [ 'sean', 'saves', 'the', 'world' ] },
'Secret Diary Of A Call Girl': { 'words': [ 'secret', 'diary', 'call', 'girl' ] },
'Secret Girlfriend': { 'words': [ 'secret', 'girl', 'friend' ] },
'Secret State': { 'words': [ 'secret', 'state' ] },
'Seed': { 'words': [ 'seed' ] },
'Selfie': { 'words': [ 'selfie' ] },
'Sex Rehab With Dr. Drew': { 'words': [ 'sex', 'rehab', 'drew' ] },
'Shameless': { 'words': [ 'shameless' ] },
'Sherlock': { 'words': [ 'sherlock' ] },
'Sherri': { 'words': [ 'sherri' ] },
'Shit My Dad Says': { 'words': [ 'shit', 'my', 'dad', 'says' ] },
'Silicon Valley': { 'words': [ 'silicon', 'valley' ] },
'Sirens': { 'words': [ 'sirens' ] },
'Sister Wives': { 'words': [ 'sister', 'wives' ] },
'Skins (US)': { 'words': [ 'skins', 'us' ] },
'Sleepy Hollow': { 'words': [ 'sleepy', 'hollow' ] },
'Smash': { 'words': [ 'smash' ] },
'Sons Of Anarchy': { 'words': [ 'sons', 'of', 'anarchy' ] },
'Sons Of Tucson': { 'words': [ 'sons', 'of', 't' ] },
'Southland': { 'words': [ 'southland' ] },
'Spartacus Blood And Sand': { 'words': [ 'spartacus', 'blood', 'sand' ] },
'Spartacus Gods Of The Arena': { 'words': [ 'spartacus', 'gods', 'arena' ] },
'Spartacus': { 'words': [ 'spartacus' ] },
'Stalker': { 'words': [ 'stalker' ] },
'Star Wars Rebels': { 'words': [ 'star', 'wars', 'rebels' ] },
'Star Wars The Clone Wars': { 'words': [ 'star', 'wars', 'clone', 'wars' ] },
'Stargate Universe': { 'words': [ 'stargate', 'universe' ] },
'State Of Georgia': { 'words': [ 'state', 'of', 'georgia' ] },
'Steven Seagal Lawman': { 'words': [ 'steven', 'seagal', 'law', 'man' ] },
'Strange Empire': { 'words': [ 'strange', 'empire' ] },
'Suburgatory': { 'words': [ 'suburgatory' ] },
'Summer Camp': { 'words': [ 'summer', 'camp' ] },
'Super Fun Night': { 'words': [ 'super', 'fun', 'night' ] },
'Superjail': { 'words': [ 'superjail' ] },
'Supernanny': { 'words': [ 'supernanny' ] },
'Supernatural': { 'words': [ 'super', 'natural' ] },
'Surviving Jack': { 'words': [ 'surviving', 'jack' ] },
'Survivor': { 'last': True, 'words': [ 'survivor' ] },
'Survivors': { 'words': [ 'survivors', '2008' ] },
'Switched At Birth': { 'words': [ 'switched', 'at', 'birth' ] },
'Teen Wolf': { 'words': [ 'teen', 'wolf' ] },
'Terra Nova': { 'words': [ 'terra', 'nova' ] },
'Terriers': { 'words': [ 'terriers' ] },
'The 100': { 'words': [ 'the', '100' ] },
'The Affair': { 'words': [ 'the', 'affair' ] },
'The Americans': { 'words': [ 'the', 'americans' ] },
'The Big Bang Theory': { 'words': [ 'the', 'big', 'bang', 'theory' ] },
'The Big C': { 'last': True, 'words': [ 'the', 'big', 'c' ] },
'The Blacklist': { 'words': [ 'the', 'blacklist' ] },
'The Bomb Squad': { 'words': [ 'the', 'bomb', 'squad' ] },
'The Borgias': { 'words': [ 'the', 'borgias' ] },
'The Bridge': { 'words': [ 'the', 'bridge' ] },
'The Cape': { 'words': [ 'the', 'cap' ] },
'The Carrie Diaries': { 'words': [ 'the', 'carrie', 'diaries' ] },
'The Chicago Code': { 'words': [ 'chicago', 'code' ] },
'The Cleveland Show': { 'words': [ 'the', 'cleveland', 'show' ] },
'The Client List': { 'words': [ 'the', 'client', 'list' ] },
'The Closer': { 'words': [ 'the', 'closer' ] },
'The Crazy Ones': { 'words': [ 'the', 'crazy', 'ones' ] },
'The Cult': { 'words': [ 'the', 'cult' ] },
'The Deep End': { 'words': [ 'the', 'deep', 'end' ] },
'The Defenders': { 'words': [ 'the', 'defender' ] },
'The Event': { 'words': [ 'the', 'event' ] },
'The Exes': { 'words': [ 'the', 'exes' ] },
'The F Word ': { 'words': [ 'tfw' ] },
'The F Word': { 'words': [ 'the', 'f', 'word' ] },
'The Finder': { 'words': [ 'the', 'finder' ] },
'The Firm': { 'words': [ 'the', 'firm' ] },
'The Flash': { 'words': [ 'the', 'flash' ] },
'The Following': { 'words': [ 'the', 'following' ] },
'The Forgotten': { 'words': [ 'the', 'forgotten' ] },
'The Fosters': { 'words': [ 'the', 'fosters' ] },
'The Gates': { 'words': [ 'the', 'gates' ] },
'The Glades': { 'words': [ 'the', 'glades' ] },
'The Goldbergs': { 'words': [ 'the', 'goldbergs' ] },
'The Good Guys': { 'words': [ 'the', 'good', 'guys' ] },
'The Good Wife': { 'words': [ 'the', 'good', 'wife' ] },
'The Hard Times Of RJ Berger': { 'words': [ 'the', 'hard', 'times', 'of', 'r', 'j', 'berger' ] },
'The Intern': { 'words': [ 'the', 'intern' ] },
'The Killing': { 'words': [ 'the', 'killing' ] },
'The Last Ship': { 'words': [ 'the', 'last', 'ship' ] },
'The Listener': { 'words': [ 'the', 'listener' ] },
'The Lottery': { 'words': [ 'the', 'lottery' ] },
'The Lying Game': { 'words': [ 'the', 'lying', 'game' ] },
'The Marriage Ref': { 'words': [ 'the', 'marriage', 'ref' ] },
'The McCarthy\'s': { 'words': [ 'the', 'mccarthys' ] },
'The Mentalist': { 'words': [ 'the', 'mentalist' ] },
'The Michael J Fox Show': { 'words': [ 'the', 'michael', 'j', 'fox', 'show' ] },
'The Middle': { 'words': [ 'the', 'middle' ] },
'The Millers': { 'words': [ 'the', 'millers' ] },
'The Mindy Project': { 'words': [ 'the', 'mindy', 'project' ] },
'The Missing': { 'words': [ 'the', 'missing' ] },
'The Mob Doctor': { 'words': [ 'the', 'mob', 'doctor' ] },
'The Mysteries Of Laura': { 'words': [ 'the', 'mysteries', 'of', 'laura' ] },
'The Neighbors': { 'words': [ 'the', 'neighbors' ] },
'The New Adventures Of Old Christine': { 'words': [ 'new', 'adventures', 'old', 'christine' ] },
'The New Normal': { 'words': [ 'the', 'new', 'normal' ] },
'The Newsroom': { 'words': [ 'the', 'newsroom' ] },
'The Nine Lives Of Chloe King': { 'words': [ 'the', 'nine', 'lives', 'chloe', 'king' ] },
'The Office': { 'words': [ 'the', 'office', 'u?s?' ] },
'The Originals': { 'words': [ 'the', 'originals' ] },
'The Paradise': { 'words': [ 'the', 'paradise' ] },
'The Paul Reiser Show': { 'words': [ 'the', 'paul', 'r[ie]{2}ser', 'show' ] },
    'The Playboy Club': { 'words': [ 'the', 'playboy', 'club' ] },
'The Protector': { 'words': [ 'the', 'protector' ] },
'The Real L Word': { 'words': [ 'the', 'real', 'l', 'word' ] },
'The River': { 'last': True, 'words': [ 'the', 'river' ] },
'The Sarah Jane Adventures': { 'words': [ 'sarah', 'jane', 'adventures' ] },
'The Secret Circle': { 'words': [ 'the', 'secret', 'circle' ] },
'The Secret Life Of The American Teenager': { 'words': [ 'secret', 'life', 'american', 'teenager' ] },
'The Simpsons': { 'words': [ 'the', 'simpsons' ] },
'The Strain': { 'words': [ 'the', 'strain' ] },
'The Tomorrow People': { 'words': [ 'the', 'tomorrow', 'people' ] },
'The Tudors': { 'words': [ 'the', 'tudors' ] },
'The Tunnel': { 'words': [ 'the', 'tunnel' ] },
'The Vampire Diaries': { 'words': [ 'the', 'vampire', 'diaries' ] },
'The Village': { 'words': [ 'the', 'village' ] },
'The Voice': { 'words': [ 'the', 'voice' ] },
'The Walking Dead': { 'words': [ 'the', 'walking', 'dead' ] },
'The Whole Truth': { 'words': [ 'the', 'whole', 'truth' ] },
'The X Factor': { 'words': [ 'the', 'x', 'factor' ] },
'This Is Not My Life': { 'words': [ 'this', 'is', 'not', 'my', 'life' ] },
'Three Rivers': { 'words': [ 'three', 'rivers' ] },
'Through The Wormhole': { 'words': [ 'through', 'wormhole' ] },
'Thundercats': { 'words': [ 'thundercats' ] },
'Til Death': { 'words': [ 'til', 'death' ] },
'Top Chef Masters': { 'words': [ 'top', 'chef', 'masters' ] },
'Top Chef': { 'words': [ 'top', 'chef' ] },
'Torchwood': { 'words': [ 'torchwood' ] },
'Total Divas': { 'words': [ 'total', 'divas' ] },
'Touch': { 'words': [ 'touch' ] },
'Traffic Light': { 'words': [ 'traffic', 'light' ] },
'Trauma': { 'words': [ 'trauma' ] },
'Treme': { 'words': [ 'treme' ] },
'Trophy Wife': { 'words': [ 'trophy', 'wife' ] },
'Truckers': { 'words': [ 'truckers' ] },
'True Blood': { 'words': [ 'true', 'blood' ] },
'True Detective': { 'words': [ 'true', 'detective' ] },
'True Justice': { 'words': [ 'true', 'justice' ] },
'True North': { 'words': [ 'true', 'north' ] },
'Turn': { 'words': [ 'turn' ] },
'Two And A Half Men': { 'words': [ 'two', 'and', 'a', 'half', 'men' ] },
'Tyrant': { 'words': [ 'tyrant' ] },
'Ugly Americans': { 'words': [ 'ugly', 'americans' ] },
'Ugly Betty': { 'words': [ 'ugly', 'betty' ] },
'Uncle': { 'last': True, 'words': [ 'uncle' ] },
'Under The Dome': { 'words': [ 'under', 'the', 'dome' ] },
'Undercover Boss': { 'words': [ 'undercover', 'boss' ] },
'Undercovers': { 'words': [ 'undercovers' ] },
'Underemployed': { 'words': [ 'underemployed' ] },
'Unforgettable': { 'words': [ 'unforgettable' ] },
'United States Of Tara': { 'words': [ 'united', 'states', 'of', 'tara' ] },
'Unsupervised': { 'words': [ 'unsupervised' ] },
'Up All Night': { 'words': [ 'up', 'all', 'night' ] },
'Utopia': { 'words': [ 'utopia' ] },
'V': { 'words': [ 'v', '2009' ] },
'Veep': { 'words': [ 'veep' ] },
'Vegas': { 'words': [ 'vegas' ] },
'Vikings': { 'words': [ 'vikings' ] },
'Warehouse 13': { 'words': [ 'warehouse', '13' ] },
'We Are Men': { 'words': [ 'we', 'are', 'men' ] },
'Web Therapy': { 'words': [ 'web', 'therapy' ] },
'Weeds': { 'words': [ 'weeds' ] },
'Welcome To The Family': { 'words': [ 'welcome', 'to', 'the', 'family' ] },
'White Collar': { 'words': [ 'white', 'collar' ] },
'Whitney': { 'words': [ 'whitney' ] },
'Wilfred': { 'words': [ 'wilfred' ] },
'Witches Of East End': { 'words': [ 'witches', 'of', 'east', 'end' ] },
'Working Class': { 'words': [ 'working', 'class' ] },
'You\'re The Worst': { 'words': [ 'youre', 'the', 'worst' ] },
'Z Nation': { 'words': [ 'z', 'nation' ] },
'Zero Hour': { 'words': [ 'zero', 'hour' ] },
DATEFMT + ' - WWE Friday Night Smackdown.{ext}':
{ 'words': [ 'wwe', 'smackdown' ],
'extract': [r + '.*\.(?P<ext>(?:avi|mkv|mp4))$' for r in DATES] },
DATEFMT + ' - WWE Saturday Morning Slam.{ext}':
{ 'words': [ 'wwe', 'saturday', 'morning', 'slam' ],
'extract': [r + '.*\.(?P<ext>(?:avi|mkv|mp4))$' for r in DATES] },
DATEFMT + ' - WWE Monday Night Raw.{ext}': { 'words': [ 'wwe', 'raw' ],
'extract': [r + '.*\.(?P<ext>(?:avi|mkv|mp4))$' for r in DATES] },
DATEFMT + ' - WWE Main Event.{ext}': { 'words': [ 'wwe', 'main', 'event' ],
'extract': [r + '.*\.(?P<ext>(?:avi|mkv|mp4))$' for r in DATES] },
DATEFMT + ' - iMPACT Wrestling.{ext}': { 'words': [ 'impact', 'wrestling' ],
'extract': [r + '.*\.(?P<ext>(?:avi|mkv|mp4))$' for r in DATES] }
}
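# Illustrative example (the filename below is hypothetical, not taken from the
# original code): for the DATE-based WWE entries above, a file such as
# "wwe.smackdown.2014.10.24.avi" is identified by the words ['wwe', 'smackdown'],
# the first DATES pattern extracts year=2014, month=10, day=24 and ext='avi', and
# formatting the DATEFMT-based key yields
# "2014-10-24 - WWE Friday Night Smackdown.avi".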
db.drop_all()
db.create_all()
s = db.session()
i = 0
for name,items in SimpleRenameList.items():
if 'extract' in items:
r = RegexModel(title=name,
extractDefault=False,
identifyDefault=True,
renameDefault=False,
# TODO: This field should be converted into MULTIPLE regexes ...
extract=items['extract'],
identify=','.join(items['words']),
rename=name)
else:
if 'words' in items:
identify = ','.join(items['words'])
else:
identify = ','.join([word.lower() for word in name.split(' ')])
r = RegexModel(title=name,
extractDefault=True,
identifyDefault=True,
renameDefault=True,
extract=None,
identify=identify,
rename=name)
f = FeedModel(title=name, regexes=[r])
s.add(f)
s.commit()
i += 1
sys.exit(0)
class RenameShow:
split = re.compile(r".*\b[s]?(\d{1,2})[ex]?(\d{2})\b.*(mp4|mkv|avi)$", re.IGNORECASE)
split2 = re.compile(r".*\b[s]?(\d{1,2})[ex]?(\d{2})e?(\d{2})\b.*(mp4|mkv|avi)$", re.IGNORECASE)
_PERCENT = 0
_FORMAT = 1
def __init__(self, identify, replacement, extract = None):
expr = '^.*' + '.*'.join(identify) + '.*$'
self.identify = re.compile(expr, re.IGNORECASE)
self.replacement = replacement
if extract is None:
self.extract = [self.split, self.split2]
self.rfmt = self._PERCENT
else:
if type(extract) is list:
self.extract = [re.compile('^.*' + e) for e in extract]
self.rfmt = self._FORMAT
else: # TODO: is re?
self.extract = re.compile(extract)
self.rfmt = self._PERCENT
def RenameIf(self, filename):
m = self.identify.match(filename)
if m:
if self.rfmt == self._PERCENT:
for e in self.extract:
m = e.search(filename)
if m:
if len(m.groups()) > 3:
return self.replacement + ' - S%02dE%02d-E%02d.%s' % tuple(self._normalizeExtracted(m.groups()))
else:
return self.replacement + ' - S%02dE%02d.%s' % tuple(self._normalizeExtracted(m.groups()))
print("echo [INT] Show known, but cannot detect season and episode: " + filename)
else:
for e in self.extract:
m = e.match(filename)
if m:
return self.replacement.format(**self._normalizeExtracted(m.groupdict()))
print("echo [EXT] Show known, but cannot detect season and episode: " + filename)
return None
def _normalizeExtracted(self, fields):
if type(fields) is dict:
f = {}
for g in fields.keys():
if fields[g].isdigit():
f[g] = int(fields[g])
else:
f[g] = fields[g]
else:
f = []
for g in fields:
if g.isdigit():
f.append(int(g))
else:
f.append(g)
return f
def __repr__(self):
if self.rfmt == self._FORMAT:
e = '[ '
c = ''
for p in self.extract:
e += '%s%s' % (c, p.pattern)
c = ',\n\t\t'
e += ' ]'
else:
e = self.extract.pattern
return "{\n\t'extract': %s,\n\t'identify': %s,\n\t'replacement': %s,\n\t'rfmt': %s\n}" % \
( e, self.identify.pattern, self.replacement, self.rfmt )
RenameList = []
# Create the simple rename regex's
for name,items in SimpleRenameList.items():
#exp = '^.*'
if 'last' not in items or items['last'] == False:
#for n in items['words']:
# exp += n + '.*'
#exp += '(?:.*20[01][0-9])?.*?(?!264|720p|187)s?(\d?\d)(?:e|x)?(\d\d).*(avi|mkv|mp4)$'
if 'extract' in items:
obj = RenameShow(items['words'], name, items['extract'])
else:
obj = RenameShow(items['words'], name)
RenameList.append(obj)
# Create the simple rename regex's that are required to be after the
# others
for name,items in SimpleRenameList.items():
if 'last' in items and items['last'] == True:
if 'extract' in items:
obj = RenameShow(items['words'], name, items['extract'])
else:
obj = RenameShow(items['words'], name)
RenameList.append(obj)
# Execute the renames
testing = len(sys.argv) > 1
for e in ('*.mp4', '*.avi', '*.mkv'):
for f in glob.glob(e):
found = False
for r in RenameList:
o = r.RenameIf(f)
if o:
found = True
if f != o:
if testing:
break
print('mv "%s" "%s"' % (f, o))
break
if not found:
if testing:
print('Unknown: {0}'.format(f))
else:
print('echo "Unknown: {0}"'.format(f))
TODO="""
Table Shows: id (unique), title (final name of converted show title, overridable)
Table Identify: show_id (from Shows), sequence, regex?, criteria
Table Extract: show_id, sequence
Table Rename: show_id, sequence
"""
| gpl-2.0 | 8,169,383,276,372,346,000 | 70.494937 | 141 | 0.344541 | false | 3.654782 | false | false | false |
closeio/tasktiger | tests/test_queue_size.py | 1 | 2787 | """Test max queue size limits."""
from multiprocessing import Process
import datetime
import os
import signal
import time
import pytest
from tasktiger import Task, Worker
from tasktiger.exceptions import QueueFullException
from .config import DELAY
from .tasks import decorated_task_max_queue_size, simple_task, sleep_task
from .test_base import BaseTestCase
from .utils import external_worker
class TestMaxQueue(BaseTestCase):
"""TaskTiger test max queue size."""
def test_task_simple_delay(self):
"""Test enforcing max queue size using delay function."""
self.tiger.delay(simple_task, queue='a', max_queue_size=1)
self._ensure_queues(queued={'a': 1})
# Queue size would be 2 so it should fail
with pytest.raises(QueueFullException):
self.tiger.delay(simple_task, queue='a', max_queue_size=1)
# Process first task and then queuing a second should succeed
Worker(self.tiger).run(once=True, force_once=True)
self.tiger.delay(simple_task, queue='a', max_queue_size=1)
self._ensure_queues(queued={'a': 1})
def test_task_decorated(self):
"""Test max queue size with decorator."""
decorated_task_max_queue_size.delay()
self._ensure_queues(queued={'default': 1})
with pytest.raises(QueueFullException):
decorated_task_max_queue_size.delay()
def test_task_all_states(self):
"""Test max queue size with tasks in all three states."""
# Active
task = Task(self.tiger, sleep_task, queue='a')
task.delay()
self._ensure_queues(queued={'a': 1})
# Start a worker and wait until it starts processing.
worker = Process(target=external_worker)
worker.start()
time.sleep(DELAY)
# Kill the worker while it's still processing the task.
os.kill(worker.pid, signal.SIGKILL)
self._ensure_queues(active={'a': 1})
# Scheduled
self.tiger.delay(
simple_task,
queue='a',
max_queue_size=3,
when=datetime.timedelta(seconds=10),
)
# Queued
self.tiger.delay(simple_task, queue='a', max_queue_size=3)
self._ensure_queues(
active={'a': 1}, queued={'a': 1}, scheduled={'a': 1}
)
# Should fail to queue task to run immediately
with pytest.raises(QueueFullException):
self.tiger.delay(simple_task, queue='a', max_queue_size=3)
# Should fail to queue task to run in the future
with pytest.raises(QueueFullException):
self.tiger.delay(
simple_task,
queue='a',
max_queue_size=3,
when=datetime.timedelta(seconds=10),
)
| mit | 4,320,719,270,570,761,000 | 29.966667 | 73 | 0.616792 | false | 3.930889 | true | false | false |
jessemyers/lxmlbind | lxmlbind/tests/test_personlist.py | 1 | 2797 | from copy import deepcopy
from nose.tools import assert_raises, eq_, ok_
from six import b
from lxmlbind.api import List, of, tag
from lxmlbind.tests.test_person import Person
@tag("person-list")
@of(Person)
class PersonList(List):
"""
Example using typed list.
"""
pass
def test_person_list():
"""
Verify list operations.
"""
person1 = Person()
person1.first = "John"
person2 = Person()
person2.first = "Jane"
# test append and __len__
person_list = PersonList()
eq_(len(person_list), 0)
person_list.append(person1)
eq_(len(person_list), 1)
eq_(person1._parent, person_list)
person_list.append(person2)
eq_(len(person_list), 2)
eq_(person2._parent, person_list)
eq_(person_list.to_xml(),
b("""<person-list><person type="object"><first>John</first></person><person type="object"><first>Jane</first></person></person-list>""")) # noqa
# test that append is preserving order
person1_copy = deepcopy(person1)
person2_copy = deepcopy(person2)
person_list_reverse = PersonList()
person_list_reverse.append(person2_copy)
person_list_reverse.append(person1_copy)
eq_(person_list[0], person_list_reverse[1])
eq_(person_list[1], person_list_reverse[0])
eq_(person_list_reverse.to_xml(),
b"""<person-list><person type="object"><first>Jane</first></person><person type="object"><first>John</first></person></person-list>""") # noqa
# test __getitem__
eq_(person_list[0].first, "John")
eq_(person_list[0]._parent, person_list)
eq_(person_list[1].first, "Jane")
eq_(person_list[0]._parent, person_list)
# test __iter__
eq_([person.first for person in person_list], ["John", "Jane"])
ok_(all([person._parent == person_list for person in person_list]))
# test __delitem__
with assert_raises(IndexError):
del person_list[2]
del person_list[1]
eq_(len(person_list), 1)
eq_(person_list.to_xml(),
b("""<person-list><person type="object"><first>John</first></person></person-list>"""))
# test __setitem__
person_list[0] = person2
eq_(person_list.to_xml(),
b("""<person-list><person type="object"><first>Jane</first></person></person-list>"""))
| apache-2.0 | 3,924,461,606,874,291,700 | 31.905882 | 153 | 0.638184 | false | 3.337709 | true | false | false |
consulo/consulo-python | plugin/src/main/dist/helpers/pydev/_pydevd_bundle/pydevd_dont_trace.py | 21 | 3780 | '''
Support for a tag that allows skipping over functions while debugging.
'''
import linecache
import re
from _pydevd_bundle.pydevd_constants import dict_contains
# To suppress tracing a method, add the tag @DontTrace
# to a comment either preceding or on the same line as
# the method definition
#
# E.g.:
# #@DontTrace
# def test1():
# pass
#
# ... or ...
#
# def test2(): #@DontTrace
# pass
DONT_TRACE_TAG = '@DontTrace'
# Regular expression to match a decorator (at the beginning
# of a line).
RE_DECORATOR = re.compile(r'^\s*@')
# Mapping from code object to bool.
# If the key exists, the value is the cached result of should_trace_hook
_filename_to_ignored_lines = {}
def default_should_trace_hook(frame, filename):
'''
Return True if this frame should be traced, False if tracing should be blocked.
'''
# First, check whether this code object has a cached value
ignored_lines = _filename_to_ignored_lines.get(filename)
if ignored_lines is None:
# Now, look up that line of code and check for a @DontTrace
# preceding or on the same line as the method.
# E.g.:
# #@DontTrace
# def test():
# pass
# ... or ...
# def test(): #@DontTrace
# pass
ignored_lines = {}
lines = linecache.getlines(filename)
i_line = 0 # Could use enumerate, but not there on all versions...
for line in lines:
j = line.find('#')
if j >= 0:
comment = line[j:]
if DONT_TRACE_TAG in comment:
ignored_lines[i_line] = 1
#Note: when it's found in the comment, mark it up and down for the decorator lines found.
k = i_line - 1
while k >= 0:
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k -= 1
else:
break
k = i_line + 1
                    while k < len(lines):
if RE_DECORATOR.match(lines[k]):
ignored_lines[k] = 1
k += 1
else:
break
i_line += 1
_filename_to_ignored_lines[filename] = ignored_lines
func_line = frame.f_code.co_firstlineno - 1 # co_firstlineno is 1-based, so -1 is needed
return not (
dict_contains(ignored_lines, func_line - 1) or #-1 to get line before method
dict_contains(ignored_lines, func_line)) #method line
should_trace_hook = None
def clear_trace_filter_cache():
'''
Clear the trace filter cache.
Call this after reloading.
'''
global should_trace_hook
try:
# Need to temporarily disable a hook because otherwise
# _filename_to_ignored_lines.clear() will never complete.
old_hook = should_trace_hook
should_trace_hook = None
# Clear the linecache
linecache.clearcache()
_filename_to_ignored_lines.clear()
finally:
should_trace_hook = old_hook
def trace_filter(mode):
'''
Set the trace filter mode.
mode: Whether to enable the trace hook.
True: Trace filtering on (skipping methods tagged @DontTrace)
False: Trace filtering off (trace methods tagged @DontTrace)
None/default: Toggle trace filtering.
'''
global should_trace_hook
if mode is None:
mode = should_trace_hook is None
if mode:
should_trace_hook = default_should_trace_hook
else:
should_trace_hook = None
return mode
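# Illustrative usage (not part of the original module):
#   trace_filter(True)            # skip methods tagged @DontTrace
#   trace_filter(False)           # trace @DontTrace-tagged methods again
#   clear_trace_filter_cache()    # call after a reload so the tags are re-scanned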
| apache-2.0 | -3,880,196,339,799,125,500 | 28.76378 | 109 | 0.549735 | false | 4.204672 | false | false | false |
JQGoh/jqlearning | Simulation/PeriodicBoundary/python/ising_bc3.py | 1 | 2584 | #! /usr/bin/env python3
import textwrap
import argparse
import numpy as np
import random
import math
import time
proginfo = textwrap.dedent('''\
This python script compares the efficiencies of different schemes
implementing the periodic boundary conditions for Ising model problem.
Author: JQ e-mail: gohjingqiang [at] gmail.com
Date: 29-10-2014
''')
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=proginfo)
parser.add_argument('-l', '--L', type=int, default=10,
help='L, the number of spin along the edges of a \
2D square lattice. Default (10)')
parser.add_argument('-n', '--num', type=int, default=100,
help='The total number of Monte Carlo sweeps. \
Default (100)')
args = parser.parse_args()
start = time.time()
# Initialize the system
L = args.L
print(L)
spin = np.ones((L, L)) # 2D square lattice, spin up
T = 300 # 300 K, for temperature
# Method 3, using if-else conditions
random.seed(10)
for k in range(args.num):
for i in range(L):
for j in range(L):
# Determine the displacement along i-dimension
if i == 0:
di1 = L - 1
di2 = 1
elif i == L - 1:
di1 = L - 2
di2 = 0
else:
di1 = i - 1
di2 = i + 1
# Determine the displacement along j-dimension
if j == 0:
dj1 = L - 1
dj2 = 1
elif j == L - 1:
dj1 = L - 2
dj2 = 0
else:
dj1 = j - 1
dj2 = j + 1
# eflip, the change in the energy of system if we flip the
# spin[i, j]. eflip depends on the configuration of 4 neighboring
# spins. For instance, with reference to spin[i, j], we should evaluate
# eflip based on spin[i+1, j], spin[i-1, j], spin[i, j+1], spin[i, j-1]
eflip = 2*spin[i, j]*(
spin[di1, j] + # -1 in i-dimension
spin[di2, j] + # +1 in i-dimension
spin[i, dj1] + # -1 in j-dimension
spin[i, dj2] # +1 in j-dimension
)
# Metropolis algorithm
if eflip <= 0.0:
spin[i, j] = -1.0*spin[i, j]
else:
if (random.random() < math.exp(-1.0*eflip/T)):
spin[i, j] = -1.0*spin[i, j]
end = time.time()
print(spin)
print(end - start)
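# For comparison only (an addition, not part of the original script): the same
# neighbour lookup can be written with modulo arithmetic instead of the if/else
# ladder used in "Method 3" above.  A minimal sketch for a single site (i, j):
def eflip_modulo(spin, L, i, j):
    # Periodic boundaries via the % operator; equivalent to the branching code above.
    return 2.0 * spin[i, j] * (
        spin[(i - 1) % L, j] +
        spin[(i + 1) % L, j] +
        spin[i, (j - 1) % L] +
        spin[i, (j + 1) % L]
    )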
| gpl-3.0 | -7,825,178,804,035,132,000 | 30.901235 | 83 | 0.51161 | false | 3.554333 | false | false | false |
dbiesecke/dbiesecke.github.io | repo/service.vpn.manager/api.py | 1 | 2885 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Zomboided
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# This module allows some limited interaction with the service via
# a set of commands
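# Illustrative invocation from another add-on (an assumption, not part of this file;
# the exact RunScript form depends on the Kodi installation and version):
#   xbmc.executebuiltin('RunScript(special://home/addons/service.vpn.manager/api.py, disconnect)')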
import xbmcaddon
import xbmcvfs
import string
import sys
from libs.common import setAPICommand, clearAPICommand, getAPICommand
from libs.utility import debugTrace, errorTrace, infoTrace, newPrint, getID
# Get the first argument which will indicate the connection that's being dealt with
command = sys.argv[1]
lcommand = command.lower()
debugTrace("Entered api.py with parameter " + command)
if not getID() == "":
if lcommand == "disconnect":
setAPICommand("Disconnect")
elif lcommand == "cycle":
setAPICommand("Cycle")
elif lcommand == "fake":
setAPICommand("Fake")
elif lcommand == "real":
setAPICommand("Real")
elif lcommand == "pause":
setAPICommand("Pause")
elif lcommand == "restart":
setAPICommand("Restart")
elif lcommand == "reconnect":
setAPICommand("Reconnect")
elif lcommand == "getip":
setAPICommand("GetIP")
elif lcommand.startswith("connect"):
connection = command[8:].strip(' \t\n\r')
if connection.isdigit():
c = int(connection)
addon = xbmcaddon.Addon(getID())
# Adjust the 11 below to change conn_max
if c > 0 and c < 11:
connection = addon.getSetting(str(c) + "_vpn_validated")
if not connection == "":
setAPICommand(connection)
else:
errorTrace("api.py", "Connection requested, " + str(c) + " has not been validated")
else:
errorTrace("api.py", "Invalid connection, " + str(c) + " requested")
else:
if xbmcvfs.exists(connection):
setAPICommand(connection)
else:
errorTrace("api.py", "Requested connection, " + connection + " does not exist")
else:
errorTrace("api.py", "Unrecognised command: " + command)
else:
errorTrace("api.py", "VPN service is not ready")
debugTrace("-- Exit api.py --")
| mit | 6,293,123,317,180,647,000 | 36.466667 | 103 | 0.614211 | false | 3.851802 | false | false | false |
grace1912-cmis/grace1912-cmis-cs2 | recursion.py | 1 | 1728 | #def countdown(n):
# if n<= 0:
# print "Blastoff!"
# else:
# print n
# countdown(n-1)
#def countup(n):
# if n == 10:
# print "Blastoff!"
# else:
# print n
# countup(n+1)
#def main():
# countdown(10)
# countup(-5)
#
#main()
#
#def count_up_from(start, stop):
# if start> stop:
# print "Blastoff!"
# else:
# print start
# count_up_from(start + 1,stop)
#def count_down_from(start, stop):
# if start< stop:
# print "Blastoff!"
# else:
# print start
# count_down_from(start - 1, stop)
#def main():
# count_up_from(-1,20)
# count_down_from(20, -1)
#
#main()
#
#
#Adder
#def adder(Number, total):
# Number = raw_input("Next number: ")
# if Number == '':
# print "The running total is: " + str(total) + "."
# else:
# total = total + float(Number)
# print "Running total: " + str(total) + "."
# return adder(Number, total)
#
#adder("Number", 0)
#
#Biggest
#def Biggest(Numbers):
# Numeral = raw_input("Next Number: ")
# if Numeral == '':
# print str(Numbers)
# else:
# if Numbers > float(Numeral):
# Biggest(Numbers)
# else:
# Biggest(float(Numeral))
#Biggest(-float("inf"))
#Smallest
#def Smallest(Numbers):
# Numeral = raw_input("Next Number: ")
# if Numeral == '':
# print str(Numbers)
# else:
# if Numbers < float(Numeral):
# Smallest(Numbers)
# else:
# Smallest(float(Numeral))
#
#Smallest(float("inf"))
#Power
def pow_(x, n):
    # Recursively compute x**n for a non-negative integer exponent n.
    if n == 0:
        return 1
    else:
        return x * pow_(x, n-1)
def main():
print pow_(2,4)
main()
| cc0-1.0 | 1,118,823,374,739,786,500 | 18.2 | 58 | 0.506366 | false | 2.725552 | false | false | false |
adamretter/exist-nightly-build | generate-exist-dist-html-table.py | 1 | 4078 | #!/usr/bin/env python3
from os import walk
import os, re, sys, subprocess
from datetime import datetime
import argparse
###
## Generates a HTML table of eXist-db dist artifacts
###
tmp_dir="/tmp/exist-nightly-build/dist"
default_build_dir = tmp_dir + "/source"
default_output_dir = tmp_dir + "/target"
# parse command line arguments
parser = argparse.ArgumentParser(description="Generate an index.html table of nightly builds")
parser.add_argument("-b", "--git-branch", default="develop", dest="git_branch", help="The git branch to use")
parser.add_argument("-u", "--github-repo-url", default="https://github.com/eXist-db/exist", dest="github_repo_url", help="Public URL of the GitHub repo")
parser.add_argument("-d", "--build-dir", default=default_build_dir, dest="build_dir", help="The directory containing the eXist-db build")
parser.add_argument("-o", "--output-dir", default=default_output_dir, dest="output_dir", help="The directory containing the built eXist-db artifacts")
parser.add_argument("-f", "--file-name", default="table.html", dest="filename", help="The name for the generated HTML file")
args = parser.parse_args()
print(f"""Generating {args.output_dir}/{args.filename}...""")
# find all files
existFiles = []
for (dirpath, dirnames, filenames) in walk(args.output_dir):
for filename in filenames:
if "exist" in filename and "SNAPSHOT" in filename and ".sha256" not in filename:
existFiles.append(filename)
# get hashes
buildLabelPattern = re.compile("exist-(?:distribution|installer)-[0-9]+\.[0-9]+\.[0-9]+(?:-RC[\-0-9]+)?-SNAPSHOT(?:-(?:win|unix))?\+([0-9]{12,14})\.(?:jar|dmg|tar\.bz2|zip)")
buildLabels = set()
for name in existFiles:
groups = buildLabelPattern.match(name).groups()
buildLabels.add(groups[0])
# start writing table
f = open(args.output_dir + "/" + args.filename, "w")
f.write("""<div>
<table id="myTable" class="tablesorter">
<thead>
<tr>
<th>Date</th>
<th>Build Label</th>
<th>Git Hash</th>
<th>Downloads</th>
</tr>
</thead>
<tbody>
""")
# iterate over hashes
fileExtPattern = re.compile(".+\.(jar|dmg|tar\.bz2|zip)$")
labelPattern = re.compile("exist-(?:distribution|installer)-[0-9]+\.[0-9]+\.[0-9]+(?:-RC[\-0-9]+)?-SNAPSHOT(?:-(?:win|unix))?\+([0-9]{12,14})\.(?:jar|dmg|tar\.bz2|zip)$")
for buildLabel in buildLabels:
# group files per download
types = {};
recentDate = ""
for file in existFiles:
if buildLabel in file:
groups = fileExtPattern.match(file).groups()
types[groups[0]] = file
changeDate = datetime.strptime(buildLabel, "%Y%m%d%H%M%S").strftime("%Y-%m-%d")
if changeDate > recentDate:
recentDate = changeDate
gitBeforeDate = datetime.strptime(buildLabel, "%Y%m%d%H%M%S").strftime("%Y-%m-%d %H:%M:%S")
gitProcess = subprocess.run(["git", "rev-list", "-1", "--before=\"" + gitBeforeDate + "\"", args.git_branch], cwd=args.build_dir, stdout=subprocess.PIPE, encoding='utf-8', check=True)
gitHash = gitProcess.stdout.strip()[:7]
    # buildLabel is already the timestamp for this group of files, so use it
    # directly rather than re-parsing the last filename left over from the inner loop
    label = buildLabel
f.write(f""" <tr>
<td>{changeDate}</td>
<td>{label}</td>
<td><a href="{args.github_repo_url}/commit/{gitHash}">{gitHash}</a></td>
<td>
<ul>
""")
for type in types.keys():
f.write(f""" <li><a href="{str(types.get(type))}">{type}</a> ({('%.1f' % (float(os.path.getsize(args.output_dir + "/" + types.get(type))) / (1024 * 1024)))} MB) <a href="{str(types.get(type))}.sha256">SHA256</a></li>
""")
print(f"""Added {str(types.get(type))}""")
f.write(f""" </ul>
</tr>
</tbody>
</table>
""")
f.write("""<script>$(function(){$("#myTable").tablesorter({sortList : [[0,1]]}); });</script>
</div>""")
f.close()
print("Done.")
| bsd-3-clause | -5,322,992,537,409,844,000 | 38.211538 | 231 | 0.587788 | false | 3.291364 | false | false | false |
porter-liu/sync-tot | sync-tot.py | 1 | 3839 | #!/usr/bin/python
# Author: Porter Liu
import re
import sys
import json
import string
import urllib2
import os.path
from poster.encode import MultipartParam
from poster.encode import multipart_encode
from poster.streaminghttp import register_openers
# http://foo.com/allbuilds/iOS-client.123/InHouse_123.ipa
configuration_sample = {
"base_url" : "http://foo.com/allbuilds/",
"build_pattern" : "iOS-client.(\\d+)",
"build_path" : "iOS-client.{bn}/InHouse_{bn}.ipa",
"tot_url" : "http://bar.com/tot/",
}
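# Worked example using the sample values above (build number 123 is hypothetical):
#   base_url + build_path.format(bn=123)
#   -> "http://foo.com/allbuilds/iOS-client.123/InHouse_123.ipa"
# which is the package URL shown in the comment above configuration_sample.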
def show_configuration_file_syntax():
	print( configuration_sample )
# we need one argument for configuration file
if len( sys.argv ) != 2:
print( 'Usage: ' + sys.argv[0] + ' configuration_file' )
exit( 1 )
# generate build number filename from configuration filename
# for instance: foo.json => foo.buildnumber.txt
temp1, temp2 = os.path.splitext( os.path.basename( sys.argv[1] ) )
buildnumber_filename = temp1 + '.buildnumber.txt';
# open & load configuration in JSON format
try:
configurationFile = open( sys.argv[1] )
try:
config = json.load( configurationFile )
except Exception, e:
print( e )
exit( 1 )
finally:
configurationFile.close()
except Exception, e:
print( e )
exit( 1 )
# verify configuration file
for key in configuration_sample.keys():
if key not in config:
print( 'Failed to find "' + key + '" in ' + sys.argv[1] )
show_configuration_file_syntax()
exit( 1 )
#
# load the last processed build number
#
build_number = 0
if os.path.exists( buildnumber_filename ):
temp = open( buildnumber_filename, 'r' )
build_number = string.atoi( temp.read() )
temp.close()
print( 'old build number = ' + str( build_number ) )
#
# find out the latest build number
#
try:
remotefile = urllib2.urlopen( config['base_url'] )
data = remotefile.read()
remotefile.close()
except Exception, e:
print( 'failed to access "' + config['base_url'] + '", ' + str( e ) )
exit( 1 )
temp_build_number = build_number
pattern = config['build_pattern']
po = re.compile( pattern )
mo = po.findall( data )
if mo:
for item in mo:
n = string.atoi( item )
if n > temp_build_number:
temp_build_number = n
print( 'current max build number = ' + str( temp_build_number ) )
if temp_build_number <= build_number:
print( 'no new build' )
sys.exit( 0 )
else:
build_number = temp_build_number
print( 'will use ' + str( build_number ) + ' as build number' )
#
# generate package url and download
#
url = ( config['base_url'] + config['build_path'] ).format( bn = build_number )
print( 'package URL = ' + url )
package_filename = os.path.basename( url )
print( 'package filename = ' + package_filename )
data = None
try:
remotefile = urllib2.urlopen( url )
data = remotefile.read()
remotefile.close()
except Exception, e:
print( 'failed to access package URL, ' + str( e ) )
sys.exit( 1 )
try:
localFile = open( package_filename, 'wb' )
localFile.write( data )
localFile.close()
except Exception, e:
print( 'failed to create local file, ' + str( e ) )
sys.exit( 1 )
#
# upload package file onto TOT
#
register_openers()
#datagen, headers = multipart_encode( { 'file' : open( ipa_filename, 'rb' ), 'changelog' : build_name + '.' + str( build_number ), 'submit' : 'Submit' } )
ipa = MultipartParam.from_file( 'file', package_filename )
ipa.filetype = 'application/octet-stream'
changelog = MultipartParam( 'changelog', str( build_number ) )
submit = MultipartParam( 'submit', 'Submit' )
datagen, headers = multipart_encode( [ ipa, changelog, submit ] )
request = urllib2.Request( config['tot_url'] + '/upload.php', datagen, headers )
print urllib2.urlopen( request ).read()
# delete the package
os.remove( package_filename )
#
# save the current build number
#
temp = open( buildnumber_filename, 'w' )
temp.write( str( build_number ) )
temp.close()
| mit | -6,783,168,711,163,809,000 | 23.767742 | 154 | 0.67752 | false | 2.985226 | true | false | false |
kgao/MediaDrop | mediadrop/lib/tests/players_test.py | 1 | 1427 | # -*- coding: utf-8 -*-
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2014 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code in this file is dual licensed under the MIT license or
# the GPLv3 or (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
from mediadrop.lib.filetypes import VIDEO
from mediadrop.lib.players import FileSupportMixin, RTMP
from mediadrop.lib.test.pythonic_testcase import *
from mediadrop.lib.uri import StorageURI
from mediadrop.model import MediaFile
class FileSupportMixinTest(PythonicTestCase):
def test_can_play_ignores_empty_container(self):
class FakePlayer(FileSupportMixin):
supported_containers = set(['mp4'])
supported_schemes = set([RTMP])
fake_player = FakePlayer()
media_file = MediaFile()
media_file.container = ''
media_file.type = VIDEO
uri = StorageURI(media_file, 'rtmp', 'test',
server_uri='rtmp://stream.host.example/play')
assert_equals('', uri.file.container,
message='It is important that the server uri has no container.')
assert_equals((True, ), fake_player.can_play([uri]))
import unittest
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(FileSupportMixinTest))
return suite
| gpl-3.0 | -6,628,470,833,905,030,000 | 39.771429 | 76 | 0.702873 | false | 3.867209 | true | false | false |
orospakr/hedgehoghd | hedgehoghd/importer/tile.py | 1 | 4085 | # HedgehogHD - Vector Graphics Platform Game Engine
# Copyright (C) 2010 Andrew Clunis <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
class Tile(object):
'''16x16 Tile instance in a Chunk
These are never shared between chunks; they are used only once each.
Contains the parameters describing the Collision, graphics Block,
and flip properties of a given 16x16 tile in a Chunk. These are
not shared by any other Chunks.
64 of which exist in a Chunk, arranged in 8x8.
They contain:
* horizontal and vertical flip bits
* a reference to an artwork tile and collision block (through the
index) by Tile ID collision solidity control bits, for the primary
and alternate layers (aka, paths)
# SSTT YXII IIII IIII
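    Worked example (hypothetical word): 0x8C05 decodes to alternate collision
    solidity 2, normal collision solidity 0, y_flipped=1, x_flipped=1 and
    tile_index=5.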
'''
def __init__(self, chunk, tile_word):
self.chunk = chunk
self.alternate_collision_solidity = (tile_word & 0xC000) >> 14
self.normal_collision_solidity = (tile_word & 0x3000) >> 12
self.tile_index = tile_word & 0x3FF
self.y_flipped = (tile_word & 0x800) >> 11
self.x_flipped = (tile_word & 0x400) >> 10
if(self.alternate_collision_solidity > 3):
logging.error("Impossible alternate collision value in chunk?!: %d" % self.aternate_collision)
exit(-1)
if(self.normal_collision_solidity > 3):
logging.error("Impossible normal collision value in chunk?!: %d" % self.normal_collision)
exit(-1)
# reaching back through all the references is really kinda icky,
# should really make better encapsulation.
self.primary_collision = None
# if((self.tile_index >= len(self.chunk.chunk_array.primary_collision_index.ids)) or (self.tile_index >= len(self.chunk.chunk_array.secondary_collision_index.ids))):
# logging.warning("Tile index greater than length of collision index asked for. available: %d/%d, index: %d" % (len(self.chunk.chunk_array.primary_collision_index.ids), len(self.chunk.chunk_array.primary_collision_index.ids), self.tile_index))
# else:
primary_col_id = self.chunk.chunk_array.primary_collision_index.ids[self.tile_index]
# TODO ick, this breaks encapsulation a bit too much
self.primary_collision = self.chunk.chunk_array.sonic2.coll1.tiles[primary_col_id]
if(self.chunk.chunk_array.secondary_collision_index is not None):
secondary_col_id = self.chunk.chunk_array.secondary_collision_index.ids[self.tile_index]
self.secondary_collision = self.chunk.chunk_array.sonic2.coll1.tiles[secondary_col_id]
def toSVG(self, xml):
if(self.primary_collision is not None):
colour = "000000"
if(self.x_flipped and self.y_flipped):
colour = "ff00ff"
elif(self.y_flipped):
colour = "ff0000"
elif(self.x_flipped):
colour = "0000ff"
# transform="scale(%d, %d)" % (-1 if self.x_flipped else 1, -1 if self.y_flipped else 1),
with xml.g(transform="translate(%d, %d) scale(%d, %d)" % (16 if self.x_flipped else 0, 16 if self.y_flipped else 0, -1 if self.x_flipped else 1, -1 if self.y_flipped else 1),
style="stroke:#%s" % colour):
# with xml.rect(width="16", height="16", style="fill:none;stroke:#000000"):
# pass
self.primary_collision.toSVG(xml)
| gpl-3.0 | 1,766,584,052,791,250,700 | 48.817073 | 256 | 0.656304 | false | 3.595951 | false | false | false |
ZeroQI/Hama.bundle | Contents/Code/__init__.py | 1 | 14544 | # -*- coding: utf-8 -*-
#
# To Do
# - 'Debug' mode: logs per serie folder, need to use scanner logging
# - search word pick serie, do levenstein i partially match only (few chars difference)
### Imports ###
# Python Modules #
import re
import os
import datetime
# HAMA Modules #
import common # Functions: GetPlexLibraries, write_logs, UpdateMeta Variables: PlexRoot, FieldListMovies, FieldListSeries, FieldListEpisodes, DefaultPrefs, SourceList
from common import Dict
import AnimeLists # Functions: GetMetadata, GetAniDBTVDBMap, GetAniDBMovieSets Variables: AniDBMovieSets
import tvdb4 # Functions: GetMetadata Variables: None
import TheTVDBv2 # Functions: GetMetadata, Search Variables: None
import AniDB # Functions: GetMetadata, Search, GetAniDBTitlesDB Variables: None
import TheMovieDb # Functions: GetMetadata, Search Variables: None
import FanartTV # Functions: GetMetadata Variables: None
import Plex # Functions: GetMetadata Variables: None
import TVTunes # Functions: GetMetadata Variables: None
import OMDb # Functions: GetMetadata Variables: None
import MyAnimeList # Functions: GetMetadata Variables: None
import AniList # Functions: GetMetadata Variables: None
import Local # Functions: GetMetadata Variables: None
import anidb34 # Functions: AdjustMapping Variables: None
### Variables ###
### Pre-Defined ValidatePrefs function Values in "DefaultPrefs.json", accessible in Settings>Tab:Plex Media Server>Sidebar:Agents>Tab:Movies/TV Shows>Tab:HamaTV #######
def ValidatePrefs():
Log.Info("".ljust(157, '='))
Log.Info ("ValidatePrefs(), PlexRoot: "+Core.app_support_path)
#Reset to default agent setting
Prefs['reset_to_defaults'] #avoid logs message on first accesslike: 'Loaded preferences from DefaultPrefs.json' + 'Loaded the user preferences for com.plexapp.agents.lambda'
filename_xml = os.path.join(common.PlexRoot, 'Plug-in Support', 'Preferences', 'com.plexapp.agents.hama.xml')
filename_json = os.path.join(common.PlexRoot, 'Plug-ins', 'Hama.bundle', 'Contents', 'DefaultPrefs.json')
Log.Info ("[?] agent settings json file: '{}'".format(os.path.relpath(filename_json, common.PlexRoot)))
Log.Info ("[?] agent settings xml prefs: '{}'".format(os.path.relpath(filename_xml , common.PlexRoot)))
if Prefs['reset_to_defaults'] and os.path.isfile(filename_xml): os.remove(filename_xml) #delete filename_xml file to reset settings to default
PrefsFieldList = list(set(common.FieldListMovies + common.FieldListSeries + common.FieldListEpisodes + common.DefaultPrefs)) # set is un-ordered lsit so order is lost
filename = os.path.join(Core.app_support_path, 'Plug-ins', 'Hama.bundle', 'Contents', 'DefaultPrefs.json')
if os.path.isfile(filename):
try: json = JSON.ObjectFromString(Core.storage.load(filename), encoding=None) ### Load 'DefaultPrefs.json' to have access to default settings ###
except Exception as e: json = None; Log.Info("Error :"+str(e)+", filename: "+filename)
if json:
Log.Info ("Loaded: "+filename)
Pref_list={}
for entry in json: #Build Pref_list dict from json file
Pref_list[entry['id']]=entry #if key in Prefs gives: KeyError: "No preference named '0' found." so building dict
if entry['type']=='bool':
if entry['type']==1: Pref_list[entry['id']]['value'] = 'true'
else: Pref_list[entry['id']]['value'] = 'false'
for entry in Pref_list: # Check fields not in PrefsFieldList and sources mispelled
if entry not in PrefsFieldList: Log.Info("Next entry not in PrefsFieldList, so will not be updated by the engine")
elif entry not in common.DefaultPrefs: # Check for mispelled metadata sources
for source in Prefs[entry].replace('|', ',').split(','):
if source.strip() not in common.SourceList+('None', ''):
Log.Info(" - Source '{}' invalid".format(source.strip()))
Log.Info("Prefs[{key:<{width}}] = {value:<{width2}}{default}".format(key=entry, width=max(map(len, PrefsFieldList)), value=Prefs[entry] if Prefs[entry]!='' else "Error, go in agent settings, set value and save", width2=max(map(len, [Pref_list[x]['default'] for x in Pref_list])), default=' (still default value)' if Prefs[entry] == Pref_list[entry]['default'] else " (Default: "+Pref_list[entry]['default']+")"))
for entry in PrefsFieldList:
if entry not in Pref_list:
Log.Info("Prefs[{key:<{width}}] does not exist".format(key=entry, width=max(map(len, PrefsFieldList))))
#Plex Media Server\Plug-in Support\Preferences\com.plexapp.agents.hama.xml
Log.Info("".ljust(157, '='))
return MessageContainer('Success', "DefaultPrefs.json valid")
### Pre-Defined Start function ############################################################################################################################################
def Start():
Log.Info("".ljust(157, '='))
Log.Info("HTTP Anidb Metadata Agent by ZeroQI (Forked from Atomicstrawberry's v0.4, AnimeLists XMLs by SdudLee) - CPU: {}, OS: {}".format(Platform.CPU, Platform.OS))
#HTTP.CacheTime = CACHE_1DAY # in sec: CACHE_1MINUTE, CACHE_1HOUR, CACHE_1DAY, CACHE_1WEEK, CACHE_1MONTH
HTTP.CacheTime = CACHE_1MINUTE*30
ValidatePrefs()
common.GetPlexLibraries()
# Load core files
AnimeLists.GetAniDBTVDBMap()
AnimeLists.GetAniDBMovieSets()
AniDB.GetAniDBTitlesDB()
### Movie/Serie search ###################################################################################################################################################
def Search(results, media, lang, manual, movie):
from common import Log #Import here for startup logging to go to the plex pms log
orig_title = media.name if movie else media.show
Log.Open(media=media, movie=movie, search=True)
Log.Info('=== Search() ==='.ljust(157, '='))
Log.Info("title: '%s', name: '%s', filename: '%s', manual: '%s', year: '%s'" % (orig_title, media.name, media.filename, str(manual), media.year)) #if media.filename is not None: filename = String.Unquote(media.filename) #auto match only
Log.Info("start: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
Log.Info("".ljust(157, '='))
if not orig_title: return
#clear-cache directive
if orig_title == "clear-cache":
HTTP.ClearCache()
results.Append(MetadataSearchResult(id='clear-cache', name='Plex web cache cleared', year=media.year, lang=lang, score=0))
return
### Check if a guid is specified "Show name [anidb-id]" ###
Log.Info('--- force id ---'.ljust(157, '-'))
if orig_title and orig_title.isdigit(): orig_title = "xxx [tvdb-{}]".format(orig_title) #Support tvdbid as title, allow to support Xattr from FileBot with tvdbid filled in
match = re.search(r"(?P<show>.*?) ?\[(?P<source>(anidb(|[2-9])|tvdb(|[2-9])|tmdb|tsdb|imdb))-(?P<guid>[^\[\]]*)\]", orig_title, re.IGNORECASE)
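  # Matches titles such as "Some Show [anidb-1234]" or "Some Show [tvdb-81797]" (illustrative ids only).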
if match is not None:
guid=match.group('source') + '-' + match.group('guid')
if guid.startswith('anidb') and not movie and max(map(int, media.seasons.keys()))>1: Log.Info('[!] multiple seasons = tvdb numbering, BAKA!')
results.Append(MetadataSearchResult(id=guid, name=match.group('show')+" ["+guid+']', year=media.year, lang=lang, score=100))
Log.Info("Forced ID - source: {}, id: {}, title: '{}'".format(match.group('source'), match.group('guid'), match.group('show')))
else: #if media.year is not None: orig_title = orig_title + " (" + str(media.year) + ")" ### Year - if present (manual search or from scanner but not mine), include in title ###
Log.Info('--- source searches ---'.ljust(157, '-'))
maxi, n = 0, 0
if movie or max(map(int, media.seasons.keys()))<=1: maxi, n = AniDB.Search(results, media, lang, manual, movie)
if maxi<50 and movie: maxi = TheMovieDb.Search(results, media, lang, manual, movie)
if maxi<80 and not movie or n>1: maxi = max(TheTVDBv2.Search(results, media, lang, manual, movie), maxi)
Log.Info("".ljust(157, '='))
Log.Info("end: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
Log.Close()
### Update Movie/Serie from metadata.id assigned #########################################################################################################################
def Update(metadata, media, lang, force, movie):
from common import Log #Import here for startup logging to go to the plex pms log
Log.Open(media=media, movie=movie, search=False)
source = metadata.id.split('-', 1)[0]
error_log = { 'AniDB summaries missing' :[], 'AniDB posters missing' :[], 'anime-list AniDBid missing':[], 'anime-list studio logos' :[],
'TVDB posters missing' :[], 'TVDB season posters missing':[], 'anime-list TVDBid missing' :[], 'Plex themes missing' :[],
'Missing Episodes' :[], 'Missing Specials' :[], 'Missing Episode Summaries' :[], 'Missing Special Summaries':[]}
Log.Info('=== Update() ==='.ljust(157, '='))
Log.Info("id: {}, title: {}, lang: {}, force: {}, movie: {}".format(metadata.id, metadata.title, lang, force, movie))
Log.Info("start: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
# Major meta source hard required orders (ignoring id info):
# mappingList: AnimeLists->TheTVDBv2/tvdb4/AniDB->AdjustMapping
# mappingList['season_map']: AnimeLists->TheTVDBv2->AdjustMapping
# mappingList['relations_map']: AniDB->AdjustMapping
# mappingList['absolute_map']: tvdb4->TheTVDBv2->AniDB
dict_AnimeLists, AniDBid, TVDBid, TMDbid, IMDbid, mappingList = AnimeLists.GetMetadata(media, movie, error_log, metadata.id)
dict_tvdb4 = tvdb4.GetMetadata(media, movie, source, TVDBid, mappingList)
dict_TheTVDB, IMDbid = TheTVDBv2.GetMetadata(media, movie, error_log, lang, source, AniDBid, TVDBid, IMDbid, mappingList)
dict_AniDB, ANNid, MALid = AniDB.GetMetadata(media, movie, error_log, source, AniDBid, TVDBid, AnimeLists.AniDBMovieSets, mappingList)
dict_TheMovieDb, TSDbid, TMDbid, IMDbid = TheMovieDb.GetMetadata(media, movie, TVDBid, TMDbid, IMDbid)
dict_FanartTV = FanartTV.GetMetadata( movie, TVDBid, TMDbid, IMDbid)
dict_Plex = Plex.GetMetadata(metadata, error_log, TVDBid, Dict(dict_TheTVDB, 'title'))
dict_TVTunes = TVTunes.GetMetadata(metadata, Dict(dict_TheTVDB, 'title'), Dict(mappingList, AniDBid, 'name')) #Sources[m:eval('dict_'+m)]
dict_OMDb = OMDb.GetMetadata(movie, IMDbid) #TVDBid=='hentai'
dict_MyAnimeList = MyAnimeList.GetMetadata(MALid, "movie" if movie else "tvshow", media)
dict_AniList = AniList.GetMetadata(AniDBid, MALid)
dict_Local = Local.GetMetadata(media, movie)
if anidb34.AdjustMapping(source, mappingList, dict_AniDB, dict_TheTVDB, dict_FanartTV):
dict_AniDB, ANNid, MALid = AniDB.GetMetadata(media, movie, error_log, source, AniDBid, TVDBid, AnimeLists.AniDBMovieSets, mappingList)
Log.Info('=== Update() ==='.ljust(157, '='))
Log.Info("AniDBid: '{}', TVDBid: '{}', TMDbid: '{}', IMDbid: '{}', ANNid:'{}', MALid: '{}'".format(AniDBid, TVDBid, TMDbid, IMDbid, ANNid, MALid))
common.write_logs(media, movie, error_log, source, AniDBid, TVDBid)
common.UpdateMeta(metadata, media, movie, {'AnimeLists': dict_AnimeLists, 'AniDB': dict_AniDB, 'TheTVDB': dict_TheTVDB, 'TheMovieDb': dict_TheMovieDb,
'FanartTV': dict_FanartTV, 'tvdb4': dict_tvdb4, 'Plex': dict_Plex, 'TVTunes': dict_TVTunes,
'OMDb': dict_OMDb, 'Local': dict_Local, 'AniList': dict_AniList, 'MyAnimeList': dict_MyAnimeList}, mappingList)
Log.Info("end: {}".format(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S,%f")))
Log.Close()
### Agent declaration ##################################################################################################################################################
class HamaTVAgent(Agent.TV_Shows): # 'com.plexapp.agents.none', 'com.plexapp.agents.opensubtitles'
name, primary_provider, fallback_agent, contributes_to, accepts_from = 'HamaTV', True, False, None, ['com.plexapp.agents.localmedia']
languages = [Locale.Language.English, 'fr', 'zh', 'sv', 'no', 'da', 'fi', 'nl', 'de', 'it', 'es', 'pl', 'hu', 'el', 'tr', 'ru', 'he', 'ja', 'pt', 'cs', 'ko', 'sl', 'hr']
def search (self, results, media, lang, manual): Search (results, media, lang, manual, False)
def update (self, metadata, media, lang, force ): Update (metadata, media, lang, force, False)
class HamaMovieAgent(Agent.Movies):
name, primary_provider, fallback_agent, contributes_to, accepts_from = 'HamaMovies', True, False, None, ['com.plexapp.agents.localmedia']
languages = [Locale.Language.English, 'fr', 'zh', 'sv', 'no', 'da', 'fi', 'nl', 'de', 'it', 'es', 'pl', 'hu', 'el', 'tr', 'ru', 'he', 'ja', 'pt', 'cs', 'ko', 'sl', 'hr']
def search (self, results, media, lang, manual): Search (results, media, lang, manual, True)
def update (self, metadata, media, lang, force ): Update (metadata, media, lang, force, True)
| gpl-3.0 | -3,552,288,789,778,248,700 | 83.55814 | 420 | 0.580033 | false | 3.545588 | false | false | false |
radical-software/mongrey | mongrey/validators.py | 1 | 3493 | # -*- coding: utf-8 -*-
import re
from .exceptions import ValidationError
from . import utils
from .constants import _
EMAIL_REGEX = re.compile(
# dot-atom
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*"
# quoted-string
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"'
# domain (max length of an ICAAN TLD is 22 characters)
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,253}[A-Z0-9])?\.)+[A-Z]{2,22}$', re.IGNORECASE
)
def clean_domain(value, field_name=None, error_class=ValidationError):
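    # Reuse EMAIL_REGEX to validate a bare domain by prepending a dummy local part.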
new_value = "user@%s" % value
if not EMAIL_REGEX.match(new_value):
message = _(u"Invalid Domain: %s") % value
raise error_class(message, field_name=field_name)
def clean_email(value, field_name=None, error_class=ValidationError):
if not EMAIL_REGEX.match(value):
message = _(u"Invalid Email: %s") % value
raise error_class(message, field_name=field_name)
def clean_username(value, field_name=None, error_class=ValidationError):
#TODO:
pass
def clean_email_or_username(value, field_name=None, error_class=ValidationError):
valid_email = False
valid_username = False
try:
clean_email(value, field_name, error_class)
valid_email = True
except:
pass
if not valid_email:
try:
clean_username(value, field_name, error_class)
valid_username = True
except:
pass
if not valid_email and not valid_username:
message = _(u"Invalid Username: %s") % value
raise error_class(message, field_name=field_name)
def clean_email_or_domain(value, field_name=None, error_class=ValidationError):
valid_email = False
valid_domain = False
try:
clean_email(value, field_name, error_class)
valid_email = True
except:
pass
if not valid_email:
try:
clean_domain(value, field_name, error_class)
valid_domain = True
except:
pass
if not valid_email and not valid_domain:
message = _(u"Invalid Email or Domain: %s") % value
raise error_class(message, field_name=field_name)
def clean_ip_address(value, field_name=None, error_class=ValidationError):
valid = utils.check_ipv4(value) or utils.check_ipv6(value)
if not valid:
message = _(u"Invalid IP Address: %s") % value
raise error_class(message, field_name=field_name)
def clean_ip_address_or_network(value, field_name=None, error_class=ValidationError):
valid = utils.check_ipv4(value) or utils.check_ipv6(value) or utils.check_is_network(value)
if not valid:
message = _(u"Invalid IP Address: %s") % value
raise error_class(message, field_name=field_name)
def clean_hostname(value, field_name=None, error_class=ValidationError):
valid = True
if value:
vals = value.split(".")
if value is None or len(value.strip()) == 0:
valid = False
elif len(value) > 255:
valid = False
elif len(vals) < 3:
valid = False
elif value.strip().lower() == "unknow":
valid = False
domain = ".".join(vals[(len(vals) - 2):(len(vals))])
if len(domain) > 63:
valid = False
if not valid:
message = _(u"Invalid Hostname: %s") % value
raise error_class(message, field_name=field_name)
| bsd-3-clause | 329,091,669,721,205,440 | 28.854701 | 95 | 0.590896 | false | 3.475622 | false | false | false |
lnls-fac/fieldmaptrack | fieldmaptrack/beam.py | 1 | 1160 | import math
import mathphys.constants as consts
import mathphys.units as units
class Beam:
def __init__(self, energy, current = 0):
self.energy = energy
self.current = current
self.brho, self.velocity, self.beta, self.gamma = Beam.calc_brho(self.energy)
@staticmethod
def calc_brho(energy):
electron_rest_energy_GeV = units.joule_2_eV * consts.electron_rest_energy / 1e9
gamma = energy/electron_rest_energy_GeV
beta = math.sqrt(((gamma-1.0)/gamma)*((gamma+1.0)/gamma))
velocity = consts.light_speed * beta
brho = beta * (energy * 1e9) / consts.light_speed
return brho, velocity, beta, gamma
def __str__(self):
r = ''
r += '{0:<10s} {1:f} GeV'.format('energy:', self.energy)
r += '\n{0:<10s} {1:f}'.format('gamma:', self.gamma)
r += '\n{0:<10s} 1 - {1:e}'.format('beta:', 1.0-self.beta)
r += '\n{0:<10s} {1:.0f} - {2:f} m/s'.format('velocity:', consts.light_speed, consts.light_speed - self.velocity)
r += '\n{0:<10s} {1:f} T.m'.format('brho:', self.brho)
return r
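# Minimal usage sketch (assumes the mathphys package is importable):
#     beam = Beam(energy=3.0)   # 3 GeV electron beam; current defaults to 0
#     print(beam)               # prints energy, gamma, beta, velocity and brho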
| mit | 8,841,157,816,990,691,000 | 37.666667 | 121 | 0.583621 | false | 2.997416 | false | false | false |
tensorflow/federated | tensorflow_federated/python/simulation/baselines/emnist/digit_recognition_tasks.py | 1 | 8364 | # Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Library for creating digit recognition tasks on EMNIST."""
import enum
from typing import Optional, Union
import tensorflow as tf
from tensorflow_federated.python.learning import keras_utils
from tensorflow_federated.python.learning import model
from tensorflow_federated.python.simulation.baselines import baseline_task
from tensorflow_federated.python.simulation.baselines import client_spec
from tensorflow_federated.python.simulation.baselines import task_data
from tensorflow_federated.python.simulation.baselines.emnist import emnist_models
from tensorflow_federated.python.simulation.baselines.emnist import emnist_preprocessing
from tensorflow_federated.python.simulation.datasets import client_data
from tensorflow_federated.python.simulation.datasets import emnist
class DigitRecognitionModel(enum.Enum):
"""Enum for EMNIST digit recognition models."""
CNN_DROPOUT = 'cnn_dropout'
CNN = 'cnn'
TWO_LAYER_DNN = '2nn'
_DIGIT_RECOGNITION_MODELS = [e.value for e in DigitRecognitionModel]
def _get_digit_recognition_model(model_id: Union[str, DigitRecognitionModel],
only_digits: bool) -> tf.keras.Model:
"""Constructs a `tf.keras.Model` for digit recognition."""
try:
model_enum = DigitRecognitionModel(model_id)
except ValueError:
    raise ValueError('The model argument must be one of {}, found {}'.format(
        _DIGIT_RECOGNITION_MODELS, model_id))
if model_enum == DigitRecognitionModel.CNN_DROPOUT:
keras_model = emnist_models.create_conv_dropout_model(
only_digits=only_digits)
elif model_enum == DigitRecognitionModel.CNN:
keras_model = emnist_models.create_original_fedavg_cnn_model(
only_digits=only_digits)
elif model_enum == DigitRecognitionModel.TWO_LAYER_DNN:
keras_model = emnist_models.create_two_hidden_layer_model(
only_digits=only_digits)
else:
    raise ValueError('The model id must be one of {}, found {}'.format(
        _DIGIT_RECOGNITION_MODELS, model_enum))
return keras_model
def create_digit_recognition_task_from_datasets(
train_client_spec: client_spec.ClientSpec,
eval_client_spec: Optional[client_spec.ClientSpec],
model_id: Union[str, DigitRecognitionModel], only_digits: bool,
train_data: client_data.ClientData,
test_data: client_data.ClientData) -> baseline_task.BaselineTask:
"""Creates a baseline task for digit recognition on EMNIST.
Args:
train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying how to
preprocess train client data.
eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
specifying how to preprocess evaluation client data. If set to `None`, the
evaluation datasets will use a batch size of 64 with no extra
preprocessing.
model_id: A string identifier for a digit recognition model. Must be one of
'cnn_dropout', 'cnn', or '2nn'. These correspond respectively to a CNN
model with dropout, a CNN model with no dropout, and a densely connected
network with two hidden layers of width 200.
only_digits: A boolean indicating whether to use the full EMNIST-62 dataset
containing 62 alphanumeric classes (`True`) or the smaller EMNIST-10
dataset with only 10 numeric classes (`False`).
train_data: A `tff.simulation.datasets.ClientData` used for training.
test_data: A `tff.simulation.datasets.ClientData` used for testing.
Returns:
A `tff.simulation.baselines.BaselineTask`.
"""
emnist_task = 'digit_recognition'
if eval_client_spec is None:
eval_client_spec = client_spec.ClientSpec(
num_epochs=1, batch_size=64, shuffle_buffer_size=1)
train_preprocess_fn = emnist_preprocessing.create_preprocess_fn(
train_client_spec, emnist_task=emnist_task)
eval_preprocess_fn = emnist_preprocessing.create_preprocess_fn(
eval_client_spec, emnist_task=emnist_task)
task_datasets = task_data.BaselineTaskDatasets(
train_data=train_data,
test_data=test_data,
validation_data=None,
train_preprocess_fn=train_preprocess_fn,
eval_preprocess_fn=eval_preprocess_fn)
def model_fn() -> model.Model:
return keras_utils.from_keras_model(
keras_model=_get_digit_recognition_model(model_id, only_digits),
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
input_spec=task_datasets.element_type_structure,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return baseline_task.BaselineTask(task_datasets, model_fn)
def create_digit_recognition_task(
train_client_spec: client_spec.ClientSpec,
eval_client_spec: Optional[client_spec.ClientSpec] = None,
model_id: Union[str, DigitRecognitionModel] = 'cnn_dropout',
only_digits: bool = False,
cache_dir: Optional[str] = None,
use_synthetic_data: bool = False) -> baseline_task.BaselineTask:
"""Creates a baseline task for digit recognition on EMNIST.
The goal of the task is to minimize the sparse categorical crossentropy
between the output labels of the model and the true label of the image. When
`only_digits = True`, there are 10 possible labels (the digits 0-9), while
when `only_digits = False`, there are 62 possible labels (both numbers and
letters).
This classification can be done using a number of different models, specified
using the `model_id` argument. Below we give a list of the different models
that can be used:
* `model_id = cnn_dropout`: A moderately sized convolutional network. Uses
two convolutional layers, a max pooling layer, and dropout, followed by two
dense layers.
* `model_id = cnn`: A moderately sized convolutional network, without any
dropout layers. Matches the architecture of the convolutional network used
by (McMahan et al., 2017) for the purposes of testing the FedAvg algorithm.
* `model_id = 2nn`: A densely connected network with 2 hidden layers, each
with 200 hidden units and ReLU activations.
Args:
train_client_spec: A `tff.simulation.baselines.ClientSpec` specifying how to
preprocess train client data.
eval_client_spec: An optional `tff.simulation.baselines.ClientSpec`
specifying how to preprocess evaluation client data. If set to `None`, the
evaluation datasets will use a batch size of 64 with no extra
preprocessing.
model_id: A string identifier for a digit recognition model. Must be one of
'cnn_dropout', 'cnn', or '2nn'. These correspond respectively to a CNN
model with dropout, a CNN model with no dropout, and a densely connected
network with two hidden layers of width 200.
only_digits: A boolean indicating whether to use the full EMNIST-62 dataset
containing 62 alphanumeric classes (`True`) or the smaller EMNIST-10
dataset with only 10 numeric classes (`False`).
    cache_dir: An optional directory to cache the downloaded datasets. If
`None`, they will be cached to `~/.tff/`.
use_synthetic_data: A boolean indicating whether to use synthetic EMNIST
data. This option should only be used for testing purposes, in order to
avoid downloading the entire EMNIST dataset.
Returns:
A `tff.simulation.baselines.BaselineTask`.
"""
if use_synthetic_data:
synthetic_data = emnist.get_synthetic()
emnist_train = synthetic_data
emnist_test = synthetic_data
else:
emnist_train, emnist_test = emnist.load_data(
only_digits=only_digits, cache_dir=cache_dir)
return create_digit_recognition_task_from_datasets(train_client_spec,
eval_client_spec, model_id,
only_digits, emnist_train,
emnist_test)
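# Minimal usage sketch (illustrative only; parameter values are assumptions):
#     train_spec = client_spec.ClientSpec(num_epochs=1, batch_size=20)
#     task = create_digit_recognition_task(train_spec, use_synthetic_data=True)
#     # `task` bundles the preprocessed EMNIST datasets with a model_fn.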
| apache-2.0 | 4,423,842,963,618,045,000 | 44.456522 | 88 | 0.724295 | false | 3.788043 | true | false | false |
litex-hub/pythondata-cpu-blackparrot | pythondata_cpu_blackparrot/system_verilog/bp_fe/test/tb/bp_fe_icache/trace_script.py | 1 | 1103 | #!/bin/usr/python
import sys, getopt
from trace_gen import TraceGen
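# Usage sketch (assumption): the first CLI argument is an output directory prefix
# with a trailing slash, e.g. `python trace_script.py traces/`, because it is
# concatenated directly with the trace file names below.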
def main():
tracer = TraceGen(39, 28, 32)
filepath = sys.argv[1]
filename = filepath + "test_load.tr"
file = open(filename, "w")
file.write(tracer.print_header())
file.write(tracer.print_comment("Load from address - 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60"))
for i in range(0, 64, 4):
temp_vaddr = (1 << 31) | i
temp_ptag = (1<<19)
file.write(tracer.send_load(temp_vaddr, temp_ptag, False))
for i in range(0, 64, 4):
file.write(tracer.recv_data(i))
file.write(tracer.test_done())
file.close()
filename = filepath + "test_uncached_load.tr"
file = open(filename, "w")
file.write(tracer.print_header())
file.write(tracer.print_comment("Uncached Load from address 36"))
temp_vaddr = (1 << 31) | 36
temp_ptag = (1 << 19)
file.write(tracer.send_load(temp_vaddr, temp_ptag, True))
file.write(tracer.recv_data(36))
file.write(tracer.test_done())
file.close()
if __name__ == "__main__":
main()
| bsd-3-clause | 1,709,599,234,293,871,900 | 23.068182 | 117 | 0.598368 | false | 2.925729 | false | false | false |
aroberge/reeborg-docs | src/python/reeborg_fr.py | 1 | 21637 | """Ce module contient les fonctions, classes et exceptions qui peuvent être
utilisées dans un programme Python pour le Monde de Reeborg.
"""
# When generating documentation using sphinx, these modules are both
# unavailable and not needed
try:
from browser import window
RUR = window.RUR
except ImportError:
from collections import defaultdict
window = defaultdict(str)
print("\n --> Skipping importing from browser for sphinx.\n")
# All functions from Javascript used below should have names of the form
# RUR._xyz_ and be defined in commands.js; functions and methods should appear
# in the same order as they appear in the English version.
def au_but(): #py:at_goal
"""Indique si Reeborg a atteint la position demandée.
Returns:
True si Reeborg a atteint son but, False autrement.
"""
return RUR._at_goal_()
def observer(expr): #py:add_watch
"""Ajoute une expression Python valide (donnée comme une chaîne)
à la liste des variables à observer.
"""
RUR.add_watch(expr)
def construit_un_mur(): #py:build_wall
"""Indique à Reeborg de construire un mur devant sa position."""
RUR._build_wall_()
def transporte(obj=None): #py:carries_object
""" Indique si Reeborg transporte un ou des objets.
Args:
obj: paramètre optionnel qui est le nom d'un objet sous forme de
chaîne de caractères.
Returns:
une liste d'objets retrouvés. Si Reeborg ne transporte aucun objet,
ou si un objet spécifié comme paramètre n'est pas présent,
le résultat est une liste vide.
Exemples possibles:
>>> transporte()
["jeton", "pomme"]
>>> transporte("jeton")
["jeton"]
>>> transporte("fraise")
[]
"""
if obj is not None:
ans = RUR._carries_object_(obj)
else:
ans = RUR._carries_object_()
return list(ans)
def efface_print(): #py:clear_print
"""Efface le texte précédemment écrit avec des fonctions print()."""
RUR._clear_print_()
def robot_par_defaut(): #py:default_robot
"""Retourne un robot recréé pour correspondre au robot par défaut."""
class Robot(RobotUsage):
def __init__(self):
self.body = RUR._default_robot_body_()
return Robot()
def dir_js(obj): #py:dir_js
"""Liste les attributs et méthodes d'un objet Javascript."""
RUR._dir_js_(obj)
def dir_py(obj): #py:dir_py
"""Lists attributes and methods of a Python object, excluding
those whose name start with a double underscore and are
considered to be private.
"""
attrs = []
for attr in dir(obj):
if attr.startswith("__"):
continue
if callable(getattr(obj, attr)):
attr += "()"
attrs.append(attr)
print_html(str("\n".join(attrs)).replace("&", "&").replace("<", "<"
).replace(">", ">").replace("\n", "<br>"))
def termine(): #py:done
"""Termine l'exécution d'un programme."""
RUR._done_()
def rien_devant(): #py:front_is_clear
"""Indique si un obstacle (mur, clôture, eau, mur de brique, etc.)
bloque le chemin.
Returns:
True si le chemin est non bloqué, False autrement."""
return RUR._front_is_clear_()
def dans_le_sac(): #py:in_the_bag
return dict(RUR._in_the_bag_())
def est_face_au_nord(): #py:is_facing_north
"""Indique si Reeborg fait face au nord (haut de l'écran) ou non."""
return RUR._is_facing_north_()
def avance(): #py:move
"""Avance d'une case"""
RUR._move_()
def pas_de_surlignement(): #py:no_highlight
"""Empêche le surlignement de lignes de code d'être effectué.
Pour véritablement éliminer tout effet lié au surlignement de
lignes de code, il peut être nécessaire d'exécuter un programme
à deux reprises."""
RUR._no_highlight_()
def objet_ici(obj=None): #py:object_here
""" Indique si un ou des types d'objets se trouvent à la position du robot.
Args:
obj: paramètre optionnel qui est le nom d'un objet sous forme de
chaîne de caractères.
Returns:
une liste d'objets retrouvés. Si aucun objet n'est présent
ou si un objet spécifié comme paramètre n'est pas présent,
le résultat est une liste vide.
Exemples possibles:
>>> objet_ici()
["jeton", "pomme"]
>>> objet_ici("jeton")
["jeton"]
>>> objet_ici("fraise")
[]
"""
if obj is not None:
ans = RUR._object_here_(obj)
else:
ans = RUR._object_here_()
return list(ans) # convert from js list-like object to proper Python list
def pause(ms=None): #py:pause
"""Pause l'éxecution du programme à l'écran.
Si un argument (temps, en millisecondes) est fourni, l'exécution
redémarre automatiquement après que ce temps ait été écoulé.
"""
if ms is None:
RUR._pause_()
else:
RUR._pause_(ms)
def depose(obj=None): #py:put
"""Dépose un objet. Si Reeborg transporte plus d'un type d'objet,
on doit spécifier lequel sinon ceci causera une exception."""
if obj is None:
RUR._put_()
else:
RUR._put_(obj)
def enregistrement(bool): #py:recording
"""Arrête ou redémarre les enregistrement d'actions de Reeborg.
Args:
bool: True si on veut avoir des enregistrement, False autrement
"""
RUR._recording_(bool)
def plus_de_robots(): #py:remove_robots
"""Élimine tous les robots existants"""
RUR._remove_robots_()
def rien_a_droite(): #py:right_is_clear
"""Indique si un obstacle (mur, clôture, eau, mur de brique, etc.)
se trouve à la droite immédiate de Reeborg.
Returns:
True si un obstacle est à la droite, False autrement."""
return RUR._right_is_clear_()
def couleur_de_trace(couleur): #py:set_trace_color
"""Change la couleur de trace du robot.
Args:
couleur: quatre formats sont possibles soit les noms de
couleur du web (en anglais), les formats rgb et rgba,
et la notation hexadécimale.
Exemples possibles::
>>> couleur_de_trace("red") # nom de couleur en anglais
>>> couleur_de_trace("rgb(125, 0, 0)")
>>> couleur_de_trace("rgba(125, 0, 0, 0.5)")
>>> couleur_de_trace("#FF00FF")
"""
RUR._set_trace_color_(couleur)
def style_de_trace(style="normal"): #py:set_trace_style
"""Change le style de trace du robot.
Args:
style: "épais" ou "epais" (sans accent) pour une trace
plus visible, "invisible" pour une trace invisible(!),
"normal" ou ne pas spécifier d'argument pour avoir
le style normal.
Le choix "invisible" est équivalent à
couleur_de_trace("rgba(0, 0, 0, 0)") c'est-à-dire
une couleur complètement transparente.
La trace plus épaisse est centrée et ne permet pas
de voir qu'un virage à droite est constitué de trois
virages à gauche, ni de distinguer les aller-retours.
"""
if style in ["épais", "epais"]:
style = "thick"
elif style == "normal":
style = "default"
elif style == "invisible":
pass # leave as is
else:
raise ReeborgError("Valeur de style inconnue pour style_de_trace().")
RUR._set_trace_style_(style)
def son(bool): #py:sound
"""Active ou désactive les effets sonores."""
RUR._sound_(bool)
def prend(obj=None): #py:take
"""Prend un objet. Si plus d'un type d'objet se trouve à l'endroit où
Reeborg est, on doit spécifier lequel sinon ceci causera une exception.
"""
if obj is None:
RUR._take_()
else:
RUR._take_(obj)
def pense(ms): #py:think
"""Fixe un délai entre les actions de Reeborg à l'écran."""
RUR._think_(ms)
def tourne_a_gauche(): #py:turn_left
"""Reeborg tourne à sa gauche."""
RUR._turn_left_()
def voir_source_js(fn): #py:view_source_js
"""Affiche le code source d'une fonction Javascript."""
RUR._view_source_js_(fn)
def mur_devant(): #py:wall_in_front
"""Indique si un mur bloque le chemin.
Returns:
True si un mur est devant, False autrement."""
return RUR._wall_in_front_()
def mur_a_droite(): #py:wall_on_right
"""Indique si un mur se trouve immédiatement à la droite de Reeborg.
Returns:
True si un mur est à la droite, False autrement."""
return RUR._wall_on_right_()
def Monde(url, nom=None): #py:World
"""Permet de sélectioner un monde donné à l'intérieur d'un programme.
Si le monde présentement utilisé est différent, le résultat de
l'exécution de cette instruction fera en sorte que le monde spécifié
par le paramètre `url` sera choisi sans que le reste du programme
ne soit déjà exécuté. Si le monde spécifié est déjà le monde
choisi, la fonction `Monde(...)` est ignorée et le reste
du programme est exécuté.
Le monde spécifié sera ajouté au sélecteur s'il n'est pas
déjà présent.
Args:
url: deux choix possibles, soit un nom apparaissant dans le
sélecteur de monde, ou un lien à un document accessible
via Internet.
nom: paramètre optionnel; si ce paramètre est choisi, le nom
apparaissant dans le sélecteur sera nom.
Exemples:
>>> Monde("But 1") # monde inclus par défaut
>>> Monde("http://reeborg.ca/mon_monde") # exemple fictif
# le nom http://reeborg.ca/mon_monde sera ajouté au sélecteur
>>> Monde("http://reeborg.ca/mon_monde", "Bonjour")
# le nom Bonjour sera ajouté au sélecteur pour indiquer ce monde.
"""
if nom is None:
RUR.file_io.load_world_from_program(url)
else:
RUR.file_io.load_world_from_program(url, nom)
def max_nb_instructions(nb): #py:set_max_nb_instructions
"""Surtout destiné aux créateurs de mondes,
ceci permet de changer le nombre maximal d'instructions
exécutées par un robot.
"""
RUR._set_max_steps_(nb)
def max_nb_robots(nb): #py:set_max_nb_robots
"""Surtout destiné aux créateurs de mondes,
ceci permet de limiter le nombre de robots
permis dans un monde donné.
"""
RUR._set_max_nb_robots_(nb)
def print_html(html, append=False): #py:print_html
"""Surtout destiné aux créateurs de monde, la fonction print_html() est
semblable à print() sauf qu'elle accepte du texte html.
"""
RUR.output.print_html(html, append)
window['print_html'] = print_html
def nouvelles_images_de_robot(images): #py:new_robot_images
"""Surtout destiné aux créateurs de mondes, ceci permet de remplacer
les images utilisées pour le robot par d'autres images.
Une explication plus détaillée viendra.
"""
if "est" in images:
images["east"] = images["est"]
if "ouest" in images:
images["west"] = images["ouest"]
if "nord" in images:
images["north"] = images["nord"]
if "sud" in images:
images["south"] = images["sud"]
RUR._new_robot_images_(images)
def MenuPersonalise(contenu): #py:MakeCustomMenu
"""À l'intention des éducateurs. Permet de créer des menus de monde
personalisés. Voir la documentation pour plus de détails."""
RUR.custom_menu.make(contenu)
class RobotUsage(object): #py:UR
def __init__(self, x=1, y=1, orientation='est', jeton=None): #py:UR.__init__
"""Créé un robot usagé.
Args:
x: coordonnée horizontale; un entier supérieur ou égal à 1
y: coordonnée vertical; un entier supérieur ou égal à 1
orientation: une des valeurs suivante: "nord", "sud",
est", "ouest"
jeton: nombre initial de jetons à donner au robot;
un entier positif, ou la chaîne "inf" pour un
nombre infini.
"""
if jeton is None:
robot = RUR.robot.create_robot(x, y, orientation)
else:
robot = RUR.robot.create_robot(x, y, orientation, jeton)
self.body = robot
RUR.world.add_robot(self.body)
def __str__(self): #py:UR.__str__
location = "({}, {})".format(self.body.x, self.body.y)
if self.body._orientation == RUR.EAST:
facing = "est face à l'est"
elif self.body._orientation == RUR.WEST:
facing = "est face à l'ouest"
elif self.body._orientation == RUR.NORTH:
facing = "est face au nord"
elif self.body._orientation == RUR.SOUTH:
facing = "est face au sud"
if 'token' in self.body.objects:
if self.body.objects['token'] == 'inf':
carries = "transporte un nombre infini de jetons."
else:
carries = 'transporte %s jetons' % self.body.objects['token']
else:
carries = 'ne transporte pas de jetons'
return "RobotUsage situé en {} {} {}.".format(location, facing, carries) # NOQA
def avance(self): #py:UR.move
"""avance d'une case"""
RUR.control.move(self.body)
def au_but(self): #py:UR.at_goal
"""Indique si Reeborg a atteint la position demandée.
Returns:
True si Reeborg a atteint son but.
"""
return RUR.control.at_goal(self.body)
def construit_un_mur(self): #py:UR.build_wall
"""Indique à Reeborg de construire un mur devant sa position."""
RUR.control.build_wall(self.body)
def rien_devant(self): #py:UR.front_is_clear
"""Indique si un obstacle (mur, clôture, eau, mur de brique, ) bloque
le chemin.
Returns:
True si le chemin est non bloqué, False autrement."""
return RUR.control.front_is_clear(self.body)
def mur_devant(self): #py:UR.wall_in_front
"""Indique si un mur bloque le chemin.
Returns:
True si un mur est devant, False autrement."""
return RUR.control.wall_in_front(self.body)
def rien_a_droite(self): #py:UR.right_is_clear
"""Indique si un obstacle (mur, clôture, eau, mur de brique, etc.)
se trouve à la droite immédiate de Reeborg.
Returns:
True si un obstacle est à la droite, False autrement."""
return RUR.control.right_is_clear(self.body)
def mur_a_droite(self): #py:UR.wall_on_right
"""Indique si un mur se trouve immédiatement à la droite de Reeborg.
Returns:
True si un mur est à la droite, False autrement."""
return RUR.control.wall_on_right(self.body)
def dans_le_sac(self): #py:UR.in_the_bag
return dict(RUR._in_the_bag_(self.body))
def est_face_au_nord(self): #py:UR.is_facing_north
"""Indique si Reeborg fait face au nord (haut de l'écran) ou non."""
return RUR.control.is_facing_north(self.body)
def depose(self, obj=None): #py:UR.put
"""Dépose un objet. Si Reeborg transporte plus d'un type d'objet,
on doit spécifier lequel sinon ceci causera une exception."""
if obj is None:
RUR.control.put(self.body)
else:
RUR.control.put(self.body, obj)
def prend(self, obj=None): #py:UR.take
"""Prend un objet. Si plus d'un type d'objet se trouve à l'endroit où
Reeborg est, on doit spécifier lequel sinon ceci causera une
exception.
"""
if obj is None:
RUR.control.take(self.body)
else:
RUR.control.take(self.body, obj)
def objet_ici(self, obj=None): #py:UR.object_here
""" Indique si un ou des types d'objets se trouvent à la position du robot.
Args:
obj: paramètre optionnel qui est le nom d'un objet sous forme de
chaîne de caractères.
Returns:
une liste d'objets retrouvés. Si aucun objet n'est présent
ou si un objet spécifié comme paramètre n'est pas présent,
le résultat est une liste vide.
Exemples possibles:
>>> reeborg = RobotUsage()
>>> reeborg.objet_ici()
["jeton", "pomme"]
>>> reeborg.objet_ici("jeton")
["jeton"]
>>> reeborg.objet_ici("fraise")
[]
"""
if obj is not None:
return list(RUR.control.object_here(self.body, obj))
else:
return list(RUR.control.object_here(self.body))
def transporte(self, obj=None): #py:UR.carries_object
""" Indique si Reeborg transporte un ou des objets.
Args:
obj: paramètre optionnel qui est le nom d'un objet sous forme de
chaîne de caractères.
Returns:
une liste d'objets retrouvés. Si Reeborg ne transporte
aucun objet, ou si un objet spécifié comme paramètre n'est pas
présent, le résultat est une liste vide.
Exemples possibles:
>>> reeborg = RobotUsage()
>>> reeborg.transporte()
["jeton", "pomme"]
>>> reeborg.transporte("jeton")
["jeton"]
>>> reeborg.transporte("fraise")
[]
"""
if obj is not None:
return list(RUR.control.carries_object(self.body, obj))
else:
return list(RUR.control.carries_object(self.body))
def tourne_a_gauche(self): #py:UR.turn_left
RUR.control.turn_left(self.body)
def modele(self, modele): #py:UR.set_model
"""Permet de choisir le modèle du robot.
Args:
modele: un nombre de 0 à 3.
"""
RUR.control.set_model(self.body, modele)
def couleur_de_trace(self, couleur): #py:UR.set_trace_color
"""Change la couleur de trace du robot.
Args:
couleur: quatre formats sont possibles soit les noms de
couleur du web (en anglais), les formats rgb et rgba,
et la notation hexadécimale.
Exemples possibles::
>>> reeborg = RobotUsage()
>>> reeborg.couleur_de_trace("red") # nom anglais de couleur
>>> reeborg.couleur_de_trace("rgb(125, 0, 0)")
>>> reeborg.couleur_de_trace("rgba(125, 0, 0, 0.5)")
>>> reeborg.couleur_de_trace("#FF00FF")
"""
RUR.control.set_trace_color(self.body, couleur)
def style_de_trace(self, style): #py:UR.set_trace_style
"""Change le style de trace du robot.
Args:
style: "épais" ou "epais" (sans accent) pour une trace
plus visible, "invisible" pour une trace invisible(!),
"normal" ou ne pas spécifier d'argument pour avoir
le style normal.
La trace plus épaisse est centrée et ne permet pas
de voir qu'un virage à droite est constitué de trois
virages à gauche, ni de distinguer les aller-retours.
"""
if style in ["épais", "epais"]:
style = "thick"
elif style == "invisible":
pass
elif style == "normal":
style = "default"
else:
raise ReeborgError("Valeur de style inconnue pour style_de_trace().") # NOQA
RUR.control.set_trace_style(self.body, style)
class ReeborgError(Exception): #py:RE
"""Exceptions spécifique au monde de Reeborg.
Exemples possible::
def termine(): #py:
message = "Vous ne devez pas utiliser termine()."
raise ReeborgError(message)
#---- ou ------
try:
avance()
except ReeborgError: # ignore le mur qui bloquait le chemin
tourne_a_gauche()
"""
def __init__(self, message): #py:RE.__init__
self.reeborg_shouts = message
def __str__(self): #py:RE.__str__
return repr(self.reeborg_shouts)
try:
window['ReeborgError'] = ReeborgError
except:
pass
class WallCollisionError(ReeborgError): #py:WCE
"""Exception spécifique au monde de Reeborg.
A lieu lorsque Reeborg frappe un mur
"""
pass
try:
window['WallCollisionError'] = WallCollisionError
except:
pass
class InfoSatellite(): #py:SI
@property
def carte_du_monde(self): #py:SI.world_map
"""retourne un dict qui contient l'information au
sujet du monde.
"""
import json
return json.loads(RUR.control.get_world_map())
def imprime_carte(self): #py:SI.print_world_map
"""imprime une copie formattée de la carte"""
print(RUR.control.get_world_map())
try:
RUR.reeborg_loaded = True
window.console.log("reeborg loaded")
except:
pass
#py:obsolete
# Do not tranlate the following
def nombre_d_instructions(nb):
raise ReeborgError(
"nombre_d_instructions() a été remplacé par max_nb_instructions().")
def face_au_nord():
# obsolete
raise ReeborgError("face_au_nord() est désuet;" +
" utilisez est_face_au_nord()")
| cc0-1.0 | 2,362,399,738,499,656,700 | 30.866071 | 89 | 0.594004 | false | 3.121574 | false | false | false |
houseind/robothon | GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/distutils/mingw32ccompiler.py | 2 | 8852 | """
Support code for building Python extensions on Windows.
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# 3. Force windows to use g77
"""
import os
import sys
import log
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
# NT stuff
# 1. Make sure libpython<version>.a exists for gcc. If not, build it.
# 2. Force windows to use gcc (we're struggling with MSVC and g77 support)
# --> this is done in numpy/distutils/ccompiler.py
# 3. Force windows to use g77
import distutils.cygwinccompiler
from distutils.version import StrictVersion
from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.errors import DistutilsExecError, CompileError, UnknownFileError
from distutils.unixccompiler import UnixCCompiler
from numpy.distutils.misc_util import msvc_runtime_library
# the same as cygwin plus some additional parameters
class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler):
""" A modified MingW32 compiler compatible with an MSVC built Python.
"""
compiler_type = 'mingw32'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
distutils.cygwinccompiler.CygwinCCompiler.__init__ (self,
verbose,dry_run, force)
# we need to support 3.2 which doesn't match the standard
# get_versions methods regex
if self.gcc_version is None:
import re
out = os.popen('gcc -dumpversion','r')
out_string = out.read()
out.close()
result = re.search('(\d+\.\d+)',out_string)
if result:
self.gcc_version = StrictVersion(result.group(1))
# A real mingw32 doesn't need to specify a different entry point,
# but cygwin 2.91.57 in no-cygwin-mode needs it.
if self.gcc_version <= "2.91.57":
entry_point = '--entry _DllMain@12'
else:
entry_point = ''
if self.linker_dll == 'dllwrap':
# Commented out '--driver-name g++' part that fixes weird
# g++.exe: g++: No such file or directory
# error (mingw 1.0 in Enthon24 tree, gcc-3.4.5).
# If the --driver-name part is required for some environment
# then make the inclusion of this part specific to that environment.
self.linker = 'dllwrap' # --driver-name g++'
elif self.linker_dll == 'gcc':
self.linker = 'g++'
# **changes: eric jones 4/11/01
# 1. Check for import library on Windows. Build if it doesn't exist.
build_import_library()
# **changes: eric jones 4/11/01
# 2. increased optimization and turned off all warnings
# 3. also added --driver-name g++
#self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
# compiler_so='gcc -mno-cygwin -mdll -O2 -w',
# linker_exe='gcc -mno-cygwin',
# linker_so='%s --driver-name g++ -mno-cygwin -mdll -static %s'
# % (self.linker, entry_point))
if self.gcc_version <= "3.0.0":
self.set_executables(compiler='gcc -mno-cygwin -O2 -w',
compiler_so='gcc -mno-cygwin -mdll -O2 -w -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='%s -mno-cygwin -mdll -static %s'
% (self.linker, entry_point))
else:
self.set_executables(compiler='gcc -mno-cygwin -O2 -Wall',
compiler_so='gcc -mno-cygwin -O2 -Wall -Wstrict-prototypes',
linker_exe='g++ -mno-cygwin',
linker_so='g++ -mno-cygwin -shared')
# added for python2.3 support
# we can't pass it through set_executables because pre 2.2 would fail
self.compiler_cxx = ['g++']
# Maybe we should also append -mthreads, but then the finished
# dlls need another dll (mingwm10.dll see Mingw32 docs)
# (-mthreads: Support thread-safe exception handling on `Mingw32')
# no additional libraries needed
#self.dll_libraries=[]
return
# __init__ ()
def link(self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
export_symbols = None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# Include the appropiate MSVC runtime library if Python was built
# with MSVC >= 7.0 (MinGW standard is msvcrt)
runtime_library = msvc_runtime_library()
if runtime_library:
if not libraries:
libraries = []
libraries.append(runtime_library)
args = (self,
target_desc,
objects,
output_filename,
output_dir,
libraries,
library_dirs,
runtime_library_dirs,
None, #export_symbols, we do this in our def-file
debug,
extra_preargs,
extra_postargs,
build_temp,
target_lang)
if self.gcc_version < "3.0.0":
func = distutils.cygwinccompiler.CygwinCCompiler.link
else:
func = UnixCCompiler.link
func(*args[:func.im_func.func_code.co_argcount])
return
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
# added these lines to strip off windows drive letters
# without it, .o files are placed next to .c files
# instead of the build directory
drv,base = os.path.splitdrive(base)
if drv:
base = base[1:]
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res' or ext == '.rc':
# these need to be compiled to object files
obj_names.append (os.path.join (output_dir,
base + ext + self.obj_extension))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def build_import_library():
""" Build the import libraries for Mingw32-gcc on Windows
"""
if os.name != 'nt':
return
lib_name = "python%d%d.lib" % tuple(sys.version_info[:2])
lib_file = os.path.join(sys.prefix,'libs',lib_name)
out_name = "libpython%d%d.a" % tuple(sys.version_info[:2])
out_file = os.path.join(sys.prefix,'libs',out_name)
if not os.path.isfile(lib_file):
log.warn('Cannot build import library: "%s" not found' % (lib_file))
return
if os.path.isfile(out_file):
log.debug('Skip building import library: "%s" exists' % (out_file))
return
log.info('Building import library: "%s"' % (out_file))
from numpy.distutils import lib2def
def_name = "python%d%d.def" % tuple(sys.version_info[:2])
def_file = os.path.join(sys.prefix,'libs',def_name)
nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file)
nm_output = lib2def.getnm(nm_cmd)
dlist, flist = lib2def.parse_nm(nm_output)
lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w'))
dll_name = "python%d%d.dll" % tuple(sys.version_info[:2])
args = (dll_name,def_file,out_file)
cmd = 'dlltool --dllname %s --def %s --output-lib %s' % args
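    # For example, for Python 2.7 this runs (prefix paths abbreviated):
    #   dlltool --dllname python27.dll --def <prefix>\libs\python27.def --output-lib <prefix>\libs\libpython27.a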
status = os.system(cmd)
# for now, fail silently
if status:
log.warn('Failed to build import library for gcc. Linking will fail.')
#if not success:
# msg = "Couldn't find import library, and failed to build it."
# raise DistutilsPlatformError, msg
return
| mit | -2,947,776,504,283,242,000 | 37.995595 | 96 | 0.549932 | false | 3.939475 | false | false | false |
PHOTOX/fuase | ase/ase/io/netcdftrajectory.py | 2 | 22214 | """
netcdftrajectory - I/O trajectory files in the AMBER NetCDF convention
More information on the AMBER NetCDF conventions can be found at
http://ambermd.org/netcdf/. This module supports extensions to
these conventions, such as writing of additional fields and writing to
HDF5 (NetCDF-4) files.
A Python NetCDF module is required. Supported are
netCDF4-python - http://code.google.com/p/netcdf4-python/
scipy.io.netcdf - http://docs.scipy.org/doc/scipy/reference/io.html
pupynere - https://bitbucket.org/robertodealmeida/pupynere/
Availability is checked in the above order of preference. Note that
scipy.io.netcdf and pupynere cannot write HDF5 NetCDF-4 files.
NetCDF files can be directly visualized using the libAtoms flavor of
AtomEye (http://www.libatoms.org/),
VMD (http://www.ks.uiuc.edu/Research/vmd/)
or Ovito (http://www.ovito.org/, starting with version 2.3).
"""
import os
import numpy as np
import ase
import ase.version
from ase.data import atomic_masses
from ase.lattice.spacegroup.cell import cellpar_to_cell, cell_to_cellpar
NC_NOT_FOUND = 0
NC_IS_NETCDF4 = 1
NC_IS_SCIPY = 2
NC_IS_PUPYNERE = 3
have_nc = NC_NOT_FOUND
# Check if we have netCDF4-python
try:
from netCDF4 import Dataset
have_nc = NC_IS_NETCDF4
except:
pass
#if not have_nc:
# # Check for scipy
# try:
# from scipy.io.netcdf import netcdf_file
# have_nc = NC_IS_SCIPY
# except:
# pass
if not have_nc:
# Check for pupynere (comes with ASE)
try:
from ase.io.pupynere import netcdf_file
have_nc = NC_IS_PUPYNERE
except:
pass
### Read/write NetCDF trajectories
class NetCDFTrajectory:
"""
Reads/writes Atoms objects into an AMBER-style .nc trajectory file.
"""
# netCDF4-python format strings to scipy.io.netcdf version numbers
_netCDF4_to_scipy = {'NETCDF3_CLASSIC': 1, 'NETCDF3_64BIT': 2}
_netCDF4_to_pupynere = ['NETCDF3_CLASSIC']
# Default dimension names
_frame_dim = 'frame'
_spatial_dim = 'spatial'
_atom_dim = 'atom'
_cell_spatial_dim = 'cell_spatial'
_cell_angular_dim = 'cell_angular'
_label_dim = 'label'
# Default field names. If it is a list, check for any of these names upon
# opening. Upon writing, use the first name.
_time_var = 'time'
_numbers_var = ['Z', 'atom_types', 'type']
_positions_var = 'coordinates'
_velocities_var = 'velocities'
_cell_origin_var = 'cell_origin'
_cell_lengths_var = 'cell_lengths'
_cell_angles_var = 'cell_angles'
_default_vars = reduce(lambda x, y: x + y,
[_numbers_var, [_positions_var], [_velocities_var],
[_cell_origin_var], [_cell_lengths_var],
[_cell_angles_var]])
def __init__(self, filename, mode='r', atoms=None, types_to_numbers=None,
double=True, netcdf_format='NETCDF3_CLASSIC', keep_open=None,
index_var='id', index_offset=-1):
"""
A NetCDFTrajectory can be created in read, write or append mode.
Parameters:
filename:
The name of the parameter file. Should end in .nc.
mode='r':
The mode.
'r' is read mode, the file should already exist, and no atoms
argument should be specified.
'w' is write mode. The atoms argument specifies the Atoms object
to be written to the file, if not given it must instead be given
as an argument to the write() method.
        'a' is append mode. It acts as write mode, except that data is
appended to a preexisting file.
atoms=None:
The Atoms object to be written in write or append mode.
types_to_numbers=None:
Dictionary for conversion of atom types to atomic numbers when
reading a trajectory file.
double=True:
Create new variable in double precision.
netcdf_format='NETCDF3_CLASSIC':
Format string for the underlying NetCDF file format. Only relevant
if a new file is created. More information can be found at
https://www.unidata.ucar.edu/software/netcdf/docs/netcdf/File-Format.html
'NETCDF3_CLASSIC' is the original binary format.
'NETCDF3_64BIT' can be used to write larger files.
'NETCDF4_CLASSIC' is HDF5 with some NetCDF limitations.
'NETCDF4' is HDF5.
keep_open=None:
Keep the file open during consecutive read/write operations.
Default is to close file between writes to minimize chance of data
corruption, but keep file open if file is opened in read mode.
index_var='id':
Name of variable containing the atom indices. Atoms are reordered
by this index upon reading if this variable is present. Default
value is for LAMMPS output.
index_offset=-1:
Set to 0 if atom index is zero based, set to -1 if atom index is
one based. Default value is for LAMMPS output.
"""
if not have_nc:
raise RuntimeError('NetCDFTrajectory requires a NetCDF Python '
'module.')
self.nc = None
self.numbers = None
        self.pre_observers = []    # Callback functions called before write
        self.post_observers = []   # Callback functions called after write
        self.write_counter = 0     # number of frames written, used by observers
self.has_header = False
self._set_atoms(atoms)
self.types_to_numbers = None
if types_to_numbers:
self.types_to_numbers = np.array(types_to_numbers)
self.index_var = index_var
self.index_offset = index_offset
self._default_vars += [self.index_var]
# 'l' should be a valid type according to the netcdf4-python
# documentation, but does not appear to work.
self.dtype_conv = {'l': 'i'}
if not double:
self.dtype_conv.update(dict(d='f'))
self.extra_per_frame_vars = []
self.extra_per_file_vars = []
# per frame atts are global quantities, not quantities stored for each
# atom
self.extra_per_frame_atts = []
self.mode = mode
self.netcdf_format = netcdf_format
if atoms:
self.n_atoms = len(atoms)
else:
self.n_atoms = None
self.filename = filename
if keep_open is None:
# Only netCDF4-python supports append to files
self.keep_open = self.mode == 'r' or have_nc != NC_IS_NETCDF4
else:
self.keep_open = keep_open
if (mode == 'a' or not self.keep_open) and have_nc != NC_IS_NETCDF4:
raise RuntimeError('netCDF4-python is required for append mode.')
def __del__(self):
self.close()
def _open(self):
"""
Opens the file.
For internal use only.
"""
if self.nc is not None:
return
if self.mode == 'a' and not os.path.exists(self.filename):
self.mode = 'w'
if have_nc == NC_IS_NETCDF4:
self.nc = Dataset(self.filename, self.mode,
format=self.netcdf_format)
elif have_nc == NC_IS_SCIPY:
if self.netcdf_format not in self._netCDF4_to_scipy:
raise ValueError("NetCDF format '%s' not supported by "
"scipy.io.netcdf." % self.netcdf_format)
version = self._netCDF4_to_scipy[self.netcdf_format]
if version == 1:
# This supports older scipy.io.netcdf versions that do not
# support the 'version' argument
self.nc = netcdf_file(self.filename, self.mode)
else:
self.nc = netcdf_file(
self.filename, self.mode,
version=self._netCDF4_to_scipy[self.netcdf_format]
)
elif have_nc == NC_IS_PUPYNERE:
if self.netcdf_format not in self._netCDF4_to_pupynere:
raise ValueError("NetCDF format '%s' not supported by "
"ase.io.pupynere." % self.netcdf_format)
self.nc = netcdf_file(self.filename, self.mode)
else:
# Should not happen
raise RuntimeError('Internal error: Unknown *have_nc* value.')
self.frame = 0
if self.mode == 'r' or self.mode == 'a':
self._read_header()
self.frame = self._len()
def _set_atoms(self, atoms=None):
"""
Associate an Atoms object with the trajectory.
For internal use only.
"""
if atoms is not None and not hasattr(atoms, 'get_positions'):
raise TypeError('"atoms" argument is not an Atoms object.')
self.atoms = atoms
def _read_header(self):
if not self.n_atoms:
if have_nc == NC_IS_NETCDF4:
self.n_atoms = len(self.nc.dimensions[self._atom_dim])
else:
self.n_atoms = self.nc.dimensions[self._atom_dim]
numbers_var = self._get_variable(self._numbers_var, exc=False)
if numbers_var is None:
self.numbers = np.ones(self.n_atoms, dtype=int)
else:
self.numbers = np.array(numbers_var[:])
if self.types_to_numbers is not None:
self.numbers = self.types_to_numbers[self.numbers]
self.masses = atomic_masses[self.numbers]
for name, var in self.nc.variables.iteritems():
# This can be unicode which confuses ASE
name = str(name)
# _default_vars is taken care of already
if name not in self._default_vars:
if len(var.dimensions) >= 2:
if var.dimensions[0] == self._frame_dim:
if var.dimensions[1] == self._atom_dim:
self.extra_per_frame_vars += [name]
else:
self.extra_per_frame_atts += [name]
elif len(var.dimensions) == 1:
if var.dimensions[0] == self._atom_dim:
self.extra_per_file_vars += [name]
elif var.dimensions[0] == self._frame_dim:
self.extra_per_frame_atts += [name]
self.has_header = True
def write(self, atoms=None, frame=None, arrays=None, time=None):
"""
Write the atoms to the file.
If the atoms argument is not given, the atoms object specified
when creating the trajectory object is used.
"""
self._open()
self._call_observers(self.pre_observers)
if atoms is None:
atoms = self.atoms
if hasattr(atoms, 'interpolate'):
# seems to be a NEB
neb = atoms
assert not neb.parallel
try:
neb.get_energies_and_forces(all=True)
except AttributeError:
pass
for image in neb.images:
self.write(image)
return
if not self.has_header:
self._write_header(atoms)
else:
if len(atoms) != self.n_atoms:
raise ValueError('Bad number of atoms!')
if self.frame > 0:
if (atoms.numbers != self.numbers).any():
raise ValueError('Bad atomic numbers!')
else:
self.numbers = atoms.get_atomic_numbers()
self._get_variable(self._numbers_var)[:] = \
atoms.get_atomic_numbers()
if frame is None:
i = self.frame
else:
i = frame
self._get_variable(self._positions_var)[i] = atoms.get_positions()
if atoms.has('momenta'):
self._add_velocities()
self._get_variable(self._velocities_var)[i] = \
atoms.get_momenta() / atoms.get_masses().reshape(-1, 1)
a, b, c, alpha, beta, gamma = cell_to_cellpar(atoms.get_cell())
cell_lengths = np.array([a, b, c]) * atoms.pbc
self._get_variable(self._cell_lengths_var)[i] = cell_lengths
self._get_variable(self._cell_angles_var)[i] = [alpha, beta, gamma]
if arrays is not None:
for array in arrays:
data = atoms.get_array(array)
self._add_array(atoms, array, data.dtype, data.shape)
self._get_variable(array)[i] = data
if time is not None:
self._get_variable(self._time_var)[i] = time
self._call_observers(self.post_observers)
        self.frame += 1
        self.write_counter += 1
self._close()
def write_arrays(self, atoms, frame, arrays):
self._open()
self._call_observers(self.pre_observers)
for array in arrays:
data = atoms.get_array(array)
self._add_array(atoms, array, data.dtype, data.shape)
self._get_variable(array)[frame] = data
self._call_observers(self.post_observers)
self._close()
def _define_file_structure(self, atoms):
if not hasattr(self.nc, 'Conventions'):
self.nc.Conventions = 'AMBER'
if not hasattr(self.nc, 'ConventionVersion'):
self.nc.ConventionVersion = '1.0'
if not hasattr(self.nc, 'program'):
self.nc.program = 'ASE'
if not hasattr(self.nc, 'programVersion'):
self.nc.programVersion = ase.version.version
if not self._frame_dim in self.nc.dimensions:
self.nc.createDimension(self._frame_dim, None)
if not self._spatial_dim in self.nc.dimensions:
self.nc.createDimension(self._spatial_dim, 3)
if not self._atom_dim in self.nc.dimensions:
self.nc.createDimension(self._atom_dim, len(atoms))
if not self._cell_spatial_dim in self.nc.dimensions:
self.nc.createDimension(self._cell_spatial_dim, 3)
if not self._cell_angular_dim in self.nc.dimensions:
self.nc.createDimension(self._cell_angular_dim, 3)
if not self._has_variable(self._numbers_var):
self.nc.createVariable(self._numbers_var[0], 'i',
(self._atom_dim,))
if not self._has_variable(self._positions_var):
self.nc.createVariable(self._positions_var, 'f4',
(self._frame_dim, self._atom_dim,
self._spatial_dim))
self.nc.variables[self._positions_var].units = 'Angstrom'
self.nc.variables[self._positions_var].scale_factor = 1.
if not self._has_variable(self._cell_lengths_var):
self.nc.createVariable(self._cell_lengths_var, 'd',
(self._frame_dim, self._cell_spatial_dim))
self.nc.variables[self._cell_lengths_var].units = 'Angstrom'
self.nc.variables[self._cell_lengths_var].scale_factor = 1.
if not self._has_variable(self._cell_angles_var):
self.nc.createVariable(self._cell_angles_var, 'd',
(self._frame_dim, self._cell_angular_dim))
self.nc.variables[self._cell_angles_var].units = 'degree'
def _add_velocities(self):
if not self._has_variable(self._velocities_var):
self.nc.createVariable(self._velocities_var, 'f4',
(self._frame_dim, self._atom_dim,
self._spatial_dim))
            self.nc.variables[self._velocities_var].units = \
                'Angstrom/Femtosecond'
            self.nc.variables[self._velocities_var].scale_factor = 1.
def _add_array(self, atoms, array_name, type, shape):
if not self._has_variable(array_name):
dims = [self._frame_dim]
for i in shape:
if i == len(atoms):
dims += [self._atom_dim]
elif i == 3:
dims += [self._spatial_dim]
else:
raise TypeError("Don't know how to dump array of shape {0}"
" into NetCDF trajectory.".format(shape))
try:
t = self.dtype_conv[type.char]
except:
t = type
self.nc.createVariable(array_name, t, dims)
def _get_variable(self, name, exc=True):
if isinstance(name, list):
for n in name:
if n in self.nc.variables:
return self.nc.variables[n]
if exc:
raise RuntimeError('None of the variables {0} was found in the '
'NetCDF trajectory.'.format(
reduce(lambda x, y: x + ', ' + y, name)))
return None
else:
return self.nc.variables[name]
def _has_variable(self, name):
if isinstance(name, list):
for n in name:
if n in self.nc.variables:
return True
return False
else:
return name in self.nc.variables
def _write_header(self, atoms):
self._define_file_structure(atoms)
self._get_variable(self._numbers_var)[:] = \
np.asarray(atoms.get_atomic_numbers())
def close(self):
"""Close the trajectory file."""
if self.nc is not None:
self.nc.close()
self.nc = None
def _close(self):
if not self.keep_open:
self.close()
if self.mode == 'w':
self.mode = 'a'
def __getitem__(self, i=-1):
self._open()
if isinstance(i, slice):
return [self[j] for j in range(*i.indices(self._len()))]
N = self._len()
if 0 <= i < N:
# Non-periodic boundaries have cell_length == 0.0
cell_lengths = \
np.array(self.nc.variables[self._cell_lengths_var][i][:])
pbc = np.abs(cell_lengths > 1e-6)
# Do we have a cell origin?
if self._has_variable(self._cell_origin_var):
origin = np.array(self.nc.variables[self._cell_origin_var][i][:])
else:
origin = np.zeros([3], dtype=float)
# Do we have an index variable?
if self._has_variable(self.index_var):
index = np.array(self.nc.variables[self.index_var][i][:]) +\
self.index_offset
else:
index = np.arange(self.n_atoms)
# Read positions
positions_var = self.nc.variables[self._positions_var]
positions = np.array(positions_var[i][index])
# Determine cell size for non-periodic directions
for dim in np.arange(3)[np.logical_not(pbc)]:
origin[dim] = positions[:, dim].min()
cell_lengths[dim] = positions[:, dim].max() - origin[dim]
# Construct cell shape from cell lengths and angles
cell = cellpar_to_cell(
list(cell_lengths) +
list(self.nc.variables[self._cell_angles_var][i])
)
# Compute momenta from velocities (if present)
if self._has_variable(self._velocities_var):
momenta = self.nc.variables[self._velocities_var][i][index] * \
self.masses.reshape(-1, 1)
else:
momenta = None
# Fill info dict with additional data found in the NetCDF file
info = {}
for name in self.extra_per_frame_atts:
info[name] = np.array(self.nc.variables[name][i])
# Create atoms object
atoms = ase.Atoms(
positions=positions - origin.reshape(1, -1),
numbers=self.numbers,
cell=cell,
momenta=momenta,
masses=self.masses,
pbc=pbc,
info=info
)
# Attach additional arrays found in the NetCDF file
for name in self.extra_per_frame_vars:
atoms.set_array(name, self.nc.variables[name][i][index])
for name in self.extra_per_file_vars:
atoms.set_array(name, self.nc.variables[name][:])
self._close()
return atoms
i = N + i
if i < 0 or i >= N:
self._close()
raise IndexError('Trajectory index out of range.')
return self[i]
def _len(self):
if self._frame_dim in self.nc.dimensions:
return int(self._get_variable(self._positions_var).shape[0])
else:
return 0
def __len__(self):
self._open()
n_frames = self._len()
self._close()
return n_frames
def pre_write_attach(self, function, interval=1, *args, **kwargs):
"""
Attach a function to be called before writing begins.
function: The function or callable object to be called.
interval: How often the function is called. Default: every time (1).
All other arguments are stored, and passed to the function.
"""
if not callable(function):
raise ValueError('Callback object must be callable.')
self.pre_observers.append((function, interval, args, kwargs))
def post_write_attach(self, function, interval=1, *args, **kwargs):
"""
Attach a function to be called after writing ends.
function: The function or callable object to be called.
interval: How often the function is called. Default: every time (1).
All other arguments are stored, and passed to the function.
"""
if not callable(function):
raise ValueError('Callback object must be callable.')
self.post_observers.append((function, interval, args, kwargs))
def _call_observers(self, obs):
"""Call pre/post write observers."""
for function, interval, args, kwargs in obs:
if self.write_counter % interval == 0:
function(*args, **kwargs)
| gpl-2.0 | -966,150,789,107,956,700 | 35.778146 | 85 | 0.555235 | false | 3.934467 | false | false | false |
tsotetsi/textily-web | temba/settings_common.py | 1 | 40330 | from __future__ import unicode_literals
import iptools
import os
import sys
from celery.schedules import crontab
from datetime import timedelta
from django.utils.translation import ugettext_lazy as _
# -----------------------------------------------------------------------------------
# Default to debugging
# -----------------------------------------------------------------------------------
DEBUG = True
# -----------------------------------------------------------------------------------
# Sets TESTING to True if this configuration is read during a unit test
# -----------------------------------------------------------------------------------
TESTING = sys.argv[1:2] == ['test']
if TESTING:
PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',)
DEBUG = False
ADMINS = (
('RapidPro', '[email protected]'),
)
MANAGERS = ADMINS
# hardcode the postgis version so we can do reset db's from a blank database
POSTGIS_VERSION = (2, 1)
# -----------------------------------------------------------------------------------
# set the mail settings, override these in your settings.py
# if your site was at http://temba.io, it might look like this:
# -----------------------------------------------------------------------------------
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = '[email protected]'
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_HOST_PASSWORD = 'mypassword'
EMAIL_USE_TLS = True
# Used when sending email from within a flow and the user hasn't configured
# their own SMTP server.
FLOW_FROM_EMAIL = '[email protected]'
# where recordings and exports are stored
AWS_STORAGE_BUCKET_NAME = 'dl-temba-io'
AWS_BUCKET_DOMAIN = AWS_STORAGE_BUCKET_NAME + '.s3.amazonaws.com'
STORAGE_ROOT_DIR = 'test_orgs' if TESTING else 'orgs'
# -----------------------------------------------------------------------------------
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone
# -----------------------------------------------------------------------------------
USE_TZ = True
TIME_ZONE = 'GMT'
USER_TIME_ZONE = 'Africa/Kigali'
MODELTRANSLATION_TRANSLATION_REGISTRY = "translation"
# -----------------------------------------------------------------------------------
# Default language used for this installation
# -----------------------------------------------------------------------------------
LANGUAGE_CODE = 'en-us'
# -----------------------------------------------------------------------------------
# Available languages for translation
# -----------------------------------------------------------------------------------
LANGUAGES = (
('en-us', _("English")),
('pt-br', _("Portuguese")),
('fr', _("French")),
('es', _("Spanish")))
DEFAULT_LANGUAGE = "en-us"
DEFAULT_SMS_LANGUAGE = "en-us"
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'your own secret key'
EMAIL_CONTEXT_PROCESSORS = ('temba.utils.email.link_components',)
# -----------------------------------------------------------------------------------
# Directory Configuration
# -----------------------------------------------------------------------------------
PROJECT_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)))
LOCALE_PATHS = (os.path.join(PROJECT_DIR, '../locale'),)
RESOURCES_DIR = os.path.join(PROJECT_DIR, '../resources')
FIXTURE_DIRS = (os.path.join(PROJECT_DIR, '../fixtures'),)
TESTFILES_DIR = os.path.join(PROJECT_DIR, '../testfiles')
STATICFILES_DIRS = (os.path.join(PROJECT_DIR, '../static'), os.path.join(PROJECT_DIR, '../media'), )
STATIC_ROOT = os.path.join(PROJECT_DIR, '../sitestatic')
STATIC_URL = '/sitestatic/'
COMPRESS_ROOT = os.path.join(PROJECT_DIR, '../sitestatic')
MEDIA_ROOT = os.path.join(PROJECT_DIR, '../media')
MEDIA_URL = "/media/"
# -----------------------------------------------------------------------------------
# Templates Configuration
# -----------------------------------------------------------------------------------
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(PROJECT_DIR, '../templates')],
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request',
'temba.context_processors.branding',
'temba.orgs.context_processors.user_group_perms_processor',
'temba.orgs.context_processors.unread_count_processor',
'temba.channels.views.channel_status_processor',
'temba.msgs.views.send_message_auto_complete_processor',
'temba.api.views.webhook_status_processor',
'temba.orgs.context_processors.settings_includer',
],
'loaders': [
'temba.utils.haml.HamlFilesystemLoader',
'temba.utils.haml.HamlAppDirectoriesLoader',
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
'django.template.loaders.eggs.Loader'
],
'debug': False if TESTING else DEBUG
},
},
]
if TESTING:
TEMPLATES[0]['OPTIONS']['context_processors'] += ('temba.tests.add_testing_flag_to_context', )
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'temba.utils.middleware.DisableMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'temba.middleware.BrandingMiddleware',
'temba.middleware.OrgTimezoneMiddleware',
'temba.middleware.FlowSimulationMiddleware',
'temba.middleware.ActivateLanguageMiddleware',
'temba.middleware.NonAtomicGetsMiddleware',
'temba.utils.middleware.OrgHeaderMiddleware',
)
ROOT_URLCONF = 'temba.urls'
# other urls to add
APP_URLS = []
SITEMAP = ('public.public_index',
'public.public_blog',
'public.video_list',
'api')
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.humanize',
'django.contrib.gis',
# django sitemaps
'django.contrib.sitemaps',
'redis',
# mo-betta permission management
'guardian',
# rest framework for api access
'rest_framework',
'rest_framework.authtoken',
# compress our CSS and js
'compressor',
# smartmin
'smartmin',
'smartmin.csv_imports',
'smartmin.users',
# django-timezone-field
'timezone_field',
# temba apps
'temba.assets',
'temba.auth_tweaks',
'temba.api',
'temba.public',
'temba.schedules',
'temba.orgs',
'temba.contacts',
'temba.channels',
'temba.msgs',
'temba.flows',
'temba.reports',
'temba.triggers',
'temba.utils',
'temba.campaigns',
'temba.ivr',
'temba.ussd',
'temba.locations',
'temba.values',
'temba.airtime',
)
# the last installed app that uses smartmin permissions
PERMISSIONS_APP = 'temba.airtime'
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'root': {
'level': 'WARNING',
'handlers': ['console'],
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'null': {
'class': 'logging.NullHandler',
},
},
'loggers': {
'pycountry': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
'django.security.DisallowedHost': {
'handlers': ['null'],
'propagate': False,
},
'django.db.backends': {
'level': 'ERROR',
'handlers': ['console'],
'propagate': False,
},
},
}
# -----------------------------------------------------------------------------------
# Branding Configuration
# -----------------------------------------------------------------------------------
BRANDING = {
'rapidpro.io': {
'slug': 'rapidpro',
'name': 'RapidPro',
'org': 'UNICEF',
'colors': dict(primary='#0c6596'),
'styles': ['brands/rapidpro/font/style.css'],
'welcome_topup': 1000,
'email': '[email protected]',
'support_email': '[email protected]',
'link': 'https://app.rapidpro.io',
'api_link': 'https://api.rapidpro.io',
'docs_link': 'http://docs.rapidpro.io',
'domain': 'app.rapidpro.io',
'favico': 'brands/rapidpro/rapidpro.ico',
'splash': '/brands/rapidpro/splash.jpg',
'logo': '/brands/rapidpro/logo.png',
'allow_signups': True,
'tiers': dict(import_flows=0, multi_user=0, multi_org=0),
'bundles': [],
'welcome_packs': [dict(size=5000, name="Demo Account"), dict(size=100000, name="UNICEF Account")],
'description': _("Visually build nationally scalable mobile applications from anywhere in the world."),
'credits': _("Copyright © 2012-2017 UNICEF, Nyaruka. All Rights Reserved.")
}
}
DEFAULT_BRAND = 'rapidpro.io'
# -----------------------------------------------------------------------------------
# Permission Management
# -----------------------------------------------------------------------------------
# this lets us easily create new permissions across our objects
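# For example, an entry such as
#     'msgs.label': ('api', 'create', 'create_folder'),
# yields permission codenames of the form '<app>.<model>_<action>'
# ('msgs.label_api', 'msgs.label_create', ...), which is the format referenced
# in GROUP_PERMISSIONS below; the '*' entry adds the create/read/update/delete/
# list actions to every object. (Illustration of how smartmin consumes this
# setting, not an exhaustive specification.)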
PERMISSIONS = {
'*': ('create', # can create an object
          'read',   # can read an object, viewing its details
'update', # can update an object
'delete', # can delete an object,
'list'), # can view a list of the objects
'api.apitoken': ('refresh',),
'api.resthook': ('api', 'list'),
'api.webhookevent': ('api',),
'api.resthooksubscriber': ('api',),
'campaigns.campaign': ('api',
'archived',
),
'campaigns.campaignevent': ('api',),
'contacts.contact': ('api',
'block',
'blocked',
'break_anon',
'customize',
'export',
'stopped',
'filter',
'history',
'import',
'omnibox',
'unblock',
'unstop',
'update_fields',
'update_fields_input'
),
'contacts.contactfield': ('api',
'json',
'managefields'),
'contacts.contactgroup': ('api',),
'ivr.ivrcall': ('start',),
'locations.adminboundary': ('alias',
'api',
'boundaries',
'geometry'),
'orgs.org': ('accounts',
'smtp_server',
'api',
'country',
'clear_cache',
'create_login',
'create_sub_org',
'download',
'edit',
'edit_sub_org',
'export',
'grant',
'home',
'import',
'join',
'languages',
'manage',
'manage_accounts',
'manage_accounts_sub_org',
'nexmo_configuration',
'nexmo_account',
'nexmo_connect',
'plivo_connect',
'profile',
'resthooks',
'service',
'signup',
'sub_orgs',
'surveyor',
'transfer_credits',
'transfer_to_account',
'trial',
'twilio_account',
'twilio_connect',
'webhook',
),
'orgs.usersettings': ('phone',),
'channels.channel': ('api',
'bulk_sender_options',
'claim',
'claim_africas_talking',
'claim_android',
'claim_blackmyna',
'claim_chikka',
'claim_clickatell',
'claim_dart_media',
'claim_external',
'claim_facebook',
'claim_globe',
'claim_high_connection',
'claim_hub9',
'claim_infobip',
'claim_jasmin',
'claim_junebug',
'claim_kannel',
'claim_line',
'claim_m3tech',
'claim_mblox',
'claim_nexmo',
'claim_plivo',
'claim_shaqodoon',
'claim_smscentral',
'claim_start',
'claim_telegram',
'claim_twilio',
'claim_twiml_api',
'claim_twilio_messaging_service',
'claim_twitter',
'claim_verboice',
'claim_viber',
'claim_viber_public',
'create_viber',
'claim_vumi',
'claim_vumi_ussd',
'claim_yo',
'claim_zenvia',
'configuration',
'create_bulk_sender',
'create_caller',
'errors',
'facebook_whitelist',
'search_nexmo',
'search_numbers',
),
'channels.channelevent': ('api',
'calls'),
'flows.flowstart': ('api',),
'flows.flow': ('activity',
'activity_chart',
'activity_list',
'analytics',
'api',
'archived',
'broadcast',
'campaign',
'completion',
'copy',
'editor',
'export',
'export_results',
'filter',
'json',
'read',
'recent_messages',
'results',
'revisions',
'run_table',
'simulate',
'upload_action_recording',
),
'flows.ruleset': ('analytics',
'choropleth',
'map',
'results',
),
'msgs.msg': ('api',
'archive',
'archived',
'export',
'failed',
'filter',
'flow',
'inbox',
'label',
'outbox',
'sent',
'test',
'update',
),
'msgs.broadcast': ('api',
'detail',
'schedule',
'schedule_list',
'schedule_read',
'send',
),
'msgs.label': ('api', 'create', 'create_folder'),
'orgs.topup': ('manage',),
'triggers.trigger': ('archived',
'catchall',
'follow',
'inbound_call',
'keyword',
'missed_call',
'new_conversation',
'referral',
'register',
'schedule',
'ussd',
),
}
# assigns the permissions that each group should have
GROUP_PERMISSIONS = {
"Service Users": ( # internal Temba services have limited permissions
'msgs.msg_create',
),
"Alpha": (
),
"Beta": (
),
"Surveyors": (
'contacts.contact_api',
'contacts.contactfield_api',
'flows.flow_api',
'locations.adminboundary_api',
'orgs.org_api',
'orgs.org_surveyor',
'msgs.msg_api',
),
"Granters": (
'orgs.org_grant',
),
"Customer Support": (
'auth.user_list',
'auth.user_update',
'contacts.contact_break_anon',
'flows.flow_editor',
'flows.flow_json',
'flows.flow_read',
'flows.flow_revisions',
'orgs.org_dashboard',
'orgs.org_grant',
'orgs.org_manage',
'orgs.org_update',
'orgs.org_service',
'orgs.topup_create',
'orgs.topup_manage',
'orgs.topup_update',
),
"Administrators": (
'airtime.airtimetransfer_list',
'airtime.airtimetransfer_read',
'api.apitoken_refresh',
'api.resthook_api',
'api.resthook_list',
'api.resthooksubscriber_api',
'api.webhookevent_api',
'api.webhookevent_list',
'api.webhookevent_read',
'campaigns.campaign.*',
'campaigns.campaignevent.*',
'contacts.contact_api',
'contacts.contact_block',
'contacts.contact_blocked',
'contacts.contact_create',
'contacts.contact_customize',
'contacts.contact_delete',
'contacts.contact_export',
'contacts.contact_filter',
'contacts.contact_history',
'contacts.contact_import',
'contacts.contact_list',
'contacts.contact_omnibox',
'contacts.contact_read',
'contacts.contact_stopped',
'contacts.contact_unblock',
'contacts.contact_unstop',
'contacts.contact_update',
'contacts.contact_update_fields',
'contacts.contact_update_fields_input',
'contacts.contactfield.*',
'contacts.contactgroup.*',
'csv_imports.importtask.*',
'ivr.ivrcall.*',
'ussd.ussdsession.*',
'locations.adminboundary_alias',
'locations.adminboundary_api',
'locations.adminboundary_boundaries',
'locations.adminboundary_geometry',
'orgs.org_accounts',
'orgs.org_smtp_server',
'orgs.org_api',
'orgs.org_country',
'orgs.org_create_sub_org',
'orgs.org_download',
'orgs.org_edit',
'orgs.org_edit_sub_org',
'orgs.org_export',
'orgs.org_home',
'orgs.org_import',
'orgs.org_languages',
'orgs.org_manage_accounts',
'orgs.org_manage_accounts_sub_org',
'orgs.org_nexmo_account',
'orgs.org_nexmo_connect',
'orgs.org_nexmo_configuration',
'orgs.org_plivo_connect',
'orgs.org_profile',
'orgs.org_resthooks',
'orgs.org_sub_orgs',
'orgs.org_transfer_credits',
'orgs.org_transfer_to_account',
'orgs.org_twilio_account',
'orgs.org_twilio_connect',
'orgs.org_webhook',
'orgs.topup_list',
'orgs.topup_read',
'orgs.usersettings_phone',
'orgs.usersettings_update',
'channels.channel_claim_nexmo',
'channels.channel_api',
'channels.channel_bulk_sender_options',
'channels.channel_claim',
'channels.channel_claim_africas_talking',
'channels.channel_claim_android',
'channels.channel_claim_blackmyna',
'channels.channel_claim_chikka',
'channels.channel_claim_clickatell',
'channels.channel_claim_dart_media',
'channels.channel_claim_external',
'channels.channel_claim_facebook',
'channels.channel_claim_globe',
'channels.channel_claim_high_connection',
'channels.channel_claim_hub9',
'channels.channel_claim_infobip',
'channels.channel_claim_jasmin',
'channels.channel_claim_junebug',
'channels.channel_claim_kannel',
'channels.channel_claim_line',
'channels.channel_claim_mblox',
'channels.channel_claim_m3tech',
'channels.channel_claim_plivo',
'channels.channel_claim_shaqodoon',
'channels.channel_claim_smscentral',
'channels.channel_claim_start',
'channels.channel_claim_telegram',
'channels.channel_claim_twilio',
'channels.channel_claim_twiml_api',
'channels.channel_claim_twilio_messaging_service',
'channels.channel_claim_twitter',
'channels.channel_claim_verboice',
'channels.channel_claim_viber',
'channels.channel_claim_viber_public',
'channels.channel_create_viber',
'channels.channel_claim_vumi',
'channels.channel_claim_vumi_ussd',
'channels.channel_claim_yo',
'channels.channel_claim_zenvia',
'channels.channel_configuration',
'channels.channel_create',
'channels.channel_create_bulk_sender',
'channels.channel_create_caller',
'channels.channel_facebook_whitelist',
'channels.channel_delete',
'channels.channel_list',
'channels.channel_read',
'channels.channel_search_nexmo',
'channels.channel_search_numbers',
'channels.channel_update',
'channels.channelevent.*',
'channels.channellog_list',
'channels.channellog_read',
'reports.report.*',
'flows.flow.*',
'flows.flowstart_api',
'flows.flowlabel.*',
'flows.ruleset.*',
'schedules.schedule.*',
'msgs.broadcast.*',
'msgs.broadcastschedule.*',
'msgs.label.*',
'msgs.msg_api',
'msgs.msg_archive',
'msgs.msg_archived',
'msgs.msg_delete',
'msgs.msg_export',
'msgs.msg_failed',
'msgs.msg_filter',
'msgs.msg_flow',
'msgs.msg_inbox',
'msgs.msg_label',
'msgs.msg_outbox',
'msgs.msg_sent',
'msgs.msg_update',
'triggers.trigger.*',
),
"Editors": (
'api.apitoken_refresh',
'api.resthook_api',
'api.resthook_list',
'api.resthooksubscriber_api',
'api.webhookevent_api',
'api.webhookevent_list',
'api.webhookevent_read',
'airtime.airtimetransfer_list',
'airtime.airtimetransfer_read',
'campaigns.campaign.*',
'campaigns.campaignevent.*',
'contacts.contact_api',
'contacts.contact_block',
'contacts.contact_blocked',
'contacts.contact_create',
'contacts.contact_customize',
'contacts.contact_delete',
'contacts.contact_export',
'contacts.contact_filter',
'contacts.contact_history',
'contacts.contact_import',
'contacts.contact_list',
'contacts.contact_omnibox',
'contacts.contact_read',
'contacts.contact_stopped',
'contacts.contact_unblock',
'contacts.contact_unstop',
'contacts.contact_update',
'contacts.contact_update_fields',
'contacts.contact_update_fields_input',
'contacts.contactfield.*',
'contacts.contactgroup.*',
'csv_imports.importtask.*',
'ivr.ivrcall.*',
'ussd.ussdsession.*',
'locations.adminboundary_alias',
'locations.adminboundary_api',
'locations.adminboundary_boundaries',
'locations.adminboundary_geometry',
'orgs.org_api',
'orgs.org_download',
'orgs.org_export',
'orgs.org_home',
'orgs.org_import',
'orgs.org_profile',
'orgs.org_resthooks',
'orgs.org_webhook',
'orgs.topup_list',
'orgs.topup_read',
'orgs.usersettings_phone',
'orgs.usersettings_update',
'channels.channel_api',
'channels.channel_bulk_sender_options',
'channels.channel_claim',
'channels.channel_claim_africas_talking',
'channels.channel_claim_android',
'channels.channel_claim_blackmyna',
'channels.channel_claim_chikka',
'channels.channel_claim_clickatell',
'channels.channel_claim_dart_media',
'channels.channel_claim_external',
'channels.channel_claim_facebook',
'channels.channel_claim_globe',
'channels.channel_claim_high_connection',
'channels.channel_claim_hub9',
'channels.channel_claim_infobip',
'channels.channel_claim_jasmin',
'channels.channel_claim_junebug',
'channels.channel_claim_kannel',
'channels.channel_claim_line',
'channels.channel_claim_mblox',
'channels.channel_claim_m3tech',
'channels.channel_claim_plivo',
'channels.channel_claim_shaqodoon',
'channels.channel_claim_smscentral',
'channels.channel_claim_start',
'channels.channel_claim_telegram',
'channels.channel_claim_twilio',
'channels.channel_claim_twiml_api',
'channels.channel_claim_twilio_messaging_service',
'channels.channel_claim_twitter',
'channels.channel_claim_verboice',
'channels.channel_claim_viber',
'channels.channel_claim_viber_public',
'channels.channel_create_viber',
'channels.channel_claim_vumi',
'channels.channel_claim_vumi_ussd',
'channels.channel_claim_yo',
'channels.channel_claim_zenvia',
'channels.channel_configuration',
'channels.channel_create',
'channels.channel_create_bulk_sender',
'channels.channel_create_caller',
'channels.channel_delete',
'channels.channel_list',
'channels.channel_read',
'channels.channel_search_numbers',
'channels.channel_update',
'channels.channelevent.*',
'reports.report.*',
'flows.flow.*',
'flows.flowstart_api',
'flows.flowlabel.*',
'flows.ruleset.*',
'schedules.schedule.*',
'msgs.broadcast.*',
'msgs.broadcastschedule.*',
'msgs.label.*',
'msgs.msg_api',
'msgs.msg_archive',
'msgs.msg_archived',
'msgs.msg_delete',
'msgs.msg_export',
'msgs.msg_failed',
'msgs.msg_filter',
'msgs.msg_flow',
'msgs.msg_inbox',
'msgs.msg_label',
'msgs.msg_outbox',
'msgs.msg_sent',
'msgs.msg_update',
'triggers.trigger.*',
),
"Viewers": (
'api.resthook_list',
'campaigns.campaign_archived',
'campaigns.campaign_list',
'campaigns.campaign_read',
'campaigns.campaignevent_read',
'contacts.contact_blocked',
'contacts.contact_export',
'contacts.contact_filter',
'contacts.contact_history',
'contacts.contact_list',
'contacts.contact_read',
'contacts.contact_stopped',
'locations.adminboundary_boundaries',
'locations.adminboundary_geometry',
'locations.adminboundary_alias',
'orgs.org_download',
'orgs.org_export',
'orgs.org_home',
'orgs.org_profile',
'orgs.topup_list',
'orgs.topup_read',
'channels.channel_list',
'channels.channel_read',
'channels.channelevent_calls',
'flows.flow_activity',
'flows.flow_activity_chart',
'flows.flow_archived',
'flows.flow_campaign',
'flows.flow_completion',
'flows.flow_export',
'flows.flow_export_results',
'flows.flow_filter',
'flows.flow_list',
'flows.flow_read',
'flows.flow_editor',
'flows.flow_json',
'flows.flow_recent_messages',
'flows.flow_results',
'flows.flow_run_table',
'flows.flow_simulate',
'flows.ruleset_analytics',
'flows.ruleset_results',
'flows.ruleset_choropleth',
'msgs.broadcast_schedule_list',
'msgs.broadcast_schedule_read',
'msgs.msg_archived',
'msgs.msg_export',
'msgs.msg_failed',
'msgs.msg_filter',
'msgs.msg_flow',
'msgs.msg_inbox',
'msgs.msg_outbox',
'msgs.msg_sent',
'triggers.trigger_archived',
'triggers.trigger_list',
)
}
# -----------------------------------------------------------------------------------
# Login / Logout
# -----------------------------------------------------------------------------------
LOGIN_URL = "/users/login/"
LOGOUT_URL = "/users/logout/"
LOGIN_REDIRECT_URL = "/org/choose/"
LOGOUT_REDIRECT_URL = "/"
# -----------------------------------------------------------------------------------
# Guardian Configuration
# -----------------------------------------------------------------------------------
AUTHENTICATION_BACKENDS = (
'smartmin.backends.CaseInsensitiveBackend',
'guardian.backends.ObjectPermissionBackend',
)
ANONYMOUS_USER_NAME = 'AnonymousUser'
# -----------------------------------------------------------------------------------
# Our test runner is standard but with ability to exclude apps
# -----------------------------------------------------------------------------------
TEST_RUNNER = 'temba.tests.ExcludeTestRunner'
TEST_EXCLUDE = ('smartmin',)
# -----------------------------------------------------------------------------------
# Debug Toolbar
# -----------------------------------------------------------------------------------
INTERNAL_IPS = iptools.IpRangeList(
'127.0.0.1',
'192.168.0.10',
'192.168.0.0/24', # network block
'0.0.0.0'
)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False, # disable redirect traps
}
# -----------------------------------------------------------------------------------
# Crontab Settings ..
# -----------------------------------------------------------------------------------
CELERYBEAT_SCHEDULE = {
"retry-webhook-events": {
'task': 'retry_events_task',
'schedule': timedelta(seconds=300),
},
"check-channels": {
'task': 'check_channels_task',
'schedule': timedelta(seconds=300),
},
"schedules": {
'task': 'check_schedule_task',
'schedule': timedelta(seconds=60),
},
"campaigns": {
'task': 'check_campaigns_task',
'schedule': timedelta(seconds=60),
},
"check-flows": {
'task': 'check_flows_task',
'schedule': timedelta(seconds=60),
},
"check-flow-timeouts": {
'task': 'check_flow_timeouts_task',
'schedule': timedelta(seconds=20),
},
"check-credits": {
'task': 'check_credits_task',
'schedule': timedelta(seconds=900)
},
"check-messages-task": {
'task': 'check_messages_task',
'schedule': timedelta(seconds=300)
},
"fail-old-messages": {
'task': 'fail_old_messages',
'schedule': crontab(hour=0, minute=0),
},
"purge-broadcasts": {
'task': 'purge_broadcasts_task',
'schedule': crontab(hour=1, minute=0),
},
"clear-old-msg-external-ids": {
'task': 'clear_old_msg_external_ids',
'schedule': crontab(hour=2, minute=0),
},
"trim-channel-log": {
'task': 'trim_channel_log_task',
'schedule': crontab(hour=3, minute=0),
},
"calculate-credit-caches": {
'task': 'calculate_credit_caches',
'schedule': timedelta(days=3),
},
"squash-flowruncounts": {
'task': 'squash_flowruncounts',
'schedule': timedelta(seconds=300),
},
"squash-flowpathcounts": {
'task': 'squash_flowpathcounts',
'schedule': timedelta(seconds=300),
},
"prune-flowpathrecentsteps": {
'task': 'prune_flowpathrecentsteps',
'schedule': timedelta(seconds=300),
},
"squash-channelcounts": {
'task': 'squash_channelcounts',
'schedule': timedelta(seconds=300),
},
"squash-systemlabels": {
'task': 'squash_systemlabels',
'schedule': timedelta(seconds=300),
},
"squash-topupcredits": {
'task': 'squash_topupcredits',
'schedule': timedelta(seconds=300),
},
"squash-contactgroupcounts": {
'task': 'squash_contactgroupcounts',
'schedule': timedelta(seconds=300),
},
}
# Mapping of task name to task function path, used when CELERY_ALWAYS_EAGER is set to True
CELERY_TASK_MAP = {
'send_msg_task': 'temba.channels.tasks.send_msg_task',
'start_msg_flow_batch': 'temba.flows.tasks.start_msg_flow_batch_task',
'handle_event_task': 'temba.msgs.tasks.handle_event_task',
}
# -----------------------------------------------------------------------------------
# Async tasks with celery
# -----------------------------------------------------------------------------------
REDIS_HOST = 'localhost'
REDIS_PORT = 6379
# we use a redis db of 10 for testing so that we maintain caches for dev
REDIS_DB = 10 if TESTING else 15
BROKER_URL = 'redis://%s:%d/%d' % (REDIS_HOST, REDIS_PORT, REDIS_DB)
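# e.g. with the defaults above this resolves to 'redis://localhost:6379/15'
# (or database 10 while the test suite is running)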
# by default, celery doesn't have any timeout on our redis connections, this fixes that
BROKER_TRANSPORT_OPTIONS = {'socket_timeout': 5}
CELERY_RESULT_BACKEND = None
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
IS_PROD = False
HOSTNAME = "localhost"
# The URL and port of the proxy server to use when needed (if any, in requests format)
OUTGOING_PROXIES = {}
# -----------------------------------------------------------------------------------
# Cache to Redis
# -----------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": "redis://%s:%s/%s" % (REDIS_HOST, REDIS_PORT, REDIS_DB),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
}
}
}
# -----------------------------------------------------------------------------------
# Django-rest-framework configuration
# -----------------------------------------------------------------------------------
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'temba.api.support.APITokenAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'temba.api.support.OrgRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'v2': '2500/hour',
'v2.contacts': '2500/hour',
'v2.messages': '2500/hour',
'v2.runs': '2500/hour',
'v2.api': '2500/hour',
},
'PAGE_SIZE': 250,
'DEFAULT_RENDERER_CLASSES': (
'temba.api.support.DocumentationRenderer',
'rest_framework.renderers.JSONRenderer',
'rest_framework_xml.renderers.XMLRenderer',
),
'EXCEPTION_HANDLER': 'temba.api.support.temba_exception_handler',
'UNICODE_JSON': False
}
REST_HANDLE_EXCEPTIONS = not TESTING
# -----------------------------------------------------------------------------------
# Django Compressor configuration
# -----------------------------------------------------------------------------------
if TESTING:
# if only testing, disable coffeescript and less compilation
COMPRESS_PRECOMPILERS = ()
else:
COMPRESS_PRECOMPILERS = (
('text/less', 'lessc --include-path="%s" {infile} {outfile}' % os.path.join(PROJECT_DIR, '../static', 'less')),
('text/coffeescript', 'coffee --compile --stdio')
)
COMPRESS_ENABLED = False
COMPRESS_OFFLINE = False
# build up our offline compression context based on available brands
COMPRESS_OFFLINE_CONTEXT = []
for brand in BRANDING.values():
context = dict(STATIC_URL=STATIC_URL, base_template='frame.html', debug=False, testing=False)
context['brand'] = dict(slug=brand['slug'], styles=brand['styles'])
COMPRESS_OFFLINE_CONTEXT.append(context)
MAGE_API_URL = 'http://localhost:8026/api/v1'
MAGE_AUTH_TOKEN = '___MAGE_TOKEN_YOU_PICK__'
# -----------------------------------------------------------------------------------
# RapidPro configuration settings
# -----------------------------------------------------------------------------------
######
# DANGER: only turn this on if you know what you are doing!
# could cause messages to be sent to live customer aggregators
SEND_MESSAGES = False
######
# DANGER: only turn this on if you know what you are doing!
# could cause external APIs to be called in test environment
SEND_WEBHOOKS = False
######
# DANGER: only turn this on if you know what you are doing!
# could cause emails to be sent in test environment
SEND_EMAILS = False
######
# DANGER: only turn this on if you know what you are doing!
# could cause airtime transfers in test environment
SEND_AIRTIME = False
######
# DANGER: only turn this on if you know what you are doing!
# could cause calls in test environments
SEND_CALLS = False
MESSAGE_HANDLERS = ['temba.triggers.handlers.TriggerHandler',
'temba.flows.handlers.FlowHandler',
'temba.triggers.handlers.CatchAllHandler']
# -----------------------------------------------------------------------------------
# Store sessions in our cache
# -----------------------------------------------------------------------------------
SESSION_ENGINE = "django.contrib.sessions.backends.cached_db"
SESSION_CACHE_ALIAS = "default"
# -----------------------------------------------------------------------------------
# 3rd Party Integration Keys
# -----------------------------------------------------------------------------------
TWITTER_API_KEY = os.environ.get('TWITTER_API_KEY', 'MISSING_TWITTER_API_KEY')
TWITTER_API_SECRET = os.environ.get('TWITTER_API_SECRET', 'MISSING_TWITTER_API_SECRET')
SEGMENT_IO_KEY = os.environ.get('SEGMENT_IO_KEY', '')
LIBRATO_USER = os.environ.get('LIBRATO_USER', '')
LIBRATO_TOKEN = os.environ.get('LIBRATO_TOKEN', '')
# -----------------------------------------------------------------------------------
# IP Addresses
# These are the externally accessible IP addresses of the servers running RapidPro.
# Needed for channel types that authenticate by whitelisting public IPs.
#
# You need to change these to your servers' real public IP addresses for such channels to work.
# -----------------------------------------------------------------------------------
IP_ADDRESSES = ('172.16.10.10', '162.16.10.20')
| agpl-3.0 | 4,250,592,255,089,422,300 | 31.868786 | 119 | 0.501389 | false | 4.141933 | true | false | false |
mariosv/twitter-network | src/collector.py | 1 | 2178 | # This file is part of twitter-followers.
#
# Copyright (C) 2013 Marios Visvardis <[email protected]>
#
# twitter-followers is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# twitter-followers is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with twitter-followers. If not, see <http://www.gnu.org/licenses/>.
import sys
import networkx as nx
from client import Client_error
class Collector(object):
"""When collect() is called follower nodes are recursively visited and
their connections are saved in a graph structure.
Usage:
Collector must be initialized with a connected Client instance.
Call collect() to start the process and collect the results via the
graph attribute.
"""
def __init__(self, client, conf):
self._client = client
self._conf = conf
self._visited = set()
self.graph = nx.DiGraph()
def collect(self, start_node):
self._visit(start_node, self._conf.depth)
def _visit(self, uid, depth):
# terminate recursion
        if depth == 0:
return
depth -= 1
f = None
try:
try:
cuid = int(uid)
ctype = 'user_id'
except:
cuid = uid
ctype = 'screen_name'
f = self._client.get_followers(**{ctype: cuid})
except Client_error as e:
sys.stderr.write('Error: %s\n' % str(e))
sys.exit(1)
print('%s followers: %d' % (str(uid), len(f)))
for i in f:
self.graph.add_edge(i, uid)
if i in self._visited:
continue
self._visit(i, depth)
self._visited.add(uid)
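# Minimal usage sketch (illustrative only; ``Client`` comes from client.py and
# its construction/connection is assumed to happen elsewhere -- ``conf`` only
# needs a ``depth`` attribute here):
#
#     collector = Collector(connected_client, conf)
#     collector.collect('some_screen_name')
#     nx.write_edgelist(collector.graph, 'followers.edgelist')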
| gpl-3.0 | 6,474,551,986,281,247,000 | 31.029412 | 78 | 0.610652 | false | 4.125 | false | false | false |
SCOAP3/scoap3 | www/nations.py | 1 | 32845 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2014, 2015 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from cgi import escape
from urllib import urlencode
from invenio.webinterface_handler_config import HTTP_BAD_REQUEST, SERVER_RETURN
from invenio.webpage import pagefooteronly, pageheaderonly, page
from invenio.search_engine import perform_request_search
from invenio.search_engine import (get_coll_i18nname,
get_record,
get_collection_reclist,
get_creation_date)
from invenio.dbquery import run_sql
from invenio.utils import NATIONS_DEFAULT_MAP, multi_replace, get_doi
from invenio.bibrecord import record_get_field_values, record_get_field_value, field_get_subfield_values
import re
_AFFILIATIONS = (sorted(list(set(NATIONS_DEFAULT_MAP.values())))
+ ['HUMAN CHECK'])
CFG_JOURNALS = ['Acta',
'Advances in High Energy Physics',
'Chinese Physics C',
'European Physical Journal C',
'Journal of Cosmology and Astroparticle Physics',
'Journal of High Energy Physics',
'New Journal of Physics',
'Nuclear Physics B',
'Physics Letters B',
'Progress of Theoretical and Experimental Physics']
CFG_SELECTED_AFF = {'Andrews University':
('Andrews University',),
'Arkansas State University':
('Arkansas State University',),
'Black Hills State University':
('Black Hills State University',),
'Boise State University':
('Boise State University',),
'Brookhaven National Laboratory':
('Brookhaven National Laboratory',),
'Brown University':
('Brown University',),
'Chicago State University':
('Chicago State University',),
'Columbia University':
('Columbia University',),
'Creighton University':
('Creighton University',),
'Fairfield University':
('Fairfield University',),
'George Washington University':
('George Washington University',),
'Hampton University':
('Hampton University',),
'Houston Advanced Research Center':
('Houston Advanced Research Center',),
'Janelia Farm Research Campus':
('Janelia Farm Research Campus',),
'Long Island University':
('Long Island University',),
'Louisiana Tech University':
('Louisiana Tech University',),
'Luther College':
('Luther College',),
'Manhattan College':
('Manhattan College',),
'Milwaukee School of Engineering':
('Milwaukee School of Engineering',),
'Mississippi State University':
('Mississippi State University',),
'Muhlenberg College':
('Muhlenberg College',),
'New York City College of Technology':
('New York City College of Technology',),
'North Carolina Central University':
('North Carolina Central University',),
'Northern Illinois University':
('Northern Illinois University',),
'Oklahoma State University':
('Oklahoma State University',),
'Pacific Lutheran University':
('Pacific Lutheran University',),
'Philander Smith College':
('Philander Smith College',),
'Rutgers University':
('Rutgers University',),
'South Dakota School of Mines and Technology':
('South Dakota School of Mines and Tec',),
'Stanford University':
('Stanford University',),
'State University of New York (or SUNY) Albany':
('SUNY Albany', 'University at Albany (SUNY)', 'Albany'),
'State University of New York (or SUNY) Buffalo':
('University at Buffalo',
'State University of New York at Buffalo'),
'Syracuse University':
('Syracuse University',),
'Tennessee Tech University':
('Tennessee Tech University',),
'Texas Tech University':
('Texas Tech University',),
'The George Washington University':
('The George Washington University',),
('The Graduate School and University Center, '
'The City University of New York'):
(('The Graduate School and University Center, '
'The City University o'),),
'The Rockefeller University':
('The Rockefeller University',),
'The University of Alabama, Tuscaloosa':
('The University of Alabama, Tuscaloosa',),
'The University of Mississippi':
('The University of Mississippi',),
'Triangle Universities Nuclear Laboratory':
('Triangle Universities Nuclear Laboratory',),
'University of Connecticut':
('University of Connecticut',),
'University of Hawaii':
('University of Hawaii',),
'University of Houston':
('University of Houston',),
'University of Puerto Rico':
('University of Puerto Rico',),
'University of South Dakota':
('University of South Dakota',),
'Utah Valley University':
('Utah Valley University',),
'Virginia Military Institute':
('Virginia Military Institute',),
'Wayne State University':
('Wayne State University',),
'Wayne University':
('Wayne State university',),
'Western Michigan University':
('Western Michigan University',),
'Yale University': ('Yale University',)}
def _build_query(nation):
return '100__w:"{0}" OR 700__w:"{0}"'.format(nation)
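# e.g. _build_query('Italy') -> '100__w:"Italy" OR 700__w:"Italy"', i.e. the
# nation is looked for in the 100__w / 700__w (first / additional author)
# MARC fields that hold the processed affiliation country.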
def index(req):
req.content_type = "text/html"
req.write(pageheaderonly("Nation numbers", req=req))
req.write("<h1>Nation numbers</h1>")
req.flush()
req.write("<table>\n")
tr = ("<tr>"
"<td>{0}</td>"
"<td><a href='/search?{1}&sc=1'>{2}</a></td>"
"<td><a href='/nations.py/articles?i={3}' "
"target='_blank'>Articles</a> "
"(<a href='/nations.py/articles?mode=text&i={3}'>text</a>)"
"</td><tr>\n")
for i, nation in enumerate(_AFFILIATIONS):
query = _build_query(nation)
results = perform_request_search(p=query, of='intbitset')
req.write(tr.format(escape(nation),
escape(urlencode([("p", query)]), True),
len(results),
i))
req.flush()
req.write("</table>\n")
req.write(pagefooteronly(req=req))
return ""
def late(req):
req.content_type = "text/html"
print >> req, pageheaderonly("Late journals", req=req)
th = ("<tr><th>DOI</th><th>Title</th><th>DOI registration</th>"
"<th>Arrival in SCOAP3</th></tr>")
tr = ("<tr style='background-color: {0};'><td>"
"<a href='http://dx.doi.org/{1}' target='_blank'>{2}</td>"
"<td>{3}</td><td>{4}</td><td>{5}</td></tr>")
sql_bibrec = "SELECT creation_date FROM bibrec WHERE id=%s"
sql_doi = "SELECT creation_date FROM doi WHERE doi=%s"
for journal in CFG_JOURNALS:
print >> req, "<h2>%s</h2>" % escape(get_coll_i18nname(journal))
results = get_collection_reclist(journal)
print >> req, "<table>"
print >> req, th
for recid in results:
creation_date = run_sql(sql_bibrec, (recid, ))[0][0]
record = get_record(recid)
doi = record_get_field_value(record, '024', '7', code='a')
title = record_get_field_value(record, '245', code='a')
doi_date = run_sql(sql_doi, (doi, ))
background = "#eee"
if doi_date:
doi_date = doi_date[0][0]
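                # colour code: green if the article reached SCOAP3 before its
                # DOI registration date, orange if within a day, red otherwise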
if (creation_date - doi_date).days < 0:
background = "#66FF00"
elif (creation_date - doi_date).days < 1:
background = "#FF6600"
else:
background = "#FF0000"
else:
doi_date = ''
print >> req, tr.format(background,
escape(doi, True),
escape(doi),
title,
doi_date,
creation_date)
print >> req, "</table>"
def articles(req, i, mode='html'):
try:
i = int(i)
assert 0 <= i < len(_AFFILIATIONS)
except:
raise SERVER_RETURN(HTTP_BAD_REQUEST)
nation = _AFFILIATIONS[i]
ret = []
page_title = "SCOAP3 Articles by authors from %s" % nation
if mode == 'text':
req.content_type = "text/plain; charset=utf8"
req.headers_out['content-disposition'] = ('attachment; filename=%s.txt'
% nation)
else:
req.content_type = "text/html"
if mode == 'text':
print >> req, page_title
print >> req, "-" * len(page_title)
query = _build_query(nation)
for journal in CFG_JOURNALS:
results = perform_request_search(p=query, cc=journal, of='intbitset')
if not results:
continue
ret.append("<h2>%s (%s)</h2" % (escape(get_coll_i18nname(journal)),
len(results)))
ret.append("<p><ul>")
if mode == 'text':
print >> req, ""
print >> req, get_coll_i18nname(journal)
for recid in results:
record = get_record(recid)
title = record_get_field_value(record, '245', code='a')
doi = record_get_field_value(record, '024', '7', code='a')
if mode == 'text':
print >> req, "http://dx.doi.org/%s" % doi
li = ("<li><a href='http://dx.doi.org/{0}' "
"target='_blank'>{1}</a>: {2}</li>")
ret.append(li.format(escape(doi, True), escape(doi), title))
ret.append("</ul></p>")
body = '\n'.join(ret)
if mode == 'text':
return ""
return page(req=req, title=page_title, body=body)
def csv(req):
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = 'attachment; filename=scoap3.csv'
header = (','.join(['Nation']
+ [get_coll_i18nname(journal) for journal in CFG_JOURNALS]))
print >> req, header
for nation in _AFFILIATIONS:
query = _build_query(nation)
line = (','.join([nation]
+ [str(len(perform_request_search(p=query,
cc=journal,
of='intbitset')))
for journal in CFG_JOURNALS]))
print >> req, line
def create_search_from_affiliation(aff):
return '|'.join(t for t in CFG_SELECTED_AFF[aff])
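# Hedged example (not in the original source): the helper joins the affiliation
# variants configured in CFG_SELECTED_AFF with '|', so the result can be used
# as an alternation inside a regex or grouped search pattern.  For a
# hypothetical two-variant entry
#   'Wayne State University': ('Wayne State University', 'Wayne University')
# it would return 'Wayne State University|Wayne University'; the callers below
# wrap it as p='/.../' (regex) or p='(...)' when querying.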
def us_affiliations(req):
from invenio.search_engine_utils import get_fieldvalues
req.content_type = "text/html"
print >> req, pageheaderonly("USA affiliations", req=req)
affiliations = []
tmp = []
tmp.extend(get_fieldvalues(perform_request_search(p="*"), '100__u', False))
tmp.extend(get_fieldvalues(perform_request_search(p="*"), '100__v', False))
tmp.extend(get_fieldvalues(perform_request_search(p="*"), '700__u', False))
tmp.extend(get_fieldvalues(perform_request_search(p="*"), '700__v', False))
def _find_usa(x):
return ("United States of America" in x
or "United States" in x
or "USA" in x
or "U.S.A" in x)
affiliations.extend(filter(_find_usa, tmp))
affiliations = set(affiliations)
replaces = [('United States of America', ''),
("United States", ''),
("USA", ''),
("U.S.A", ''),
("University", ''),
("State", ''),
('Department of Physics and Astronomy', ""),
('Department of Physics', ""),
('Department', ''),
(",", '')]
affs = map(lambda x: multi_replace(x, replaces).strip(), affiliations)
affiliations2 = zip(affiliations, affs)
for a in sorted(affiliations2, key=lambda aff: aff[1]):
req.write(a[0]+'<br />')
req.write(pagefooteronly(req=req))
return ""
def us_affiliations_csv(req):
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = 'attachment; filename=us_aff.csv'
header = (';'.join(['University']
+ [get_coll_i18nname(journal) for journal in CFG_JOURNALS]
+ ['sum']))
print >> req, header
for university in sorted(CFG_SELECTED_AFF):
line = university
count = 0
search = create_search_from_affiliation(university)
for collection in CFG_JOURNALS:
res = perform_request_search(p='/%s/' % (search,), c=collection)
line = line + ";" + str(len(res))
count = count + len(res)
print >> req, line+";"+str(count)
def usa_papers(req):
req.content_type = "text/html"
print >> req, pageheaderonly("USA papers for selected affiliations",
req=req)
li = "<li><a href='https://repo.scoap3.org/record/{0}'>{1}</a></li>"
    ## print the list of links to the articles
for university in CFG_SELECTED_AFF:
print >> req, "<h2>%s</h2>" % (str(university),)
search = create_search_from_affiliation(university)
for collection in CFG_JOURNALS:
res = perform_request_search(p='/%s/' % (search,), c=collection)
if len(res):
print >> req, "<h3>%s (%i)</h3>" % (str(collection), len(res))
print >> req, "<ul>"
for rec_id in res:
rec = get_record(rec_id)
line = li.format(str(rec_id), str(rec['245'][0][0][0][1]))
print >> req, line
print >> req, "</ul>"
req.write(pagefooteronly(req=req))
return ""
def usa_papers_csv(req):
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = ('attachment; '
'filename=usa_papers.csv')
li = "%s; https://repo.scoap3.org/record/%s"
    ## print the list of links to the articles
for university in CFG_SELECTED_AFF:
print >> req, university
search = create_search_from_affiliation(university)
for collection in CFG_JOURNALS:
res = perform_request_search(p='(%s)' % (search,), c=collection)
if len(res):
print >> req, collection
for rec_id in res:
rec = get_record(rec_id)
                    line = li % (str(rec['245'][0][0][0][1]), str(rec_id))
print >> req, line
print >> req, ""
print >> req, ""
print >> req, ""
def papers_by_country_csv(req, country):
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = ('attachment; '
'filename=papers_by_country.csv')
    ## print the list of links to the articles
count = 1
print >> req, country
search = "100__w:'%s' OR 700__w:'%s'" % (country, country)
res = perform_request_search(p='%s' % (search,))
print >> req, "#;Title;Author;Journal;DOI;Inspire record"
if len(res):
for rec_id in res:
author_count = 11
rec = get_record(rec_id)
title = ''
authors = ''
journal = ''
doi = ''
inspire_record = ''
if '245' in rec:
title = re.sub("<.*?>", "", rec['245'][0][0][0][1])
if '100' in rec:
authors = rec['100'][0][0][0][1]
if '700' in rec:
for auth in rec['700']:
if author_count > 1:
authors += " / %s" % (auth[0][0][1],)
author_count -= 1
elif author_count == 1:
authors += " / et al"
author_count -= 1
else:
break
for sub in rec['773'][0][0]:
if 'p' in sub[0]:
journal = sub[1]
doi = get_doi(rec_id)
if '035' in rec:
for f in rec['035'][0][0]:
if 'a' in f:
inspire_record = 'http://inspirehep.net/record/%s' % (f[1],)
print >> req, "%s;%s;%s;%s;%s;%s" % (count, title, authors, journal, doi, inspire_record)
count += 1
def papers_by_country_with_affs_csv(req, country):
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = ('attachment; '
'filename=papers_by_country.csv')
    ## print the list of links to the articles
count = 1
print >> req, country
search = "100__w:'%s' OR 700__w:'%s'" % (country, country)
res = perform_request_search(p='%s' % (search,))
print >> req, "#;Title;Journal;DOI;Inspire record;Author;Affiliations"
if len(res):
for rec_id in res:
author_count = 11
rec = get_record(rec_id)
title = ''
authors = ''
journal = ''
doi = ''
inspire_record = ''
if '245' in rec:
title = re.sub("<.*?>", "", rec['245'][0][0][0][1])
for sub in rec['773'][0][0]:
if 'p' in sub[0]:
journal = sub[1]
doi = get_doi(rec_id)
if '035' in rec:
for f in rec['035'][0][0]:
if 'a' in f:
inspire_record = 'http://inspirehep.net/record/%s' % (f[1],)
print >> req, "%s;%s;%s;%s;%s;;" % (count, title, journal, doi, inspire_record)
if '100' in rec:
author = rec['100'][0][0][0][1]
affiliations = record_get_field_values(rec, tag='100', code='v')
print >> req, ";;;;;%s;%s" % (author, " | ".join(affiliations))
if '700' in rec:
for auth in rec['700']:
author = auth[0][0][1]
affiliations = field_get_subfield_values(auth, code='v')
print >> req, ";;;;;%s;%s" % (author, " | ".join(affiliations))
count += 1
def countries_by_publishers(req):
req.content_type = "text/html"
print >> req, pageheaderonly("Countries/publishers", req=req)
############
## PART 1 ##
# journals = []
# for pub in CFG_JOURNALS:
# ids = perform_request_search(cc=pub)
# journals.append((pub, ids))
# journals.append(("older_than_2014", perform_request_search(cc='older_than_2014')))
# countries = []
# for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
# ids = perform_request_search(p="country:%s" % (country,)) + perform_request_search(cc='older_than_2014', p="country:%s" % (country,))
# countries.append((country, ids))
req.write("<h1>Number of articles per country per journal</h1>")
req.write("<h2>Minimum one author from the country</h2>")
req.flush()
req.write("<table>\n")
req.write("<tr><th rowspan=2>Country</th><th colspan=10>Journals</th><th>Other</th></tr>")
req.write("""<tr>
<td>Acta</td>
<td>Advances in High Energy Physics</td>
<td>Chinese Physics C</td>
<td>European Physical Journal C</td>
<td>Journal of Cosmology and Astroparticle Physics</td>
<td>Journal of High Energy Physics</td>
<td>New Journal of Physics</td>
<td>Nuclear Physics B</td>
<td>Physics Letters B</td>
<td>Progress of Theoretical and Experimental Physics</td>
<td>older_than_2014</td></tr>""")
for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
req.write("<tr><td>%s</td>" % (country,))
for pub in CFG_JOURNALS + ["older_than_2014"]:
req.write("<td>%s</td>" % perform_request_search(p="country:%s" % (country,), cc=pub))
req.write("</tr>")
req.write('</table>')
############
## PART 2 ##
# journals = []
hitcount = {}
for pub in CFG_JOURNALS + ["older_than_2014"]:
ids = perform_request_search(cc=pub)
hitcount[pub] = {}
for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
hitcount[pub][country] = 0
for id in ids:
record = get_record(id)
countries = set(record_get_field_values(record, '700', '%', '%', 'w') + record_get_field_values(record, '100', '%', '%', 'w'))
if len(countries) == 1:
c = countries.pop()
if c in set(NATIONS_DEFAULT_MAP.itervalues()):
                    hitcount[pub][c] += 1
req.write("<h1>Number of articles per country per journal</h1>")
req.write("<h2>All author from the country</h2>")
req.flush()
req.write("<table>\n")
req.write("<tr><th rowspan=2>Country</th><th colspan=10>Journals</th><th>Other</th></tr>")
req.write("""<tr>
<td>Acta</td>
<td>Advances in High Energy Physics</td>
<td>Chinese Physics C</td>
<td>European Physical Journal C</td>
<td>Journal of Cosmology and Astroparticle Physics</td>
<td>Journal of High Energy Physics</td>
<td>New Journal of Physics</td>
<td>Nuclear Physics B</td>
<td>Physics Letters B</td>
<td>Progress of Theoretical and Experimental Physics</td>
<td>older_than_2014</td></tr>""")
for country in sorted(set(NATIONS_DEFAULT_MAP.itervalues())):
req.write("<tr><td>%s</td>" % (country,))
for pub in CFG_JOURNALS + ["older_than_2014"]:
req.write("<td>%s</td>" % hitcount[pub][country])
req.write("</tr>")
req.write('</table>')
req.write(pagefooteronly(req=req))
return ""
def impact_articles(req, year):
try:
year = int(year)
assert 2014 <= year
except:
raise SERVER_RETURN(HTTP_BAD_REQUEST)
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = ('attachment; '
'filename=impact_articles.csv')
ids = perform_request_search(p="datecreated:{year}-01-01->{year}-12-31".format(year=year))
counter = 0
print >> req, "#;recid;journal;author;orcid;affiliation;countries"
for i in ids:
counter += 1
try:
rec = get_record(i)
except:
print >> req, "{c},{recid},Can't load metadata".format(c=counter, recid=i)
continue
journal = record_get_field_value(rec, tag='773', code='p')
for field in ['100', '700']:
if field in rec:
for author in rec[field]:
name = ""
orcid = ""
aff = ""
country = ""
for key, val in author[0]:
                        if key == 'a':
name = unicode(val, 'UTF-8').replace('\n', ' ').strip()
                        if key == 'j':
orcid = unicode(val, 'UTF-8').replace('\n', ' ').strip()
if key in ['v', 'u']:
aff += unicode(val, 'UTF-8').replace('\n', ' ').strip() + " | "
                        if key == 'w':
country += unicode(val, 'UTF-8').replace('\n', ' ').strip() + ";"
print >> req, "{c};{recid};{journal};{name};{orcid};{aff};{country}".format(c=counter, recid=i, journal=journal, name=name, orcid=orcid, aff=aff, country=country)
def national_authors_list(req, search_country):
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = ('attachment; '
'filename=national_authors_list.csv')
ids = perform_request_search(p="country:'%s'" % (search_country,))
req.write("#;RECID;Title;Creation date;Publisher;Total # of authors;Authors name(given country only);Authors country;Authors affiliations\n")
for number, recid in enumerate(ids):
doi = record_get_field_value(get_record(recid), '024', ind1="7", code="a")
journal = record_get_field_value(get_record(recid), '773', code="p")
title = record_get_field_value(get_record(recid), '245', code="a")
del_date = get_creation_date(recid)
publisher = record_get_field_value(get_record(recid), '980', code="b")
if not publisher:
publisher = record_get_field_value(get_record(recid), '541', code="a")
rec = get_record(recid)
authors = []
author_count = 0
for f in ['100', '700']:
if f in rec:
for auth in rec[f]:
author_count += 1
aff = ''
name = ''
country = ''
hit = 0
for subfield, value in auth[0]:
if subfield == 'a':
name = value
if subfield in ['v', 'u']:
if aff:
aff += ', ' + value
else:
aff = value
if subfield == 'w':
if country:
country += ', ' + value
else:
country = value
if search_country in value:
hit = 1
if hit:
authors.append({'name': name,
'affiliation': aff.replace('\n',''),
'country': country})
for i, author in enumerate(authors):
if i == 0:
req.write("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\n" % (number+1, recid, title.replace('\n',''), del_date, publisher, author_count, author['name'], author['country'], author['affiliation']))
else:
req.write("||||||||%s|%s|%s\n" % (author['name'], author['country'], author['affiliation']))
def institutions_list(req, country, year=None):
from copy import deepcopy
def find_nations(affiliation):
NATIONS_DEFAULT_MAP['European Organization for Nuclear Research'] = 'CERN'
NATIONS_DEFAULT_MAP['Centre Europeen de Recherches Nucleaires'] = 'CERN'
NATIONS_DEFAULT_MAP['High Energy Accelerator Research Organization'] = 'KEK'
NATIONS_DEFAULT_MAP['KEK'] = 'KEK'
NATIONS_DEFAULT_MAP['FNAL'] = 'FNAL'
NATIONS_DEFAULT_MAP['Fermilab'] = 'FNAL'
NATIONS_DEFAULT_MAP['Fermi National'] = 'FNAL'
NATIONS_DEFAULT_MAP['SLAC'] = 'SLAC'
NATIONS_DEFAULT_MAP['DESY'] = 'DESY'
NATIONS_DEFAULT_MAP['Deutsches Elektronen-Synchrotron'] = 'DESY'
NATIONS_DEFAULT_MAP['JINR'] = 'JINR'
NATIONS_DEFAULT_MAP['JOINT INSTITUTE FOR NUCLEAR RESEARCH'] = 'JINR'
possible_affs = []
def _sublistExists(list1, list2):
return ''.join(map(str, list2)) in ''.join(map(str, list1))
values = set([y.lower().strip() for y in re.findall(ur"[\w']+", affiliation.replace('.','').decode("UTF-8"), re.UNICODE)])
for key, val in NATIONS_DEFAULT_MAP.iteritems():
key = unicode(key)
key_parts = set(key.lower().decode('utf-8').split())
if key_parts.issubset(values):
possible_affs.append(val)
values = values.difference(key_parts)
if not possible_affs:
possible_affs = ['HUMAN CHECK']
if 'CERN' in possible_affs and 'Switzerland' in possible_affs:
# Don't use remove in case of multiple Switzerlands
possible_affs = [x for x in possible_affs if x != 'Switzerland']
if 'KEK' in possible_affs and 'Japan' in possible_affs:
possible_affs = [x for x in possible_affs if x != 'Japan']
if 'FNAL' in possible_affs and 'USA' in possible_affs:
possible_affs = [x for x in possible_affs if x != 'USA']
if 'SLAC' in possible_affs and 'USA' in possible_affs:
possible_affs = [x for x in possible_affs if x != 'USA']
if 'DESY' in possible_affs and 'Germany' in possible_affs:
possible_affs = [x for x in possible_affs if x != 'Germany']
if 'JINR' in possible_affs and 'Russia' in possible_affs:
possible_affs = [x for x in possible_affs if x != 'Russia']
return sorted(list(set(possible_affs)))[0]
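    # Worked example (an added illustration, not original code): for an
    # affiliation such as "Fermi National Accelerator Laboratory, Batavia, USA"
    # the lowercased token set contains {'fermi', 'national', 'usa', ...}, so
    # both the 'Fermi National' -> 'FNAL' key and the 'USA' key match; the
    # lab-specific cleanup above then drops the plain country, and the
    # alphabetically first remaining candidate ('FNAL') is returned.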
publisher_dict = {'New J. Phys.':0,
'Acta Physica Polonica B':0,
'Advances in High Energy Physics':0,
'Chinese Phys. C':0,
'EPJC':0,
'JCAP':0,
'JHEP':0,
'Nuclear Physics B':0,
'Physics letters B':0,
'PTEP':0}
if(year):
recids = perform_request_search(p='country:"%s" year:%s' % (country,year))
else:
recids = perform_request_search(p='country:"%s"' % (country,))
req.content_type = 'text/csv; charset=utf-8'
req.headers_out['content-disposition'] = ('attachment; '
'filename=%s_institutions_list.csv' % (country,))
req.write("recid|authors #|title|country|New J. Phys.|Acta Physica Polonica B|Advances in High Energy Physics|Chinese Phys. C|EPJC|JCAP|JHEP|Nuclear Physics B|Physics letters B|PTEP\n")
for recid in recids:
rec = get_record(recid)
global_affs = {}
author_count = 0
if '100' in rec:
author_count += len(rec['100'])
if '700' in rec:
author_count += len(rec['700'])
journal = record_get_field_value(rec, '773', ind1="%", ind2="%", code='p')
affs = []
affs.extend(record_get_field_values(rec, '100', ind1="%", ind2="%", code='v'))
affs.extend(record_get_field_values(rec, '700', ind1="%", ind2="%", code='v'))
for aff in affs:
if aff not in global_affs:
global_affs[aff] = deepcopy(publisher_dict)
global_affs[aff][journal] += 1
for aff, j in global_affs.iteritems():
req.write("%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s|%s\n" % (recid, author_count, aff.replace('\n', ' ').replace('\r', ''), find_nations(aff), j['New J. Phys.'],j['Acta Physica Polonica B'],j['Advances in High Energy Physics'],j['Chinese Phys. C'],j['EPJC'],j['JCAP'],j['JHEP'],j['Nuclear Physics B'],j['Physics letters B'],j['PTEP']))
| gpl-2.0 | 6,278,846,856,669,608,000 | 41.766927 | 349 | 0.507505 | false | 3.780937 | false | false | false |
jdemon519/cfme_tests | cfme/tests/containers/test_reports.py | 1 | 7753 | # -*- coding: utf-8 -*-
import re
import pytest
from cfme.containers.provider import ContainersProvider
from cfme.intelligence.reports.reports import CannedSavedReport
from utils import testgen
from utils.blockers import BZ
pytestmark = [
pytest.mark.usefixtures('setup_provider'),
pytest.mark.meta(
server_roles='+ems_metrics_coordinator +ems_metrics_collector +ems_metrics_processor'),
pytest.mark.tier(1)]
pytest_generate_tests = testgen.generate([ContainersProvider], scope='function')
@pytest.fixture(scope='module')
def node_hardwares_db_data(appliance):
"""Grabbing hardwares table data for nodes"""
db = appliance.db
hardwares_table = db['hardwares']
container_nodes = db['container_nodes']
out = {}
for node in db.session.query(container_nodes).all():
out[node.name] = hardwares_table.__table__.select().where(
hardwares_table.id == node.id
).execute().fetchone()
return out
@pytest.fixture(scope='function')
def pods_per_ready_status(provider):
"""Grabing the pods and their ready status from API"""
# TODO: Add later this logic to mgmtsystem
entities_j = provider.mgmt.api.get('pod')[1]['items']
out = {}
for entity_j in entities_j:
out[entity_j['metadata']['name']] = next(
(True if condition['status'].lower() == 'true' else False)
for condition in entity_j['status']['conditions']
if condition['type'].lower() == 'ready'
)
return out
def get_vpor_data_by_name(vporizer_, name):
return [vals for vals in vporizer_ if vals.resource_name == name]
def get_report(menu_name):
"""Queue a report by menu name , wait for finish and return it"""
path_to_report = ['Configuration Management', 'Containers', menu_name]
run_at = CannedSavedReport.queue_canned_report(path_to_report)
return CannedSavedReport(path_to_report, run_at)
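# Minimal usage sketch (added; not part of the original tests): the helper is
# called with one of the canned report names used below, e.g.
#   report = get_report('Nodes By Capacity')
#   for row in report.data.rows:
#       print(row['Name'], row['CPU Cores'])
# Column names such as 'CPU Cores' are the ones consumed by the tests below.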
@pytest.mark.meta(blockers=[BZ(1435958, forced_streams=["5.8"])])
@pytest.mark.polarion('CMP-9533')
def test_pods_per_ready_status(soft_assert, pods_per_ready_status):
report = get_report('Pods per Ready Status')
for row in report.data.rows:
name = row['# Pods per Ready Status']
readiness_ui = (True if row['Ready Condition Status'].lower() == 'true'
else False)
if soft_assert(name in pods_per_ready_status, # this check based on BZ#1435958
'Could not find pod "{}" in openshift.'
.format(name)):
soft_assert(pods_per_ready_status[name] == readiness_ui,
'For pod "{}" expected readiness is "{}" got "{}"'
.format(name, pods_per_ready_status[name], readiness_ui))
@pytest.mark.polarion('CMP-9536')
def test_report_nodes_by_capacity(appliance, soft_assert, node_hardwares_db_data):
report = get_report('Nodes By Capacity')
for row in report.data.rows:
hw = node_hardwares_db_data[row['Name']]
soft_assert(hw.cpu_total_cores == int(row['CPU Cores']),
'Number of CPU cores is wrong: expected {}'
' got {}'.format(hw.cpu_total_cores, row['CPU Cores']))
# The following block is to convert whatever we have to MB
memory_ui = float(re.sub(r'[a-zA-Z,]', '', row['Memory']))
if 'gb' in row['Memory'].lower():
memory_mb_ui = memory_ui * 1024
# Shift hw.memory_mb to GB, round to the number of decimals of memory_mb_db
# and shift back to MB:
memory_mb_db = round(hw.memory_mb / 1024.0,
len(str(memory_mb_ui).split('.')[1])) * 1024
else: # Assume it's MB
memory_mb_ui = memory_ui
memory_mb_db = hw.memory_mb
soft_assert(memory_mb_ui == memory_mb_db,
'Memory (MB) is wrong for node "{}": expected {} got {}'
.format(row['Name'], memory_mb_ui, memory_mb_db))
@pytest.mark.polarion('CMP-10034')
def test_report_nodes_by_cpu_usage(appliance, soft_assert, vporizer):
report = get_report('Nodes By CPU Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_cpu_usage_rate_average, 2)
usage_report = round(float(row['CPU Usage (%)']), 2)
soft_assert(usage_db == usage_report,
'CPU usage is wrong for node "{}": expected {} got {}'
.format(row['Name'], usage_db, usage_report))
@pytest.mark.polarion('CMP-10033')
def test_report_nodes_by_memory_usage(appliance, soft_assert, vporizer):
report = get_report('Nodes By Memory Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_mem_usage_absolute_average, 2)
usage_report = round(float(row['Memory Usage (%)']), 2)
soft_assert(usage_db == usage_report,
                    'Memory usage is wrong for node "{}": expected {} got {}.'
.format(row['Name'], usage_db, usage_report))
@pytest.mark.meta(blockers=[BZ(1436698, forced_streams=["5.6", "5.7"])])
@pytest.mark.polarion('CMP-10033')
def test_report_nodes_by_number_of_cpu_cores(soft_assert, node_hardwares_db_data):
report = get_report('Number of Nodes per CPU Cores')
for row in report.data.rows:
hw = node_hardwares_db_data[row['Name']]
soft_assert(hw.cpu_total_cores == int(row['Hardware Number of CPU Cores']),
'Hardware Number of CPU Cores is wrong for node "{}": expected {} got {}.'
.format(row['Name'], hw.cpu_total_cores, row['Hardware Number of CPU Cores']))
@pytest.mark.polarion('CMP-10008')
def test_report_projects_by_number_of_pods(appliance, soft_assert):
container_projects = appliance.db['container_projects']
container_pods = appliance.db['container_groups']
report = get_report('Projects by Number of Pods')
for row in report.data.rows:
pods_count = len(container_pods.__table__.select().where(
container_pods.container_project_id ==
container_projects.__table__.select().where(
container_projects.name == row['Project Name']).execute().fetchone().id
).execute().fetchall())
soft_assert(pods_count == int(row['Number of Pods']),
'Number of pods is wrong for project "{}". expected {} got {}.'
.format(row['Project Name'], pods_count, row['Number of Pods']))
@pytest.mark.polarion('CMP-10009')
def test_report_projects_by_cpu_usage(soft_assert, vporizer):
report = get_report('Projects By CPU Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_cpu_usage_rate_average, 2)
usage_report = round(float(row['CPU Usage (%)']), 2)
soft_assert(usage_db == usage_report,
'CPU usage is wrong for project "{}": expected {} got {}'
.format(row['Name'], usage_db, usage_report))
@pytest.mark.polarion('CMP-10010')
def test_report_projects_by_memory_usage(soft_assert, vporizer):
report = get_report('Projects By Memory Usage')
for row in report.data.rows:
vpor_values = get_vpor_data_by_name(vporizer, row["Name"])[0]
usage_db = round(vpor_values.max_mem_usage_absolute_average, 2)
usage_report = round(float(row['Memory Usage (%)']), 2)
soft_assert(usage_db == usage_report,
                    'Memory usage is wrong for project "{}": expected {} got {}.'
.format(row['Name'], usage_db, usage_report))
| gpl-2.0 | 3,935,604,979,949,869,000 | 37.381188 | 98 | 0.613182 | false | 3.484494 | true | false | false |
gchrupala/imaginet | layers.py | 1 | 2037 | import theano
import theano.tensor as T
import passage.inits as inits
from theano.tensor.extra_ops import repeat
from passage.theano_utils import shared0s, floatX
### Directly compositional models
### Associative operators ###
class Add(object):
'''Elementwise addition.'''
def __init__(self, size):
self.size = size
self.id = T.alloc(0.0, 1, self.size)
def step(self, x_t, h_tm1):
return h_tm1 + x_t
class Mult(object):
'''Elementwise multiplication.'''
def __init__(self, size):
self.size = size
self.id = T.alloc(1.0, 1, self.size)
def step(self, x_t, h_tm1):
return h_tm1 * x_t
class MatrixMult(object):
'''Matrix multiplication.'''
def __init__(self, size):
self.size = size
self.sqrt_size = int(self.size**0.5)
self.id = T.eye(self.sqrt_size, self.sqrt_size).reshape((1,self.size))
def step(self, x_t, h_tm1):
h_t,_ = theano.scan(lambda x, z: T.dot(x, z),
sequences=[ x_t.reshape((x_t.shape[0], self.sqrt_size, self.sqrt_size)),
h_tm1.reshape((h_tm1.shape[0], self.sqrt_size, self.sqrt_size))])
return h_t.reshape((h_t.shape[0], self.size))
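# Added illustration (hedged, not from the original file): MatrixMult treats
# each `size`-dimensional state vector as a flattened sqrt(size) x sqrt(size)
# matrix and composes states by matrix product, so with size=4 a state
# [a, b, c, d] acts as the 2x2 matrix [[a, b], [c, d]] and the identity
# element is the flattened 2x2 identity [1, 0, 0, 1].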
class Direct(object):
def __init__(self, n_features=256, size=256, init=inits.uniform, op=MatrixMult):
self.n_features = n_features
self.size = size
self.sqrt_size = int(self.size ** 0.5)
self.init = init
self.op = op(self.size)
self.input = T.imatrix()
self.embeddings = self.init((self.n_features, self.size))
self.params = [self.embeddings]
def embedded(self):
return self.embeddings[self.input]
def output(self, dropout_active=False):
X = self.embedded()
out, _ = theano.scan(self.op.step,
sequences=[X],
outputs_info=[repeat(self.op.id, X.shape[1], axis=0)]
)
return out[-1]
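# Hedged usage sketch (added; not part of the original module).  It shows how
# a Direct composer might be compiled into a callable; the feature count, the
# sizes and the choice of MatrixMult below are illustrative assumptions.
def _example_direct_usage():
    # Each token embedding is a flattened 16x16 matrix (size=256) and the
    # sequence is folded step by step with the chosen operator.
    layer = Direct(n_features=1000, size=256, op=MatrixMult)
    # layer.input is an integer matrix of token ids; output() returns the
    # final composed state per example as a symbolic expression.
    return theano.function([layer.input], layer.output())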
| mit | -3,363,936,435,873,267,000 | 30.338462 | 106 | 0.559647 | false | 3.344828 | false | false | false |
bird-house/bird-feeder | setup.py | 1 | 1481 | # -*- coding: utf-8 -*-
from setuptools import find_packages
from setuptools import setup
version = '0.2.0'
description = 'Bird Feeder publishes Thredds metadata catalogs to a Solr index service with birdhouse schema.'
long_description = (
open('README.rst').read() + '\n' +
open('AUTHORS.rst').read() + '\n' +
open('CHANGES.rst').read()
)
requires = [
'argcomplete',
'pysolr',
'threddsclient',
#'dateutil',
'nose',
]
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: Atmospheric Science',
]
setup(name='bird-feeder',
version=version,
description=description,
long_description=long_description,
classifiers=classifiers,
keywords='thredds solr python netcdf birdhouse anaconda',
author='Birdhouse Developers',
author_email='',
url='https://github.com/bird-house/bird-feeder',
license = "Apache License v2.0",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='nose.collector',
install_requires=requires,
entry_points = {
'console_scripts': [
'birdfeeder=birdfeeder:main',
]}
,
)
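# Usage note (added sketch, not part of the original setup script): after an
# install such as `pip install .`, the console_scripts entry point above is
# expected to expose a `birdfeeder` command that dispatches to
# birdfeeder.main(); the declared nose collector lets the tests run via
# `python setup.py test`.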
| apache-2.0 | 4,515,275,595,300,078,000 | 27.480769 | 110 | 0.605672 | false | 3.826873 | false | true | false |
SGover/monopoly | gameGui.py | 1 | 19395 | from gui import guiButton
import time, sys, os
from constants import *
from random import randrange
import pygame
from pygame.locals import QUIT
from threading import Thread
X = 550
POPUP_TOP=100
POPUP_LEFT=200
POPUP_SIZE=(700,500)
class PopupWindow():
def __init__(self,massage,buttons,image=None,texts=None,measures=((POPUP_TOP,POPUP_LEFT),POPUP_SIZE)):
#self.gameWindow=gameWindow
self.top=measures[0][0]
self.left=measures[0][1]
self.width=measures[1][0]
self.height=measures[1][1]
self.image=image
self.texts=texts
self.massage=massage
self.buttons=buttons
for button in self.buttons:
button.position = (self.left+button.position[0],self.top+button.position[1])
self.background=pygame.Surface((self.width,self.height))
pygame.font.init()
self.fnt = pygame.font.Font("fonts//Kabel.ttf", 20)
def draw(self,surf):
self.background.fill((25,25,25))
frame = pygame.Surface((self.width-10,self.height-10))
frame.fill((220,220,220))
self.background.blit(frame, (5,5))
if self.texts!=None:
for text in self.texts:
t=self.fnt.render(text[0],True,BLACK)
self.background.blit(t, text[1])
surf.blit(self.background,(self.left,self.top))
m=self.fnt.render(self.massage,True,BLACK)
if self.image!=None:
surf.blit(self.image,(330,140))
surf.blit(m,(self.left+30,self.top+15))
for button in self.buttons:
if button._enable:
surf.blit(button,button.position)
def handle_event(self,event):
for button in self.buttons:
button.handle_event(event)
def close(self):
del[self]
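# Added sketch (hedged, not in the original file): a PopupWindow is built from
# a message plus ready-made guiButton instances whose positions are given
# relative to the popup; __init__ shifts them into screen coordinates, e.g.
#   ok = guiButton('OK', (300, 400), action=passf)
#   popup = PopupWindow('Choose', [ok])
#   popup.draw(screen)   # blit onto the main pygame surface each frame
# Button clicks are delivered by forwarding pygame events to handle_event().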
class TradeWindow(PopupWindow):
def __init__(self,buttons,trader,players):
self.buttons=buttons
self.trader=trader
self.players=players
self.money_interval=[50,50]
self.money1=0
self.money2=0
margin=5
curr_x=20
curr_y=50
block_size=(50,75)
headers=[(players[0].name+ ' assets',(800//4-1,20)),(players[1].name+ ' assets',(3*800//4,20)),
('money :$'+str(players[0].money),(800//4-170,20)),('money :$'+str(players[1].money),(-170+3*800//4,20)),
('Trade assets',(POPUP_SIZE[0]//4-100,260)),('Trade assets',(3*POPUP_SIZE[0]//4-100,260))]
for asset in players[0].assets_list():
self.buttons.append(guiButton('',(curr_x,curr_y),action=self.add1_asset,parameter=asset,image=get_asset_image(asset),name='add'+asset.name,sizing=0.5,y_sizing=0.5))
self.buttons.append(guiButton('',(curr_x,curr_y+260),action=self.rem1_asset,parameter=asset,image=get_asset_image(asset),name='rem'+asset.name,enabled=False,sizing=0.5,y_sizing=0.5))
if curr_x+block_size[0]<POPUP_SIZE[0]//2-margin:
curr_x=curr_x+block_size[0]
else:
curr_x=50
curr_y+=block_size[1]
curr_x=20
curr_y=50
for asset in players[1].assets_list():
self.buttons.append(guiButton('',(POPUP_SIZE[0]//2+curr_x,curr_y),action=self.add2_asset,parameter=asset,image=get_asset_image(asset),name='add'+asset.name,sizing=0.5,y_sizing=0.5))
self.buttons.append(guiButton('',(POPUP_SIZE[0]//2+curr_x,curr_y+260),action=self.rem2_asset,parameter=asset,image=get_asset_image(asset),name='rem'+asset.name,enabled=False,sizing=0.5,y_sizing=0.5))
if curr_x+block_size[0]<POPUP_SIZE[0]//2-margin:
curr_x=curr_x+block_size[0]
else:
curr_x=100
curr_y+=block_size[1]
self.buttons.append(guiButton('+',(50,500),action=self.add1_money))
self.buttons.append(guiButton('-',(120,500),action=self.rem1_money))
self.buttons.append(guiButton('+',(450,500),action=self.add2_money))
self.buttons.append(guiButton('-',(520,500),action=self.rem2_money))
PopupWindow.__init__(self,'',buttons,texts=headers,measures=((100,50),(800,600)))
self.top_dx=[40,40]
self.top_dy=[100,100]
self.down_dx=[40,40]
self.down_dy=[400,400]
def add1_money(self):
new_v=self.trader.player1_money+self.money_interval[0]
if new_v<self.players[0].money:
self.trader.set_money1(new_v)
self.money1=new_v
def add2_money(self):
new_v=self.trader.player2_money+self.money_interval[1]
if new_v<self.players[1].money:
self.trader.set_money2(new_v)
self.money2=new_v
def rem1_money(self):
new_v=self.trader.player1_money-self.money_interval[0]
if new_v>=0:
self.trader.set_money1(new_v)
self.money1=new_v
def rem2_money(self):
new_v=self.trader.player2_money-self.money_interval[1]
if new_v>=0:
self.trader.set_money2(new_v)
self.money2=new_v
def enable_asset(self,name,enabled):
for button in self.buttons:
if button.name==name:
button.set_enabled(enabled)
def add1_asset(self,asset):
self.trader.add_asset_1(asset)
self.enable_asset('add'+asset.name,False)
self.enable_asset('rem'+asset.name,True)
def rem1_asset(self,asset):
self.trader.remove_asset_1(asset)
self.enable_asset('add'+asset.name,True)
self.enable_asset('rem'+asset.name,False)
def add2_asset(self,asset):
self.trader.add_asset_2(asset)
self.enable_asset('add'+asset.name,False)
self.enable_asset('rem'+asset.name,True)
def rem2_asset(self,asset):
self.trader.remove_asset_2(asset)
self.enable_asset('add'+asset.name,True)
self.enable_asset('rem'+asset.name,False)
def draw(self,surf):
PopupWindow.draw(self,surf)
t=self.fnt.render('Trade money $'+str(self.money2),True,BLACK)
surf.blit(t, (self.left-50+3*700//4,self.top+550))
t=self.fnt.render('Trade money $'+str(self.money1),True,BLACK)
surf.blit(t, (self.left+700//4-100,self.top+550))
def update(self):
pass
class StatusWindow():
players = []
def __init__(self):
pass
def start(self, players):
self.players = players
# setting fonts
pygame.font.init()
self.fnt_name = pygame.font.Font("fonts//Kabel.ttf", 28)
self.fnt_money = pygame.font.Font("fonts//Kabel.ttf", 24)
self.fnt_asset = pygame.font.Font("fonts//Kabel.ttf", 13)
self.img = pygame.image.load("images//gui//status.png")
def draw(self, background):
self.img = self.img.convert_alpha()
l = 0
for p in self.players:
height = l * 270
background.blit(self.img, (X,height+5))
txt_name = self.fnt_name.render(p.name, True, P_COLORS[l])
textpos = txt_name.get_rect().move(X+15,15+height)
background.blit(txt_name, textpos)
background.blit(pygame.image.load(TOKENS[p.token_index]).convert_alpha(), (X+250,15+height))
txt_money = self.fnt_money.render("$"+str(p.money), True, (10, 10, 10))
textpos = txt_money.get_rect().move(X+320,25+height)
background.blit(txt_money, textpos)
i = 0
for c in p.assets:
color = COLORS[c]
text = ""
for asset in p.assets[c]:
text = text + asset.name + " | "
txt_money = self.fnt_asset.render(text, True, color)
textpos = txt_money.get_rect().move(X+10,68+height+(i*20))
background.blit(txt_money, textpos)
i += 1
l += 1
return background
def get_asset_image(asset):
#init fonts
fnt_title = pygame.font.Font("fonts//Kabel.ttf", 10)
fnt_des = pygame.font.Font("fonts//Kabel.ttf", 9)
#creating the image
surf=pygame.Surface((90,135))
surf.fill((255,255,255))
#filling the top
surf.fill(COLORS[asset.color],pygame.Rect(0,0,90,30))
#draw title
text=asset.name.split(' ')
title = fnt_title.render(text[0], True, BLACK)
pos = title.get_rect().move(1,2)
surf.blit(title,pos)
title = fnt_title.render(text[1], True, BLACK)
pos = title.get_rect().move(1,15)
surf.blit(title,pos)
#draw rent
if asset.color!=UTILITY and asset.color!=RW_STATION:
rent=fnt_des.render("Rent $"+str(asset.rent_list[0]), True, BLACK)
pos = rent.get_rect().move(5,30)
surf.blit(rent,pos)
for num in range (1,5):
rent=fnt_des.render(str(num)+" houses $"+str(asset.rent_list[num]), True, BLACK)
pos = rent.get_rect().move(5,30+num*11)
surf.blit(rent,pos)
rent=fnt_des.render("hotel $"+str(asset.rent_list[5]), True, BLACK)
pos = rent.get_rect().move(5,30+62)
surf.blit(rent,pos)
mortage=fnt_des.render("mortage $"+str(asset.price//2), True, BLACK)
pos = mortage.get_rect().move(5,30+72)
surf.blit(mortage,pos)
price=fnt_des.render("house price $"+str(asset.house_price), True, BLACK)
pos = price.get_rect().move(5,30+82)
else:
if asset.color==UTILITY:
descripton=[' Rent',
'own 1',
'dice roll X 4',
'',
'own 2',
'dice roll X 10']
else:
descripton=[' Rent',
'own 1 $25',
'own 2 $50',
'own 3 $100',
'own 4 $200']
for line in descripton:
tline=fnt_des.render(line, True, BLACK)
pos = tline.get_rect().move(5,40+descripton.index(line)*11)
surf.blit(tline,pos)
return surf
class GameWindow():
#get the board and the players
def __init__(self,board,players,console):
self.console=console
self.board=board
self.players=players
self.quit=False
self.statusWin=StatusWindow()
self.statusWin.start(self.players)
self.buttonPad = buttonPad()
self.popup=False
self.popupWindow=None
#creating a thread and run its draw function on it
def run(self):
self.thread = Thread(target=self.draw)
self.thread.daemon = True
self.thread.start()
def open_popup(self,popup):
self.popup=True
self.popupWindow=popup
def draw(self):
# Initialise screen
pygame.init()
os.environ['SDL_VIDEO_WINDOW_POS'] = "{},{}".format(50,20) # x,y position of the screen
screen = pygame.display.set_mode((1025, 700)) #witdth and height
pygame.display.set_caption('Monopoly')
# Fill background
background = pygame.Surface(screen.get_size())
background = background.convert()
clock = pygame.time.Clock()
#initate the tokens for players
token_list = []
for p in self.players:
token_list.append(pygame.image.load(TOKENS[p.token_index]).convert_alpha())
# Event loop
while 1:
clock.tick(60) #FPS
if not self.popup:
brd_img = pygame.image.load("images//monopoly.png")
brd_img = brd_img.convert()
for event in pygame.event.get():
self.buttonPad.handle_event(event)
if event.type == QUIT or self.quit:
pygame.quit()
os.kill(os.getpid(),0)
background.fill((50, 50, 50))
background = self.console.draw(background) # console
self.buttonPad.draw(background)
background = self.statusWin.draw(background) #status window
for block in self.board.blocks:
if not (block.color == RW_STATION or block.color == UTILITY or block.color == -1):
if block.hotel:
#draw hotel
h = pygame.image.load(BUILDINGS[0])
brd_img.blit(h, (block.position[0]-8,block.position[1]-5))
elif block.houses>=1:
#draw houses
h = pygame.image.load(BUILDINGS[block.houses])
brd_img.blit(h, (block.position[0]-8,block.position[1]-5))
#get players location on board
player_pos = []
for p in self.players:
player_pos.append(self.board.blocks[p.location].position)
#draw players
i = 0
check = []
for pos in player_pos:
for c in check:
if pos==c:
pos = (pos[0],pos[1]+25)
brd_img.blit(token_list[i], (pos[0]-15,pos[1]-10))
check.append(pos)
i += 1
background.blit(brd_img, (5,5))
screen.blit(background, (0, 0))
pygame.display.flip()
#popup
else:
for event in pygame.event.get():
if self.popupWindow!=None:
self.popupWindow.handle_event(event)
if event.type == QUIT or self.quit:
pygame.quit()
os.kill(os.getpid(),0)
if self.popupWindow!=None:
self.popupWindow.draw(screen)
pygame.display.flip()
def stop(self):
self.quit = True
def move_pawn(self,player,target_move):
while player.location!=target_move:
player.location=(player.location+1)%len(self.board.blocks)
time.sleep(0.25)
def prompt_commands(self, list_cmds):
return self.buttonPad.create_selection_menu(list_cmds)
def choose_from_options(self,actions,image=None):
i=0
self.buttons=[]
for name in actions.keys():
self.buttons.append(guiButton(name,(250+i//3*100, 65+(i%3)*50),actions[name],sizing=1.5))
i+=1
def check_click():
for control in self.buttons:
if control.clicked:
return True
return False
popup=PopupWindow('Choose',self.buttons,image,measures=((POPUP_TOP,POPUP_LEFT),(400,200)))
self.open_popup(popup)
while not check_click():
time.sleep(0.2)
self.popup=False
self.popupWindow=None
popup.close()
def create_trade_menu(self,players,image=None):
from gameClasses import Trader
trader=Trader(players[0],players[1])
self.buttons=[]
passb=guiButton('pass',(700//2+40,600-50),action=passf)
finishb=guiButton('finish',(700//2-40,600-50),action=trader.make_trade)
self.buttons.append(passb)
self.buttons.append(finishb)
def check_click():
if passb.clicked or finishb.clicked:
return True
return False
popup=TradeWindow(self.buttons,trader,players)
self.open_popup(popup)
while not check_click():
time.sleep(0.2)
self.popup=False
self.popupWindow=None
popup.close()
def choose_from_actions(self,actionsList,image=None,text='Choose',atexts=None):
try:
i=0
self.buttons=[]
margin=5
curr_x=20
curr_y=50
block_size=(95,150)
for action in actionsList:
if action.pic==None:
self.buttons.append(guiButton(action.name,(curr_x,curr_y),action=action.do_action))
else:
self.buttons.append(guiButton('',(curr_x,curr_y),action=action.do_action,image=action.pic))
if curr_x+block_size[0]<POPUP_SIZE[0]-margin:
curr_x=curr_x+block_size[0]
else:
curr_x=20
curr_y+=block_size[1]
self.buttons.append(guiButton('pass',(POPUP_SIZE[0]//2-40,POPUP_SIZE[1]-50),action=passf))
def check_click():
for control in self.buttons:
if control.clicked:
return True
return False
popup=PopupWindow(text,self.buttons,image,texts=atexts)
self.open_popup(popup)
while not check_click():
time.sleep(0.2)
self.popup=False
self.popupWindow=None
popup.close()
except:
print("Unexpected error:", sys.exc_info()[0])
raise
class buttonPad():
def __init__(self):
self.value=0
self.controls=[]
#replace prompt commands and prompt commands index
def create_selection_menu(self,options):
def set_value(value):
self.value=value
i=0
self.controls=[]
for option in options:
x=620+(i//3)*135
y=560+(i%3)*45
if len(str(option))>10:
self.controls.append(guiButton(str(option),(x,y),set_value,option,1.75,7))
else:
self.controls.append(guiButton(str(option),(x,y),set_value,option,1.75))
i+=1
self.value=0
while (self.value==0):
time.sleep(0.1)
print (self.value)
return self.value
def draw(self,surface):
if len(self.controls)>0:
for control in self.controls:
surface.blit(control,control.position)
return surface
def set_enabled(self, enable):
for control in self.controls:
control.set_enabled(enable)
#passing events from the main pygame thread(currently in gameWindow)
def handle_event(self,event):
for control in self.controls:
control.handle_event(event)
def passf():
pass
| unlicense | 1,911,387,531,245,233,000 | 38.322245 | 211 | 0.516834 | false | 3.689367 | false | false | false |
davidcusatis/horizon | openstack_dashboard/api/rest/nova.py | 6 | 17795 | # Copyright 2014, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API over the nova service.
"""
from django.http import HttpResponse
from django.template.defaultfilters import slugify
from django.utils import http as utils_http
from django.views import generic
from novaclient import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import json_encoder
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
@urls.register
class Keypairs(generic.View):
"""API for nova keypairs.
"""
url_regex = r'nova/keypairs/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of keypairs associated with the current logged-in
account.
The listing result is an object with property "items".
"""
result = api.nova.keypair_list(request)
return {'items': [u.to_dict() for u in result]}
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a keypair.
Create a keypair using the parameters supplied in the POST
application/json object. The parameters are:
:param name: the name to give the keypair
:param public_key: (optional) a key to import
This returns the new keypair object on success.
"""
if 'public_key' in request.DATA:
new = api.nova.keypair_import(request, request.DATA['name'],
request.DATA['public_key'])
else:
new = api.nova.keypair_create(request, request.DATA['name'])
return rest_utils.CreatedResponse(
'/api/nova/keypairs/%s' % utils_http.urlquote(new.name),
new.to_dict()
)
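    # Illustrative payloads (added sketch; the concrete values are assumptions,
    # the field names come from the docstring above):
    #   POST /api/nova/keypairs   {"name": "my-keypair"}
    #       -> generates a new keypair
    #   POST /api/nova/keypairs   {"name": "imported", "public_key": "ssh-rsa AAAA..."}
    #       -> imports the supplied public key
    # In both cases the CreatedResponse wrapper is expected to answer with the
    # serialized keypair and a location of /api/nova/keypairs/<name>.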
@urls.register
class Keypair(generic.View):
url_regex = r'nova/keypairs/(?P<keypair_name>.+)/$'
def get(self, request, keypair_name):
"""Creates a new keypair and associates it to the current project.
* Since the response for this endpoint creates a new keypair and
is not idempotent, it normally would be represented by a POST HTTP
request. However, this solution was adopted as it
would support automatic file download across browsers.
:param keypair_name: the name to associate the keypair to
:param regenerate: (optional) if set to the string 'true',
replaces the existing keypair with a new keypair
This returns the new keypair object on success.
"""
try:
regenerate = request.GET.get('regenerate') == 'true'
if regenerate:
api.nova.keypair_delete(request, keypair_name)
keypair = api.nova.keypair_create(request, keypair_name)
except exceptions.Conflict:
return HttpResponse(status=409)
except Exception:
return HttpResponse(status=500)
else:
response = HttpResponse(content_type='application/binary')
response['Content-Disposition'] = ('attachment; filename=%s.pem'
% slugify(keypair_name))
response.write(keypair.private_key)
response['Content-Length'] = str(len(response.content))
return response
@urls.register
class Services(generic.View):
"""API for nova services.
"""
url_regex = r'nova/services/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of nova services.
Will return HTTP 501 status code if the service_list extension is
not supported.
"""
if api.base.is_service_enabled(request, 'compute') \
and api.nova.extension_supported('Services', request):
result = api.nova.service_list(request)
return {'items': [u.to_dict() for u in result]}
else:
raise rest_utils.AjaxError(501, '')
@urls.register
class AvailabilityZones(generic.View):
"""API for nova availability zones.
"""
url_regex = r'nova/availzones/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of availability zones.
The following get parameters may be passed in the GET
request:
:param detailed: If this equals "true" then the result will
include more detail.
The listing result is an object with property "items".
"""
detailed = request.GET.get('detailed') == 'true'
result = api.nova.availability_zone_list(request, detailed)
return {'items': [u.to_dict() for u in result]}
@urls.register
class Limits(generic.View):
"""API for nova limits.
"""
url_regex = r'nova/limits/$'
@rest_utils.ajax(json_encoder=json_encoder.NaNJSONEncoder)
def get(self, request):
"""Get an object describing the current project limits.
Note: the Horizon API doesn't support any other project (tenant) but
the underlying client does...
The following get parameters may be passed in the GET
request:
:param reserved: This may be set to "true" but it's not
clear what the result of that is.
The result is an object with limits as properties.
"""
reserved = request.GET.get('reserved') == 'true'
result = api.nova.tenant_absolute_limits(request, reserved)
return result
@urls.register
class Servers(generic.View):
"""API over all servers.
"""
url_regex = r'nova/servers/$'
_optional_create = [
'block_device_mapping', 'block_device_mapping_v2', 'nics', 'meta',
'availability_zone', 'instance_count', 'admin_pass', 'disk_config',
'config_drive'
]
@rest_utils.ajax()
def get(self, request):
"""Get a list of servers.
The listing result is an object with property "items". Each item is
a server.
Example GET:
http://localhost/api/nova/servers
"""
servers = api.nova.server_list(request)[0]
return {'items': [s.to_dict() for s in servers]}
@rest_utils.ajax(data_required=True)
def post(self, request):
"""Create a server.
Create a server using the parameters supplied in the POST
application/json object. The required parameters as specified by
the underlying novaclient are:
:param name: The new server name.
:param source_id: The ID of the image to use.
:param flavor_id: The ID of the flavor to use.
:param key_name: (optional extension) name of previously created
keypair to inject into the instance.
:param user_data: user data to pass to be exposed by the metadata
server this can be a file type object as well or a
string.
:param security_groups: An array of one or more objects with a "name"
attribute.
Other parameters are accepted as per the underlying novaclient:
"block_device_mapping", "block_device_mapping_v2", "nics", "meta",
"availability_zone", "instance_count", "admin_pass", "disk_config",
"config_drive"
This returns the new server object on success.
"""
try:
args = (
request,
request.DATA['name'],
request.DATA['source_id'],
request.DATA['flavor_id'],
request.DATA['key_name'],
request.DATA['user_data'],
request.DATA['security_groups'],
)
except KeyError as e:
raise rest_utils.AjaxError(400, 'missing required parameter '
"'%s'" % e.args[0])
kw = {}
for name in self._optional_create:
if name in request.DATA:
kw[name] = request.DATA[name]
new = api.nova.server_create(*args, **kw)
return rest_utils.CreatedResponse(
'/api/nova/servers/%s' % utils_http.urlquote(new.id),
new.to_dict()
)
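    # Illustrative request body (added sketch; the values are assumptions, the
    # required field names are those listed in the docstring above):
    #   POST /api/nova/servers
    #   {
    #     "name": "test-server",
    #     "source_id": "<image id>",
    #     "flavor_id": "<flavor id>",
    #     "key_name": "my-keypair",
    #     "user_data": "",
    #     "security_groups": [{"name": "default"}],
    #     "instance_count": 1
    #   }
    # Optional keys such as "nics" or "availability_zone" are passed through to
    # novaclient unchanged (see _optional_create above).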
@urls.register
class Server(generic.View):
"""API for retrieving a single server
"""
url_regex = r'nova/servers/(?P<server_id>[^/]+|default)$'
@rest_utils.ajax()
def get(self, request, server_id):
"""Get a specific server
http://localhost/api/nova/servers/1
"""
return api.nova.server_get(request, server_id).to_dict()
@urls.register
class ServerMetadata(generic.View):
"""API for server metadata.
"""
url_regex = r'nova/servers/(?P<server_id>[^/]+|default)/metadata$'
@rest_utils.ajax()
def get(self, request, server_id):
"""Get a specific server's metadata
http://localhost/api/nova/servers/1/metadata
"""
return api.nova.server_get(request,
server_id).to_dict().get('metadata')
@rest_utils.ajax()
def patch(self, request, server_id):
"""Update metadata items for a server
http://localhost/api/nova/servers/1/metadata
"""
updated = request.DATA['updated']
removed = request.DATA['removed']
if updated:
api.nova.server_metadata_update(request, server_id, updated)
if removed:
api.nova.server_metadata_delete(request, server_id, removed)
@urls.register
class Extensions(generic.View):
"""API for nova extensions.
"""
url_regex = r'nova/extensions/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of extensions.
The listing result is an object with property "items". Each item is
an image.
Example GET:
http://localhost/api/nova/extensions
"""
result = api.nova.list_extensions(request)
return {'items': [e.to_dict() for e in result]}
@urls.register
class Flavors(generic.View):
"""API for nova flavors.
"""
url_regex = r'nova/flavors/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of flavors.
The listing result is an object with property "items". Each item is
a flavor. By default this will return the flavors for the user's
current project. If the user is admin, public flavors will also be
returned.
:param is_public: For a regular user, set to True to see all public
flavors. For an admin user, set to False to not see public flavors.
:param get_extras: Also retrieve the extra specs.
Example GET:
http://localhost/api/nova/flavors?is_public=true
"""
is_public = request.GET.get('is_public')
is_public = (is_public and is_public.lower() == 'true')
get_extras = request.GET.get('get_extras')
get_extras = bool(get_extras and get_extras.lower() == 'true')
flavors = api.nova.flavor_list(request, is_public=is_public,
get_extras=get_extras)
result = {'items': []}
for flavor in flavors:
d = flavor.to_dict()
if get_extras:
d['extras'] = flavor.extras
result['items'].append(d)
return result
@rest_utils.ajax(data_required=True)
def post(self, request):
flavor_access = request.DATA.get('flavor_access', [])
flavor_id = request.DATA['id']
is_public = not flavor_access
flavor = api.nova.flavor_create(request,
name=request.DATA['name'],
memory=request.DATA['ram'],
vcpu=request.DATA['vcpus'],
disk=request.DATA['disk'],
ephemeral=request
.DATA['OS-FLV-EXT-DATA:ephemeral'],
swap=request.DATA['swap'],
flavorid=flavor_id,
is_public=is_public
)
for project in flavor_access:
api.nova.add_tenant_to_flavor(
request, flavor.id, project.get('id'))
return rest_utils.CreatedResponse(
'/api/nova/flavors/%s' % flavor.id,
flavor.to_dict()
)
@urls.register
class Flavor(generic.View):
"""API for retrieving a single flavor
"""
url_regex = r'nova/flavors/(?P<flavor_id>[^/]+)/$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor
:param get_extras: Also retrieve the extra specs.
Example GET:
http://localhost/api/nova/flavors/1
"""
get_extras = self.extract_boolean(request, 'get_extras')
get_access_list = self.extract_boolean(request, 'get_access_list')
flavor = api.nova.flavor_get(request, flavor_id, get_extras=get_extras)
result = flavor.to_dict()
# Bug: nova API stores and returns empty string when swap equals 0
# https://bugs.launchpad.net/nova/+bug/1408954
if 'swap' in result and result['swap'] == '':
result['swap'] = 0
if get_extras:
result['extras'] = flavor.extras
if get_access_list and not flavor.is_public:
access_list = [item.tenant_id for item in
api.nova.flavor_access_list(request, flavor_id)]
result['access-list'] = access_list
return result
@rest_utils.ajax()
def delete(self, request, flavor_id):
api.nova.flavor_delete(request, flavor_id)
@rest_utils.ajax(data_required=True)
def patch(self, request, flavor_id):
flavor_access = request.DATA.get('flavor_access', [])
is_public = not flavor_access
# Grab any existing extra specs, because flavor edit is currently
# implemented as a delete followed by a create.
extras_dict = api.nova.flavor_get_extras(request, flavor_id, raw=True)
# Mark the existing flavor as deleted.
api.nova.flavor_delete(request, flavor_id)
# Then create a new flavor with the same name but a new ID.
# This is in the same try/except block as the delete call
# because if the delete fails the API will error out because
# active flavors can't have the same name.
flavor = api.nova.flavor_create(request,
name=request.DATA['name'],
memory=request.DATA['ram'],
vcpu=request.DATA['vcpus'],
disk=request.DATA['disk'],
ephemeral=request
.DATA['OS-FLV-EXT-DATA:ephemeral'],
swap=request.DATA['swap'],
flavorid=flavor_id,
is_public=is_public
)
for project in flavor_access:
api.nova.add_tenant_to_flavor(
request, flavor.id, project.get('id'))
if extras_dict:
api.nova.flavor_extra_set(request, flavor.id, extras_dict)
def extract_boolean(self, request, name):
bool_string = request.GET.get(name)
return bool(bool_string and bool_string.lower() == 'true')
@urls.register
class FlavorExtraSpecs(generic.View):
"""API for managing flavor extra specs
"""
url_regex = r'nova/flavors/(?P<flavor_id>[^/]+)/extra-specs/$'
@rest_utils.ajax()
def get(self, request, flavor_id):
"""Get a specific flavor's extra specs
Example GET:
http://localhost/api/nova/flavors/1/extra-specs
"""
return api.nova.flavor_get_extras(request, flavor_id, raw=True)
@rest_utils.ajax(data_required=True)
def patch(self, request, flavor_id):
"""Update a specific flavor's extra specs.
This method returns HTTP 204 (no content) on success.
"""
if request.DATA.get('removed'):
api.nova.flavor_extra_delete(
request, flavor_id, request.DATA.get('removed')
)
api.nova.flavor_extra_set(
request, flavor_id, request.DATA['updated']
)
@urls.register
class AggregateExtraSpecs(generic.View):
"""API for managing aggregate extra specs
"""
url_regex = r'nova/aggregates/(?P<aggregate_id>[^/]+)/extra-specs/$'
@rest_utils.ajax()
def get(self, request, aggregate_id):
"""Get a specific aggregate's extra specs
Example GET:
http://localhost/api/nova/flavors/1/extra-specs
"""
return api.nova.aggregate_get(request, aggregate_id).metadata
@rest_utils.ajax(data_required=True)
def patch(self, request, aggregate_id):
"""Update a specific aggregate's extra specs.
This method returns HTTP 204 (no content) on success.
"""
updated = request.DATA['updated']
if request.DATA.get('removed'):
for name in request.DATA.get('removed'):
updated[name] = None
api.nova.aggregate_set_metadata(request, aggregate_id, updated)
| apache-2.0 | -1,027,541,799,600,478,800 | 33.823875 | 79 | 0.583872 | false | 4.262275 | false | false | false |
olibre/CodingGoodPractices | python/samples/long_file_names_v2.py | 2 | 1553 | #!/usr/bin/env python3
import argparse
import collections
import pathlib # available since Python 3.4 (else install it using pip/pipenv/…)
import sys
# Parse command line arguments
parser = argparse.ArgumentParser(
prog="long-file-names",
description="For files having same size, print the file names longer than the shortest one.",
)
parser.add_argument("--version", action="version", version="%(prog)s v2")
parser.add_argument("file_or_directory", type=str, nargs="+")
args = parser.parse_args()
# Collect regular files
regular_file_paths = set()
for pathname in args.file_or_directory:
path = pathlib.Path(pathname)
if path.is_file():
regular_file_paths.add(path)
elif path.is_dir():
for sub_path in path.glob("**/*"):
if sub_path.is_file():
regular_file_paths.add(sub_path)
# Sort files by file size
sorted_file_paths = collections.defaultdict(set)
min_name_lengths = collections.defaultdict(lambda: int(sys.maxsize))
for path in regular_file_paths:
sorted_file_paths[path.stat().st_size].add(path)
previous_min_length = min_name_lengths[path.stat().st_size]
if len(path.name) < previous_min_length:
min_name_lengths[path.stat().st_size] = len(path.name)
# For same content size, print all file paths except the one having the shortest file name
for file_size, file_paths in sorted_file_paths.items():
min_filename_length = min_name_lengths[file_size]
for path in file_paths:
if len(path.name) > min_filename_length:
print(path)
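# Example invocation (added sketch; the paths are made up):
#   ./long_file_names_v2.py ~/Downloads photos/
# For every group of regular files that share the same byte size, the script
# prints each path whose file name is strictly longer than the shortest name
# in that group.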
| cc0-1.0 | -2,579,672,594,505,361,000 | 35.928571 | 97 | 0.694391 | false | 3.477578 | false | false | false |
tectronics/3account | imports/ofxml2flat.py | 1 | 16364 | ########################################################################
# Copyright (C) 2007,8,9 Ehud Ben-Reuven
# [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation version 2.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
########################################################################
""" ofxml2flat OFX-file flat-file
convert XML OFX file to flat lines.
"""
# TODO support INVOOLIST
from xml.dom.minidom import parse
import sys
from n3tagvalue import stagvalue,n3header,n3secid
import re
fout =sys.stdout # Where to write to. Modified by flat
hashing=False
def p(pnode,pre=''):
"""Debug utility"""
#print >>fout,"%s:%s\n" % (pre,pnode.nodeType)
nodelist=pnode.childNodes
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
print "%s%s\n" % (pre,node.data.strip())
elif node.nodeType == node.ELEMENT_NODE:
print "%s%s:\n" % (pre,node.tagName)
p(node,'+'+pre)
def get(s,tag,mandatory=False):
TAG=tag.upper().strip()
try:
VAL=s.getElementsByTagName(TAG)[0].firstChild.data.strip()
except:
if mandatory:
raise Exception('Missing mandatory tag %s'%tag)
return
return VAL
def sprt_tag(s,tag,mandatory=True,scale=1,ntag=None):
VAL=get(s,tag,mandatory)
if ntag:
ntag=ntag.upper()
else:
ntag=tag.upper()
if VAL:
if scale!=1:
return stagvalue(ntag,"%f"%(float(VAL)*scale))
else:
return stagvalue(ntag,VAL)
def prt_tag(s,tags,mandatory=True,ntag=None,scale=1,scale_assetclass=None):
mandatory=False # TODO, remove this!!!
if isinstance(tags,basestring):
tags=[tags]
if scale_assetclass in ['DEBT']:
scale=scale/100.0
if scale_assetclass in ['OPT']:
scale=scale*100.0
for tag in tags:
v=sprt_tag(s,tag,mandatory=mandatory,scale=scale,ntag=ntag)
if v:
print >>fout,v,
def prt_secid(s,acctid=None):
uniqueid=s.getElementsByTagName('UNIQUEID')[0].firstChild.data.strip()
uniqueidtype=s.getElementsByTagName('UNIQUEIDTYPE')[0].firstChild.data.strip()
print >>fout,n3secid(uniqueidtype,uniqueid),
pacctid(acctid)
def pacctid(acctid):
if acctid:
print >>fout,stagvalue('ACCTID',acctid,hashing=hashing),
def prt_cashacc(acctid,currency,p=None,accttype='CHECKING'):
if p:
try:
accttype=get(p,'SUBACCTFUND',mandatory=True)
except:
accttype=get(p,'ACCTTYPE',mandatory=True)
print >>fout,n3secid('CHECKING',currency),
pacctid(acctid)
print >>fout,stagvalue("ACCTTYPE",accttype),
if accttype in ['CHECKING', 'CASH']:
print >>fout,stagvalue('ASSETCLASS','CHECKING'),
def flat(fin,_fout,context=None,_hashing=False):
global fout,hashing
fout=_fout
hashing=_hashing
if isinstance(fin,str):
fin=open(fin)
else:
fin.seek(0)
n3header(fout,"$Id$"[1:-1],context)
dom = parse(fin) # parse an XML file by name
    # Start with information on assets. In the examples I have of OFX files, this information
    # appeared at the end of the file, but it should be processed first so that later on, when
    # processing holdings, it is easier to locate the relevant asset.
seclist=dom.getElementsByTagName('SECLIST')
if seclist:
for s in seclist[0].childNodes:
print >>fout,"a3:flatLine [ ",
if s.nodeType != s.ELEMENT_NODE:
continue # Skip comments
prt_secid(s)
ticker=get(s,'TICKER')
if ticker:
ticker=ticker.split()[0] # Some banks put their FIID after the ticker
print >>fout,stagvalue('TICKER',ticker),
v=get(s,'FIID')
if v:
print >>fout,stagvalue('FIID',v),
v = get(dom,'BROKERID')
if v:
print >>fout,stagvalue('BROKERID',v)
else:
v=get(dom,'ORG')
                    print "FIID found but can't find BROKERID, using ORG instead"
if v:
print >>fout,stagvalue('ORG',v)
if get(s,'YIELD'):
if get(s,'DTYIELDASOF')!=get(s,'DTASOF'):
raise Exception("price and yield dates dont match")
prt_tag(s,['SECNAME','DTASOF'])
assetclass=s.tagName[:-4]
print >>fout,stagvalue("ASSETCLASS",assetclass),
prt_tag(s,'UNITPRICE',scale_assetclass=assetclass) # TODO check if it is better to scale the UNITS instead (looks like DEBT->UNITPRICE OPT->UNITS)
prt_tag(s,['ASSETCLASS','PARVALUE','DEBTTYPE','DEBTCLASS','DTMAT',
'COUPONRT','COUPONFREQ','MFTYPE','STOCKTYPE','YIELD'],
mandatory=False)
print >>fout,"];"
# Credit card accounts
stmttrnrs=dom.getElementsByTagName('CCSTMTTRNRS')
if stmttrnrs:
for stmtrs in stmttrnrs:
print >>fout,"a3:flatLine [ ",
acctid=get(stmtrs,'ACCTID')
curdef=get(stmtrs,'CURDEF')
            # bind together all the information about the cash holding.
prt_cashacc(acctid,curdef,accttype='CREDITCRD')
# info on the account
            # bind together all the information about the account.
            # The DTSTART, DTEND are bound to the unique pair <ACCTID,FILE> but the FILE is not
            # specified because it is implicitly assumed everywhere in the line file
prt_tag(stmtrs,['CURDEF',
#'DTSTART','DTEND' # FIXME there are too many transaction outside range or the range is too big to be full
])
prt_tag(dom,['ORG','FID'],mandatory=False)
# info on the cash holding
l=stmtrs.getElementsByTagName('LEDGERBAL')[0]
prt_tag(l,'DTASOF')
prt_tag(l,'BALAMT',ntag='UNITS') # Ignore AVAILBAL
print >>fout,"];"
# Generate information on all transactions
for t in stmtrs.getElementsByTagName('STMTTRN'):
print >>fout,"a3:flatLine [ ",
# bind all information on transaction with the cash holding.
prt_cashacc(acctid,curdef,accttype='CREDITCRD')
prt_tag(t,['TRNTYPE','FITID'])
prt_tag(t,'TRNAMT',ntag='UNITS')
prt_tag(t,'DTPOSTED',ntag='DTSETTLE')
prt_tag(t,'DTAVAIL',mandatory=False)
prt_tag(t,['DTUSER','CHECKNUM','REFNUM','NAME','MEMO'],mandatory=False)
print >>fout,"];"
# Checking accounts
stmttrnrs=dom.getElementsByTagName('STMTTRNRS')
if stmttrnrs:
for stmtrs in stmttrnrs:
print >>fout,"a3:flatLine [ ",
acctid=get(stmtrs,'ACCTID')
curdef=get(stmtrs,'CURDEF')
            # bind together all the information about the cash holding.
prt_cashacc(acctid,curdef,stmtrs)
# info on the account
            # bind together all the information about the account.
            # The DTSTART, DTEND are bound to the unique pair <ACCTID,FILE> but the FILE is not
            # specified because it is implicitly assumed everywhere in the line file
prt_tag(stmtrs,['CURDEF',
#'DTSTART','DTEND' # FIXME check if/when this can be done
])
prt_tag(dom,['ORG','FID'],mandatory=False)
# info on the cash holding
l=stmtrs.getElementsByTagName('LEDGERBAL')[0]
prt_tag(l,'DTASOF')
prt_tag(l,'BALAMT',ntag='UNITS') # Ignore AVAILBAL
print >>fout,"];"
# Generate information on all transactions
for t in stmtrs.getElementsByTagName('STMTTRN'):
print >>fout,"a3:flatLine [ ",
# bind all information on transaction with the cash holding.
prt_cashacc(acctid,curdef,stmtrs)
prt_tag(t,['TRNTYPE','FITID'])
prt_tag(t,'TRNAMT',ntag='UNITS')
prt_tag(t,'DTPOSTED',ntag='DTSETTLE')
prt_tag(t,'DTAVAIL',mandatory=False)
prt_tag(t,['DTUSER','CHECKNUM','REFNUM','NAME','MEMO'],mandatory=False)
print >>fout,"];"
# Investment accounts
invstmttrnrs=dom.getElementsByTagName('INVSTMTTRNRS')
if invstmttrnrs:
for invstmtrs in invstmttrnrs:
print >>fout,"a3:flatLine [ ",
# Every line should show the ACCTID
acctid=get(invstmtrs,'ACCTID')
curdef=get(invstmtrs,'CURDEF')
            # bind together all the information about the account.
            # The DTSTART, DTEND are bound to the unique pair <ACCTID,FILE> but the FILE is not
            # specified because it is implicitly assumed everywhere in the line file
prt_tag(invstmtrs,['CURDEF','ACCTID',
#'DTSTART','DTEND' # Fixme check when it can be done
])
prt_tag(dom,['ORG','FID'],mandatory=False)
print >>fout,"];"
# generate statement line for CASH account
print >>fout,"a3:flatLine [ ",
prt_cashacc(acctid,curdef,accttype='CASH') # Make this match the CASH accounts used in investment transactions
prt_tag(invstmtrs,'AVAILCASH',ntag='UNITS')
prt_tag(invstmtrs,'DTASOF')
print >>fout,"];"
# Dump current portfolio of the account
for p in invstmtrs.getElementsByTagName('INVPOS'):
print >>fout,"a3:flatLine [ ",
prt_secid(p,acctid)
prt_tag(p,'DTPRICEASOF',ntag='DTASOF')
assetclass=p.parentNode.tagName[3:]
print >>fout,stagvalue("ASSETCLASS",assetclass),
prt_tag(p,'UNITPRICE',scale_assetclass=assetclass)
prt_tag(p,['POSTYPE','UNITS','MKTVAL'])
prt_tag(p,'MEMO',ntag='POSMEMO',mandatory=False) # POSMEMO in order not to confuse with transaction's MEMO
print >>fout,"];"
# Dump transactions
for trn in ['INVBUY','INVSELL']:
for p in invstmtrs.getElementsByTagName(trn):
print >>fout,"a3:flatLine [ ",
prt_secid(p,acctid)
prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','UNITS','COMMISSION','FEES','TOTAL'])
if trn=='INVBUY':
print >>fout,stagvalue("TRNTYPE","BUY"),
assetclass = p.parentNode.tagName[3:]
else:
print >>fout,stagvalue("TRNTYPE","SELL"),
assetclass = p.parentNode.tagName[4:]
print >>fout,stagvalue("ASSETCLASS",assetclass),
prt_tag(p,'UNITPRICE',scale_assetclass=assetclass)
prt_tag(p,['MARKUP','MARKDOWN'],mandatory=False,
scale_assetclass=assetclass)
accrdint=get(p.parentNode,'ACCRDINT') # ACCRDINT is outside the INVBUY/SELL structure.
if accrdint:
print >>fout,stagvalue('ACCRDINT',accrdint),
print >>fout,"];"
# generate line for current account
print >>fout,"a3:flatLine [ ",
prt_cashacc(acctid,curdef,p)
prt_tag(p,'FITID',ntag='RELFITID')
prt_tag(p,'DTSETTLE')
prt_tag(p,'TOTAL',ntag='UNITS')
if trn=='INVBUY':
print >>fout,stagvalue('TRNTYPE','DEBIT'),
else:
print >>fout,stagvalue('TRNTYPE','CREDIT'),
print >>fout,"];"
# ACCRDINT is real money when you buy/sell a debit but it does not appear in TOTAL
if accrdint:
print >>fout,"a3:flatLine [ ",
# generate line for current account
prt_cashacc(acctid,curdef,p)
prt_tag(p,'FITID',ntag='RELFITID')
prt_tag(p,'DTSETTLE')
print >>fout,stagvalue('UNITS',accrdint),
if trn=='INVBUY':
print >>fout,stagvalue('TRNTYPE','DEBIT'),
else:
print >>fout,stagvalue('TRNTYPE','CREDIT'),
print >>fout,stagvalue('PAYEEID','ACCRDINT'), # The money is not coming from the Asset issuer but from the side selling/buying the asset to us.
print >>fout,"];"
for p in invstmtrs.getElementsByTagName('INCOME'):
print >>fout,"a3:flatLine [ ",
prt_secid(p,acctid)
prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','TOTAL'])
prt_tag(p,'INCOMETYPE',ntag='TRNTYPE')
print >>fout,"];"
# generate line for current account
print >>fout,"a3:flatLine [ ",
prt_cashacc(acctid,curdef,p)
prt_tag(p,'FITID',ntag='RELFITID')
prt_tag(p,'DTSETTLE')
prt_tag(p,'TOTAL',ntag='UNITS')
print >>fout,stagvalue('TRNTYPE','CREDIT'),
print >>fout,"];"
for p in invstmtrs.getElementsByTagName('INVEXPENSE'):
print >>fout,"a3:flatLine [ ",
prt_secid(p,acctid)
prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO'])
prt_tag(p,'TOTAL',scale=-1)
memo=get(p,'MEMO')
if re.search(r"\bTAX\b",memo,re.IGNORECASE):
prt_tag(p,'TOTAL',ntag='TAXES')
print >>fout,stagvalue("TRNTYPE","TAX"),
elif re.search(r"\bFEE\b",memo,re.IGNORECASE):
prt_tag(p,'TOTAL',ntag='FEES')
print >>fout,stagvalue("TRNTYPE","FEE"),
else:
print "Unknown expense",memo
prt_tag(p,'TOTAL',ntag='COMMISSION')
print >>fout,stagvalue("TRNTYPE","EXPENSE"),
print >>fout,"];"
# generate line for current account
print >>fout,"a3:flatLine [ ",
prt_cashacc(acctid,curdef,p)
prt_tag(p,'FITID',ntag='RELFITID')
prt_tag(p,'DTSETTLE')
prt_tag(p,'TOTAL',ntag='UNITS',scale=-1)
print >>fout,stagvalue('TRNTYPE','DEBIT'),
print >>fout,"];"
for p in invstmtrs.getElementsByTagName('TRANSFER'):
print >>fout,"a3:flatLine [ ",
prt_secid(p,acctid)
prt_tag(p,['FITID','DTTRADE','DTSETTLE','MEMO','UNITS'])
prt_tag(p,'TFERACTION',ntag='TRNTYPE')
print >>fout,"];"
# note that TRANSFER does not have a SUBACCTFUND to balance with
for p in invstmtrs.getElementsByTagName('INVBANKTRAN'):
print >>fout,"a3:flatLine [ ",
prt_cashacc(acctid,curdef,p)
prt_tag(p,['TRNTYPE','FITID','NAME','MEMO'])
prt_tag(p,'DTPOSTED',ntag='DTSETTLE')
prt_tag(p,'TRNAMT',ntag='UNITS')
print >>fout,"];"
dom.unlink()
print >>fout,"."
| gpl-2.0 | -5,259,371,252,796,897,000 | 43.832877 | 167 | 0.530188 | false | 3.760974 | false | false | false |
gizmag/django-generic-follow | generic_follow/model_mixins.py | 2 | 1513 | from django.contrib.contenttypes.models import ContentType
from .models import Follow
class UserFollowMixin(object):
def get_follow_set(self, model=None):
qs = Follow.objects.filter(
user=self
).prefetch_related('target')
if model:
model_type = ContentType.objects.get_for_model(model)
qs = qs.filter(target_content_type=model_type)
return [x.target for x in qs]
def follow(self, item):
item_type = ContentType.objects.get_for_model(item)
Follow.objects.get_or_create(
user=self,
target_content_type=item_type,
target_object_id=item.pk
)
def unfollow(self, item):
item_type = ContentType.objects.get_for_model(item)
Follow.objects.filter(
user=self,
target_content_type=item_type,
target_object_id=item.pk
).delete()
def is_following(self, item):
item_type = ContentType.objects.get_for_model(item)
return Follow.objects.filter(
user=self,
target_content_type=item_type,
target_object_id=item.pk
).exists()
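# Illustrative usage, assuming a User model that mixes in UserFollowMixin and some
# followable instance `article`:
#   user.follow(article)
#   user.is_following(article)  # -> True
#   user.unfollow(article)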
class TargetFollowMixin(object):
def get_follower_set(self):
content_type = ContentType.objects.get_for_model(self)
follows = Follow.objects.filter(
target_content_type=content_type,
target_object_id=self.id,
).prefetch_related('user')
return [x.user for x in follows]
| mit | -430,271,871,194,127,400 | 27.54717 | 65 | 0.604759 | false | 3.88946 | false | false | false |
GoogleCloudPlatform/solutions-cloud-orchestrate | cli/src/orchestrate/commands/broker/machines/unassign.py | 1 | 3884 | # python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unassign machines from users in connection broker.
Usage: orchestrate broker machines unassign <DEPLOYMENT> <MACHINE>
"""
import logging
import optparse
from orchestrate import base
from orchestrate.systems.teradici import camapi
log = logging.getLogger(__name__)
class Command(base.OrchestrateCommand):
"""Unassign machines from users in connection broker.
"""
@property
def description(self):
return """
Unassign all users from the given machines in the connection broker.
Usage: orchestrate broker machines unassign <DEPLOYMENT> <MACHINE1> [ <MACHINE2>[ ...]]
""".lstrip()
@property
def defaults(self):
"""Returns default option values."""
return dict(
deployment=None,
)
@property
def options(self):
"""Returns command parser options."""
options = [
optparse.Option('--deployment', help=(
'Deployment name. Uses project name by default if not explicitly'
' provided')),
]
return options
def run(self, options, arguments):
"""Executes command.
Args:
options: Command-line options.
arguments: Command-line positional arguments
Returns:
True if successful. False, otherwise.
"""
log.debug('broker machines unassign %(options)s %(arguments)s', dict(
options=options, arguments=arguments))
if len(arguments) < 1:
log.error('Expected at least one machine name.')
return False
machine_names = arguments
deployment_name = options.deployment or options.project
    return self.unassign(options.project, deployment_name, machine_names)
def unassign(self, project, deployment_name, machine_names):
"""Unassign all users from given machines.
Args:
project: GCP project.
deployment_name: Deployment.
machine_names: Machine names.
Returns:
True if it succeeded. False otherwise.
"""
log.debug('Locating deployment: %s', deployment_name)
cam = camapi.CloudAccessManager(project=project,
scope=camapi.Scope.DEPLOYMENT)
deployment = cam.deployments.get(deployment_name)
# Get machine ids
all_machines = []
for machine_name in machine_names:
log.debug('Locating machine in CAM: %s', machine_name)
machines = cam.machines.get(deployment, machineName=machine_name)
if machines:
machine = machines[0]
log.debug('Found machine %s with ID %s', machine_name,
machine['machineId'])
all_machines.append(machine)
else:
message = (
'Could not locate machine {machine_name}. Check whether it exists'
' and that it was assigned to users. Skipping for now.'
).format(machine_name=machine_name)
log.warning(message)
# Find all entitlements for all machine ids collected and remove them
for machine in all_machines:
log.info(
'Locating entitlements for machine %(machineName)s %(machineId)s',
machine)
entitlements = cam.machines.entitlements.get(
deployment, machineName=machine['machineName'])
for entitlement in entitlements:
log.info('Removing entitlement %(entitlementId)s', entitlement)
cam.machines.entitlements.delete(entitlement)
return True
| apache-2.0 | -230,905,375,245,548,830 | 30.072 | 87 | 0.67585 | false | 4.263447 | false | false | false |
failys/CAIRIS | cairis/core/RoleEnvironmentProperties.py | 2 | 1430 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from .EnvironmentProperties import EnvironmentProperties
__author__ = 'Shamal Faily'
class RoleEnvironmentProperties(EnvironmentProperties):
def __init__(self,environmentName,responses,countermeasures,goals,requirements):
EnvironmentProperties.__init__(self,environmentName)
self.theResponses = responses
self.theCountermeasures = countermeasures
self.theGoals = goals
self.theRequirements = requirements
def responses(self): return self.theResponses
def countermeasures(self): return self.theCountermeasures
def goals(self): return self.theGoals
def requirements(self): return self.theRequirements
| apache-2.0 | -7,146,271,404,292,756,000 | 42.333333 | 82 | 0.776224 | false | 4.144928 | false | false | false |
rg3915/spark | spark/events/models.py | 1 | 2745 | from django.db import models
from django.contrib.auth.models import User
from django.shortcuts import resolve_url as r
from spark.activities.models import Activity
class Event(models.Model):
user = models.ForeignKey(User)
title = models.CharField('título', max_length=200)
date_start = models.DateField('data')
start = models.TimeField('início', null=True, blank=True)
description = models.TextField('descrição', blank=True)
address = models.TextField('local', null=True, blank=True)
likes = models.IntegerField(default=0)
class Meta:
ordering = ('date_start',)
verbose_name = 'evento'
verbose_name_plural = 'eventos'
def __str__(self):
return self.title
@staticmethod
def get_events(from_event=None):
if from_event is not None:
events = Event.objects.filter(id__lte=from_event)
else:
events = Event.objects.all()
return events
@staticmethod
def get_events_after(event):
events = Event.objects.filter(id__gt=event)
return events
def calculate_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk).count()
self.likes = likes
self.save()
return self.likes
def get_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk)
return likes
def get_likers(self):
likes = self.get_likes()
likers = []
for like in likes:
likers.append(like.user)
return likers
class Talk(models.Model):
title = models.CharField('título', max_length=200)
date_start = models.DateField('data')
start = models.TimeField('início', blank=True, null=True)
description = models.TextField('descrição', blank=True)
speakers = models.ManyToManyField(
'Speaker', verbose_name='palestrantes', blank=True)
# objects = PeriodManager()
class Meta:
ordering = ['start']
verbose_name = 'palestra'
verbose_name_plural = 'palestras'
def __str__(self):
return self.title
class Speaker(models.Model):
name = models.CharField('nome', max_length=255)
slug = models.SlugField('slug')
photo = models.URLField('foto')
website = models.URLField('website', blank=True)
description = models.TextField('descrição', blank=True)
class Meta:
ordering = ('name',)
verbose_name = 'palestrante'
verbose_name_plural = 'palestrantes'
def __str__(self):
return self.name
def get_absolute_url(self):
return r('speaker_detail', slug=self.slug)
| mit | -7,333,217,467,340,034,000 | 28.728261 | 68 | 0.619744 | false | 3.835905 | false | false | false |
henryhallam/piksi_tools | piksi_tools/console/console.py | 1 | 12523 | #!/usr/bin/env python
# Copyright (C) 2011-2014 Swift Navigation Inc.
# Contact: Fergus Noble <[email protected]>
#
# This source is subject to the license found in the file 'LICENSE' which must
# be be distributed together with this source. All other rights reserved.
#
# THIS CODE AND INFORMATION IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND,
# EITHER EXPRESSED OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND/OR FITNESS FOR A PARTICULAR PURPOSE.
import os
import struct
import sys
import signal
from piksi_tools import serial_link
import sbp.client.handler
from sbp.logging import SBP_MSG_PRINT
from sbp.piksi import SBP_MSG_RESET
from sbp.client.drivers.pyserial_driver import PySerialDriver
from sbp.client.drivers.pyftdi_driver import PyFTDIDriver
from sbp.ext_events import *
from piksi_tools.version import VERSION as CONSOLE_VERSION
# Shut chaco up for now
import warnings
warnings.simplefilter(action = "ignore", category = FutureWarning)
def get_args():
"""
Get and parse arguments.
"""
import argparse
parser = argparse.ArgumentParser(description='Swift Nav Console.')
parser.add_argument('-p', '--port', nargs=1, default=[None],
help='specify the serial port to use.')
parser.add_argument('-b', '--baud', nargs=1, default=[serial_link.SERIAL_BAUD],
help='specify the baud rate to use.')
parser.add_argument("-v", "--verbose",
help="print extra debugging information.",
action="store_true")
parser.add_argument("-l", "--log",
action="store_true",
help="serialize SBP messages to log file.")
parser.add_argument("-o", "--log-filename",
default=[serial_link.LOG_FILENAME], nargs=1,
help="file to log output to.")
parser.add_argument("-r", "--reset",
action="store_true",
help="reset device after connection.")
parser.add_argument("-u", "--update",
help="don't prompt about firmware/console updates.",
action="store_false")
parser.add_argument("-f", "--ftdi",
help="use pylibftdi instead of pyserial.",
action="store_true")
parser.add_argument('-t', '--toolkit', nargs=1, default=[None],
help="specify the TraitsUI toolkit to use, either 'wx' or 'qt4'.")
parser.add_argument('-e', '--expert', action='store_true',
help="Show expert settings.")
return parser.parse_args()
args = get_args()
port = args.port[0]
baud = args.baud[0]
log_filename = args.log_filename[0]
# Toolkit
from traits.etsconfig.api import ETSConfig
if args.toolkit[0] is not None:
ETSConfig.toolkit = args.toolkit[0]
else:
ETSConfig.toolkit = 'qt4'
# Logging
import logging
logging.basicConfig()
from traits.api import Str, Instance, Dict, HasTraits, Int, Button, List
from traitsui.api import Item, Label, View, HGroup, VGroup, VSplit, HSplit, Tabbed, \
InstanceEditor, EnumEditor, ShellEditor, Handler
# When bundled with pyInstaller, PythonLexer can't be found. The problem is
# pygments.lexers is doing some crazy magic to load up all of the available
# lexers at runtime which seems to break when frozen.
#
# The horrible workaround is to load the PythonLexer class explicitly and then
# manually insert it into the pygments.lexers module.
from pygments.lexers.agile import PythonLexer
import pygments.lexers
pygments.lexers.PythonLexer = PythonLexer
try:
import pygments.lexers.c_cpp
except ImportError:
pass
# These imports seem to be required to make pyinstaller work?
# (usually traitsui would load them automatically)
if ETSConfig.toolkit == 'qt4':
import pyface.ui.qt4.resource_manager
import pyface.ui.qt4.python_shell
from pyface.image_resource import ImageResource
if getattr(sys, 'frozen', False):
# we are running in a |PyInstaller| bundle
basedir = sys._MEIPASS
os.chdir(basedir)
else:
# we are running in a normal Python environment
basedir = os.path.dirname(__file__)
icon = ImageResource('icon', search_path=['images', os.path.join(basedir, 'images')])
from output_stream import OutputStream
from tracking_view import TrackingView
from almanac_view import AlmanacView
from solution_view import SolutionView
from baseline_view import BaselineView
from observation_view import ObservationView
from sbp_relay_view import SbpRelayView
from system_monitor_view import SystemMonitorView
from settings_view import SettingsView
from update_view import UpdateView
from enable.savage.trait_defs.ui.svg_button import SVGButton
CONSOLE_TITLE = 'Piksi Console, Version: ' + CONSOLE_VERSION
class ConsoleHandler(Handler):
"""
Handler that updates the window title with the device serial number
This Handler is used by Traits UI to manage making changes to the GUI in
response to changes in the underlying class/data.
"""
def object_device_serial_changed(self, info):
"""
Update the window title with the device serial number.
This is a magic method called by the handler in response to any changes in
the `device_serial` variable in the underlying class.
"""
if info.initialized:
info.ui.title = CONSOLE_TITLE + ' : ' + info.object.device_serial
class SwiftConsole(HasTraits):
link = Instance(sbp.client.handler.Handler)
console_output = Instance(OutputStream)
python_console_env = Dict
device_serial = Str('')
a = Int
b = Int
tracking_view = Instance(TrackingView)
solution_view = Instance(SolutionView)
baseline_view = Instance(BaselineView)
observation_view = Instance(ObservationView)
sbp_relay_view = Instance(SbpRelayView)
observation_view_base = Instance(ObservationView)
system_monitor_view = Instance(SystemMonitorView)
settings_view = Instance(SettingsView)
update_view = Instance(UpdateView)
paused_button = SVGButton(
label='', tooltip='Pause console update', toggle_tooltip='Resume console update', toggle=True,
filename=os.path.join(os.path.dirname(__file__), 'images', 'iconic', 'pause.svg'),
toggle_filename=os.path.join(os.path.dirname(__file__), 'images', 'iconic', 'play.svg'),
width=8, height=8
)
clear_button = SVGButton(
label='', tooltip='Clear console buffer',
filename=os.path.join(os.path.dirname(__file__), 'images', 'iconic', 'x.svg'),
width=8, height=8
)
view = View(
VSplit(
Tabbed(
Item('tracking_view', style='custom', label='Tracking'),
Item('solution_view', style='custom', label='Solution'),
Item('baseline_view', style='custom', label='Baseline'),
VSplit(
Item('observation_view', style='custom', show_label=False),
Item('observation_view_base', style='custom', show_label=False),
label='Observations',
),
Item('settings_view', style='custom', label='Settings'),
Item('update_view', style='custom', label='Firmware Update'),
Tabbed(
Item('system_monitor_view', style='custom', label='System Monitor'),
Item('sbp_relay_view', label='SBP Relay', style='custom',
show_label=False),
Item(
'python_console_env', style='custom',
label='Python Console', editor=ShellEditor()),
label='Advanced',
),
show_labels=False
),
VGroup(
HGroup(
Item('', show_label=False),
Item('paused_button', show_label=False),
Item('clear_button', show_label=False),
Item('', label='Console Log', emphasized=True),
),
Item(
'console_output',
style='custom',
editor=InstanceEditor(),
height=0.3,
show_label=False,
),
)
),
icon = icon,
resizable = True,
width = 1000,
height = 600,
handler = ConsoleHandler(),
title = CONSOLE_TITLE
)
def print_message_callback(self, sbp_msg):
try:
self.console_output.write(sbp_msg.payload.encode('ascii', 'ignore'))
except UnicodeDecodeError:
print "Critical Error encoding the serial stream as ascii."
def ext_event_callback(self, sbp_msg):
e = MsgExtEvent(sbp_msg)
print 'External event: %s edge on pin %d at wn=%d, tow=%d, time qual=%s' % (
"Rising" if (e.flags & (1<<0)) else "Falling", e.pin, e.wn, e.tow,
"good" if (e.flags & (1<<1)) else "unknown")
def _paused_button_fired(self):
self.console_output.paused = not self.console_output.paused
def _clear_button_fired(self):
self.console_output.reset()
def __init__(self, link, update):
self.console_output = OutputStream()
sys.stdout = self.console_output
sys.stderr = self.console_output
try:
self.link = link
self.link.add_callback(self.print_message_callback, SBP_MSG_PRINT)
self.link.add_callback(self.ext_event_callback, SBP_MSG_EXT_EVENT)
settings_read_finished_functions = []
self.tracking_view = TrackingView(self.link)
self.solution_view = SolutionView(self.link)
self.baseline_view = BaselineView(self.link)
self.observation_view = ObservationView(self.link,
name='Rover', relay=False)
self.observation_view_base = ObservationView(self.link,
name='Base', relay=True)
self.sbp_relay_view = SbpRelayView(self.link)
self.system_monitor_view = SystemMonitorView(self.link)
self.update_view = UpdateView(self.link, prompt=update)
settings_read_finished_functions.append(self.update_view.compare_versions)
# Once we have received the settings, update device_serial with the Piksi
# serial number which will be displayed in the window title
def update_serial():
serial_string = self.settings_view.settings['system_info']['serial_number'].value
self.device_serial = 'PK%04d' % int(serial_string)
settings_read_finished_functions.append(update_serial)
self.settings_view = \
SettingsView(self.link, settings_read_finished_functions,
hide_expert = not args.expert)
self.update_view.settings = self.settings_view.settings
self.python_console_env = {
'send_message': self.link.send,
'link': self.link,
}
self.python_console_env.update(self.tracking_view.python_console_cmds)
self.python_console_env.update(self.solution_view.python_console_cmds)
self.python_console_env.update(self.baseline_view.python_console_cmds)
self.python_console_env.update(self.observation_view.python_console_cmds)
self.python_console_env.update(self.sbp_relay_view.python_console_cmds)
self.python_console_env.update(self.system_monitor_view.python_console_cmds)
self.python_console_env.update(self.update_view.python_console_cmds)
self.python_console_env.update(self.settings_view.python_console_cmds)
except:
import traceback
traceback.print_exc()
class PortChooser(HasTraits):
ports = List()
port = Str(None)
traits_view = View(
VGroup(
Label('Select Piksi device:'),
Item('port', editor=EnumEditor(name='ports'), show_label=False),
),
buttons = ['OK', 'Cancel'],
close_result=False,
icon = icon,
width = 250,
title = 'Select serial device',
)
def __init__(self):
try:
self.ports = [p for p, _, _ in serial_link.get_ports()]
except TypeError:
pass
if not port:
port_chooser = PortChooser()
is_ok = port_chooser.configure_traits()
port = port_chooser.port
if not port or not is_ok:
print "No serial device selected!"
sys.exit(1)
else:
print "Using serial device '%s'" % port
# Make sure that SIGINT (i.e. Ctrl-C from command line) actually stops the
# application event loop (otherwise Qt swallows KeyboardInterrupt exceptions)
signal.signal(signal.SIGINT, signal.SIG_DFL)
with serial_link.get_driver(args.ftdi, port, baud) as driver:
with sbp.client.handler.Handler(driver.read, driver.write, args.verbose) as link:
with serial_link.get_logger(args.log, log_filename) as logger:
link.add_callback(logger)
if args.reset:
link.send(SBP_MSG_RESET, "")
console = SwiftConsole(link, update=args.update)
console.configure_traits()
# Force exit, even if threads haven't joined
try:
os._exit(0)
except:
pass
| lgpl-3.0 | -7,748,614,377,926,515,000 | 35.616959 | 98 | 0.667093 | false | 3.729303 | true | false | false |
bioinfo-core-BGU/neatseq-flow_modules | neatseq_flow_modules/Liron/RSEM_module/Merge_RSEM.py | 1 | 1404 | import os, re
import sys
import pandas as pd
files= sys.argv[1:]
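# Usage (sketch, file names are illustrative):
#   python Merge_RSEM.py rsem_out/*.genes.results
# The expected-count matrix is printed to stdout; TPM and FPKM matrices are
# written next to the input files (or to the working directory).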
flag=0
for file_name in files:
temp_data = pd.read_csv(file_name, sep='\t',index_col=0)
sample=file_name.split(os.sep)[-1]
    # os.path.dirname is used instead of rstrip: rstrip removes a set of
    # characters, not a suffix, and could eat into the path or sample name.
    path=os.path.dirname(file_name)
    if sample.endswith('.genes.results'):
        sample=sample[:-len('.genes.results')]
        prefix='GeneMat_'
    else:
        sample=sample[:-len('.isoforms.results')]
prefix='IsoMat_'
if flag==0:
Data_counts=temp_data["expected_count"].copy()
Data_TPM=temp_data["TPM"].copy()
Data_FPKM=temp_data["FPKM"].copy()
Data_counts.name=sample
Data_TPM.name=sample
Data_FPKM.name=sample
flag=1
else:
temp_counts=temp_data["expected_count"].copy()
temp_TPM=temp_data["TPM"].copy()
temp_FPKM=temp_data["FPKM"].copy()
temp_counts.name=sample
temp_TPM.name=sample
temp_FPKM.name=sample
Data_counts=pd.concat([Data_counts,temp_counts],axis=1)
Data_TPM=pd.concat([Data_TPM,temp_TPM],axis=1)
Data_FPKM=pd.concat([Data_FPKM,temp_FPKM],axis=1)
if flag!=0:
with pd.option_context('display.max_rows', None, 'display.max_columns', None):
print(Data_counts.to_csv(sep="\t",index=True))
if len(path)>0:
Data_TPM.to_csv(os.sep.join([path,prefix+"TPM"]) ,sep='\t',index=True)
Data_FPKM.to_csv(os.sep.join([path,prefix+"FPKM"]) ,sep='\t',index=True)
else:
Data_TPM.to_csv(prefix+"TPM" ,sep='\t',index=True)
Data_FPKM.to_csv(prefix+"FPKM" ,sep='\t',index=True) | gpl-3.0 | 4,188,589,171,506,673,000 | 31.674419 | 79 | 0.690171 | false | 2.391823 | false | false | false |
dahak-metagenomics/dahak | workflows/functional_inference/antibiotic_resistance/antibiotic_res.py | 1 | 5001 | from __future__ import print_function
from clustergrammer_widget import *
#from antibiotic_res import *
from ipywidgets import interact, interactive, fixed, interact_manual
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import glob
import qgrid
import numpy as np
def concat_abricate_files(filenames):
x = glob.glob(filenames)
list_of_dfs = [pd.read_table(filename, header = 1) for filename in x]
for dataframe, filename in zip(list_of_dfs, x):
dataframe['filename'] = filename
combined_df = pd.concat(list_of_dfs, ignore_index=True)
return combined_df
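# Example (illustrative): concat_abricate_files('*tab') concatenates every abricate
# .tab report matching the glob and tags each row with its source filename.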
def calc_total_genes_abricate():
combined_df = concat_abricate_files('*tab')
x = combined_df.groupby('filename').GENE.count()
y = x.to_frame()
bingo = y.sort_values('GENE',ascending=False)
bingo
return bingo
def calculate_unique_genes_abricate():
combined_df = concat_abricate_files("*tab")
x = combined_df.groupby('filename').GENE.nunique()
y = x.to_frame()
bingo = y.sort_values('GENE',ascending=False)
bingo
return bingo
def calc_total_genes_srst2():
combined_df = concat_srst2_txt('srst2/*results.txt')
x = combined_df.groupby('filename').gene.count()
y = x.to_frame()
bingo = y.sort_values('gene',ascending=False)
bingo
return bingo
def concat_srst2_txt(filenames):
x = glob.glob(filenames)
list_of_dfs = [pd.read_table(filename, header = 0) for filename in x]
for dataframe, filename in zip(list_of_dfs, x):
dataframe['filename'] = filename
combined_df = pd.concat(list_of_dfs, ignore_index=True, sort=True)
return combined_df
def calculate_unique_genes_srst2():
combined_df = concat_srst2_txt('srst2/*results.txt')
x = combined_df.groupby('filename').gene.nunique()
y = x.to_frame()
bingo = y.sort_values('gene',ascending=False)
bingo
return bingo
def interactive_table_abricate():
dense_df = create_abricate_presence_absence_gene_table()
return qgrid.show_grid(dense_df, show_toolbar=True)
def interactive_table_srst2():
dense_df = create_srst2_presence_absence_gene_table()
return qgrid.show_grid(dense_df, show_toolbar=True)
def interactive_map_abricate():
dense_df = create_abricate_presence_absence_gene_table()
# initialize network object
net = Network(clustergrammer_widget)
# load dataframe
net.load_df(dense_df)
# cluster using default parameters
net.cluster(enrichrgram=False)
# make the visualization
return net.widget()
def interactive_map_srst2():
    dense_df = create_srst2_presence_absence_gene_table()
# initialize network object
net = Network(clustergrammer_widget)
# load dataframe
net.load_df(dense_df)
# cluster using default parameters
net.cluster(enrichrgram=False)
# make the visualization
return net.widget()
def create_abricate_presence_absence_gene_table():
# Creat concatenated tsv file
combined_df = concat_abricate_files('*tab')
# Remove columns keeping only 'gene' and 'filename'
# Drop any na values
combined_df.dropna(axis=0, inplace=True)
#new_combined_df.head()
g = combined_df.groupby('GENE')
ug = list(set(combined_df['GENE']))
a = []
for GENE in ug:
gene_group = g.get_group(GENE)
if len(gene_group['filename'])>1:
a.append(gene_group[['filename', 'GENE']])
from collections import defaultdict
gene_filenames = defaultdict(list)
for line in a:
gene_filenames[line['GENE'].iloc[0]].extend(line['filename'].tolist())
filenames = set()
for files in gene_filenames.values():
filenames.update(files)
filenames = list(filenames)
data = {}
for gene, files in gene_filenames.items():
data[gene] = [file in files for file in filenames]
dense_df = pd.DataFrame.from_dict(data, orient='index', columns=filenames)
return dense_df
def create_srst2_presence_absence_gene_table():
# Creat concatenated tsv file
combined_df = concat_srst2_txt('srst2/*results.txt')
# Remove columns keeping only 'gene' and 'filename'
# Drop any na values
combined_df.dropna(axis=0, subset=['gene'], inplace=True)
g = combined_df.groupby('gene')
ug = list(set(combined_df['gene']))
a = []
for gene in ug:
gene_group = g.get_group(gene)
if len(gene_group['filename'])>1:
a.append(gene_group[['filename', 'gene']])
from collections import defaultdict
gene_filenames = defaultdict(list)
for line in a:
gene_filenames[line['gene'].iloc[0]].extend(line['filename'].tolist())
filenames = set()
for files in gene_filenames.values():
filenames.update(files)
filenames = list(filenames)
data = {}
for gene, files in gene_filenames.items():
data[gene] = [file in files for file in filenames]
dense_df = pd.DataFrame.from_dict(data, orient='index', columns=filenames)
return dense_df
| bsd-3-clause | -1,330,198,413,099,557,000 | 31.057692 | 78 | 0.670466 | false | 3.388211 | false | false | false |
dwhagar/snowboard | snowboard/userChannel.py | 1 | 1845 | # This file is part of snowboard.
#
# snowboard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# snowboard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with snowboard. If not, see <http://www.gnu.org/licenses/>.
'''
Stores user privileges for a specific channel.
See https://github.com/dwhagar/snowboard/wiki/Class-Docs for documentation.
'''
from .userFlags import UserFlags
class UserChannel:
    '''A place to store complete privileges for a user on a channel.'''
def __init__(self):
self.name = None
self.level = 0
self.flags = UserFlags()
def checkApproved(self, flag):
'''Checks with the users flags to see if they are approved.'''
return self.flags.checkApproved(flag, self.level)
def checkDenied(self, flag):
'''Checks with the users flags to see if they are denied.'''
return self.flags.checkDenied(flag)
def toData(self, data):
'''Decode a string into the object properties.'''
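        # Expected encoding, mirroring toString() below: "<name>/<level>/<flags>",
        # e.g. "somenick/100/abc" (example values only).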
properties = data.split('/')
self.name = properties[0].lower()
self.level = int(properties[1])
self.flags.toData(properties[2].lower())
def toString(self):
'''Encodes the object properties to a string.'''
levelString = str(self.level)
flagString = self.flags.toString()
encoded = self.name + "/" + levelString + "/" + flagString
return encoded | gpl-3.0 | 1,624,387,356,584,009,200 | 33.185185 | 75 | 0.678591 | false | 4.028384 | false | false | false |
KenV99/testRenderCapture | resources/lib/utils/rolling_stats.py | 1 | 4234 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 KenV99
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from collections import deque
try:
    import queue  # Python 3
except ImportError:  # Python 2 uses the capitalised module name
    import Queue as queue
try:
import xbmc
except ImportError:
from time import sleep
import multiprocessing as foo
bar = foo.Process
foobar = foo.ProcessError
else:
import threading as foo
bar = foo.Thread
foobar = foo.ThreadError
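# Minimal usage sketch (values are illustrative). The worker thread starts itself
# in __init__, so callers just feed values and read the running statistics back:
#   rs = RollingStats(expected_mean=0.0, windowsize=10)
#   rs.add_value(1.5)
#   mean, var = rs.get_mean(), rs.get_variance()
#   rs.stop()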
class RollingStats(bar):
def __init__(self, expected_mean=0.0, windowsize=0, sleepinsecs=0.0001):
super(RollingStats, self).__init__(name='RollingStats')
self.lock = foo.Lock()
self.abort_evt = foo.Event()
self.valueQ = queue.Queue()
self.mean = float(expected_mean)
self.sleepinsec = sleepinsecs
self.n = 0
self.M2 = 0.0
if windowsize > 0:
self.window = windowsize
self.values = deque()
self.calc = self.calc_window
        else:
            # Record a zero window size so get_variance() can tell the two
            # modes apart without hitting an AttributeError.
            self.window = 0
            self.calc = self.calc_nowindow
try:
from xbmc import sleep
self.using_xbmc = True
self.sleepfn = sleep
except ImportError:
from time import sleep
self.using_xbmc = False
self.sleepfn = sleep
self.start()
def sleep(self):
if self.using_xbmc:
self.sleepfn(self.sleepinsec * 1000.0)
else:
self.sleepfn(self.sleepinsec)
def stop(self, timeout=5):
self.abort_evt.set()
if self.is_alive():
try:
self.join(timeout)
except foobar:
pass
def add_value(self, value):
with self.lock:
self.valueQ.put_nowait(value)
def get_mean(self):
with self.lock:
return self.mean
def get_variance(self, population=False):
with self.lock:
if self.window:
denominator = self.window
else:
denominator = self.n
if population:
return self.M2 / denominator
else:
return self.M2 / (denominator - 1)
def calc_window(self, value):
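        # Rolling-window variant of Welford's online update: while the window is
        # still filling, do the standard update; once full, pop the oldest value
        # and fold its removal into the mean and M2 in the same pass.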
self.values.append(value)
if self.n < self.window:
self.n += 1
d = value - self.mean
self.mean += d / self.n
self.M2 += d * (value - self.mean)
else:
valueo = self.values.popleft()
meano = self.mean
self.mean += (value - valueo) / self.window
self.M2 += (value - meano) * (value - self.mean) - (valueo - meano) * (valueo - self.mean)
def calc_nowindow(self, value):
self.n += 1
d = value - self.mean
self.mean += d / self.n
self.M2 += d * (value - self.mean)
def run(self):
while not self.abort_evt.is_set():
while not self.valueQ.empty():
with self.lock:
value = self.valueQ.get_nowait()
self.calc(value)
else:
self.sleep()
if __name__ == '__main__':
import numpy
lst = [float(i) for i in xrange(-100, 101)]
windowsize = 10
rs = RollingStats(expected_mean=0.0, windowsize=windowsize)
record = {}
for i, v in enumerate(lst):
rs.add_value(v)
if i >= windowsize:
record[i] = (rs.get_mean(), rs.get_variance(True), v)
rs.stop()
for i, v in enumerate(lst):
if i >= windowsize:
window = lst[i - windowsize:i]
print i, record[i][2], record[i][0], numpy.mean(window), record[i][1], numpy.var(window)
| gpl-2.0 | -3,416,934,110,689,314,000 | 29.028369 | 102 | 0.557865 | false | 3.82821 | false | false | false |
esbanarango/random_real_address | support/get_address.py | 1 | 1500 | #Created by Liang Sun <[email protected]> in 2012
import urllib2
import re, json, time,hashlib, random
import MultipartPostHandler
from gevent import pool
from gevent import monkey
from urllib2 import URLError, HTTPError
monkey.patch_all()
def get_address():
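    # Draws a random point from a Gaussian centred roughly on Beijing
    # (lng ~116.4676, lat ~39.9235, stored here in micro-degrees) and
    # reverse-geocodes it with the Google Maps API in zh-CN.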
u = 100000.0
v = 1000000.0
longitude = int(random.gauss(116467615, u))
latitude = int(random.gauss(39923488, u))
print "longitude=%d,latitude=%d" % (longitude, latitude)
url = 'http://maps.googleapis.com/maps/api/geocode/json?latlng=%f,%f&sensor=false&language=zh-CN' % (latitude / v, longitude / v)
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(), urllib2.HTTPRedirectHandler())
try:
str_content = opener.open(url).read()
except HTTPError, e:
print 'Error code: ', e.code
time.sleep(36)
return get_address()
except URLError, e:
print e.reason
time.sleep(36)
return get_address()
if str_content:
content = json.loads(str_content)
if content['status'] == 'OK':
address = content['results'][0]['formatted_address']
if address.find(' ') > 0:
address = address[:address.find(' ')]
address = address.encode('utf-8')
return (longitude, latitude, address)
else:
print content['status'].encode('utf-8') + "!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
time.sleep(36) # This is due to the 2500/24h limit.
return get_address() | mit | 7,659,958,008,000,969,000 | 31.630435 | 133 | 0.608667 | false | 3.740648 | false | false | false |
xuegang/gpdb | src/test/tinc/tincrepo/mpp/lib/gpstop.py | 9 | 6328 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import string
import tinctest
from gppylib.commands.base import Command
from tinctest.main import TINCException
'''
Utility functions for Gpstop
gpstop function automation
@class GpStop
@exception: GPstopException
'''
class GPstopException( TINCException ): pass
class GpStop():
def __init__(self):
self.gphome = os.environ.get('GPHOME')
self.master_dir = os.environ.get('MASTER_DATA_DIRECTORY')
if self.gphome is None:
raise GPstopException ("GPHOME environment variable is not set")
else:
if self.master_dir is None:
raise GPstopException ("MASTER_DATA_DIRECTORY environment variable is not set")
def run_gpstop_cmd(self, flag = '-a', mdd = None, logdir = None, masteronly = None, immediate = None ,
fast = None, smart = None, quietmode = None, restart = None, timeout = None, parallelproc=None,
notstandby = None, verbose = None, version = None, standby = None, reload = None, validate = True):
'''
GpStop function
        @param flag: '-a' is the default option. Do not prompt the user for confirmation
        @param mdd: The master host data directory. If not specified, the value set for $MASTER_DATA_DIRECTORY will be used
@param logdir:The directory to write the log file. Defaults to ~/gpAdminLogs.
@param masteronly: Shuts down only the master node
@param immediate: Immediate shut down.
@param fast: Fast shut down.
@param smart: Smart shut down.
@param quietmode: Command output is not displayed on the screen
@param restart: Restart after shutdown is complete
@param timeout: Specifies a timeout threshold (in seconds) to wait for a segment instance to shutdown
@type timeout: Integer
@param parallelproc: The number of segments to stop in parallel
@type parallelproc: Integer
@param verbose:Displays detailed status, progress and error messages output by the utility
@param notstandby:Do not stop the standby master process
@param version: Displays the version of this utility.
@param standby:Do not stop the standby master process
@param reload: This option reloads the pg_hba.conf files of the master and segments and the runtime parameters of the postgresql.conf files but does not shutdown the Greenplum Database array
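        Example (illustrative): run_gpstop_cmd(mdd='y', fast='y', timeout=120)
        runs roughly "gpstop -d <MASTER_DATA_DIRECTORY> -t 120 -M fast -a".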
'''
make_cmd = ''.join([self.gphome,'/bin/gpstop'])
# Check the version of gpstop
if version is not None:
arg = '--version'
make_cmd = ' '.join([make_cmd,arg])
cmd = Command(name='Run gpstop', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, make_cmd))
tinctest.logger.info("Running gpstop : %s" % cmd)
cmd.run(validateAfter=validate)
result = cmd.get_results()
if result.rc != 0 or result.stderr:
return False
else:
tinctest.logger.info((result))
return True
# -d The master host data directory
if mdd is None:
mdd = ""
else:
mdd = " -d %s" % self.master_dir
# -q Quietmode
if quietmode is None:
quietmode = ""
else:
quietmode = "-q"
# -v Verbose
if verbose is None:
verbose = ""
else:
verbose = " -v"
# -y notstandby
if notstandby is None:
notstandby = ""
else:
notstandby = " -y"
# -t nnn Timeout
if timeout is None:
timeout = ""
else:
# Check if timeout is an integer
try:
int(timeout)
timeout=" -t %s" % timeout
except ValueError, e:
if e is not None:
raise GPstopException ("Gpstop timeout is not set correctly!")
# -B nnn Parallel Process
if parallelproc is None:
parallelproc = ""
else:
# Check if parallelprocs is an integer
try:
int(parallelproc)
parallelproc=" -B %s" % parallelproc
except ValueError, e:
if e is not None:
raise GPstopException ("Gpstop parallelproc is not set correctly!")
if logdir is None:
logdir = " "
else:
logdir='-l '+ (logdir)
make_cmd = ' '.join([make_cmd,mdd,quietmode,verbose,notstandby,timeout,logdir,parallelproc])
try:
if immediate is not None:
make_cmd = ' '.join([make_cmd, " -M immediate"])
elif masteronly is not None:
make_cmd = ' '.join([make_cmd, " -m"])
elif fast is not None:
make_cmd = ' '.join([make_cmd," -M fast"])
elif smart is not None:
make_cmd = ' '.join([make_cmd," -M smart"])
elif restart is not None:
make_cmd = ' '.join([make_cmd," -r"])
elif reload is not None:
make_cmd = ' '.join([make_cmd," -u"])
else:
make_cmd = ' '.join([make_cmd,''])
except Exception, e:
if e is not None:
raise
make_cmd = ' '.join([make_cmd,'-a'])
cmd = Command(name='Run gpstop', cmdStr='source %s/greenplum_path.sh;%s' % (self.gphome, make_cmd))
tinctest.logger.info("Running gpstop : %s" % cmd)
cmd.run(validateAfter=validate)
result = cmd.get_results()
if result.rc != 0 or result.stderr:
return False
else:
tinctest.logger.info((result))
return True
def get_version(self):
self.run_gpstop_cmd(version='y')
| apache-2.0 | -4,920,722,707,538,082,000 | 36.005848 | 220 | 0.606511 | false | 4 | true | false | false |
rspavel/spack | var/spack/repos/builtin/packages/superlu/package.py | 3 | 3630 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import glob
import os
class Superlu(Package):
"""SuperLU is a general purpose library for the direct solution of large,
sparse, nonsymmetric systems of linear equations on high performance
machines. SuperLU is designed for sequential machines."""
homepage = "http://crd-legacy.lbl.gov/~xiaoye/SuperLU/#superlu"
url = "http://crd-legacy.lbl.gov/~xiaoye/SuperLU/superlu_5.2.1.tar.gz"
version('5.2.1', sha256='28fb66d6107ee66248d5cf508c79de03d0621852a0ddeba7301801d3d859f463')
version('4.3', sha256='169920322eb9b9c6a334674231479d04df72440257c17870aaa0139d74416781')
variant('pic', default=True,
description='Build with position independent code')
depends_on('cmake', when='@5.2.1:', type='build')
depends_on('blas')
# CMake installation method
def install(self, spec, prefix):
cmake_args = [
'-Denable_blaslib=OFF',
'-DBLAS_blas_LIBRARY={0}'.format(spec['blas'].libs.joined())
]
if '+pic' in spec:
cmake_args.extend([
'-DCMAKE_POSITION_INDEPENDENT_CODE=ON'
])
cmake_args.extend(std_cmake_args)
with working_dir('spack-build', create=True):
cmake('..', *cmake_args)
make()
make('install')
# Pre-cmake installation method
@when('@4.3')
def install(self, spec, prefix):
config = []
# Define make.inc file
config.extend([
'PLAT = _x86_64',
'SuperLUroot = %s' % self.stage.source_path,
# 'SUPERLULIB = $(SuperLUroot)/lib/libsuperlu$(PLAT).a',
'SUPERLULIB = $(SuperLUroot)/lib/libsuperlu_{0}.a' \
.format(self.spec.version),
'BLASDEF = -DUSE_VENDOR_BLAS',
'BLASLIB = {0}'.format(spec['blas'].libs.ld_flags),
# or BLASLIB = -L/usr/lib64 -lblas
'TMGLIB = libtmglib.a',
'LIBS = $(SUPERLULIB) $(BLASLIB)',
'ARCH = ar',
'ARCHFLAGS = cr',
'RANLIB = {0}'.format('ranlib' if which('ranlib') else 'echo'),
'CC = {0}'.format(os.environ['CC']),
'FORTRAN = {0}'.format(os.environ['FC']),
'LOADER = {0}'.format(os.environ['CC']),
'CDEFS = -DAdd_'
])
if '+pic' in spec:
config.extend([
# Use these lines instead when pic_flag capability arrives
'CFLAGS = -O3 {0}'.format(self.compiler.cc_pic_flag),
'NOOPTS = {0}'.format(self.compiler.cc_pic_flag),
'FFLAGS = -O2 {0}'.format(self.compiler.f77_pic_flag),
'LOADOPTS = {0}'.format(self.compiler.cc_pic_flag)
])
else:
config.extend([
'CFLAGS = -O3',
'NOOPTS = ',
'FFLAGS = -O2',
'LOADOPTS = '
])
# Write configuration options to make.inc file
with open('make.inc', 'w') as inc:
for option in config:
inc.write('{0}\n'.format(option))
make(parallel=False)
# Install manually
install_tree('lib', prefix.lib)
headers = glob.glob(join_path('SRC', '*.h'))
mkdir(prefix.include)
for h in headers:
install(h, prefix.include)
| lgpl-2.1 | -3,001,714,705,535,062,500 | 34.940594 | 95 | 0.54022 | false | 3.514037 | true | false | false |
endlessm/chromium-browser | third_party/chromite/cros_bisect/chrome_on_cros_bisector_unittest.py | 1 | 24193 | # -*- coding: utf-8 -*-
# Copyright 2017 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Test chrome_on_cros_bisector module."""
from __future__ import print_function
import copy
import itertools
import os
import mock
from chromite.cli import flash
from chromite.cros_bisect import common
from chromite.cros_bisect import builder as builder_module
from chromite.cros_bisect import evaluator as evaluator_module
from chromite.cros_bisect import chrome_on_cros_bisector
from chromite.cros_bisect import git_bisector_unittest
from chromite.lib import commandline
from chromite.lib import cros_test_lib
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import partial_mock
class DummyEvaluator(evaluator_module.Evaluator):
"""Evaluator which just return empty score."""
# pylint: disable=unused-argument
def Evaluate(self, remote, build_label, repeat):
return common.Score()
def CheckLastEvaluate(self, build_label, repeat=1):
return common.Score()
class TestChromeOnCrosBisector(cros_test_lib.MockTempDirTestCase):
"""Tests ChromeOnCrosBisector class."""
BOARD = 'samus'
TEST_NAME = 'graphics_WebGLAquarium'
METRIC = 'avg_fps_1000_fishes/summary/value'
REPORT_FILE = 'reports.json'
DUT_ADDR = '192.168.1.1'
DUT = commandline.DeviceParser(commandline.DEVICE_SCHEME_SSH)(DUT_ADDR)
# Be aware that GOOD_COMMIT_INFO and BAD_COMMIT_INFO should be assigned via
# copy.deepcopy() as their users are likely to change the content.
GOOD_COMMIT_SHA1 = '44af5c9a5505'
GOOD_COMMIT_TIMESTAMP = 1486526594
GOOD_COMMIT_SCORE = common.Score([100])
GOOD_COMMIT_INFO = common.CommitInfo(
sha1=GOOD_COMMIT_SHA1, timestamp=GOOD_COMMIT_TIMESTAMP, title='good',
label='last-known-good ', score=GOOD_COMMIT_SCORE)
BAD_COMMIT_SHA1 = '6a163bb66c3e'
BAD_COMMIT_TIMESTAMP = 1486530021
BAD_COMMIT_SCORE = common.Score([80])
BAD_COMMIT_INFO = common.CommitInfo(
sha1=BAD_COMMIT_SHA1, timestamp=BAD_COMMIT_TIMESTAMP, title='bad',
label='last-known-bad ', score=BAD_COMMIT_SCORE)
GOOD_CROS_VERSION = 'R60-9592.50.0'
BAD_CROS_VERSION = 'R60-9592.51.0'
CULPRIT_COMMIT_SHA1 = '12345abcde'
CULPRIT_COMMIT_TIMESTAMP = 1486530000
CULPRIT_COMMIT_SCORE = common.Score([81])
CULPRIT_COMMIT_INFO = common.CommitInfo(
sha1=CULPRIT_COMMIT_SHA1, timestamp=CULPRIT_COMMIT_TIMESTAMP, title='bad',
score=CULPRIT_COMMIT_SCORE)
THRESHOLD_SPLITTER = 95 # Score between good and bad, closer to good side.
THRESHOLD = 5 # Distance between good score and splitter.
REPEAT = 3
GOOD_METADATA_CONTENT = '\n'.join([
'{',
' "metadata-version": "2",',
' "toolchain-url": "2017/05/%(target)s-2017.05.25.101355.tar.xz",',
' "suite_scheduling": true,',
' "build_id": 1644146,',
' "version": {',
' "full": "R60-9592.50.0",',
' "android-branch": "git_mnc-dr-arc-m60",',
' "chrome": "60.0.3112.53",',
' "platform": "9592.50.0",',
' "milestone": "60",',
' "android": "4150402"',
' }',
'}'])
def setUp(self):
"""Sets up test case."""
self.options = cros_test_lib.EasyAttr(
base_dir=self.tempdir, board=self.BOARD, reuse_repo=True,
good=self.GOOD_COMMIT_SHA1, bad=self.BAD_COMMIT_SHA1, remote=self.DUT,
eval_repeat=self.REPEAT, auto_threshold=False, reuse_eval=False,
cros_flash_sleep=0.01, cros_flash_retry=3, cros_flash_backoff=1,
eval_raise_on_error=False, skip_failed_commit=False)
self.repo_dir = os.path.join(self.tempdir,
builder_module.Builder.DEFAULT_REPO_DIR)
self.SetUpBisector()
def SetUpBisector(self):
"""Instantiates self.bisector using self.options."""
self.evaluator = DummyEvaluator(self.options)
self.builder = builder_module.Builder(self.options)
self.bisector = chrome_on_cros_bisector.ChromeOnCrosBisector(
self.options, self.builder, self.evaluator)
def SetUpBisectorWithCrosVersion(self):
"""Instantiates self.bisector using CrOS version as good and bad options."""
self.options.good = self.GOOD_CROS_VERSION
self.options.bad = self.BAD_CROS_VERSION
self.SetUpBisector()
def SetDefaultCommitInfo(self):
"""Sets up default commit info."""
self.bisector.good_commit_info = copy.deepcopy(self.GOOD_COMMIT_INFO)
self.bisector.bad_commit_info = copy.deepcopy(self.BAD_COMMIT_INFO)
def testInit(self):
"""Tests __init__() with SHA1 as good and bad options."""
self.assertEqual(self.GOOD_COMMIT_SHA1, self.bisector.good_commit)
self.assertIsNone(self.bisector.good_cros_version)
self.assertEqual(self.BAD_COMMIT_SHA1, self.bisector.bad_commit)
self.assertIsNone(self.bisector.bad_cros_version)
self.assertFalse(self.bisector.bisect_between_cros_version)
self.assertEqual(self.DUT_ADDR, self.bisector.remote.raw)
self.assertEqual(self.REPEAT, self.bisector.eval_repeat)
self.assertEqual(self.builder, self.bisector.builder)
self.assertEqual(self.repo_dir, self.bisector.repo_dir)
self.assertIsNone(self.bisector.good_commit_info)
self.assertIsNone(self.bisector.bad_commit_info)
self.assertEqual(0, len(self.bisector.bisect_log))
self.assertIsNone(self.bisector.threshold)
self.assertTrue(not self.bisector.current_commit)
def testInitCrosVersion(self):
"""Tests __init__() with CrOS version as good and bad options."""
self.SetUpBisectorWithCrosVersion()
self.assertEqual(self.GOOD_CROS_VERSION, self.bisector.good_cros_version)
self.assertIsNone(self.bisector.good_commit)
self.assertEqual(self.BAD_CROS_VERSION, self.bisector.bad_cros_version)
self.assertIsNone(self.bisector.bad_commit)
self.assertTrue(self.bisector.bisect_between_cros_version)
self.assertEqual(self.DUT_ADDR, self.bisector.remote.raw)
self.assertEqual(self.REPEAT, self.bisector.eval_repeat)
self.assertEqual(self.builder, self.bisector.builder)
self.assertEqual(self.repo_dir, self.bisector.repo_dir)
self.assertIsNone(self.bisector.good_commit_info)
self.assertIsNone(self.bisector.bad_commit_info)
self.assertEqual(0, len(self.bisector.bisect_log))
self.assertIsNone(self.bisector.threshold)
self.assertTrue(not self.bisector.current_commit)
def testInitMissingRequiredArgs(self):
"""Tests that ChromeOnCrosBisector raises for missing required argument."""
options = cros_test_lib.EasyAttr()
with self.assertRaises(common.MissingRequiredOptionsException) as cm:
chrome_on_cros_bisector.ChromeOnCrosBisector(options, self.builder,
self.evaluator)
exception_message = str(cm.exception)
self.assertIn('Missing command line', exception_message)
self.assertIn('ChromeOnCrosBisector', exception_message)
for arg in chrome_on_cros_bisector.ChromeOnCrosBisector.REQUIRED_ARGS:
self.assertIn(arg, exception_message)
def testCheckCommitFormat(self):
"""Tests CheckCommitFormat()."""
CheckCommitFormat = (
chrome_on_cros_bisector.ChromeOnCrosBisector.CheckCommitFormat)
self.assertEqual(self.GOOD_COMMIT_SHA1,
CheckCommitFormat(self.GOOD_COMMIT_SHA1))
self.assertEqual(self.GOOD_CROS_VERSION,
CheckCommitFormat(self.GOOD_CROS_VERSION))
self.assertEqual('R60-9592.50.0',
CheckCommitFormat('60.9592.50.0'))
invalid = 'bad_sha1'
self.assertIsNone(CheckCommitFormat(invalid))
def testObtainBisectBoundaryScoreImpl(self):
"""Tests ObtainBisectBoundaryScoreImpl()."""
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
build_deploy_eval_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeployEval')
build_deploy_eval_mock.side_effect = [self.GOOD_COMMIT_SCORE,
self.BAD_COMMIT_SCORE]
self.assertEqual(self.GOOD_COMMIT_SCORE,
self.bisector.ObtainBisectBoundaryScoreImpl(True))
self.assertEqual(self.BAD_COMMIT_SCORE,
self.bisector.ObtainBisectBoundaryScoreImpl(False))
self.assertEqual(
[mock.call(customize_build_deploy=None, eval_label=None),
mock.call(customize_build_deploy=None, eval_label=None)],
build_deploy_eval_mock.call_args_list)
def testObtainBisectBoundaryScoreImplCrosVersion(self):
"""Tests ObtainBisectBoundaryScoreImpl() with CrOS version."""
self.SetUpBisectorWithCrosVersion()
# Inject good_commit and bad_commit as if
    # bisector.ResolveChromeBisectRangeFromCrosVersion() had been run.
self.bisector.good_commit = self.GOOD_COMMIT_SHA1
self.bisector.bad_commit = self.BAD_COMMIT_SHA1
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
'UpdateCurrentCommit')
evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')
# Mock FlashCrosImage() to verify that customize_build_deploy is assigned
# as expected.
flash_cros_image_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage')
evaluate_mock.return_value = self.GOOD_COMMIT_SCORE
self.assertEqual(self.GOOD_COMMIT_SCORE,
self.bisector.ObtainBisectBoundaryScoreImpl(True))
flash_cros_image_mock.assert_called_with(
self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION))
evaluate_mock.assert_called_with(
self.DUT, 'cros_%s' % self.GOOD_CROS_VERSION, self.REPEAT)
evaluate_mock.return_value = self.BAD_COMMIT_SCORE
self.assertEqual(self.BAD_COMMIT_SCORE,
self.bisector.ObtainBisectBoundaryScoreImpl(False))
flash_cros_image_mock.assert_called_with(
self.bisector.GetCrosXbuddyPath(self.BAD_CROS_VERSION))
evaluate_mock.assert_called_with(
self.DUT, 'cros_%s' % self.BAD_CROS_VERSION, self.REPEAT)
def testObtainBisectBoundaryScoreImplCrosVersionFlashError(self):
"""Tests ObtainBisectBoundaryScoreImpl() with CrOS version."""
self.SetUpBisectorWithCrosVersion()
# Inject good_commit and bad_commit as if
    # bisector.ResolveChromeBisectRangeFromCrosVersion() had been run.
self.bisector.good_commit = self.GOOD_COMMIT_SHA1
self.bisector.bad_commit = self.BAD_COMMIT_SHA1
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
'UpdateCurrentCommit')
evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')
# Mock FlashCrosImage() to verify that customize_build_deploy is assigned
# as expected.
flash_cros_image_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage')
flash_cros_image_mock.side_effect = flash.FlashError('Flash failed.')
with self.assertRaises(flash.FlashError):
self.bisector.ObtainBisectBoundaryScoreImpl(True)
flash_cros_image_mock.assert_called_with(
self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION))
evaluate_mock.assert_not_called()
with self.assertRaises(flash.FlashError):
self.bisector.ObtainBisectBoundaryScoreImpl(False)
flash_cros_image_mock.assert_called_with(
self.bisector.GetCrosXbuddyPath(self.BAD_CROS_VERSION))
evaluate_mock.assert_not_called()
def testGetCrosXbuddyPath(self):
"""Tests GetCrosXbuddyPath()."""
self.assertEqual(
'xbuddy://remote/%s/%s/test' % (self.BOARD, self.GOOD_CROS_VERSION),
self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION))
def testExchangeChromeSanityCheck(self):
"""Tests the flow of exchanging Chrome between good and bad CrOS."""
self.SetUpBisectorWithCrosVersion()
# Inject good_commit and bad_commit as if
# bisector.ResolveChromeBisectRangeFromCrosVersion() has been run.
self.bisector.good_commit = self.GOOD_COMMIT_SHA1
self.bisector.bad_commit = self.BAD_COMMIT_SHA1
# Inject commit_info and threshold as if
# bisector.ObtainBisectBoundaryScore() and bisector.GetThresholdFromUser()
# has been run.
self.SetDefaultCommitInfo()
self.bisector.threshold = self.THRESHOLD
# Try bad Chrome first.
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
'UpdateCurrentCommit')
evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')
expected_evaluate_calls = [
mock.call(self.DUT, x, self.REPEAT) for x in [
'cros_%s_cr_%s' % (self.GOOD_CROS_VERSION, self.BAD_COMMIT_SHA1),
'cros_%s_cr_%s' % (self.BAD_CROS_VERSION, self.GOOD_COMMIT_SHA1)]]
# Mock FlashCrosImage() to verify that customize_build_deploy is assigned
# as expected.
flash_cros_image_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage')
expected_flash_cros_calls = [
mock.call(self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)),
mock.call(self.bisector.GetCrosXbuddyPath(self.BAD_CROS_VERSION))]
# Make sure bisector.BuildDeploy() is also called.
build_deploy_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeploy')
# Assume culprit commit is in Chrome side, i.e. first score is bad.
evaluate_mock.side_effect = [self.BAD_COMMIT_SCORE, self.GOOD_COMMIT_SCORE]
self.assertTrue(self.bisector.ExchangeChromeSanityCheck())
flash_cros_image_mock.assert_has_calls(expected_flash_cros_calls)
evaluate_mock.assert_has_calls(expected_evaluate_calls)
self.assertEqual(2, build_deploy_mock.call_count)
flash_cros_image_mock.reset_mock()
evaluate_mock.reset_mock()
build_deploy_mock.reset_mock()
# Assume culprit commit is not in Chrome side, i.e. first score is good.
evaluate_mock.side_effect = [self.GOOD_COMMIT_SCORE, self.BAD_COMMIT_SCORE]
self.assertFalse(self.bisector.ExchangeChromeSanityCheck())
flash_cros_image_mock.assert_has_calls(expected_flash_cros_calls)
evaluate_mock.assert_has_calls(expected_evaluate_calls)
self.assertEqual(2, build_deploy_mock.call_count)
def testExchangeChromeSanityCheckFlashError(self):
"""Tests the flow of exchanging Chrome between good and bad CrOS."""
self.SetUpBisectorWithCrosVersion()
# Inject good_commit and bad_commit as if
# bisector.ResolveChromeBisectRangeFromCrosVersion() has been run.
self.bisector.good_commit = self.GOOD_COMMIT_SHA1
self.bisector.bad_commit = self.BAD_COMMIT_SHA1
# Inject commit_info and threshold as if
# bisector.ObtainBisectBoundaryScore() and bisector.GetThresholdFromUser()
# has been run.
self.SetDefaultCommitInfo()
self.bisector.threshold = self.THRESHOLD
# Try bad Chrome first.
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
self.PatchObject(chrome_on_cros_bisector.ChromeOnCrosBisector,
'UpdateCurrentCommit')
evaluate_mock = self.PatchObject(DummyEvaluator, 'Evaluate')
# Mock FlashCrosImage() to verify that customize_build_deploy is assigned
# as expected.
flash_cros_image_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'FlashCrosImage',
side_effect=flash.FlashError('Flash failed.'))
build_deploy_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeploy')
with self.assertRaises(flash.FlashError):
self.bisector.ExchangeChromeSanityCheck()
evaluate_mock.assert_not_called()
flash_cros_image_mock.assert_called()
build_deploy_mock.assert_not_called()
def testFlashImage(self):
"""Tests FlashImage()."""
flash_mock = self.PatchObject(flash, 'Flash')
xbuddy_path = self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)
self.bisector.FlashCrosImage(xbuddy_path)
flash_mock.assert_called_with(
self.DUT, xbuddy_path, board=self.BOARD, clobber_stateful=True,
disable_rootfs_verification=True)
def testFlashImageRetry(self):
"""Tests FlashImage() with retry success."""
flash_mock_call_counter = itertools.count()
def flash_mock_return(*unused_args, **unused_kwargs):
nth_call = next(flash_mock_call_counter)
if nth_call < 3:
raise flash.FlashError('Flash failed.')
flash_mock = self.PatchObject(flash, 'Flash')
flash_mock.side_effect = flash_mock_return
xbuddy_path = self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)
self.bisector.FlashCrosImage(xbuddy_path)
flash_mock.assert_called_with(
self.DUT, xbuddy_path, board=self.BOARD, clobber_stateful=True,
disable_rootfs_verification=True)
def testFlashImageRetryFailed(self):
"""Tests FlashImage() with retry failed."""
flash_mock = self.PatchObject(flash, 'Flash')
flash_mock.side_effect = flash.FlashError('Flash failed.')
xbuddy_path = self.bisector.GetCrosXbuddyPath(self.GOOD_CROS_VERSION)
with self.assertRaises(flash.FlashError):
self.bisector.FlashCrosImage(xbuddy_path)
flash_mock.assert_called_with(
self.DUT, xbuddy_path, board=self.BOARD, clobber_stateful=True,
disable_rootfs_verification=True)
def testCrosVersionToChromeCommit(self):
"""Tests CrosVersionToChromeCommit()."""
metadata_url = (
'gs://chromeos-image-archive/%s-release/%s/partial-metadata.json' %
(self.BOARD, self.GOOD_CROS_VERSION))
gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
gs_mock.AddCmdResult(['cat', metadata_url],
output=self.GOOD_METADATA_CONTENT)
git_log_content = '\n'.join([
'8967dd66ad72 (tag: 60.0.3112.53) Publish DEPS for Chromium '
'60.0.3112.53',
'27ed0cc0c2f4 Incrementing VERSION to 60.0.3112.53'])
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['log', '--oneline', '-n', '2', '60.0.3112.53'],
output=git_log_content)
self.bisector.gs_ctx = gs.GSContext()
self.assertEqual(
'27ed0cc0c2f4',
self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))
def testCrosVersionToChromeCommitFail(self):
"""Tests failure case of CrosVersionToChromeCommit()."""
metadata_url = (
'gs://chromeos-image-archive/%s-release/%s/partial-metadata.json' %
(self.BOARD, self.GOOD_CROS_VERSION))
gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
gs_mock.AddCmdResult(['cat', metadata_url], returncode=1)
self.bisector.gs_ctx = gs.GSContext()
self.assertIsNone(
self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))
metadata_content = 'not_a_json'
gs_mock.AddCmdResult(['cat', metadata_url], output=metadata_content)
self.assertIsNone(
self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))
metadata_content = '\n'.join([
'{',
' "metadata-version": "2",',
' "toolchain-url": "2017/05/%(target)s-2017.05.25.101355.tar.xz",',
' "suite_scheduling": true,',
' "build_id": 1644146,',
' "version": {}',
'}'])
gs_mock.AddCmdResult(['cat', metadata_url], output=metadata_content)
self.assertIsNone(
self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))
gs_mock.AddCmdResult(['cat', metadata_url],
output=self.GOOD_METADATA_CONTENT)
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(['log', '--oneline', '-n', '2', '60.0.3112.53'],
returncode=128)
self.assertIsNone(
self.bisector.CrosVersionToChromeCommit(self.GOOD_CROS_VERSION))
def testResolveChromeBisectRangeFromCrosVersion(self):
"""Tests ResolveChromeBisectRangeFromCrosVersion()."""
self.SetUpBisectorWithCrosVersion()
cros_to_chrome_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector,
'CrosVersionToChromeCommit')
cros_to_chrome_mock.side_effect = [self.GOOD_COMMIT_SHA1,
self.BAD_COMMIT_SHA1]
self.assertTrue(self.bisector.ResolveChromeBisectRangeFromCrosVersion())
    self.assertEqual(self.GOOD_COMMIT_SHA1, self.bisector.good_commit)
    self.assertEqual(self.BAD_COMMIT_SHA1, self.bisector.bad_commit)
cros_to_chrome_mock.assert_has_calls([mock.call(self.GOOD_CROS_VERSION),
mock.call(self.BAD_CROS_VERSION)])
cros_to_chrome_mock.reset_mock()
cros_to_chrome_mock.side_effect = [None]
self.assertFalse(self.bisector.ResolveChromeBisectRangeFromCrosVersion())
cros_to_chrome_mock.assert_called_with(self.GOOD_CROS_VERSION)
cros_to_chrome_mock.reset_mock()
cros_to_chrome_mock.side_effect = [self.GOOD_COMMIT_SHA1, None]
self.assertFalse(self.bisector.ResolveChromeBisectRangeFromCrosVersion())
cros_to_chrome_mock.assert_has_calls([mock.call(self.GOOD_CROS_VERSION),
mock.call(self.BAD_CROS_VERSION)])
def testPrepareBisect(self):
"""Tests PrepareBisect()."""
# Pass SanityCheck().
git_mock = self.StartPatcher(git_bisector_unittest.GitMock(self.repo_dir))
git_mock.AddRunGitResult(
partial_mock.InOrder(['rev-list', self.GOOD_COMMIT_SHA1]))
git_mock.AddRunGitResult(
partial_mock.InOrder(['rev-list', self.BAD_COMMIT_SHA1]))
git_mock.AddRunGitResult(
partial_mock.InOrder(['show', self.GOOD_COMMIT_SHA1]),
output=str(self.GOOD_COMMIT_TIMESTAMP))
git_mock.AddRunGitResult(
partial_mock.InOrder(['show', self.BAD_COMMIT_SHA1]),
output=str(self.BAD_COMMIT_TIMESTAMP))
# Inject score for both side.
git_mock.AddRunGitResult(['checkout', self.GOOD_COMMIT_SHA1])
git_mock.AddRunGitResult(['checkout', self.BAD_COMMIT_SHA1])
build_deploy_eval_mock = self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector, 'BuildDeployEval')
build_deploy_eval_mock.side_effect = [self.GOOD_COMMIT_SCORE,
self.BAD_COMMIT_SCORE]
# Set auto_threshold.
self.bisector.auto_threshold = True
self.assertTrue(self.bisector.PrepareBisect())
def testPrepareBisectCrosVersion(self):
"""Tests PrepareBisect() with CrOS version."""
self.SetUpBisectorWithCrosVersion()
self.StartPatcher(gs_unittest.GSContextMock())
self.PatchObject(builder_module.Builder, 'SyncToHead')
self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector,
'ResolveChromeBisectRangeFromCrosVersion').return_value = True
self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector,
'SanityCheck').return_value = True
self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector,
'ObtainBisectBoundaryScore').return_value = True
self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector,
'GetThresholdFromUser').return_value = True
self.PatchObject(
chrome_on_cros_bisector.ChromeOnCrosBisector,
'ExchangeChromeSanityCheck').return_value = True
self.assertTrue(self.bisector.PrepareBisect())
| bsd-3-clause | 2,018,605,499,905,117,400 | 41.743816 | 80 | 0.702476 | false | 3.266676 | true | false | false |
t-yanaka/zabbix-report | customer_api/api/migrations/0005_auto_20170727_1529.py | 1 | 1388 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 06:29
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('api', '0004_auto_20170725_1332'),
]
operations = [
migrations.AlterField(
model_name='cace',
name='sales_main_staff',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sales_main_staff', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='cace',
name='sales_sub_staff',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sales_sub_staff', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='cace',
name='technical_main_staff',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='technical_main_staff', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='cace',
name='technical_sub_staff',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='technical_sub_staff', to=settings.AUTH_USER_MODEL),
),
]
| mit | -8,209,237,910,542,917,000 | 36.513514 | 147 | 0.636888 | false | 3.813187 | false | false | false |
klahnakoski/cloc | cloc/util/thread/multiprocess.py | 1 | 2859 | # encoding: utf-8
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import unicode_literals
from __future__ import division
from dzAlerts.util.thread.threads import Queue
from ..env.logs import Log
class worker(object):
    def __init__(self, name, func, inbound, outbound, logging=None):
        # original signature was missing `self`; keep the worker's state and
        # route its log output through the inter-process queue
        self.name, self.func, self.inbound, self.outbound = name, func, inbound, outbound
        self.logger = Log_usingInterProcessQueue(logging if logging is not None else outbound)
class Log_usingInterProcessQueue(Log):
def __init__(self, outbound):
self.outbound = outbound
def write(self, template, params):
self.outbound.put({"template": template, "param": params})
class Multiprocess(object):
# THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
# A CENTRAL POINT
def __init__(self, functions):
self.outbound = Queue()
self.inbound = Queue()
self.inbound = Queue()
# MAKE
# MAKE THREADS
self.threads = []
for t, f in enumerate(functions):
thread = worker(
"worker " + unicode(t),
f,
self.inbound,
self.outbound,
)
self.threads.append(thread)
def __enter__(self):
return self
# WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
def __exit__(self, a, b, c):
try:
self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
except Exception, e:
Log.warning("Problem adding to inbound", e)
self.join()
# IF YOU SENT A stop(), OR STOP, YOU MAY WAIT FOR SHUTDOWN
def join(self):
try:
# WAIT FOR FINISH
for t in self.threads:
t.join()
except (KeyboardInterrupt, SystemExit):
Log.note("Shutdow Started, please be patient")
except Exception, e:
Log.error("Unusual shutdown!", e)
finally:
for t in self.threads:
t.keep_running = False
for t in self.threads:
t.join()
self.inbound.close()
self.outbound.close()
# RETURN A GENERATOR THAT HAS len(parameters) RESULTS (ANY ORDER)
def execute(self, parameters):
# FILL QUEUE WITH WORK
self.inbound.extend(parameters)
num = len(parameters)
def output():
for i in xrange(num):
result = self.outbound.pop()
yield result
return output()
# EXTERNAL COMMAND THAT RETURNS IMMEDIATELY
def stop(self):
self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
for t in self.threads:
t.keep_running = False
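# --- Illustrative usage sketch (not part of the original module) ---
# Intended API shape, assuming the `worker` class above is completed so that
# each worker pops parameters from `inbound`, applies its function and pushes
# results (and log messages) onto `outbound`; `do_work` is hypothetical.
#
#   with Multiprocess([do_work, do_work, do_work]) as pool:
#       for result in pool.execute(params):
#           handle(result)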
| mpl-2.0 | -4,370,935,611,232,605,700 | 25.472222 | 93 | 0.580972 | false | 3.842742 | false | false | false |
ric2b/Vivaldi-browser | chromium/tools/perf/benchmarks/dummy_benchmark.py | 2 | 1650 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Dummy benchmarks for the bisect FYI integration tests.
The number they produce aren't meant to represent any actual performance
data of the browser. For more information about these dummy benchmarks,
see: https://goo.gl/WvZiiW
"""
import random
from telemetry import benchmark
from telemetry.page import legacy_page_test
from core import perf_benchmark
from page_sets import dummy_story_set
class _DummyTest(legacy_page_test.LegacyPageTest):
def __init__(self, avg, std):
super(_DummyTest, self).__init__()
self._avg = avg
self._std = std
def ValidateAndMeasurePage(self, page, tab, results):
del tab # unused
value = random.gauss(self._avg, self._std)
results.AddMeasurement('gaussian-value', 'ms', value)
class _DummyBenchmark(perf_benchmark.PerfBenchmark):
page_set = dummy_story_set.DummyStorySet
@benchmark.Info(emails=['[email protected]'], component='Test>Telemetry')
class DummyBenchmarkOne(_DummyBenchmark):
"""A low noise benchmark with mean=100 & std=1."""
def CreatePageTest(self, options):
return _DummyTest(168, 1)
@classmethod
def Name(cls):
return 'dummy_benchmark.stable_benchmark_1'
@benchmark.Info(emails=['[email protected]'], component='Test>Telemetry')
class DummyBenchmarkTwo(_DummyBenchmark):
"""A noisy benchmark with mean=50 & std=20."""
def CreatePageTest(self, options):
return _DummyTest(50, 20)
@classmethod
def Name(cls):
return 'dummy_benchmark.noisy_benchmark_1'
| bsd-3-clause | 7,245,285,478,421,240,000 | 27.448276 | 77 | 0.734545 | false | 3.666667 | true | false | false |
guo-xuan/Sipros-Ensemble | Scripts/HierarchicalClustering.py | 1 | 1992 |
#!/usr/bin/python
import sys, getopt, warnings, os, re
def mergeClosestClusters (CandidateCluster, distanceThreshold) :
bReVal = True
firstBestCulsterId = -1
secondBestCulsterId = -1
#print CandidateCluster
for i in range(len(CandidateCluster) - 1) :
averageFirst = sum(CandidateCluster[i])/float(len(CandidateCluster[i]))
averageSecond = sum(CandidateCluster[i+1])/float(len(CandidateCluster[i+1]))
if (averageFirst > averageSecond) :
print "wrong rank!"
sys.exit(0)
currentDistance = averageSecond - averageFirst
if ( currentDistance <= distanceThreshold) :
if ((firstBestCulsterId == -1) or (secondBestCulsterId == -1)): # first pair of good clusters
minDistance = currentDistance
firstBestCulsterId = i
secondBestCulsterId = i+1
            elif ( currentDistance < minDistance) : # two better clusters
minDistance = currentDistance
firstBestCulsterId = i
secondBestCulsterId = i+1
# print minDistance, currentDistance
if ((firstBestCulsterId != -1) and (secondBestCulsterId != -1)) :
#merge two clusters
mergedCluster = CandidateCluster [firstBestCulsterId] + CandidateCluster [secondBestCulsterId]
del CandidateCluster[firstBestCulsterId]
del CandidateCluster[firstBestCulsterId]
CandidateCluster.insert(firstBestCulsterId, mergedCluster)
else :
bReVal = False
return bReVal
def hierarchicalClustering (ldCandidatePct, distanceThreshold) :
ldCandidatePct.sort()
CandidateCluster = []
if (len(ldCandidatePct) == 1) :
CandidateCluster.append([ldCandidatePct[0]])
elif (len(ldCandidatePct) > 1) :
# each cluster has one candidate
for i in range(len(ldCandidatePct)) :
CandidateCluster.append([ldCandidatePct[i]])
        # clusters merge
bMerge = mergeClosestClusters(CandidateCluster, distanceThreshold)
while (bMerge) :
# print CandidateCluster
bMerge = mergeClosestClusters(CandidateCluster, distanceThreshold)
return CandidateCluster
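# --- Illustrative usage sketch (not part of the original script) ---
# Clusters a made-up list of candidate scores with a distance threshold of 0.5.
if __name__ == '__main__' :
    sampleScores = [0.1, 0.15, 0.2, 0.9, 0.95, 3.0]
    sampleClusters = hierarchicalClustering(sampleScores, 0.5)
    print sampleClusters  # gives [[0.1, 0.15, 0.2], [0.9, 0.95], [3.0]]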
| gpl-3.0 | 6,426,118,372,494,261,000 | 33.344828 | 96 | 0.729418 | false | 3.331104 | false | false | false |
onema/evee | evee/generic_event.py | 1 | 1477 | #
# This file is part of the onema.io evee Package.
# For the full copyright and license information,
# please view the LICENSE file that was distributed
# with this source code.
#
# @author Juan Manuel Torres <[email protected]>
#
from collections.abc import MutableMapping
from evee.event import Event
class GenericEvent(Event, MutableMapping):
def __init__(self, subject=None, arguments: dict = None):
super().__init__()
if arguments:
self._arguments = arguments
else:
self._arguments = {}
self.__subject = subject
def get_subject(self):
return self.__subject
def get_argument(self, key):
try:
return self[key]
except KeyError:
raise KeyError('Argument "{}" not found.'.format(key))
def set_argument(self, key, value):
self[key] = value
def get_arguments(self):
return dict(self._arguments)
def set_arguments(self, args: dict = None):
if args:
self._arguments = args
return self
def has_argument(self, key):
return key in self
def __delitem__(self, key):
del(self._arguments[key])
def __setitem__(self, key, value):
self._arguments[key] = value
def __iter__(self):
return iter(self._arguments)
def __getitem__(self, key):
return self._arguments[key]
def __len__(self):
return len(self._arguments)
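# --- Illustrative usage sketch (not part of the original module) ---
# Demonstrates the dict-like access to event arguments; the subject and
# argument names below are hypothetical.
if __name__ == '__main__':
    event = GenericEvent(subject='order', arguments={'id': 42})
    event.set_argument('status', 'paid')
    assert event.get_argument('id') == 42
    assert event.has_argument('status') and len(event) == 2
    del event['status']
    assert 'status' not in event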
| mit | -4,976,885,365,516,149,000 | 24.465517 | 66 | 0.590386 | false | 4.22 | false | false | false |
LeonardCohen/coding | py/ExtractTxt.py | 1 | 1279 | # Python 3.4 on Mac OS X
import glob
import re
import csv
from pandas import DataFrame,Series
class Jd:
def __init__(self):
self.ls = []
def file_list(self, path):
self.file_list = glob.glob(path)
def contents(self, f_name):
with open(f_name) as f:
            # readlines() won't work here: the regex needs one string, not a list of lines
self.f_c = f.read()
def parse(self, tag):
if tag == 'id':
pattern = 'Auto\sreq\sID.*\n'
elif tag == 'title':
pattern = 'Job\sTitle.*\n'
elif tag == 'status':
pattern = 'Job\sStatus.*\n'
result = ''.join(re.findall(pattern, self.f_c))
_, value = re.split('\t', result)
self.ls.append(value)
def export_csv(self, path, rows):
with open(path,'a') as f:
writer = csv.writer(f)
writer.writerow(rows)
self.ls = []
if __name__ == '__main__':
tags = ['id', 'title','status']
path = '/Users/Sail/Desktop/*.txt'
jd = Jd()
jd.file_list(path)
jd.export_csv('jd_modified.csv', tags)
for f in jd.file_list:
jd.contents(f)
for tag in tags:
jd.parse(tag)
jd.export_csv('jd_modified.csv', jd.ls)
print('Bingo')
| gpl-2.0 | 7,452,650,952,246,396,000 | 23.132075 | 71 | 0.521501 | false | 3.279487 | false | false | false |
collective/collective.subsitebehaviors | src/collective/subsitebehaviors/browser/cssviewlet.py | 1 | 2489 | import re
from plone import api
from plone.app.layout.viewlets.common import ViewletBase
from plone.app.layout.navigation.root import getNavigationRoot
#from ftw.subsite.interfaces import ISubsite
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from ..behaviors import ISubSite
from ..utils import all_dexterity_fieldnames
decamel_regxp = re.compile('[A-Z][^A-Z]+')
class CSSViewlet(ViewletBase):
template = ViewPageTemplateFile('cssviewlet.pt')
def __init__(self, context, request, view, manager=None):
super(CSSViewlet, self).__init__(context, request, view, manager)
self.subsite = None
def render(self):
nav_root = api.portal.get_navigation_root(context=self.context)
if ISubSite.providedBy(nav_root):
self.subsite = nav_root
return self.template()
else:
self.subsite = None
return ''
def parse_fieldname(self, name):
"parse css_tag[tagname]_id[id]_class[classname]_attr[attrname] format"
parsethese = ("tag", "id", "class", "cssattr")
rawspecs = name.split('_')
parsed_spec = [None, None, None, None]
for rawspec in rawspecs:
for idx, parsed in enumerate(parsethese):
value = rawspec[len(parsed):] if parsed in rawspec else None
if value:
parsed_spec[idx] = value
return parsed_spec
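    # Worked example (hypothetical field name, not part of the original code):
    # parse_fieldname("tagH2_idSiteTitle_cssattrBackgroundColor") returns
    # ["H2", "SiteTitle", None, "BackgroundColor"]; get_css() below turns that
    # into an "H2#site-title" selector carrying a "background-color: ..." rule.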
def get_css(self):
"generate css from specially formatted fields"
if not self.subsite:
return ''
css_fields = [n for n in all_dexterity_fieldnames(self.subsite) if n.startswith("css_")]
styles = []
for css_fn in css_fields:
tag, id, klass, cssattr = self.parse_fieldname(css_fn[4:]) # strip prefix
selector = tag
selector += '#' + '-'.join(decamel_regxp.findall(id)).lower() if id else ''
selector += '.' + '-'.join(decamel_regxp.findall(klass)).lower() if klass else ''
# convert BackgroundColor to background-color
cssattr = '-'.join(decamel_regxp.findall(cssattr)).lower() if cssattr else ''
            # if cssattr, the field value holds just the css attr value, otherwise the full 'attr: val' css
field_value = getattr(self.context, css_fn)
if not field_value:
field_value = ''
attr_plus_val = cssattr + ": " + field_value if cssattr else field_value
style = selector + " {\n %s;\n}" % attr_plus_val
styles.append(style)
return '\n'.join(styles)
| gpl-2.0 | -163,639,943,893,151,740 | 32.635135 | 94 | 0.634793 | false | 3.622999 | false | false | false |
yaoshengzhe/vitess | py/vtdb/vtgate_utils.py | 1 | 4537 | import logging
import re
import time
from vtdb import dbexceptions
from vtdb import vtdb_logger
from vtproto import vtrpc_pb2
INITIAL_DELAY_MS = 5
NUM_RETRIES = 3
MAX_DELAY_MS = 100
BACKOFF_MULTIPLIER = 2
def log_exception(exc, keyspace=None, tablet_type=None):
"""This method logs the exception.
Args:
exc: exception raised by calling code
keyspace: keyspace for the exception
tablet_type: tablet_type for the exception
"""
logger_object = vtdb_logger.get_logger()
shard_name = None
if isinstance(exc, dbexceptions.IntegrityError):
logger_object.integrity_error(exc)
else:
logger_object.vtclient_exception(keyspace, shard_name, tablet_type,
exc)
def exponential_backoff_retry(
retry_exceptions,
initial_delay_ms=INITIAL_DELAY_MS,
num_retries=NUM_RETRIES,
backoff_multiplier=BACKOFF_MULTIPLIER,
max_delay_ms=MAX_DELAY_MS):
"""Decorator for exponential backoff retry.
Log and raise exception if unsuccessful.
Do not retry while in a session.
Args:
retry_exceptions: tuple of exceptions to check.
initial_delay_ms: initial delay between retries in ms.
num_retries: number max number of retries.
backoff_multiplier: multiplier for each retry e.g. 2 will double the
retry delay.
max_delay_ms: upper bound on retry delay.
Returns:
A decorator method that returns wrapped method.
"""
def decorator(method):
def wrapper(self, *args, **kwargs):
attempt = 0
delay = initial_delay_ms
while True:
try:
return method(self, *args, **kwargs)
except retry_exceptions as e:
attempt += 1
if attempt > num_retries or self.session:
# In this case it is hard to discern keyspace
# and tablet_type from exception.
log_exception(e)
raise e
logging.error(
"retryable error: %s, retrying in %d ms, attempt %d of %d", e,
delay, attempt, num_retries)
time.sleep(delay/1000.0)
delay *= backoff_multiplier
delay = min(max_delay_ms, delay)
return wrapper
return decorator
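# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical VTGate client method wrapped with the retry decorator; the
# wrapped object is expected to expose a `session` attribute, since retries
# are skipped while a session is open.
#
#   class ExampleConnection(object):
#     session = None
#
#     @exponential_backoff_retry((dbexceptions.TransientError,))
#     def _execute(self, sql, bind_variables):
#       ...  # issue the RPC; may raise dbexceptions.TransientError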
class VitessError(Exception):
"""VitessError is raised by an RPC with a server-side application error.
VitessErrors have an error code and message.
"""
_errno_pattern = re.compile(r'\(errno (\d+)\)')
def __init__(self, method_name, error=None):
"""Initializes a VitessError with appropriate defaults from an error dict.
Args:
method_name: RPC method name, as a string, that was called.
error: error dict returned by an RPC call.
"""
if error is None or not isinstance(error, dict):
error = {}
self.method_name = method_name
self.code = error.get('Code', vtrpc_pb2.UNKNOWN_ERROR)
self.message = error.get('Message', 'Missing error message')
# Make self.args reflect the error components
super(VitessError, self).__init__(self.message, method_name, self.code)
def __str__(self):
"""Print the error nicely, converting the proto error enum to its name"""
return '%s returned %s with message: %s' % (self.method_name,
vtrpc_pb2.ErrorCode.Name(self.code), self.message)
def convert_to_dbexception(self, args):
"""Converts from a VitessError to the appropriate dbexceptions class.
Args:
args: argument tuple to use to create the new exception.
Returns:
An exception from dbexceptions.
"""
if self.code == vtrpc_pb2.TRANSIENT_ERROR:
return dbexceptions.TransientError(args)
if self.code == vtrpc_pb2.INTEGRITY_ERROR:
# Prune the error message to truncate after the mysql errno, since
# the error message may contain the query string with bind variables.
msg = self.message.lower()
parts = self._errno_pattern.split(msg)
pruned_msg = msg[:msg.find(parts[2])]
new_args = (pruned_msg,) + tuple(args[1:])
return dbexceptions.IntegrityError(new_args)
return dbexceptions.DatabaseError(args)
def extract_rpc_error(method_name, response):
"""Extracts any app error that's embedded in an RPC response.
Args:
method_name: RPC name, as a string.
response: response from an RPC.
Raises:
VitessError if there is an app error embedded in the reply
"""
reply = response.reply
if not reply or not isinstance(reply, dict):
return response
# Handle the case of new client => old server
err = reply.get('Err', None)
if err:
raise VitessError(method_name, err)
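if __name__ == '__main__':
  # Illustrative sketch (not part of the original module): build a VitessError
  # from a fake RPC error dict and convert it into a dbexceptions instance.
  fake_error = {'Code': vtrpc_pb2.INTEGRITY_ERROR,
                'Message': 'duplicate entry (errno 1062) (sqlstate 23000)'}
  vitess_error = VitessError('Commit', fake_error)
  converted = vitess_error.convert_to_dbexception(('duplicate entry',))
  assert isinstance(converted, dbexceptions.IntegrityError)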
| bsd-3-clause | -2,225,089,024,428,708,000 | 29.863946 | 78 | 0.668724 | false | 3.831926 | false | false | false |
DAInamite/uav_position_controller | path_follower/src/path_follower_test.py | 1 | 2566 | #! /usr/bin/env python
import roslib
roslib.load_manifest('path_follower')
import rospy
# Brings in the SimpleActionClient
import actionlib
# Brings in the messages used by the FollowPath action, including the
# goal message and the result message.
import path_follower.msg
import math
from tf.transformations import quaternion_from_euler
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import PoseStamped
# Convert from given degree of yaw rotation to geometry_msgs.msg.Quaternion
def quaternion_from_yaw_degree(yaw_degree):
q = quaternion_from_euler(0, 0, math.radians(yaw_degree))
return Quaternion(*q)
# Define a fixed example path
def fixed_path():
p00 = PoseStamped()
p00.pose.position.x = 0.0
p00.pose.orientation = quaternion_from_yaw_degree(0)
p0 = PoseStamped()
p0.pose.position.x = 2.0
p0.pose.orientation = quaternion_from_yaw_degree(0)
p1 = PoseStamped()
p1.pose.position.x = 2.0
p1.pose.position.y = 2.0
p1.pose.orientation = quaternion_from_yaw_degree(0)
p2 = PoseStamped()
p2.pose.position.x = 0.0
p2.pose.position.y = 2.0
p2.pose.orientation = quaternion_from_yaw_degree(0)
p3 = PoseStamped()
p3.pose.position.x = 0.0
p3.pose.position.y = 0.0
p3.pose.orientation = quaternion_from_yaw_degree(0)
return [p00,p0, p1, p2, p3]
def test():
# Creates the SimpleActionClient, passing the type of the action to the constructor.
client = actionlib.SimpleActionClient('/follow_path', path_follower.msg.FollowPathAction)
# Waits until the action server has started up and started
# listening for goals.
client.wait_for_server()
# Creates a goal to send to the action server.
goal = path_follower.msg.FollowPathGoal()
path = goal.path
path.header.stamp = rospy.Time.now()
path.header.frame_id = 'odom'
path.poses = fixed_path()
# Sends the goal to the action server.
client.send_goal(goal)
# Waits for the server to finish performing the action.
client.wait_for_result()
print client.get_state(), client.get_goal_status_text()
# Prints out the result of executing the action
return client.get_result() # final pose
if __name__ == '__main__':
try:
# Initializes a rospy node so that the SimpleActionClient can
# publish and subscribe over ROS.
rospy.init_node('path_follower_test')
result = test()
print "Result:", result
except rospy.ROSInterruptException:
print "program interrupted before completion"
| gpl-3.0 | -6,383,331,141,924,541,000 | 27.197802 | 93 | 0.692907 | false | 3.28133 | false | false | false |
cgranade/python-quaec | src/qecc/circuit.py | 1 | 23198 | #!/usr/bin/python
# -*- coding: utf-8 -*-
##
# circuit.py: Modeling for stabilizer circuits.
##
# © 2012 Christopher E. Granade ([email protected]) and
# Ben Criger ([email protected]).
# This file is a part of the QuaEC project.
# Licensed under the AGPL version 3.
##
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##
## IMPORTS ##
from sys import version_info
if version_info[0] == 3:
PY3 = True
from importlib import reload
elif version_info[0] == 2:
PY3 = False
else:
raise EnvironmentError("sys.version_info refers to a version of "
"Python neither 2 nor 3. This is not permitted. "
"sys.version_info = {}".format(version_info))
from copy import copy
from operator import add, mul
import itertools as it
from functools import reduce
if PY3:
from . import PauliClass as pc
from . import CliffordClass as cc
from . import utils as u
else:
import PauliClass as pc
import CliffordClass as cc
import utils as u
## ALL ##
__all__ = [
'Location', 'Circuit',
'ensure_loc', 'propagate_fault', 'possible_output_faults', 'possible_faults'
]
## INTERNAL FUNCTIONS ##
def qubits_str(qubits, qubit_names=None):
if qubit_names is None:
return ' '.join('q{}'.format(idx + 1) for idx in qubits)
else:
return ' '.join(qubit_names[idx] for idx in qubits)
## CLASSES ##
class Location(object):
"""
Represents a gate, wait, measurement or preparation location in a
circuit.
Note that currently, only gate locations are implemented.
:param kind: The kind of location to be created. Each kind is an
abbreviation drawn from ``Location.KIND_NAMES``, or is the index in
``Location.KIND_NAMES`` corresponding to the desired location kind.
:type kind: int or str
    :param qubits: Indices of the qubits on which this location acts.
:type qubits: tuple of ints.
"""
## PRIVATE CLASS CONSTANTS ##
_CLIFFORD_GATE_KINDS = [
'I', 'X', 'Y', 'Z', 'H', 'R_pi4', 'CNOT', 'CZ', 'SWAP'
]
_CLIFFORD_GATE_FUNCS = {
'I': lambda nq, idx: cc.eye_c(nq),
'X': lambda nq, idx: pc.elem_gen(nq, idx, 'X').as_clifford(),
'Y': lambda nq, idx: pc.elem_gen(nq, idx, 'Y').as_clifford(),
'Z': lambda nq, idx: pc.elem_gen(nq, idx, 'Z').as_clifford(),
'H': cc.hadamard,
'R_pi4': cc.phase,
'CNOT': cc.cnot,
'CZ': cc.cz,
'SWAP': cc.swap
}
_QCVIEWER_NAMES = {
'I': 'I', # This one is implemented by a gate definition
# included by Circuit.as_qcviewer().
'X': 'X', 'Y': 'Y', 'Z': 'Z',
'H': 'H',
'R_pi4': 'P',
'CNOT': 'tof',
'CZ': 'Z',
'SWAP': 'swap'
}
## PUBLIC CLASS CONSTANTS ##
#: Names of the kinds of locations used by QuaEC.
KIND_NAMES = sum([
_CLIFFORD_GATE_KINDS
], [])
## INITIALIZER ##
def __init__(self, kind, *qubits):
if isinstance(kind, int):
self._kind = kind
elif isinstance(kind, str):
self._kind = self.KIND_NAMES.index(kind)
else:
raise TypeError("Location kind must be an int or str.")
#if not all(isinstance(q, int) for q in qubits):
# raise TypeError('Qubit indices must be integers. Got {} instead, which is of type {}.'.format(
# *(iter((q, type(q)) for q in qubits if not isinstance(q, int)).next())
# ))
try:
self._qubits = tuple(map(int, qubits))
except TypeError as e:
raise TypeError('Qubit integers must be int-like.')
self._is_clifford = bool(self.kind in self._CLIFFORD_GATE_KINDS)
## REPRESENTATION METHODS ##
def __str__(self):
return " {:<4} {}".format(self.kind, ' '.join(map(str, self.qubits)))
def __repr__(self):
return "<{} Location on qubits {}>".format(self.kind, self.qubits)
def __hash__(self):
return hash((self._kind,) + self.qubits)
## IMPORT METHODS ##
@staticmethod
def from_quasm(source):
"""
Returns a :class:`qecc.Location` initialized from a QuASM-formatted line.
:type str source: A line of QuASM code specifying a location.
:rtype: :class:`qecc.Location`
:returns: The location represented by the given QuASM source.
"""
parts = source.split()
return Location(parts[0], *list(map(int, parts[1:])))
## PROPERTIES ##
@property
def kind(self):
"""
Returns a string defining which kind of location this instance
represents. Guaranteed to be a string that is an element of
``Location.KIND_NAMES``.
"""
return self.KIND_NAMES[self._kind]
@property
def qubits(self):
"""
Returns a tuple of ints describing which qubits this location acts upon.
"""
return self._qubits
@property
def nq(self):
"""
Returns the number of qubits in the smallest circuit that can contain
this location without relabeling qubits. For a :class:`qecc.Location`
``loc``, this property is defined as ``1 + max(loc.nq)``.
"""
return 1 + max(self.qubits)
@property
def is_clifford(self):
"""
Returns ``True`` if and only if this location represents a gate drawn
from the Clifford group.
"""
return self._is_clifford
@property
def wt(self):
"""
Returns the number of qubits on which this location acts.
"""
return len(self.qubits)
## SIMULATION METHODS ##
def as_clifford(self, nq=None):
"""
If this location represents a Clifford gate, returns the action of that
gate. Otherwise, a :obj:`RuntimeError` is raised.
:param int nq: Specifies how many qubits to represent this location as
acting upon. If not specified, defaults to the value of the ``nq``
property.
:rtype: :class:`qecc.Clifford`
"""
if not self.is_clifford:
raise RuntimeError("Location must be a Clifford gate.")
else:
if nq is None:
nq = self.nq
elif nq < self.nq:
raise ValueError('nq must be greater than or equal to the nq property.')
return self._CLIFFORD_GATE_FUNCS[self.kind](nq, *self.qubits)
## EXPORT METHODS ##
def as_qcviewer(self, qubit_names=None):
"""
Returns a representation of this location in a format suitable for
inclusion in a QCViewer file.
:param qubit_names: If specified, the given aliases will be used for the
qubits involved in this location when exporting to QCViewer.
Defaults to "q1", "q2", etc.
:rtype: str
Note that the identity (or "wait") location requires the following to be
added to QCViewer's ``gateLib``::
NAME wait
DRAWNAME "1"
SYMBOL I
1 , 0
0 , 1
"""
# FIXME: link to QCViewer in the docstring here.
return ' {gatename} {gatespec}\n'.format(
gatename=self._QCVIEWER_NAMES[self.kind],
gatespec=qubits_str(self.qubits, qubit_names),
)
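    # Worked example (illustrative, not part of the original class):
    # Location('CNOT', 0, 1).as_qcviewer() returns ' tof q1 q2\n' when the
    # default "q1", "q2", ... qubit names are used.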
## OTHER METHODS ##
def relabel_qubits(self, relabel_dict):
"""
Returns a new location related to this one by a relabeling of the
qubits. The relabelings are to be indicated by a dictionary that
specifies what each qubit index is to be mapped to.
>>> import qecc as q
>>> loc = q.Location('CNOT', 0, 1)
>>> print loc
CNOT 0 1
>>> print loc.relabel_qubits({1: 2})
CNOT 0 2
:param dict relabel_dict: If `i` is a key of `relabel_dict`, then qubit
`i` will be replaced by `relabel_dict[i]` in the returned location.
:rtype: :class:`qecc.Location`
:returns: A new location with the qubits relabeled as
specified by `relabel_dict`.
"""
return Location(self.kind, *tuple(relabel_dict[i] if i in relabel_dict else i for i in self.qubits))
def ensure_loc(loc):
if isinstance(loc, tuple):
loc = Location(*loc)
elif not isinstance(loc, Location):
raise TypeError('Locations must be specified either as Location instances or as tuples.')
return loc
class Circuit(list):
def __init__(self, *locs):
# Circuit(('CNOT', 0, 2), ('H', 1)) works, but
# Circuit('CNOT', 0, 2) doesn't work.
list.__init__(self, list(map(ensure_loc, locs)))
## SEQUENCE PROTOCOL ##
def append(self, newval):
super(Circuit, self).append(ensure_loc(newval))
append.__doc__ = list.append.__doc__
def insert(self, at, newval):
super(Circuit, self).insert(at, ensure_loc(newval))
insert.__doc__ = list.insert.__doc__
def __getitem__(self, *args):
item = super(Circuit, self).__getitem__(*args)
if not isinstance(item, list):
return item
else:
return Circuit(*item)
def __getslice__(self, *args):
return Circuit(*super(Circuit, self).__getslice__(*args))
def __add__(self, other):
if not isinstance(other, Circuit):
other = Circuit(*other)
return Circuit(*super(Circuit, self).__add__(other))
def __iadd__(self, other):
if not isinstance(other, Circuit):
other = Circuit(*other)
return Circuit(*super(Circuit, self).__iadd__(other))
## PROPERTIES ##
@property
def nq(self):
"""
Returns the number of qubits on which this circuit acts.
"""
return max(loc.nq for loc in self) if self else 0
@property
def size(self):
"""
Returns the number of locations in this circuit. Note that this property
is synonymous with :obj:`len`, in that ``len(circ) == circ.size`` for
all :class:`qecc.Circuit` instances.
"""
return len(self)
@property
def depth(self):
"""
Returns the minimum number of timesteps required to implement exactly
this circuit in parallel.
"""
return len(list(self.group_by_time()))
## IMPORT CLASS METHODS ##
@staticmethod
def from_quasm(source):
"""Returns a :class:`qecc.Circuit` object from a QuASM-formatted
file, producing one location per line."""
if not isinstance(source, str):
# Assume source is a file-like, so that iter(source) returns lines
# in the file.
it = iter(source)
else:
it = iter(source.split('\n'))
return Circuit(*list(map(Location.from_quasm, it)))
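    # Worked example (illustrative, not part of the original class):
    # Circuit.from_quasm("CNOT 0 2\nH 2") produces a circuit equivalent to
    # Circuit(('CNOT', 0, 2), ('H', 2)).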
## PRETTY PRINTING ##
def __repr__(self):
return "Circuit({})".format(", ".join(map(repr, self)))
def __str__(self):
return "\n".join(map(str, self))
def as_quasm(self):
"""
        Returns a representation of the circuit in an assembler-like format.
In this format, each location is represented by a single line where
the first field indicates the kind of location and the remaining fields
indicate the qubits upon which the location acts.
>>> import qecc as q
>>> circ = q.Circuit(('CNOT', 0, 2), ('H', 2), ('SWAP', 1, 2), ('I', 0))
>>> print circ.as_quasm()
CNOT 0 2
H 2
SWAP 1 2
I 0
"""
return str(self)
def as_qcviewer(self, inputs=(0,), outputs=(0,), qubit_names=None):
"""
Returns a string representing this circuit in the format recognized by
`QCViewer`_.
:param tuple inputs: Specifies which qubits should be marked as inputs
in the exported QCViewer circuit.
:param tuple outputs: Specifies which qubits should be marked as outputs
in the exported QCViewer circuit.
:param qubit_names: Names to be used for each qubit when exporting to
QCViewer.
.. _QCViewer: http://qcirc.iqc.uwaterloo.ca/index.php?n=Projects.QCViewer
"""
header = '.v ' + qubits_str(list(range(self.nq)), qubit_names) + '\n'
header += '.i ' + qubits_str(inputs, qubit_names) + '\n'
header += '.o ' + qubits_str(outputs, qubit_names) + '\n'
circ_text = 'BEGIN\n'
for loc in self:
circ_text += loc.as_qcviewer(qubit_names)
circ_text += 'END\n'
return header + circ_text
def as_qcircuit(self, C=None, R=None):
r"""
Typesets this circuit using the `Qcircuit`_ package for
:math:`\text{\LaTeX}`.
:param float C: Width (in ems) of each column.
:param float R: Height (in ems) of each column.
:rtype: :obj:`str`
:returns: A string containing :math:`\text{\LaTeX}` source code for use
with `Qcircuit`_.
.. _Qcircuit: http://www.cquic.org/Qcircuit/
"""
trans_cells = []
for timestep in self.group_by_time():
col = [r'\qw'] * self.nq # If nothing else, place a \qw.
hidden_qubits = set()
for loc in timestep:
if any(qubit in hidden_qubits for qubit in range(min(loc.qubits), max(loc.qubits)+1)):
# A qubit is hidden, so append and reset.
trans_cells.append(col)
col = [r'\qw'] * self.nq # If nothing else, place a \qw.
hidden_qubits = set()
if loc.wt == 1:
col[loc.qubits[0]] = r"\gate{{{0}}}".format(loc.kind if loc.kind != "I" else r"\id")
elif loc.kind == 'CNOT':
col[loc.qubits[0]] = r'\ctrl{{{0}}}'.format(loc.qubits[1] - loc.qubits[0])
col[loc.qubits[1]] = r'\targ'
else:
raise NotImplementedError("Location kind {0.kind} not supported by this method.".format(loc))
hidden_qubits.update(list(range(min(loc.qubits), max(loc.qubits)+1)))
trans_cells.append(col)
cells = u.transpose([[''] * self.nq] + trans_cells + [[r'\qw'] * self.nq])
return r"""
\Qcircuit {C} {R} {{
{0}
}}
""".format(u.latex_array_contents(cells),
C="@C{}em".format(C) if C is not None else "",
R="@R{}em".format(R) if R is not None else ""
)
## CIRCUIT SIMULATION METHODS ##
def as_clifford(self):
"""
If this circuit is composed entirely of Clifford operators, converts it
to a :class:`qecc.Clifford` instance representing the action of the
entire circuit. If the circuit is not entirely Clifford gates, this method
raises a :obj:`RuntimeError`.
"""
if not all(loc.is_clifford for loc in self):
raise RuntimeError('All locations must be Clifford gates in order to represent a circuit as a Clifford operator.')
nq = self.nq
return reduce(mul, (loc.as_clifford(nq) for loc in reversed(self)), cc.eye_c(nq))
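    # Worked example (illustrative, not part of the original class):
    # Circuit(('H', 0), ('CNOT', 0, 1)).as_clifford() returns the single
    # qecc.Clifford operator equivalent to applying H on qubit 0 followed by a
    # CNOT controlled on qubit 0 and targeting qubit 1.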
## CIRCUIT SIMPLIFICATION METHODS ##
def cancel_selfinv_gates(self, start_at=0):
"""
Transforms the circuit, removing any self-inverse gates from the circuit
if possible. Note that not all self-inverse gates are currently
supported by this method.
:param int start_at: Specifies which location to consider first. Any
locations before ``start_at`` are not considered for cancelation by
this method.
"""
SELFINV_GATES = ['H', 'X', 'Y', 'Z', 'CNOT']
if start_at == len(self):
return self
loc = self[start_at]
if loc.kind in SELFINV_GATES:
if len(loc.qubits) == 1:
# TODO: add two-qubit gates.
q = loc.qubits[0]
for idx_future in range(start_at + 1, len(self)):
if q in self[idx_future].qubits:
# Check that the kind matches.
if self[idx_future].kind == loc.kind:
self.pop(idx_future)
self.pop(start_at)
return self.cancel_selfinv_gates(start_at=start_at)
else:
# Go on to the next gate, since there's another gate
# between here.
return self.cancel_selfinv_gates(start_at=start_at+1)
return self.cancel_selfinv_gates(start_at=start_at+1)
def replace_cz_by_cnot(self):
"""
Changes all controlled-:math:`Z` gates in this circuit to
controlled-NOT gates, adding Hadamard locations as required.
"""
# FIXME: this is inefficient as hell right now.
try:
idx = next((idx for idx in range(len(self)) if self[idx].kind == 'CZ'))
q = self[idx].qubits
self[idx] = Location('CNOT', *q)
self.insert(idx + 1, ('H', q[1]))
self.insert(idx, ('H', q[1]))
return self.replace_cz_by_cnot()
except StopIteration:
return self
def group_by_time(self, pad_with_waits=False):
"""
Returns an iterator onto subcircuits of this circuit, each of depth 1.
:param bool pad_with_waits: If ``True``, each subcircuit will have
wait locations added such that every qubit is acted upon in every
subcircuit.
:yields: each depth-1 subcircuit, corresponding to time steps of the
circuit
"""
nq = self.nq
found = [False] * nq
group_acc = Circuit()
for loc in self:
if any(found[qubit] for qubit in loc.qubits):
if pad_with_waits:
group_acc += [('I', qubit) for qubit in range(nq) if not found[qubit]]
yield group_acc
found = [False] * nq
group_acc = Circuit()
for qubit in loc.qubits:
found[qubit] = True
group_acc.append(loc)
if pad_with_waits:
group_acc += [('I', qubit) for qubit in range(nq) if not found[qubit]]
yield group_acc
def pad_with_waits(self):
"""
Returns a copy of the :class:`qecc.Circuit` ``self``, which contains
explicit wait locations.
"""
return sum(self.group_by_time(pad_with_waits=True), Circuit())
## OTHER METHODS ##
def relabel_qubits(self, relabel_dict):
"""
Returns a new circuit related to this one by a relabeling of the
qubits. The relabelings are to be indicated by a dictionary that
specifies what each qubit index is to be mapped to.
>>> import qecc as q
>>> loc = q.Location('CNOT', 0, 1)
>>> print loc
CNOT 0 1
>>> print loc.relabel_qubits({1: 2})
CNOT 0 2
:param dict relabel_dict: If `i` is a key of `relabel_dict`, then qubit
`i` will be replaced by `relabel_dict[i]` in the returned circuit.
:rtype: :class:`qecc.Circuit`
:returns: A new circuit with the qubits relabeled as
specified by `relabel_dict`.
"""
return Circuit(*[
loc.relabel_qubits(relabel_dict) for loc in self
])
## FUNCTIONS ##
def propagate_fault(circuitlist, fault):
"""
Given a list of circuits representing a list of timesteps (see
:meth:`qecc.Circuit.group_by_time`) and a Pauli fault, propagates that
fault through the remainder of the time-sliced circuit.
:param list circuitlist: A list of :class:`qecc.Circuit` instances
representing the timesteps of a larger circuit.
    :param qecc.Pauli fault: A Pauli fault to occur immediately before the
        first circuit in ``circuitlist``.
:rtype: :class:`qecc.Pauli`
:returns: The effective fault after propagating ``fault`` through the
remainder of ``circuitlist``.
"""
fault_out = fault
for step in circuitlist:
fault_out = step.as_clifford().conjugate_pauli(fault_out)
return fault_out
def possible_faults(circuit):
"""
Takes a sub-circuit which has been padded with waits, and returns an
iterator onto Paulis which may occur as faults after this sub-circuit.
:param qecc.Circuit circuit: Subcircuit to in which faults are to be
considered.
"""
return it.chain.from_iterable(
pc.restricted_pauli_group(loc.qubits, circuit.nq)
for loc in circuit
)
def possible_output_faults(circuitlist):
"""
Gives an iterator onto all possible effective faults due to 1-fault paths
    occurring within ``circuitlist``, assuming it has been padded with waits.
:param list circuitlist: A list of :class:`qecc.Circuit` instances
representing timesteps in a larger circuit. See
:meth:`qecc.Circuit.group_by_time`.
:yields: :class:`qecc.Pauli` instances representing possible effective
faults due to 1-fault paths within the circuit represented by
``circuitlist``.
"""
outputs = iter([])
for timestep_idx in range(len(circuitlist)):
        outputs = map(
            lambda fault: propagate_fault(
                circuitlist[timestep_idx+1:], fault),
            possible_faults(
                circuitlist[timestep_idx]
            ))
for output in outputs:
yield output
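if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): slice a small
    # Clifford circuit into wait-padded timesteps and enumerate the effective
    # faults produced by single-fault paths.
    example_circuit = Circuit(('CNOT', 0, 1), ('H', 0))
    timesteps = list(example_circuit.group_by_time(pad_with_waits=True))
    for output_fault in possible_output_faults(timesteps):
        print(output_fault)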
| agpl-3.0 | 5,946,945,021,224,020,000 | 34.253799 | 126 | 0.555158 | false | 3.919074 | false | false | false |
Bachmann1234/diff-cover | setup.py | 1 | 1920 | #!/usr/bin/env python
import os
from setuptools import setup
from diff_cover import VERSION, DESCRIPTION
REQUIREMENTS = [line.strip() for line in
open(os.path.join("requirements", "requirements.txt")).readlines()]
setup(
name='diff_cover',
version=VERSION,
author='Matt Bachmann',
url='https://github.com/Bachmann1234/diff-cover',
test_suite='nose.collector',
description=DESCRIPTION,
license='Apache 2.0',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
classifiers=['Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development :: Testing',
'Topic :: Software Development :: Quality Assurance'],
packages=['diff_cover', 'diff_cover/violationsreporters'],
package_data={'diff_cover': ['templates/*.txt', 'templates/*.html', 'templates/*.css']},
install_requires=REQUIREMENTS,
entry_points={
'console_scripts': ['diff-cover = diff_cover.diff_cover_tool:main',
'diff-quality = diff_cover.diff_quality_tool:main']
}
)
| apache-2.0 | 3,118,059,828,008,584,700 | 44.714286 | 92 | 0.56875 | false | 4.528302 | false | false | false |
tryolabs/luminoth | luminoth/models/ssd/feature_extractor.py | 1 | 5954 | import sonnet as snt
import tensorflow as tf
from sonnet.python.modules.conv import Conv2D
from tensorflow.contrib.layers.python.layers import utils
from luminoth.models.base import BaseNetwork
VALID_SSD_ARCHITECTURES = set([
'truncated_vgg_16',
])
class SSDFeatureExtractor(BaseNetwork):
def __init__(self, config, parent_name=None, name='ssd_feature_extractor',
**kwargs):
super(SSDFeatureExtractor, self).__init__(config, name=name, **kwargs)
if self._architecture not in VALID_SSD_ARCHITECTURES:
raise ValueError('Invalid architecture "{}"'.format(
self._architecture
))
self.parent_name = parent_name
self.activation_fn = tf.nn.relu
def _init_vgg16_extra_layers(self):
self.conv6 = Conv2D(1024, [3, 3], rate=6, name='conv6')
self.conv7 = Conv2D(1024, [1, 1], name='conv7')
self.conv8_1 = Conv2D(256, [1, 1], name='conv8_1')
self.conv8_2 = Conv2D(512, [3, 3], stride=2, name='conv8_2')
self.conv9_1 = Conv2D(128, [1, 1], name='conv9_1')
self.conv9_2 = Conv2D(256, [3, 3], stride=2, name='conv9_2')
self.conv10_1 = Conv2D(128, [1, 1], name='conv10_1')
self.conv10_2 = Conv2D(256, [3, 3], padding='VALID', name='conv10_2')
self.conv11_1 = Conv2D(128, [1, 1], name='conv11_1')
self.conv11_2 = Conv2D(256, [3, 3], padding='VALID', name='conv11_2')
def _build(self, inputs, is_training=True):
"""
Args:
inputs: A Tensor of shape `(batch_size, height, width, channels)`.
Returns:
A dict of feature maps to be consumed by an SSD network
"""
# TODO: Is there a better way to manage scoping in these cases?
scope = self.module_name
if self.parent_name:
scope = self.parent_name + '/' + scope
base_net_endpoints = super(SSDFeatureExtractor, self)._build(
inputs, is_training=is_training)['end_points']
if self.truncated_vgg_16_type:
# As it is pointed out in SSD and ParseNet papers, `conv4_3` has a
# different features scale compared to other layers, to adjust it
# we need to add a spatial normalization before adding the
# predictors.
vgg_conv4_3 = base_net_endpoints[scope + '/vgg_16/conv4/conv4_3']
tf.summary.histogram('conv4_3_hist', vgg_conv4_3)
with tf.variable_scope('conv_4_3_norm'):
# Normalize through channels dimension (dim=3)
vgg_conv4_3_norm = tf.nn.l2_normalize(
vgg_conv4_3, 3, epsilon=1e-12
)
# Scale.
scale_initializer = tf.ones(
[1, 1, 1, vgg_conv4_3.shape[3]]
) * 20.0 # They initialize to 20.0 in paper
scale = tf.get_variable(
'gamma',
dtype=vgg_conv4_3.dtype.base_dtype,
initializer=scale_initializer
)
vgg_conv4_3_norm = tf.multiply(vgg_conv4_3_norm, scale)
                tf.summary.histogram('conv4_3_normalized_hist', vgg_conv4_3_norm)
tf.add_to_collection('FEATURE_MAPS', vgg_conv4_3_norm)
            # The original SSD paper uses a modified version of the vgg16
            # network; we apply the same modifications here
vgg_network_truncation_endpoint = base_net_endpoints[
scope + '/vgg_16/conv5/conv5_3']
tf.summary.histogram(
'conv5_3_hist',
vgg_network_truncation_endpoint
)
# Extra layers for vgg16 as detailed in paper
with tf.variable_scope('extra_feature_layers'):
self._init_vgg16_extra_layers()
net = tf.nn.max_pool(
vgg_network_truncation_endpoint, [1, 3, 3, 1],
padding='SAME', strides=[1, 1, 1, 1], name='pool5'
)
net = self.conv6(net)
net = self.activation_fn(net)
net = self.conv7(net)
net = self.activation_fn(net)
tf.summary.histogram('conv7_hist', net)
tf.add_to_collection('FEATURE_MAPS', net)
net = self.conv8_1(net)
net = self.activation_fn(net)
net = self.conv8_2(net)
net = self.activation_fn(net)
tf.summary.histogram('conv8_hist', net)
tf.add_to_collection('FEATURE_MAPS', net)
net = self.conv9_1(net)
net = self.activation_fn(net)
net = self.conv9_2(net)
net = self.activation_fn(net)
tf.summary.histogram('conv9_hist', net)
tf.add_to_collection('FEATURE_MAPS', net)
net = self.conv10_1(net)
net = self.activation_fn(net)
net = self.conv10_2(net)
net = self.activation_fn(net)
tf.summary.histogram('conv10_hist', net)
tf.add_to_collection('FEATURE_MAPS', net)
net = self.conv11_1(net)
net = self.activation_fn(net)
net = self.conv11_2(net)
net = self.activation_fn(net)
tf.summary.histogram('conv11_hist', net)
tf.add_to_collection('FEATURE_MAPS', net)
# This parameter determines onto which variables we try to load the
# pretrained weights
self.pretrained_weights_scope = scope + '/vgg_16'
# It's actually an ordered dict
return utils.convert_collection_to_dict('FEATURE_MAPS')
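    # Stripped of the surrounding graph plumbing, the conv4_3 treatment above is
    # just an L2 normalization with a learned per-channel scale (illustrative
    # sketch; ``feature_map`` is a hypothetical NHWC tensor with C channels):
    #
    #     normalized = tf.nn.l2_normalize(feature_map, 3, epsilon=1e-12)
    #     gamma = tf.get_variable('gamma', initializer=tf.ones([1, 1, 1, C]) * 20.0)
    #     scaled = normalized * gamma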
def get_trainable_vars(self):
"""
Returns a list of the variables that are trainable.
Returns:
trainable_variables: a tuple of `tf.Variable`.
"""
return snt.get_variables_in_module(self)
| bsd-3-clause | 2,943,543,094,307,964,000 | 41.22695 | 79 | 0.545516 | false | 3.650521 | false | false | false |
CenterForOpenScience/modular-file-renderer | mfr/extensions/image/render.py | 2 | 1613 | import os
import furl
from mako.lookup import TemplateLookup
from mfr.core import extension
from mfr.extensions.image import settings
from mfr.extensions.utils import munge_url_for_localdev, escape_url_for_template
class ImageRenderer(extension.BaseRenderer):
TEMPLATE = TemplateLookup(
directories=[
os.path.join(os.path.dirname(__file__), 'templates')
]).get_template('viewer.mako')
def render(self):
self.metrics.add('needs_export', False)
if self.metadata.ext in settings.EXPORT_EXCLUSIONS:
download_url = munge_url_for_localdev(self.url)
safe_url = escape_url_for_template(download_url.geturl())
return self.TEMPLATE.render(base=self.assets_url, url=safe_url)
exported_url = furl.furl(self.export_url)
if settings.EXPORT_MAXIMUM_SIZE and settings.EXPORT_TYPE:
exported_url.args['format'] = '{}.{}'.format(settings.EXPORT_MAXIMUM_SIZE, settings.EXPORT_TYPE)
elif settings.EXPORT_TYPE:
exported_url.args['format'] = settings.EXPORT_TYPE
else:
download_url = munge_url_for_localdev(self.url)
safe_url = escape_url_for_template(download_url.geturl())
return self.TEMPLATE.render(base=self.assets_url, url=safe_url)
self.metrics.add('needs_export', True)
safe_url = escape_url_for_template(exported_url.url)
return self.TEMPLATE.render(base=self.assets_url, url=safe_url)
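    # Note: when the export path above is taken, the export URL gains a query
    # argument of the form format=<EXPORT_MAXIMUM_SIZE>.<EXPORT_TYPE> (for
    # example "format=1200.jpg"; the concrete values are assumptions and come
    # from this extension's settings module).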
@property
def file_required(self):
return False
@property
def cache_result(self):
return False
| apache-2.0 | -63,360,576,098,337,810 | 34.844444 | 108 | 0.66398 | false | 3.691076 | false | false | false |
battlemidget/conjure-up | conjureup/juju.py | 1 | 18086 | """ Juju helpers
"""
import os
import sys
from concurrent import futures
from functools import partial, wraps
from subprocess import DEVNULL, PIPE, CalledProcessError, Popen, TimeoutExpired
import yaml
import macumba
from bundleplacer.charmstore_api import CharmStoreID
from conjureup import async
from conjureup.app_config import app
from conjureup.utils import juju_path, run
from macumba.v2 import JujuClient
JUJU_ASYNC_QUEUE = "juju-async-queue"
this = sys.modules[__name__]
# vars
this.IS_AUTHENTICATED = False
this.CLIENT = None
this.USER_TAG = None
# login decorator
def requires_login(f):
def _decorator(*args, **kwargs):
if not this.IS_AUTHENTICATED:
login(force=True)
return f(*args, **kwargs)
return wraps(f)(_decorator)
def read_config(name):
""" Reads a juju config file
Arguments:
name: filename without extension (ext defaults to yaml)
Returns:
dictionary of yaml object
"""
abs_path = os.path.join(juju_path(), "{}.yaml".format(name))
if not os.path.isfile(abs_path):
raise Exception("Cannot load {}".format(abs_path))
return yaml.safe_load(open(abs_path))
def get_current_controller():
""" Grabs the current default controller
"""
try:
return get_controllers()['current-controller']
except KeyError:
return None
def get_controller(id):
""" Return specific controller
Arguments:
id: controller id
"""
if 'controllers' in get_controllers() \
and id in get_controllers()['controllers']:
return get_controllers()['controllers'][id]
return None
def get_controller_in_cloud(cloud):
""" Returns a controller that is bootstrapped on the named cloud
Arguments:
cloud: cloud to check for
Returns:
available controller or None if nothing available
"""
controllers = get_controllers()['controllers'].items()
for controller_name, controller in controllers:
if cloud == controller['cloud']:
return controller_name
return None
def login(force=False):
""" Login to Juju API server
"""
if this.IS_AUTHENTICATED is True and not force:
return
if app.current_controller is None:
raise Exception("Unable to determine current controller")
if app.current_model is None:
raise Exception("Tried to login with no current model set.")
env = get_controller(app.current_controller)
account = get_account(app.current_controller)
uuid = get_model(app.current_controller, app.current_model)['model-uuid']
server = env['api-endpoints'][0]
this.USER_TAG = "user-{}".format(account['user'].split("@")[0])
url = os.path.join('wss://', server, 'model', uuid, 'api')
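    # e.g. wss://10.0.0.1:17070/model/<model-uuid>/api (illustrative endpoint)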
this.CLIENT = JujuClient(
user=this.USER_TAG,
url=url,
password=account['password'])
try:
this.CLIENT.login()
except macumba.errors.LoginError as e:
raise e
this.IS_AUTHENTICATED = True # noqa
def bootstrap(controller, cloud, series="xenial", credential=None):
""" Performs juju bootstrap
    If the cloud is not LXD, pass along the newly defined credentials
    Arguments:
    controller: name of your controller
    cloud: name of local or public cloud to deploy to
    series: bootstrap series to use, defaults to xenial
    credential: credentials key
"""
cmd = "juju-2.0 bootstrap {} {} " \
"--config image-stream=daily ".format(
controller, cloud)
cmd += "--config enable-os-upgrade=false "
cmd += "--default-model conjure-up "
if app.argv.http_proxy:
cmd += "--config http-proxy={} ".format(app.argv.http_proxy)
if app.argv.https_proxy:
cmd += "--config https-proxy={} ".format(app.argv.https_proxy)
if app.argv.apt_http_proxy:
cmd += "--config apt-http-proxy={} ".format(app.argv.apt_http_proxy)
if app.argv.apt_https_proxy:
cmd += "--config apt-https-proxy={} ".format(app.argv.apt_https_proxy)
if app.argv.no_proxy:
cmd += "--config no-proxy={} ".format(app.argv.no_proxy)
if app.argv.bootstrap_timeout:
cmd += "--config bootstrap-timeout={} ".format(
app.argv.bootstrap_timeout)
if app.argv.bootstrap_to:
cmd += "--to {} ".format(app.argv.bootstrap_to)
cmd += "--bootstrap-series={} ".format(series)
if cloud != "localhost":
cmd += "--credential {}".format(credential)
app.log.debug("bootstrap cmd: {}".format(cmd))
try:
pathbase = os.path.join(app.config['spell-dir'],
'{}-bootstrap').format(app.current_controller)
with open(pathbase + ".out", 'w') as outf:
with open(pathbase + ".err", 'w') as errf:
p = Popen(cmd, shell=True, stdout=outf,
stderr=errf)
while p.poll() is None:
async.sleep_until(2)
return p
except CalledProcessError:
raise Exception("Unable to bootstrap.")
except async.ThreadCancelledException:
p.terminate()
try:
p.wait(timeout=2)
except TimeoutExpired:
p.kill()
p.wait()
return p
except Exception as e:
raise e
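# For illustration, a localhost (LXD) bootstrap assembled above ends up running a
# command along these lines (extra flags vary with the proxy/timeout argv options):
#
#     juju-2.0 bootstrap <controller> localhost --config image-stream=daily \
#         --config enable-os-upgrade=false --default-model conjure-up \
#         --bootstrap-series=xenial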
def bootstrap_async(controller, cloud, credential=None, exc_cb=None):
""" Performs a bootstrap asynchronously
"""
return async.submit(partial(bootstrap,
controller=controller,
cloud=cloud,
credential=credential), exc_cb,
queue_name=JUJU_ASYNC_QUEUE)
def model_available():
""" Checks if juju is available
Returns:
True/False if juju status was successful and a working model is found
"""
try:
run('juju-2.0 status', shell=True,
check=True, stderr=DEVNULL, stdout=DEVNULL)
except CalledProcessError:
return False
return True
def autoload_credentials():
""" Automatically checks known places for cloud credentials
"""
try:
run('juju-2.0 autoload-credentials', shell=True, check=True)
except CalledProcessError:
return False
return True
def get_credential(cloud, user):
""" Get credentials for user
Arguments:
cloud: cloud applicable to user credentials
user: user listed in the credentials
"""
creds = get_credentials()
if cloud in creds.keys():
if user in creds[cloud].keys():
return creds[cloud][user]
raise Exception(
"Unable to locate credentials for: {}".format(user))
def get_credentials(secrets=True):
""" List credentials
    This will fall back to reading the credentials file directly
Arguments:
secrets: True/False whether to show secrets (ie password)
Returns:
List of credentials
"""
cmd = 'juju list-credentials --format yaml'
if secrets:
cmd += ' --show-secrets'
sh = run(cmd, shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
try:
env = read_config('credentials')
return env['credentials']
except:
raise Exception(
"Unable to list credentials: {}".format(
sh.stderr.decode('utf8')))
env = yaml.safe_load(sh.stdout.decode('utf8'))
return env['credentials']
def get_clouds():
""" List available clouds
Returns:
Dictionary of all known clouds including newly created MAAS/Local
"""
sh = run('juju-2.0 list-clouds --format yaml',
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to list clouds: {}".format(sh.stderr.decode('utf8'))
)
return yaml.safe_load(sh.stdout.decode('utf8'))
def get_cloud(name):
""" Return specific cloud information
Arguments:
name: name of cloud to query, ie. aws, lxd, local:provider
Returns:
Dictionary of cloud attributes
"""
if name in get_clouds().keys():
return get_clouds()[name]
raise LookupError("Unable to locate cloud: {}".format(name))
def deploy(bundle):
""" Juju deploy bundle
Arguments:
bundle: Name of bundle to deploy, can be a path to local bundle file or
charmstore path.
"""
try:
return run('juju-2.0 deploy {}'.format(bundle), shell=True,
stdout=DEVNULL, stderr=PIPE)
except CalledProcessError as e:
raise e
def add_machines(machines, msg_cb=None, exc_cb=None):
"""Add machines to model
Arguments:
machines: list of dictionaries of machine attributes.
The key 'series' is required, and 'constraints' is the only other
supported key
"""
def _prepare_constraints(constraints):
new_constraints = {}
if not isinstance(constraints, str):
app.log.debug(
"Invalid constraints: {}, skipping".format(
constraints))
return new_constraints
list_constraints = constraints.split(' ')
for c in list_constraints:
try:
constraint, constraint_value = c.split('=')
new_constraints[constraint] = constraint_value
except ValueError as e:
app.log.debug("Skipping constraint: {} ({})".format(c, e))
return new_constraints
@requires_login
def _add_machines_async():
machine_params = [{"series": m['series'],
"constraints": _prepare_constraints(
m.get('constraints', "")),
"jobs": ["JobHostUnits"]}
for m in machines]
app.log.debug("AddMachines: {}".format(machine_params))
try:
machine_response = this.CLIENT.Client(
request="AddMachines", params={"params": machine_params})
app.log.debug("AddMachines returned {}".format(machine_response))
except Exception as e:
if exc_cb:
exc_cb(e)
return
if msg_cb:
msg_cb("Added machines: {}".format(machine_response))
return machine_response
return async.submit(_add_machines_async,
exc_cb,
queue_name=JUJU_ASYNC_QUEUE)
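# A minimal machine spec accepted by add_machines (values are illustrative):
#
#     add_machines([{'series': 'xenial', 'constraints': 'mem=4G cores=2'}])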
def deploy_service(service, default_series, msg_cb=None, exc_cb=None):
"""Juju deploy service.
If the service's charm ID doesn't have a revno, will query charm
store to get latest revno for the charm.
If the service's charm ID has a series, use that, otherwise use
the provided default series.
Arguments:
service: Service to deploy
msg_cb: message callback
exc_cb: exception handler callback
Returns a future that will be completed after the deploy has been
submitted to juju
"""
@requires_login
def _deploy_async():
if service.csid.series == "":
service.csid.series = default_series
if service.csid.rev == "":
id_no_rev = service.csid.as_str_without_rev()
mc = app.metadata_controller
futures.wait([mc.metadata_future])
info = mc.get_charm_info(id_no_rev, lambda _: None)
service.csid = CharmStoreID(info["Id"])
# Add charm to Juju
app.log.debug("Adding Charm {}".format(service.csid.as_str()))
rv = this.CLIENT.Client(request="AddCharm",
params={"url": service.csid.as_str()})
app.log.debug("AddCharm returned {}".format(rv))
# We must load any resources prior to deploying
resources = app.metadata_controller.get_resources(
service.csid.as_str_without_rev())
app.log.debug("Resources: {}".format(resources))
if resources:
params = {"tag": "application-{}".format(service.csid.name),
"url": service.csid.as_str(),
"resources": resources}
app.log.debug("AddPendingResources: {}".format(params))
resource_ids = this.CLIENT.Resources(
request="AddPendingResources",
params=params)
app.log.debug("AddPendingResources returned: {}".format(
resource_ids))
application_to_resource_map = {}
for idx, resource in enumerate(resources):
pid = resource_ids['pending-ids'][idx]
application_to_resource_map[resource['Name']] = pid
service.resources = application_to_resource_map
deploy_args = service.as_deployargs()
deploy_args['series'] = service.csid.series
app_params = {"applications": [deploy_args]}
app.log.debug("Deploying {}: {}".format(service, app_params))
deploy_message = "Deploying {}... ".format(
service.service_name)
if msg_cb:
msg_cb("{}".format(deploy_message))
rv = this.CLIENT.Application(request="Deploy",
params=app_params)
app.log.debug("Deploy returned {}".format(rv))
if msg_cb:
msg_cb("{} deployed.".format(service.service_name))
return async.submit(_deploy_async,
exc_cb,
queue_name=JUJU_ASYNC_QUEUE)
def set_relations(services, msg_cb=None, exc_cb=None):
""" Juju set relations
Arguments:
services: list of services with relations to set
msg_cb: message callback
exc_cb: exception handler callback
"""
relations = set()
for service in services:
for a, b in service.relations:
if (a, b) not in relations and (b, a) not in relations:
relations.add((a, b))
@requires_login
def do_add_all():
if msg_cb:
msg_cb("Setting application relations")
for a, b in list(relations):
params = {"Endpoints": [a, b]}
try:
app.log.debug("AddRelation: {}".format(params))
rv = this.CLIENT.Application(request="AddRelation",
params=params)
app.log.debug("AddRelation returned: {}".format(rv))
except Exception as e:
if exc_cb:
exc_cb(e)
return
if msg_cb:
msg_cb("Completed setting application relations")
return async.submit(do_add_all,
exc_cb,
queue_name=JUJU_ASYNC_QUEUE)
def get_controller_info(name=None):
""" Returns information on current controller
Arguments:
    name: if set, show info for the named controller, otherwise the current one.
"""
cmd = 'juju-2.0 show-controller --format yaml'
if name is not None:
cmd += ' {}'.format(name)
sh = run(cmd, shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to determine controller: {}".format(
sh.stderr.decode('utf8')))
out = yaml.safe_load(sh.stdout.decode('utf8'))
try:
return next(iter(out.values()))
except:
return out
def get_controllers():
""" List available controllers
Returns:
List of known controllers
"""
sh = run('juju-2.0 list-controllers --format yaml',
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise LookupError(
"Unable to list controllers: {}".format(sh.stderr.decode('utf8')))
env = yaml.safe_load(sh.stdout.decode('utf8'))
return env
def get_account(controller):
""" List account information for controller
Arguments:
controller: controller id
Returns:
Dictionary containing list of accounts for controller and the
current account in use.
"""
return get_accounts().get(controller, {})
def get_accounts():
""" List available accounts
Returns:
List of known accounts
"""
env = os.path.join(juju_path(), 'accounts.yaml')
if not os.path.isfile(env):
raise Exception(
"Unable to find: {}".format(env))
with open(env, 'r') as c:
env = yaml.load(c)
return env['controllers']
raise Exception("Unable to find accounts")
def get_model(controller, name):
""" List information for model
Arguments:
name: model name
controller: name of controller to work in
Returns:
Dictionary of model information
"""
models = get_models(controller)['models']
for m in models:
if m['name'] == name:
return m
raise LookupError(
"Unable to find model: {}".format(name))
def add_model(name, controller):
""" Adds a model to current controller
Arguments:
    name: name of the model to add
    controller: controller to add the model in
"""
sh = run('juju-2.0 add-model {} -c {}'.format(name, controller),
shell=True, stdout=DEVNULL, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to create model: {}".format(sh.stderr.decode('utf8')))
def get_models(controller):
""" List available models
Arguments:
controller: existing controller to get models for
Returns:
List of known models
"""
sh = run('juju-2.0 list-models --format yaml -c {}'.format(controller),
shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise LookupError(
"Unable to list models: {}".format(sh.stderr.decode('utf8')))
out = yaml.safe_load(sh.stdout.decode('utf8'))
return out
def get_current_model():
try:
return get_models()['current-model']
except:
return None
def version():
""" Returns version of Juju
"""
sh = run('juju-2.0 version', shell=True, stdout=PIPE, stderr=PIPE)
if sh.returncode > 0:
raise Exception(
"Unable to get Juju Version".format(sh.stderr.decode('utf8')))
out = sh.stdout.decode('utf8')
if isinstance(out, list):
return out.pop()
else:
return out
| mit | 3,829,067,942,046,718,000 | 29.244147 | 79 | 0.594493 | false | 4.130167 | true | false | false |
javazquez/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/ws4py/streaming.py | 13 | 8217 | # -*- coding: utf-8 -*-
from ws4py.messaging import TextMessage, BinaryMessage, CloseControlMessage,\
PingControlMessage, PongControlMessage
from ws4py.framing import Frame, OPCODE_CONTINUATION, OPCODE_TEXT, \
OPCODE_BINARY, OPCODE_CLOSE, OPCODE_PING, OPCODE_PONG
from ws4py.exc import FrameTooLargeException, ProtocolException, InvalidBytesError,\
TextFrameEncodingException, UnsupportedFrameTypeException, StreamClosed
class Stream(object):
def __init__(self):
"""
Represents a websocket stream of bytes flowing in and out.
The stream doesn't know about the data provider itself and
doesn't even know about sockets. Instead the stream simply
        yields for more bytes whenever it requires them. The stream owner
        is responsible for providing the stream with those bytes until
a frame can be interpreted.
>>> s = Stream()
>>> s.parser.send(BYTES)
>>> s.has_messages
False
>>> s.parser.send(MORE_BYTES)
>>> s.has_messages
True
>>> s.messages.pop()
<TextMessage ... >
"""
self.message = None
"""
        Parsed text or binary messages. Whenever the parser
reads more bytes from a fragment message, those bytes
are appended to the most recent message.
"""
self.pings = []
"""
Parsed ping control messages. They are instances of
messaging.PingControlMessage
"""
self.pongs = []
"""
Parsed pong control messages. They are instances of
messaging.PongControlMessage
"""
self.closing = None
"""
        Parsed close control message. Instance of
messaging.CloseControlMessage
"""
self.errors = []
"""
Detected errors while parsing. Instances of
messaging.CloseControlMessage
"""
self.parser = self.receiver()
"""
Parser in charge to process bytes it is fed with.
"""
# Python generators must be initialized once.
self.parser.next()
def text_message(self, text):
"""
Returns a messaging.TextMessage instance
ready to be built. Convenience method so
that the caller doesn't need to import the
TextMessage class itself.
@param text: data to be carried by the message
"""
return TextMessage(text=text)
def binary_message(self, bytes):
"""
Returns a messaging.BinaryMessage instance
ready to be built. Convenience method so
that the caller doesn't need to import the
BinaryMessage class itself.
        @param bytes: data to be carried by the message
"""
return BinaryMessage(bytes)
@property
def has_message(self):
"""
Checks if the stream has received any message
which, if fragmented, is completed.
"""
if self.message is not None:
return self.message.completed
return False
def close(self, code=1000, reason=''):
"""
Returns a close control message built from
a messaging.CloseControlMessage instance.
@param code: closing status code
@param reason: status message
@return: bytes representing a close control single framed message
"""
return CloseControlMessage(code=code, reason=reason).single()
def ping(self, data=''):
"""
Returns a ping control message built from
a messaging.PingControlMessage instance.
@param data: ping data
@return: bytes representing a ping single framed message
"""
return PingControlMessage(data).single()
def pong(self, data=''):
"""
        Returns a pong control message built from
a messaging.PongControlMessage instance.
@param data: pong data
@return: bytes representing a pong single framed message
"""
return PongControlMessage(data).single()
def receiver(self):
"""
Parser that keeps trying to interpret bytes it is fed with as
incoming frames part of a message.
        Control messages are single-frame only while data messages, like text
        and binary, may be fragmented across frames.
        The way it works is by instantiating a framing.Frame object,
        then running its parser generator which yields how many bytes
        it requires to perform its task. The stream parser yields this value
        to its caller and feeds the frame parser.
When the frame parser raises StopIteration, the stream parser
tries to make sense of the parsed frame. It dispatches the frame's bytes
to the most appropriate message type based on the frame's opcode.
        Overall this makes the stream parser totally agnostic to
the data provider.
"""
running = True
while running:
frame = Frame()
while True:
try:
bytes = (yield frame.parser.next())
if bytes is None:
raise InvalidBytesError()
frame.parser.send(bytes)
except StopIteration:
bytes = frame.body or ''
if frame.masking_key and bytes:
bytes = frame.unmask(bytes)
if frame.opcode == OPCODE_TEXT:
if self.message and not self.message.completed:
# We got a text frame before we completed the previous one
raise ProtocolException()
try:
m = TextMessage(bytes.decode("utf-8", "replace"))
m.completed = (frame.fin == 1)
self.message = m
except UnicodeDecodeError:
self.errors.append(CloseControlMessage(code=1007))
break
elif frame.opcode == OPCODE_BINARY:
m = BinaryMessage(bytes)
m.completed = (frame.fin == 1)
self.message = m
elif frame.opcode == OPCODE_CONTINUATION:
m = self.message
if m is None:
raise ProtocolException()
m.completed = (frame.fin == 1)
if m.opcode == OPCODE_TEXT:
try:
m.extend(bytes.decode("utf-8", "replace"))
except UnicodeDecodeError:
self.errors.append(CloseControlMessage(code=1007))
break
else:
m.extend(bytes)
elif frame.opcode == OPCODE_CLOSE:
self.closing = CloseControlMessage(reason=bytes.decode("utf-8", "replace"))
elif frame.opcode == OPCODE_PING:
self.pings.append(PingControlMessage(bytes))
elif frame.opcode == OPCODE_PONG:
self.pongs.append(PongControlMessage(bytes))
else:
self.errors.append(CloseControlMessage(code=1003))
# When the frame's payload is empty, we must yield
# once more so that the caller is properly aligned
if not bytes:
yield 0
break
except ProtocolException:
self.errors.append(CloseControlMessage(code=1002))
except FrameTooLargeException:
self.errors.append(CloseControlMessage(code=1004))
except StreamClosed:
running = False
break
frame.parser.close()
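# A rough feeding loop, mirroring the class docstring above (``sock`` is a
# hypothetical blocking socket; error handling omitted):
#
#     stream = Stream()
#     while not stream.has_message and stream.closing is None and not stream.errors:
#         stream.parser.send(sock.recv(4096))
#     if stream.has_message:
#         handle(stream.message)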
| apache-2.0 | -558,925,618,375,780,300 | 34.726087 | 99 | 0.539978 | false | 5.311571 | false | false | false |
seize-the-dave/XlsxWriter | xlsxwriter/test/comparison/test_types05.py | 8 | 1978 | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2015, John McNamara, [email protected]
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'types05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = ['xl/calcChain.xml',
'[Content_Types].xml',
'xl/_rels/workbook.xml.rels']
self.ignore_elements = {}
def test_write_formula_default(self):
"""Test writing formulas with strings_to_formulas on."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, '=1+1', None, 2)
worksheet.write_string(1, 0, '=1+1')
workbook.close()
self.assertExcelEqual()
def test_write_formula_implicit(self):
"""Test writing formulas with strings_to_formulas on."""
workbook = Workbook(self.got_filename, {'strings_to_formulas': True})
worksheet = workbook.add_worksheet()
worksheet.write(0, 0, '=1+1', None, 2)
worksheet.write_string(1, 0, '=1+1')
workbook.close()
self.assertExcelEqual()
def test_write_formula_explicit(self):
"""Test writing formulas with strings_to_formulas off."""
workbook = Workbook(self.got_filename, {'strings_to_formulas': False})
worksheet = workbook.add_worksheet()
worksheet.write_formula(0, 0, '=1+1', None, 2)
worksheet.write(1, 0, '=1+1')
workbook.close()
self.assertExcelEqual()
| bsd-2-clause | -7,885,417,370,544,912,000 | 27.666667 | 79 | 0.581901 | false | 3.732075 | true | false | false |
jpablio/Directrices-JPV | tools/copy_branches.py | 21 | 4362 | # -*- coding: utf-8 -*-
"""
Dependency:
`git-remote-bzr` from https://github.com/felipec/git-remote-bzr
must be in the `$PATH`.
"""
from __future__ import absolute_import, print_function
import argparse
import os
import subprocess
from contextlib import contextmanager
from pkg_resources import resource_string
import yaml
@contextmanager
def cd(path):
cwd = os.getcwd()
os.chdir(path)
yield
os.chdir(cwd)
class Migrate(object):
def __init__(self, path, push=False, mapping=None):
self.path = path
self.push = push
self.mapping = mapping
def _init_git(self, project):
        # we keep the series name so we can handle both projects:
# lp:banking-addons/7.0
# lp:banking-addons/bank-statement-reconcile-7.0
name = project.replace('/', '-')
repo = os.path.join(self.path, name)
print('Working on', repo)
if not os.path.exists(repo):
os.mkdir(repo)
with cd(repo):
print(' git init', name)
subprocess.check_output(['git', 'init'])
return repo
def _add_remote(self, repo, name, remote):
with cd(repo):
remotes = subprocess.check_output(['git', 'remote'])
remotes = remotes.split('\n')
if name not in remotes:
print(' git remote add', name, remote)
subprocess.check_output(['git', 'remote', 'add',
name, remote])
def _add_bzr_branch(self, repo, bzr_branch, gh_branch):
with cd(repo):
self._add_remote(repo, gh_branch, "bzr::%s" % bzr_branch)
print(' git fetch', gh_branch, 'from', bzr_branch)
subprocess.check_output(['git', 'fetch', gh_branch])
def _push_to_github(self, repo, refs):
with cd(repo):
print(' git push github', refs)
if self.push:
subprocess.check_output(
['git', 'push', 'github', refs])
def _push_tags_to_github(self, repo):
with cd(repo):
print(' git push github --tags')
if self.push:
subprocess.check_output(
['git', 'push', 'github', '--tags'])
def _parse_mapping(self):
if self.mapping:
projects = open(self.mapping, 'r')
else:
projects = resource_string(__name__, 'branches.yaml')
projects = yaml.load(projects)
return projects
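    # The mapping file is expected to look roughly like this (values are
    # illustrative; the keys mirror what copy_branches() below reads):
    #
    #   projects:
    #     - github: [email protected]:OCA/banking-addons.git
    #       branches:
    #         - ['lp:banking-addons/7.0', '7.0']
    #         - ['lp:banking-addons/bank-statement-reconcile-7.0',
    #            'bank-statement-reconcile-7.0']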
def copy_branches(self, only_projects=None):
projects = self._parse_mapping()
for project in projects['projects']:
gh_url = project['github']
gh_name = gh_url[15:-4]
if only_projects:
if gh_name not in only_projects:
continue
repo = self._init_git(gh_name)
self._add_remote(repo, 'github', gh_url)
for source, gh_branch in project['branches']:
self._add_bzr_branch(repo, source, gh_branch)
refs = ('refs/remotes/{branch}/master:'
'refs/heads/{branch}'.format(branch=gh_branch))
self._push_to_github(repo, refs)
self._push_tags_to_github(repo)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("path",
help="Branches directory")
parser.add_argument("--no-push", dest="push", action='store_false')
parser.add_argument("--push", dest="push", action='store_true')
parser.add_argument("--mapping",
help="File that contains the declaration of the "
"mapping.")
parser.add_argument("--projects", nargs='*',
help="Name of the Github projects that you want to "
"migrate.")
parser.set_defaults(push=False)
args = parser.parse_args()
if not os.path.exists(args.path):
exit("Path %s does not exist" % args.path)
if args.mapping and not os.path.exists(args.mapping):
exit("File %s does not exist" % args.mapping)
migration = Migrate(os.path.abspath(args.path),
push=args.push,
mapping=args.mapping)
migration.copy_branches(only_projects=args.projects)
if __name__ == '__main__':
main()
| agpl-3.0 | -3,905,283,719,602,350,000 | 32.553846 | 76 | 0.545621 | false | 3.969063 | false | false | false |
aewallin/allantools | tests/realtime/test_rt_nbs14_1k.py | 2 | 4384 | """
NBS14 test for allantools (https://github.com/aewallin/allantools)
nbs14 datasets are from http://www.ieee-uffc.org/frequency-control/learning-riley.asp
Stable32 was used to calculate the deviations we compare against.
The small dataset and deviations are from
http://www.ieee-uffc.org/frequency-control/learning-riley.asp
http://www.wriley.com/paper1ht.htm
see also:
NIST Special Publication 1065
Handbook of Frequency Stability Analysis
http://tf.nist.gov/general/pdf/2220.pdf
around page 107
"""
import math
import time
import sys
import pytest
import numpy
import allantools as allan
# 1000 point deviations from:
# http://www.ieee-uffc.org/frequency-control/learning-riley.asp Table III
# http://www.wriley.com/paper1ht.htm
# http://tf.nist.gov/general/pdf/2220.pdf page 108
nbs14_1000_devs = [ [2.922319e-01, 9.965736e-02, 3.897804e-02], # 0 ADEV 1, 10, 100
[2.922319e-01, 9.159953e-02, 3.241343e-02], # 1 OADEV
[2.922319e-01, 6.172376e-02, 2.170921e-02], # 2 MDEV
#[2.922319e-01, 9.172131e-02, 3.501795e-02], # TOTDEV, http://www.ieee-uffc.org/frequency-control/learning-riley.asp
# "Calculated using bias-corrected reflected method from endpoint-matched phase data"
[2.922319e-01, 9.134743e-02, 3.406530e-02], # 3 TOTDEV, http://tf.nist.gov/general/pdf/2220.pdf page 108
# "Calculated using doubly reflected TOTVAR method"
[2.943883e-01, 1.052754e-01, 3.910860e-02], # 4 HDEV
[1.687202e-01, 3.563623e-01, 1.253382e-00], # 5 TDEV
[2.943883e-01, 9.581083e-02, 3.237638e-02], # 6 OHDEV
[2.884664e-01, 9.296352e-02, 3.206656e-02], # 7 standard deviation, sample (not population)
[2.943883e-01, 9.614787e-02, 3.058103e-02], # 8 HTOTDEV
#[2.418528e-01, 6.499161e-02, 2.287774e-02], # 9 MTOTDEV (from published table, WITH bias correction)
[2.0664e-01, 5.5529e-02, 1.9547e-02], # MTOTDEV (from Stable32 v1.60 decade run, NO bias correction)
#[1.396338e-01, 3.752293e-01, 1.320847e-00], # 10 TTOTDEV (from published table, WITH bias correction)
[1.1930e-01, 3.2060e-01, 1.1285e+00 ], # 10 TTOTDEV (from Stable 32 v1.60 decade run, NO bias correction)
[1.0757e-01, 3.1789e-02, 5.0524e-03 ], ] # 11 THEO1 (tau= 10,100,1000, from Stable32, NO bias correction
# this generates the nbs14 1000 point frequency dataset.
# random number generator described in
# http://www.ieee-uffc.org/frequency-control/learning-riley.asp
# http://tf.nist.gov/general/pdf/2220.pdf page 107
# http://www.wriley.com/tst_suit.dat
def nbs14_1000():
"""
1000-point test dataset.
data is fractional frequency
"""
n = [0]*1000
n[0] = 1234567890
for i in range(999):
n[i+1] = (16807*n[i]) % 2147483647
# the first three numbers are given in the paper, so check them:
assert( n[1] == 395529916 and n[2] == 1209410747 and n[3] == 633705974 )
n = [x/float(2147483647) for x in n] # normalize so that n is in [0, 1]
return n
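# (The recurrence above is the classic Park-Miller "minimal standard" generator:
#  n[i+1] = 16807 * n[i] mod (2**31 - 1), seeded here with 1234567890.)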
nbs14_f = nbs14_1000()
nbs14_phase = allan.frequency2phase(nbs14_f, 1.0)
def check_dev(name, tau, a, b):
print(name," tau=",tau, " ", a ," == ", b)
assert( numpy.isclose( a, b) )
def test_oadev_rt_nbs14_1k():
oadev_rt = allan.realtime.oadev_realtime(afs=[1,10,100],tau0=1.0)
for x in nbs14_phase:
oadev_rt.add_phase(x)
for n in range(3):
check_dev('OADEV', oadev_rt.taus()[n], oadev_rt.dev[n], nbs14_1000_devs[1][n])
def test_ohdev_rt_nbs14_1k():
dev_rt = allan.realtime.ohdev_realtime(afs=[1,10,100],tau0=1.0)
for x in nbs14_phase:
dev_rt.add_phase(x)
for n in range(3):
check_dev('OHDEV', dev_rt.taus()[n], dev_rt.dev[n], nbs14_1000_devs[6][n])
def test_tdev_rt_nbs14_1k():
dev_rt = allan.realtime.tdev_realtime(afs=[1,10,100],tau0=1.0)
for x in nbs14_phase:
dev_rt.add_phase(x)
for n in range(3):
check_dev('TDEV', dev_rt.taus()[n], dev_rt.dev[n], nbs14_1000_devs[5][n])
if __name__ == "__main__":
test_oadev_rt_nbs14_1k()
test_ohdev_rt_nbs14_1k()
test_tdev_rt_nbs14_1k()
| lgpl-3.0 | 8,062,652,023,265,584,000 | 43.282828 | 137 | 0.614051 | false | 2.642556 | true | false | false |
Passw/gn_GFW | build/android/pylib/local/device/local_device_linker_test_run.py | 5 | 2336 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import sys
import traceback
from pylib.base import base_test_result
from pylib.linker import test_case
from pylib.local.device import local_device_environment
from pylib.local.device import local_device_test_run
class LinkerExceptionTestResult(base_test_result.BaseTestResult):
"""Test result corresponding to a python exception in a host-custom test."""
def __init__(self, test_name, exc_info):
"""Constructs a LinkerExceptionTestResult object.
Args:
test_name: name of the test which raised an exception.
exc_info: exception info, ostensibly from sys.exc_info().
"""
exc_type, exc_value, exc_traceback = exc_info
trace_info = ''.join(traceback.format_exception(exc_type, exc_value,
exc_traceback))
log_msg = 'Exception:\n' + trace_info
super(LinkerExceptionTestResult, self).__init__(
test_name,
base_test_result.ResultType.FAIL,
log="%s %s" % (exc_type, log_msg))
class LocalDeviceLinkerTestRun(local_device_test_run.LocalDeviceTestRun):
def _CreateShards(self, tests):
return tests
def _GetTests(self):
min_device_sdk = min(d.build_version_sdk for d in self._env.devices)
return self._test_instance.GetTests(min_device_sdk)
def _GetUniqueTestName(self, test):
return test.qualified_name
def _RunTest(self, device, test):
assert isinstance(test, test_case.LinkerTestCaseBase)
try:
result = test.Run(device)
except Exception: # pylint: disable=broad-except
logging.exception('Caught exception while trying to run test: ' +
test.tagged_name)
exc_info = sys.exc_info()
result = LinkerExceptionTestResult(test.tagged_name, exc_info)
return result, None
def SetUp(self):
@local_device_environment.handle_shard_failures_with(
on_failure=self._env.BlacklistDevice)
def individual_device_set_up(dev):
dev.Install(self._test_instance.test_apk)
self._env.parallel_devices.pMap(individual_device_set_up)
def _ShouldShard(self):
return True
def TearDown(self):
pass
def TestPackage(self):
pass
| gpl-3.0 | -3,789,683,687,704,482,300 | 29.736842 | 78 | 0.689212 | false | 3.773829 | true | false | false |
Peilonrayz/instruction-follower | src/instruction_follower/hrm.py | 1 | 7959 | import re
import string
class HRMException(Exception):
pass
class TileError(HRMException):
def __init__(self, data):
super().__init__(
"Bad tile address! "
"Tile with address {} does not exist! "
"Where do you think you're going?".format(data)
)
class OutOfBoundsError(HRMException):
def __init__(self):
super().__init__(
"Overflow! "
"Each data unit is restricted to values between -999 and 999. "
"That should be enough for anybody."
)
class OperandsError(HRMException):
def __init__(self, operator):
super().__init__(
"You can't {0} with mixed operands! "
"{0}'ing between one letter and one number is invalid. "
"Only nice respectable pairs of two letters or two numbers are allowed.! ".format(
operator
)
)
class HRMType:
letters = set()
def get(self, *_):
return self.data
class Empty(HRMType):
def __init__(self, data):
self.data = data
class Number(HRMType):
letters = set(string.digits)
def __init__(self, data):
self.data = int(data)
class Word(HRMType):
letters = set(string.ascii_letters)
def __init__(self, data):
self.data = str(data)
class Pointer:
letters = set("[]")
def __init__(self, other):
self.other = other
self.letters |= other.letters
self.pointer = False
self.data = None
def __call__(self, data):
data = str(data)
self.pointer = False
if data[0] == "[":
if data[-1] != "]":
raise HRMException("Mismatched parenths")
self.pointer = True
data = data[1:-1]
self.data = self.other(data).get()
return self
def get(self, hrm):
if self.pointer:
d = hrm[self.data]
return d.data if isinstance(d, HRMBox) else d
return self.data
class HRMBox:
def __init__(self, data):
if isinstance(data, HRMBox):
self.word = data.word
self.data = data.data
return
self.word = False
data = str(data)
if set(data) <= set(string.digits + "-"):
data = int(data)
elif not len(data):
raise ValueError("HRMBox needs to be at least a size of one.")
elif set(data) <= set(string.ascii_letters):
self.word = True
data = ord(data[0].upper()) - 64
else:
raise ValueError("HRMBox can only be numbers and digits.")
self.data = data
@property
def data(self):
return self._data
@data.setter
def data(self, value):
if value >= 1000 or value <= -1000:
raise OutOfBoundsError()
self._data = value
@property
def item(self):
if self.word:
return chr(self.data + 64)
return self.data
def __int__(self):
if self.word:
pass
return self.data
def __index__(self):
return self.__int__()
def __repr__(self):
return "HRMBox({})".format(self.item)
def __sub__(self, other):
if not isinstance(other, HRMBox):
other = HRMBox(other)
if self.word is not other.word:
raise OperandsError("")
return HRMBox(self.data - other.data)
def __add__(self, other):
if not isinstance(other, HRMBox):
other = HRMBox(other)
if self.word is not other.word:
raise OperandsError("")
return HRMBox(self.data + other.data)
def __eq__(self, other):
if not isinstance(other, HRMBox):
other = HRMBox(other)
return self.data == other.data
def __lt__(self, other):
if not isinstance(other, HRMBox):
other = HRMBox(other)
return self.data < other.data
COMMANDS = {}
def hrm_fn(*types):
def wrap(fn):
def call(self, *args):
def data():
fn(self, *[t(a).get(self) for t, a in zip(types, args)])
return data
call.letters = [t.letters for t in types]
COMMANDS[fn.__name__.upper()[1:]] = call
return call
return wrap
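# Each hrm_fn-decorated method below registers itself in COMMANDS under its
# upper-cased name without the leading underscore (e.g. _copyfrom -> COPYFROM);
# the wrapper converts the raw operand strings through the declared HRMType
# classes before invoking the instruction body.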
class HRM:
def __init__(self, program, tiles=0, tile_defaults=None):
if tile_defaults is None:
tile_defaults = {}
self.tokens = list(remove_invalid_tokens(tokenise(program)))
self.labels = {
places[0]: i
for i, (command, places) in enumerate(self.tokens)
if command == "LABEL"
}
self.tiles = [None for _ in range(tiles)]
for tile, value in tile_defaults.items():
self.tiles[tile] = HRMBox(value)
self.hand = None
@property
def hand(self):
return self._hand
@hand.setter
def hand(self, value):
if value is None:
self._hand = HRMBox(value)
self._hand = value
def __getitem__(self, index):
try:
return self.tiles[index]
except IndexError:
            raise TileError(index)
def __setitem__(self, index, value):
try:
self.tiles[index] = HRMBox(value)
except IndexError:
            raise TileError(index)
def __call__(self, input):
self.input = iter(input)
self.output = []
self.command = 0
self.hand = None
commands = [COMMANDS[command](self, *value) for command, value in self.tokens]
while True:
try:
commands[self.command]()
except IndexError: # No more commands
break
except StopIteration: # No more input
break
self.command += 1
return self.output
@hrm_fn(Empty)
def _inbox(self):
self.hand = HRMBox(next(self.input))
@hrm_fn(Empty)
def _outbox(self):
self.output.append(self.hand.item)
self.hand = None
@hrm_fn(Pointer(Number))
def _copyfrom(self, index):
self.hand = self[index]
@hrm_fn(Pointer(Number))
def _copyto(self, index):
self[index] = self.hand
@hrm_fn(Pointer(Number))
def _add(self, index):
self.hand += self[index]
@hrm_fn(Pointer(Number))
def _sub(self, index):
self.hand -= self[index]
@hrm_fn(Pointer(Number))
def _bumpup(self, index):
self[index] += 1
self.hand = self[index]
@hrm_fn(Pointer(Number))
def _bumpdn(self, index):
self[index] -= 1
self.hand = self[index]
@hrm_fn(Word)
def _jump(self, label):
self.command = self.labels[label]
@hrm_fn(Word)
def _jumpz(self, label):
if self.hand == 0:
self.command = self.labels[label]
@hrm_fn(Word)
def _jumpn(self, label):
if self.hand < 0:
self.command = self.labels[label]
@hrm_fn(Number)
def _comment(self, comment):
pass
@hrm_fn(Word)
def _label(self, label):
pass
COMMAND_TYPES = {command: fn.letters for command, fn in COMMANDS.items()}
def tokenise(hrm_string):
for line in hrm_string.split("\n"):
line = line.strip()
if re.match("--", line) is not None:
continue
label = re.match("(\w+):", line)
if label is not None:
yield "LABEL", label.group(1)
continue
expression = line.split()
if expression and all(re.match("\w+|\[\w+\]$", e) for e in expression):
yield expression
continue
def remove_invalid_tokens(tokens):
for command, *values in tokens:
command = command.upper()
command_types = COMMAND_TYPES.get(command, None)
if command_types is not None and all(
set(v) <= c for c, v in zip(command_types, values)
):
yield command, values
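# A small end-to-end sketch (the program text is illustrative but matches the
# tokeniser above): echo every inbox item straight to the outbox.
#
#     program = """
#     a:
#         INBOX
#         OUTBOX
#         JUMP a
#     """
#     machine = HRM(program)
#     machine([1, 2, 'A'])   # -> [1, 2, 'A']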
| mit | 7,252,481,977,393,019,000 | 24.186709 | 94 | 0.535746 | false | 3.839363 | false | false | false |
flacjacket/qtile | libqtile/widget/gmail_checker.py | 3 | 2668 | # Copyright (c) 2014 Sean Vig
# Copyright (c) 2014 zordsdavini
# Copyright (c) 2014 Alexandr Kriptonov
# Copyright (c) 2014 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from libqtile.log_utils import logger
from . import base
import imaplib
import re
class GmailChecker(base.ThreadedPollText):
"""A simple gmail checker"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
("update_interval", 30, "Update time in seconds."),
("username", None, "username"),
("password", None, "password"),
("email_path", "INBOX", "email_path"),
("fmt", "inbox[%s],unseen[%s]", "fmt"),
("status_only_unseen", False, "Only show unseen messages"),
]
def __init__(self, **config):
base._TextBox.__init__(self, **config)
self.add_defaults(GmailChecker.defaults)
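    # For reference, imaplib's STATUS reply looks roughly like
    # ('OK', [b'"INBOX" (MESSAGES 42 UNSEEN 3)']); poll() below picks the
    # MESSAGES/UNSEEN counts out of that raw payload with regexes.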
def poll(self):
self.gmail = imaplib.IMAP4_SSL('imap.gmail.com')
self.gmail.login(self.username, self.password)
answer, raw_data = self.gmail.status(self.email_path,
'(MESSAGES UNSEEN)')
if answer == "OK":
dec = raw_data[0].decode()
messages = int(re.search(r'MESSAGES\s+(\d+)', dec).group(1))
unseen = int(re.search(r'UNSEEN\s+(\d+)', dec).group(1))
if(self.status_only_unseen):
return self.fmt % unseen
else:
return self.fmt % (messages, unseen)
else:
logger.exception(
'GmailChecker UNKNOWN error, answer: %s, raw_data: %s',
answer, raw_data)
return "UNKNOWN ERROR"
| mit | -2,250,641,655,460,438,800 | 41.349206 | 79 | 0.651049 | false | 3.994012 | false | false | false |
PaddlePaddle/Paddle | python/paddle/fluid/tests/unittests/test_while_loop_op.py | 2 | 23080 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import unittest
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
import paddle.fluid.layers as layers
import paddle.fluid.framework as framework
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, program_guard
from paddle.fluid.backward import append_backward
paddle.enable_static()
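# These tests exercise paddle.fluid.layers.while_loop(cond, body, loop_vars), which
# keeps re-applying `body` to the loop variables while `cond(*loop_vars)` is true.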
class TestApiWhileLoop(unittest.TestCase):
def test_var_tuple(self):
def cond(i):
return layers.less_than(i, ten)
def body(i):
return layers.elementwise_add(x=i, y=one)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.fill_constant(shape=[1], dtype='int64', value=0)
one = layers.fill_constant(shape=[1], dtype='int64', value=1)
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
out = layers.while_loop(cond, body, (i, ))
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program, fetch_list=out)
self.assertTrue(
np.allclose(np.asarray(res[0]), np.full((1), 10, np.int64)))
def test_var_list(self):
def cond(i, mem):
return layers.less_than(i, ten)
def body(i, mem):
mem = layers.elementwise_add(x=mem, y=one)
i = layers.increment(i)
return [i, mem]
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
mem = fluid.data(name='mem', shape=[10], dtype='float32')
one = layers.fill_constant(shape=[10], dtype='float32', value=1)
out = layers.while_loop(cond, body, [i, mem])
data = np.random.rand(10).astype('float32')
data_one = np.ones(10).astype('float32')
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program, feed={'mem': data}, fetch_list=out)
for i in range(10):
data = np.add(data, data_one)
self.assertTrue(np.allclose(np.asarray(res[1]), data))
def test_var_dict(self):
def cond(i, ten, test_dict, test_list, test_list_dict):
return layers.less_than(i, ten)
def body(i, ten, test_dict, test_list, test_list_dict):
test_dict["test_key"] = i
test_dict["test_key"] += 1
test_list[0] = fluid.layers.reshape(test_list[0], [2, -1]) + 1
test_list_dict[0]["test_key"] += 1
test_list_dict[0]["test_key"] = fluid.layers.relu(test_list_dict[0][
"test_key"])
i = layers.increment(i)
return [i, ten, test_dict, test_list, test_list_dict]
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
test_data = layers.fill_constant(shape=[1], dtype='int64', value=0)
test_dict = {"test_key": test_data}
test_list = [
layers.fill_constant(
shape=[1, 2], dtype='int64', value=0)
]
test_list_dict = [{
"test_key": layers.fill_constant(
shape=[1], dtype='float32', value=0)
}]
i, ten, test_dict, test_list, test_list_dict = layers.while_loop(
cond, body, [i, ten, test_dict, test_list, test_list_dict])
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program,
fetch_list=[
test_dict["test_key"], test_list[0],
test_list_dict[0]["test_key"]
])
self.assertTrue(
np.allclose(
np.asarray(res[0]),
np.full(
shape=(1), fill_value=10, dtype=np.int64)))
self.assertTrue(
np.allclose(
np.asarray(res[1]),
np.full(
shape=(2, 1), fill_value=10, dtype=np.int64)))
self.assertTrue(
np.allclose(
np.asarray(res[2]),
np.full(
shape=(1), fill_value=10, dtype=np.float32)))
class TestApiWhileLoop_Nested(unittest.TestCase):
def test_nested_net(self):
def external_cond(i, j, init, sums):
return layers.less_than(i, loop_len1)
def external_body(i, j, init, sums):
def internal_cond(j, init, sums):
return layers.less_than(j, loop_len2)
def internal_body(j, init, sums):
init = layers.elementwise_add(x=init, y=ones)
sums = layers.elementwise_add(x=init, y=sums)
j = layers.increment(j)
return [j, init, sums]
result = layers.while_loop(internal_cond, internal_body,
[j, init, sums])
j = result[0]
init = result[1]
sums = result[2]
sums = layers.elementwise_add(x=init, y=sums)
i = layers.increment(i)
return [i, j, init, sums]
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
i = layers.zeros(shape=[1], dtype='int64')
j = layers.zeros(shape=[1], dtype='int64')
init = fluid.data(name='init', shape=[3, 3], dtype='float32')
sums = fluid.data(name='sums', shape=[3, 3], dtype='float32')
loop_len1 = layers.fill_constant(shape=[1], dtype='int64', value=2)
loop_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
ones = layers.fill_constant(shape=[3, 3], dtype='float32', value=1)
out = layers.while_loop(external_cond, external_body,
[i, j, init, sums])
data = np.random.rand(3, 3).astype('float32')
data_sums = np.zeros([3, 3]).astype('float32')
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program,
feed={'init': data,
'sums': data_sums},
fetch_list=out)
for i in range(3):
data = np.add(data, 1)
data_sums = np.add(data, data_sums)
for j in range(2):
data_sums = np.add(data, data_sums)
self.assertTrue(np.allclose(np.asarray(res[3]), data_sums))
class TestApiWhileLoop_Backward(unittest.TestCase):
def test_while_loop_backward(self):
def cond(i, x):
return layers.less_than(i, eleven)
def body(i, x):
x = layers.elementwise_mul(x=i, y=i)
i = layers.increment(i)
return [i, x]
main_program = Program()
startup_program = Program()
with fluid.program_guard(main_program, startup_program):
i = fluid.data(name='i', shape=[1], dtype='float32')
i.stop_gradient = False
eleven = layers.fill_constant(shape=[1], dtype='float32', value=11)
one = layers.fill_constant(shape=[1], dtype='float32', value=1)
x = fluid.data(name='x', shape=[1], dtype='float32')
x.stop_gradient = False
out = layers.while_loop(cond, body, [i, x])
mean = layers.mean(out[1])
append_backward(mean)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
feed_i = np.ones(1).astype('float32')
feed_x = np.ones(1).astype('float32')
data = np.asarray([100]).astype('float32')
i_grad = np.asarray([110]).astype('float32')
res = exe.run(main_program,
feed={'i': feed_i,
'x': feed_x},
fetch_list=[mean.name, i.grad_name])
self.assertTrue(np.allclose(np.asarray(res[0]), data))
self.assertTrue(
np.allclose(np.asarray(res[1]), i_grad),
msg=" \nres = \n{} \n\n ans = \n{}".format(res[1], i_grad))
def test_while_loop_backward2(self):
def cond(i, x):
return i < 3
def body(i, x):
x = x * i
i = i + 1
return [i, x]
main_program = Program()
startup_program = Program()
with fluid.program_guard(main_program, startup_program):
i = fluid.data(name='i', shape=[1], dtype='float32')
i.stop_gradient = False
x = fluid.data(name='x', shape=[1], dtype='float32')
x.stop_gradient = False
out = layers.while_loop(cond, body, [i, x])
mean = layers.mean(out[1])
append_backward(mean)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
feed_i = np.ones(1).astype('float32')
feed_x = np.ones(1).astype('float32')
data = np.asarray([2]).astype('float32')
i_grad = np.asarray([3]).astype('float32')
x_grad = np.asarray([2]).astype('float32')
res = exe.run(main_program,
feed={'i': feed_i,
'x': feed_x},
fetch_list=[mean.name, i.grad_name, x.grad_name])
self.assertTrue(np.allclose(np.asarray(res[0]), data))
self.assertTrue(
np.allclose(np.asarray(res[1]), i_grad),
msg=" \nres = \n{} \n\n ans = \n{}".format(res[1], i_grad))
self.assertTrue(
np.allclose(np.asarray(res[2]), x_grad),
msg=" \nres = \n{} \n\n ans = \n{}".format(res[2], x_grad))
class TestApiWhileLoop_NestedWithBackwardAndLoDTensorArray(unittest.TestCase):
def test_nested_net_with_backward_and_lodtensor(self):
def external_cond(i, j, x, mem_array):
return layers.less_than(i, array_len)
def external_body(i, j, x, mem_array):
def internal_cond(j, x, mem_array):
return layers.less_than(j, array_len2)
def internal_body(j, x, mem_array):
inner_data = layers.array_read(array=data_array, i=j)
inner_prev = layers.array_read(array=mem_array, i=j)
inner_sum_0 = layers.elementwise_add(x=inner_data, y=inner_prev)
inner_sum_1 = layers.elementwise_add(x=x, y=inner_sum_0)
j = layers.increment(x=j, in_place=True)
layers.array_write(inner_sum_1, i=j, array=mem_array)
return [j, x, mem_array]
outer_data = layers.array_read(array=data_array, i=i)
outer_prev = layers.array_read(array=mem_array, i=i)
outer_sum_0 = layers.elementwise_add(x=outer_data, y=outer_prev)
outer_sum_1 = layers.elementwise_add(x=x, y=outer_sum_0)
i = layers.increment(x=i, in_place=True)
layers.array_write(outer_sum_1, i=i, array=mem_array)
j, x, mem_array = layers.while_loop(internal_cond, internal_body,
[j, x, mem_array])
return [i, j, x, mem_array]
main_program = Program()
startup_program = Program()
with fluid.program_guard(main_program, startup_program):
d0 = fluid.data(name='d0', shape=[10], dtype='float32')
d1 = fluid.data(name='d1', shape=[10], dtype='float32')
d2 = fluid.data(name='d2', shape=[10], dtype='float32')
x = fluid.data(name='x', shape=[10], dtype='float32')
x.stop_gradient = False
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
init = layers.zeros(shape=[10], dtype='float32')
mem_array = layers.array_write(x=init, i=i)
data_array = layers.array_write(x=d0, i=i)
i = layers.increment(i)
layers.array_write(d1, i, array=data_array)
i = layers.increment(i)
layers.array_write(d2, i, array=data_array)
i = layers.zeros(shape=[1], dtype='int64')
i.stop_gradient = True
array_len = layers.fill_constant(shape=[1], dtype='int64', value=1)
j = layers.fill_constant(shape=[1], dtype='int64', value=1)
j.stop_gradient = True
array_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
out = layers.while_loop(external_cond, external_body,
[i, j, x, mem_array])
sum_result = layers.array_read(array=mem_array, i=j)
mean = layers.mean(sum_result)
append_backward(mean)
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
d = []
for i in range(3):
d.append(np.random.random(size=[10]).astype('float32'))
feed_x = np.ones(10).astype('float32')
data_sum = d[0] + d[1] + d[2] + 3 * feed_x
x_grad = [0.3] * 10
res = exe.run(
main_program,
feed={'d0': d[0],
'd1': d[1],
'd2': d[2],
'x': feed_x},
fetch_list=[sum_result.name, x.grad_name])
self.assertTrue(np.allclose(res[0], data_sum))
self.assertTrue(np.allclose(res[1], x_grad))
class TestApiWhileLoopWithSwitchCase(unittest.TestCase):
def test_with_switch_case(self):
def cond(i):
return layers.less_than(i, ten)
def body(i):
def fn_add_three():
data_add_three = layers.elementwise_add(x=i, y=three)
return data_add_three
def fn_square():
data_mul_data = layers.elementwise_mul(x=i, y=i)
return data_mul_data
def fn_add_one():
data_add_one = layers.elementwise_add(x=i, y=one)
return data_add_one
return layers.switch_case(
branch_index=i,
branch_fns={2: fn_add_three,
5: fn_square},
default=fn_add_one)
main_program = Program()
startup_program = Program()
with fluid.program_guard(main_program, startup_program):
i = layers.fill_constant(shape=[1], dtype='int64', value=1)
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
three = layers.fill_constant(shape=[1], dtype='int64', value=3)
one = layers.fill_constant(shape=[1], dtype='int64', value=1)
out = layers.while_loop(cond, body, [i])
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
res = exe.run(main_program, fetch_list=out)
data = np.asarray([25]).astype('int64')
self.assertTrue(np.allclose(np.asarray(res[0]), data))
class TestApiWhileLoop_Error(unittest.TestCase):
def test_error(self):
def cond_returns_constant(i):
return 1
def cond_returns_not_bool_tensor(i):
return layers.increment(i)
def cond_returns_bool_tensor(i):
return layers.less_than(i, ten)
def cond_returns_2d_tensor(i):
return layers.less_than(i, ten_2d)
def cond_receives_two_args(i, ten):
return layers.less_than(i, ten)
def body(i):
return layers.increment(i)
def body_returns_error_length(i):
i = layers.increment(i)
return [i, i]
def body_returns_error_type(i, ten):
return layers.increment(i)
def cond_returns_with_mutable_dict(i, test_dict):
return i > 0
def body_returns_with_mutable_dict(i, test_dict):
test_dict['new_key'] = layers.fill_constant(
shape=[1], dtype='int64', value=1)
return layers.increment(i), test_dict
def cond_returns_with_mutable_list(i, test_list):
return i > 0
def body_returns_with_mutable_list(i, test_list):
test_list.append(
layers.fill_constant(
shape=[1], dtype='int64', value=1))
return layers.increment(i), test_list
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
data = layers.fill_constant(shape=[1], dtype='int64', value=1)
data_1d = layers.fill_constant(shape=[1], dtype='int64', value=1)
data_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=1)
ten = layers.fill_constant(shape=[1], dtype='int64', value=10)
ten_2d = layers.fill_constant(shape=[2, 2], dtype='int64', value=10)
# The type of `cond` in Op(while_loop) must be callable
def type_error_cond():
out = layers.while_loop(data, body, [data_1d])
self.assertRaises(TypeError, type_error_cond)
# The type of `body` in Op(while_loop) must be callable
def type_error_body():
out = layers.while_loop(cond_returns_bool_tensor, data,
[data_1d])
self.assertRaises(TypeError, type_error_body)
# The type of `loop_vars` in Op(while_loop) must be list or tuple
def type_error_loop_vars():
out = layers.while_loop(cond_returns_bool_tensor, body, data_1d)
self.assertRaises(TypeError, type_error_loop_vars)
# The value of `loop_vars` is empty
def value_error_loop_vars():
out = layers.while_loop(cond_returns_bool_tensor, body, [])
self.assertRaises(ValueError, value_error_loop_vars)
# The type of `cond` returns in Op(while_loop) must be Variable
def type_error_cond_returns_not_variable():
out = layers.while_loop(cond_returns_constant, body, [data_1d])
self.assertRaises(TypeError, type_error_cond_returns_not_variable)
# The type of `cond` returns in Op(while_loop) must be a boolean variable
def type_error_cond_returns_not_boolean():
out = layers.while_loop(cond_returns_not_bool_tensor, body,
[data_1d])
self.assertRaises(TypeError, type_error_cond_returns_not_boolean)
# The shape of `cond` returns in Op(while_loop) must be 1
def type_error_shape_cond_returns_2d():
out = layers.while_loop(cond_returns_2d_tensor, body, [data_2d])
self.assertRaises(TypeError, type_error_shape_cond_returns_2d)
# The length of `body` returns in Op(while_loop) must be same as `loop_vars`
def value_error_body_returns_error_length():
out = layers.while_loop(cond_returns_bool_tensor,
body_returns_error_length, [data])
self.assertRaises(ValueError, value_error_body_returns_error_length)
# The type of `body` returns in Op(while_loop) must be same as `loop_vars`
def value_error_body_returns_error_type():
out = layers.while_loop(cond_receives_two_args,
body_returns_error_type, [data, ten])
self.assertRaises(ValueError, value_error_body_returns_error_type)
# The length of `output_vars` with mutable value should keep same with `loop_vars`
def value_error_body_returns_with_mutable_dict():
test_dict = {
"int_constant": layers.fill_constant(
shape=[2, 2], dtype='int64', value=1)
}
out = layers.while_loop(cond_returns_with_mutable_dict,
body_returns_with_mutable_dict,
[data, test_dict])
self.assertRaises(ValueError,
value_error_body_returns_with_mutable_dict)
def value_error_body_returns_with_mutable_list():
test_list = [
layers.fill_constant(
shape=[2, 2], dtype='int64', value=1)
]
out = layers.while_loop(cond_returns_with_mutable_list,
body_returns_with_mutable_list,
[data, test_list])
self.assertRaises(ValueError,
value_error_body_returns_with_mutable_list)
class TestApiWhileLoopSliceInBody(unittest.TestCase):
def test_var_slice(self):
def cond(z, i):
return i + 1 <= x_shape[0]
def body(z, i):
z = z + x[i]
i += 1
return z, i
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = fluid.layers.data(name='x', shape=[5], dtype='int32')
z = fluid.layers.fill_constant([1], 'int32', 0)
x_shape = fluid.layers.shape(x)
i = fluid.layers.fill_constant([1], 'int32', 0)
z, _ = fluid.layers.while_loop(cond, body, [z, i])
place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
np_x = np.array([1, 2, 3, 4, 5], dtype='int32')
res = exe.run(main_program, feed={'x': np_x}, fetch_list=[z])
self.assertTrue(np.array_equal(res[0], [np.sum(np_x)]))
if __name__ == '__main__':
unittest.main()
| apache-2.0 | -7,682,790,755,158,995,000 | 39.209059 | 94 | 0.541681 | false | 3.651899 | true | false | false |
zstackio/zstack-woodpecker | integrationtest/vm/mini/image_replication/test_replicate_iso_image.py | 1 | 1246 | '''
New Integration test for image replication.
@author: Legion
'''
import os
import time
import random
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
image_name = 'iso-image-replication-test-' + time.strftime('%y%m%d%H%M%S', time.localtime())
test_stub = test_lib.lib_get_test_stub()
img_repl = test_stub.ImageReplication()
def test():
os.environ['ZSTACK_BUILT_IN_HTTP_SERVER_IP'] = os.getenv('zstackHaVip')
bs_list = img_repl.get_bs_list()
bs = random.choice(bs_list)
img_repl.add_image(image_name, bs_uuid=bs.uuid, img_format='iso')
img_repl.create_iso_vm()
img_repl.wait_for_image_replicated(image_name)
img_repl.check_image_data(image_name)
test_util.test_pass('ISO Image Replication Test Success')
img_repl.clean_on_expunge()
def env_recover():
img_repl.delete_image()
img_repl.expunge_image()
img_repl.reclaim_space_from_bs()
try:
img_repl.vm.destroy()
except:
pass
#Will be called only if exception happens in test().
def error_cleanup():
try:
img_repl.delete_image()
img_repl.expunge_image()
img_repl.reclaim_space_from_bs()
img_repl.vm.destroy()
except:
pass | apache-2.0 | 1,099,374,511,047,678,200 | 22.980769 | 92 | 0.668539 | false | 3.046455 | true | false | false |
StefanRijnhart/partner-contact | partner_firstname/__openerp__.py | 7 | 1822 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi. Copyright Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Partner first name, last name',
'description': """
This module splits first name and last name for non company partners
====================================================================
The field 'name' becomes a stored function field concatenating lastname and
firstname
Note: in version 7.0, installing this module invalidates a yaml test in the
'edi' module
Contributors
============
Jonathan Nemry <[email protected]>
Olivier Laurent <[email protected]>
""",
'version': '1.2',
'author': 'Camptocamp',
'maintainer': 'Camptocamp, Acsone',
'category': 'Extra Tools',
'website': 'http://www.camptocamp.com, http://www.acsone.eu',
'depends': ['base'],
'data': [
'partner_view.xml',
'res_user_view.xml',
],
'demo': [],
'test': [],
'auto_install': False,
'installable': True,
'images': []
}
| agpl-3.0 | -7,672,724,424,427,872,000 | 33.377358 | 78 | 0.587816 | false | 3.96087 | false | false | false |
atomic-labs/zulip | zerver/management/commands/create_stream.py | 9 | 1146 | from __future__ import absolute_import
from __future__ import print_function
from django.core.management.base import BaseCommand
from zerver.lib.actions import do_create_stream
from zerver.models import Realm, get_realm
import sys
class Command(BaseCommand):
help = """Create a stream, and subscribe all active users (excluding bots).
This should be used for TESTING only, unless you understand the limitations of
the command."""
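# Illustrative invocation (a sketch only; the exact entry point depends on how
# Zulip's Django project is run, typically via manage.py):
#   ./manage.py create_stream example.com "new stream name"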
def add_arguments(self, parser):
parser.add_argument('domain', metavar='<domain>', type=str,
help='domain in which to create the stream')
parser.add_argument('stream_name', metavar='<stream name>', type=str,
help='name of stream to create')
def handle(self, *args, **options):
domain = options['domain']
stream_name = options['stream_name']
encoding = sys.getfilesystemencoding()
try:
realm = get_realm(domain)
except Realm.DoesNotExist:
print("Unknown domain %s" % (domain,))
exit(1)
do_create_stream(realm, stream_name.decode(encoding))
| apache-2.0 | -5,861,407,564,355,417,000 | 32.705882 | 79 | 0.644852 | false | 4.374046 | false | false | false |
annahs/atmos_research | AL_incand_calib_SP244.py | 1 | 3411 | import sys
import os
import datetime
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib.colors as colors
from pprint import pprint
import sqlite3
import calendar
from datetime import datetime
from datetime import timedelta
import math
import numpy.polynomial.polynomial as poly
#mass fg, pk_ht, UNCORR
AL_HG_incand_calib = [
[0.11144 ,986.22 ],
[0.22994 ,725.78 ],
[0.41189 ,1567.4 ],
[0.67707 ,2941 ],
[1.04293 ,5127.9 ],
[1.52461 ,8463.7 ],
[2.13496 ,13283 ],
[2.88464 ,18566 ],
[3.78215 ,24234 ],
[6.04449 ,37216 ],
[8.95095 ,52182 ],
]
AL_LG_incand_calib = [
[0.67707 ,276.81],
[1.04293 ,494.74],
[1.52461 ,843],
[2.13496 ,1325.2],
[2.88464 ,1869.5],
[3.78215 ,2448],
[6.04449 ,3801.7],
[8.95095 ,5368.3],
]
HG_pkht = np.array([row[1] for row in AL_HG_incand_calib])
HG_mass = np.array([row[0] for row in AL_HG_incand_calib])
HG_mass_corr = np.array([row[0]/0.7 for row in AL_HG_incand_calib])
HG_fit = poly.polyfit(HG_pkht, HG_mass_corr, 2)
print 'HG AD corr fit', HG_fit
for line in AL_HG_incand_calib:
incand_pk_ht = line[1]
uncorr_mass_fg = line[0]
AD_corr_fit = HG_fit[0] + HG_fit[1]*incand_pk_ht + HG_fit[2]*incand_pk_ht*incand_pk_ht
line.append(AD_corr_fit)
LG_pkht = np.array([row[1] for row in AL_LG_incand_calib])
LG_mass = np.array([row[0] for row in AL_LG_incand_calib])
LG_mass_corr = np.array([row[0]/0.7 for row in AL_LG_incand_calib])
LG_fit = poly.polyfit(LG_pkht, LG_mass_corr, 2)
print 'LG AD corr fit', LG_fit
for line in AL_LG_incand_calib:
incand_pk_ht = line[1]
uncorr_mass_fg = line[0]
AD_corr_fit = LG_fit[0] + LG_fit[1]*incand_pk_ht + LG_fit[2]*incand_pk_ht*incand_pk_ht
line.append(AD_corr_fit)
HG_pk_ht = [row[1] for row in AL_HG_incand_calib]
HG_uncorr_mass = [row[0] for row in AL_HG_incand_calib]
HG_uncorr_fit = [row[2]*0.7 for row in AL_HG_incand_calib]
HG_ADcorr_fit = [row[2] for row in AL_HG_incand_calib]
LG_pk_ht = [row[1] for row in AL_LG_incand_calib]
LG_uncorr_mass = [row[0] for row in AL_LG_incand_calib]
LG_uncorr_fit = [row[2]*0.7 for row in AL_LG_incand_calib]
LG_ADcorr_fit = [row[2] for row in AL_LG_incand_calib]
fig = plt.figure(figsize=(12,10))
ax = fig.add_subplot(111)
ax.scatter(HG_pk_ht,HG_uncorr_mass,color='r', label = 'HG uncorrected calibration')
ax.plot(HG_pk_ht,HG_ADcorr_fit, '--r', label = 'HG Aquadag correction applied')
ax.plot(HG_pk_ht,HG_uncorr_fit, '-r')
ax.scatter(LG_pk_ht,LG_uncorr_mass,color = 'blue', label = 'LG uncorrected calibration')
ax.plot(LG_pk_ht,LG_ADcorr_fit, '--b', label = 'LG Aquadag correction applied')
ax.plot(LG_pk_ht,LG_uncorr_fit, '-b')
plt.xlabel('Incandescent pk height (a.u.)')
plt.ylabel('rBC mass (fg)')
plt.text(9600,8, 'Aquadag corrected fit:\nrBC mass = 0.26887 + 1.9552E-4*HG_pkht + 8.31906E-10*HG_pkht^2')
plt.text(5900,12, 'Aquadag corrected fit:\nrBC mass = 0.56062 + 1.7402E-3*LG_pkht + 1.0009E-7*LG_pkht^2')
#plt.axhspan(1.8,12.8, color='g', alpha=0.25, lw=0)
#plt.axhspan(0,1.8, color='c', alpha=0.25, lw=0)
#plt.axhspan(12.8,41, color='y', alpha=0.25, lw=0)
ax.set_ylim(0,16)
ax.set_xlim(0,55000)
plt.legend()
os.chdir('C:/Users/Sarah Hanna/Documents/Data/Alert Data/SP2 Calibrations/')
plt.savefig('Alert SP2#44 Aquadag calibration curves.png', bbox_inches='tight')
plt.show() | mit | -5,794,640,837,672,244,000 | 25.045802 | 106 | 0.673116 | false | 2.186538 | false | false | false |
lutris/lutris | lutris/services/steam.py | 1 | 6617 | """Steam service"""
import json
import os
from gettext import gettext as _
from gi.repository import Gio
from lutris import settings
from lutris.config import LutrisConfig, write_game_config
from lutris.database.games import add_game, get_game_by_field, get_games
from lutris.database.services import ServiceGameCollection
from lutris.game import Game
from lutris.installer.installer_file import InstallerFile
from lutris.services.base import BaseService
from lutris.services.service_game import ServiceGame
from lutris.services.service_media import ServiceMedia
from lutris.util.log import logger
from lutris.util.steam.appmanifest import AppManifest, get_appmanifests
from lutris.util.steam.config import get_steam_library, get_steamapps_paths, get_user_steam_id
from lutris.util.strings import slugify
class SteamBanner(ServiceMedia):
service = "steam"
size = (184, 69)
dest_path = os.path.join(settings.CACHE_DIR, "steam/banners")
file_pattern = "%s.jpg"
api_field = "appid"
url_pattern = "http://cdn.akamai.steamstatic.com/steam/apps/%s/capsule_184x69.jpg"
class SteamCover(ServiceMedia):
service = "steam"
size = (200, 300)
dest_path = os.path.join(settings.CACHE_DIR, "steam/covers")
file_pattern = "%s.jpg"
api_field = "appid"
url_pattern = "http://cdn.steamstatic.com/steam/apps/%s/library_600x900.jpg"
class SteamBannerLarge(ServiceMedia):
service = "steam"
size = (460, 215)
dest_path = os.path.join(settings.CACHE_DIR, "steam/header")
file_pattern = "%s.jpg"
api_field = "appid"
url_pattern = "https://cdn.cloudflare.steamstatic.com/steam/apps/%s/header.jpg"
class SteamGame(ServiceGame):
"""ServiceGame for Steam games"""
service = "steam"
installer_slug = "steam"
runner = "steam"
@classmethod
def new_from_steam_game(cls, steam_game, game_id=None):
"""Return a Steam game instance from an AppManifest"""
game = cls()
game.appid = steam_game["appid"]
game.game_id = steam_game["appid"]
game.name = steam_game["name"]
game.slug = slugify(steam_game["name"])
game.runner = cls.runner
game.details = json.dumps(steam_game)
return game
class SteamService(BaseService):
id = "steam"
name = _("Steam")
icon = "steam-client"
medias = {
"banner": SteamBanner,
"banner_large": SteamBannerLarge,
"cover": SteamCover,
}
default_format = "banner"
is_loading = False
runner = "steam"
excluded_appids = [
"221410", # Steam for Linux
"228980", # Steamworks Common Redistributables
"1070560", # Steam Linux Runtime
]
game_class = SteamGame
def load(self):
"""Return importable Steam games"""
if self.is_loading:
logger.warning("Steam games are already loading")
return
self.is_loading = True
steamid = get_user_steam_id()
if not steamid:
logger.error("Unable to find SteamID from Steam config")
return
steam_games = get_steam_library(steamid)
if not steam_games:
raise RuntimeError(_("Failed to load games. Check that your profile is set to public during the sync."))
for steam_game in steam_games:
if steam_game["appid"] in self.excluded_appids:
continue
game = self.game_class.new_from_steam_game(steam_game)
game.save()
self.match_games()
self.is_loading = False
return steam_games
def get_installer_files(self, installer, installer_file_id):
steam_uri = "$WINESTEAM:%s:." if installer.runner == "winesteam" else "$STEAM:%s:."
appid = str(installer.script["game"]["appid"])
return [
InstallerFile(installer.game_slug, "steam_game", {
"url": steam_uri % appid,
"filename": appid
})
]
def install_from_steam(self, manifest):
"""Create a new Lutris game based on an existing Steam install"""
if not manifest.is_installed():
return
appid = manifest.steamid
if appid in self.excluded_appids:
return
service_game = ServiceGameCollection.get_game(self.id, appid)
if not service_game:
return
lutris_game_id = "%s-%s" % (self.id, appid)
existing_game = get_game_by_field(lutris_game_id, "slug")
if existing_game:
return
game_config = LutrisConfig().game_level
game_config["game"]["appid"] = appid
configpath = write_game_config(lutris_game_id, game_config)
game_id = add_game(
name=service_game["name"],
runner="steam",
slug=slugify(service_game["name"]),
installed=1,
installer_slug=lutris_game_id,
configpath=configpath,
platform="Linux",
service=self.id,
service_id=appid,
)
return game_id
def add_installed_games(self):
games = []
steamapps_paths = get_steamapps_paths()
for steamapps_path in steamapps_paths:
for appmanifest_file in get_appmanifests(steamapps_path):
app_manifest_path = os.path.join(steamapps_path, appmanifest_file)
self.install_from_steam(AppManifest(app_manifest_path))
return games
def generate_installer(self, db_game):
"""Generate a basic Steam installer"""
return {
"name": db_game["name"],
"version": self.name,
"slug": slugify(db_game["name"]) + "-" + self.id,
"game_slug": slugify(db_game["name"]),
"runner": self.runner,
"appid": db_game["appid"],
"script": {
"game": {"appid": db_game["appid"]}
}
}
def install(self, db_game):
appid = db_game["appid"]
db_games = get_games(filters={"service_id": appid, "installed": "1", "service": self.id})
existing_game = self.match_existing_game(db_games, appid)
if existing_game:
logger.debug("Found steam game: %s", existing_game)
game = Game(existing_game.id)
game.save()
return
service_installers = self.get_installers_from_api(appid)
if not service_installers:
service_installers = [self.generate_installer(db_game)]
application = Gio.Application.get_default()
application.show_installer_window(service_installers, service=self, appid=appid)
| gpl-3.0 | 8,328,365,703,079,465,000 | 34.385027 | 116 | 0.612211 | false | 3.508484 | true | false | false |
darencard/RADpipe | process_rawreads.py | 1 | 15286 | #!/usr/bin/env python
##print __name__
import re
import sys
import os
import optparse
import subprocess
usage_line = """
process_rawreads.py
Version 2.0 (2 December, 2014)
License: GNU GPLv2
To report bugs or errors, please contact Daren Card ([email protected]).
This script is provided as-is, with no support and no guarantee of proper or desirable functioning.
Script that processes raw RADseq reads that are generated using the Peterson et al. 2012 ddRADseq \
protocol. The script filters out PCR clones, trims away the 8bp unique molecular identifiers at \
the beginning of each read, parses by combinatorial barcodes (an inline barcode and standard Illumina \
index), and quality trims using either Stacks or Trimmomatic. The script will handle either single-end \
or paired-end reads appropriately. User must input the raw reads (unzipped fastq format) and a sample \
sheet (example is included as part of this repository). The script also includes the flexibility of \
running certain portions of the pipeline, which is useful if one doesn't need to repeat a specific step. \
The pipeline steps are as follows (with numbers corresponding to those passed using the -x flag):
1. Setup the environment - creates necessary directories.
2. Clone filtering - filters out PCR clones
3. Lead trimming - trims away 8bp UMI sequences
4. Sample parsing - parses sample sheet, creates barcode input, parses processed reads down to sample \
using this information, and renames the files logically using the sample sheet.
5. Quality filtering - quality filters the reads using Trimmomatic. If user specifies read quality \
filtering in Stacks, this filtering takes place simultaneously with read parsing (step 4).
Dependencies include the Stacks pipeline (v. 1.10 - 1.19), the FastX toolkit, and Trimmomatic v. 0.35 \
(if desired), and all need to be installed in the users path.
python process_rawreads.py -t <#threads> -s <samplesheet.txt> [-p -r] -c/-q -1 <single-end.fastq> \
[-2 <paired-end.fastq>] --renz1 <RE_1> --renz2 <RE_2> --bar_loc <inline/index> [-x [1,2,3,4,5]
"""
#################################################
### Parse command options ###
#################################################
usage = usage_line
parser = optparse.OptionParser(usage=usage)
parser.add_option("-t", action="store", type = "string", dest = "threads", help = "threads")
parser.add_option("-s", action="store", type = "string", dest = "sheet", help = "Sample sheet file (see sample)")
parser.add_option("-p", action="store_true", dest = "paired", help = "paired reads flag")
parser.add_option("-c", action="store_true", dest = "clean", help = "quality trim reads using Stacks")
parser.add_option("-q", action="store_true", dest = "quality", help = "quality trim reads using Trimmomatic")
parser.add_option("-r", action="store_true", dest = "rescue", help = "rescue barcodes/restriction sites in Stacks (default settings)")
parser.add_option("-1", action="store", type = "string", dest = "read1", help = "single end read")
parser.add_option("-2", action="store", type = "string", dest = "read2", help = "paired end read")
parser.add_option("--renz1", action="store", type = "string", dest = "renz1", help = "restriction enzyme 1 (common cutter)")
parser.add_option("--renz2", action="store", type = "string", dest = "renz2", help = "restriction enzyme 2 (rare cutter)")
parser.add_option("--bar_loc", action="store", type = "string", dest = "bar_loc", help = "location of barcode & index (per process_radtags documentation)")
parser.add_option("-x", action="store", type = "string", dest = "run", help = "processes to run, separated by commas (e.g., 1,2,...,5) [1,2,3,4,5]", default = "1,2,3,4,5")
options, args = parser.parse_args()
#################################################
### Setup the environment ###
#################################################
def setup(r1nm):
print "\n***Setting up the command environment***\n"
### Create output directories ###
os.system("mkdir clone_filtered")
os.system("mkdir lead_trimmed")
os.system("mkdir parsed")
os.system("mkdir cleaned")
os.system("mkdir ./parsed/"+r1nm)
#################################################
### Clone filter reads ###
#################################################
def PE_clone_filter():
print "\n***Filtering PCR duplicates***\n"
os.system("clone_filter -1 "+options.read1+" -2 "+options.read2+" -o ./clone_filtered/ 2>&1 | tee ./clone_filtered/"+options.read1+".clonefilter.log")
def SE_clone_filter():
print "\n***Filtering PCR duplicates***\n"
os.system("clone_filter -1 "+options.read1+" -2 "+options.read1+" -o ./clone_filtered/ 2>&1 | tee ./clone_filtered/"+options.read1+".clonefilter.log")
os.system("rm -f ./clone_filtered/*.fil.fq_2")
#################################################
### Trim leading 8bp UMI ###
#################################################
def PE_lead_trim(r1nm, r2nm):
print "\n***Trimming away leading 8bp unique molecular identifiers***\n"
os.system("fastx_trimmer -Q 33 -f 9 -i ./clone_filtered/"+r1nm+".fil.fq_1 -o ./lead_trimmed/"+r1nm+".1.clone.trim.fastq")
os.system("fastx_trimmer -Q 33 -f 9 -i ./clone_filtered/"+r2nm+".fil.fq_2 -o ./lead_trimmed/"+r2nm+".2.clone.trim.fastq")
def SE_lead_trim(r1nm):
print "\n***Trimming away leading 8bp unique molecular identifiers***\n"
os.system("fastx_trimmer -Q 33 -f 9 -i ./clone_filtered/"+r1nm+".fil.fq_1 -o ./lead_trimmed/"+r1nm+".1.clone.trim.fastq")
#################################################
### Parse samples ###
#################################################
def parse_sample_sheet():
print "\n***Parsing reads by sample***\n"
### Parse the sample sheet to create barcodes file ###
barcodes = open("barcodes.txt", "w")
for line in open(options.sheet, "r"):
if not line.strip().startswith("#"):
bar = line.rstrip().split("\t")
if options.paired == True:
# print bar[0], bar[1], bar[2], bar[3], bar[4]
out = bar[3] + "\t" + bar[4] + "\n"
# print out
barcodes.write(out)
else:
out = bar[3] + "\n"
barcodes.write(out)
barcodes.close()
### process_radtags subroutine ###
def PE_sample_parser(r1nm, r2nm):
if options.rescue == True:
if options.clean == True:
alert = open("./cleaned/ATTENTION", "w")
line = "You elected to quality-trim your reads using Stacks. This trimming was done simultaneously with parsing. See the 'parsed' folder for your trimmed reads."
alert.write(line)
alert.close()
os.system("process_radtags -r -c -q -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_index --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -1 ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq -2 ./lead_trimmed/"+r2nm+".2.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Stacks***\n"
else:
os.system("process_radtags -r -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_index --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -1 ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq -2 ./lead_trimmed/"+r2nm+".2.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Trimmomatic***\n"
else:
if options.clean == True:
os.system("process_radtags -c -q -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_index --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -1 ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq -2 ./lead_trimmed/"+r2nm+".2.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Stacks***\n"
alert = open("./cleaned/ATTENTION", "w")
line = "You elected to quality-trim your reads using Stacks. This trimming was done simultaneously with parsing. See the 'parsed' folder for your trimmed reads."
alert.write(line)
alert.close()
else:
os.system("process_radtags -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_index --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -1 ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq -2 ./lead_trimmed/"+r2nm+".2.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Trimmomatic***\n"
def SE_sample_parser(r1nm):
if options.rescue == True:
if options.clean == True:
alert = open("./cleaned/ATTENTION", "w")
line = "You elected to quality-trim your reads using Stacks. This trimming was done simultaneously with parsing. See the 'parsed' folder for your trimmed reads."
alert.write(line)
alert.close()
os.system("process_radtags -r -c -q -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_null --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -f ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Stacks***\n"
else:
os.system("process_radtags -r -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_null --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -f ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Trimmomatic***\n"
else:
if options.clean == True:
alert = open("./cleaned/ATTENTION", "w")
line = "You elected to quality-trim your reads using Stacks. This trimming was done simultaneously with parsing. See the 'parsed' folder for your trimmed reads."
alert.write(line)
alert.close()
os.system("process_radtags -c -q -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_null --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -f ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Stacks***\n"
else:
os.system("process_radtags -b barcodes.txt -o ./parsed/"+str(r1nm)+" --inline_null --renz_1 "+str(options.renz1)+" --renz_2 "+str(options.renz2)+" -f ./lead_trimmed/"+str(r1nm)+".1.clone.trim.fastq 2>&1 | tee ./parsed/"+str(r1nm)+"/"+str(r1nm)+".parse.log")
print "\n***Quality-trimming reads using Trimmomatic***\n"
### file renaming subroutine ###
def PE_sample_rename(r1nm):
for foo in open(options.sheet).read().splitlines():
bar = foo.split()
parsep1_rename = "mv ./parsed/"+str(r1nm)+"/sample_"+bar[3]+"-"+bar[4]+".1.fq ./parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".P1.fq"
parsep2_rename = "mv ./parsed/"+str(r1nm)+"/sample_"+bar[3]+"-"+bar[4]+".2.fq ./parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".P2.fq"
remp1_rename = "mv ./parsed/"+str(r1nm)+"/sample_"+bar[3]+"-"+bar[4]+".rem.1.fq ./parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".rem.P1.fq"
remp2_rename = "mv ./parsed/"+str(r1nm)+"/sample_"+bar[3]+"-"+bar[4]+".rem.2.fq ./parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".rem.P2.fq"
combine_broken = "cat ./parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".rem.P1.fq /parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".rem.P2.fq > /parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+"-"+bar[4]+".rem.cat.fq"
os.system(parsep1_rename)
os.system(parsep2_rename)
os.system(remp1_rename)
os.system(remp2_rename)
os.system(combine_broken)
### Place restriction site trimming routine here ###
def SE_sample_rename(r1nm):
for foo in open(options.sheet).read().splitlines():
bar = foo.split()
parse_single = "mv ./parsed/"+str(r1nm)+"/sample_"+bar[3]+".fq ./parsed/"+str(r1nm)+"/"+bar[0]+"_"+bar[3]+".S1.fq"
os.system(parse_single)
### Place restriction site trimming routine here ###
#################################################
### Quality-trim samples ###
#################################################
def PE_quality_trim(r1nm):
if options.quality == True:
for foo in open(options.sheet).read().splitlines():
bar = foo.split()
handle = bar[0]+"_"+bar[3]+"-"+bar[4]
threads = options.threads
PEclean = "trimmomatic-0.35.jar PE -threads "+threads+" -trimlog ./cleaned/"+handle+"_paired.qtrim.log ./parsed/"+str(r1nm)+"/"+handle+".P1.fq ./parsed/"+str(r1nm)+"/"+handle+".P2.fq ./cleaned/"+handle+".P1.qtrim ./cleaned/"+handle+".S1.qtrim ./cleaned/"+handle+".P2.qtrim ./cleaned/"+handle+".S2.qtrim LEADING:10 TRAILING:10 SLIDINGWINDOW:4:15 MINLEN:36 TOPHRED33 2>&1 | tee ./cleaned/"+handle+"_paired.qtrim.summary.log"
broken_clean = "trimmomatic-0.35.jar SE -threads "+threads+" -trimlog ./cleaned/"+handle+"_broken.qtrim.log ./parsed/"+str(r1nm)+"/"+handle+".rem.cat.fq ./cleaned/"+handle+".broken.qtrim LEADING:10 TRAILING:10 SLIDINGWINDOW:4:15 MINLEN:36 TOPHRED33 2>&1 | tee ./cleaned/"+handle+".broken.qtrim.summary.log"
os.system(str(PEclean))
os.system(str(broken_clean))
os.system("sed -i 's/\_1$/\ 1/g' ./cleaned/"+handle+".P1.qtrim")
os.system("sed -i 's/\_2$/\ 2/g' ./cleaned/"+handle+".P2.qtrim")
os.system("sed -i 's/\_1$/\ 1/g' ./cleaned/"+handle+".S1.qtrim")
os.system("sed -i 's/\_2$/\ 2/g' ./cleaned/"+handle+".S2.qtrim")
### Put command to trim away restriction site here and below else for Trimmomatic option ###
def SE_quality_trim(r1nm):
if options.quality == True:
for foo in open(options.sheet).read().splitlines():
bar = foo.split()
handle = bar[0]+"_"+bar[3]
threads = options.threads
SEclean = "trimmomatic-0.35.jar SE -threads "+threads+" -trimlog ./cleaned/"+handle+".qtrim.log ./parsed/"+str(r1nm)+"/"+handle+".S1.fq ./cleaned/"+handle+".S1.qtrim LEADING:10 TRAILING:10 SLIDINGWINDOW:4:15 MINLEN:36 TOPHRED33 2>&1 | tee ./cleaned/"+handle+".qtrim.summary.log"
os.system(str(SEclean))
os.system("sed -i 's/\_1$/\ 1/g' ./cleaned/"+handle+".S1.qtrim")
os.system("sed -i 's/\_2$/\ 2/g' ./cleaned/"+handle+".S2.qtrim")
### Put command to trim away restriction site here and below else for Trimmomatic option ###
#################################################
### Specify processes ###
#################################################
def main():
if options.paired == True:
r1nm, r1ext = os.path.splitext(options.read1)
r2nm, r2ext = os.path.splitext(options.read2)
if "1" in options.run:
setup(r1nm)
if "2" in options.run:
PE_clone_filter()
if "3" in options.run:
PE_lead_trim(r1nm, r2nm)
if "4" in options.run:
parse_sample_sheet()
PE_sample_parser(r1nm, r2nm)
PE_sample_rename(r1nm)
if "5" in options.run:
PE_quality_trim(r1nm)
else:
r1nm, r1ext = os.path.splitext(options.read1)
if "1" in options.run:
setup(r1nm)
if "2" in options.run:
SE_clone_filter()
if "3" in options.run:
SE_lead_trim(r1nm)
if "4" in options.run:
parse_sample_sheet()
SE_sample_parser(r1nm)
SE_sample_rename(r1nm)
if "5" in options.run:
SE_quality_trim(r1nm)
main()
| gpl-2.0 | 7,832,913,055,881,202,000 | 54.384058 | 425 | 0.611278 | false | 2.983216 | false | false | false |
datasciencesg/knowledge-base | MOOC/Computer-Science-and-Python/Lecture 9.py | 1 | 1648 | class intSet(object):
"""An intSet is a set of integers
The value is represented by a list of ints, self.vals.
Each int in the set occurs in self.vals exactly once."""
def __init__(self):
"""Create an empty set of integers"""
self.vals = []
def insert(self, e):
"""Assumes e is an integer and inserts e into self"""
if not e in self.vals:
self.vals.append(e)
def member(self, e):
"""Assumes e is an integer
Returns True if e is in self, and False otherwise"""
return e in self.vals
def remove(self, e):
"""Assumes e is an integer and removes e from self
Raises ValueError if e is not in self"""
try:
self.vals.remove(e)
except:
raise ValueError(str(e) + ' not found')
def __str__(self):
"""Returns a string representation of self"""
self.vals.sort()
return '{' + ','.join([str(e) for e in self.vals]) + '}'
def intersect(self, other):
'''returns a new intSet containing elements that appear in both sets'''
intersect_set = intSet()
for i in other.vals:
if self.member(i):
intersect_set.insert(i)
return intersect_set
def __len__(self):
return len(self.vals)
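# A quick usage sketch of intSet (illustrative only, not part of the original
# exercise):
#   s = intSet(); s.insert(3); s.insert(4)
#   t = intSet(); t.insert(4); t.insert(5)
#   print s.intersect(t)   # -> {4}
#   print len(s)           # -> 2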
# L11 Problem 6
class Queue(object):
def __init__(self):
self.queue = []
def insert(self, e):
self.queue.append(e)
def remove(self):
if self.queue == []:
raise ValueError()
else:
return self.queue.pop(0)
| mit | -147,776,047,993,633,660 | 27.912281 | 79 | 0.536408 | false | 4.140704 | false | false | false |
LearnPythonAndMakeGames/BasicPythonTutorialSeries | basic_tutorials/loops.py | 1 | 1425 | import random
def attack(attack_power, percent_to_hit, percent_to_critical=0.01):
"""Calculates the damage done based on attack power and percent to
hit. Also calculates critical strike.
Parameters:
attack_power - attack power
percent_to_hit - percent to hit
Optional:
percent_to_critical - percent to critical strike [default: 0.01]
Returns:
Returns damage
"""
damage_value = 0
# Calculate if creature was hit
chance_to_hit = random.random()
if chance_to_hit <= percent_to_hit:
creature_was_hit = True
else:
creature_was_hit = False
# Calculate final damage value
if creature_was_hit:
damage_value = random.randint(1, attack_power)
if chance_to_hit <= percent_to_critical:
damage_value = attack_power + damage_value
return damage_value
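# Illustrative calls (hypothetical values; results vary with the random rolls):
#   attack(10, 0.75)        -> 0 on a miss, otherwise 1..10 damage
#   attack(10, 0.75, 0.25)  -> a critical hit adds attack_power on top of the roll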
attack_power = raw_input("What is the attack power? ")
percent_to_hit = raw_input("What is the percent to hit? ")
percent_to_critical = raw_input("What is the percent to critical? ")
attack_power = int(attack_power)
percent_to_hit = float(percent_to_hit)
percent_to_critical = float(percent_to_critical)
player_wants_to_continue = True
while(player_wants_to_continue):
print attack(attack_power, percent_to_hit, percent_to_critical)
answer = raw_input("Continue ([Y]/n)? ")
if answer == "n":
player_wants_to_continue = False
| apache-2.0 | 1,480,331,657,702,606,000 | 28.6875 | 72 | 0.666667 | false | 3.544776 | false | false | false |
philippgovernale/dijkstrapy | mathfuncs.py | 1 | 1868 | import math
from decimal import *
from fractions import Fraction
def cube_root(operand):
'''Return the cube root of operand'''
power = 1 / 3.0
result = math.pow(operand, power)
return result
def sci_notation(operand1, operand2):
'''scientific notation. For numbers x and y returns x * 10^y'''
multiplier = math.pow(10, operand2)
result = operand1 * multiplier
return result
def invert_sign(operand):
'''invert sign of operand'''
return -operand
def decimal_places(operand1, operand2):
'''changes the decimal precision of a floating point number'''
precision = int(operand2)
result = round(operand1, precision)
return result
def rnd(operand1, operand2):
'''rounds to a given number of significant figures'''
num_before_dot = len(str(operand1).split('.')[0])
operand1 = float(operand1)
operand2 = int(operand2)
if num_before_dot +1 < operand2:
num_after_dot = operand2 - num_before_dot
round_str = '0.'
for dig in range(num_after_dot -1):
round_str += '0'
round_str += '1'
result= float(Decimal(operand1).quantize(Decimal(round_str), rounding=ROUND_HALF_UP))
elif num_before_dot == operand2:
result = int(Decimal(operand1).quantize(Decimal('1'),rounding=ROUND_HALF_UP))
else:
a = "%.*e" %(operand2-1, operand1)
if num_before_dot < operand2:
result = float(a)
else:
f = float(a)
result = int(f)
return result
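# Worked examples (derived from the logic above, not from the original docs):
#   rnd(3.14159, 3) -> 3.14    (3 significant figures)
#   rnd(1234.5, 2)  -> 1200    (2 significant figures)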
def fract(operand):
'''finds a fractional approximation to a given floating point number'''
return Fraction(operand).limit_denominator()
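# e.g. fract(0.5) -> Fraction(1, 2); denominators are capped by
# limit_denominator()'s default maximum of 1,000,000.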
def ncr(operand1, operand2):
'''n Choose r function'''
result = math.factorial(operand1) / math.factorial(operand2) / math.factorial(operand1-operand2)
return result
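# e.g. ncr(5, 2) gives 10, the number of ways to choose 2 items out of 5.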
| gpl-3.0 | 8,808,939,666,276,561,000 | 30.77193 | 100 | 0.620985 | false | 3.751004 | false | false | false |
0x1mason/GribApi.XP | grib_api/examples/python/bufr_read_synop.py | 1 | 2952 | # Copyright 2005-2017 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
# Python implementation: bufr_read_synop
#
# Description: how to read data values from BUFR messages.
#
# Please note that SYNOP reports can be encoded in various ways in BUFR.
# Therefore the code below might not work directly for other types of SYNOP
# messages than the one used in the example. It is advised to use bufr_dump to
# understand the structure of the messages.
import traceback
import sys
from eccodes import *
INPUT = '../../data/bufr/syno_multi.bufr'
VERBOSE = 1 # verbose error reporting
def example():
# open bufr file
f = open(INPUT)
# define the keys to be printed
keys = [
'blockNumber',
'stationNumber',
'latitude',
'longitude',
'airTemperatureAt2M',
'dewpointTemperatureAt2M',
'windSpeedAt10M',
'windDirectionAt10M',
'#1#cloudAmount', # cloud amount (low and mid level)
'#1#heightOfBaseOfCloud',
'#1#cloudType', # cloud type (low clouds)
'#2#cloudType', # cloud type (middle clouds)
'#3#cloudType' # cloud type (high clouds)
]
# The cloud information is stored in several blocks in the
# SYNOP message and the same key means a different thing in different
# parts of the message. In this example we will read the first
# cloud block introduced by the key
# verticalSignificanceSurfaceObservations=1.
# We know that this is the first occurrence of the keys we want to
# read so in the list above we used the # (occurrence) operator
# accordingly.
cnt = 0
# loop for the messages in the file
while 1:
# get handle for message
bufr = codes_bufr_new_from_file(f)
if bufr is None:
break
print "message: %s" % cnt
# we need to instruct ecCodes to expand all the descriptors
# i.e. unpack the data values
codes_set(bufr, 'unpack', 1)
# print the values for the selected keys from the message
for key in keys:
try:
print ' %s: %s' % (key, codes_get(bufr, key))
except CodesInternalError as err:
print 'Error with key="%s" : %s' % (key, err.msg)
cnt += 1
# delete handle
codes_release(bufr)
# close the file
f.close()
def main():
try:
example()
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
return 1
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | -3,858,405,223,084,148,700 | 27.384615 | 78 | 0.631436 | false | 3.789474 | false | false | false |
YoeriDijkstra/iFlow | packages/semi_analytical2DV/hydro/HydroLead.py | 1 | 8896 | """
Date: 01-06-15
Authors: R.L. Brouwer
"""
import logging
import numpy as np
from nifty.functionTemplates.NumericalFunctionWrapper import NumericalFunctionWrapper
import nifty as ny
from zetaFunctionUncoupled import zetaFunctionUncoupled
from src.util.diagnostics import KnownError
class HydroLead:
# Variables
logger = logging.getLogger(__name__)
# Methods
def __init__(self, input):
self.input = input
return
def run(self):
"""Run function to initiate the calculation of the leading order water level and velocities
Returns:
Dictionary with results. At least contains the variables listed as output in the registry
"""
self.logger.info('Running module HydroLead')
# Initiate variables
self.OMEGA = self.input.v('OMEGA')
self.G = self.input.v('G')
self.L = self.input.v('L')
self.x = self.input.v('grid', 'axis', 'x') * self.input.v('L')
jmax = self.input.v('grid', 'maxIndex', 'x')
kmax = self.input.v('grid', 'maxIndex', 'z')
fmax = self.input.v('grid', 'maxIndex', 'f')
self.z = self.input.v('grid', 'axis', 'z', 0, range(0, kmax+1))
self.zarr = ny.dimensionalAxis(self.input.slice('grid'), 'z')[:, :, 0]-self.input.v('R', x=self.x/self.L).reshape((len(self.x), 1)) #YMD 22-8-17 includes reference level; note that we take a reference frame z=[-H-R, 0]
self.bca = ny.amp_phase_input(self.input.v('A0'), self.input.v('phase0'), (2,))[1]
# Prepare output
d = dict()
d['zeta0'] = {}
d['u0'] = {}
d['w0'] = {}
zeta = np.zeros((jmax+1, 1, fmax+1), dtype=complex)
zetax = np.zeros((jmax+1, 1, fmax+1), dtype=complex)
zetaxx = np.zeros((jmax+1, 1, fmax+1), dtype=complex)
# Run computations
zeta[:, 0, 1], zetax[:, 0, 1], zetaxx[:, 0, 1] = self.waterlevel()
u, w = self.velocity(zeta[:, 0, 1], zetax[:, 0, 1], zetaxx[:, 0, 1])
# Save water level results
nf = NumericalFunctionWrapper(zeta, self.input.slice('grid'))
nf.addDerivative(zetax, 'x')
nf.addDerivative(zetaxx, 'xx')
d['zeta0']['tide'] = nf.function
# Save velocity results
nfu = NumericalFunctionWrapper(u[0], self.input.slice('grid'))
nfu.addDerivative(u[1], 'x')
nfu.addDerivative(u[2], 'z')
nfu.addDerivative(u[3], 'zz')
nfu.addDerivative(u[4], 'zzx')
d['u0']['tide'] = nfu.function
nfw = NumericalFunctionWrapper(w[0], self.input.slice('grid'))
nfw.addDerivative(w[1], 'z')
d['w0']['tide'] = nfw.function
return d
def rf(self, x):
"""Calculate the root r = \sqrt(i\sigma / Av) of the characteristic equation and its derivatives wrt x.
Parameters:
x - x-coordinate
Returns:
r - root of the characteristic equation of the leading order horizontal velocity
"""
Av = self.input.v('Av', x=x/self.L, z=0, f=0)
Avx = self.input.d('Av', x=x/self.L, z=0, f=0, dim='x')
Avxx = self.input.d('Av', x=x/self.L, z=0, f=0, dim='xx')
r = np.sqrt(1j * self.OMEGA / Av)
rx = -np.sqrt(1j * self.OMEGA) * Avx / (2. * Av**(3./2.))
rxx = np.sqrt(1j * self.OMEGA) * (3. * Avx**2 - 2. * Av * Avxx) / (4. * Av**(5./2.))
return r, rx, rxx
def af(self, x, r, rx, rxx):
"""Calculate the coefficient alpha that appears in the solution for the leading order horizontal velocity.
Parameters:
x - x-coordinate
Returns:
a - coefficient alpha
"""
H = self.input.v('H', x=x/self.L) + self.input.v('R', x=x/self.L)
Hx = self.input.d('H', x=x/self.L, dim='x') + self.input.d('R', x=x/self.L, dim='x')
Hxx = self.input.d('H', x=x/self.L, dim='xx') + self.input.d('R', x=x/self.L, dim='xx') # YMD 15-08-17 Reference level
Av = self.input.v('Av', x=x/self.L, z=0, f=0)
Avx = self.input.d('Av', x=x/self.L, z=0, f=0, dim='x')
Avxx = self.input.d('Av', x=x/self.L, z=0, f=0, dim='xx')
sf = self.input.v('Roughness', x=x/self.L, z=0, f=0)
sfx = self.input.d('Roughness', x=x/self.L, z=0, f=0, dim='x')
sfxx = self.input.d('Roughness', x=x/self.L, z=0, f=0, dim='xx')
# sf = sf[:, 0] # BUG (?) 23-02-2018
# Define trigonometric values for ease of reference
sinhrh = np.sinh(r * H)
coshrh = np.cosh(r * H)
cothrh = coshrh / sinhrh
cschrh = 1 / sinhrh
# Define parameters and their (second) derivative wrt x
E = rx * H + r * Hx
Ex = rxx * H + 2. * rx * Hx + r * Hxx
F = rx + r * E * cothrh
Fx = rxx + r * Ex * cothrh + E * (rx * cothrh - r * E**2 * cschrh**2)
K = r * Avx + Av * F + sfx * cothrh + sf * E
Kx = (r * Avxx + rx * Avx + Avx * F + Av * Fx + sfxx * cothrh -
sfx * E * cschrh**2 + sfx * E + sf * Ex)
G = r * Av * sinhrh + sf * coshrh
Gx = sinhrh * K
Gxx = E * K * coshrh + Kx * sinhrh
# Calculate coefficient alpha
a = sf / G # a
ax = sfx / G - sf * Gx / G**2 # a_x
axx = (sfxx-(2.*sfx*Gx + sf*Gxx)/G + 2.*sf*Gx**2/G**2) / G # YMD bug corrected 27-2-2018
return a, ax, axx
def waterlevel(self):
"""Solves the boundary value problem for the water level
Returns:
zeta - water level and its first and second derivative w.r.t. x
"""
jmax = self.input.v('grid', 'maxIndex', 'x')
fmax = self.input.v('grid', 'maxIndex', 'f')
r, rx, rxx = self.rf(self.x)
a, ax, axx = self.af(self.x, r, rx, rxx)
H = self.input.v('H', x=self.x / self.L) + self.input.v('R', x=self.x / self.L)
M = ((a * np.sinh(r * H) / r) - H) * self.input.v('B', x=self.x / self.L) * (self.G / (1j * self.OMEGA))
F = np.zeros((jmax+1, 1), dtype=complex) # Forcing term shape (x, number of right-hand sides)
Fopen = np.zeros((1, 1), dtype=complex) # Forcing term shape (1, number of right-hand sides)
Fclosed = np.zeros((1, 1), dtype=complex) # Forcing term shape (1, number of right-hand sides)
Fopen[0,0] = self.bca
Z, Zx, _ = zetaFunctionUncoupled(1, M, F, Fopen, Fclosed, self.input, hasMatrix = False)
zeta = Z[:, 0]
zeta_x = Zx[:, 0]
zeta_xx = np.gradient(Zx[:, 0], self.x[1], edge_order=2)
return zeta, zeta_x, zeta_xx
def velocity(self, zeta0, zetax, zetaxx):
"""Calculates the horizontal and vertical flow velocities based on the water level zeta
Parameters:
zeta - water level and its first and second derivative w.r.t. x
Returns:
u - horizontal flow velocity and several derivatives w.r.t. x and z
w - vertical flow velocity and its derivative w.r.t. z
"""
# Initiate variables
u = np.zeros((5, len(self.x), len(self.z), 3), dtype=complex)
w = np.zeros((2, len(self.x), len(self.z), 3), dtype=complex)
# Extract parameters alpha and r and B
r, rx, rxx = self.rf(self.x)
a, ax, axx = self.af(self.x, r, rx, rxx)
r = r.reshape(len(self.x), 1)
rx = rx.reshape(len(self.x), 1)
a = a.reshape(len(self.x), 1)
ax = ax.reshape(len(self.x), 1)
B = self.input.v('B', x=self.x/self.L).reshape(len(self.x), 1)
Bx = self.input.d('B', x=self.x/self.L, dim='x').reshape(len(self.x), 1)
# reshape (derivatives of) zeta
zeta0 = zeta0.reshape(len(self.x), 1)
zetax = zetax.reshape(len(self.x), 1)
zetaxx = zetaxx.reshape(len(self.x), 1)
# Calculate velocities and derivatives
c = self.G / (1j * self.OMEGA)
sinhrz = np.sinh(r * self.zarr)
coshrz = np.cosh(r * self.zarr)
var1 = c * zetax
var2 = (a * coshrz - 1.)
var3 = a * rx * self.zarr * sinhrz
# u
u[0, :, :, 1] = var1 * var2
# u_x
u[1, :, :, 1] = c * zetaxx * var2 + var1 * (ax * coshrz + var3)
# u_z
u[2, :, :, 1] = var1 * (a * r * sinhrz)
# u_zz
u[3, :, :, 1] = var1 * (a * r**2 * coshrz)
# u_zz_x
u[4, :, :, 1] = c * (zetaxx * a * r**2 * coshrz + zetax * (ax * r**2 * coshrz + 2. * a * r * rx * coshrz +
r**2 * var3))
# w
w[0, :, :, 1] = c * ((zetaxx + (Bx / B) * zetax) * (self.zarr - (a / r) * sinhrz) - (1 / r) * zetax *
(sinhrz * ax + a * rx * (self.zarr * coshrz - (sinhrz / r))) - self.OMEGA**2 * zeta0 / self.G)
# w_z
w[1, :, :, 1] = -c * (var2 * (zetaxx + (Bx / B) * zetax) + zetax * (ax * coshrz + var3))
return u, w
| lgpl-3.0 | 5,931,138,916,524,291,000 | 39.072072 | 231 | 0.520459 | false | 2.822335 | false | false | false |
stggh/PyAbel | abel/direct.py | 2 | 8751 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from .tools.math import gradient
try:
from .lib.direct import _cabel_direct_integral
cython_ext = True
except (ImportError, UnicodeDecodeError):
cython_ext = False
###########################################################################
# direct - calculation of forward and inverse Abel transforms by direct
# numerical integration
#
# Roman Yurchak - Laboratoire LULI, Ecole Polytechnique/CNRS/CEA, France
# 07.2018: DH fixed the correction for the case where r[0] = 0
# 03.2018: DH changed the default grid from 0.5, 1.5 ... to 0, 1, 2.
# 01.2018: DH changed the integration method to trapz
# 12.2015: RY Added a pure python implementation
# 11.2015: RY moved to PyAbel, added more unit tests, reorganized code base
# 2012: RY first implementation in hedp.math.abel
###########################################################################
def _construct_r_grid(n, dr=None, r=None):
""" Internal function to construct a 1D spatial grid """
if dr is None and r is None:
# default value, we don't care about the scaling
# since the mesh size was not provided
dr = 1.0
if dr is not None and r is not None:
raise ValueError('Both r and dr input parameters cannot be specified \
at the same time')
elif dr is None and r is not None:
if r.ndim != 1 or r.shape[0] != n:
raise ValueError('The input parameter r should be a 1D array'
'of shape = ({},), got shape = {}'.format(
n, r.shape))
# not so sure about this, needs verification -RY
dr = np.gradient(r)
else:
if isinstance(dr, np.ndarray):
raise NotImplementedError
r = (np.arange(n))*dr
return r, dr
def direct_transform(fr, dr=None, r=None, direction='inverse',
derivative=gradient, int_func=np.trapz,
correction=True, backend='C', **kwargs):
"""
This algorithm performs a direct computation of the Abel transform
integrals. When correction=False, the pixel at the lower bound of the
integral (where y=r) is skipped, which causes a systematic error in the
Abel transform. However, if correction=True is used, then an analytical
transform transform is applied to this pixel, which makes the approximation
that the function is linear across this pixel. With correction=True, the
Direct method produces reasonable results.
The Direct method is implemented in both Python and, if Cython is available
during PyAbel's installation, a compiled C version, which is much faster.
The implementation can be selected using the backend argument.
By default, integration at all other pixels is performed using the
Trapezoidal rule.
Parameters
----------
fr : 1d or 2d numpy array
input array to which direct/inverse Abel transform will be applied.
For a 2d array, the first dimension is assumed to be the z axis and
the second the r axis.
dr : float
spatial mesh resolution (optional, default to 1.0)
r : 1D ndarray
the spatial mesh (optional). Unusually, direct_transform should, in
principle, be able to handle non-uniform data. However, this has not
been rigorously tested.
direction : string
Determines if a forward or inverse Abel transform will be applied.
can be 'forward' or 'inverse'.
derivative : callable
a function that can return the derivative of the fr array
with respect to r. (only used in the inverse Abel transform).
int_func : function
This function is used to complete the integration. It should resemble
np.trapz, in that it must be callable using axis=, x=, and dx=
keyword arguments.
correction : boolean
If False the pixel where the weighting function has a singular value
(where r==y) is simply skipped, causing a systematic under-estimation
of the Abel transform.
If True, integration near the singular value is performed analytically,
by assuming that the data is linear across that pixel. The accuracy
of this approximation will depend on how the data is sampled.
backend : string
There are currently two implementations of the Direct transform,
one in pure Python and one in Cython. The backend parameter selects
which method is used. The Cython code is converted to C and compiled,
so this is faster.
Can be 'C' or 'python' (case insensitive).
'C' is the default, but 'python' will be used
if the C-library is not available.
Returns
-------
out : 1d or 2d numpy array of the same shape as fr
with either the direct or the inverse abel transform.
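Examples
--------
A minimal sketch (not from the original documentation; it assumes a uniformly
sampled radial profile and that this module is importable as abel.direct):
>>> import numpy as np
>>> from abel.direct import direct_transform
>>> r = np.linspace(0, 50, 200)
>>> fr = np.exp(-(r / 10.0)**2)  # Gaussian test profile
>>> F = direct_transform(fr, r=r, direction='forward')
>>> f_rec = direct_transform(F, r=r, direction='inverse')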
"""
backend = backend.lower()
if backend not in ['c', 'python']:
raise ValueError
f = np.atleast_2d(fr.copy())
r, dr = _construct_r_grid(f.shape[1], dr=dr, r=r)
if direction == "inverse":
f = derivative(f)/dr
f *= - 1./np.pi
else:
f *= 2*r[None, :]
if backend == 'c':
if not cython_ext:
print('Warning: Cython extensions were not built, \
the C backend is not available!')
print('Falling back to a pure Python backend...')
backend = 'python'
elif not is_uniform_sampling(r):
print('Warning: non uniform sampling is currently not \
supported by the C backend!')
print('Falling back to a pure Python backend...')
backend = 'python'
f = np.asarray(f, order='C', dtype='float64')
if backend == 'c':
out = _cabel_direct_integral(f, r, int(correction))
else:
out = _pyabel_direct_integral(f, r, int(correction), int_func)
if f.shape[0] == 1:
return out[0]
else:
return out
def _pyabel_direct_integral(f, r, correction, int_func=np.trapz):
"""
Calculation of the integral used in Abel transform
(both direct and inverse).
∞
⌠
⎮ f(r)
⎮ ────────────── dr
⎮ ___________
⎮ ╱ 2 2
⎮ ╲╱ y - r
⌡
y
Returns:
--------
np.array: of the same shape as f with the integral evaluated at r
"""
if correction not in [0, 1]:
raise ValueError
if is_uniform_sampling(r):
int_opts = {'dx': abs(r[1] - r[0])}
else:
int_opts = {'x': r}
out = np.zeros(f.shape)
R, Y = np.meshgrid(r, r, indexing='ij')
i_vect = np.arange(len(r), dtype=int)
II, JJ = np.meshgrid(i_vect, i_vect, indexing='ij')
mask = (II < JJ)
I_sqrt = np.zeros(R.shape)
I_sqrt[mask] = np.sqrt((Y**2 - R**2)[mask])
I_isqrt = np.zeros(R.shape)
I_isqrt[mask] = 1./I_sqrt[mask]
# create a mask that just shows the first two points of the integral
mask2 = ((II > JJ-2) & (II < JJ+1))
for i, row in enumerate(f): # loop over rows (z)
P = row[None, :] * I_isqrt # set up the integral
out[i, :] = int_func(P, axis=1, **int_opts) # take the integral
# correct for the extra triangle at the start of the integral
out[i, :] = out[i, :] - 0.5*int_func(P*mask2, axis=1, **int_opts)
"""
Compute the correction. Here we apply an
analytical integration of the cell with the singular value,
assuming a piecewise linear behaviour of the data.
The analytical abel transform for this trapezoid is:
c0*acosh(r1/y) - c_r*y*acosh(r1/y) + c_r*sqrt(r1**2 - y**2)
see: https://github.com/luli/hedp/blob/master/hedp/math/abel.py#L87-L104
"""
if correction == 1:
# precompute a few variables outside the loop:
f_r = (f[:, 1:] - f[:, :-1])/np.diff(r)[None, :]
isqrt = I_sqrt[II+1 == JJ]
if r[0] < r[1]*1e-8: # special case for r[0] = 0
ratio = np.append(np.cosh(1), r[2:]/r[1:-1])
else:
ratio = r[1:]/r[:-1]
acr = np.arccosh(ratio)
for i, row in enumerate(f): # loop over rows (z)
out[i, :-1] += isqrt*f_r[i] + acr*(row[:-1] - f_r[i]*r[:-1])
return out
def is_uniform_sampling(r):
"""
Returns True if the array is uniformly spaced to within 1e-13.
Otherwise False.
"""
dr = np.diff(r)
ddr = np.diff(dr)
return np.allclose(ddr, 0, atol=1e-13)
| mit | 7,123,239,178,088,770,000 | 35.405858 | 79 | 0.59407 | false | 3.747201 | false | false | false |
shaoziyang/SensorTilePocketWatch | src/main.py | 1 | 4685 | # SensorTile Pocket watch
# by shaoziyang 2017
# http://www.micropython.org.cn
# https://github.com/shaoziyang/SensorTilePocketWatch
import pyb
from st import SensorTile
from pyb import Timer, Pin, ExtInt, RTC
from micropython import const
import baticon
SLEEPCNT = const(18)
SW_PIN = 'PG11'
VUSB_PIN = 'PG10'
st = SensorTile()
import machine
i2c=machine.I2C(-1, sda=machine.Pin("C1"), scl=machine.Pin("C0"), freq=400000)
from ssd1306 import SSD1306_I2C
oled = SSD1306_I2C(128, 64, i2c)
oled.framebuf.rect(0,0,127,63,1)
oled.msg('Pocket',40,8)
oled.msg('Watch',44,28)
oled.text('MPY SensorTile', 8, 48)
oled.show()
pyb.delay(1000)
oled.fill(0)
oled.show()
flag = 1
sleepcnt = SLEEPCNT
keypressed = 0
keycnt = 0
page = 0
def rtcisr(t):
pyb.LED(1).toggle()
return
rtc=RTC()
#rtc.init()
rtc.wakeup(1000, rtcisr)
def tmisr(t):
global flag
flag = 1
tm = Timer(1, freq=1, callback=tmisr)
def show_bat():
oled.puts('%4.2fV'%st.BatVolt(), 16, 56)
oled.puts('%2d'%sleepcnt, 112, 56)
oled.show()
def show_press(page):
if(page==1):
oled.puts('%8.3f'%st.P(), 64, 0)
elif(page==2):
oled.msg('%8.3f'%st.P(), 48, 20)
oled.msg("%5.1fC"%st.T(), 72, 36)
def show_temp():
oled.puts("%5.1fC"%st.T(), 64, 56)
def show_accel(page):
if(page==1):
oled.puts("%7.2f"%st.AX(), 64, 8)
oled.puts("%7.2f"%st.AY(), 64, 16)
oled.puts("%7.2f"%st.AZ(), 64, 24)
elif(page==3):
oled.msg("%7.2f"%st.AX(), 56, 0)
oled.msg("%7.2f"%st.AY(), 56, 16)
oled.msg("%7.2f"%st.AZ(), 56, 32)
def show_gyro(page):
if(page==1):
oled.puts("%7.2f"%st.GX(), 64, 32)
oled.puts("%7.2f"%st.GY(), 64, 40)
oled.puts("%7.2f"%st.GZ(), 64, 48)
elif(page==4):
oled.msg("%7.2f"%st.GX(), 56, 0)
oled.msg("%7.2f"%st.GY(), 56, 16)
oled.msg("%7.2f"%st.GZ(), 56, 32)
def show_title(page):
oled.fill(0) # clear screen
if(page==1):
oled.puts("Press:", 0, 0)
oled.puts("Accel:", 0, 8)
oled.puts("Gyro:", 0, 32)
elif(page==2):
oled.msg("Press", 0, 0)
elif(page==3):
oled.msg("Accel", 0, 0)
elif(page==4):
oled.msg("Gyro", 0, 0)
def show_time():
d = rtc.datetime()
if(page==0):
s = "%04d"%d[0]+"-"+"%02d"%d[1]+"-"+"%02d"%d[2]
oled.msg(s, 16, 4)
s = "%02d"%d[4]+":"+"%02d"%d[5]+":"+"%02d"%d[6]
oled.msg(s, 16, 28)
oled.puts("%8.1fC"%st.T(), 64, 56)
else:
s = "%02d"%d[4]+":"+"%02d"%d[5]+":"+"%02d"%d[6]
oled.puts(s, 64, 56)
def swisr(t):
global keypressed
keypressed = 1
#print('.')
def showbaticon(n, x, y):
if(n > 10):
n = 10
if(n < 0):
n = 0
for i in range(16):
d = baticon.font[n*16+i]
for j in range(8):
oled.pixel(x+i, y+7-j, d&(1<<j))
sw = pyb.ExtInt(SW_PIN, pyb.ExtInt.IRQ_FALLING, pyb.Pin.PULL_UP, callback=swisr)
btn = pyb.Pin(SW_PIN, pyb.Pin.IN, pull=pyb.Pin.PULL_UP)
vusb = pyb.Pin(VUSB_PIN, pyb.Pin.IN, pull=pyb.Pin.PULL_NONE)
batc = st.Bat()
def showbat():
global batc
if(vusb()):
batc = batc + 1
if(batc > 10):
batc = st.Bat()
else:
batc = st.Bat()
showbaticon(batc, 0, 56)
oled.puts('%4.2fV'%st.BatVolt(), 16, 56)
show_title(page)
while True:
if(flag):
flag = 0
# keypressed
if(keypressed):
keypressed = 0
sleepcnt = SLEEPCNT
page = (page + 1)%5
show_title(page)
# key long pressed
if(btn()==0):
keycnt = keycnt + 1
if(keycnt > 3):
machine.soft_reset()
else:
keycnt = 0
#show sensor
show_press(page)
show_accel(page)
show_gyro(page)
#show battery
showbat()
show_time()
#power save
if(vusb()==0):
if(sleepcnt>0):
sleepcnt = sleepcnt - 1
else:
oled.poweroff()
while True:
machine.idle()
#machine.sleep()
if(btn()==0):
break;
keypressed = 0
oled.poweron()
sleepcnt = SLEEPCNT
oled.puts('%d'%sleepcnt, 120, 48)
else:
oled.puts(' ', 120, 48)
oled.show()
| mit | -1,308,226,705,722,466,000 | 22.149485 | 80 | 0.47492 | false | 2.645398 | false | false | false |
zzz0072/Python_Exercises | 07_RSI/ch01/logic_gate.py | 1 | 4604 | #!/usr/bin/env python3
class LogicGate:
def __init__(self, label):
self.name = label
self.output = None
def getName(self):
return self.name
def getOutput(self):
self.output = self.calcLogicOutput()
return self.output
class BinaryGate(LogicGate):
def __init__(self, label):
LogicGate.__init__(self, label)
self.pinA = None
self.pinB = None
def getPinA(self):
if self.pinA == None:
UsrPinA = input("Enter Pin A value for " + self.name + ": ")
return int(UsrPinA) >= 1
else:
return self.pinA.getFrom().getOutput()
def getPinB(self):
if self.pinB == None:
UsrPinB = input("Enter Pin B value for " + self.name + ": ")
return int(UsrPinB) >= 1
else:
return self.pinB.getFrom().getOutput()
def SetSrcPin(self, source):
if self.pinA == None:
self.pinA = source
else:
if self.pinB == None:
self.pinB = source
else:
print("Source pins are already occupied")
class AndGate(BinaryGate):
def __init__(self, label):
BinaryGate.__init__(self, label)
def calcLogicOutput(self):
pinA = self.getPinA()
pinB = self.getPinB()
return int(pinA == 1 and pinB == 1)
class OrGate(BinaryGate):
def __init__(self, label):
BinaryGate.__init__(self, label)
def calcLogicOutput(self):
pinA = self.getPinA()
pinB = self.getPinB()
return int(pinA == 1 or pinB == 1)
class NorGate(BinaryGate):
def __init__(self, label):
BinaryGate.__init__(self, label)
def calcLogicOutput(self):
pinA = self.getPinA()
pinB = self.getPinB()
return int(not(pinA == 1 or pinB == 1))
class NandGate(BinaryGate):
def __init__(self, label):
BinaryGate.__init__(self, label)
def calcLogicOutput(self):
pinA = self.getPinA()
pinB = self.getPinB()
return int(not(pinA == 1 and pinB == 1))
class XorGate(BinaryGate):
def __init__(self, label):
BinaryGate.__init__(self, label)
def calcLogicOutput(self):
pinA = self.getPinA()
pinB = self.getPinB()
return int(pinA != pinB)
class UnaryGate(LogicGate):
def __init__(self, label):
LogicGate.__init__(self, label)
self.pin = None
def getPin(self):
if self.pin == None:
UsrPin = input("Enter Pin value for " + self.name + ": ")
return int(UsrPin) >= 1
else:
return self.pin.getFrom().getOutput()
def SetSrcPin(self, source):
if self.pin == None:
self.pin = source
else:
print("Source pins are already occupied")
class NotGate(UnaryGate):
def __init__(self, label):
UnaryGate.__init__(self, label)
def calcLogicOutput(self):
return int(not self.getPin())
class CommonInput(LogicGate):
def __init__(self, label):
LogicGate.__init__(self, label)
self.pin = None
def calcLogicOutput(self):
if self.pin == None:
self.pin = input("Enter Pin value for " + self.name + ": ")
self.pin = int(self.pin) >= 1
return self.pin
else:
return self.pin
class Connector:
def __init__(self, fromGate, toGate):
self.fromGate = fromGate
self.toGate = toGate
toGate.SetSrcPin(self)
def getFrom(self):
return self.fromGate
def getTo(self):
return self.toGate
def HalfAdder():
g1 = CommonInput("A")
g2 = CommonInput("B")
g3 = XorGate("Sum")
g4 = AndGate("Carrier")
c1 = Connector(g1, g3)
c2 = Connector(g2, g3)
c3 = Connector(g1, g4)
c4 = Connector(g2, g4)
print(g3.getOutput())
print(g4.getOutput())
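# Illustrative sketch (not part of the original exercise): a full adder can be
# wired from the same primitives by chaining two XOR stages and OR-ing the two
# carry terms. Gate labels here are arbitrary.
#
#   a, b, cin = CommonInput("A"), CommonInput("B"), CommonInput("Cin")
#   s1 = XorGate("S1"); Connector(a, s1); Connector(b, s1)
#   total = XorGate("Sum"); Connector(s1, total); Connector(cin, total)
#   c1 = AndGate("C1"); Connector(a, c1); Connector(b, c1)
#   c2 = AndGate("C2"); Connector(s1, c2); Connector(cin, c2)
#   carry = OrGate("Carry"); Connector(c1, carry); Connector(c2, carry)
#   print(total.getOutput()); print(carry.getOutput())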
def Test1():
g1 = AndGate("G1")
g2 = AndGate("G2")
g3 = OrGate("G3")
g4 = NotGate("G4")
c1 = Connector(g1, g3)
c2 = Connector(g2, g3)
c3 = Connector(g3, g4)
print(g4.getOutput())
def Test2():
g1 = AndGate("G1")
g2 = AndGate("G2")
g3 = NotGate("G3")
g4 = NotGate("G4")
g5 = AndGate("G5")
c1 = Connector(g1, g3)
c2 = Connector(g2, g4)
c3 = Connector(g3, g5)
c4 = Connector(g4, g5)
print(g5.getOutput())
g1 = XorGate("xor")
print(g1.getOutput())
if __name__ == "__main__":
#g1 = NandGate("l1")
#print(g1.calcLogicOutput())
#g2 = NorGate("12")
#print(g2.calcLogicOutput())
#Test1()
HalfAdder()
| bsd-2-clause | 6,697,281,215,483,313,000 | 23.104712 | 72 | 0.54735 | false | 3.256011 | false | false | false |
itucsdb1616/itucsdb1616 | messageList.py | 1 | 2294 | import psycopg2 as dbapi2
from flask import current_app
from message import Message
from flask_login import current_user
class MessageList:
def __init__(self):
self.messages = {}
self.last_key = 0
def add_message(self, message):
connection = dbapi2.connect(current_app.config['dsn'])
cursor = connection.cursor()
cursor.execute("""SELECT ID FROM USERS WHERE USERNAME=%s""", (message.reciever,))
recieverid = cursor.fetchone()
cursor.execute("""SELECT ID FROM USERS WHERE USERNAME=%s""", (current_user.username,))
senderid = cursor.fetchone()
cursor.execute("""INSERT INTO MESSAGES (SENDERID, RECIEVERID, CONTENT, SENT) VALUES (%s, %s, %s, %s)""", (senderid, recieverid, message.content, message.sent))
connection.commit()
def delete_message(self, messageid):
connection = dbapi2.connect(current_app.config['dsn'])
cursor = connection.cursor()
cursor.execute("DELETE FROM MESSAGES WHERE MESSAGEID = %s""", (messageid,))
connection.commit()
def get_message(self):
connection = dbapi2.connect(current_app.config['dsn'])
cursor = connection.cursor()
query = "SELECT MESSAGES.SENDERID, MESSAGES.RECIEVERID, MESSAGES.CONTENT, USERPROFILE.NICKNAME FROM MESSAGES INNER JOIN USERPROFILE ON MESSAGES.SENDERID = USERPROFILE.ID"
cursor.execute(query)
senderid, recieverid, content, nickname = cursor.fetchone()
return Message(nickname, recieverid, content)
def get_messages(self):
connection = dbapi2.connect(current_app.config['dsn'])
cursor = connection.cursor()
cursor.execute("""SELECT ID FROM USERS WHERE USERNAME=%s""", (current_user.username,))
userid = cursor.fetchone()
cursor.execute("SELECT T1.MESSAGEID, T1.SENDERID, T1.RECIEVERID, T1.CONTENT, T2.NICKNAME AS SENDERNICK, T3.NICKNAME AS RECIEVERNICK FROM MESSAGES AS T1 INNER JOIN USERPROFILE AS T2 ON T1.SENDERID = T2.ID INNER JOIN USERPROFILE AS T3 ON T1.RECIEVERID = T3.ID WHERE SENDERID = %s OR RECIEVERID = %s""",(userid,userid))
messages = [(key, Message(sendernick, recievernick, content))
for key, sender, reciever, content, sendernick, recievernick in cursor]
return messages | gpl-3.0 | 7,035,937,284,581,702,000 | 52.372093 | 324 | 0.677855 | false | 3.652866 | false | false | false |
imsut/commons | src/python/twitter/pants/commands/filemap.py | 2 | 1912 | # ==================================================================================================
# Copyright 2011 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
from __future__ import print_function
__author__ = 'John Sirois'
from . import Command
from twitter.pants.base import BuildFile, Target
class Filemap(Command):
"""Outputs a mapping from source file to the target that owns the source file."""
__command__ = 'filemap'
def setup_parser(self, parser, args):
parser.set_usage("%prog filemap")
parser.epilog = """Outputs a mapping from source file to the target that owns the source file.
The mapping is output in 2 columns."""
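  # Example output line (illustrative; real source paths and target addresses
  # depend on the repository being scanned):
  #   src/python/foo/bar.py src/python/foo/BUILD:bar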
def __init__(self, root_dir, parser, argv):
Command.__init__(self, root_dir, parser, argv)
if self.args:
self.error("The filemap subcommand accepts no arguments.")
def execute(self):
for buildfile in BuildFile.scan_buildfiles(self.root_dir):
for address in Target.get_all_addresses(buildfile):
target = Target.get(address)
if hasattr(target, 'sources') and target.sources is not None:
for sourcefile in target.sources:
print(sourcefile, address)
| apache-2.0 | -1,253,404,614,502,270,200 | 39.680851 | 100 | 0.600418 | false | 4.552381 | false | false | false |
simvisage/aramis_cdt | aramis_cdt/npy_gen/aramis_npy_gen.py | 1 | 8720 | #-------------------------------------------------------------------------------
#
# Copyright (c) 2013
# IMB, RWTH Aachen University,
# ISM, Brno University of Technology
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in the AramisCDT top directory "license.txt" and may be
# redistributed only under the conditions described in the aforementioned
# license.
#
# Thanks for using Simvisage open source!
#
#-------------------------------------------------------------------------------
from traits.api import \
HasTraits, Property, cached_property, Int, Array, Instance, Tuple, Button, List, Float
from traitsui.api import View, UItem
import numpy as np
import os
import re
import platform
import time
if platform.system() == 'Linux':
sysclock = time.time
elif platform.system() == 'Windows':
sysclock = time.clock
from aramis_cdt.aramis_info import AramisInfo
class AramisNPyGen(HasTraits):
'''Class providing tools for preparation (generation) *.npy data files from
Aramis *.txt files for Aramis Crack Detection Tool (AramisCDT). *.npy files
enable faster loading from disk.
'''
aramis_info = Instance(AramisInfo)
# TODO: add to UI and multiply force
force_t_mult_coef = Float(100.0)
'''Multiplication coefficient to obtain force_t from AD channel value
'''
#===========================================================================
# Undeformed state data
#===========================================================================
X = Property(Array, depends_on='aramis_info.data_dir')
'''Array of values for undeformed state in the first step.
'''
@cached_property
def _get_X(self):
'''Load data (undeformed coordinates) for the first step from *.txt and
save as *.npy.
'''
fname = self.aramis_info.undeformed_coords_filename
print 'loading', fname, '...'
start_t = sysclock()
dir_npy = self.aramis_info.npy_dir
if os.path.exists(dir_npy) == False:
os.mkdir(dir_npy)
fname_npy = os.path.join(dir_npy, fname + '.npy')
fname_txt = os.path.join(self.aramis_info.data_dir, fname + '.txt')
data_arr = np.loadtxt(fname_txt,
# skiprows=14, # not necessary
usecols=[0, 1, 2, 3, 4])
self.x_idx_min_0 = int(np.min(data_arr[:, 0]))
self.y_idx_min_0 = int(np.min(data_arr[:, 1]))
self.x_idx_max_0 = int(np.max(data_arr[:, 0]))
self.y_idx_max_0 = int(np.max(data_arr[:, 1]))
self.ni = int(self.x_idx_max_0 - self.x_idx_min_0 + 1)
self.nj = int(self.y_idx_max_0 - self.y_idx_min_0 + 1)
data_arr = self._prepare_data_structure(data_arr)
np.save(fname_npy, data_arr)
print 'loading time =', sysclock() - start_t
print 'number of missing facets is', np.sum(np.isnan(data_arr).astype(int))
return data_arr
x_idx_min_0 = Int
'''Minimum value of the indices in the first column of the undeformed state.
'''
y_idx_min_0 = Int
'''Minimum value of the indices in the second column of the undeformed state.
'''
x_idx_max_0 = Int
'''Maximum value of the indices in the first column of the undeformed state.
'''
y_idx_max_0 = Int
'''Maximum value of the indices in the second column of the undeformed state.
'''
ni = Int
'''Number of facets in x-direction
'''
nj = Int
'''Number of facets in y-direction
'''
ad_channels_lst = List
'''List of tuples (undeformed, deformed) obtained from AD channels in
aramis file header
'''
x_0_shape = Property(Tuple, depends_on='X')
'''Shape of undeformed data array.
'''
@cached_property
def _get_x_0_shape(self):
return (3, self.nj, self.ni)
i = Property(Int, depends_on='X')
'''Indices in the first column of the undeformed state starting with zero.
'''
@cached_property
def _get_i(self):
return (np.arange(self.ni)[np.newaxis, :] *
np.ones(self.nj)[:, np.newaxis]).astype(int)
j = Property(Int, depends_on='X')
'''Indices in the first column of the undeformed state starting with zero.
'''
@cached_property
def _get_j(self):
return (np.arange(self.nj)[np.newaxis, :] *
np.ones(self.ni)[:, np.newaxis]).T
ad_channels_arr = Property(Array)
def _get_ad_channels_arr(self):
data= np.array(self.ad_channels_lst, dtype=float)
return data
generate_npy = Button
'''Generate npy files from Aramis *.txt data
'''
def _generate_npy_fired(self):
self.ad_channels_lst = []
for step_idx in self.aramis_info.step_list:
self._load_step_data(step_idx)
self.__decompile_ad_channels(step_idx)
np.save(os.path.join(self.aramis_info.npy_dir, 'ad_channels.npy'),
self.ad_channels_arr)
#===========================================================================
# Data preparation methods
#===========================================================================
def _load_step_data(self, step_idx):
'''Load data for the specified step from *.npy file. If file *.npy does
not exist the data is load from *.txt and saved as *.npy.
(improve speed of loading)
'''
fname = '%s%d' % (self.aramis_info.displacements_basename,
self.aramis_info.aramis_stage_list[step_idx])
print 'loading', fname, '...'
start_t = sysclock()
dir_npy = self.aramis_info.npy_dir
if os.path.exists(dir_npy) == False:
os.mkdir(dir_npy)
fname_npy = os.path.join(dir_npy, fname + '.npy')
fname_txt = os.path.join(self.aramis_info.data_dir, fname + '.txt')
# if os.path.exists(fname_npy):
# data_arr = np.load(fname_npy)
# else:
data_arr = np.loadtxt(fname_txt,
# skiprows=14, # not necessary
usecols=[0, 1, 2, 3, 4])
data_arr = self._prepare_data_structure(data_arr)
np.save(fname_npy, data_arr)
print 'loading time =', sysclock() - start_t
print 'number of missing facets is', np.sum(np.isnan(data_arr).astype(int))
return data_arr
def _prepare_data_structure(self, input_arr):
if self.ni == 0:
self.X
data_arr = np.empty((self.ni * self.nj,
input_arr.shape[1] - 2), dtype=float)
data_arr.fill(np.nan)
# input indices (columns 1 and 2)
in_indices = input_arr[:, :2].astype(int)
in_indices[:, 0] -= self.x_idx_min_0
in_indices[:, 1] -= self.y_idx_min_0
in_indices = in_indices.view([('', in_indices.dtype)] * in_indices.shape[1])
# undeformed state indices
un_indices = np.hstack((self.i.ravel()[:, np.newaxis],
self.j.ravel()[:, np.newaxis])).astype(int)
un_indices = un_indices.view([('', un_indices.dtype)] * un_indices.shape[1])
# data for higher steps have the same order of rows as
# undeformed one but missing values
mask = np.in1d(un_indices, in_indices, assume_unique=True)
data_arr[mask] = input_arr[:, 2:]
print data_arr.shape, self.x_0_shape
data_arr = data_arr.T.reshape(self.x_0_shape)
return data_arr
def __decompile_ad_channels(self, step_idx):
fname = '%s%d' % (self.aramis_info.displacements_basename,
self.aramis_info.aramis_stage_list[step_idx])
with open(os.path.join(self.aramis_info.data_dir, fname + '.txt')) as infile:
for i in range(20):
line = infile.readline()
m = re.match(r'#\s+AD-0:\s+[-+]?\d+\.\d+\s+(?P<force>[-+]?\d+\.\d+)', line)
if m:
force = float(m.groups('force')[0])
else:
force = 0
m = re.match(r'#\s+deformt:\s+(?P<time>[-+]?\d+\.\d+)', line)
if m:
time = float(m.groups('time')[0])
self.ad_channels_lst.append([time, force])
view = View(
UItem('generate_npy'),
)
if __name__ == '__main__':
ns=['TTb-1C-3cm-0-3300EP-V3_B1-Aramis2d-sideview-Xf15s13-Yf15s13']
data_dir = '/media/raid/Aachen/simdb_large_txt/'
for n in ns:
AI = AramisInfo(data_dir=data_dir+n)
print AI.step_list
AG = AramisNPyGen(aramis_info=AI)
#AG.configure_traits()
AG.generate_npy = True
| bsd-3-clause | 3,170,812,733,524,312,600 | 35.033058 | 91 | 0.54656 | false | 3.489396 | false | false | false |
sserrot/champion_relationships | venv/Lib/site-packages/networkx/algorithms/bipartite/basic.py | 1 | 8118 | # -*- coding: utf-8 -*-
"""
==========================
Bipartite Graph Algorithms
==========================
"""
# Copyright (C) 2013-2019 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
from networkx.algorithms.components import connected_components
__author__ = """\n""".join(['Jordi Torrents <[email protected]>',
'Aric Hagberg <[email protected]>'])
__all__ = ['is_bipartite',
'is_bipartite_node_set',
'color',
'sets',
'density',
'degrees']
def color(G):
"""Returns a two-coloring of the graph.
Raises an exception if the graph is not bipartite.
Parameters
----------
G : NetworkX graph
Returns
-------
color : dictionary
A dictionary keyed by node with a 1 or 0 as data for each node color.
Raises
------
exc:`NetworkXError` if the graph is not two-colorable.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> c = bipartite.color(G)
>>> print(c)
{0: 1, 1: 0, 2: 1, 3: 0}
You can use this to set a node attribute indicating the biparite set:
>>> nx.set_node_attributes(G, c, 'bipartite')
>>> print(G.nodes[0]['bipartite'])
1
>>> print(G.nodes[1]['bipartite'])
0
"""
if G.is_directed():
import itertools
def neighbors(v):
return itertools.chain.from_iterable([G.predecessors(v),
G.successors(v)])
else:
neighbors = G.neighbors
color = {}
for n in G: # handle disconnected graphs
if n in color or len(G[n]) == 0: # skip isolates
continue
queue = [n]
color[n] = 1 # nodes seen with color (1 or 0)
while queue:
v = queue.pop()
c = 1 - color[v] # opposite color of node v
for w in neighbors(v):
if w in color:
if color[w] == color[v]:
raise nx.NetworkXError("Graph is not bipartite.")
else:
color[w] = c
queue.append(w)
# color isolates with 0
color.update(dict.fromkeys(nx.isolates(G), 0))
return color
def is_bipartite(G):
""" Returns True if graph G is bipartite, False if not.
Parameters
----------
G : NetworkX graph
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> print(bipartite.is_bipartite(G))
True
See Also
--------
color, is_bipartite_node_set
"""
try:
color(G)
return True
except nx.NetworkXError:
return False
def is_bipartite_node_set(G, nodes):
"""Returns True if nodes and G/nodes are a bipartition of G.
Parameters
----------
G : NetworkX graph
nodes: list or container
Check if nodes are a one of a bipartite set.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> X = set([1,3])
>>> bipartite.is_bipartite_node_set(G,X)
True
Notes
-----
For connected graphs the bipartite sets are unique. This function handles
disconnected graphs.
"""
S = set(nodes)
for CC in (G.subgraph(c).copy() for c in connected_components(G)):
X, Y = sets(CC)
if not ((X.issubset(S) and Y.isdisjoint(S)) or
(Y.issubset(S) and X.isdisjoint(S))):
return False
return True
def sets(G, top_nodes=None):
"""Returns bipartite node sets of graph G.
Raises an exception if the graph is not bipartite or if the input
graph is disconnected and thus more than one valid solution exists.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
Parameters
----------
G : NetworkX graph
top_nodes : container, optional
Container with all nodes in one bipartite node set. If not supplied
it will be computed. But if more than one solution exists an exception
will be raised.
Returns
-------
X : set
Nodes from one side of the bipartite graph.
Y : set
Nodes from the other side.
Raises
------
AmbiguousSolution
Raised if the input bipartite graph is disconnected and no container
with all nodes in one bipartite set is provided. When determining
the nodes in each bipartite set more than one valid solution is
possible if the input graph is disconnected.
NetworkXError
Raised if the input graph is not bipartite.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.path_graph(4)
>>> X, Y = bipartite.sets(G)
>>> list(X)
[0, 2]
>>> list(Y)
[1, 3]
See Also
--------
color
"""
if G.is_directed():
is_connected = nx.is_weakly_connected
else:
is_connected = nx.is_connected
if top_nodes is not None:
X = set(top_nodes)
Y = set(G) - X
else:
if not is_connected(G):
msg = 'Disconnected graph: Ambiguous solution for bipartite sets.'
raise nx.AmbiguousSolution(msg)
c = color(G)
X = {n for n, is_top in c.items() if is_top}
Y = {n for n, is_top in c.items() if not is_top}
return (X, Y)
def density(B, nodes):
"""Returns density of bipartite graph B.
Parameters
----------
G : NetworkX graph
nodes: list or container
Nodes in one node set of the bipartite graph.
Returns
-------
d : float
The bipartite density
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.complete_bipartite_graph(3,2)
>>> X=set([0,1,2])
>>> bipartite.density(G,X)
1.0
>>> Y=set([3,4])
>>> bipartite.density(G,Y)
1.0
Notes
-----
The container of nodes passed as argument must contain all nodes
in one of the two bipartite node sets to avoid ambiguity in the
case of disconnected graphs.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
color
"""
n = len(B)
m = nx.number_of_edges(B)
nb = len(nodes)
nt = n - nb
if m == 0: # includes cases n==0 and n==1
d = 0.0
else:
if B.is_directed():
d = m / (2.0 * float(nb * nt))
else:
d = m / float(nb * nt)
return d
def degrees(B, nodes, weight=None):
"""Returns the degrees of the two node sets in the bipartite graph B.
Parameters
----------
G : NetworkX graph
nodes: list or container
Nodes in one node set of the bipartite graph.
weight : string or None, optional (default=None)
The edge attribute that holds the numerical value used as a weight.
If None, then each edge has weight 1.
The degree is the sum of the edge weights adjacent to the node.
Returns
-------
(degX,degY) : tuple of dictionaries
The degrees of the two bipartite sets as dictionaries keyed by node.
Examples
--------
>>> from networkx.algorithms import bipartite
>>> G = nx.complete_bipartite_graph(3,2)
>>> Y=set([3,4])
>>> degX,degY=bipartite.degrees(G,Y)
>>> dict(degX)
{0: 2, 1: 2, 2: 2}
Notes
-----
The container of nodes passed as argument must contain all nodes
in one of the two bipartite node sets to avoid ambiguity in the
case of disconnected graphs.
See :mod:`bipartite documentation <networkx.algorithms.bipartite>`
for further details on how bipartite graphs are handled in NetworkX.
See Also
--------
color, density
"""
bottom = set(nodes)
top = set(B) - bottom
return (B.degree(top, weight), B.degree(bottom, weight))
| mit | -4,047,282,145,665,920,000 | 25.442997 | 78 | 0.574526 | false | 3.852871 | false | false | false |
mstojek/pms5003-logger | pmlog.py | 1 | 8814 | #!/usr/bin/python
import sys
import os
import time
import signal
import struct
import logging
import argparse
from collections import namedtuple
from threading import Event
from periphery import GPIO, Serial
import urllib
import urllib2
logging.basicConfig(level=logging.DEBUG, stream=sys.stderr,
format='%(asctime)-15s %(levelname)-8s %(message)s')
log = logging.getLogger()
parser = argparse.ArgumentParser(description='PMS5003 data logger')
parser.add_argument(
"-p", "--serial-port", type=str, default="/dev/ttyS1",
help="Serial port connected to the PMS5003 sensor")
parser.add_argument(
"--reset-pin", type=int, default=None,
help="GPIO number connected to the RESET signal")
parser.add_argument(
"--enable-pin", type=int, default=None,
help="GPIO number connected to the SET (enable) signal")
parser.add_argument(
"--warmup-time", type=int, default=30,
help="Seconds to wait before reading data")
subparsers = parser.add_subparsers(dest="cmd")
cmd_monitor_parser = subparsers.add_parser("monitor")
cmd_monitor_parser.add_argument(
"--measure-period", type=int, default=60 * 5,
help="Seconds between measurements")
cmd_oneshot_parser = subparsers.add_parser("oneshot")
cmd_domoticz_parser = subparsers.add_parser("domoticz")
cmd_domoticz_parser.add_argument(
"-ip", "--domoticz-ip", required=True,
help="IP address of domoticz server")
cmd_domoticz_parser.add_argument(
"-p", "--domoticz-port", default=8080,
help="Port of domoticz server")
cmd_domoticz_parser.add_argument(
"-m", "--mode", default='oneshot', choices=['oneshot', 'monitor'],
help="Monitor or oneshot mode")
cmd_domoticz_parser.add_argument(
"--measure-period", type=int, default=60 * 5,
help="Seconds between measurements")
cmd_domoticz_parser.add_argument(
"--pm_1_idx",
help="IDX of PM1 - if empty nothing will be reported to domoticz")
cmd_domoticz_parser.add_argument(
"--pm_25_idx",
help="IDX of PM2.5 - if empty nothing will be reported to domoticz")
cmd_domoticz_parser.add_argument(
"--pm_10_idx",
help="IDX of PM10 - if empty nothing will be reported to domoticz")
cmd_domoticz_parser.add_argument(
"--pm_1_percent_idx",
help="IDX of PM1 percent (100%% is 25 ug/m3) - if empty nothing will be reported to domoticz")
cmd_domoticz_parser.add_argument(
"--pm_25_percent_idx",
help="IDX of PM2.5 percent (100%% is 25 ug/m3) - if empty nothing will be reported to domoticz")
cmd_domoticz_parser.add_argument(
"--pm_10_percent_idx",
help="IDX of PM10 percent (100%% is 50 ug/m3) - if empty nothing will be reported to domoticz")
Packet = namedtuple('Packet', [
'pm1_std', 'pm25_std', 'pm10_std', 'pm01_atm', 'pm2_5_atm',
'pm10_atm', 'count_03um', 'count_05um', 'count_1um',
'count_2_5um', 'count_5um', 'count_10um'])
class PMS5003(object):
def __init__(self, port, enable_pin=None, reset_pin=None):
self.port = Serial(port, 9600)
self.gpio_enable = None
self.gpio_reset = None
self.stop = Event()
# suspend sensor by default
if enable_pin:
self.gpio_enable = GPIO(enable_pin, "low")
if reset_pin:
self.gpio_reset = GPIO(reset_pin, "high")
def reset(self):
if self.gpio_reset is None:
return
self.gpio_reset.write(False)
self.enable()
time.sleep(.1)
self.gpio_reset.write(True)
def enable(self):
if not self.gpio_enable: return
log.info("Enable sensor (via gpio %s)", self.gpio_enable.pin)
self.gpio_enable.write(True)
def disable(self):
if not self.gpio_enable: return
log.info("Disable sensor (via gpio %s)", self.gpio_enable.pin)
self.gpio_enable.write(False)
def discard_input(self):
while self.port.input_waiting(): self.port.read(4096, 0)
def warmup(self, seconds):
log.info("Warming up for %s seconds", seconds)
self.stop.wait(seconds)
self.discard_input()
@staticmethod
def packet_from_data(data):
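        # A PMS5003 frame is 32 bytes: the 0x42 0x4D start bytes followed by 15
        # big-endian uint16 words -- the frame length, 12 data words (kept in
        # Packet), one word this code ignores, and a 16-bit checksum equal to
        # the byte-sum of everything before it, which is what is verified below.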
numbers = struct.unpack('>16H', data)
csum = sum(data[:-2])
if csum != numbers[-1]:
log.warn("Bad packet data: %s / %s", data, csum)
return
return Packet(*numbers[2:-2])
def receive_one(self):
while not self.stop.is_set():
c = self.port.read(1)
if not c or c != '\x42':
continue
c = self.port.read(1, .1)
if not c or c != '\x4d':
continue
data = bytearray((0x42, 0x4d,))
data += self.port.read(30, .1)
if len(data) != 32:
continue
p = self.packet_from_data(data)
if p: return p
def run_monitor(sensor, args):
start_at = time.time()
sleep_period = args.measure_period - args.warmup_time
if args.enable_pin:
sensor.enable()
if args.warmup_time:
sensor.warmup(args.warmup_time)
try:
while not sensor.stop.is_set():
packet = sensor.receive_one()
if not packet: break
packet_at = time.time()
log.info("@{: 6.2f}\t{}".format((packet_at - start_at), packet))
if args.cmd == "domoticz":
report_to_domoticz(packet, args)
if sleep_period > 0:
sensor.disable()
sensor.stop.wait(sleep_period)
if sensor.stop.is_set(): break
sensor.reset()
sensor.enable()
sensor.warmup(args.warmup_time)
else:
sensor.stop.wait(args.measure_period)
except KeyboardInterrupt:
log.info("Bye bye.")
finally:
sensor.disable()
def run_oneshot(sensor, args):
if args.enable_pin:
sensor.enable()
if args.warmup_time:
sensor.warmup(args.warmup_time)
try:
packet = sensor.receive_one()
log.info("{}".format(packet))
if args.cmd == "domoticz":
report_to_domoticz(packet, args)
except KeyboardInterrupt:
log.info("Bye bye.")
finally:
sensor.disable()
sensor.disable()
def install_signal_handlers(sensor):
def _sighandler(signum, frame):
log.info("Got %s", signum)
sensor.stop.set()
signal.signal(signal.SIGINT, _sighandler)
signal.signal(signal.SIGTERM, _sighandler)
def report_to_domoticz(packet, args):
if args.pm_1_idx:
send_http_request_to_domoticz(ip=args.domoticz_ip, port=args.domoticz_port, idx=args.pm_1_idx, idx_value=packet.pm01_atm)
if args.pm_25_idx:
send_http_request_to_domoticz(ip=args.domoticz_ip, port=args.domoticz_port, idx=args.pm_25_idx,
idx_value=packet.pm2_5_atm)
if args.pm_10_idx:
send_http_request_to_domoticz(ip=args.domoticz_ip, port=args.domoticz_port, idx=args.pm_10_idx, idx_value=packet.pm10_atm)
if args.pm_1_percent_idx:
send_http_request_to_domoticz(ip=args.domoticz_ip, port=args.domoticz_port, idx=args.pm_1_percent_idx,
idx_value=packet.pm01_atm * 4)
if args.pm_25_percent_idx:
send_http_request_to_domoticz(ip=args.domoticz_ip, port=args.domoticz_port, idx=args.pm_25_percent_idx,
idx_value=packet.pm2_5_atm * 4)
if args.pm_10_percent_idx:
send_http_request_to_domoticz(ip=args.domoticz_ip, port=args.domoticz_port, idx=args.pm_10_percent_idx,
idx_value=packet.pm10_atm * 2)
def send_http_request_to_domoticz(ip, port, idx, idx_value):
url = "http://" + ip + ":" + port + "/json.htm?type=command¶m=udevice&nvalue=0&idx=" + str(
idx) + "&svalue=" + str(idx_value)
# print(url)
request = urllib2.Request(url)
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError, e:
log.info('HTTPError = ' + str(e.code))
except urllib2.URLError, e:
log.info('URLError = ' + str(e.reason))
# except httplib.HTTPException, e:
# log.info('HTTPException')
except Exception:
import traceback
log.info('generic exception: ' + traceback.format_exc())
def main():
args = parser.parse_args()
sensor = PMS5003(args.serial_port, args.enable_pin, args.reset_pin)
sensor.reset()
install_signal_handlers(sensor)
if args.cmd == "monitor":
run_monitor(sensor, args)
elif args.cmd == "oneshot":
run_oneshot(sensor, args)
elif args.cmd == "domoticz":
if args.mode == "monitor":
run_monitor(sensor, args)
elif args.mode == "oneshot":
run_oneshot(sensor, args)
if __name__ == "__main__":
main()
| bsd-3-clause | -835,108,918,223,888,900 | 32.513308 | 130 | 0.609145 | false | 3.263236 | false | false | false |
masayukig/tempest | tempest/lib/services/volume/v3/versions_client.py | 1 | 2391 | # Copyright 2017 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.volume import versions as schema
from tempest.lib.common import rest_client
from tempest.lib.services.volume import base_client
class VersionsClient(base_client.BaseClient):
def list_versions(self):
"""List API versions
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#list-all-api-versions
"""
version_url = self._get_base_version_url()
start = time.time()
resp, body = self.raw_request(version_url, 'GET')
end = time.time()
# NOTE: We need a raw_request() here instead of request() call because
# "list API versions" API doesn't require an authentication and we can
# skip it with raw_request() call.
self._log_request('GET', version_url, resp, secs=(end - start),
resp_body=body)
self._error_checker(resp, body)
body = json.loads(body)
self.validate_response(schema.list_versions, resp, body)
return rest_client.ResponseBody(resp, body)
def show_version(self, version):
"""Show API version details
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/#show-api-v3-details
"""
version_url = os.path.join(self._get_base_version_url(), version)
resp, body = self.get(version_url)
body = json.loads(body)
self.validate_response(schema.volume_api_version_details, resp, body)
return rest_client.ResponseBody(resp, body)
| apache-2.0 | -7,938,392,986,946,735,000 | 37.564516 | 82 | 0.675031 | false | 3.998328 | false | false | false |
Samfox2/motioneye | setup.py | 1 | 2175 |
import os.path
import subprocess
from codecs import open
from setuptools.command.sdist import sdist
from setuptools import setup
import motioneye
here = os.path.abspath(os.path.dirname(__file__))
name = 'motioneye'
version = motioneye.VERSION
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
# update the version according to git
git_version = subprocess.Popen('git describe --tags',
        stdout=subprocess.PIPE, stderr=open('/dev/null', 'w'), shell=True).communicate()[0].strip()
if git_version:
print 'detected git version %s' % git_version
version = git_version
else:
print 'using found version %s' % version
class custom_sdist(sdist):
def run(self):
if git_version:
subprocess.Popen("sed -ri 's/VERSION = (.+)/VERSION = \"%s\"/' %s/__init__.py" % (git_version, name),
shell=True).communicate()
sdist.run(self)
setup(
name=name,
version=version,
description='motionEye server',
long_description=long_description,
url='https://bitbucket.org/ccrisan/motioneye/',
author='Calin Crisan',
author_email='[email protected]',
license='GPLv3',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Topic :: Multimedia :: Video',
'License :: OSI Approved :: GNU Lesser General Public License v3 (LGPLv3)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7'
],
keywords='motion video surveillance frontend',
packages=['motioneye'],
install_requires=['tornado>=3.1', 'jinja2', 'pillow', 'pycurl'],
package_data={
'motioneye': [
'static/*.*',
'static/*/*',
'templates/*'
]
},
data_files=[
(os.path.join('share/%s' % name, root), [os.path.join(root, f) for f in files])
for (root, dirs, files) in os.walk('extra')
],
entry_points={
'console_scripts': [
'meyectl=motioneye.meyectl:main',
],
},
cmdclass={
'sdist': custom_sdist
}
)
| gpl-3.0 | -6,330,338,415,794,202,000 | 21.894737 | 113 | 0.594023 | false | 3.717949 | false | false | false |
jcastrojob/kata_tucan | main.py | 1 | 1738 | __author__ = 'jcastro'
from tournament import Tournament
import abc
class Game(object):
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def calculate_score(self, score):
"""Returns final score for player"""
class Basketball(Game):
point_position = {
'G': [2, 3, 1],
'F': [2, 2, 2],
'C': [2, 1, 3]
}
def calculate_score(self, game):
score_player = game['score']
position_player = game['position']
score_total = 0
if position_player in self.point_position:
score_position = self.point_position[position_player]
for i in range(len(score_player)):
score_total += int(score_player[i]) * int(score_position[i])
return score_total
# player 1;nick1;4;Team A;G;10;2;7
# player 2;nick2;8;Team A;F;0;10;0
# player 3;nick3;15;Team A;C;15;10;4
# player 4;nick4;16;Team B;G;20;0;0
# player 5;nick5;23;Team B;F;4;7;7
# player 6;nick6;42;Team B;C;8;10;0
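# Worked example using the first sample row above: player 1 is a guard ('G'),
# so point_position gives weights [2, 3, 1] and the scores 10, 2, 7 yield
# 10*2 + 2*3 + 7*1 = 33.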
my_tournament = Tournament()
my_tournament.create_game("basketball", Basketball())
strPlayer = "player 1;nick1;4;Team A;G;10;2;7"
strPlayer2 = "player 2;nick2;8;Team A;F;0;10;0"
strPlayer3 = "player 4;nick4;16;Team B;G;20;0;0"
my_tournament.parser("basketball", strPlayer)
my_tournament.parser("basketball", strPlayer2)
my_tournament.parser("basketball", strPlayer3)
my_tournament.create_game("basketballHARD", Basketball())
strPlayer = "player 1;nick1;4;Team A;G;10;2;7"
strPlayer2 = "player 2;nick2;8;Team A;F;0;10;0"
strPlayer3 = "player 4;nick4;16;Team B;G;230;0;0"
my_tournament.parser("basketballHARD", strPlayer)
my_tournament.parser("basketballHARD", strPlayer2)
my_tournament.parser("basketballHARD", strPlayer3)
print(my_tournament.mvp()) | apache-2.0 | 8,572,080,306,230,914,000 | 27.508197 | 76 | 0.662831 | false | 2.767516 | false | false | false |
mayfield/ecmcli | ecmcli/commands/clients.py | 1 | 6055 | """
Harvest a detailed list of clients seen by online routers.
"""
import itertools
import pickle
import pkg_resources
from . import base
class List(base.ECMCommand):
""" Show the currently connected clients on a router. The router must be
connected to ECM for this to work. """
# XXX Broken when len(clients) > page_size
name = 'ls'
wifi_bw_modes = {
0: "20",
1: "40",
2: "80"
}
wifi_modes = {
0: "802.11b",
1: "802.11g",
2: "802.11n",
3: "802.11n-only",
4: "802.11ac"
}
wifi_bands = {
0: "2.4",
1: "5"
}
def setup_args(self, parser):
self.add_router_argument('idents', nargs='*')
self.add_argument('-v', '--verbose', action="store_true")
self.inject_table_factory()
@property
def mac_db(self):
try:
return self._mac_db
except AttributeError:
mac_db = pkg_resources.resource_stream('ecmcli', 'mac.db')
self._mac_db = pickle.load(mac_db)
return self._mac_db
def mac_lookup_short(self, info):
return self.mac_lookup(info, 0)
def mac_lookup_long(self, info):
return self.mac_lookup(info, 1)
def mac_lookup(self, info, idx):
mac = int(''.join(info['mac'].split(':')[:3]), 16)
localadmin = mac & 0x20000
# This really only pertains to cradlepoint devices.
if localadmin and mac not in self.mac_db:
mac &= 0xffff
return self.mac_db.get(mac, [None, None])[idx]
def make_dns_getter(self, ids):
dns = {}
for leases in self.api.get_pager('remote', 'status/dhcpd/leases',
id__in=','.join(ids)):
if not leases['success'] or not leases['data']:
continue
dns.update(dict((x['mac'], x['hostname'])
for x in leases['data']))
return lambda x: dns.get(x['mac'], '')
def make_wifi_getter(self, ids):
wifi = {}
radios = {}
for x in self.api.get_pager('remote', 'config/wlan/radio',
id__in=','.join(ids)):
if x['success']:
radios[x['id']] = x['data']
for x in self.api.get_pager('remote', 'status/wlan/clients',
id__in=','.join(ids)):
if not x['success'] or not x['data']:
continue
for client in x['data']:
client['radio_info'] = radios[x['id']][client['radio']]
wifi[client['mac']] = client
return lambda x: wifi.get(x['mac'], {})
def wifi_status_acc(self, client, default):
""" Accessor for WiFi RSSI, txrate and mode. """
if not client:
return default
status = [
self.get_wifi_rssi(client),
'%d Mbps' % client['txrate'],
self.wifi_modes[client['mode']],
]
return ', '.join(status)
def get_wifi_rssi(self, wifi_info):
rssi_vals = []
for i in itertools.count(0):
try:
rssi_vals.append(wifi_info['rssi%d' % i])
except KeyError:
break
rssi = sum(rssi_vals) / len(rssi_vals)
if rssi > -40:
fmt = '<b><green>%.0f</green></b>'
elif rssi > -55:
fmt = '<green>%.0f</green>'
elif rssi > -65:
fmt = '<yellow>%.0f</yellow>'
elif rssi > -80:
fmt = '<red>%.0f</red>'
else:
fmt = '<b><red>%.0f</red></b>'
return fmt % rssi + ' dBm'
def wifi_bss_acc(self, client, default):
""" Accessor for WiFi access point. """
if not client:
return default
radio = client['radio_info']
bss = radio['bss'][client['bss']]
band = self.wifi_bands[client['radio_info']['wifi_band']]
return '%s (%s Ghz)' % (bss['ssid'], band)
def run(self, args):
if args.idents:
routers = [self.api.get_by_id_or_name('routers', x)
for x in args.idents]
else:
routers = self.api.get_pager('routers', state='online',
product__series=3)
ids = dict((x['id'], x['name']) for x in routers)
if not ids:
raise SystemExit("No online routers found")
data = []
for clients in self.api.get_pager('remote', 'status/lan/clients',
id__in=','.join(ids)):
if not clients['success']:
continue
by_mac = {}
for x in clients['data']:
x['router'] = ids[str(clients['id'])]
if x['mac'] in by_mac:
by_mac[x['mac']]['ip_addresses'].append(x['ip_address'])
else:
x['ip_addresses'] = [x['ip_address']]
by_mac[x['mac']] = x
data.extend(by_mac.values())
dns_getter = self.make_dns_getter(ids)
ip_getter = lambda x: ', '.join(sorted(x['ip_addresses'], key=len))
headers = ['Router', 'IP Addresses', 'Hostname', 'MAC', 'Hardware']
accessors = ['router', ip_getter, dns_getter, 'mac']
if not args.verbose:
accessors.append(self.mac_lookup_short)
else:
wifi_getter = self.make_wifi_getter(ids)
headers.extend(['WiFi Status', 'WiFi AP'])
na = ''
accessors.extend([
self.mac_lookup_long,
lambda x: self.wifi_status_acc(wifi_getter(x), na),
lambda x: self.wifi_bss_acc(wifi_getter(x), na)
])
with self.make_table(headers=headers, accessors=accessors) as t:
t.print(data)
class Clients(base.ECMCommand):
name = 'clients'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.add_subcommand(List, default=True)
command_classes = [Clients]
| mit | -3,376,188,361,616,969,000 | 32.826816 | 76 | 0.490834 | false | 3.687576 | false | false | false |
josh-willis/pycbc | pycbc/inference/io/emcee_pt.py | 4 | 4898 | # Copyright (C) 2018 Collin Capano
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# self.option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Provides I/O support for emcee_pt.
"""
from __future__ import absolute_import
import numpy
from .base_sampler import BaseSamplerFile
from .base_mcmc import EnsembleMCMCMetadataIO
from .base_multitemper import (CommonMultiTemperedMetadataIO,
write_samples,
ensemble_read_raw_samples)
class EmceePTFile(EnsembleMCMCMetadataIO, CommonMultiTemperedMetadataIO,
BaseSamplerFile):
"""Class to handle file IO for the ``emcee`` sampler."""
name = 'emcee_pt_file'
@property
def betas(self):
"""The betas that were used."""
return self[self.sampler_group].attrs["betas"]
def write_samples(self, samples, **kwargs):
r"""Writes samples to the given file.
Calls :py:func:`base_multitemper.write_samples`. See that function for
details.
Parameters
----------
samples : dict
The samples to write. Each array in the dictionary should have
shape ntemps x nwalkers x niterations.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.write_samples`.
"""
write_samples(self, samples, **kwargs)
def read_raw_samples(self, fields, **kwargs):
r"""Base function for reading samples.
Calls :py:func:`base_multitemper.ensemble_read_raw_samples`. See that
function for details.
Parameters
-----------
fields : list
The list of field names to retrieve.
\**kwargs :
All other keyword arguments are passed to
:py:func:`base_multitemper.ensemble_read_raw_samples`.
Returns
-------
dict
A dictionary of field name -> numpy array pairs.
"""
return ensemble_read_raw_samples(self, fields, **kwargs)
def write_sampler_metadata(self, sampler):
"""Adds writing betas to MultiTemperedMCMCIO.
"""
super(EmceePTFile, self).write_sampler_metadata(sampler)
self[self.sampler_group].attrs["betas"] = sampler.betas
def read_acceptance_fraction(self, temps=None, walkers=None):
"""Reads the acceptance fraction.
Parameters
-----------
temps : (list of) int, optional
The temperature index (or a list of indices) to retrieve. If None,
acfs from all temperatures and all walkers will be retrieved.
walkers : (list of) int, optional
The walker index (or a list of indices) to retrieve. If None,
samples from all walkers will be obtained.
Returns
-------
array
Array of acceptance fractions with shape (requested temps,
requested walkers).
"""
group = self.sampler_group + '/acceptance_fraction'
if walkers is None:
wmask = numpy.ones(self.nwalkers, dtype=bool)
else:
wmask = numpy.zeros(self.nwalkers, dtype=bool)
wmask[walkers] = True
if temps is None:
tmask = numpy.ones(self.ntemps, dtype=bool)
else:
tmask = numpy.zeros(self.ntemps, dtype=bool)
tmask[temps] = True
return self[group][:][numpy.ix_(tmask, wmask)]
def write_acceptance_fraction(self, acceptance_fraction):
"""Write acceptance_fraction data to file.
Results are written to ``[sampler_group]/acceptance_fraction``; the
resulting dataset has shape (ntemps, nwalkers).
Parameters
-----------
acceptance_fraction : numpy.ndarray
Array of acceptance fractions to write. Must have shape
ntemps x nwalkers.
"""
# check
assert acceptance_fraction.shape == (self.ntemps, self.nwalkers), (
"acceptance fraction must have shape ntemps x nwalker")
group = self.sampler_group + '/acceptance_fraction'
try:
self[group][:] = acceptance_fraction
except KeyError:
# dataset doesn't exist yet, create it
self[group] = acceptance_fraction
| gpl-3.0 | 7,250,579,334,593,895,000 | 34.751825 | 78 | 0.621478 | false | 4.25543 | false | false | false |
davidparks21/qso_lya_detection_pipeline | dla_cnn/scripts/analyze_sl.py | 1 | 1607 | #!/usr/bin/env python
"""
Script to generate a PDF of desired sightline
Requires specdb for the spectral data
"""
import pdb
def parser(options=None):
import argparse
# Parse
parser = argparse.ArgumentParser(
description='Analyze the desired sightline and generate a PDF (v1.0)')
parser.add_argument("plate", type=int, help="Plate")
parser.add_argument("fiber", type=int, help="Fiber")
parser.add_argument("survey", type=str, help="SDSS_DR7, DESI_MOCK")
if options is None:
args = parser.parse_args()
else:
args = parser.parse_args(options)
return args
def main(args=None):
from pkg_resources import resource_filename
from dla_cnn.data_model.sdss_dr7 import process_catalog_dr7
from dla_cnn.data_model.desi_mocks import process_catalog_desi_mock
if args is None:
pargs = parser()
else:
pargs = args
default_model = resource_filename('dla_cnn', "models/model_gensample_v7.1")
if pargs.survey == 'SDSS_DR7':
process_catalog_dr7(kernel_size=400, model_checkpoint=default_model,
output_dir="./", pfiber=(pargs.plate, pargs.fiber),
make_pdf=True)
elif pargs.survey == 'DESI_MOCK':
process_catalog_desi_mock(kernel_size=400, model_checkpoint=default_model,
output_dir="./", pfiber=(pargs.plate, pargs.fiber),
make_pdf=True)
#
print("See predictions.json file for outputs")
# Command line execution
if __name__ == '__main__':
args = parser()
main(args)
| mit | 1,677,781,577,510,265,600 | 30.509804 | 82 | 0.625389 | false | 3.587054 | false | false | false |
jwalgran/otm-core | opentreemap/treemap/search_fields.py | 4 | 12102 | # -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop
import copy
import re
from treemap.DotDict import DotDict
from treemap.lib.object_caches import udf_defs
DEFAULT_MOBILE_SEARCH_FIELDS = DotDict({
'standard': [
{'identifier': 'species.id'},
{'identifier': 'tree.diameter'},
{'identifier': 'tree.height'}
],
'missing': [
{'identifier': 'species.id'},
{'identifier': 'tree.diameter'},
{'identifier': 'mapFeaturePhoto.id'}
]
})
DEFAULT_SEARCH_FIELDS = DotDict({
'general': [
{'identifier': 'mapFeature.updated_at'},
{'identifier': 'mapFeature.updated_by'}
],
'missing': [
{'identifier': 'species.id'},
{'identifier': 'tree.diameter'},
{'identifier': 'plot.owner_orig_id'},
{'identifier': 'mapFeaturePhoto.id'}
],
'Plot': [
{'identifier': 'plot.owner_orig_id'}
],
'Tree': [
{'identifier': 'tree.diameter'},
{'identifier': 'tree.date_planted'}
]
})
DEFAULT_MOBILE_API_FIELDS = (
{'header': ugettext_noop('Tree Information'),
'model': 'tree',
'field_keys': ['tree.species', 'tree.diameter',
'tree.height', 'tree.date_planted']},
{'header': ugettext_noop('Planting Site Information'),
'model': 'plot',
'field_keys': ['plot.width', 'plot.length']},
{'header': ugettext_noop('Stewardship'),
'collection_udf_keys': ['plot.udf:Stewardship', 'tree.udf:Stewardship'],
'sort_key': 'Date'}
)
DEFAULT_WEB_DETAIL_FIELDS = (
{'header': ugettext_noop('Tree Information'),
'model': 'tree',
'field_keys': ['tree.id', 'tree.species', 'tree.diameter', 'tree.height',
'tree.canopy_height', 'tree.date_planted',
'tree.date_removed'],
'collection_udf_keys': ['tree.udf:Stewardship']},
{'header': ugettext_noop('Planting Site Information'),
'model': 'plot',
'field_keys': ['plot.width', 'plot.length', 'plot.address_street',
'plot.address_city', 'plot.address_zip',
'plot.owner_orig_id'],
'collection_udf_keys': ['plot.udf:Stewardship']},
)
INSTANCE_FIELD_ERRORS = {
'no_field_groups': _('Must be a non-empty list'),
'group_has_no_header': _(
'Every mobile field group must have a non-empty header'),
'group_has_no_keys': _(
'All mobile field groups must have either a "field_keys" or '
'"collection_udf_keys" containing a non-empty list'),
'group_has_both_keys': _(
'Mobile field groups cannot contain both "field_keys" and '
'"collection_udf_keys" properties'),
'group_has_no_sort_key': _(
'Collection field groups must have a non-empty "sort_key" property '
'defined'),
'group_has_missing_cudf': _(
'Collection field groups can only contain existing custom collection '
'fields'),
'group_has_invalid_sort_key': _(
'The "sort_key" property of a collection field group must be the name '
'of a field on present on every collection field in the group'),
'duplicate_fields': _('Fields cannot be specified more than once'),
'group_missing_model': _(
'Normal field groups need a model property of either "tree" or "plot"'
),
'group_invalid_model': _(
'Normal field groups can only have keys that match their "model"'
),
'missing_field': _(
'Normal field groups may only contain existing fields. If you specify '
'a custom field, it cannot be a collection field'),
}
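# For reference, a normal field group that satisfies the rules above mirrors the
# entries in DEFAULT_MOBILE_API_FIELDS, e.g.:
#     {'header': 'Tree Information', 'model': 'tree',
#      'field_keys': ['tree.species', 'tree.diameter']}
# while a collection field group pairs 'collection_udf_keys' with a 'sort_key':
#     {'header': 'Stewardship', 'sort_key': 'Date',
#      'collection_udf_keys': ['plot.udf:Stewardship', 'tree.udf:Stewardship']}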
ALERT_IDENTIFIER_PATTERN = re.compile(r'udf:(tree|plot):(\d+)\..+')
def advanced_search_fields(instance, user):
from treemap.models import Tree, MapFeature # prevent circular import
def make_display_filter(feature_name):
if feature_name == 'Plot':
plural = _('empty planting sites')
feature_name = 'EmptyPlot'
else:
plural = get_plural_feature_name(feature_name)
return {
'label': _('Show %(models)s') % {'models': plural.lower()},
'model': feature_name
}
def get_plural_feature_name(feature_name):
if feature_name == 'Tree':
Feature = Tree
else:
Feature = MapFeature.get_subclass(feature_name)
return Feature.terminology(instance)['plural']
def get_visible_fields(field_infos, user):
visible_fields = []
for field_info in field_infos:
model, field_name = _parse_field_info(instance, field_info)
if model.field_is_visible(user, field_name):
visible_fields.append(field_info)
return visible_fields
fields = copy.deepcopy(instance.search_config)
fields = {category: get_visible_fields(field_infos, user)
for category, field_infos in fields.iteritems()}
for field_info in fields.get('missing', []):
_set_missing_search_label(instance, field_info)
field_info['search_type'] = 'ISNULL'
field_info['value'] = 'true'
fields['display'] = [make_display_filter('Tree'),
make_display_filter('Plot')]
fields['display'] += [
make_display_filter(feature)
for feature in sorted(instance.map_feature_types) if feature != 'Plot']
num = 0
for filters in fields.itervalues():
for field in filters:
# It makes styling easier if every field has an identifier
id = "%s_%s" % (field.get('identifier', ''), num)
id = id.replace(' ', '_')
field['id'] = id
num += 1
more = []
for feature_name in sorted(instance.map_feature_types):
if feature_name in fields and feature_name != 'Plot':
filters = fields.pop(feature_name)
filters = get_visible_fields(filters, user)
if len(filters) > 0:
more.append({
'name': feature_name,
'title': get_plural_feature_name(feature_name),
'fields': filters
})
fields['more'] = more
return fields
def mobile_search_fields(instance):
    from treemap.templatetags.form_extras import (field_type_label_choices,
                                                  ADD_BLANK_NEVER)

    search_fields = copy.deepcopy(instance.mobile_search_fields)

    for field in search_fields['standard']:
        identifier = field['identifier']
        alert_info = get_alert_field_info(identifier, instance)
        if alert_info is not None:
            field.update(alert_info)
            continue

        Model, field_name = _parse_field_info(instance, field)
        set_search_field_label(instance, field)
        field_type, __, __, choices = field_type_label_choices(
            Model, field_name, add_blank=ADD_BLANK_NEVER)

        if identifier == 'species.id':
            field['search_type'] = 'SPECIES'
        elif field_type in {'int', 'float'}:
            field['search_type'] = 'RANGE'
        elif field_type in {'date', 'datetime'}:
            field['search_type'] = 'DATERANGE'
        elif field_type == 'string':
            field['search_type'] = 'STRING'
        elif field_type == 'bool':
            field['search_type'] = 'BOOL'
        elif field_type == 'choice':
            field['search_type'] = 'CHOICE'
        elif field_type == 'multichoice':
            field['search_type'] = 'MULTICHOICE'

        if choices:
            field['choices'] = choices

    for field in search_fields['missing']:
        _set_missing_search_label(instance, field)

    return search_fields

def _set_missing_search_label(instance, field_info):
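    """Label a "missing data" search field as "Show Missing <field>"."""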
    label = get_search_field_label(instance, field_info)
    field_info['label'] = _('Show Missing %(field)s') % {'field': label}

def set_search_field_label(instance, field_info):
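    """Set ``field_info['label']`` if the configuration did not provide one."""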
    if 'label' not in field_info:
        field_info['label'] = get_search_field_label(instance, field_info)
    return field_info

def get_search_field_label(instance, field_info):
"""
Searches for missing data are controlled by fields, and those fields
need labels. Two wrinkles: 1) Fields like species.id and mapFeaturePhoto.id
need special handling. 2) Fields from all models are shown in the
"Missing Data" category, so prefix the field name with the model name.
"""
from treemap.templatetags.form_extras import field_type_label_choices
Model, field_name = _parse_field_info(instance, field_info)
if field_name == 'id':
if hasattr(Model, 'terminology'):
label = Model.terminology(instance)['plural']
else:
label = Model._meta.verbose_name_plural
else:
__, label, __, __ = field_type_label_choices(Model, field_name, '')
if hasattr(Model, 'terminology'):
prefix = force_text(Model.terminology(instance)['singular'])
else:
prefix = force_text(Model._meta.verbose_name)
label = force_text(label)
if not label.startswith(prefix):
label = "%s %s" % (prefix, label)
return label
def _parse_field_info(instance, field_info):
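    """Split an identifier such as 'species.id' into (model class, field name)."""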
    from treemap.util import get_model_for_instance

    model_name, field_name = field_info['identifier'].split('.', 2)
    Model = get_model_for_instance(model_name, instance)
    return Model, field_name

def get_udfc_search_fields(instance, user):
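    """Collect collection-UDF ("udfc") search configuration for a user.

    Returns a DotDict with a 'models' set plus nested keys of the form
    'udfc.<udf name>.ids.<model>' and 'udfc.<udf name>.models.<model>' for
    each collection UDF the user can at least read.
    """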
    from treemap.models import InstanceUser
    from treemap.udf import UDFModel
    from treemap.util import to_object_name, leaf_models_of_class
    from treemap.lib.perms import udf_write_level, READ, WRITE

    try:
        iu = instance.instanceuser_set.get(user__pk=user.pk)
    except InstanceUser.DoesNotExist:
        iu = None

    data = DotDict({'models': set(), 'udfc': {}})
    for clz in leaf_models_of_class(UDFModel):
        model_name = clz.__name__
        if model_name not in ['Tree'] + instance.map_feature_types:
            continue
        for k, v in clz.collection_udf_settings.items():
            udfds = (u for u in udf_defs(instance, model_name) if u.name == k)

            for udfd in udfds:
                if udf_write_level(iu, udfd) in (READ, WRITE):
                    _base_nest_path = 'udfc.%s.' % (to_object_name(k))
                    ids_nest_path = ('%sids.%s'
                                     % (_base_nest_path,
                                        to_object_name(model_name)))
                    models_nest_path = ('%smodels.%s' %
                                        (_base_nest_path,
                                         to_object_name(model_name)))
                    data[ids_nest_path] = udfd.pk
                    data[models_nest_path] = {
                        'udfd': udfd,
                        'fields': udfd.datatype_dict[0]['choices']
                    }
                    p = 'udfc.%s.' % to_object_name(k)
                    data[p + 'action_verb'] = v['action_verb']
                    data[p + 'range_field_key'] = v['range_field_key']
                    data[p + 'action_field_key'] = v['action_field_key']
                    data['models'] |= {clz}

    return data

def get_alert_field_info(identifier, instance):
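    """Return search-field info for an alert identifier, or None.

    Only identifiers matching ALERT_IDENTIFIER_PATTERN (a collection UDF on
    Tree or Plot) produce an alert search field.
    """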
    from treemap.util import get_model_for_instance

    alert_match = ALERT_IDENTIFIER_PATTERN.match(identifier)
    if alert_match:
        model_name, pk = alert_match.groups()
        Model = get_model_for_instance(model_name, instance)
        udf_def = next(udf for udf in udf_defs(instance) if udf.pk == int(pk))
        display_name = force_text(Model.terminology(instance)['singular'])

        return {
            'identifier': identifier,
            'search_type': 'DEFAULT',
            'default_identifier': udf_def.full_name,
            'label': 'Open %(model)s Alerts' % {'model': display_name},
        }
    return None
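

# Example (hypothetical) of how these builders are typically wired into a
# request context; ``request.instance`` and ``request.user`` are assumed
# names, not defined in this module:
#
#     fields = advanced_search_fields(request.instance, request.user)
#     mobile = mobile_search_fields(request.instance)
#     udfc = get_udfc_search_fields(request.instance, request.user)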
| gpl-3.0 | -6,159,999,500,896,712,000 | 35.451807 | 79 | 0.580648 | false | 3.841905 | false | false | false |