repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated |
---|---|---|---|---|---|---|---|---|---|---|
Eigenlabs/EigenD | plg_sampler2/sf2.py | 1 | 7476 |
#
# Copyright 2009 Eigenlabs Ltd. http://www.eigenlabs.com
#
# This file is part of EigenD.
#
# EigenD is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EigenD is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EigenD. If not, see <http://www.gnu.org/licenses/>.
#
import piw
from pi import riff
from pi.riff import List,Struct,StructList,String,Root
import math
import copy
import struct
def read_samples_indirect(name,pos,len):
return piw.create_samplearray(name,pos,len)
class Sample:
def read(self,c):
return c.read_indirect(read_samples_indirect)
GEN_STARTADDROFS=0
GEN_ENDADDROFS=1
GEN_STARTLOOPADDROFS=2
GEN_ENDLOOPADDROFS=3
GEN_STARTADDRCOARSEOFS=4
GEN_ENDADDRCOARSEOFS=12
GEN_PAN=17
GEN_INSTRUMENT=41
GEN_KEYRANGE=43
GEN_VELRANGE=44
GEN_INITIALATTENUATION=48
GEN_STARTLOOPADDRCOARSEOFS=45
GEN_ENDLOOPADDRCOARSEOFS=50
GEN_COARSETUNE=51
GEN_FINETUNE=52
GEN_SAMPLEID=53
GEN_SAMPLEMODE=54
GEN_OVERRIDEROOTKEY=58
GEN_AHDSR_DELAY=33
GEN_AHDSR_ATTACK=34
GEN_AHDSR_HOLD=35
GEN_AHDSR_DECAY=36
GEN_AHDSR_SUSTAIN=37
GEN_AHDSR_RELEASE=38
gen_defaults = {
GEN_AHDSR_DELAY: -12000.0,
GEN_AHDSR_ATTACK: -12000.0,
GEN_AHDSR_HOLD: -12000.0,
GEN_AHDSR_SUSTAIN: 0.0,
GEN_AHDSR_RELEASE: -12000.0,
GEN_AHDSR_DECAY: -12000.0,
GEN_COARSETUNE: 0.0,
GEN_FINETUNE: 0.0,
GEN_KEYRANGE: (0,127),
GEN_VELRANGE: (0,127),
}
gen_preset_ignore = ( GEN_KEYRANGE, GEN_VELRANGE )
class GenList:
def read1(self,r):
id = struct.unpack('<H',r[0:2])[0]
if id in (GEN_KEYRANGE,GEN_VELRANGE):
return (id,struct.unpack('BB',r[2:4]))
else:
return (id,struct.unpack('<h',r[2:4])[0])
def read(self,c):
s = []
while c.remain>=4:
s.append(self.read1(c.read(4)))
return s
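# Hedged example (editor's addition, not part of the original EigenD source):
# each generator record parsed by GenList.read1() is 4 bytes -- a
# little-endian uint16 generator id followed either by an unsigned (lo, hi)
# byte pair (for key/velocity ranges) or by a signed int16 amount.
def _demo_genlist_read1():
    rec = '\x2b\x00\x24\x54'  # GEN_KEYRANGE (43) spanning notes 36..84
    assert GenList().read1(rec) == (GEN_KEYRANGE, (36, 84))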
SF2 = Root('sfbk', List(
INFO=List(
ifil=Struct('<HH'), isng=String(), INAM=String(), irom=String(),
iver=Struct('<HH'), ICRD=String(), IENG=String(), IPRD=String(),
ICOP=String(), ICMT=String(65536), ISFT=String()),
sdta=List(smpl=Sample()),
pdta=List(
phdr=StructList('<20sHHHLLL'), pbag=StructList('<HH'), pmod=StructList('<HHhHH'), pgen=GenList(),
inst=StructList('<20sH'), ibag=StructList('<HH'), imod=StructList('<HHhHH'), igen=GenList(),
shdr=StructList('<20sLLLLLBbHH'))))
def mtof(m,transpose):
m = float(m)-float(transpose)
return 440.0*math.pow(2.0,(m-69.0)/12.0)
def mtov(m):
return float(m)/127.0
def etot(m):
m = pow(2.0,float(m)/1200.0)
if m<0.01: m=0
return m
def etos(m):
if m <= 0: return 1.0
if m >= 1000: return 0.0
return 1.0-(float(m)/1000.0)
def etop(m):
return float(m)/500.0
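# Hedged sanity checks (editor's addition, not part of the original EigenD
# source): mtof() maps MIDI note numbers to Hz, etot() maps SoundFont
# timecents to seconds, etos() maps the 0..1000 sustain attenuation to a
# 0..1 level and etop() maps the -500..500 pan range to -1..1.
def _demo_unit_conversions():
    assert abs(mtof(69, 0) - 440.0) < 1e-9   # A4, no transpose
    assert abs(mtof(81, 0) - 880.0) < 1e-9   # one octave up doubles the frequency
    assert etot(0) == 1.0                    # 0 timecents is one second
    assert etot(-12000) == 0                 # the -12000 default collapses to "instant"
    assert etos(500) == 0.5                  # half sustain level
    assert etop(500) == 1.0                  # hard right pan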
class ZoneBuilder:
def __init__(self,bag,gen,mod,index,base=None,add=None):
self.gen = {}
if base:
self.gen.update(base.gen)
gs,ms = bag[index]
ge,me = bag[index+1]
for g in range(gs,ge): self.__addgen(gen[g])
for m in range(ms,me): self.__addmod(mod[m])
if add:
for k,v in add.gen.iteritems():
if k in gen_preset_ignore:
continue
o = self.genget(k)
if type(v)==tuple:
#self.gen[k] = (max(o[0],v[0]),min(o[1],v[1]))
self.gen[k] = v
else:
self.gen[k] = o+v
def __addgen(self,g):
self.gen[g[0]] = g[1]
def __addmod(self,m):
pass
def __adjustpos(self,val,fg,cg):
return val+self.genget(fg)+(32768*self.genget(cg))
def genget(self,k):
return self.gen.get(k,gen_defaults.get(k,0))
def zone(self,smpl,shdr,transpose):
kr = self.genget(GEN_KEYRANGE)
vr = self.genget(GEN_VELRANGE)
de = etot(self.genget(GEN_AHDSR_DELAY))
a = etot(self.genget(GEN_AHDSR_ATTACK))
h = etot(self.genget(GEN_AHDSR_HOLD))
dc = etot(self.genget(GEN_AHDSR_DECAY))
sus = etos(self.genget(GEN_AHDSR_SUSTAIN))
r = etot(self.genget(GEN_AHDSR_RELEASE))
p = etop(self.genget(GEN_PAN))
n,s,e,ls,le,sr,op,_,_,_ = shdr[self.gen[GEN_SAMPLEID]]
rk = float(self.gen.get(GEN_OVERRIDEROOTKEY,op))
rk -= float(self.genget(GEN_COARSETUNE))
rk -= (float(self.genget(GEN_FINETUNE))/100.0)
rf = mtof(rk,transpose)
looping = False
if self.gen.has_key(GEN_SAMPLEMODE):
if self.gen[GEN_SAMPLEMODE] != 0:
looping = True
start = self.__adjustpos(s,GEN_STARTADDROFS,GEN_STARTADDRCOARSEOFS)
end = self.__adjustpos(e,GEN_ENDADDROFS,GEN_ENDADDRCOARSEOFS)
if looping:
loopstart = self.__adjustpos(ls,GEN_STARTLOOPADDROFS,GEN_STARTLOOPADDRCOARSEOFS)
loopend = self.__adjustpos(le,GEN_ENDLOOPADDROFS,GEN_ENDLOOPADDRCOARSEOFS)
else:
loopstart = 0
loopend = 0
attcb = float(self.gen.get(GEN_INITIALATTENUATION,0))
att = math.pow(10.0,-attcb/200.0)
smpl = piw.create_sample(smpl,start,end,loopstart,loopend,sr,rf,att)
zz = piw.create_zone(mtof(float(kr[0])-0.5,transpose), mtof(float(kr[1])+0.5,transpose), mtov(float(vr[0])-0.5), mtov(float(vr[1])+0.5),de,a,h,dc,sus,r,p,smpl)
return zz
def __str__(self):
return str(self.gen)
def load_soundfont(file,bk,pre,transpose):
print 'loading bank',bk,'preset',pre,'from',file
f = open(file,'rb',0)
sf = SF2.read(f,name=file)
f.close()
pbs = None
pbe = None
for (n,p,b,i,l,g,m) in sf['pdta']['phdr']:
if pbs is not None:
pbe = i
break
if p==pre and b==bk:
pbs = i
if pbs is None or pbe is None:
raise RuntimeError('preset %d bank %d not found in soundfont %s' % (pre,bk,file))
p = piw.create_preset()
gpzb = None
gizb = None
for pi in range(pbs,pbe):
pzb = ZoneBuilder(sf['pdta']['pbag'],sf['pdta']['pgen'],sf['pdta']['pmod'],pi,base=gpzb)
inst = pzb.gen.get(GEN_INSTRUMENT)
if inst is not None:
for ii in range(sf['pdta']['inst'][inst][1],sf['pdta']['inst'][inst+1][1]):
izb = ZoneBuilder(sf['pdta']['ibag'],sf['pdta']['igen'],sf['pdta']['imod'],ii,base=gizb,add=pzb)
if izb.gen.has_key(GEN_SAMPLEID):
p.add_zone(izb.zone(sf['sdta']['smpl'],sf['pdta']['shdr'],transpose))
else:
if gizb is None:
gizb = izb
else:
if gpzb is None:
gpzb = pzb
return p
SF2info = Root('sfbk', List(pdta=List(phdr=StructList('<20sHH14x'))))
def __trim(s):
if s.count('\0'):
return s[:s.index('\0')]
return s
def sf_info(file):
file = open(file,'rb',0)
data = SF2info.read(file)
file.close()
for n,p,b in data['pdta']['phdr'][:-1]:
yield __trim(n),p,b
| gpl-3.0 | 9,204,759,465,939,505,000 | 27.753846 | 167 | 0.58534 | false |
qiyuangong/APA | anatomizer.py | 1 | 3783 | import random, heapq
import pdb
# by Qiyuan Gong
# [email protected]
# @INPROCEEDINGS{
# author = {Xiao, Xiaokui and Tao, Yufei},
# title = {Anatomy: simple and effective privacy preservation},
# booktitle = {Proceedings of the 32nd international conference on Very large data
# bases},
# year = {2006},
# series = {VLDB '06},
# pages = {139--150},
# publisher = {VLDB Endowment},
# acmid = {1164141},
# location = {Seoul, Korea},
# numpages = {12}
# }
_DEBUG = True
class SABucket(object):
def __init__(self, data_index, index):
self.member_index = data_index[:]
self.index = index
def pop_element(self):
"""pop an element from SABucket
"""
return self.member_index.pop()
class Group(object):
def __init__(self):
self.index = 0
self.member_index = []
self.checklist = set()
def add_element(self, record_index, index):
"""add element pair (record, index) to Group
"""
self.member_index.append(record_index)
self.checklist.add(index)
def check_index(self, index):
"""Check if index is in checklist
"""
if index in self.checklist:
return True
return False
def list_to_str(value_list, sep=';'):
"""covert sorted str list (sorted by default) to str
value (splited by sep). This fuction is value safe, which means
value_list will not be changed.
"""
temp = value_list[:]
return sep.join(temp)
def anatomizer(data, L):
"""
    Only one SA is supported in anatomy.
    Separate the grouped members into a QIT (quasi-identifier table) and
    an SAT (sensitive-attribute table), using a heap to pick the l largest
    buckets.
    L denotes the l in l-diversity.
    data is a list of records, i.e. [qi1, qi2, sa]
"""
groups = []
buckets = {}
result = []
suppress = []
h = []
if _DEBUG:
print '*' * 10
print "Begin Anatomizer!"
print "L=%d" % L
# Assign SA into buckets
for i, temp in enumerate(data):
# convert list to str
list_temp = list_to_str(temp[-1])
try:
buckets[list_temp].append(i)
        except KeyError:
buckets[list_temp] = [i]
# group stage
# use heap to sort buckets
for i, temp in enumerate(buckets.values()):
# push to heap reversely
pos = len(temp) * -1
if pos == 0:
continue
heapq.heappush(h, (pos, SABucket(temp, i)))
while len(h) >= L:
newgroup = Group()
length_list = []
SAB_list = []
        # choose l largest buckets
for i in range(L):
(length, temp) = heapq.heappop(h)
length_list.append(length)
SAB_list.append(temp)
# pop a element from chosen buckets
for i in range(L):
temp = SAB_list[i]
length = length_list[i]
newgroup.add_element(temp.pop_element(), temp.index)
length += 1
if length == 0:
continue
# push new tuple to heap
heapq.heappush(h, (length, temp))
groups.append(newgroup)
# residue-assign stage
while len(h):
(length, temp) = heapq.heappop(h)
index = temp.index
while temp.member_index:
for g in groups:
if g.check_index(index) == False:
g.add_element(temp.pop_element(), index)
break
else:
suppress.extend(temp.member_index[:])
break
# transform result
for i, t in enumerate(groups):
t.index = i
result.append(t.member_index[:])
if _DEBUG:
print 'NO. of Suppress after anatomy = %d' % len(suppress)
print 'NO. of groups = %d' % len(result)
return result
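# Hedged usage sketch (editor's addition, not from the original repository):
# each record is [qi1, qi2, ..., sa] where the last element is the list of
# sensitive-attribute values; anatomizer() returns groups of record indexes
# with at least L distinct SA values per group.
if __name__ == '__main__':
    toy_data = [
        [['30'], ['m'], ['flu']],
        [['31'], ['f'], ['cold']],
        [['32'], ['m'], ['hiv']],
        [['33'], ['f'], ['flu']],
    ]
    print anatomizer(toy_data, 2)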
| mit | 1,053,349,836,134,681,500 | 25.089655 | 84 | 0.548506 | false |
xlqian/navitia | source/jormungandr/jormungandr/scenarios/ridesharing/ridesharing_journey.py | 1 | 2827 | # Copyright (c) 2001-2018, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# channel `#navitia` on riot https://riot.im/app/#/room/#navitia:matrix.org
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
class Gender(object):
"""
Used as an enum
"""
UNKNOWN = 0
MALE = 1
FEMALE = 2
class Individual(object):
# https://stackoverflow.com/a/28059785/1614576
__slots__ = ('alias', 'gender', 'image', 'rate', 'rate_count')
def __init__(self, alias, gender, image, rate, rate_count):
self.alias = alias
self.gender = gender
self.image = image
self.rate = rate
self.rate_count = rate_count
class Place(object):
__slots__ = ('addr', 'lat', 'lon')
def __init__(self, addr, lat, lon):
self.addr = addr
self.lat = lat
self.lon = lon
class MetaData(object):
__slots__ = ('system_id', 'network', 'rating_scale_min', 'rating_scale_max')
def __init__(self, system_id, network, rating_scale_min, rating_scale_max):
self.system_id = system_id
self.network = network
self.rating_scale_min = rating_scale_min
self.rating_scale_max = rating_scale_max
class RidesharingJourney(object):
__slots__ = (
'metadata',
'distance',
'shape', # a list of type_pb2.GeographicalCoord()
'ridesharing_ad',
'pickup_place',
'dropoff_place',
'pickup_date_time',
'dropoff_date_time',
# driver will be Individual
'driver',
'price',
'currency', # "centime" (EURO cents) is the preferred currency (price is filled accordingly)
'total_seats',
'available_seats',
)
| agpl-3.0 | -8,374,270,853,840,085,000 | 30.411111 | 101 | 0.651928 | false |
acca90/django-tests | cello/produto/migrations/0001_initial.py | 1 | 1354 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-11 00:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Produto',
fields=[
('prod_codigo', models.BigIntegerField(db_column='prod_codigo', primary_key=True, serialize=False)),
('prod_alias', models.CharField(db_column='prod_alias', max_length=50)),
('prod_descricao', models.CharField(db_column='prod_descricao', max_length=255)),
('prod_valor_venda', models.DecimalField(db_column='prod_valor_venda', decimal_places=2, max_digits=13)),
('prod_valor_compra', models.DecimalField(db_column='prod_valor_compra', decimal_places=2, max_digits=13)),
('prod_peso_b', models.DecimalField(db_column='prod_peso_b', decimal_places=2, max_digits=13)),
('prod_peso_l', models.DecimalField(db_column='prod_peso_l', decimal_places=2, max_digits=13)),
],
options={
'verbose_name': 'Produto',
'verbose_name_plural': 'Produtos',
'managed': True,
'db_table': 'produto',
},
),
]
| mit | 7,758,737,332,258,354,000 | 38.823529 | 123 | 0.573855 | false |
coxmediagroup/dolphin | dolphin/testutils.py | 1 | 4503 | import os
from sys import stdout, stderr
from contextlib import contextmanager
from django.db.models import get_apps
from django.utils import simplejson as sj
from django.core import serializers
from django.conf import settings
from django.utils.itercompat import product
from .middleware import LocalStoreMiddleware
@contextmanager
def set_active(key, val):
"""Allows a flag to be switched to enabled"""
overrides = LocalStoreMiddleware.local.setdefault('overrides', {})
overrides[key] = val
yield
del overrides[key]
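# Hedged usage sketch (editor's addition, not part of the dolphin test suite;
# the flag name is illustrative): the override only exists for the duration
# of the "with" block and is removed again on exit.
def _example_set_active_usage():
    with set_active('example_flag', True):
        # code exercised here sees 'example_flag' as enabled through the
        # LocalStoreMiddleware overrides dict
        pass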
def load_redis_fixtures(fixture_labels, backend):
    # taken and modified from django.core.management.commands.loaddata
# Keep a count of the installed objects and fixtures
# changes marked by # + or # - and endchanges for +
# - removed intro code
fixture_count = 0
loaded_object_count = 0
fixture_object_count = 0
models = set()
humanize = lambda dirname: "'%s'" % dirname if dirname else 'absolute path'
# - removed cursor code, compression types
app_module_paths = []
for app in get_apps():
if hasattr(app, '__path__'):
# It's a 'models/' subpackage
for path in app.__path__:
app_module_paths.append(path)
else:
# It's a models.py module
app_module_paths.append(app.__file__)
app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]
# - remove try, connection constraint
for fixture_label in fixture_labels:
parts = fixture_label.split('.')
if len(parts) == 1: # - remove compression
fixture_name = parts[0]
formats = serializers.get_public_serializer_formats()
else:
fixture_name, format = '.'.join(parts[:-1]), parts[-1]
if format in serializers.get_public_serializer_formats():
formats = [format]
else:
formats = []
# - remove formats
if os.path.isabs(fixture_name):
fixture_dirs = [fixture_name]
else:
fixture_dirs = app_fixtures + list(settings.FIXTURE_DIRS) + ['']
for fixture_dir in fixture_dirs:
# - remove verbosity
label_found = False
# - remove compression formats, verbosity
for format in formats:
file_name = '.'.join(
p for p in [
fixture_name, format
]
if p
)
full_path = os.path.join(fixture_dir, file_name)
# - remove compression method
try:
fixture = open(full_path, 'r')
except IOError:
# - remove verbosity
pass
else:
try:
if label_found:
stderr.write("Multiple fixtures named '%s' in %s. Aborting.\n" %
(fixture_name, humanize(fixture_dir)))
# - remove commit
return
fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
# - remove verbosity
# - remove generalized loading of fixture
# + customized loading of fixture
objects = sj.load(fixture)
for obj in objects:
objects_in_fixture += 1
#begin customization
if obj['model'] == 'dolphin.featureflag':
fields = obj['fields']
key = fields['name']
backend.update(key, fields)
#endchanges
loaded_object_count += loaded_objects_in_fixture
fixture_object_count += objects_in_fixture
label_found = True
finally:
fixture.close()
# If the fixture we loaded contains 0 objects, assume that an
# error was encountered during fixture loading.
if objects_in_fixture == 0:
# - remove verbosity
return
# - remove everything else
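# Hedged example (editor's addition): a minimal fixture accepted by the
# loader above -- only 'dolphin.featureflag' entries are pushed into the
# backend, keyed by their 'name' field. Field names other than 'name' are
# illustrative.
_EXAMPLE_FEATUREFLAG_FIXTURE = [
    {
        "model": "dolphin.featureflag",
        "fields": {"name": "example_flag", "enabled": True},
    },
]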
| mit | 5,224,790,747,942,182,000 | 34.456693 | 97 | 0.50433 | false |
cmollet/macro-chef | yummly.py | 1 | 3288 | import requests
import time
try:
import simplejson as json
except ImportError:
import json
from pymongo import Connection
from paleo import paleo_ingredients, excludedIngredients
app_key = open('app.key').read().strip()
app_id = open('app_id.key').read().strip()
def mongoconn(name='bespin'):
return Connection()[name]
# Constants
BMR = 2142.87
multipliers = {
'sedentary': 1.2,
'lightly active': 1.375,
'moderately active': 1.55,
'very active': 1.725,
'extra active': 1.9
}
def genmacros():
macros = dict()
macros['max_carbs'] = 150
macros['max_sugar'] = 50
macros['min_fiber'] = 50
macros['min_protein'] = 200
macros['max_cals'] = BMR * multipliers['moderately active']
macros['rest_cals'] = 2630
macros['rest_carbs'] = 226.3
macros['rest_fat'] = 100.6
macros['workout_cals'] = 3945
macros['workout_carbs'] = 390.6
macros['workout_fat'] = 173.6
return macros
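# Editor's note (worked example): with BMR = 2142.87 and the 'moderately
# active' multiplier of 1.55, max_cals works out to roughly 3321.4 kcal;
# the rest_*/workout_* figures above are hard-coded targets rather than
# values derived from BMR.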
def makeurl(q, course='Main+Dishes'):
URL = 'http://api.yummly.com/v1/api/recipes?_app_id=%s&_app_key=%s&allowedAllergy[]=Gluten-Free&allowedAllergy[]=Dairy-Free&allowedAllergy[]=Peanut-Free&allowedAllergy[]=Soy-Free&allowedAllergy[]=Wheat-Free' % (app_id, app_key)
for i in excludedIngredients:
URL += '&excludedIngredient[]=%s' % i
URL += '&allowedCourse[]=%s' % course
URL += '&q=%s' % q
return URL
def load_recipes(q):
# Generate URL based on query and course type
# Get (up to) 10 recipes
# run getrecipe()
r = requests.get(url=makeurl(q))
matches = json.loads(r.text)
recipes = []
for i in matches['matches']:
time.sleep(2)
r = getrecipe(i['id'])
if r:
recipes.append(r)
return recipes
def getrecipe(recipe_id):
URL = 'http://api.yummly.com/v1/api/recipe/'+ recipe_id + '?app_id=%s&_app_key=%s' % (app_id, app_key)
r = requests.get(URL, headers = {'X-Yummly-App-ID': app_id, 'X-Yummly-App-Key': app_key})
recipe = json.loads(r.text)
if recipe['nutritionEstimates']:
return recipe
def nutristats(recipe):
macros = { 'cals': 0, 'protein': 0, 'carbs': 0, 'fat': 0, 'fiber' : 0}
for i in recipe['nutritionEstimates']:
if i['attribute'] == 'ENERC_KCAL':
macros['cals'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'PROCNT':
macros['protein'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'CHOCDF':
macros['carbs'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'FAT':
macros['fat'] += recipe['numberOfServings'] * i['value']
elif i['attribute'] == 'FIBTG':
macros['fiber'] += recipe['numberOfServings'] * i['value']
return macros
def macro_resolve(recipe, day):
macros = genmacros()
rec_macros = nutristats(recipe)
new_macros = {}
new_macros['cals'] = macros[day+'_cals'] - rec_macros['cals']
new_macros['protein'] = macros['min_protein'] - rec_macros['protein']
new_macros['carbs'] = macros[day+'_carbs'] - rec_macros['carbs']
new_macros['fat'] = macros[day+'_fat'] - rec_macros['fat']
new_macros['fiber'] = macros['min_fiber'] - rec_macros['fiber']
return new_macros
| mit | -4,144,650,630,414,836,000 | 32.896907 | 231 | 0.601277 | false |
shendo/peerz | peerz/examples/visualise.py | 1 | 1704 | # Peerz - P2P python library using ZeroMQ sockets and gevent
# Copyright (C) 2014-2015 Steve Henderson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import tempfile
import urllib
import urllib2
import webbrowser
from peerz.persistence import LocalStorage
def get_tree(root, port):
local = LocalStorage(root, port)
return local.fetch('nodetree')
def render_graph(dot):
with tempfile.NamedTemporaryFile(suffix='.png', delete=False) as tmp:
data = urllib.urlencode({'cht': 'gv:dot', 'chl': dot})
print data
u = urllib2.urlopen('http://chart.apis.google.com/chart', data)
tmp.write(u.read())
tmp.close()
webbrowser.open_new_tab(tmp.name)
if __name__ == '__main__':
"""
Simple tool to read the state files from running helloworld example
and plot the routing tree for the chosen node using google charts.
"""
root = '/tmp/testing'
port = 7111
if len(sys.argv) > 2:
port = int(sys.argv[2])
if len(sys.argv) > 1:
root = sys.argv[1]
dot = get_tree(root, port).visualise()
render_graph(dot)
| gpl-3.0 | 1,749,261,066,399,993,300 | 33.08 | 73 | 0.692488 | false |
nathanielvarona/airflow | tests/providers/qubole/operators/test_qubole_check.py | 1 | 7409 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from datetime import datetime
from unittest import mock
import pytest
from qds_sdk.commands import HiveCommand
from airflow.exceptions import AirflowException
from airflow.models import DAG
from airflow.providers.qubole.hooks.qubole import QuboleHook
from airflow.providers.qubole.hooks.qubole_check import QuboleCheckHook
from airflow.providers.qubole.operators.qubole_check import (
QuboleCheckOperator,
QuboleValueCheckOperator,
SQLCheckOperator,
SQLValueCheckOperator,
_QuboleCheckOperatorMixin,
)
# pylint: disable=unused-argument
@pytest.mark.parametrize(
"operator_class, kwargs, parent_check_operator",
[
(QuboleCheckOperator, dict(sql='Select * from test_table'), SQLCheckOperator),
(
QuboleValueCheckOperator,
dict(sql='Select * from test_table', pass_value=95),
SQLValueCheckOperator,
),
],
)
class TestQuboleCheckMixin:
def setup(self):
self.task_id = 'test_task'
def __construct_operator(self, operator_class, **kwargs):
dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
return operator_class(task_id=self.task_id, dag=dag, command_type='hivecmd', **kwargs)
def test_get_hook_with_context(self, operator_class, kwargs, parent_check_operator):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
assert isinstance(operator.get_hook(), QuboleCheckHook)
context = {'exec_date': 'today'}
operator._hook_context = context
hook = operator.get_hook()
assert hook.context == context
@mock.patch.object(_QuboleCheckOperatorMixin, "get_db_hook")
@mock.patch.object(_QuboleCheckOperatorMixin, "get_hook")
def test_get_db_hook(
self, mock_get_hook, mock_get_db_hook, operator_class, kwargs, parent_check_operator
):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
operator.get_db_hook()
mock_get_db_hook.assert_called_once()
operator.get_hook()
mock_get_hook.assert_called_once()
def test_execute(self, operator_class, kwargs, parent_check_operator):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
with mock.patch.object(parent_check_operator, 'execute') as mock_execute:
operator.execute()
mock_execute.assert_called_once()
@mock.patch('airflow.providers.qubole.operators.qubole_check.handle_airflow_exception')
def test_execute_fail(self, mock_handle_airflow_exception, operator_class, kwargs, parent_check_operator):
operator = self.__construct_operator(operator_class=operator_class, **kwargs)
with mock.patch.object(parent_check_operator, 'execute') as mock_execute:
mock_execute.side_effect = AirflowException()
operator.execute()
mock_execute.assert_called_once()
mock_handle_airflow_exception.assert_called_once()
class TestQuboleValueCheckOperator(unittest.TestCase):
def setUp(self):
self.task_id = 'test_task'
self.conn_id = 'default_conn'
def __construct_operator(self, query, pass_value, tolerance=None, results_parser_callable=None):
dag = DAG('test_dag', start_date=datetime(2017, 1, 1))
return QuboleValueCheckOperator(
dag=dag,
task_id=self.task_id,
conn_id=self.conn_id,
query=query,
pass_value=pass_value,
results_parser_callable=results_parser_callable,
command_type='hivecmd',
tolerance=tolerance,
)
def test_pass_value_template(self):
pass_value_str = "2018-03-22"
operator = self.__construct_operator('select date from tab1;', "{{ ds }}")
result = operator.render_template(operator.pass_value, {'ds': pass_value_str})
assert operator.task_id == self.task_id
assert result == pass_value_str
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_pass(self, mock_get_hook):
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [10]
mock_get_hook.return_value = mock_hook
query = 'select value from tab1 limit 1;'
operator = self.__construct_operator(query, 5, 1)
operator.execute(None)
mock_hook.get_first.assert_called_once_with(query)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_assertion_fail(self, mock_get_hook):
mock_cmd = mock.Mock()
mock_cmd.status = 'done'
mock_cmd.id = 123
mock_cmd.is_success = mock.Mock(return_value=HiveCommand.is_success(mock_cmd.status))
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_hook.cmd = mock_cmd
mock_get_hook.return_value = mock_hook
operator = self.__construct_operator('select value from tab1 limit 1;', 5, 1)
with pytest.raises(AirflowException, match='Qubole Command Id: ' + str(mock_cmd.id)):
operator.execute()
mock_cmd.is_success.assert_called_once_with(mock_cmd.status)
@mock.patch.object(QuboleValueCheckOperator, 'get_hook')
def test_execute_assert_query_fail(self, mock_get_hook):
mock_cmd = mock.Mock()
mock_cmd.status = 'error'
mock_cmd.id = 123
mock_cmd.is_success = mock.Mock(return_value=HiveCommand.is_success(mock_cmd.status))
mock_hook = mock.Mock()
mock_hook.get_first.return_value = [11]
mock_hook.cmd = mock_cmd
mock_get_hook.return_value = mock_hook
operator = self.__construct_operator('select value from tab1 limit 1;', 5, 1)
with pytest.raises(AirflowException) as ctx:
operator.execute()
assert 'Qubole Command Id: ' not in str(ctx.value)
mock_cmd.is_success.assert_called_once_with(mock_cmd.status)
@mock.patch.object(QuboleCheckHook, 'get_query_results')
@mock.patch.object(QuboleHook, 'execute')
def test_results_parser_callable(self, mock_execute, mock_get_query_results):
mock_execute.return_value = None
pass_value = 'pass_value'
mock_get_query_results.return_value = pass_value
results_parser_callable = mock.Mock()
results_parser_callable.return_value = [pass_value]
operator = self.__construct_operator(
'select value from tab1 limit 1;', pass_value, None, results_parser_callable
)
operator.execute()
results_parser_callable.assert_called_once_with([pass_value])
| apache-2.0 | 7,868,135,222,770,688,000 | 36.419192 | 110 | 0.671751 | false |
Telestream/telestream-cloud-python-sdk | telestream_cloud_qc_sdk/test/test_kag_size_test.py | 1 | 1406 | # coding: utf-8
"""
Qc API
Qc API # noqa: E501
The version of the OpenAPI document: 3.0.0
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import datetime
import telestream_cloud_qc
from telestream_cloud_qc.models.kag_size_test import KagSizeTest # noqa: E501
from telestream_cloud_qc.rest import ApiException
class TestKagSizeTest(unittest.TestCase):
"""KagSizeTest unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def make_instance(self, include_optional):
"""Test KagSizeTest
            include_optional is a boolean, when False only required
params are included, when True both required and
optional params are included """
# model = telestream_cloud_qc.models.kag_size_test.KagSizeTest() # noqa: E501
if include_optional :
return KagSizeTest(
size = 56,
reject_on_error = True,
checked = True
)
else :
return KagSizeTest(
)
def testKagSizeTest(self):
"""Test KagSizeTest"""
inst_req_only = self.make_instance(include_optional=False)
inst_req_and_optional = self.make_instance(include_optional=True)
if __name__ == '__main__':
unittest.main()
| mit | -818,138,193,350,723,100 | 24.563636 | 86 | 0.622333 | false |
AvishaySebban/NTM-Monitoring | ansible/psutil-3.0.1/psutil/__init__.py | 1 | 66350 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network)
in Python.
"""
from __future__ import division
import collections
import errno
import functools
import os
import signal
import subprocess
import sys
import time
try:
import pwd
except ImportError:
pwd = None
from . import _common
from ._common import memoize
from ._compat import callable, long
from ._compat import PY3 as _PY3
from ._common import (STATUS_RUNNING, # NOQA
STATUS_SLEEPING,
STATUS_DISK_SLEEP,
STATUS_STOPPED,
STATUS_TRACING_STOP,
STATUS_ZOMBIE,
STATUS_DEAD,
STATUS_WAKING,
STATUS_LOCKED,
STATUS_IDLE, # bsd
STATUS_WAITING) # bsd
from ._common import (CONN_ESTABLISHED,
CONN_SYN_SENT,
CONN_SYN_RECV,
CONN_FIN_WAIT1,
CONN_FIN_WAIT2,
CONN_TIME_WAIT,
CONN_CLOSE,
CONN_CLOSE_WAIT,
CONN_LAST_ACK,
CONN_LISTEN,
CONN_CLOSING,
CONN_NONE)
from ._common import (NIC_DUPLEX_FULL, # NOQA
NIC_DUPLEX_HALF,
NIC_DUPLEX_UNKNOWN)
if sys.platform.startswith("linux"):
from . import _pslinux as _psplatform
from ._pslinux import (IOPRIO_CLASS_NONE, # NOQA
IOPRIO_CLASS_RT,
IOPRIO_CLASS_BE,
IOPRIO_CLASS_IDLE)
# Linux >= 2.6.36
if _psplatform.HAS_PRLIMIT:
from ._psutil_linux import (RLIM_INFINITY, # NOQA
RLIMIT_AS,
RLIMIT_CORE,
RLIMIT_CPU,
RLIMIT_DATA,
RLIMIT_FSIZE,
RLIMIT_LOCKS,
RLIMIT_MEMLOCK,
RLIMIT_NOFILE,
RLIMIT_NPROC,
RLIMIT_RSS,
RLIMIT_STACK)
# Kinda ugly but considerably faster than using hasattr() and
# setattr() against the module object (we are at import time:
# speed matters).
from . import _psutil_linux
try:
RLIMIT_MSGQUEUE = _psutil_linux.RLIMIT_MSGQUEUE
except AttributeError:
pass
try:
RLIMIT_NICE = _psutil_linux.RLIMIT_NICE
except AttributeError:
pass
try:
RLIMIT_RTPRIO = _psutil_linux.RLIMIT_RTPRIO
except AttributeError:
pass
try:
RLIMIT_RTTIME = _psutil_linux.RLIMIT_RTTIME
except AttributeError:
pass
try:
RLIMIT_SIGPENDING = _psutil_linux.RLIMIT_SIGPENDING
except AttributeError:
pass
del _psutil_linux
elif sys.platform.startswith("win32"):
from . import _pswindows as _psplatform
from ._psutil_windows import (ABOVE_NORMAL_PRIORITY_CLASS, # NOQA
BELOW_NORMAL_PRIORITY_CLASS,
HIGH_PRIORITY_CLASS,
IDLE_PRIORITY_CLASS,
NORMAL_PRIORITY_CLASS,
REALTIME_PRIORITY_CLASS)
from ._pswindows import CONN_DELETE_TCB # NOQA
elif sys.platform.startswith("darwin"):
from . import _psosx as _psplatform
elif sys.platform.startswith("freebsd"):
from . import _psbsd as _psplatform
elif sys.platform.startswith("sunos"):
from . import _pssunos as _psplatform
from ._pssunos import (CONN_IDLE, # NOQA
CONN_BOUND)
else:
raise NotImplementedError('platform %s is not supported' % sys.platform)
__all__ = [
# exceptions
"Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
"TimeoutExpired",
# constants
"version_info", "__version__",
"STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
"STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
"STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
"CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
"CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
"CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
"AF_LINK",
"NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
# classes
"Process", "Popen",
# functions
"pid_exists", "pids", "process_iter", "wait_procs", # proc
"virtual_memory", "swap_memory", # memory
"cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
"net_io_counters", "net_connections", "net_if_addrs", # network
"net_if_stats",
"disk_io_counters", "disk_partitions", "disk_usage", # disk
"users", "boot_time", # others
]
__all__.extend(_psplatform.__extra__all__)
__author__ = "Giampaolo Rodola'"
__version__ = "3.0.1"
version_info = tuple([int(num) for num in __version__.split('.')])
AF_LINK = _psplatform.AF_LINK
_TOTAL_PHYMEM = None
_POSIX = os.name == 'posix'
_WINDOWS = os.name == 'nt'
_timer = getattr(time, 'monotonic', time.time)
# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
if (int(__version__.replace('.', '')) !=
getattr(_psplatform.cext, 'version', None)):
msg = "version conflict: %r C extension module was built for another " \
"version of psutil (different than %s)" % (_psplatform.cext.__file__,
__version__)
raise ImportError(msg)
# =====================================================================
# --- exceptions
# =====================================================================
class Error(Exception):
"""Base exception class. All other psutil exceptions inherit
from this one.
"""
def __init__(self, msg=""):
self.msg = msg
def __str__(self):
return self.msg
class NoSuchProcess(Error):
"""Exception raised when a process with a certain PID doesn't
or no longer exists.
"""
def __init__(self, pid, name=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process no longer exists " + details
class ZombieProcess(NoSuchProcess):
"""Exception raised when querying a zombie process. This is
raised on OSX, BSD and Solaris only, and not always: depending
on the query the OS may be able to succeed anyway.
    On Linux all zombie processes are queryable (hence this is never
raised). Windows doesn't have zombie processes.
"""
def __init__(self, pid, name=None, ppid=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.ppid = ppid
self.name = name
self.msg = msg
if msg is None:
if name and ppid:
details = "(pid=%s, name=%s, ppid=%s)" % (
self.pid, repr(self.name), self.ppid)
elif name:
details = "(pid=%s, name=%s)" % (self.pid, repr(self.name))
else:
details = "(pid=%s)" % self.pid
self.msg = "process still exists but it's a zombie " + details
class AccessDenied(Error):
"""Exception raised when permission to perform an action is denied."""
def __init__(self, pid=None, name=None, msg=None):
Error.__init__(self, msg)
self.pid = pid
self.name = name
self.msg = msg
if msg is None:
if (pid is not None) and (name is not None):
self.msg = "(pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg = "(pid=%s)" % self.pid
else:
self.msg = ""
class TimeoutExpired(Error):
"""Raised on Process.wait(timeout) if timeout expires and process
is still alive.
"""
def __init__(self, seconds, pid=None, name=None):
Error.__init__(self, "timeout after %s seconds" % seconds)
self.seconds = seconds
self.pid = pid
self.name = name
if (pid is not None) and (name is not None):
self.msg += " (pid=%s, name=%s)" % (pid, repr(name))
elif (pid is not None):
self.msg += " (pid=%s)" % self.pid
# push exception classes into platform specific module namespace
_psplatform.NoSuchProcess = NoSuchProcess
_psplatform.ZombieProcess = ZombieProcess
_psplatform.AccessDenied = AccessDenied
_psplatform.TimeoutExpired = TimeoutExpired
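# Hedged usage sketch (editor's addition, not part of upstream psutil): the
# usual defensive pattern when iterating over processes is to treat
# NoSuchProcess / ZombieProcess as "skip this process" and AccessDenied as
# "show partial information".
def _example_exception_handling():
    names = []
    for proc in process_iter():              # defined later in this module
        try:
            names.append(proc.name())
        except (NoSuchProcess, ZombieProcess):
            continue                         # process vanished or is a zombie
        except AccessDenied:
            names.append(None)               # insufficient privileges
    return names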
# =====================================================================
# --- Process class
# =====================================================================
def _assert_pid_not_reused(fun):
"""Decorator which raises NoSuchProcess in case a process is no
longer running or its PID has been reused.
"""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
return fun(self, *args, **kwargs)
return wrapper
class Process(object):
"""Represents an OS process with the given PID.
If PID is omitted current process PID (os.getpid()) is used.
Raise NoSuchProcess if PID does not exist.
Note that most of the methods of this class do not make sure
the PID of the process being queried has been reused over time.
That means you might end up retrieving an information referring
to another process in case the original one this instance
refers to is gone in the meantime.
The only exceptions for which process identity is pre-emptively
checked and guaranteed are:
- parent()
- children()
- nice() (set)
- ionice() (set)
- rlimit() (set)
- cpu_affinity (set)
- suspend()
- resume()
- send_signal()
- terminate()
- kill()
To prevent this problem for all other methods you can:
- use is_running() before querying the process
- if you're continuously iterating over a set of Process
instances use process_iter() which pre-emptively checks
process identity for every yielded instance
"""
def __init__(self, pid=None):
self._init(pid)
def _init(self, pid, _ignore_nsp=False):
if pid is None:
pid = os.getpid()
else:
if not _PY3 and not isinstance(pid, (int, long)):
raise TypeError('pid must be an integer (got %r)' % pid)
if pid < 0:
raise ValueError('pid must be a positive integer (got %s)'
% pid)
self._pid = pid
self._name = None
self._exe = None
self._create_time = None
self._gone = False
self._hash = None
# used for caching on Windows only (on POSIX ppid may change)
self._ppid = None
# platform-specific modules define an _psplatform.Process
# implementation class
self._proc = _psplatform.Process(pid)
self._last_sys_cpu_times = None
self._last_proc_cpu_times = None
# cache creation time for later use in is_running() method
try:
self.create_time()
except AccessDenied:
# we should never get here as AFAIK we're able to get
# process creation time on all platforms even as a
# limited user
pass
except ZombieProcess:
# Let's consider a zombie process as legitimate as
            # technically it's still alive (it can be queried,
# although not always, and it's returned by pids()).
pass
except NoSuchProcess:
if not _ignore_nsp:
msg = 'no process found with pid %s' % pid
raise NoSuchProcess(pid, None, msg)
else:
self._gone = True
        # This pair is supposed to identify a Process instance
# univocally over time (the PID alone is not enough as
# it might refer to a process whose PID has been reused).
# This will be used later in __eq__() and is_running().
self._ident = (self.pid, self._create_time)
def __str__(self):
try:
pid = self.pid
name = repr(self.name())
except ZombieProcess:
details = "(pid=%s (zombie))" % self.pid
except NoSuchProcess:
details = "(pid=%s (terminated))" % self.pid
except AccessDenied:
details = "(pid=%s)" % (self.pid)
else:
details = "(pid=%s, name=%s)" % (pid, name)
return "%s.%s%s" % (self.__class__.__module__,
self.__class__.__name__, details)
def __repr__(self):
return "<%s at %s>" % (self.__str__(), id(self))
def __eq__(self, other):
# Test for equality with another Process object based
# on PID and creation time.
if not isinstance(other, Process):
return NotImplemented
return self._ident == other._ident
def __ne__(self, other):
return not self == other
def __hash__(self):
if self._hash is None:
self._hash = hash(self._ident)
return self._hash
# --- utility methods
def as_dict(self, attrs=None, ad_value=None):
"""Utility method returning process information as a
hashable dictionary.
If 'attrs' is specified it must be a list of strings
reflecting available Process class' attribute names
(e.g. ['cpu_times', 'name']) else all public (read
only) attributes are assumed.
'ad_value' is the value which gets assigned in case
AccessDenied or ZombieProcess exception is raised when
retrieving that particular process information.
"""
excluded_names = set(
['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
'is_running', 'as_dict', 'parent', 'children', 'rlimit'])
retdict = dict()
ls = set(attrs or [x for x in dir(self)])
for name in ls:
if name.startswith('_'):
continue
if name in excluded_names:
continue
try:
attr = getattr(self, name)
if callable(attr):
ret = attr()
else:
ret = attr
except (AccessDenied, ZombieProcess):
ret = ad_value
except NotImplementedError:
# in case of not implemented functionality (may happen
# on old or exotic systems) we want to crash only if
# the user explicitly asked for that particular attr
if attrs:
raise
continue
retdict[name] = ret
return retdict
def parent(self):
"""Return the parent process as a Process object pre-emptively
checking whether PID has been reused.
If no parent is known return None.
"""
ppid = self.ppid()
if ppid is not None:
ctime = self.create_time()
try:
parent = Process(ppid)
if parent.create_time() <= ctime:
return parent
# ...else ppid has been reused by another process
except NoSuchProcess:
pass
def is_running(self):
"""Return whether this process is running.
It also checks if PID has been reused by another process in
which case return False.
"""
if self._gone:
return False
try:
# Checking if PID is alive is not enough as the PID might
# have been reused by another process: we also want to
# check process identity.
            # Process identity / uniqueness over time is guaranteed by
# (PID + creation time) and that is verified in __eq__.
return self == Process(self.pid)
except NoSuchProcess:
self._gone = True
return False
# --- actual API
@property
def pid(self):
"""The process PID."""
return self._pid
def ppid(self):
"""The process parent PID.
On Windows the return value is cached after first call.
"""
# On POSIX we don't want to cache the ppid as it may unexpectedly
# change to 1 (init) in case this process turns into a zombie:
# https://github.com/giampaolo/psutil/issues/321
# http://stackoverflow.com/questions/356722/
# XXX should we check creation time here rather than in
# Process.parent()?
if _POSIX:
return self._proc.ppid()
else:
self._ppid = self._ppid or self._proc.ppid()
return self._ppid
def name(self):
"""The process name. The return value is cached after first call."""
if self._name is None:
name = self._proc.name()
if _POSIX and len(name) >= 15:
# On UNIX the name gets truncated to the first 15 characters.
# If it matches the first part of the cmdline we return that
# one instead because it's usually more explicative.
# Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
try:
cmdline = self.cmdline()
except AccessDenied:
pass
else:
if cmdline:
extended_name = os.path.basename(cmdline[0])
if extended_name.startswith(name):
name = extended_name
self._proc._name = name
self._name = name
return self._name
def exe(self):
"""The process executable as an absolute path.
May also be an empty string.
The return value is cached after first call.
"""
def guess_it(fallback):
# try to guess exe from cmdline[0] in absence of a native
# exe representation
cmdline = self.cmdline()
if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
exe = cmdline[0] # the possible exe
# Attempt to guess only in case of an absolute path.
# It is not safe otherwise as the process might have
# changed cwd.
if (os.path.isabs(exe) and
os.path.isfile(exe) and
os.access(exe, os.X_OK)):
return exe
if isinstance(fallback, AccessDenied):
raise fallback
return fallback
if self._exe is None:
try:
exe = self._proc.exe()
except AccessDenied as err:
return guess_it(fallback=err)
else:
if not exe:
# underlying implementation can legitimately return an
# empty string; if that's the case we don't want to
# raise AD while guessing from the cmdline
try:
exe = guess_it(fallback=exe)
except AccessDenied:
pass
self._exe = exe
return self._exe
def cmdline(self):
"""The command line this process has been called with."""
return self._proc.cmdline()
def status(self):
"""The process current status as a STATUS_* constant."""
try:
return self._proc.status()
except ZombieProcess:
return STATUS_ZOMBIE
def username(self):
"""The name of the user that owns the process.
On UNIX this is calculated by using *real* process uid.
"""
if _POSIX:
if pwd is None:
# might happen if python was installed from sources
raise ImportError(
"requires pwd module shipped with standard python")
real_uid = self.uids().real
try:
return pwd.getpwuid(real_uid).pw_name
except KeyError:
# the uid can't be resolved by the system
return str(real_uid)
else:
return self._proc.username()
def create_time(self):
"""The process creation time as a floating point number
expressed in seconds since the epoch, in UTC.
The return value is cached after first call.
"""
if self._create_time is None:
self._create_time = self._proc.create_time()
return self._create_time
def cwd(self):
"""Process current working directory as an absolute path."""
return self._proc.cwd()
def nice(self, value=None):
"""Get or set process niceness (priority)."""
if value is None:
return self._proc.nice_get()
else:
if not self.is_running():
raise NoSuchProcess(self.pid, self._name)
self._proc.nice_set(value)
if _POSIX:
def uids(self):
"""Return process UIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.uids()
def gids(self):
"""Return process GIDs as a (real, effective, saved)
namedtuple.
"""
return self._proc.gids()
def terminal(self):
"""The terminal associated with this process, if any,
else None.
"""
return self._proc.terminal()
def num_fds(self):
"""Return the number of file descriptors opened by this
process (POSIX only).
"""
return self._proc.num_fds()
# Linux, BSD and Windows only
if hasattr(_psplatform.Process, "io_counters"):
def io_counters(self):
"""Return process I/O statistics as a
(read_count, write_count, read_bytes, write_bytes)
namedtuple.
Those are the number of read/write calls performed and the
amount of bytes read and written by the process.
"""
return self._proc.io_counters()
# Linux and Windows >= Vista only
if hasattr(_psplatform.Process, "ionice_get"):
def ionice(self, ioclass=None, value=None):
"""Get or set process I/O niceness (priority).
On Linux 'ioclass' is one of the IOPRIO_CLASS_* constants.
'value' is a number which goes from 0 to 7. The higher the
value, the lower the I/O priority of the process.
On Windows only 'ioclass' is used and it can be set to 2
(normal), 1 (low) or 0 (very low).
Available on Linux and Windows > Vista only.
"""
if ioclass is None:
if value is not None:
raise ValueError("'ioclass' must be specified")
return self._proc.ionice_get()
else:
return self._proc.ionice_set(ioclass, value)
# Linux only
if hasattr(_psplatform.Process, "rlimit"):
def rlimit(self, resource, limits=None):
"""Get or set process resource limits as a (soft, hard)
tuple.
'resource' is one of the RLIMIT_* constants.
'limits' is supposed to be a (soft, hard) tuple.
See "man prlimit" for further info.
Available on Linux only.
"""
if limits is None:
return self._proc.rlimit(resource)
else:
return self._proc.rlimit(resource, limits)
# Windows, Linux and BSD only
if hasattr(_psplatform.Process, "cpu_affinity_get"):
def cpu_affinity(self, cpus=None):
"""Get or set process CPU affinity.
If specified 'cpus' must be a list of CPUs for which you
want to set the affinity (e.g. [0, 1]).
(Windows, Linux and BSD only).
"""
# Automatically remove duplicates both on get and
# set (for get it's not really necessary, it's
# just for extra safety).
if cpus is None:
return list(set(self._proc.cpu_affinity_get()))
else:
self._proc.cpu_affinity_set(list(set(cpus)))
if _WINDOWS:
def num_handles(self):
"""Return the number of handles opened by this process
(Windows only).
"""
return self._proc.num_handles()
def num_ctx_switches(self):
"""Return the number of voluntary and involuntary context
switches performed by this process.
"""
return self._proc.num_ctx_switches()
def num_threads(self):
"""Return the number of threads used by this process."""
return self._proc.num_threads()
def threads(self):
"""Return threads opened by process as a list of
(id, user_time, system_time) namedtuples representing
thread id and thread CPU times (user/system).
"""
return self._proc.threads()
@_assert_pid_not_reused
def children(self, recursive=False):
"""Return the children of this process as a list of Process
instances, pre-emptively checking whether PID has been reused.
If recursive is True return all the parent descendants.
Example (A == this process):
A ─┐
│
├─ B (child) ─┐
│ └─ X (grandchild) ─┐
│ └─ Y (great grandchild)
├─ C (child)
└─ D (child)
>>> import psutil
>>> p = psutil.Process()
>>> p.children()
B, C, D
>>> p.children(recursive=True)
B, X, Y, C, D
Note that in the example above if process X disappears
process Y won't be listed as the reference to process A
is lost.
"""
if hasattr(_psplatform, 'ppid_map'):
# Windows only: obtain a {pid:ppid, ...} dict for all running
# processes in one shot (faster).
ppid_map = _psplatform.ppid_map()
else:
ppid_map = None
ret = []
if not recursive:
if ppid_map is None:
# 'slow' version, common to all platforms except Windows
for p in process_iter():
try:
if p.ppid() == self.pid:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= p.create_time():
ret.append(p)
except (NoSuchProcess, ZombieProcess):
pass
else:
# Windows only (faster)
for pid, ppid in ppid_map.items():
if ppid == self.pid:
try:
child = Process(pid)
# if child happens to be older than its parent
# (self) it means child's PID has been reused
if self.create_time() <= child.create_time():
ret.append(child)
except (NoSuchProcess, ZombieProcess):
pass
else:
# construct a dict where 'values' are all the processes
# having 'key' as their parent
table = collections.defaultdict(list)
if ppid_map is None:
for p in process_iter():
try:
table[p.ppid()].append(p)
except (NoSuchProcess, ZombieProcess):
pass
else:
for pid, ppid in ppid_map.items():
try:
p = Process(pid)
table[ppid].append(p)
except (NoSuchProcess, ZombieProcess):
pass
# At this point we have a mapping table where table[self.pid]
# are the current process' children.
# Below, we look for all descendants recursively, similarly
# to a recursive function call.
checkpids = [self.pid]
for pid in checkpids:
for child in table[pid]:
try:
# if child happens to be older than its parent
# (self) it means child's PID has been reused
intime = self.create_time() <= child.create_time()
except (NoSuchProcess, ZombieProcess):
pass
else:
if intime:
ret.append(child)
if child.pid not in checkpids:
checkpids.append(child.pid)
return ret
def cpu_percent(self, interval=None):
"""Return a float representing the current process CPU
utilization as a percentage.
When interval is 0.0 or None (default) compares process times
to system CPU times elapsed since last call, returning
immediately (non-blocking). That means that the first time
this is called it will return a meaningful 0.0 value.
When interval is > 0.0 compares process times to system CPU
times elapsed before and after the interval (blocking).
In this case is recommended for accuracy that this function
be called with at least 0.1 seconds between calls.
Examples:
>>> import psutil
>>> p = psutil.Process(os.getpid())
>>> # blocking
>>> p.cpu_percent(interval=1)
2.0
>>> # non-blocking (percentage since last call)
>>> p.cpu_percent(interval=None)
2.9
>>>
"""
blocking = interval is not None and interval > 0.0
num_cpus = cpu_count()
if _POSIX:
def timer():
return _timer() * num_cpus
else:
def timer():
return sum(cpu_times())
if blocking:
st1 = timer()
pt1 = self._proc.cpu_times()
time.sleep(interval)
st2 = timer()
pt2 = self._proc.cpu_times()
else:
st1 = self._last_sys_cpu_times
pt1 = self._last_proc_cpu_times
st2 = timer()
pt2 = self._proc.cpu_times()
if st1 is None or pt1 is None:
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
return 0.0
delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
delta_time = st2 - st1
# reset values for next call in case of interval == None
self._last_sys_cpu_times = st2
self._last_proc_cpu_times = pt2
try:
# The utilization split between all CPUs.
# Note: a percentage > 100 is legitimate as it can result
# from a process with multiple threads running on different
# CPU cores, see:
# http://stackoverflow.com/questions/1032357
# https://github.com/giampaolo/psutil/issues/474
overall_percent = ((delta_proc / delta_time) * 100) * num_cpus
except ZeroDivisionError:
# interval was too low
return 0.0
else:
return round(overall_percent, 1)
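    # Editor's worked example (hedged, POSIX path): 0.5s of process CPU time
    # over a 1s wall-clock interval on a 4-core box gives
    # (0.5 / (1.0 * 4)) * 100 * 4 = 50.0; a process saturating all 4 cores
    # for that second gives (4.0 / 4.0) * 100 * 4 = 400.0.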
def cpu_times(self):
"""Return a (user, system) namedtuple representing the
accumulated process time, in seconds.
This is the same as os.times() but per-process.
"""
return self._proc.cpu_times()
def memory_info(self):
"""Return a tuple representing RSS (Resident Set Size) and VMS
(Virtual Memory Size) in bytes.
On UNIX RSS and VMS are the same values shown by 'ps'.
On Windows RSS and VMS refer to "Mem Usage" and "VM Size"
columns of taskmgr.exe.
"""
return self._proc.memory_info()
def memory_info_ex(self):
"""Return a namedtuple with variable fields depending on the
platform representing extended memory information about
this process. All numbers are expressed in bytes.
"""
return self._proc.memory_info_ex()
def memory_percent(self):
"""Compare physical system memory to process resident memory
(RSS) and calculate process memory utilization as a percentage.
"""
rss = self._proc.memory_info()[0]
# use cached value if available
total_phymem = _TOTAL_PHYMEM or virtual_memory().total
try:
return (rss / float(total_phymem)) * 100
except ZeroDivisionError:
return 0.0
def memory_maps(self, grouped=True):
"""Return process' mapped memory regions as a list of namedtuples
whose fields are variable depending on the platform.
If 'grouped' is True the mapped regions with the same 'path'
are grouped together and the different memory fields are summed.
If 'grouped' is False every mapped region is shown as a single
entity and the namedtuple will also include the mapped region's
address space ('addr') and permission set ('perms').
"""
it = self._proc.memory_maps()
if grouped:
d = {}
for tupl in it:
path = tupl[2]
nums = tupl[3:]
try:
d[path] = map(lambda x, y: x + y, d[path], nums)
except KeyError:
d[path] = nums
nt = _psplatform.pmmap_grouped
return [nt(path, *d[path]) for path in d] # NOQA
else:
nt = _psplatform.pmmap_ext
return [nt(*x) for x in it]
def open_files(self):
"""Return files opened by process as a list of
(path, fd) namedtuples including the absolute file name
and file descriptor number.
"""
return self._proc.open_files()
def connections(self, kind='inet'):
"""Return connections opened by process as a list of
(fd, family, type, laddr, raddr, status) namedtuples.
The 'kind' parameter filters for connections that match the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
"""
return self._proc.connections(kind)
if _POSIX:
def _send_signal(self, sig):
# XXX: according to "man 2 kill" PID 0 has a special
# meaning as it refers to <<every process in the process
# group of the calling process>>, so should we prevent
# it here?
try:
os.kill(self.pid, sig)
except OSError as err:
if err.errno == errno.ESRCH:
self._gone = True
raise NoSuchProcess(self.pid, self._name)
if err.errno == errno.EPERM:
raise AccessDenied(self.pid, self._name)
raise
@_assert_pid_not_reused
def send_signal(self, sig):
"""Send a signal to process pre-emptively checking whether
        PID has been reused (see signal module constants).
On Windows only SIGTERM is valid and is treated as an alias
for kill().
"""
if _POSIX:
self._send_signal(sig)
else:
if sig == signal.SIGTERM:
self._proc.kill()
else:
raise ValueError("only SIGTERM is supported on Windows")
@_assert_pid_not_reused
def suspend(self):
"""Suspend process execution with SIGSTOP pre-emptively checking
whether PID has been reused.
        On Windows this has the effect of suspending all process threads.
"""
if _POSIX:
self._send_signal(signal.SIGSTOP)
else:
self._proc.suspend()
@_assert_pid_not_reused
def resume(self):
"""Resume process execution with SIGCONT pre-emptively checking
whether PID has been reused.
On Windows this has the effect of resuming all process threads.
"""
if _POSIX:
self._send_signal(signal.SIGCONT)
else:
self._proc.resume()
@_assert_pid_not_reused
def terminate(self):
"""Terminate the process with SIGTERM pre-emptively checking
whether PID has been reused.
On Windows this is an alias for kill().
"""
if _POSIX:
self._send_signal(signal.SIGTERM)
else:
self._proc.kill()
@_assert_pid_not_reused
def kill(self):
"""Kill the current process with SIGKILL pre-emptively checking
whether PID has been reused.
"""
if _POSIX:
self._send_signal(signal.SIGKILL)
else:
self._proc.kill()
    def wait(self, timeout=None):
        """Wait for the process to terminate and, if the process is a child
        of os.getpid(), also return its exit code, else None.
        If the process is already terminated, immediately return None
        instead of raising NoSuchProcess.
If timeout (in seconds) is specified and process is still alive
raise TimeoutExpired.
To wait for multiple Process(es) use psutil.wait_procs().
"""
if timeout is not None and not timeout >= 0:
raise ValueError("timeout must be a positive integer")
return self._proc.wait(timeout)
# =====================================================================
# --- Popen class
# =====================================================================
class Popen(Process):
"""A more convenient interface to stdlib subprocess module.
It starts a sub process and deals with it exactly as when using
subprocess.Popen class but in addition also provides all the
properties and methods of psutil.Process class as a unified
interface:
>>> import psutil
>>> from subprocess import PIPE
>>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
>>> p.name()
'python'
>>> p.uids()
user(real=1000, effective=1000, saved=1000)
>>> p.username()
'giampaolo'
>>> p.communicate()
('hi\n', None)
>>> p.terminate()
>>> p.wait(timeout=2)
0
>>>
For method names common to both classes such as kill(), terminate()
and wait(), psutil.Process implementation takes precedence.
    Unlike subprocess.Popen this class pre-emptively checks whether the PID
has been reused on send_signal(), terminate() and kill() so that
you don't accidentally terminate another process, fixing
http://bugs.python.org/issue6973.
For a complete documentation refer to:
http://docs.python.org/library/subprocess.html
"""
def __init__(self, *args, **kwargs):
# Explicitly avoid to raise NoSuchProcess in case the process
# spawned by subprocess.Popen terminates too quickly, see:
# https://github.com/giampaolo/psutil/issues/193
self.__subproc = subprocess.Popen(*args, **kwargs)
self._init(self.__subproc.pid, _ignore_nsp=True)
def __dir__(self):
return sorted(set(dir(Popen) + dir(subprocess.Popen)))
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
try:
return object.__getattribute__(self.__subproc, name)
except AttributeError:
raise AttributeError("%s instance has no attribute '%s'"
% (self.__class__.__name__, name))
def wait(self, timeout=None):
if self.__subproc.returncode is not None:
return self.__subproc.returncode
ret = super(Popen, self).wait(timeout)
self.__subproc.returncode = ret
return ret
# =====================================================================
# --- system processes related functions
# =====================================================================
def pids():
"""Return a list of current running PIDs."""
return _psplatform.pids()
def pid_exists(pid):
"""Return True if given PID exists in the current process list.
This is faster than doing "pid in psutil.pids()" and
should be preferred.
"""
if pid < 0:
return False
elif pid == 0 and _POSIX:
# On POSIX we use os.kill() to determine PID existence.
# According to "man 2 kill" PID 0 has a special meaning
# though: it refers to <<every process in the process
        # group of the calling process>> and that is not what we want
# to do here.
return pid in pids()
else:
return _psplatform.pid_exists(pid)
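# Quick usage sketch (assumes the module is imported as psutil):
#
#   >>> psutil.pid_exists(os.getpid())
#   True
#   >>> psutil.pid_exists(-1)
#   False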
_pmap = {}
def process_iter():
"""Return a generator yielding a Process instance for all
running processes.
Every new Process instance is only created once and then cached
into an internal table which is updated every time this is used.
Cached Process instances are checked for identity so that you're
safe in case a PID has been reused by another process, in which
case the cached instance is updated.
The sorting order in which processes are yielded is based on
their PIDs.
"""
def add(pid):
proc = Process(pid)
_pmap[proc.pid] = proc
return proc
def remove(pid):
_pmap.pop(pid, None)
a = set(pids())
b = set(_pmap.keys())
new_pids = a - b
gone_pids = b - a
for pid in gone_pids:
remove(pid)
for pid, proc in sorted(list(_pmap.items()) +
list(dict.fromkeys(new_pids).items())):
try:
if proc is None: # new process
yield add(pid)
else:
# use is_running() to check whether PID has been reused by
# another process in which case yield a new Process instance
if proc.is_running():
yield proc
else:
yield add(pid)
except NoSuchProcess:
remove(pid)
except AccessDenied:
# Process creation time can't be determined hence there's
# no way to tell whether the pid of the cached process
# has been reused. Just return the cached version.
yield proc
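# Usage sketch: collecting PIDs by process name with process_iter(); the
# name 'python' and the resulting PIDs are assumptions for illustration.
#
#   >>> [p.pid for p in psutil.process_iter() if p.name() == 'python']
#   [1024, 2048]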
def wait_procs(procs, timeout=None, callback=None):
"""Convenience function which waits for a list of processes to
terminate.
Return a (gone, alive) tuple indicating which processes
are gone and which ones are still alive.
The gone ones will have a new 'returncode' attribute indicating
process exit status (may be None).
'callback' is a function which gets called every time a process
terminates (a Process instance is passed as callback argument).
Function will return as soon as all processes terminate or when
timeout occurs.
Typical use case is:
- send SIGTERM to a list of processes
- give them some time to terminate
- send SIGKILL to those ones which are still alive
Example:
>>> def on_terminate(proc):
... print("process {} terminated".format(proc))
...
>>> for p in procs:
... p.terminate()
...
>>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
>>> for p in alive:
... p.kill()
"""
def check_gone(proc, timeout):
try:
returncode = proc.wait(timeout=timeout)
except TimeoutExpired:
pass
else:
if returncode is not None or not proc.is_running():
proc.returncode = returncode
gone.add(proc)
if callback is not None:
callback(proc)
if timeout is not None and not timeout >= 0:
msg = "timeout must be a positive integer, got %s" % timeout
raise ValueError(msg)
gone = set()
alive = set(procs)
if callback is not None and not callable(callback):
raise TypeError("callback %r is not a callable" % callable)
if timeout is not None:
deadline = _timer() + timeout
while alive:
if timeout is not None and timeout <= 0:
break
for proc in alive:
# Make sure that every complete iteration (all processes)
# will last max 1 sec.
# We do this because we don't want to wait too long on a
# single process: in case it terminates too late other
# processes may disappear in the meantime and their PID
# reused.
max_timeout = 1.0 / len(alive)
if timeout is not None:
timeout = min((deadline - _timer()), max_timeout)
if timeout <= 0:
break
check_gone(proc, timeout)
else:
check_gone(proc, max_timeout)
alive = alive - gone
if alive:
# Last attempt over processes survived so far.
# timeout == 0 won't make this function wait any further.
for proc in alive:
check_gone(proc, 0)
alive = alive - gone
return (list(gone), list(alive))
# =====================================================================
# --- CPU related functions
# =====================================================================
@memoize
def cpu_count(logical=True):
"""Return the number of logical CPUs in the system (same as
os.cpu_count() in Python 3.4).
If logical is False return the number of physical cores only
(e.g. hyper thread CPUs are excluded).
Return None if undetermined.
The return value is cached after first call.
If desired cache can be cleared like this:
>>> psutil.cpu_count.cache_clear()
"""
if logical:
return _psplatform.cpu_count_logical()
else:
return _psplatform.cpu_count_physical()
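# Usage sketch (returned counts depend on the machine):
#
#   >>> psutil.cpu_count()                 # logical CPUs
#   4
#   >>> psutil.cpu_count(logical=False)    # physical cores only
#   2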
def cpu_times(percpu=False):
"""Return system-wide CPU times as a namedtuple.
Every CPU time represents the seconds the CPU has spent in the given mode.
The namedtuple's fields availability varies depending on the platform:
- user
- system
- idle
- nice (UNIX)
- iowait (Linux)
- irq (Linux, FreeBSD)
- softirq (Linux)
- steal (Linux >= 2.6.11)
- guest (Linux >= 2.6.24)
- guest_nice (Linux >= 3.2.0)
When percpu is True return a list of namedtuples for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
"""
if not percpu:
return _psplatform.cpu_times()
else:
return _psplatform.per_cpu_times()
_last_cpu_times = cpu_times()
_last_per_cpu_times = cpu_times(percpu=True)
def cpu_percent(interval=None, percpu=False):
"""Return a float representing the current system-wide CPU
utilization as a percentage.
When interval is > 0.0 compares system CPU times elapsed before
and after the interval (blocking).
When interval is 0.0 or None compares system CPU times elapsed
since last call or module import, returning immediately (non
blocking). That means the first time this is called it will
return a meaningless 0.0 value which you should ignore.
    In this case it is recommended for accuracy that this function be
called with at least 0.1 seconds between calls.
When percpu is True returns a list of floats representing the
utilization as a percentage for each CPU.
First element of the list refers to first CPU, second element
to second CPU and so on.
The order of the list is consistent across calls.
Examples:
>>> # blocking, system-wide
>>> psutil.cpu_percent(interval=1)
2.0
>>>
>>> # blocking, per-cpu
>>> psutil.cpu_percent(interval=1, percpu=True)
[2.0, 1.0]
>>>
>>> # non-blocking (percentage since last call)
>>> psutil.cpu_percent(interval=None)
2.9
>>>
"""
global _last_cpu_times
global _last_per_cpu_times
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
t1_all = sum(t1)
t1_busy = t1_all - t1.idle
t2_all = sum(t2)
t2_busy = t2_all - t2.idle
# this usually indicates a float precision issue
if t2_busy <= t1_busy:
return 0.0
busy_delta = t2_busy - t1_busy
all_delta = t2_all - t1_all
busy_perc = (busy_delta / all_delta) * 100
return round(busy_perc, 1)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times
_last_cpu_times = cpu_times()
return calculate(t1, _last_cpu_times)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times
_last_per_cpu_times = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times):
ret.append(calculate(t1, t2))
return ret
# Use separate global vars for cpu_times_percent() so that it's
# independent from cpu_percent() and they can both be used within
# the same program.
_last_cpu_times_2 = _last_cpu_times
_last_per_cpu_times_2 = _last_per_cpu_times
def cpu_times_percent(interval=None, percpu=False):
"""Same as cpu_percent() but provides utilization percentages
for each specific CPU time as is returned by cpu_times().
For instance, on Linux we'll get:
>>> cpu_times_percent()
cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
>>>
interval and percpu arguments have the same meaning as in
cpu_percent().
"""
global _last_cpu_times_2
global _last_per_cpu_times_2
blocking = interval is not None and interval > 0.0
def calculate(t1, t2):
nums = []
all_delta = sum(t2) - sum(t1)
for field in t1._fields:
field_delta = getattr(t2, field) - getattr(t1, field)
try:
field_perc = (100 * field_delta) / all_delta
except ZeroDivisionError:
field_perc = 0.0
field_perc = round(field_perc, 1)
if _WINDOWS:
# XXX
# Work around:
# https://github.com/giampaolo/psutil/issues/392
# CPU times are always supposed to increase over time
# or at least remain the same and that's because time
# cannot go backwards.
# Surprisingly sometimes this might not be the case on
# Windows where 'system' CPU time can be smaller
# compared to the previous call, resulting in corrupted
# percentages (< 0 or > 100).
# I really don't know what to do about that except
# forcing the value to 0 or 100.
if field_perc > 100.0:
field_perc = 100.0
elif field_perc < 0.0:
field_perc = 0.0
nums.append(field_perc)
return _psplatform.scputimes(*nums)
# system-wide usage
if not percpu:
if blocking:
t1 = cpu_times()
time.sleep(interval)
else:
t1 = _last_cpu_times_2
_last_cpu_times_2 = cpu_times()
return calculate(t1, _last_cpu_times_2)
# per-cpu usage
else:
ret = []
if blocking:
tot1 = cpu_times(percpu=True)
time.sleep(interval)
else:
tot1 = _last_per_cpu_times_2
_last_per_cpu_times_2 = cpu_times(percpu=True)
for t1, t2 in zip(tot1, _last_per_cpu_times_2):
ret.append(calculate(t1, t2))
return ret
# =====================================================================
# --- system memory related functions
# =====================================================================
def virtual_memory():
"""Return statistics about system memory usage as a namedtuple
including the following fields, expressed in bytes:
- total:
total physical memory available.
- available:
the actual amount of available memory that can be given
instantly to processes that request more memory in bytes; this
is calculated by summing different memory values depending on
the platform (e.g. free + buffers + cached on Linux) and it is
supposed to be used to monitor actual memory usage in a cross
platform fashion.
- percent:
the percentage usage calculated as (total - available) / total * 100
- used:
memory used, calculated differently depending on the platform and
designed for informational purposes only:
OSX: active + inactive + wired
BSD: active + wired + cached
LINUX: total - free
- free:
memory not being used at all (zeroed) that is readily available;
note that this doesn't reflect the actual memory available
(use 'available' instead)
Platform-specific fields:
- active (UNIX):
memory currently in use or very recently used, and so it is in RAM.
- inactive (UNIX):
memory that is marked as not used.
- buffers (BSD, Linux):
cache for things like file system metadata.
- cached (BSD, OSX):
cache for various things.
- wired (OSX, BSD):
memory that is marked to always stay in RAM. It is never moved to disk.
- shared (BSD):
memory that may be simultaneously accessed by multiple processes.
The sum of 'used' and 'available' does not necessarily equal total.
On Windows 'available' and 'free' are the same.
"""
global _TOTAL_PHYMEM
ret = _psplatform.virtual_memory()
# cached for later use in Process.memory_percent()
_TOTAL_PHYMEM = ret.total
return ret
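# Illustrative sketch: guarding a memory-hungry task; the 100 MB threshold is
# an arbitrary assumption, not something prescribed by this module.
#
#   >>> if psutil.virtual_memory().available < 100 * 1024 * 1024:
#   ...     raise RuntimeError("less than 100 MB of RAM available")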
def swap_memory():
"""Return system swap memory statistics as a namedtuple including
the following fields:
- total: total swap memory in bytes
- used: used swap memory in bytes
- free: free swap memory in bytes
- percent: the percentage usage
- sin: no. of bytes the system has swapped in from disk (cumulative)
- sout: no. of bytes the system has swapped out from disk (cumulative)
'sin' and 'sout' on Windows are meaningless and always set to 0.
"""
return _psplatform.swap_memory()
# =====================================================================
# --- disks/partitions related functions
# =====================================================================
def disk_usage(path):
"""Return disk usage statistics about the given path as a namedtuple
including total, used and free space expressed in bytes plus the
percentage usage.
"""
return _psplatform.disk_usage(path)
def disk_partitions(all=False):
"""Return mounted partitions as a list of
(device, mountpoint, fstype, opts) namedtuple.
'opts' field is a raw string separated by commas indicating mount
options which may vary depending on the platform.
If "all" parameter is False return physical devices only and ignore
all others.
"""
return _psplatform.disk_partitions(all)
def disk_io_counters(perdisk=False):
"""Return system disk I/O statistics as a namedtuple including
the following fields:
- read_count: number of reads
- write_count: number of writes
- read_bytes: number of bytes read
- write_bytes: number of bytes written
- read_time: time spent reading from disk (in milliseconds)
- write_time: time spent writing to disk (in milliseconds)
If perdisk is True return the same information for every
physical disk installed on the system as a dictionary
with partition names as the keys and the namedtuple
described above as the values.
On recent Windows versions 'diskperf -y' command may need to be
executed first otherwise this function won't find any disk.
"""
rawdict = _psplatform.disk_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any physical disk")
if perdisk:
for disk, fields in rawdict.items():
rawdict[disk] = _common.sdiskio(*fields)
return rawdict
else:
return _common.sdiskio(*[sum(x) for x in zip(*rawdict.values())])
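# Usage sketch (the 'sda1' key and the counter values are hypothetical and
# platform dependent):
#
#   >>> psutil.disk_io_counters().read_bytes
#   28891643904
#   >>> psutil.disk_io_counters(perdisk=True)['sda1'].write_count
#   1843523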
# =====================================================================
# --- network related functions
# =====================================================================
def net_io_counters(pernic=False):
"""Return network I/O statistics as a namedtuple including
the following fields:
- bytes_sent: number of bytes sent
- bytes_recv: number of bytes received
- packets_sent: number of packets sent
- packets_recv: number of packets received
- errin: total number of errors while receiving
- errout: total number of errors while sending
- dropin: total number of incoming packets which were dropped
- dropout: total number of outgoing packets which were dropped
(always 0 on OSX and BSD)
If pernic is True return the same information for every
network interface installed on the system as a dictionary
with network interface names as the keys and the namedtuple
described above as the values.
"""
rawdict = _psplatform.net_io_counters()
if not rawdict:
raise RuntimeError("couldn't find any network interface")
if pernic:
for nic, fields in rawdict.items():
rawdict[nic] = _common.snetio(*fields)
return rawdict
else:
return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
def net_connections(kind='inet'):
"""Return system-wide connections as a list of
(fd, family, type, laddr, raddr, status, pid) namedtuples.
In case of limited privileges 'fd' and 'pid' may be set to -1
and None respectively.
The 'kind' parameter filters for connections that fit the
following criteria:
Kind Value Connections using
inet IPv4 and IPv6
inet4 IPv4
inet6 IPv6
tcp TCP
tcp4 TCP over IPv4
tcp6 TCP over IPv6
udp UDP
udp4 UDP over IPv4
udp6 UDP over IPv6
unix UNIX socket (both UDP and TCP protocols)
all the sum of all the possible families and protocols
On OSX this function requires root privileges.
"""
return _psplatform.net_connections(kind)
def net_if_addrs():
"""Return the addresses associated to each NIC (network interface
card) installed on the system as a dictionary whose keys are the
NIC names and value is a list of namedtuples for each address
assigned to the NIC. Each namedtuple includes 4 fields:
- family
- address
- netmask
- broadcast
'family' can be either socket.AF_INET, socket.AF_INET6 or
psutil.AF_LINK, which refers to a MAC address.
'address' is the primary address, 'netmask' and 'broadcast'
may be None.
Note: you can have more than one address of the same family
associated with each interface.
"""
has_enums = sys.version_info >= (3, 4)
if has_enums:
import socket
rawlist = _psplatform.net_if_addrs()
rawlist.sort(key=lambda x: x[1]) # sort by family
ret = collections.defaultdict(list)
for name, fam, addr, mask, broadcast in rawlist:
if has_enums:
try:
fam = socket.AddressFamily(fam)
except ValueError:
if os.name == 'nt' and fam == -1:
fam = _psplatform.AF_LINK
elif (hasattr(_psplatform, "AF_LINK") and
_psplatform.AF_LINK == fam):
# Linux defines AF_LINK as an alias for AF_PACKET.
# We re-set the family here so that repr(family)
# will show AF_LINK rather than AF_PACKET
fam = _psplatform.AF_LINK
ret[name].append(_common.snic(fam, addr, mask, broadcast))
return dict(ret)
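# Sketch: listing IPv4 addresses per NIC; the interface names and addresses
# shown are assumptions for illustration only.
#
#   >>> {name: [a.address for a in addrs if a.family == socket.AF_INET]
#   ...  for name, addrs in psutil.net_if_addrs().items()}
#   {'lo': ['127.0.0.1'], 'eth0': ['10.0.0.5']}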
def net_if_stats():
"""Return information about each NIC (network interface card)
installed on the system as a dictionary whose keys are the
NIC names and value is a namedtuple with the following fields:
- isup: whether the interface is up (bool)
- duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
NIC_DUPLEX_UNKNOWN
    - speed: the NIC speed expressed in megabits per second; if it can't
be determined (e.g. 'localhost') it will be set to 0.
- mtu: the maximum transmission unit expressed in bytes.
"""
return _psplatform.net_if_stats()
# =====================================================================
# --- other system related functions
# =====================================================================
def boot_time():
"""Return the system boot time expressed in seconds since the epoch."""
# Note: we are not caching this because it is subject to
# system clock updates.
return _psplatform.boot_time()
def users():
"""Return users currently connected on the system as a list of
namedtuples including the following fields.
- user: the name of the user
- terminal: the tty or pseudo-tty associated with the user, if any.
- host: the host name associated with the entry, if any.
- started: the creation time as a floating point number expressed in
seconds since the epoch.
"""
return _psplatform.users()
def test():
"""List info of all currently running processes emulating ps aux
output.
"""
import datetime
today_day = datetime.date.today()
templ = "%-10s %5s %4s %4s %7s %7s %-13s %5s %7s %s"
attrs = ['pid', 'cpu_percent', 'memory_percent', 'name', 'cpu_times',
'create_time', 'memory_info']
if _POSIX:
attrs.append('uids')
attrs.append('terminal')
print(templ % ("USER", "PID", "%CPU", "%MEM", "VSZ", "RSS", "TTY",
"START", "TIME", "COMMAND"))
for p in process_iter():
try:
pinfo = p.as_dict(attrs, ad_value='')
except NoSuchProcess:
pass
else:
if pinfo['create_time']:
ctime = datetime.datetime.fromtimestamp(pinfo['create_time'])
if ctime.date() == today_day:
ctime = ctime.strftime("%H:%M")
else:
ctime = ctime.strftime("%b%d")
else:
ctime = ''
cputime = time.strftime("%M:%S",
time.localtime(sum(pinfo['cpu_times'])))
try:
user = p.username()
except KeyError:
if _POSIX:
if pinfo['uids']:
user = str(pinfo['uids'].real)
else:
user = ''
else:
raise
except Error:
user = ''
if _WINDOWS and '\\' in user:
user = user.split('\\')[1]
vms = pinfo['memory_info'] and \
int(pinfo['memory_info'].vms / 1024) or '?'
rss = pinfo['memory_info'] and \
int(pinfo['memory_info'].rss / 1024) or '?'
memp = pinfo['memory_percent'] and \
round(pinfo['memory_percent'], 1) or '?'
print(templ % (
user[:10],
pinfo['pid'],
pinfo['cpu_percent'],
memp,
vms,
rss,
pinfo.get('terminal', '') or '?',
ctime,
cputime,
pinfo['name'].strip() or '?'))
del memoize, division
if sys.version_info < (3, 0):
del num
if __name__ == "__main__":
test()
| artistic-2.0 | -1,882,063,374,360,191,700 | 34.048626 | 79 | 0.550685 | false |
Elico-Corp/openerp-7.0 | stock_pack_wizard/__openerp__.py | 1 | 1466 | # -*- coding: utf-8 -*-
# © 2014 Elico Corp (https://www.elico-corp.com)
# Licence AGPL-3.0 or later(http://www.gnu.org/licenses/agpl.html)
{
'name': 'Stock Pack Wizard',
'version': '7.0.1.0.0',
'author': 'Elico Corp',
'website': 'https://www.elico-corp.com',
'summary': '',
'description' : """
This module add new functionalities to Pack:
Split Pack at picking and picking_line
New fields added to pack:
- Customer Reference: Customer code
- Fullname: Customer Code + Sequence
- Address: Customer Address
- Dimensions: L, W, H, CBM
- Weights: NW and GW
New object created:
- Pack Template:
- Name and Code
- Dimensions: L, W, H, CBM
- Weights: NW and GW
Wizard created: a wizard will let user assign Stock Moves to Pack
Report created: Packing List (can be printed from Pack Tree view)
""",
'depends': ['base','stock','report_webkit'],
'category': '',
'sequence': 10,
'demo': [],
'data': [
'product_ul_view.xml',
'stock_tracking_view.xml',
'wizard/wizard_picking_tracking_view.xml',
'stock_picking_view.xml',
'stock_tracking_report.xml',
'data/product.ul.csv',
],
'test': [],
'installable': True,
'application': False,
'auto_install': False,
'css': [],
}
| agpl-3.0 | 3,104,632,123,113,778,700 | 27.173077 | 73 | 0.539249 | false |
VirusTotal/msticpy | tools/toollib/url_checker_async.py | 1 | 7616 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""Python file import analyzer."""
from collections import defaultdict, namedtuple
from pathlib import Path
from typing import Dict, Set, Optional, Iterable
from urllib import parse
import asyncio
import markdown
from bs4 import BeautifulSoup
from aiohttp import ClientSession, ClientResponseError, ClientConnectionError
# pylint: disable=relative-beyond-top-level
# from . import VERSION
# __version__ = VERSION
__author__ = "Ian Hellen"
UrlResult = namedtuple("UrlResult", "status, history, url, message")
def check_docs(
doc_path: str, recurse: bool = True, max_threads: int = 10, delay: float = 0
) -> Dict[str, Dict[str, UrlResult]]:
"""
Check multiple HTML files in `doc_path`.
Parameters
----------
doc_path : str
Path
recurse: bool
If True, recurse subfolders, default is True
max_threads: int, optional
The maximum number of async threads to run
delay: float, optional
Seconds delay between requests
Returns
-------
Dict[str, Dict[str, UrlResult]]
Dictionary of pages checked. Results for each page
is a dictionary of checked links for the page.
"""
page_results: Dict[str, Dict[str, UrlResult]] = defaultdict(dict)
link_results: Dict[str, UrlResult] = {}
links_to_check = _get_links_from_files(doc_path, recurse)
print(f"Checking links {len(links_to_check)}...")
checked_links = check_uris(links_to_check, max_threads, delay)
print("\ndone")
for result in checked_links:
link_results[result.url] = result
src_pages = links_to_check[result.url]
for src_page in src_pages:
page_results[src_page][result.url] = result
_print_url_results(page_results)
return page_results
# pylint: disable=broad-except
def _get_links_from_files(doc_path: str, recurse: bool = True) -> Dict[str, Set[str]]:
links_to_check: Dict[str, Set[str]] = defaultdict(set)
html_glob_pattern = "**/*.html" if recurse else "*.html"
all_files = list(Path(doc_path).glob(html_glob_pattern))
md_glob_pattern = "**/*.md" if recurse else "*.md"
md_files = list(Path(doc_path).glob(md_glob_pattern))
all_files.extend(md_files)
print(f"reading {len(all_files)} files...")
for file_name in all_files:
pg_links = _get_doc_links(file_name)
page = str(file_name.relative_to(Path(doc_path)))
for link in pg_links:
links_to_check[link].add(page)
return links_to_check
def _get_doc_links(doc_path: Path) -> Set[str]:
"""
Check links in an HTML or Markdown document.
Parameters
----------
doc_path : str
Path to the document
Returns
-------
Set[str]
Set of links
"""
html_content = None
try:
html_content = doc_path.read_text(encoding="utf-8")
except UnicodeDecodeError:
html_content = doc_path.read_text(encoding="mbcs")
if doc_path.suffix.casefold() == ".md":
html_content = markdown.markdown(html_content)
soup = BeautifulSoup(html_content, "html.parser")
links = soup.find_all("a")
links = {link.get("href") for link in links}
links = {link for link in links if link.casefold().startswith("http")}
return links
def _resolve_rel_link(
url_link: str, all_links: bool, page_url: str, top_root: str
) -> Optional[str]:
if url_link[0:4] == "http":
if all_links or (top_root.lower() not in url_link.lower()):
return url_link
else:
if url_link.startswith("#"):
# don't follow fragments
return None
url_link = parse.urljoin(page_url, url_link)
if all_links:
return url_link
return None
def check_uris(
uris_to_check: Iterable[str], max_threads: int = 10, delay: float = 0
) -> Iterable[UrlResult]:
"""
Check URIs.
Parameters
----------
uris_to_check : Iterable[str]
Iterable of URI strings
max_threads: int, optional
The maximum number of async threads to run
delay: float, optional
Seconds delay between requests
Returns
-------
Iterable[UrlResult]
Iterable of UrlResults
"""
loop = asyncio.get_event_loop()
future = asyncio.ensure_future(_check_uris_async(uris_to_check, max_threads, delay))
return loop.run_until_complete(future)
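# Minimal usage sketch (the URLs are placeholders, not part of the original
# module; kept as a comment so importing this file stays side-effect free):
#
#   results = check_uris(["https://example.com", "https://example.org/missing"])
#   for status, _history, url, message in results:
#       print(status, url, message)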
async def _check_url_async(url: str, session: ClientSession) -> UrlResult:
"""
Connect to URL and return response status.
Parameters
----------
url : str
URL to check
session : ClientSession
aiohttp client session
Returns
-------
UrlResult
Tuple of status code, redirect history, requested url,
status/error message.
"""
try:
async with session.get(url) as resp:
try:
await resp.read()
if resp.history:
result = UrlResult(
resp.status,
resp.history,
url,
"No error. Redirect to " + str(resp.url),
)
elif resp.status == 200:
result = UrlResult(
resp.status, resp.history, url, "No error. No redirect."
)
else:
result = UrlResult(resp.status, resp.history, url, "Error?")
except ClientResponseError as client_err:
return UrlResult(client_err.status, [], url, client_err)
except ClientConnectionError as err:
result = UrlResult(404, [], url, err)
return result
async def _check_uri_with_sem_async(sem, url, session) -> Iterable[UrlResult]:
# Getter function with semaphore.
async with sem:
return await _check_url_async(url, session)
async def _check_uris_async(
links_to_check: Iterable[str], max_threads: int = 10, delay: float = 0
) -> Iterable[UrlResult]:
tasks = []
# create instance of Semaphore
sem = asyncio.Semaphore(max_threads)
    # Create a client session that ensures we don't open a new connection
    # for each request.
async with ClientSession() as session:
for uri in links_to_check:
if delay:
                # asyncio.sleep() returns a coroutine; it must be awaited to
                # actually pause between requests.
                await asyncio.sleep(delay)
# pass Semaphore and session to every GET request
task = asyncio.ensure_future(_check_uri_with_sem_async(sem, uri, session))
tasks.append(task)
results = await asyncio.gather(*tasks)
return results
def _print_url_results(results: Dict[str, Dict[str, UrlResult]]):
"""
Print results of any URLs that did not return 200 status.
Parameters
----------
results : Dict[str, Dict[str, UrlResult]]
List of URLs checks to print.
"""
print("\n\nResults")
# non-200s
print("\n==========\nERRORS")
for page, result_dict in results.items():
page_errors = []
for result in result_dict.values():
if result.status != 200:
page_errors.append(f"{result.status} - {result.url}")
if page_errors:
print(f"Document {page}")
for err in page_errors:
print(err)
# if __name__ == "__main__":
# t_results = check_docs("..//..")
| mit | -6,428,462,102,758,438,000 | 28.75 | 88 | 0.588629 | false |
vinaykrdahiya/justuploads | main/__init__.py | 1 | 2292 | import falcon
# from main.settings import DB as db
# from main.helpers import QueryParser
import json
import urlparse
from werkzeug.http import parse_options_header
from werkzeug.formparser import parse_form_data
from cStringIO import StringIO
from werkzeug.wsgi import LimitedStream
from werkzeug import secure_filename
class CreateTemplateExclusiveImage:
"""End point for creating dealtype"""
def on_get(self, req, resp, stream, form={}, files={}):
"""return status 405. asks to use post api.
"""
resp.content_type = "application/json"
        resp_dict = {"status": "error",
                     "summary": "use post request for uploads"}
resp.body = (json.dumps(resp_dict))
def on_post(self, req, resp, stream, form={}, files={}):
"""
"""
file = files.get('file', [''])[0]
if file:
filename = secure_filename(file.filename)
file.save(filename)
resp.status = falcon.HTTP_200
resp.content_type = "application/json"
resp_dict = {"status": "success",
"summary": "File uploaded"}
resp.body = (json.dumps(resp_dict))
def generate_formdata(req, resp, params):
"""sets params['form'], params['files'], params['stream']
to pass to every endpoint.
"""
if req.method != 'GET':
mimetype, options = parse_options_header(req.get_header('content-type'))
data = req.stream.read()
environ = {'wsgi.input': StringIO(data),
'CONTENT_LENGTH': str(len(data)),
'CONTENT_TYPE': '%s; boundary=%s' %
(mimetype, options['boundary']),
'REQUEST_METHOD': 'POST'}
stream, form, files = parse_form_data(environ)
params['stream'], params['form'], params['files'] = stream, dict(form),\
dict(files)
return True
else:
di = urlparse.parse_qsl(req.query_string)
params['form'] = dict(di)
params['stream'] = LimitedStream()
params['files'] = dict()
return True
# hooks to be executed on every request before reaching to the endpoint
app = falcon.API(before=[generate_formdata])
# importing all the endpoints
cr = CreateTemplateExclusiveImage()
app.add_route('/upload', cr)
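# Rough local-run sketch (assumes gunicorn and curl are installed; not part of
# the original module): serve the WSGI 'app' object with `gunicorn main:app`,
# then POST a file, e.g. `curl -F "[email protected]" http://localhost:8000/upload`.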
| unlicense | -2,264,388,575,298,547,000 | 33.727273 | 80 | 0.603403 | false |
aphelps/platformio | platformio/builder/tools/platformio.py | 1 | 10717 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
import atexit
import platform
import re
from os import getenv, listdir, remove, sep, walk
from os.path import basename, dirname, isdir, isfile, join, normpath
from time import sleep
from SCons.Script import Exit, SConscript, SConscriptChdir
from serial import Serial
from platformio.util import get_serialports
def ProcessGeneral(env):
corelibs = []
# specific linker script
if "ldscript" in env.get("BOARD_OPTIONS", {}).get("build", {}):
env.Append(
LINKFLAGS=["-T", join(
"$PIOHOME_DIR", "packages", "ldscripts",
"${BOARD_OPTIONS['build']['ldscript']}")]
)
if "extra_flags" in env.get("BOARD_OPTIONS", {}).get("build", {}):
env.MergeFlags(env.subst("${BOARD_OPTIONS['build']['extra_flags']}"))
if "BUILD_FLAGS" in env:
env.MergeFlags(env['BUILD_FLAGS'])
if "FRAMEWORK" in env:
if env['FRAMEWORK'] in ("arduino", "energia"):
env.ConvertInoToCpp()
for f in env['FRAMEWORK'].split(","):
SConscriptChdir(0)
env, libs = SConscript(
env.subst(join("$PIOBUILDER_DIR", "scripts",
"frameworks", "%s.py" % f.strip().lower())),
exports="env")
corelibs += libs
return corelibs
def BuildFirmware(env, corelibs):
src = env.Clone()
vdirs = src.VariantDirRecursive(
join("$BUILD_DIR", "src"), join("$PROJECT_DIR", "src"))
# build dependent libs
deplibs = src.BuildDependentLibraries(join("$PROJECT_DIR", "src"))
src.MergeFlags(getenv("PIOSRCBUILD_FLAGS", "$SRCBUILD_FLAGS"))
return src.Program(
join("$BUILD_DIR", "firmware"),
[src.GlobCXXFiles(vdir) for vdir in vdirs],
LIBS=deplibs + corelibs,
LIBPATH="$BUILD_DIR",
PROGSUFFIX=".elf")
def GlobCXXFiles(env, path):
files = []
for suff in ["*.c", "*.cpp", "*.S"]:
_list = env.Glob(join(path, suff))
if _list:
files += _list
return files
def VariantDirRecursive(env, variant_dir, src_dir, duplicate=True,
ignore_pattern=None):
if not ignore_pattern:
ignore_pattern = (".git", ".svn")
variants = []
src_dir = env.subst(src_dir)
for root, _, _ in walk(src_dir):
_src_dir = root
_var_dir = variant_dir + root.replace(src_dir, "")
if any([s in _var_dir.lower() for s in ignore_pattern]):
continue
env.VariantDir(_var_dir, _src_dir, duplicate)
variants.append(_var_dir)
return variants
def BuildLibrary(env, variant_dir, library_dir, ignore_files=None):
lib = env.Clone()
vdirs = lib.VariantDirRecursive(
variant_dir, library_dir, ignore_pattern=(".git", ".svn", "examples"))
srcfiles = []
for vdir in vdirs:
for item in lib.GlobCXXFiles(vdir):
if not ignore_files or item.name not in ignore_files:
srcfiles.append(item)
return lib.Library(
lib.subst(variant_dir),
srcfiles
)
def BuildDependentLibraries(env, src_dir): # pylint: disable=R0914
INCLUDES_RE = re.compile(r"^\s*#include\s+(\<|\")([^\>\"\']+)(?:\>|\")",
re.M)
LIBSOURCE_DIRS = [env.subst(d) for d in env.get("LIBSOURCE_DIRS", [])]
# start internal prototypes
class IncludeFinder(object):
def __init__(self, base_dir, name, is_system=False):
self.base_dir = base_dir
self.name = name
self.is_system = is_system
self._inc_path = None
self._lib_dir = None
self._lib_name = None
def getIncPath(self):
return self._inc_path
def getLibDir(self):
return self._lib_dir
def getLibName(self):
return self._lib_name
def run(self):
if not self.is_system and self._find_in_local():
return True
return self._find_in_system()
def _find_in_local(self):
if isfile(join(self.base_dir, self.name)):
self._inc_path = join(self.base_dir, self.name)
return True
else:
return False
def _find_in_system(self):
for lsd_dir in LIBSOURCE_DIRS:
if not isdir(lsd_dir):
continue
for ld in listdir(lsd_dir):
inc_path = normpath(join(lsd_dir, ld, self.name))
lib_dir = inc_path[:inc_path.index(sep, len(lsd_dir) + 1)]
lib_name = basename(lib_dir)
# ignore user's specified libs
if "IGNORE_LIBS" in env and lib_name in env['IGNORE_LIBS']:
continue
if not isfile(inc_path):
# if source code is in "src" dir
lib_dir = join(lsd_dir, lib_name, "src")
inc_path = join(lib_dir, self.name)
if isfile(inc_path):
self._lib_dir = lib_dir
self._lib_name = lib_name
self._inc_path = inc_path
return True
return False
def _get_dep_libs(src_dir):
state = {
"paths": set(),
"libs": set(),
"ordered": set()
}
state = _process_src_dir(state, env.subst(src_dir))
result = []
for item in sorted(state['ordered'], key=lambda s: s[0]):
result.append((item[1], item[2]))
return result
def _process_src_dir(state, src_dir):
for root, _, _ in walk(src_dir):
for node in (env.GlobCXXFiles(root) +
env.Glob(join(root, "*.h"))):
state = _parse_includes(state, node)
return state
def _parse_includes(state, node):
if node.path in state['paths']:
return state
else:
state['paths'].add(node.path)
skip_includes = ("arduino.h", "energia.h")
matches = INCLUDES_RE.findall(node.get_text_contents())
for (inc_type, inc_name) in matches:
base_dir = dirname(node.path)
if inc_name.lower() in skip_includes:
continue
if join(base_dir, inc_name) in state['paths']:
continue
else:
state['paths'].add(join(base_dir, inc_name))
finder = IncludeFinder(base_dir, inc_name, inc_type == "<")
if finder.run():
_lib_dir = finder.getLibDir()
if _lib_dir and _lib_dir not in state['libs']:
state['ordered'].add((
len(state['ordered']) + 1, finder.getLibName(),
_lib_dir))
_parse_includes(state, env.File(finder.getIncPath()))
if _lib_dir and _lib_dir not in state['libs']:
state['libs'].add(_lib_dir)
state = _process_src_dir(state, _lib_dir)
return state
# end internal prototypes
deplibs = _get_dep_libs(src_dir)
env.Append(CPPPATH=[join("$BUILD_DIR", l) for (l, _) in deplibs])
# add automatically "utility" dir from the lib (Arduino issue)
env.Append(CPPPATH=[join("$BUILD_DIR", l, "utility") for (l, ld) in deplibs
if isdir(join(ld, "utility"))])
libs = []
for (libname, inc_dir) in reversed(deplibs):
lib = env.BuildLibrary(
join("$BUILD_DIR", libname), inc_dir)
env.Clean(libname, lib)
libs.append(lib)
return libs
def ConvertInoToCpp(env):
def delete_tmpcpp(files):
for f in files:
remove(f)
tmpcpp = []
items = (env.Glob(join("$PROJECT_DIR", "src", "*.ino")) +
env.Glob(join("$PROJECT_DIR", "src", "*.pde")))
for item in items:
cppfile = item.get_path()[:-3] + "cpp"
if isfile(cppfile):
continue
ino_contents = item.get_text_contents()
re_includes = re.compile(r"^(#include\s+(?:\<|\")[^\r\n]+)",
re.M | re.I)
includes = re_includes.findall(ino_contents)
prototypes = re.findall(
r"""^(
(?:\s*[a-z_\d]+){1,2} # return type
\s+[a-z_\d]+\s* # name of prototype
\([a-z_,\.\*\&\[\]\s\d]*\) # args
)\s*\{ # must end with {
""",
ino_contents,
re.X | re.M | re.I
)
# print includes, prototypes
# disable previous includes
ino_contents = re_includes.sub(r"//\1", ino_contents)
# create new temporary C++ valid file
with open(cppfile, "w") as f:
f.write("#include <Arduino.h>\n")
if includes:
f.write("%s\n" % "\n".join(includes))
if prototypes:
f.write("%s;\n" % ";\n".join(prototypes))
f.write("#line 1 \"%s\"\n" % basename(item.path))
f.write(ino_contents)
tmpcpp.append(cppfile)
if tmpcpp:
atexit.register(delete_tmpcpp, tmpcpp)
def FlushSerialBuffer(env, port):
s = Serial(env.subst(port))
s.flushInput()
s.setDTR(False)
s.setRTS(False)
sleep(0.1)
s.setDTR(True)
s.setRTS(True)
s.close()
def TouchSerialPort(env, port, baudrate):
s = Serial(port=env.subst(port), baudrate=baudrate)
s.close()
if platform.system() != "Darwin":
sleep(0.3)
def WaitForNewSerialPort(_, before):
new_port = None
elapsed = 0
while elapsed < 10:
now = [i['port'] for i in get_serialports()]
diff = list(set(now) - set(before))
if diff:
new_port = diff[0]
break
before = now
sleep(0.25)
elapsed += 0.25
if not new_port:
Exit("Error: Couldn't find a board on the selected port. "
"Check that you have the correct port selected. "
"If it is correct, try pressing the board's reset "
"button after initiating the upload.")
return new_port
def exists(_):
return True
def generate(env):
env.AddMethod(ProcessGeneral)
env.AddMethod(BuildFirmware)
env.AddMethod(GlobCXXFiles)
env.AddMethod(VariantDirRecursive)
env.AddMethod(BuildLibrary)
env.AddMethod(BuildDependentLibraries)
env.AddMethod(ConvertInoToCpp)
env.AddMethod(FlushSerialBuffer)
env.AddMethod(TouchSerialPort)
env.AddMethod(WaitForNewSerialPort)
return env
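# Rough usage sketch from an SCons build script; the variable names below are
# illustrative and not defined in this module:
#
#   env.Tool("platformio")            # calls generate(env) above
#   corelibs = env.ProcessGeneral()
#   firmware = env.BuildFirmware(corelibs)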
| mit | -2,971,078,524,403,639,300 | 30.15407 | 79 | 0.528599 | false |
dlarochelle/extractor_train | tests/test_forms.py | 1 | 2252 | # -*- coding: utf-8 -*-
import pytest
from extractor_train.public.forms import LoginForm
from extractor_train.user.forms import RegisterForm
from .factories import UserFactory
class TestRegisterForm:
def test_validate_user_already_registered(self, user):
# Enters username that is already registered
form = RegisterForm(username=user.username, email='[email protected]',
password='example', confirm='example')
assert form.validate() is False
assert 'Username already registered' in form.username.errors
def test_validate_email_already_registered(self, user):
# enters email that is already registered
form = RegisterForm(username='unique', email=user.email,
password='example', confirm='example')
assert form.validate() is False
assert 'Email already registered' in form.email.errors
def test_validate_success(self, db):
form = RegisterForm(username='newusername', email='[email protected]',
password='example', confirm='example')
assert form.validate() is True
class TestLoginForm:
def test_validate_success(self, user):
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='example')
assert form.validate() is True
assert form.user == user
def test_validate_unknown_username(self, db):
form = LoginForm(username='unknown', password='example')
assert form.validate() is False
assert 'Unknown username' in form.username.errors
assert form.user is None
def test_validate_invalid_password(self, user):
user.set_password('example')
user.save()
form = LoginForm(username=user.username, password='wrongpassword')
assert form.validate() is False
assert 'Invalid password' in form.password.errors
def test_validate_inactive_user(self, user):
user.active = False
user.set_password('example')
user.save()
# Correct username and password, but user is not activated
form = LoginForm(username=user.username, password='example')
assert form.validate() is False
assert 'User not activated' in form.username.errors | bsd-3-clause | -4,317,048,631,488,596,000 | 35.934426 | 74 | 0.672735 | false |
aniversarioperu/django-manolo | scrapers/manolo_scraper/spiders/minem.py | 2 | 3163 | # -*- coding: utf-8 -*-
import math
import scrapy
from .spiders import ManoloBaseSpider
from ..items import ManoloItem
from ..item_loaders import ManoloItemLoader
from ..utils import make_hash, get_dni
# url: http://intranet.minem.gob.pe/GESTION/visitas_pcm
class MinemSpider(ManoloBaseSpider):
name = 'minem'
allowed_domains = ['http://intranet.minem.gob.pe']
NUMBER_OF_PAGES_PER_PAGE = 20
def initial_request(self, date):
date_str = date.strftime("%d/%m/%Y")
request = self.make_form_request(date_str, self.parse_pages, 1)
return request
def make_form_request(self, date_str, callback, page_number):
page_url = 'http://intranet.minem.gob.pe/GESTION/visitas_pcm/Busqueda/DMET_html_SelectMaestraBuscador'
start_from_record = self.NUMBER_OF_PAGES_PER_PAGE * (page_number - 1) + 1
params = {
'TXT_FechaVisita_Inicio': date_str,
'Ls_Pagina': str(start_from_record),
'Li_ResultadoPorPagina': '20',
'FlgBuscador': '1',
'Ls_ParametrosBuscador': 'TXT_FechaVisita_Inicio=10/08/2015|Ls_Pagina={}'.format(start_from_record),
}
request = scrapy.FormRequest(url=page_url, formdata=params,
meta={'date': date_str},
dont_filter=True,
callback=callback)
return request
def parse_pages(self, response):
total_of_records = response.css('#HID_CantidadRegistros').xpath('./@value').extract_first(default=1)
total_of_records = int(total_of_records)
number_of_pages = self.get_number_of_pages(total_of_records)
for page in range(1, number_of_pages + 1):
request = self.make_form_request(response.meta['date'], self.parse, page)
yield request
def get_number_of_pages(self, total_of_records):
return int(math.ceil(total_of_records / float(self.NUMBER_OF_PAGES_PER_PAGE)))
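    # Quick arithmetic check (hypothetical numbers): with 20 records per page,
    # 45 total records give ceil(45 / 20.0) = 3 result pages to request.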
def parse(self, response):
date = self.get_date_item(response.meta['date'], '%d/%m/%Y')
rows = response.xpath("//tr")
for row in rows:
l = ManoloItemLoader(item=ManoloItem(), selector=row)
l.add_value('institution', 'minem')
l.add_value('date', date)
l.add_xpath('full_name', './td[3]/center/text()')
l.add_xpath('entity', './td[5]/center/text()')
l.add_xpath('reason', './td[6]/center/text()')
l.add_xpath('host_name', './td[7]/center/text()')
l.add_xpath('office', './td[8]/center/text()')
l.add_xpath('meeting_place', './td[9]/center/text()')
l.add_xpath('time_start', './td[10]/center/text()')
l.add_xpath('time_end', './td[11]/center/text()')
document_identity = row.xpath('td[4]/center/text()').extract_first(default='')
id_document, id_number = get_dni(document_identity)
l.add_value('id_document', id_document)
l.add_value('id_number', id_number)
item = l.load_item()
item = make_hash(item)
yield item
| bsd-3-clause | 4,511,216,025,104,690,000 | 35.356322 | 112 | 0.579829 | false |
SurfasJones/djcmsrc3 | venv/lib/python2.7/site-packages/setuptools/command/test.py | 1 | 6484 | from setuptools import Command
from distutils.errors import DistutilsOptionError
import sys
from pkg_resources import (resource_listdir, resource_exists,
normalize_path, working_set, _namespace_packages, add_activation_listener,
require, EntryPoint)
from unittest import TestLoader
class ScanningLoader(TestLoader):
def loadTestsFromModule(self, module):
"""Return a suite of all tests cases contained in the given module
If the module is a package, load tests from all the modules in it.
If the module has an ``additional_tests`` function, call it and add
the return value to the tests.
"""
tests = []
if module.__name__ != 'setuptools.tests.doctest': # ugh
tests.append(TestLoader.loadTestsFromModule(self, module))
if hasattr(module, "additional_tests"):
tests.append(module.additional_tests())
if hasattr(module, '__path__'):
for file in resource_listdir(module.__name__, ''):
if file.endswith('.py') and file != '__init__.py':
submodule = module.__name__ + '.' + file[:-3]
else:
if resource_exists(module.__name__, file + '/__init__.py'):
submodule = module.__name__+'.'+file
else:
continue
tests.append(self.loadTestsFromName(submodule))
if len(tests) != 1:
return self.suiteClass(tests)
else:
return tests[0] # don't create a nested suite for only one return
class test(Command):
"""Command to run unit tests after in-place build"""
description = "run unit tests after in-place build"
user_options = [
('test-module=','m', "Run 'test_suite' in specified module"),
('test-suite=','s',
"Test suite to run (e.g. 'some_module.test_suite')"),
('test-runner=', 'r', "Test runner to use"),
]
def initialize_options(self):
self.test_suite = None
self.test_module = None
self.test_loader = None
self.test_runner = None
def finalize_options(self):
if self.test_suite is None:
if self.test_module is None:
self.test_suite = self.distribution.test_suite
else:
self.test_suite = self.test_module + ".test_suite"
elif self.test_module:
raise DistutilsOptionError(
"You may specify a module or a suite, but not both"
)
self.test_args = [self.test_suite]
if self.verbose:
self.test_args.insert(0,'--verbose')
if self.test_loader is None:
self.test_loader = getattr(self.distribution, 'test_loader', None)
if self.test_loader is None:
self.test_loader = "setuptools.command.test:ScanningLoader"
if self.test_runner is None:
self.test_runner = getattr(self.distribution, 'test_runner', None)
def with_project_on_sys_path(self, func):
with_2to3 = (
sys.version_info >= (3,)
and getattr(self.distribution, 'use_2to3', False)
)
if with_2to3:
# If we run 2to3 we can not do this inplace:
# Ensure metadata is up-to-date
self.reinitialize_command('build_py', inplace=0)
self.run_command('build_py')
bpy_cmd = self.get_finalized_command("build_py")
build_path = normalize_path(bpy_cmd.build_lib)
# Build extensions
self.reinitialize_command('egg_info', egg_base=build_path)
self.run_command('egg_info')
self.reinitialize_command('build_ext', inplace=0)
self.run_command('build_ext')
else:
# Without 2to3 inplace works fine:
self.run_command('egg_info')
# Build extensions in-place
self.reinitialize_command('build_ext', inplace=1)
self.run_command('build_ext')
ei_cmd = self.get_finalized_command("egg_info")
old_path = sys.path[:]
old_modules = sys.modules.copy()
try:
sys.path.insert(0, normalize_path(ei_cmd.egg_base))
working_set.__init__()
add_activation_listener(lambda dist: dist.activate())
require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
func()
finally:
sys.path[:] = old_path
sys.modules.clear()
sys.modules.update(old_modules)
working_set.__init__()
def run(self):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
if self.test_suite:
cmd = ' '.join(self.test_args)
if self.dry_run:
self.announce('skipping "unittest %s" (dry run)' % cmd)
else:
self.announce('running "unittest %s"' % cmd)
self.with_project_on_sys_path(self.run_tests)
def run_tests(self):
import unittest
# Purge modules under test from sys.modules. The test loader will
# re-import them from the build location. Required when 2to3 is used
# with namespace packages.
if sys.version_info >= (3,) and getattr(self.distribution, 'use_2to3', False):
module = self.test_args[-1].split('.')[0]
if module in _namespace_packages:
del_modules = []
if module in sys.modules:
del_modules.append(module)
module += '.'
for name in sys.modules:
if name.startswith(module):
del_modules.append(name)
list(map(sys.modules.__delitem__, del_modules))
unittest.main(
None, None, [unittest.__file__]+self.test_args,
testLoader=self._resolve_as_ep(self.test_loader),
testRunner=self._resolve_as_ep(self.test_runner),
)
@staticmethod
def _resolve_as_ep(val):
"""
Load the indicated attribute value, called, as a as if it were
specified as an entry point.
"""
if val is None:
return
parsed = EntryPoint.parse("x=" + val)
return parsed.load(require=False)()
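# Typical invocation sketch (project names are assumptions): declare
# test_suite="mypkg.tests" in setup.py, then run `python setup.py test`;
# the command builds the project in place and loads that suite with
# ScanningLoader before handing it to unittest.main().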
| mit | -4,802,770,465,874,498,000 | 35.840909 | 86 | 0.566009 | false |
dderichs/piradio | piradio/observer.py | 1 | 5042 | # -*- coding: utf-8 -*-
"""
* Copyright (C) 2009, Michael "Svedrin" Ziegler <[email protected]>
*
* Omikron is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This package is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
"""
from copy import deepcopy
class OperationCanceled( Exception ):
""" Can be fired by a listener function to cancel the signal. """
pass
class Listener( object ):
""" Prepares args for and calls the observer function. """
def __init__( self, func, args, kwargs ):
""" Creates a listener associated with func, and stores base args to be
passed to func when the event is fired.
"""
self.func = func
self.args = args
self.kwargs = kwargs
def __call__( self, *args, **kwargs ):
""" Call the associated listener function and merge our args to the base args. """
origkw = deepcopy( self.kwargs )
origkw.update( kwargs )
return self.func( *( self.args + args ), **origkw )
class Signal( object ):
""" Handles calling the Listener functions and canceling of events. """
def __init__( self, cancel = True ):
self.cancel = cancel
self.exception = None
self.listeners = []
def __call__( self, *args, **kwargs ):
""" Call observers. If this signal can be canceled and one of the listeners
returns False, cancel execution and return False, otherwise return True.
"""
self.exception = None
for lst in self.listeners:
try:
ret = lst( *args, **kwargs )
except OperationCanceled, instance:
self.exception = instance
return False
else:
if self.cancel and ret == False:
return False
return True
def addListener( self, func, *args, **kwargs ):
""" Add func as a listener to this signal. """
assert callable( func ), "Listeners must be callable!"
self.listeners.append( Listener( func, args, kwargs ) )
def removeListener( self, func ):
""" Remove the first listener that is associated to func. """
entry = None
for lst in self.listeners:
if lst.func == func:
entry = lst
break
if entry:
self.listeners.remove( entry )
return entry
class Dispatcher( object ):
""" Keeps track of existing events and handles firing. """
def __init__( self ):
self.signals = {}
def addEvent( self, event, cancel = True ):
""" Add a Signal handler for an event.
This does NOT check if another handler already exists. If so, the old one will be overwritten.
"""
self.signals[event] = Signal( cancel )
def removeEvent( self, event ):
""" Remove the Signal handler for the given event. """
sig = self.signals[event]
del self.signals[event]
return sig
def fireEvent( self, event, *args, **kwargs ):
""" Fire an event. """
sig = self.signals[event]
return sig( *args, **kwargs )
def hasEvent( self, event ):
""" Return True if an event of the given name is known. """
return event in self.signals
def __getitem__( self, event ):
""" Get an event handler. """
return self.signals[event]
def __setitem__( self, event, cancel ):
""" Shortcut for addEvent. """
self.addEvent( event, cancel )
def __contains__( self, event ):
""" Shortcut for hasEvent. """
return self.hasEvent( event )
def __delitem__( self, event ):
""" Shortcut for removeEvent. """
return self.removeEvent( event )
def __call__( self, event, *args, **kwargs ):
""" Shortcut for fireEvent. """
return self.fireEvent( event, *args, **kwargs )
if __name__ == '__main__':
dis = Dispatcher()
dis.addEvent( 'ohai' )
def myfunc( *args, **kwargs ):
print 'myfunc haz been called!'
print args
print kwargs
return kwargs['drei'] == 3
def check( *args, **kwargs ):
print 'check haz been called!'
print args
print kwargs
print "drei has been 3!"
dis['ohai'].addListener( myfunc, 1, 2, 3, eins=1, zwei=2, drei=3 )
dis['ohai'].addListener( check )
if dis('ohai', 3, 4, 5, eins=1, zwei=2, drei=5, vier=6 ):
print "success!"
else:
print "fail"
if dis('ohai', 3, 4, 5 ):
print "success!"
else:
print "fail"
dis.addEvent( "kthxbai" )
dis( "kthxbai" )
| gpl-3.0 | -8,911,325,307,187,469,000 | 29.932515 | 106 | 0.576359 | false |
saskartt/P4UL | pyNetCDF/syncMaskWithNetCdf.py | 1 | 5983 | #!/usr/bin/env python
from netcdfTools import *
from mapTools import *
from utilities import writeLog
import sys
import argparse
import numpy as np
'''
Description:
Author: Mikko Auvinen
[email protected]
University of Helsinki &
Finnish Meteorological Institute
'''
#============= functions ==================================#
def domainBoundsAndResolution( xf, yf ):
xb = np.array([ np.min(xf), np.max(xf) ])
yb = np.array([ np.min(yf), np.max(yf) ])
dx = (xb[1]-xb[0])/float(len(xf)-1)
dy = (yb[1]-yb[0])/float(len(yf)-1)
return xb, yb, dx, dy
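# Small sketch of the helper above with made-up coordinate arrays:
# xf = np.array([0., 2., 4., 6.]) and yf = np.array([0., 1., 2.]) yield
# xb = [0., 6.], yb = [0., 2.], dx = 2.0 and dy = 1.0.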
#============ Main ====================================#
parser = argparse.ArgumentParser(prog='syncMaskWithNetCDF.py')
parser.add_argument("-fn", "--fileNetCDF",type=str, help="Name of input NETCDF file.")
parser.add_argument("-fm", "--fileMask",type=str, help="Name of input 2D Mask file.")
parser.add_argument("-d", "--decomp", action="store_true", default=False, \
help="Decomposed into mean (V_m) and fluctuating (V^prime) components.")
parser.add_argument("-dd", "--decompOnly", help="Output V_m and V^prime components only.",\
action="store_true", default=False)
parser.add_argument("-c", "--coarse", help="Coarsening level for the NETCDF data. Int > 1.",\
type=int, default=1)
args = parser.parse_args()
writeLog( parser, args )
#==========================================================#
# Initial renaming operations and variable declarations
fnc = args.fileNetCDF
fmsk = args.fileMask
fout = fnc.strip('.nc')+'-Msk.nc'
cl = abs(int(args.coarse))
# Boolean switch for the decomposition option.
decompOn = args.decomp or args.decompOnly
'''
Establish two boolean variables which indicate whether the created variable is an
independent or dependent variable in function createNetcdfVariable().
'''
parameter = True; variable = False
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = #
'''
Create a NETCDF input dataset (ds), and its associated lists of dependent (varList)
and independent (paramList) variables.
'''
ds, varList, paramList = netcdfDataset(fnc)
# Create a NETCDF output dataset (dso) for writing out the data.
dso = netcdfOutputDataset( fout )
'''
Read cell center coordinates and time.
Create the output independent variables right away and empty memory.
'''
time, time_dims = read1DVariableFromDataset('time', ds, paramList, 0, 0, 1 ) # All values.
tv = createNetcdfVariable( dso, time,'time', len(time),'s','f4',('time',), parameter )
time = None
x, x_dims = read1DVariableFromDataset( 'x',ds, paramList, 0, 0, cl ) # All values.
print(' x_dims = {} '.format(x_dims))
x[np.isnan(x)] = 0. # Special treatment.
xv = createNetcdfVariable( dso, x , 'x' , len(x) , 'm', 'f4', ('x',) , parameter )
y, y_dims = read1DVariableFromDataset( 'y',ds, paramList, 0, 0, cl )
print(' y_dims = {} '.format(y_dims))
y[np.isnan(y)] = 0. # Special treatment.
yv = createNetcdfVariable( dso, y , 'y' , len(y) , 'm', 'f4', ('y',) , parameter )
# Determine the NETCDF domain bounds and resolution.
xb, yb, dx, dy = domainBoundsAndResolution( x, y )
x = None; y = None # Clear memory ASAP.
z, z_dims = read1DVariableFromDataset( 'z',ds, paramList, 0, 0, cl )
print(' z_dims = {} '.format(z_dims))
zv = createNetcdfVariable( dso, z , 'z' , len(z) , 'm', 'f4', ('z',) , parameter )
z = None
# - - - - First, read u-component - - - - - - - - - -
u, u_dims = read3DVariableFromDataset( 'u', ds, varList, 0, 0, cl ) # All values.
print(' u_dims = {} '.format(u_dims))
yx_dims = np.array(u_dims[2:])
z_dim = u_dims[1]; t_dim = u_dims[0]
'''
At this point the mask raster data can be treated because
it needs one scalar NETCDF variable to determine the required
index bounds and coarsening level.
'''
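# Illustration (hypothetical numbers, not from an actual run): with a NETCDF
# resolution of dx = 2.0 m and a raster resolution of dr = 1.0 m, the coarsening
# factor computed below is clr = int(2.0/1.0) = 2, i.e. every second raster cell
# is kept when extracting the sub-region that matches the NETCDF yx-domain.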
# Read the mask raster info.
Rdict = readNumpyZTile(fmsk)
R = Rdict['R']
R_dims = np.array(np.shape(R))
ROrig = Rdict['GlobOrig']
dPx = Rdict['dPx']
Rdict = None
dr = entry2Int( dPx ) # Resolution as a single number
clr = int( dx/dr ) # Raster to NETCDF coarsening factor
print(' Orig mask dims = {} '.format(R_dims))
# We need the y_max value of the raster data to determine the reversed j-indices.
ybr_max = R_dims[0]*dr
print(' ybr_max = {}'.format(ybr_max))
# Determine the index range for the raster data to match the NETCDF (sub)domain.
# NOTE: dy is subtracted to make first index 0-based.
irx = np.array([ int(xb[0]-dy) , int(xb[1]) ])/ dr # xb[0]:=min, xb[1]:=max
jry = np.array([ int(ybr_max-yb[1]-dy), int(ybr_max-yb[0]) ])/ dr
print(' irx = {}, jry = {}'.format(irx, jry))
# Create sub-region of the raster domain. This should match the NETCDF yx-domain.
Rsub = R[jry[0]:jry[1]:clr, irx[0]:irx[1]:clr]
Rsub_dims = np.shape( Rsub )
if( not (yx_dims == Rsub_dims).all() ):
print(' xy-dimensions do not match: nc={} vs. r={}. Exiting ...'.format(yx_dims, Rsub_dims))
sys.exit(1)
# Create mask array m(z,y,x)
m = np.zeros( u_dims[1:], 'uint8') # u_dims[1:] := (z_dim, y_dim, x_dim)
# Copy raster data onto each z-plane. NOTE: y-direction is reversed.
for i in xrange(z_dim):
m[i,:,:] = Rsub[::-1,:]
# The mask data R, by default, may contain values 0 and >0. It has to be converted into
# a proper mask data [0,1]:
m[m>0] = 1
mv = createNetcdfVariable( dso, m, 'mask', 1, ' ', 'i4',('z','y','x',) , variable )
m = None
# To finalize, the NETCDF variables need to be copied to the new file.
uv = createNetcdfVariable( dso, u, 'u', t_dim, 'm/s', 'f4',('time','z','y','x',) , variable )
u = None
v, v_dims = read3DVariableFromDataset( 'v', ds, varList, 0, 0, cl ) # All values.
vv = createNetcdfVariable( dso, v, 'v', t_dim, 'm/s', 'f4',('time','z','y','x',) , variable )
v = None
w, w_dims = read3DVariableFromDataset( 'w', ds, varList, 0, 0, cl ) # All values.
wv = createNetcdfVariable( dso, w, 'w', t_dim, 'm/s', 'f4',('time','z','y','x',) , variable )
w = None
# - - - - Done , finalize the output - - - - - - - - - -
netcdfWriteAndClose( dso )
| mit | -1,528,385,973,083,507,700 | 34.402367 | 94 | 0.623767 | false |
murex/murex-coding-dojo | Paris/2015/2015-05-21-Power4-Python-Randori/power4_test.py | 1 | 1373 | import power4
#def test_final():
# assert power4.isOver([
# [0, 1, 2, 1, 2, 1, 1],
# [0, 2, 1, 1, 1, 2, 2],
# [0, 1, 2, 2, 2, 1, 1],
# [1, 2, 1, 1, 1, 2, 2],
# [2, 1, 1, 2, 2, 2, 1],
# [1, 2, 2, 1, 2, 1, 2],
# ]) == True
def test_p1_win_one_line():
assert power4.isOver([[0, 1, 1, 1, 1, 2, 1]]) == True
def test_p2_win_one_line():
assert power4.isOver([[0, 1, 2, 2, 2, 2, 1]]) == True
def test_draw_one_line():
assert power4.isOver([[0, 1, 2, 1, 2, 2, 1]]) == False
def test_p1_win_with_two_lines():
assert power4.isOver([
[0, 1, 2, 1, 2, 2, 1],
[0, 1, 1, 1, 1, 2, 1]
]) == True
def test_p1_wins_one_column():
assert power4.isOver([
[0],
[0],
[1],
[1],
[1],
[1]
]) == True
def test_p2_wins_one_column():
assert power4.isOver([
[0],
[0],
[2],
[2],
[2],
[2]
]) == True
def test_p1_wins_second_column():
assert power4.isOver([
[0,0],
[0,0],
[0,1],
[2,1],
[2,1],
[2,1]
]) == True
def test_diagonal():
assert power4.isOver([
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 1, 2],
[0, 0, 0, 0, 1, 2, 1],
[0, 0, 0, 1, 2, 1, 2],
]) == True | mit | 4,722,288,057,178,266,000 | 23.105263 | 58 | 0.383831 | false |
yuma-m/pychord | test/test_progression.py | 1 | 3528 | # -*- coding: utf-8 -*-
import unittest
from pychord import Chord, ChordProgression
class TestChordProgressionCreations(unittest.TestCase):
def test_none(self):
cp = ChordProgression()
self.assertEqual(cp.chords, [])
def test_one_chord(self):
c = Chord("C")
cp = ChordProgression(c)
self.assertEqual(cp.chords, [c])
def test_one_chord_str(self):
c = "C"
cp = ChordProgression(c)
self.assertEqual(cp.chords, [Chord(c)])
def test_one_chord_list(self):
c = Chord("C")
cp = ChordProgression([c])
self.assertEqual(cp.chords, [c])
def test_one_chord_list_str(self):
c = "C"
cp = ChordProgression([c])
self.assertEqual(cp.chords, [Chord(c)])
def test_multiple_chords(self):
c1 = Chord("C")
c2 = Chord("D")
cp = ChordProgression([c1, c2])
self.assertEqual(cp.chords, [c1, c2])
def test_multiple_chords_str(self):
c1 = "C"
c2 = "D"
cp = ChordProgression([c1, c2])
self.assertEqual(cp.chords, [Chord(c1), Chord(c2)])
class TestChordProgressionFunctions(unittest.TestCase):
def test_append(self):
cp = ChordProgression(["C", "D", "E"])
cp.append("F")
self.assertEqual(len(cp), 4)
self.assertEqual(cp.chords[-1], Chord("F"))
def test_insert(self):
cp = ChordProgression(["C", "D", "E"])
cp.insert(0, "F")
self.assertEqual(len(cp), 4)
self.assertEqual(cp.chords[0], Chord("F"))
def test_pop(self):
cp = ChordProgression(["C", "D", "E"])
c = cp.pop()
self.assertEqual(len(cp), 2)
self.assertEqual(c, Chord("E"))
def test_transpose(self):
cp = ChordProgression(["C", "F", "G"])
cp.transpose(3)
self.assertEqual(cp.chords, [Chord("Eb"), Chord("Ab"), Chord("Bb")])
def test_add(self):
cp1 = ChordProgression(["C", "F", "G"])
cp2 = ChordProgression(["Am", "Em"])
cp = cp1 + cp2
self.assertEqual(len(cp), 5)
self.assertEqual(cp.chords, [Chord("C"), Chord("F"), Chord("G"), Chord("Am"), Chord("Em")])
def test_self_add(self):
cp1 = ChordProgression(["C", "F", "G"])
cp2 = ChordProgression(["Am", "Em"])
cp1 += cp2
self.assertEqual(len(cp1), 5)
self.assertEqual(cp1.chords, [Chord("C"), Chord("F"), Chord("G"), Chord("Am"), Chord("Em")])
def test_get_item(self):
cp = ChordProgression(["C", "F", "G"])
self.assertEqual(cp[0], Chord("C"))
self.assertEqual(cp[1], Chord("F"))
self.assertEqual(cp[-1], Chord("G"))
def test_set_item(self):
cp = ChordProgression(["C", "F", "G"])
cp[1] = Chord("E")
self.assertEqual(cp[0], Chord("C"))
self.assertEqual(cp[1], Chord("E"))
self.assertEqual(cp[2], Chord("G"))
self.assertEqual(len(cp), 3)
def test_slice(self):
cp = ChordProgression(["C", "F", "G"])
self.assertEqual(cp[0:1], [Chord("C")])
self.assertEqual(cp[1:], [Chord("F"), Chord("G")])
self.assertEqual(cp[0::2], [Chord("C"), Chord("G")])
def test_eq(self):
cp1 = ChordProgression(["C", "F", "G"])
cp2 = ChordProgression(["C", "F", "G"])
self.assertEqual(cp1, cp2)
self.assertIsNot(cp1, cp2)
def test_invalid_eq(self):
cp = ChordProgression(["C", "F", "G"])
with self.assertRaises(TypeError):
print(cp == 0)
| mit | -6,067,492,040,451,249,000 | 29.678261 | 100 | 0.541667 | false |
stackforge/python-openstacksdk | openstack/tests/functional/cloud/test_aggregate.py | 1 | 2091 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_aggregate
----------------------------------
Functional tests for `shade` aggregate resource.
"""
from openstack.tests.functional import base
class TestAggregate(base.BaseFunctionalTest):
def test_aggregates(self):
aggregate_name = self.getUniqueString()
availability_zone = self.getUniqueString()
self.addCleanup(self.cleanup, aggregate_name)
aggregate = self.operator_cloud.create_aggregate(aggregate_name)
aggregate_ids = [v['id']
for v in self.operator_cloud.list_aggregates()]
self.assertIn(aggregate['id'], aggregate_ids)
aggregate = self.operator_cloud.update_aggregate(
aggregate_name,
availability_zone=availability_zone
)
self.assertEqual(availability_zone, aggregate['availability_zone'])
aggregate = self.operator_cloud.set_aggregate_metadata(
aggregate_name,
{'key': 'value'}
)
self.assertIn('key', aggregate['metadata'])
aggregate = self.operator_cloud.set_aggregate_metadata(
aggregate_name,
{'key': None}
)
self.assertNotIn('key', aggregate['metadata'])
# Validate that we can delete by name
self.assertTrue(
self.operator_cloud.delete_aggregate(aggregate_name))
def cleanup(self, aggregate_name):
aggregate = self.operator_cloud.get_aggregate(aggregate_name)
if aggregate:
self.operator_cloud.delete_aggregate(aggregate['id'])
| apache-2.0 | -1,216,998,120,332,145,000 | 33.85 | 75 | 0.658058 | false |
geodynamics/pylith | tests/fullscale/linearelasticity/nofaults-2d/sheartraction_gendb.py | 1 | 2474 | #!/usr/bin/env nemesis
#
# ----------------------------------------------------------------------
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2021 University of California, Davis
#
# See LICENSE.md for license information.
#
# ----------------------------------------------------------------------
#
# @file tests/fullscale/linearelasticity/nofaults-2d/sheartraction_gendb.py
#
# @brief Python script to generate spatial database with displacement
# boundary conditions for the shear test. The traction boundary
# conditions use UniformDB in the .cfg file.
import numpy
class GenerateDB(object):
"""Python object to generate spatial database with displacement
boundary conditions for the shear test.
"""
def __init__(self):
"""Constructor.
"""
return
def run(self):
"""Generate the database.
"""
# Domain
x = numpy.arange(-4000.0, 4000.1, 1000.0)
y = numpy.arange(-4000.0, 4000.1, 1000.0)
npts = x.shape[0]
xx = x * numpy.ones((npts, 1), dtype=numpy.float64)
yy = y * numpy.ones((npts, 1), dtype=numpy.float64)
xy = numpy.zeros((npts**2, 2), dtype=numpy.float64)
xy[:, 0] = numpy.ravel(xx)
xy[:, 1] = numpy.ravel(numpy.transpose(yy))
from sheartraction_soln import AnalyticalSoln
soln = AnalyticalSoln()
disp = soln.displacement(xy)
from spatialdata.geocoords.CSCart import CSCart
cs = CSCart()
cs.inventory.spaceDim = 2
cs._configure()
data = {'points': xy,
'coordsys': cs,
'data_dim': 2,
'values': [{'name': "initial_amplitude_x",
'units': "m",
'data': disp[0, :, 0].ravel()},
{'name': "initial_amplitude_y",
'units': "m",
'data': disp[0, :, 1].ravel()}]}
from spatialdata.spatialdb.SimpleIOAscii import createWriter
io = createWriter("sheartraction_disp.spatialdb")
io.write(data)
return
# ======================================================================
if __name__ == "__main__":
GenerateDB().run()
# End of file
| mit | 6,571,733,550,315,437,000 | 29.925 | 75 | 0.525869 | false |
davidbgk/udata | udata/core/organization/api_fields.py | 1 | 4922 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from udata.api import api, fields, base_reference
from udata.core.badges.api import badge_fields
from .models import ORG_ROLES, DEFAULT_ROLE, MEMBERSHIP_STATUS, LOGO_SIZES
BIGGEST_LOGO_SIZE = LOGO_SIZES[0]
org_ref_fields = api.inherit('OrganizationReference', base_reference, {
'name': fields.String(description='The organization name', readonly=True),
'acronym': fields.String(description='The organization acronym'),
'uri': fields.UrlFor(
'api.organization', lambda o: {'org': o},
description='The organization API URI', readonly=True),
'slug': fields.String(
description='The organization string used as permalink',
required=True),
'page': fields.UrlFor(
'organizations.show', lambda o: {'org': o},
description='The organization web page URL', readonly=True),
'logo': fields.ImageField(original=True,
description='The organization logo URL'),
'logo_thumbnail': fields.ImageField(attribute='logo', size=BIGGEST_LOGO_SIZE,
description='The organization logo thumbnail URL. This is the square '
'({0}x{0}) and cropped version.'.format(BIGGEST_LOGO_SIZE)),
})
from udata.core.user.api_fields import user_ref_fields # noqa: required
request_fields = api.model('MembershipRequest', {
'id': fields.String(readonly=True),
'user': fields.Nested(user_ref_fields),
'created': fields.ISODateTime(
description='The request creation date', readonly=True),
'status': fields.String(
description='The current request status', required=True,
enum=MEMBERSHIP_STATUS.keys()),
'comment': fields.String(
description='A request comment from the user', required=True),
})
member_fields = api.model('Member', {
'user': fields.Nested(user_ref_fields),
'role': fields.String(
description='The member role in the organization', required=True,
enum=ORG_ROLES.keys(), default=DEFAULT_ROLE)
})
org_fields = api.model('Organization', {
'id': fields.String(
description='The organization identifier', required=True),
'name': fields.String(description='The organization name', required=True),
'acronym': fields.String(description='The organization acronym'),
'url': fields.String(description='The organization website URL'),
'slug': fields.String(
description='The organization string used as permalink',
required=True),
'description': fields.Markdown(
description='The organization description in Markdown', required=True),
'created_at': fields.ISODateTime(
description='The organization creation date', readonly=True),
'last_modified': fields.ISODateTime(
description='The organization last modification date', readonly=True),
'deleted': fields.ISODateTime(
description='The organization deletion date if deleted',
readonly=True),
'metrics': fields.Raw(
description='The organization metrics', readonly=True),
'uri': fields.UrlFor(
'api.organization', lambda o: {'org': o},
description='The organization API URI', readonly=True),
'page': fields.UrlFor(
'organizations.show', lambda o: {'org': o},
description='The organization page URL', readonly=True),
'logo': fields.ImageField(original=True,
description='The organization logo URL'),
'logo_thumbnail': fields.ImageField(attribute='logo', size=BIGGEST_LOGO_SIZE,
description='The organization logo thumbnail URL. This is the square '
'({0}x{0}) and cropped version.'.format(BIGGEST_LOGO_SIZE)),
'members': fields.List(
fields.Nested(member_fields, description='The organization members')),
'badges': fields.List(fields.Nested(badge_fields),
description='The organization badges',
readonly=True),
})
org_page_fields = api.model('OrganizationPage', fields.pager(org_fields))
org_suggestion_fields = api.model('OrganizationSuggestion', {
'id': fields.String(
description='The organization identifier', readonly=True),
'name': fields.String(description='The organization name', readonly=True),
'acronym': fields.String(
description='The organization acronym', readonly=True),
'slug': fields.String(
description='The organization permalink string', readonly=True),
'image_url': fields.String(
description='The organization logo URL', readonly=True),
'page': fields.UrlFor(
'organizations.show_redirect', lambda o: {'org': o['slug']},
description='The organization web page URL', readonly=True),
'score': fields.Float(
description='The internal match score', readonly=True),
})
refuse_membership_fields = api.model('RefuseMembership', {
'comment': fields.String(
description='The refusal comment.'),
})
| agpl-3.0 | 4,873,251,861,033,014,000 | 42.557522 | 81 | 0.677367 | false |
gsnbng/erpnext | erpnext/hr/utils.py | 1 | 17205 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
from frappe import _
from frappe.utils import formatdate, format_datetime, getdate, get_datetime, nowdate, flt, cstr, add_days, today
from frappe.model.document import Document
from frappe.desk.form import assign_to
from erpnext.hr.doctype.employee.employee import get_holiday_list_for_employee
class DuplicateDeclarationError(frappe.ValidationError): pass
class EmployeeBoardingController(Document):
'''
Create the project and the task for the boarding process
Assign to the concerned person and roles as per the onboarding/separation template
'''
def validate(self):
# remove the task if linked before submitting the form
if self.amended_from:
for activity in self.activities:
activity.task = ''
def on_submit(self):
# create the project for the given employee onboarding
project_name = _(self.doctype) + " : "
if self.doctype == "Employee Onboarding":
project_name += self.job_applicant
else:
project_name += self.employee
project = frappe.get_doc({
"doctype": "Project",
"project_name": project_name,
"expected_start_date": self.date_of_joining if self.doctype == "Employee Onboarding" else self.resignation_letter_date,
"department": self.department,
"company": self.company
}).insert(ignore_permissions=True)
self.db_set("project", project.name)
self.db_set("boarding_status", "Pending")
self.reload()
self.create_task_and_notify_user()
def create_task_and_notify_user(self):
# create the task for the given project and assign to the concerned person
for activity in self.activities:
if activity.task:
continue
task = frappe.get_doc({
"doctype": "Task",
"project": self.project,
"subject": activity.activity_name + " : " + self.employee_name,
"description": activity.description,
"department": self.department,
"company": self.company,
"task_weight": activity.task_weight
}).insert(ignore_permissions=True)
activity.db_set("task", task.name)
users = [activity.user] if activity.user else []
if activity.role:
user_list = frappe.db.sql_list('''select distinct(parent) from `tabHas Role`
where parenttype='User' and role=%s''', activity.role)
users = users + user_list
if "Administrator" in users:
users.remove("Administrator")
# assign the task the users
if users:
self.assign_task_to_users(task, set(users))
def assign_task_to_users(self, task, users):
for user in users:
args = {
'assign_to' : user,
'doctype' : task.doctype,
'name' : task.name,
'description' : task.description or task.subject,
'notify': self.notify_users_by_email
}
assign_to.add(args)
def on_cancel(self):
# delete task project
for task in frappe.get_all("Task", filters={"project": self.project}):
frappe.delete_doc("Task", task.name, force=1)
frappe.delete_doc("Project", self.project, force=1)
self.db_set('project', '')
for activity in self.activities:
activity.db_set("task", "")
@frappe.whitelist()
def get_onboarding_details(parent, parenttype):
return frappe.get_all("Employee Boarding Activity",
fields=["activity_name", "role", "user", "required_for_employee_creation", "description", "task_weight"],
filters={"parent": parent, "parenttype": parenttype},
order_by= "idx")
@frappe.whitelist()
def get_boarding_status(project):
status = 'Pending'
if project:
doc = frappe.get_doc('Project', project)
if flt(doc.percent_complete) > 0.0 and flt(doc.percent_complete) < 100.0:
status = 'In Process'
elif flt(doc.percent_complete) == 100.0:
status = 'Completed'
return status
def set_employee_name(doc):
if doc.employee and not doc.employee_name:
doc.employee_name = frappe.db.get_value("Employee", doc.employee, "employee_name")
def update_employee(employee, details, date=None, cancel=False):
internal_work_history = {}
for item in details:
fieldtype = frappe.get_meta("Employee").get_field(item.fieldname).fieldtype
new_data = item.new if not cancel else item.current
if fieldtype == "Date" and new_data:
new_data = getdate(new_data)
elif fieldtype =="Datetime" and new_data:
new_data = get_datetime(new_data)
setattr(employee, item.fieldname, new_data)
if item.fieldname in ["department", "designation", "branch"]:
internal_work_history[item.fieldname] = item.new
if internal_work_history and not cancel:
internal_work_history["from_date"] = date
employee.append("internal_work_history", internal_work_history)
return employee
@frappe.whitelist()
def get_employee_fields_label():
fields = []
for df in frappe.get_meta("Employee").get("fields"):
if df.fieldname in ["salutation", "user_id", "employee_number", "employment_type",
"holiday_list", "branch", "department", "designation", "grade",
"notice_number_of_days", "reports_to", "leave_policy", "company_email"]:
fields.append({"value": df.fieldname, "label": df.label})
return fields
@frappe.whitelist()
def get_employee_field_property(employee, fieldname):
if employee and fieldname:
field = frappe.get_meta("Employee").get_field(fieldname)
value = frappe.db.get_value("Employee", employee, fieldname)
options = field.options
if field.fieldtype == "Date":
value = formatdate(value)
elif field.fieldtype == "Datetime":
value = format_datetime(value)
return {
"value" : value,
"datatype" : field.fieldtype,
"label" : field.label,
"options" : options
}
else:
return False
def validate_dates(doc, from_date, to_date):
date_of_joining, relieving_date = frappe.db.get_value("Employee", doc.employee, ["date_of_joining", "relieving_date"])
if getdate(from_date) > getdate(to_date):
frappe.throw(_("To date can not be less than from date"))
elif getdate(from_date) > getdate(nowdate()):
frappe.throw(_("Future dates not allowed"))
elif date_of_joining and getdate(from_date) < getdate(date_of_joining):
frappe.throw(_("From date can not be less than employee's joining date"))
elif relieving_date and getdate(to_date) > getdate(relieving_date):
frappe.throw(_("To date can not greater than employee's relieving date"))
def validate_overlap(doc, from_date, to_date, company = None):
query = """
select name
from `tab{0}`
where name != %(name)s
"""
query += get_doc_condition(doc.doctype)
if not doc.name:
# hack! if name is null, it could cause problems with !=
doc.name = "New "+doc.doctype
overlap_doc = frappe.db.sql(query.format(doc.doctype),{
"employee": doc.get("employee"),
"from_date": from_date,
"to_date": to_date,
"name": doc.name,
"company": company
}, as_dict = 1)
if overlap_doc:
if doc.get("employee"):
exists_for = doc.employee
if company:
exists_for = company
throw_overlap_error(doc, exists_for, overlap_doc[0].name, from_date, to_date)
def get_doc_condition(doctype):
if doctype == "Compensatory Leave Request":
return "and employee = %(employee)s and docstatus < 2 \
and (work_from_date between %(from_date)s and %(to_date)s \
or work_end_date between %(from_date)s and %(to_date)s \
or (work_from_date < %(from_date)s and work_end_date > %(to_date)s))"
elif doctype == "Leave Period":
return "and company = %(company)s and (from_date between %(from_date)s and %(to_date)s \
or to_date between %(from_date)s and %(to_date)s \
or (from_date < %(from_date)s and to_date > %(to_date)s))"
def throw_overlap_error(doc, exists_for, overlap_doc, from_date, to_date):
msg = _("A {0} exists between {1} and {2} (").format(doc.doctype,
formatdate(from_date), formatdate(to_date)) \
+ """ <b><a href="#Form/{0}/{1}">{1}</a></b>""".format(doc.doctype, overlap_doc) \
+ _(") for {0}").format(exists_for)
frappe.throw(msg)
def get_employee_leave_policy(employee):
leave_policy = frappe.db.get_value("Employee", employee, "leave_policy")
if not leave_policy:
employee_grade = frappe.db.get_value("Employee", employee, "grade")
if employee_grade:
leave_policy = frappe.db.get_value("Employee Grade", employee_grade, "default_leave_policy")
if not leave_policy:
frappe.throw(_("Employee {0} of grade {1} have no default leave policy").format(employee, employee_grade))
if leave_policy:
return frappe.get_doc("Leave Policy", leave_policy)
else:
frappe.throw(_("Please set leave policy for employee {0} in Employee / Grade record").format(employee))
def validate_duplicate_exemption_for_payroll_period(doctype, docname, payroll_period, employee):
existing_record = frappe.db.exists(doctype, {
"payroll_period": payroll_period,
"employee": employee,
'docstatus': ['<', 2],
'name': ['!=', docname]
})
if existing_record:
frappe.throw(_("{0} already exists for employee {1} and period {2}")
.format(doctype, employee, payroll_period), DuplicateDeclarationError)
def validate_tax_declaration(declarations):
subcategories = []
for d in declarations:
if d.exemption_sub_category in subcategories:
frappe.throw(_("More than one selection for {0} not allowed").format(d.exemption_sub_category))
subcategories.append(d.exemption_sub_category)
def get_total_exemption_amount(declarations):
exemptions = frappe._dict()
for d in declarations:
exemptions.setdefault(d.exemption_category, frappe._dict())
category_max_amount = exemptions.get(d.exemption_category).max_amount
if not category_max_amount:
category_max_amount = frappe.db.get_value("Employee Tax Exemption Category", d.exemption_category, "max_amount")
exemptions.get(d.exemption_category).max_amount = category_max_amount
sub_category_exemption_amount = d.max_amount \
if (d.max_amount and flt(d.amount) > flt(d.max_amount)) else d.amount
exemptions.get(d.exemption_category).setdefault("total_exemption_amount", 0.0)
exemptions.get(d.exemption_category).total_exemption_amount += flt(sub_category_exemption_amount)
if category_max_amount and exemptions.get(d.exemption_category).total_exemption_amount > category_max_amount:
exemptions.get(d.exemption_category).total_exemption_amount = category_max_amount
total_exemption_amount = sum([flt(d.total_exemption_amount) for d in exemptions.values()])
return total_exemption_amount
def get_leave_period(from_date, to_date, company):
leave_period = frappe.db.sql("""
select name, from_date, to_date
from `tabLeave Period`
where company=%(company)s and is_active=1
and (from_date between %(from_date)s and %(to_date)s
or to_date between %(from_date)s and %(to_date)s
or (from_date < %(from_date)s and to_date > %(to_date)s))
""", {
"from_date": from_date,
"to_date": to_date,
"company": company
}, as_dict=1)
if leave_period:
return leave_period
def generate_leave_encashment():
''' Generates a draft leave encashment on allocation expiry '''
from erpnext.hr.doctype.leave_encashment.leave_encashment import create_leave_encashment
if frappe.db.get_single_value('HR Settings', 'auto_leave_encashment'):
leave_type = frappe.get_all('Leave Type', filters={'allow_encashment': 1}, fields=['name'])
leave_type=[l['name'] for l in leave_type]
leave_allocation = frappe.get_all("Leave Allocation", filters={
'to_date': add_days(today(), -1),
'leave_type': ('in', leave_type)
}, fields=['employee', 'leave_period', 'leave_type', 'to_date', 'total_leaves_allocated', 'new_leaves_allocated'])
create_leave_encashment(leave_allocation=leave_allocation)
def allocate_earned_leaves():
'''Allocate earned leaves to Employees'''
e_leave_types = frappe.get_all("Leave Type",
fields=["name", "max_leaves_allowed", "earned_leave_frequency", "rounding"],
filters={'is_earned_leave' : 1})
today = getdate()
divide_by_frequency = {"Yearly": 1, "Half-Yearly": 6, "Quarterly": 4, "Monthly": 12}
for e_leave_type in e_leave_types:
leave_allocations = frappe.db.sql("""select name, employee, from_date, to_date from `tabLeave Allocation` where %s
between from_date and to_date and docstatus=1 and leave_type=%s""", (today, e_leave_type.name), as_dict=1)
for allocation in leave_allocations:
leave_policy = get_employee_leave_policy(allocation.employee)
if not leave_policy:
continue
if not e_leave_type.earned_leave_frequency == "Monthly":
if not check_frequency_hit(allocation.from_date, today, e_leave_type.earned_leave_frequency):
continue
annual_allocation = frappe.db.get_value("Leave Policy Detail", filters={
'parent': leave_policy.name,
'leave_type': e_leave_type.name
}, fieldname=['annual_allocation'])
if annual_allocation:
earned_leaves = flt(annual_allocation) / divide_by_frequency[e_leave_type.earned_leave_frequency]
if e_leave_type.rounding == "0.5":
earned_leaves = round(earned_leaves * 2) / 2
else:
earned_leaves = round(earned_leaves)
allocation = frappe.get_doc('Leave Allocation', allocation.name)
new_allocation = flt(allocation.total_leaves_allocated) + flt(earned_leaves)
if new_allocation > e_leave_type.max_leaves_allowed and e_leave_type.max_leaves_allowed > 0:
new_allocation = e_leave_type.max_leaves_allowed
if new_allocation == allocation.total_leaves_allocated:
continue
allocation.db_set("total_leaves_allocated", new_allocation, update_modified=False)
create_additional_leave_ledger_entry(allocation, earned_leaves, today)
def create_additional_leave_ledger_entry(allocation, leaves, date):
''' Create leave ledger entry for leave types '''
allocation.new_leaves_allocated = leaves
allocation.from_date = date
allocation.unused_leaves = 0
allocation.create_leave_ledger_entry()
def check_frequency_hit(from_date, to_date, frequency):
'''Return True if current date matches frequency'''
from_dt = get_datetime(from_date)
to_dt = get_datetime(to_date)
from dateutil import relativedelta
rd = relativedelta.relativedelta(to_dt, from_dt)
months = rd.months
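	# Note: rd.months is only the month component of the delta (0-11, whole years
	# stripped off). Illustration with hypothetical dates: 2019-01-01 -> 2019-04-01
	# gives months == 3, so "Quarterly" hits while "Half-Yearly" and "Yearly" do not.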
if frequency == "Quarterly":
if not months % 3:
return True
elif frequency == "Half-Yearly":
if not months % 6:
return True
elif frequency == "Yearly":
if not months % 12:
return True
return False
def get_salary_assignment(employee, date):
assignment = frappe.db.sql("""
select * from `tabSalary Structure Assignment`
where employee=%(employee)s
and docstatus = 1
and %(on_date)s >= from_date order by from_date desc limit 1""", {
'employee': employee,
'on_date': date,
}, as_dict=1)
return assignment[0] if assignment else None
def get_sal_slip_total_benefit_given(employee, payroll_period, component=False):
total_given_benefit_amount = 0
query = """
select sum(sd.amount) as 'total_amount'
from `tabSalary Slip` ss, `tabSalary Detail` sd
where ss.employee=%(employee)s
and ss.docstatus = 1 and ss.name = sd.parent
and sd.is_flexible_benefit = 1 and sd.parentfield = "earnings"
and sd.parenttype = "Salary Slip"
and (ss.start_date between %(start_date)s and %(end_date)s
or ss.end_date between %(start_date)s and %(end_date)s
or (ss.start_date < %(start_date)s and ss.end_date > %(end_date)s))
"""
if component:
query += "and sd.salary_component = %(component)s"
sum_of_given_benefit = frappe.db.sql(query, {
'employee': employee,
'start_date': payroll_period.start_date,
'end_date': payroll_period.end_date,
'component': component
}, as_dict=True)
if sum_of_given_benefit and flt(sum_of_given_benefit[0].total_amount) > 0:
total_given_benefit_amount = sum_of_given_benefit[0].total_amount
return total_given_benefit_amount
def get_holidays_for_employee(employee, start_date, end_date):
holiday_list = get_holiday_list_for_employee(employee)
holidays = frappe.db.sql_list('''select holiday_date from `tabHoliday`
where
parent=%(holiday_list)s
and holiday_date >= %(start_date)s
and holiday_date <= %(end_date)s''', {
"holiday_list": holiday_list,
"start_date": start_date,
"end_date": end_date
})
holidays = [cstr(i) for i in holidays]
return holidays
@erpnext.allow_regional
def calculate_annual_eligible_hra_exemption(doc):
# Don't delete this method, used for localization
# Indian HRA Exemption Calculation
return {}
@erpnext.allow_regional
def calculate_hra_exemption_for_period(doc):
# Don't delete this method, used for localization
# Indian HRA Exemption Calculation
return {}
def get_previous_claimed_amount(employee, payroll_period, non_pro_rata=False, component=False):
total_claimed_amount = 0
query = """
select sum(claimed_amount) as 'total_amount'
from `tabEmployee Benefit Claim`
where employee=%(employee)s
and docstatus = 1
and (claim_date between %(start_date)s and %(end_date)s)
"""
if non_pro_rata:
query += "and pay_against_benefit_claim = 1"
if component:
query += "and earning_component = %(component)s"
sum_of_claimed_amount = frappe.db.sql(query, {
'employee': employee,
'start_date': payroll_period.start_date,
'end_date': payroll_period.end_date,
'component': component
}, as_dict=True)
if sum_of_claimed_amount and flt(sum_of_claimed_amount[0].total_amount) > 0:
total_claimed_amount = sum_of_claimed_amount[0].total_amount
return total_claimed_amount
| agpl-3.0 | -7,992,770,331,410,956,000 | 36.730263 | 123 | 0.706306 | false |
Bluejudy/blueblockd | lib/components/assets.py | 1 | 14553 | import os
import logging
import decimal
import base64
import json
from datetime import datetime
from lib import config, util, util_worldcoin
ASSET_MAX_RETRY = 3
D = decimal.Decimal
def parse_issuance(db, message, cur_block_index, cur_block):
if message['status'] != 'valid':
return
def modify_extended_asset_info(asset, description):
"""adds an asset to asset_extended_info collection if the description is a valid json link. or, if the link
is not a valid json link, will remove the asset entry from the table if it exists"""
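        # Illustration (hypothetical asset and URL, not from the original source):
        #   modify_extended_asset_info('MYASSET', 'example.com/myasset.json')
        #     -> upserts an asset_extended_info document with info_status 'needfetch'
        #   modify_extended_asset_info('MYASSET', 'just a plain text description')
        #     -> removes the asset_extended_info entry and any cached asset image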
if util.is_valid_url(description, suffix='.json', allow_no_protocol=True):
db.asset_extended_info.update({'asset': asset},
{'$set': {
'info_url': description,
'info_status': 'needfetch',
'fetch_info_retry': 0, # retry ASSET_MAX_RETRY times to fetch info from info_url
'info_data': {},
'errors': []
}}, upsert=True)
#^ valid info_status settings: needfetch, valid, invalid, error
#additional fields will be added later in events, once the asset info is pulled
else:
db.asset_extended_info.remove({ 'asset': asset })
#remove any saved asset image data
imagePath = os.path.join(config.DATA_DIR, config.SUBDIR_ASSET_IMAGES, asset + '.png')
if os.path.exists(imagePath):
os.remove(imagePath)
tracked_asset = db.tracked_assets.find_one(
{'asset': message['asset']}, {'_id': 0, '_history': 0})
#^ pulls the tracked asset without the _id and history fields. This may be None
if message['locked']: #lock asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'locked',
'locked': True,
},
"$push": {'_history': tracked_asset } }, upsert=False)
logging.info("Locking asset %s" % (message['asset'],))
elif message['transfer']: #transfer asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'transferred',
'owner': message['issuer'],
},
"$push": {'_history': tracked_asset } }, upsert=False)
logging.info("Transferring asset %s to address %s" % (message['asset'], message['issuer']))
elif message['quantity'] == 0 and tracked_asset is not None: #change description
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'changed_description',
'description': message['description'],
},
"$push": {'_history': tracked_asset } }, upsert=False)
modify_extended_asset_info(message['asset'], message['description'])
logging.info("Changing description for asset %s to '%s'" % (message['asset'], message['description']))
else: #issue new asset or issue addition qty of an asset
if not tracked_asset: #new issuance
tracked_asset = {
'_change_type': 'created',
'_at_block': cur_block_index, #the block ID this asset is current for
'_at_block_time': cur_block['block_time_obj'],
                #^ NOTE: if there are multiple tracked-asset updates in a single block for the same
                # asset, the last one with _at_block == that block id in the history array is the
                # final version for that asset at that block
'asset': message['asset'],
'owner': message['issuer'],
'description': message['description'],
'divisible': message['divisible'],
'locked': False,
'total_issued': message['quantity'],
'total_issued_normalized': util_worldcoin.normalize_quantity(message['quantity'], message['divisible']),
'_history': [] #to allow for block rollbacks
}
db.tracked_assets.insert(tracked_asset)
logging.info("Tracking new asset: %s" % message['asset'])
modify_extended_asset_info(message['asset'], message['description'])
else: #issuing additional of existing asset
assert tracked_asset is not None
db.tracked_assets.update(
{'asset': message['asset']},
{"$set": {
'_at_block': cur_block_index,
'_at_block_time': cur_block['block_time_obj'],
'_change_type': 'issued_more',
},
"$inc": {
'total_issued': message['quantity'],
'total_issued_normalized': util_worldcoin.normalize_quantity(message['quantity'], message['divisible'])
},
"$push": {'_history': tracked_asset} }, upsert=False)
logging.info("Adding additional %s quantity for asset %s" % (
util_worldcoin.normalize_quantity(message['quantity'], message['divisible']), message['asset']))
return True
def inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, new_status='error', errors=[]):
asset['fetch_info_retry'] += 1
asset['errors'] = errors
if asset['fetch_info_retry'] == max_retry:
asset['info_status'] = new_status
db.asset_extended_info.save(asset)
def sanitize_json_data(data):
data['asset'] = util.sanitize_eliteness(data['asset'])
if 'description' in data: data['description'] = util.sanitize_eliteness(data['description'])
if 'website' in data: data['website'] = util.sanitize_eliteness(data['website'])
if 'pgpsig' in data: data['pgpsig'] = util.sanitize_eliteness(data['pgpsig'])
return data
def process_asset_info(db, asset, info_data):
# sanity check
assert asset['info_status'] == 'needfetch'
assert 'info_url' in asset
assert util.is_valid_url(asset['info_url'], allow_no_protocol=True) #already validated in the fetch
errors = util.is_valid_json(info_data, config.ASSET_SCHEMA)
if not isinstance(info_data, dict) or 'asset' not in info_data:
errors.append('Invalid data format')
elif asset['asset'] != info_data['asset']:
errors.append('asset field does not match asset name')
if len(errors) > 0:
inc_fetch_retry(db, asset, new_status='invalid', errors=errors)
return (False, errors)
asset['info_status'] = 'valid'
#fetch any associated images...
    #TODO: parallelize this 2nd level asset image fetching ... (e.g. just compose a list here, and process it later on)
if 'image' in info_data:
info_data['valid_image'] = util.fetch_image(info_data['image'],
config.SUBDIR_ASSET_IMAGES, asset['asset'], fetch_timeout=5)
asset['info_data'] = sanitize_json_data(info_data)
db.asset_extended_info.save(asset)
return (True, None)
def fetch_all_asset_info(db):
assets = list(db.asset_extended_info.find({'info_status': 'needfetch'}))
asset_info_urls = []
def asset_fetch_complete_hook(urls_data):
logging.info("Enhanced asset info fetching complete. %s unique URLs fetched. Processing..." % len(urls_data))
for asset in assets:
logging.debug("Looking at asset %s: %s" % (asset, asset['info_url']))
if asset['info_url']:
info_url = ('http://' + asset['info_url']) \
if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url']
assert info_url in urls_data
if not urls_data[info_url][0]: #request was not successful
inc_fetch_retry(db, asset, max_retry=ASSET_MAX_RETRY, errors=[urls_data[info_url][1]])
logging.warn("Fetch for asset at %s not successful: %s (try %i of %i)" % (
info_url, urls_data[info_url][1], asset['fetch_info_retry'], ASSET_MAX_RETRY))
else:
result = process_asset_info(db, asset, urls_data[info_url][1])
if not result[0]:
logging.info("Processing for asset %s at %s not successful: %s" % (asset['asset'], info_url, result[1]))
else:
logging.info("Processing for asset %s at %s successful" % (asset['asset'], info_url))
#compose and fetch all info URLs in all assets with them
for asset in assets:
if not asset['info_url']: continue
if asset.get('disabled', False):
logging.info("ExtendedAssetInfo: Skipping disabled asset %s" % asset['asset'])
continue
#may or may not end with .json. may or may not start with http:// or https://
asset_info_urls.append(('http://' + asset['info_url']) \
if not asset['info_url'].startswith('http://') and not asset['info_url'].startswith('https://') else asset['info_url'])
asset_info_urls_str = ', '.join(asset_info_urls)
asset_info_urls_str = (asset_info_urls_str[:2000] + ' ...') if len(asset_info_urls_str) > 2000 else asset_info_urls_str #truncate if necessary
if len(asset_info_urls):
logging.info('Fetching enhanced asset info for %i assets: %s' % (len(asset_info_urls), asset_info_urls_str))
util.stream_fetch(asset_info_urls, asset_fetch_complete_hook,
fetch_timeout=10, max_fetch_size=4*1024, urls_group_size=20, urls_group_time_spacing=20,
per_request_complete_callback=lambda url, data: logging.debug("Asset info URL %s retrieved, result: %s" % (url, data)))
def get_escrowed_balances(addresses):
addresses_holder = ','.join(['?' for e in range(0,len(addresses))])
sql ='''SELECT (source || '_' || give_asset) AS source_asset, source AS address, give_asset AS asset, SUM(give_remaining) AS quantity
FROM orders
WHERE source IN ({}) AND status = ? AND give_asset != ?
GROUP BY source_asset'''.format(addresses_holder)
bindings = addresses + ['open', 'WDC']
results = util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT (tx0_address || '_' || forward_asset) AS source_asset, tx0_address AS address, forward_asset AS asset, SUM(forward_quantity) AS quantity
FROM order_matches
WHERE tx0_address IN ({}) AND forward_asset != ? AND status = ?
GROUP BY source_asset'''.format(addresses_holder)
bindings = addresses + ['WDC', 'pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT (tx1_address || '_' || backward_asset) AS source_asset, tx1_address AS address, backward_asset AS asset, SUM(backward_quantity) AS quantity
FROM order_matches
WHERE tx1_address IN ({}) AND backward_asset != ? AND status = ?
GROUP BY source_asset'''.format(addresses_holder)
bindings = addresses + ['WDC', 'pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT source AS address, '{}' AS asset, SUM(wager_remaining) AS quantity
FROM bets
WHERE source IN ({}) AND status = ?
GROUP BY address'''.format(config.XBJ, addresses_holder)
bindings = addresses + ['open']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx0_address AS address, '{}' AS asset, SUM(forward_quantity) AS quantity
FROM bet_matches
WHERE tx0_address IN ({}) AND status = ?
GROUP BY address'''.format(config.XBJ, addresses_holder)
bindings = addresses + ['pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx1_address AS address, '{}' AS asset, SUM(backward_quantity) AS quantity
FROM bet_matches
WHERE tx1_address IN ({}) AND status = ?
GROUP BY address'''.format(config.XBJ, addresses_holder)
bindings = addresses + ['pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT source AS address, '{}' AS asset, SUM(wager) AS quantity
FROM rps
WHERE source IN ({}) AND status = ?
GROUP BY address'''.format(config.XBJ, addresses_holder)
bindings = addresses + ['open']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx0_address AS address, '{}' AS asset, SUM(wager) AS quantity
FROM rps_matches
WHERE tx0_address IN ({}) AND status IN (?, ?, ?)
GROUP BY address'''.format(config.XBJ, addresses_holder)
bindings = addresses + ['pending', 'pending and resolved', 'resolved and pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
sql = '''SELECT tx1_address AS address, '{}' AS asset, SUM(wager) AS quantity
FROM rps_matches
WHERE tx1_address IN ({}) AND status IN (?, ?, ?)
GROUP BY address'''.format(config.XBJ, addresses_holder)
bindings = addresses + ['pending', 'pending and resolved', 'resolved and pending']
results += util.call_jsonrpc_api("sql", {'query': sql, 'bindings': bindings}, abort_on_error=True)['result']
escrowed_balances = {}
for order in results:
if order['address'] not in escrowed_balances:
escrowed_balances[order['address']] = {}
if order['asset'] not in escrowed_balances[order['address']]:
escrowed_balances[order['address']][order['asset']] = 0
escrowed_balances[order['address']][order['asset']] += order['quantity']
return escrowed_balances
| mit | -3,977,259,987,266,524,000 | 51.16129 | 159 | 0.589226 | false |
lantianlz/qiexing | www/journey/views.py | 1 | 5036 | # -*- coding: utf-8 -*-
import json
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.template import RequestContext
from django.shortcuts import render_to_response
from common import utils, page
from www.journey import interface
from www.misc.decorators import member_required, staff_required, common_ajax_response
from www.admin.interface import CoverBase
jb = interface.JourneyBase()
lb = interface.LikeBase()
def home(request, template_name='journey/home.html'):
from www.activity.interface import ActivityBase
from www.admin.interface import FriendlyLinkBase
activitys = ActivityBase().get_all_valid_activitys()[:3]
journeys = jb.format_journeys(jb.get_all_journeys_for_home_page()[:4])
links = FriendlyLinkBase().get_friendly_link_by_link_type(link_type=3)
covers = CoverBase().get_home_cover()
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def journey_list(request, template_name='journey/journey_list.html'):
journeys = jb.get_all_journeys_for_home_page()
    # Pagination
page_num = int(request.REQUEST.get('page', 1))
page_objs = page.Cpt(journeys, count=10, page=page_num).info
journeys = page_objs[0]
page_params = (page_objs[1], page_objs[4])
journeys = jb.format_journeys(journeys)
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
def journey_detail(request, journey_id, template_name='journey/journey_detail.html'):
journey = jb.get_journey_by_id(journey_id)
if not journey:
raise Http404
journey = jb.format_journeys([journey, ],)[0]
sort = request.REQUEST.get('sort', 'like_count')
    answers_list_params = "%s$%s" % (journey.id, "0") # used by the front end to fetch the answer list
    # Get feedback messages from the session
if request.session.has_key('error_msg'):
error_msg = request.session['error_msg']
del request.session['error_msg']
if request.session.has_key('success_msg'):
success_msg = request.session['success_msg']
del request.session['success_msg']
if request.session.has_key('answer_content'):
request.answer_content = request.session['answer_content']
del request.session['answer_content']
if request.session.has_key('guide'):
guide = request.session['guide']
del request.session['guide']
    # Asynchronously update the view count
from www.tasks import async_add_journey_view_count
async_add_journey_view_count(journey.id)
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
@member_required
def write_journey(request, template_name='journey/write_journey.html'):
if request.POST:
journey_title = request.POST.get('journey_title', '').strip()
journey_content = request.POST.get('journey_content', '').strip()
is_hide_user = request.POST.get('is_hide_user')
errcode, result = jb.create_journey(request.user.id, journey_title, journey_content,
ip=utils.get_clientip(request), is_hide_user=is_hide_user)
if errcode == 0:
request.session['guide'] = True
return HttpResponseRedirect(result.get_url())
else:
error_msg = result
return render_to_response(template_name, locals(), context_instance=RequestContext(request))
@member_required
def modify_journey(request, journey_id):
if request.POST:
journey_title = request.POST.get('journey_title', '').strip()
journey_content = request.POST.get('journey_content', '').strip()
is_hide_user = request.POST.get('is_hide_user')
errcode, result = jb.modify_journey(journey_id, request.user, journey_title, journey_content,
ip=utils.get_clientip(request), is_hide_user=is_hide_user)
if errcode == 0:
request.session['success_msg'] = u'修改成功'
return HttpResponseRedirect(result.get_url())
else:
request.session['error_msg'] = result
return HttpResponseRedirect(jb.get_journey_by_id(journey_id).get_url())
# =================================================== ajax section ===============================================================#
@member_required
@common_ajax_response
def like_journey(request):
journey_id = request.POST.get('journey_id', '').strip()
return lb.like_it(journey_id, request.user.id, ip=utils.get_clientip(request))
@member_required
@common_ajax_response
def remove_journey(request):
journey_id = request.POST.get('journey_id', '').strip()
return jb.remove_journey(journey_id, request.user)
@staff_required
@common_ajax_response
def set_top(request):
journey_id = request.POST.get('journey_id', '').strip()
return jb.set_top(journey_id)
@staff_required
@common_ajax_response
def cancel_top(request):
journey_id = request.POST.get('journey_id', '').strip()
return jb.cancel_top(journey_id)
| gpl-2.0 | -7,290,553,088,338,072,000 | 34.485714 | 125 | 0.662037 | false |
MGApcDev/LamasAndGroves | src/wordbranch.py | 1 | 2333 | class WordBranch(object):
"""WordBranch represents a single branch in the tree of all the valid word combinations.
Attributes:
letter_branch (LetterBranch) The reference to the LetterBranch that represents the word.
origin (WordBranch) The reference to the parent WordBranch.
remain_char (int) Number of characters remaining in the remain_dict.
valid_children ([WordBranch]) Array of WordBranches leading to valid anagrams.
"""
def __init__(self, letter_branch, origin, remain_char, valid_children):
self.letter_branch = letter_branch
self.origin = origin
self.remain_char = remain_char
self.valid_children = valid_children
def __str__(self):
'''Trace words from leaf branch to root.
Args
self (WordBranch) The leaf branch to trace for word.
Returns
(string) The full string of represented by the leaf.
'''
output_str = ''
words = []
pointer = self
while pointer.origin != None:
words.append(pointer)
pointer = pointer.origin
words.reverse() # Put words in the right order
for word in words:
output_str += str(word.letter_branch) + ' '
# Remove last char --> ' '
return output_str[:-1]
hash_to_branch = {}
def get_word_tree_root(phrase_len, phrase_dict, words):
'''Construct the root object of the WordBranch tree.
Args
phrase_len (int) Count of valid characters in phrase.
phrase_dict ({char => int}) The remaining letters of the phrase.
words ([LetterBranch]) Array of all the available words as LetterBranch.
Returns
(WordBranch) The root of WordBranch tree.
'''
global hash_to_branch
hash_to_branch = {} # Reset hash to branch on new tree root
root_children = []
root = WordBranch(None, None, phrase_len, None)
for word in words:
root_children.append(WordBranch(word, root, phrase_len - len(str(word)), None))
return root, root_children
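# Illustration (hypothetical inputs, assuming `words` holds LetterBranch objects
# built from a dictionary): for the phrase "listen" the call below returns the tree
# root plus one child WordBranch per candidate word, each recording how many phrase
# characters would remain after using that word.
#   root, children = get_word_tree_root(6, {'l': 1, 'i': 1, 's': 1, 't': 1, 'e': 1, 'n': 1}, words)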
def get_hash_to_branch():
global hash_to_branch
return hash_to_branch
| mit | 8,808,247,715,610,096,000 | 37.542373 | 98 | 0.583369 | false |
alehander42/bach | bach/bach_macro.py | 1 | 4898 | import types
import bach_ast
import compiler
import bach_stl
from errors import MacroMatchError
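# NOTE: the renderer and register methods below key their quasiquote flag on QUA,
# but this module never defines or imports it. A placeholder sentinel is assumed
# here so the code can run; the real project may define it differently elsewhere.
QUA = 'quasiquote'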
def register_macro(mapping, label, macro, count=None):
if label not in mapping:
mapping[label] = []
if isinstance(macro, types.FunctionType):
mapping[label].append((count, macro))
else:
mapping[label].append((macro.args_count(), macro))
class BachMacro(object):
def __init__(self, label, args, body):
self.label, self.args, self.body = label, args, body
def render(self, sexps):
# mapping = {}
# if len(self.args) > 0 and isinstance(self.args[-1], bach_ast.Many):
# if len(self.args) >= len(sexps) - 1:
# for arg, sexp in zip(self.args[:-1], self.sexps[:len(self.args) - 1]):
# mapping[arg.label] = sexp
# mapping[self.args[-1].label] = sexps[len(self.args) - 1:]
# else:
# raise MacroMatchError("No enough args for %s" % self.label)
# else:
# if len(self.args) == len(sexps):
# for arg, sexp in zip(self.args, sexps):
# mapping[arg.label] = sexp
# else:
# raise MacroMatchError("Expected %d args got %d for %s" % (len(self.args), len(sexps), self.label))
# value =
if not self.args:
args = []
elif isinstance(self.args[-1], bach_ast.Many):
args = self.args[:-1] + [bach_ast.Label(self.args[-1].label)]
else:
args = self.args
sexps = [bach_ast.Quote(sexp) for sexp in sexps]
sexp = bach_ast.Program([[bach_ast.Lambda(args, self.body)] + sexps])
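        # The expansion is compiled as an immediately-applied lambda whose arguments
        # are the quoted call-site s-expressions, roughly (hypothetical macro call):
        #   ((lambda (x) <macro body>) '(+ 1 2))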
result = compiler.Compiler().compile_and_eval(sexp, stl=bach_stl.load_stl(), return_value=True)
return self.normal_sexp(result)
def normal_sexp(self, sexp):
'''
        We compile macros to a bach lambda and then run them, so the resulting values
        don't carry the compile-time node types (only native Python types and bach runtime types).
        There are only a few such cases and they are all similar, so we convert the results
        back to a normal bach sexp, which lets other macros be applied to the result.
'''
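        # Illustration (hypothetical runtime value): {BachSymbol('a'): [1, 2.5, True]}
        # comes back as Dict([Label('a')], [[Integer(1), Float(2.5), Boolean(True)]]).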
PYTHON_BACH_EQUIVALENTS = {int: bach_ast.Integer, float: bach_ast.Float, str: bach_ast.String, bool: bach_ast.Boolean}
if isinstance(sexp, list):
return map(self.normal_sexp, sexp)
elif type(sexp) in PYTHON_BACH_EQUIVALENTS:
return PYTHON_BACH_EQUIVALENTS[type(sexp)](sexp)
elif type(sexp).__name__ == 'BachSymbol':
return bach_ast.Label(sexp.value)
elif isinstance(sexp, dict):
return bach_ast.Dict(map(self.normal_sexp, sexp.keys()), map(self.normal_sexp, sexp.values()))
elif isinstance(sexp, set):
return bach_ast.Set(map(self.normal_sexp, sexp))
else:
return sexp
def generic_render(self, node, mapping):
        method_name = 'render_' + type(node).__name__.lower()
        if hasattr(self, method_name):
            return getattr(self, method_name)(node, mapping)
else:
return node
def render_list(self, node, mapping):
if mapping[QUA]:
result = []
for child in node:
if isinstance(child, bach_ast.UnquoteList):
result += self.generic_render(child, mapping)
else:
                    result.append(self.generic_render(child, mapping))
            return result
        else:
            return [self.generic_render(child, mapping) for child in node]
def render_quasiquote(self, node, mapping):
quasiquote_mapping = mapping.copy()
quasiquote_mapping[QUA] = True
return self.generic_render(node.expr, quasiquote_mapping)
def render_quote(self, node, mapping):
return self.generic_render(node.expr, mapping)
def render_unquote(self, node, mapping):
if mapping[QUA]:
return mapping[node.expr.label]
else:
return node
def render_unquotelist(self, node, mapping):
if mapping[QUA]:
return mapping[node.expr.label]
else:
return node
def register(self, sexps):
mapping = {QUA: False} # a flag for activated quasi
for arg, sexp in zip(self.args, sexps):
e = 4
if isinstance(arg, bach_ast.Label):
mapping[arg.label] = sexp
if len(sexps) > len(self.args) and isinstance(self.args[-1], bach_ast.Many):
mapping[self.args[-1].label] = sexps[len(self.args)-1:]
return mapping
def args_count(self):
if len(self.args) > 0 and isinstance(self.args[-1], bach_ast.Label) or len(self.args) == 0:
return len(self.args)
else:
return (len(self.args),)
| mit | 1,044,999,443,034,420,000 | 39.147541 | 126 | 0.576766 | false |
juliusdedekind/FindDuplicateFiles | FindDuplicates.py | 1 | 2897 | """Find duplicate files inside a directory tree."""
from os import walk, remove, stat
from os.path import join as joinpath
from hashlib import md5
import threading
import Queue
import time
import sys
class Scanner(threading.Thread):
def __init__(self, path, queue, finished_scan):
threading.Thread.__init__(self)
self._path = path
self._queue = queue
self._finished_scan = finished_scan
def run(self):
"""Find duplicate files in directory tree and return array with lists of duplicateted files."""
filesizes = {}
# Build up dict with key as filesize and value is list of filenames.
for path, dirs, files in walk(self._path ):
for filename in files:
filepath = joinpath( path, filename )
filesize = stat(filepath).st_size
filesizes.setdefault( filesize, [] ).append(filepath)
#Compare content hash of all files which have the same size
#if two or more files have same hash and size they are added to the queue
for files in [ flist for flist in filesizes.values() if len(flist) > 1 ]:
#run over all files in dir with the same size if there is more then one
duplicates = {}
for filepath in files:
                with open(filepath, 'rb') as openfile:  # read bytes so the md5 hash is correct for binary files
filehash = md5(openfile.read()).hexdigest()
if filehash not in duplicates:
duplicates.setdefault(filehash, []).append(filepath)
else:
duplicates[filehash].append(filepath)
for duplicate in [duplicate for duplicate in duplicates.values() if len(duplicate) > 1 ]:
self._queue.put(duplicate)
self._finished_scan[0] = 1
class Updater(threading.Thread):
def __init__(self, queue, duplicates, updateFunction, finished_scan, time):
threading.Thread.__init__(self)
self._queue = queue
self._updateFunc = updateFunction
self._duplicates = duplicates
self._finished_scan = finished_scan
self._time_duration = time
def run(self):
while True:
try:
item = self._queue.get(True, 0.03) # seems to be a good time value
self._duplicates.append(item)
self._queue.task_done()
self._updateFunc()
except Queue.Empty:
# if queue is empty and scan is finished then stop this thread
if self._finished_scan[0] == 1:
self._time_duration = time.time() - self._time_duration
print 'Finished in ' + repr(self._time_duration) + ' seconds!'
self._updateFunc()
break
else:
continue
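# Minimal usage sketch (assumed wiring; the real application may differ):
#
#   import Queue, time
#   queue = Queue.Queue()
#   finished_scan = [0]              # shared flag; Scanner sets it to 1 when done
#   duplicates = []
#   scanner = Scanner('/some/path', queue, finished_scan)
#   updater = Updater(queue, duplicates, lambda: None, finished_scan, time.time())
#   scanner.start(); updater.start()
#   scanner.join(); updater.join()
#   # 'duplicates' now holds lists of paths whose size and md5 hash are identical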
| gpl-3.0 | -9,085,385,887,322,903,000 | 38.802817 | 103 | 0.563687 | false |
manewton/BioReactor-Data-Logging | Project/bioreactor_unittests.py | 1 | 3958 | import unittest
import os
import pandas as pd
import numpy as np
import sys
import types
from downloader import download_latest
from googledriveutils import remove_file, find_reactorfolder
from googledriveutils import find_reactorfileid, get_newdata, get_file_list
dates = pd.date_range('20130101', periods=6)
data = pd.DataFrame(np.random.randn(6,4), index=dates, columns=list('ABCD'))
class TestBioreactor(unittest.TestCase):
def test_download_latest(self):
'''
        Test the download_latest function. It should return None on success.
'''
download_result = download_latest()
assert download_result is None
def test_remove_file(self):
'''
Test the remove_file function when the file doesn't exist.
'''
#os.remove('nonexistent_file')
remove_result = remove_file('nonexistent_file')
self.assertFalse(remove_result)
def test_find_reactorfolder(self):
'''
        Test the find_reactorfolder function. That function returns the reactor id
        if the result is successful. The id of reactor #1 is
        '0B4idCyQOSLaBVi1rTFZhTkUzSk0'. Testing a successful result.
'''
reactorfolder_result = find_reactorfolder(1)
self.assertEqual(reactorfolder_result, '0B4idCyQOSLaBVi1rTFZhTkUzSk0')
def test_find_reactorfolder_none(self):
'''
        Test the find_reactorfolder function. That function returns the reactor id
        if the result is successful, and returns None if the result is
        unsuccessful. Testing an unsuccessful result.
'''
reactorfolder_result = find_reactorfolder(999)
self.assertIsNone(reactorfolder_result)
def test_find_reactorfileid(self):
'''
        Test the find_reactorfileid function. That function returns the id of
        a specific file within a specified reactor's directory
        if the result is successful, and returns False if unsuccessful.
        Testing a successful result using the R1data file, which exists within
        the directory of reactor #1.
'''
reactorfileid_result = find_reactorfileid(1, 'R1data')['title']
self.assertEqual(reactorfileid_result, 'R1data')
def test_find_reactorfileid_none(self):
'''
        Test the find_reactorfileid function. That function returns the id of
        a specific file within a specified reactor's directory
        if the result is successful, and returns False if unsuccessful.
        Testing an unsuccessful result using the R999data file, which doesn't exist
        within the directory of reactor #1.
'''
reactorfileid_result = find_reactorfileid(1, 'R999data')
self.assertFalse(reactorfileid_result)
def test_get_newdata(self):
'''
        Test the get_newdata function. The function returns a Pandas dataframe
with bioreactor data. Test that the first column name is 'Media Pump',
which is a real column in the Reactor #1 data frame.
'''
get_newdata_result = get_newdata(1)
self.assertEqual(get_newdata_result.columns[0], 'Media Pump')
def test_get_newdata_none(self):
'''
        Test the get_newdata function. The function returns a Pandas dataframe
        with bioreactor data. Test that the function returns an empty data
        frame when called with a nonexistent reactor number (999, for example).
        The length of the empty data frame is 1.
'''
get_newdata_result = get_newdata(999)
self.assertEqual(len(get_newdata_result), 1)
def test_get_file_list(self):
'''
        Test the get_file_list function. The function returns a list with the
        contents of a GDrive folder. We check that its type is 'list'.
'''
get_file_list_result = get_file_list('0B4idCyQOSLaBVi1rTFZhTkUzSk0')
self.assertTrue(isinstance(get_file_list_result, list))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 | 5,540,993,594,522,019,000 | 34.981818 | 78 | 0.669277 | false |
armanpazouki/chrono | src/demos/python/chrono-tensorflow/PPO/train.py | 1 | 12022 | """
PPO: Proximal Policy Optimization
serial version
"""
import sys
sys.path.append('../envs')
import chtrain as gym
import numpy as np
from policy import Policy
from value_function import NNValueFunction
import scipy.signal
from utils import Logger, Scaler
from datetime import datetime
import argparse
import signal
class GracefulKiller:
""" Gracefully exit program on CTRL-C """
def __init__(self):
self.kill_now = False
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, signum, frame):
self.kill_now = True
def init_gym(env_name, render):
"""
Initialize gym environment, return dimension of observation
and action spaces.
Args:
render: True to toggle on visualization
Returns: 3-tuple
environment (object)
number of observation dimensions (int)
number of action dimensions (int)
"""
env = gym.Init(env_name, render)
obs_dim = env.observation_space.shape[0]
act_dim = env.action_space.shape[0]
return env, obs_dim, act_dim
def run_episode(env, policy, scaler, animate=True):
""" Run single episode
Args:
env: environment (object)
policy: policy object with sample() method
scaler: scaler object, scales/offsets each observation
Returns: 4-tuple of NumPy arrays
observes: shape = (episode len, obs_dim)
actions: shape = (episode len, act_dim)
rewards: shape = (episode len,)
unscaled_obs: dataset for training scaler, shape = (episode len, obs_dim)
"""
obs = env.reset() #resets whenever an episode begins
observes, actions, rewards, unscaled_obs = [], [], [], []
done = False
step = 0.0
scale, offset = scaler.get()
scale[-1] = 1.0 # don't scale time step feature
offset[-1] = 0.0 # don't offset time step feature
while not done:
obs = obs.astype(np.float64).reshape((1, -1))
obs = np.append(obs, [[step]], axis=1) # add time step feature TODO: check if this extra state is useful
unscaled_obs.append(obs)
        obs = (obs - offset) * scale  # center and scale observations TODO: check if scaler is useful (it should be according to literature)
observes.append(obs)
action = policy.sample(obs).reshape((1, -1)).astype(np.float64)
actions.append(action)
obs, reward, done, _ = env.step(action) #state, reward, done, info = env.step(action)
if not isinstance(reward, float):
reward = np.asscalar(reward)
rewards.append(reward)
step += 1e-3 # increment time step feature
return (np.concatenate(observes), np.concatenate(actions),
np.array(rewards, dtype=np.float64), np.concatenate(unscaled_obs))
def run_policy(env, policy, scaler, logger, episodes):
""" Run policy and collect data
Args:
env: environment (object)
policy: policy object with sample() method
scaler: scaler object, scales/offsets each observation
logger: logger object, used to save stats from episodes
episodes: total episodes to run
Returns: list of trajectory dictionaries, list length = number of episodes
'observes' : NumPy array of states from episode
'actions' : NumPy array of actions from episode
'rewards' : NumPy array of (un-discounted) rewards from episode
'unscaled_obs' : NumPy array of (un-scaled) states from episode
"""
total_steps = 0
trajectories = []
for e in range(episodes):
observes, actions, rewards, unscaled_obs = run_episode(env, policy, scaler)
total_steps += observes.shape[0]
trajectory = {'observes': observes,
'actions': actions,
'rewards': rewards,
'unscaled_obs': unscaled_obs}
trajectories.append(trajectory)
unscaled = np.concatenate([t['unscaled_obs'] for t in trajectories])
scaler.update(unscaled) # update running statistics for scaling observations
logger.log({'_MeanReward': np.mean([t['rewards'].sum() for t in trajectories]),
'Steps': total_steps})
return trajectories
def discount(x, gamma):
""" Calculate discounted forward sum of a sequence at each point """
return scipy.signal.lfilter([1.0], [1.0, -gamma], x[::-1])[::-1]
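# For example, discount([1., 1., 1.], 0.5) returns [1.75, 1.5, 1.]: each entry is the
# value at that step plus the discounted sum of everything that follows it.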
def add_disc_sum_rew(trajectories, gamma):
""" Adds discounted sum of rewards to all time steps of all trajectories
Args:
trajectories: as returned by run_policy()
gamma: discount
Returns:
None (mutates trajectories dictionary to add 'disc_sum_rew')
"""
for trajectory in trajectories:
if gamma < 0.999: # don't scale for gamma ~= 1
rewards = trajectory['rewards'] * (1 - gamma)
else:
rewards = trajectory['rewards']
disc_sum_rew = discount(rewards, gamma)
trajectory['disc_sum_rew'] = disc_sum_rew
def add_value(trajectories, val_func):
""" Adds estimated value to all time steps of all trajectories
Args:
trajectories: as returned by run_policy()
val_func: object with predict() method, takes observations
and returns predicted state value
Returns:
None (mutates trajectories dictionary to add 'values')
"""
for trajectory in trajectories:
observes = trajectory['observes']
values = val_func.predict(observes)
trajectory['values'] = values
def add_gae(trajectories, gamma, lam):
""" Add generalized advantage estimator.
https://arxiv.org/pdf/1506.02438.pdf
Args:
trajectories: as returned by run_policy(), must include 'values'
key from add_value().
gamma: reward discount
lam: lambda (see paper).
lam=0 : use TD residuals
lam=1 : A = Sum Discounted Rewards - V_hat(s)
Returns:
None (mutates trajectories dictionary to add 'advantages')
"""
for trajectory in trajectories:
if gamma < 0.999: # don't scale for gamma ~= 1
rewards = trajectory['rewards'] * (1 - gamma)
else:
rewards = trajectory['rewards']
values = trajectory['values']
# temporal differences
        # values[1:] drops the first element (V(s0)) and appends a 0 at the end (the value of the state after the trajectory ends is 0)
        # tds[t] = r[t] - V(s[t]) + gamma * V(s[t+1])
tds = rewards - values + np.append(values[1:] * gamma, 0)
advantages = discount(tds, gamma * lam)
trajectory['advantages'] = advantages
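        # Note: the generalized advantage estimate satisfies the recursion
        #   A[t] = tds[t] + gamma * lam * A[t+1]
        # which is exactly what discount(tds, gamma * lam) computes above.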
def build_train_set(trajectories):
"""
Args:
trajectories: trajectories after processing by add_disc_sum_rew(),
add_value(), and add_gae()
Returns: 4-tuple of NumPy arrays
observes: shape = (N, obs_dim)
actions: shape = (N, act_dim)
advantages: shape = (N,)
disc_sum_rew: shape = (N,)
"""
observes = np.concatenate([t['observes'] for t in trajectories])
actions = np.concatenate([t['actions'] for t in trajectories])
disc_sum_rew = np.concatenate([t['disc_sum_rew'] for t in trajectories])
advantages = np.concatenate([t['advantages'] for t in trajectories])
# normalize advantages
advantages = (advantages - advantages.mean()) / (advantages.std() + 1e-6)
return observes, actions, advantages, disc_sum_rew
def log_batch_stats(observes, actions, advantages, disc_sum_rew, logger, episode):
""" Log various batch statistics """
logger.log({'_mean_obs': np.mean(observes),
'_min_obs': np.min(observes),
'_max_obs': np.max(observes),
'_std_obs': np.mean(np.var(observes, axis=0)),
'_mean_act': np.mean(actions),
'_min_act': np.min(actions),
'_max_act': np.max(actions),
'_std_act': np.mean(np.var(actions, axis=0)),
'_mean_adv': np.mean(advantages),
'_min_adv': np.min(advantages),
'_max_adv': np.max(advantages),
'_std_adv': np.var(advantages),
'_mean_discrew': np.mean(disc_sum_rew),
'_min_discrew': np.min(disc_sum_rew),
'_max_discrew': np.max(disc_sum_rew),
'_std_discrew': np.var(disc_sum_rew),
'_Episode': episode
})
def main(env_name, num_episodes, render, gamma, lam, kl_targ, batch_size):
""" Main training loop
Args:
env_name: OpenAI Gym environment name, e.g. 'Hopper-v1'
num_episodes: maximum number of episodes to run
gamma: reward discount factor (float)
lam: lambda from Generalized Advantage Estimate
        kl_targ: D_KL target for policy update [D_KL(pi_old || pi_new)]
batch_size: number of episodes per policy training batch
"""
killer = GracefulKiller()
env, obs_dim, act_dim = init_gym(env_name, render)
obs_dim += 1 # add 1 to obs dimension for time step feature (see run_episode())
now = datetime.utcnow().strftime("%b-%d_%H-%M-%S") # create unique directories
logger = Logger(logname=env_name, now=now)
scaler = Scaler(obs_dim, env_name)
val_func = NNValueFunction(obs_dim, env_name)
policy = Policy(obs_dim, act_dim, kl_targ, env_name)
# run a few episodes of untrained policy to initialize scaler:
run_policy(env, policy, scaler, logger, episodes=5)
episode = 0
#capture = False
while episode < num_episodes:
trajectories = run_policy(env, policy, scaler, logger, episodes=batch_size)
episode += len(trajectories)
"""if episode > 600 and not capture:
env.ScreenCapture(5)
capture = True"""
add_value(trajectories, val_func) # add estimated values to episodes
add_disc_sum_rew(trajectories, gamma) # calculated discounted sum of Rs
add_gae(trajectories, gamma, lam) # calculate advantage
# concatenate all episodes into single NumPy arrays
observes, actions, advantages, disc_sum_rew = build_train_set(trajectories)
# add various stats to training log:
log_batch_stats(observes, actions, advantages, disc_sum_rew, logger, episode)
policy.update(observes, actions, advantages, logger) # update policy
val_func.fit(observes, disc_sum_rew, logger) # update value function
logger.write(display=True) # write logger results to file and stdout
scaler.save()
if killer.kill_now:
if input('Terminate training (y/[n])? ') == 'y':
break
killer.kill_now = False
logger.close()
policy.close_sess()
val_func.close_sess()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=('Train policy on OpenAI Gym environment '
'using Proximal Policy Optimizer'))
parser.add_argument('env_name', type=str, help='OpenAI Gym environment name')
parser.add_argument('-n', '--num_episodes', type=int, help='Number of episodes to run',
default=1000)
parser.add_argument('--renderON',action='store_true', default=False, dest='render', help='Toggle ON video')
parser.add_argument('--renderOFF',action='store_false', default=False, dest='render', help='Toggle OFF video')
parser.add_argument('-g', '--gamma', type=float, help='Discount factor', default=0.995)
parser.add_argument('-l', '--lam', type=float, help='Lambda for Generalized Advantage Estimation',
default=0.98)
parser.add_argument('-k', '--kl_targ', type=float, help='D_KL target value',
default=0.003)
parser.add_argument('-b', '--batch_size', type=int,
help='Number of episodes per training batch',
default=20)
args = parser.parse_args()
main(**vars(args))
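    # Example invocation (the environment name is only an illustration; use one of
    # the environments exposed by chtrain):
    #   python train.py <env_name> -n 1000 -b 20 --renderOFF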
| bsd-3-clause | -2,523,526,546,991,726,000 | 37.780645 | 147 | 0.6182 | false |
RuudBurger/CouchPotatoServer | couchpotato/core/downloaders/deluge.py | 1 | 16194 | from base64 import b64encode, b16encode, b32decode
from datetime import timedelta
from hashlib import sha1
import os.path
import re
import traceback
from bencode import bencode as benc, bdecode
from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
from couchpotato.core.helpers.encoding import isInt, sp
from couchpotato.core.helpers.variable import tryFloat, cleanHost
from couchpotato.core.logger import CPLog
from deluge_client.client import DelugeRPCClient
log = CPLog(__name__)
autoload = 'Deluge'
class Deluge(DownloaderBase):
protocol = ['torrent', 'torrent_magnet']
log = CPLog(__name__)
drpc = None
def connect(self, reconnect = False):
""" Connect to the delugeRPC, re-use connection when already available
:param reconnect: force reconnect
:return: DelugeRPC instance
"""
# Load host from config and split out port.
host = cleanHost(self.conf('host'), protocol = False).split(':')
# Force host assignment
if len(host) == 1:
host.append(80)
if not isInt(host[1]):
log.error('Config properties are not filled in correctly, port is missing.')
return False
if not self.drpc or reconnect:
self.drpc = DelugeRPC(host[0], port = host[1], username = self.conf('username'), password = self.conf('password'))
return self.drpc
def download(self, data = None, media = None, filedata = None):
""" Send a torrent/nzb file to the downloader
:param data: dict returned from provider
Contains the release information
:param media: media dict with information
Used for creating the filename when possible
:param filedata: downloaded torrent/nzb filedata
            The file gets downloaded in the searcher and sent to this function.
            This is done so failure checking happens before the downloader is used,
            so the downloader doesn't need to worry about that.
        :return: boolean
            A failure returns False, but the downloader should log its own errors
"""
if not media: media = {}
if not data: data = {}
log.info('Sending "%s" (%s) to Deluge.', (data.get('name'), data.get('protocol')))
if not self.connect():
return False
if not filedata and data.get('protocol') == 'torrent':
log.error('Failed sending torrent, no data')
return False
# Set parameters for Deluge
options = {
'add_paused': self.conf('paused', default = 0),
'label': self.conf('label')
}
if self.conf('directory'):
#if os.path.isdir(self.conf('directory')):
options['download_location'] = self.conf('directory')
#else:
# log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
if self.conf('completed_directory'):
#if os.path.isdir(self.conf('completed_directory')):
options['move_completed'] = 1
options['move_completed_path'] = self.conf('completed_directory')
#else:
# log.error('Download directory from Deluge settings: %s doesn\'t exist', self.conf('directory'))
if data.get('seed_ratio'):
options['stop_at_ratio'] = 1
options['stop_ratio'] = tryFloat(data.get('seed_ratio'))
# Deluge only has seed time as a global option. Might be added in
# in a future API release.
# if data.get('seed_time'):
# Send request to Deluge
if data.get('protocol') == 'torrent_magnet':
remote_torrent = self.drpc.add_torrent_magnet(data.get('url'), options)
else:
filename = self.createFileName(data, filedata, media)
remote_torrent = self.drpc.add_torrent_file(filename, filedata, options)
if not remote_torrent:
log.error('Failed sending torrent to Deluge')
return False
log.info('Torrent sent to Deluge successfully.')
return self.downloadReturnId(remote_torrent)
def test(self):
""" Check if connection works
:return: bool
"""
if self.connect(True) and self.drpc.test():
return True
return False
def getAllDownloadStatus(self, ids):
""" Get status of all active downloads
:param ids: list of (mixed) downloader ids
Used to match the releases for this downloader as there could be
other downloaders active that it should ignore
:return: list of releases
"""
log.debug('Checking Deluge download status.')
if not self.connect():
return []
release_downloads = ReleaseDownloadList(self)
queue = self.drpc.get_alltorrents(ids)
if not queue:
log.debug('Nothing in queue or error')
return []
for torrent_id in queue:
torrent = queue[torrent_id]
            if 'hash' not in torrent:
                # When given a list of ids, deluge will return an empty item for a non-existent torrent.
                continue
log.debug('name=%s / id=%s / save_path=%s / move_on_completed=%s / move_completed_path=%s / hash=%s / progress=%s / state=%s / eta=%s / ratio=%s / stop_ratio=%s / is_seed=%s / is_finished=%s / paused=%s', (torrent['name'], torrent['hash'], torrent['save_path'], torrent['move_on_completed'], torrent['move_completed_path'], torrent['hash'], torrent['progress'], torrent['state'], torrent['eta'], torrent['ratio'], torrent['stop_ratio'], torrent['is_seed'], torrent['is_finished'], torrent['paused']))
# Deluge has no easy way to work out if a torrent is stalled or failing.
#status = 'failed'
status = 'busy'
            # If a user opts to seed a torrent forever (usually associated with private tracker usage), stop_ratio will be 0 or -1 (depending on the Deluge version).
# In this scenario the status of the torrent would never change from BUSY to SEEDING.
# The last check takes care of this case.
if torrent['is_seed'] and ((tryFloat(torrent['ratio']) < tryFloat(torrent['stop_ratio'])) or (tryFloat(torrent['stop_ratio']) < 0)):
# We have torrent['seeding_time'] to work out what the seeding time is, but we do not
# have access to the downloader seed_time, as with deluge we have no way to pass it
# when the torrent is added. So Deluge will only look at the ratio.
# See above comment in download().
status = 'seeding'
elif torrent['is_seed'] and torrent['is_finished'] and torrent['paused'] and torrent['state'] == 'Paused':
status = 'completed'
download_dir = sp(torrent['save_path'])
if torrent['move_on_completed']:
download_dir = torrent['move_completed_path']
torrent_files = []
for file_item in torrent['files']:
torrent_files.append(sp(os.path.join(download_dir, file_item['path'])))
release_downloads.append({
'id': torrent['hash'],
'name': torrent['name'],
'status': status,
'original_status': torrent['state'],
'seed_ratio': torrent['ratio'],
'timeleft': str(timedelta(seconds = torrent['eta'])),
'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
'files': torrent_files,
})
return release_downloads
def pause(self, release_download, pause = True):
if pause:
return self.drpc.pause_torrent([release_download['id']])
else:
return self.drpc.resume_torrent([release_download['id']])
def removeFailed(self, release_download):
log.info('%s failed downloading, deleting...', release_download['name'])
return self.drpc.remove_torrent(release_download['id'], True)
def processComplete(self, release_download, delete_files = False):
log.debug('Requesting Deluge to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))
return self.drpc.remove_torrent(release_download['id'], remove_local_data = delete_files)
class DelugeRPC(object):
host = 'localhost'
port = 58846
username = None
password = None
client = None
def __init__(self, host = 'localhost', port = 58846, username = None, password = None):
super(DelugeRPC, self).__init__()
self.host = host
self.port = port
self.username = username
self.password = password
def connect(self):
#self.client = DelugeClient()
#self.client.connect(self.host, int(self.port), self.username, self.password)
self.client = DelugeRPCClient(self.host, int(self.port), self.username, self.password)
self.client.connect()
def test(self):
try:
self.connect()
except:
return False
return True
def add_torrent_magnet(self, torrent, options):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_magnet(torrent, options)
if not torrent_id:
torrent_id = self._check_torrent(True, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label'])
except Exception as err:
log.error('Failed to add torrent magnet %s: %s %s', (torrent, err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return torrent_id
def add_torrent_file(self, filename, torrent, options):
torrent_id = False
try:
self.connect()
torrent_id = self.client.core.add_torrent_file(filename, b64encode(torrent), options)
if not torrent_id:
torrent_id = self._check_torrent(False, torrent)
if torrent_id and options['label']:
self.client.label.set_torrent(torrent_id, options['label'])
except Exception as err:
log.error('Failed to add torrent file %s: %s %s', (filename, err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return torrent_id
def get_alltorrents(self, ids):
ret = False
try:
self.connect()
ret = self.client.core.get_torrents_status({'id': ids}, ('name', 'hash', 'save_path', 'move_completed_path', 'progress', 'state', 'eta', 'ratio', 'stop_ratio', 'is_seed', 'is_finished', 'paused', 'move_on_completed', 'files'))
except Exception as err:
log.error('Failed to get all torrents: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return ret
def pause_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.pause_torrent(torrent_ids)
except Exception as err:
log.error('Failed to pause torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
def resume_torrent(self, torrent_ids):
try:
self.connect()
self.client.core.resume_torrent(torrent_ids)
except Exception as err:
log.error('Failed to resume torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
def remove_torrent(self, torrent_id, remove_local_data):
ret = False
try:
self.connect()
ret = self.client.core.remove_torrent(torrent_id, remove_local_data)
except Exception as err:
log.error('Failed to remove torrent: %s %s', (err, traceback.format_exc()))
finally:
if self.client:
self.disconnect()
return ret
def disconnect(self):
self.client.disconnect()
def _check_torrent(self, magnet, torrent):
# Torrent not added, check if it already existed.
if magnet:
torrent_hash = re.findall('urn:btih:([\w]{32,40})', torrent)[0]
else:
info = bdecode(torrent)["info"]
torrent_hash = sha1(benc(info)).hexdigest()
# Convert base 32 to hex
if len(torrent_hash) == 32:
torrent_hash = b16encode(b32decode(torrent_hash))
torrent_hash = torrent_hash.lower()
torrent_check = self.client.core.get_torrent_status(torrent_hash, {})
if torrent_check['hash']:
return torrent_hash
return False
config = [{
'name': 'deluge',
'groups': [
{
'tab': 'downloaders',
'list': 'download_providers',
'name': 'deluge',
'label': 'Deluge',
'description': 'Use <a href="http://www.deluge-torrent.org/" target="_blank">Deluge</a> to download torrents.',
'wizard': True,
'options': [
{
'name': 'enabled',
'default': 0,
'type': 'enabler',
'radio_group': 'torrent',
},
{
'name': 'host',
'default': 'localhost:58846',
'description': 'Hostname with port. Usually <strong>localhost:58846</strong>',
},
{
'name': 'username',
},
{
'name': 'password',
'type': 'password',
},
{
'name': 'directory',
'type': 'directory',
'description': 'Download to this directory. Keep empty for default Deluge download directory.',
},
{
'name': 'completed_directory',
'type': 'directory',
'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
'advanced': True,
},
{
'name': 'label',
'description': 'Label to add to torrents in the Deluge UI.',
},
{
'name': 'remove_complete',
'label': 'Remove torrent',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Remove the torrent from Deluge after it has finished seeding.',
},
{
'name': 'delete_files',
'label': 'Remove files',
'default': True,
'type': 'bool',
'advanced': True,
'description': 'Also remove the leftover files.',
},
{
'name': 'paused',
'type': 'bool',
'advanced': True,
'default': False,
'description': 'Add the torrent paused.',
},
{
'name': 'manual',
'default': 0,
'type': 'bool',
'advanced': True,
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
},
{
'name': 'delete_failed',
'default': True,
'advanced': True,
'type': 'bool',
'description': 'Delete a release after the download has failed.',
},
],
}
],
}]
| gpl-3.0 | 8,216,095,494,107,268,000 | 37.374408 | 512 | 0.545943 | false |
jeffdonahue/voc-classification | src/python_layers.py | 1 | 3416 | from __future__ import division
from __future__ import print_function
from caffe_all import *
import numpy as np
import unittest
# Helper class that allows python layers to be added to NetSpec easily. Just use
# Py.YourLayer(bottom1, bottom2, ..., parameter1=.., ...)
# parameter1 will automatically be passed to YourLayer defined below
class PY:
def _parse_kwargs(self, layer, kwargs):
l = getattr(self.py_module, layer)
if not 'param_str' in kwargs:
py_args = {}
for a in list(kwargs.keys()):
if hasattr(l, a):
py_args[a] = kwargs.pop(a)
kwargs['param_str'] = str(py_args)
if hasattr(l, 'N_TOP'):
kwargs['ntop'] = l.N_TOP
return kwargs
def __init__(self, module):
import importlib
self.module = module
self.py_module = importlib.import_module(module)
def __getattr__(self, name):
return lambda *args, **kwargs: caffe.layers.Python(*args, module=self.module, layer=name, **self._parse_kwargs(name, kwargs))
Py = PY('python_layers')
class PyLayer(caffe.Layer):
def setup(self, bottom, top):
if self.param_str:
params = eval(self.param_str)
if isinstance(params, dict):
for p,v in params.items():
setattr(self, p, v)
class SigmoidCrossEntropyLoss(PyLayer):
ignore_label = None
def reshape(self, bottom, top):
assert len(bottom) == 2
assert len(top) == 1
top[0].reshape()
def forward(self, bottom, top):
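        # Numerically stable sigmoid cross-entropy (entries equal to ignore_label are masked out):
        #   loss = sum_i [ log(1 + exp(-|f_i|)) + max(f_i, 0) - t_i * f_i ] / N
        # This equals the usual -t*log(sigmoid(f)) - (1-t)*log(1-sigmoid(f)) summed over
        # the batch, while avoiding overflow in exp() for large |f|.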
N = bottom[0].shape[0]
f, df, t = bottom[0].data, bottom[0].diff, bottom[1].data
mask = (self.ignore_label is None or t != self.ignore_label)
lZ = np.log(1+np.exp(-np.abs(f))) * mask
dlZ = np.exp(np.minimum(f,0))/(np.exp(np.minimum(f,0))+np.exp(-np.maximum(f,0))) * mask
top[0].data[...] = np.sum(lZ + ((f>0)-t)*f * mask) / N
df[...] = (dlZ - t*mask) / N
def backward(self, top, prop, bottom):
bottom[0].diff[...] *= top[0].diff
class SigmoidCrossEntropyLossTest(unittest.TestCase):
def _setupTestNet(self, n, m):
from caffe_all import L
ns = caffe.NetSpec()
ns.f, ns.gt = L.DummyData(dummy_data_param = dict(shape=[dict(dim=[n,m])]*2, data_filler=[dict(type='gaussian'), dict(type='uniform')]), ntop=2)
ns.caffe_s = L.SigmoidCrossEntropyLoss(ns.f, ns.gt, loss_weight=1)
ns.python_s = Py.SigmoidCrossEntropyLoss(ns.f, ns.gt, loss_weight=1)
net = caffe.get_net_from_string('force_backward:true\n'+str(ns.to_proto()), caffe.TEST)
return net
def test_forward(self):
# Create a test net
for n in range(1,10):
for m in range(1,10):
with self.subTest(n=n,m=m):
net = self._setupTestNet(n,m)
r = net.forward()
self.assertAlmostEqual(r['caffe_s'], r['python_s'], 3)
def test_backward(self):
# Create a test net
for n in range(1,10):
for m in range(1,10):
with self.subTest(n=n,m=m):
net = self._setupTestNet(n,m)
net.forward()
net.blobs['f'].diff[...], net.blobs['caffe_s'].diff[...], net.blobs['python_s'].diff[...] = 0, 1, 0
r1 = net.backward(['f'])['f']
net.forward()
net.blobs['f'].diff[...], net.blobs['caffe_s'].diff[...], net.blobs['python_s'].diff[...] = 0, 0, 1
r2 = net.backward(['f'])['f']
np.testing.assert_array_almost_equal( r1, r2, 3 )
self.assertGreater( np.mean(np.abs(r1)), 0 )
class Print(PyLayer):
def reshape(self, bottom, top):
pass
def forward(self, bottom, top):
print( bottom[0].data )
def backward(self, top, prop, bottom):
pass
if __name__ == '__main__':
unittest.main()
| bsd-2-clause | -6,313,557,467,080,100,000 | 30.054545 | 146 | 0.636417 | false |
smira/fmspy | fmspy/application/room.py | 1 | 2208 | # FMSPy - Copyright (c) 2009 Andrey Smirnov.
#
# See COPYRIGHT for details.
"""
Application rooms.
"""
class Room(object):
"""
Room (scope, context) is location inside application where clients meet.
Room holds server objects: streams, shared objects, etc. It can be
used to iterate over clients in room.
@ivar clients: set of clients inside room
@type clients: C{set}
@ivar name: room name
@type name: C{str}
@ivar application: application owning this room
@type application: L{Application}
"""
def __init__(self, application, name='_'):
"""
Construct new room.
@param application: application owning this room
@type application: L{Application}
@param name: room name
@type name: C{str}
"""
self.name = name
self.application = application
self.clients = set()
def dismiss(self):
"""
Close room.
"""
self.clients = set()
self.application = None
def __eq__(self, other):
if not isinstance(other, Room):
return NotImplemented
return self.application == other.application and self.name == other.name
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "<Room %r @ %r (%d)>" % (self.name, self.application, len(self.clients))
def enter(self, client):
"""
Client enters room.
@param client: room client
@type client: L{RTMPServerProtocol}
"""
assert client not in self.clients
self.clients.add(client)
def leave(self, client):
"""
Client leaves room.
@param client: room client
@type client: L{RTMPServerProtocol}
"""
assert client in self.clients
self.clients.remove(client)
if not self.clients:
self.application.room_empty(self)
def __iter__(self):
"""
Iterate over clients.
"""
return self.clients.__iter__()
def empty(self):
"""
Is this room empty?
@rtype: C{bool}
"""
return False if self.clients else True
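# Minimal usage sketch (illustrative only; rooms are normally managed by the application):
#
#   room = Room(app, 'lobby')
#   room.enter(client)
#   for c in room:            # iterate over clients currently in the room
#       ...
#   room.leave(client)        # when the last client leaves, application.room_empty(room) is called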
| mit | -1,946,959,390,960,262,100 | 22.242105 | 87 | 0.567482 | false |
ArchibaldArt/avanan_challenge | config.py | 1 | 1146 | '''
SOApp config file
'''
import os
import sys
class Config(object):
DEBUG = False
TESTING = False
DEVELOPMENT = False
WTF_CSRF_ENABLED = True
# Random value needed for OAuth2 process (unsecure value)
OAUTH2_STATE_RANDOM = '+HqEsxiec6/inR3EPwM29DCF9IUBNSmQ9giWY2yC1u0='
class DevelopmentConfig(Config):
DEBUG = True
DEVELOPMENT = True
@classmethod
def init_app(cls, app):
try:
from configs import dev_config
except ImportError:
            app.logger.error('You have to create ./configs/dev_config.py. See readme.txt')
sys.exit(1)
try:
cls.SE_CLIENT_ID = dev_config.SE_CLIENT_ID
cls.SE_CLIENT_SECRET = dev_config.SE_CLIENT_SECRET
cls.SE_KEY = dev_config.SE_KEY
cls.SECRET_KEY = dev_config.SECRET_KEY
except AttributeError as e:
            app.logger.error('You have to define a required variable in dev_config.py: %s', str(e))
sys.exit(1)
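# Example of the expected configs/dev_config.py (placeholder values, not real credentials):
#
#   SE_CLIENT_ID = 'your-oauth2-client-id'
#   SE_CLIENT_SECRET = 'your-oauth2-client-secret'
#   SE_KEY = 'your-api-key'
#   SECRET_KEY = 'a-random-flask-session-secret'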
class DefaultConfig(DevelopmentConfig):
pass
CONFIG_NAME_MAPPER = {
'development': DevelopmentConfig,
'default': DefaultConfig,
}
| mit | 8,156,346,666,529,675,000 | 25.045455 | 89 | 0.624782 | false |
chadgates/locmaster | unlocode/csvimport.py | 1 | 10411 | from unlocode.models import Country, SubDivision, Locode, LocCountry, LocFunction, LocStatus, LocSubdivision, LocVersion
from unlocode.models import LocChangeIndicator
import os
import csv
import logging
from django.db import IntegrityError, transaction
def saveatomic(obj, logger):
    result = False
    try:
        with transaction.atomic():
            obj.save()
            result = True
    except IntegrityError as ex:
        if logger:
            logger.exception(ex)
    return result
def cleanoutVersion(version):
logger = logging.getLogger(__name__)
msg = str(Locode.objects.filter(version=version).delete()[0]) + " LocCodes deleted"
msg += "\n"
msg += str(LocCountry.objects.filter(version=version).delete()[0]) + " LocCountries deleted"
msg += "\n"
msg += str(LocFunction.objects.filter(version=version).delete()[0]) + " LocCodes deleted"
msg += "\n"
msg += str(LocStatus.objects.filter(version=version).delete()[0]) + " LocStatus deleted"
msg += "\n"
msg += str(LocSubdivision.objects.filter(version=version).delete()[0]) + " LocSubdivisions deleted"
logger.info(msg)
return msg
def importUNLOCODE(version):
logger = logging.getLogger(__name__)
path = os.getcwd() + "/unlocode/data/versions/" + version + "/"
logger.info("Start import for " + path)
if not (False in dict(check_version_dir(version)).values()):
objversion = LocVersion.objects.get(version=version)
msg = cleanoutVersion(version)
msg += "\n"
msg += importFunctionClassifiers(objversion, version, path)
msg += "\n"
msg += importStatusIndicators(objversion, version, path)
msg += "\n"
msg += importCountryCodes(objversion, version, path)
msg += "\n"
msg += importLocSubdivision(objversion, version, path)
msg += "\n"
msg += importCodeList(objversion, version, path)
else:
msg = "Nothing imported, files incomplete. "
logger.info(msg)
return msg
def importCountryCodes(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "CountryCodes.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
savecounter = 0
skipcounter = 0
rowcounter = 0
for row in dataReader:
locountry = LocCountry()
locountry.alpha2code = row[0]
locountry.name = row[1]
locountry.version = objversion
#locountry.save()
if saveatomic(locountry, logger):
savecounter += 1
else:
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Country codes (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
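# Each row of CountryCodes.txt is expected to look like (illustrative example):
#   "AD","ANDORRA"
# i.e. column 0 is the ISO 3166-1 alpha-2 code and column 1 is the country name.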
def importFunctionClassifiers(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "FunctionClassifiers.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
skipcounter = 0
savecounter = 0
for row in dataReader:
locfunction = LocFunction()
locfunction.functioncode = row[0]
locfunction.description = row[1]
locfunction.version = objversion
try:
with transaction.atomic():
locfunction.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
            skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Function classifiers (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importStatusIndicators(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "StatusIndicators.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
skipcounter = 0
savecounter = 0
for row in dataReader:
locstatus = LocStatus()
locstatus.statuscode = row[0]
locstatus.description = row[1]
locstatus.version = objversion
try:
with transaction.atomic():
locstatus.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Status Indicators (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importLocSubdivision(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "SubdivisionCodes.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
skipcounter = 0
savecounter = 0
for row in dataReader:
locsubdivision = LocSubdivision()
locsubdivision.alpha2code = LocCountry.objects.filter(alpha2code=row[0], version=version).first()
locsubdivision.shortcode = row[1]
locsubdivision.name = row[2]
locsubdivision.version = objversion
try:
with transaction.atomic():
locsubdivision.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Subdivisions (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importCodeList(objversion, version, path):
logger = logging.getLogger(__name__)
csv_filepathname = path + "CodeList.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
savecounter = 0
skipcounter = 0
rowcounter = 0
for row in dataReader:
if row[2] != '':
locode = Locode()
locode.locchangeindicator = LocChangeIndicator.objects.filter(changecode=row[0]).first()
locode.locodecountry = LocCountry.objects.filter(alpha2code=row[1], version=objversion).first()
locode.locodeplace = row[2]
locode.locname = row[3]
locode.locnamewodia = row[4]
locode.locsubdivision = LocSubdivision.objects.filter(shortcode=row[5], version=objversion,
alpha2code=locode.locodecountry_id).first()
locode.locfunction = row[7]
locode.locstatus = LocStatus.objects.filter(statuscode=row[6], version=objversion).first()
locode.locdate = row[8]
locode.lociata = row[9]
locode.locoordinates = row[10]
locode.locremarks = row[11]
# locode.locode = row[1]+row[2]
locode.version = objversion
try:
with transaction.atomic():
locode.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
else:
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " UN/LOCODES (" + version + ") processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importsubdivisons():
logger = logging.getLogger(__name__)
csv_filepathname = os.getcwd() + "/unlocode/data/subdivisions.txt"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), dialect='excel-tab')
# dataReader = csv.reader(open(csv_filepathname), delimter=',', quotechar='"')
savecounter = 0
skipcounter = 0
rowcounter = 0
for row in dataReader:
if not rowcounter == 0:
subdivision = SubDivision()
subdivision.level1 = row[0]
subdivision.level2 = row[1]
subdivision.name = row[2]
subdivision.alpha2code = (subdivision.level1 + subdivision.level2).split("-", 1)[0]
subdivision.shortcode = (subdivision.level1 + subdivision.level2).split("-", 1)[1]
try:
with transaction.atomic():
subdivision.save()
savecounter += 1
except IntegrityError as ex:
logger.exception(ex)
skipcounter += 1
rowcounter += 1
msg = str(rowcounter) + " Subdivisions processed: " + str(savecounter) + \
" created / " + str(skipcounter) + " skipped."
logger.info(msg)
return msg
def importcountries():
csv_filepathname = os.getcwd() + "/unlocode/data/Country_List_ISO_3166_Codes_Latitude_Longitude.csv"
dataReader = csv.reader(open(csv_filepathname, encoding='utf-8'), delimiter=',', quotechar='"')
rowcounter = 0
for row in dataReader:
if not rowcounter == 0:
country = Country()
country.name = row[0]
country.alpha2code = row[1]
country.alpha3code = row[2]
country.numericcode = row[3]
country.latitudeavg = row[4]
country.longitudeavg = row[5]
country.save()
rowcounter += 1
return str(rowcounter) + " countries imported"
def check_for_complete_set(filelist):
loc_set = {'CodeList.txt': False,
'CountryCodes.txt': False,
'FunctionClassifiers.txt': False,
'StatusIndicators.txt': False,
'SubdivisionCodes.txt': False}
for items in filelist:
if items in loc_set:
loc_set.update({items: True})
return list(loc_set.items())
def get_file_names(directory):
"""Returns list of file names within directory"""
contents = os.listdir(directory)
files = list()
for item in contents:
if os.path.isfile(os.path.join(directory, item)):
files.append(item)
return files
def check_version_dir(version):
dirpath = os.getcwd() + "/unlocode/data/versions/" + version
if os.path.exists(dirpath):
files = get_file_names(dirpath)
else:
files = ""
filestatus = check_for_complete_set(files)
return filestatus
| bsd-3-clause | 5,067,940,848,887,395,000 | 31.232198 | 120 | 0.600038 | false |
Birion/python-ffdl | pyffdl/core/app.py | 1 | 4385 | import shutil
from typing import List, Tuple, Optional
import attr
import click
from furl import furl # type: ignore
from pyffdl.__version__ import __version__
from pyffdl.sites import (
AdultFanFictionStory,
ArchiveOfOurOwnStory,
FanFictionNetStory,
HTMLStory,
TwistingTheHellmouthStory,
TGStorytimeStory,
)
from pyffdl.utilities import get_url_from_file, list2text
AVAILABLE_SITES = {
"fanfiction.net": FanFictionNetStory,
"fictionpress.com": FanFictionNetStory,
"adult-fanfiction.org": AdultFanFictionStory,
"archiveofourown.org": ArchiveOfOurOwnStory,
"tthfanfic.org": TwistingTheHellmouthStory,
"tgstorytime.com": TGStorytimeStory,
}
@attr.s()
class URL:
url: furl = attr.ib()
file: Optional[str] = attr.ib(default=None)
def download(urls: List[URL], verbose: bool = False, force: bool = False) -> None:
for url in urls:
if not url.url:
continue
try:
host = ".".join(url.url.host.split(".")[-2:])
site = AVAILABLE_SITES.get(host)
if not site:
click.echo(
f"{__file__} is currently only able to download from {list2text(list(AVAILABLE_SITES.keys()))}."
)
return
story = site.parse(url.url, verbose, force)
if url.file:
story.filename = url.file
story.run()
except AttributeError as e:
raise e
# print(e)
# error = "There were problems with parsing the URL."
# with open("pyffdl.log", "a") as fp:
# click.echo(error, file=fp)
# click.echo(error, err=True)
@click.group()
@click.version_option(version=__version__)
def cli() -> None:
pass
@cli.command( # noqa: unused-function
"download", help="Download a new fanfiction story."
)
@click.option(
"-f",
"--from",
"from_file",
type=click.File(),
help="Load a list of URLs from a plaintext file.",
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_download(
from_file: click.File, url_list: Tuple[str, ...], verbose: bool = False
) -> None:
urls = [URL(furl(x)) for x in url_list]
if from_file:
urls += [
URL(furl(x.strip("\n"))) for x in from_file.readlines() if not x.startswith("#")
]
download(urls, verbose)
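# Example invocation (story URL and console-script name are illustrative):
#   pyffdl download -v https://www.fanfiction.net/s/1234567/1/Some-Story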
@cli.command( # noqa: unused-function
"html", help="Download a single story, using a list of chapter URLs."
)
@click.option(
"-f",
"--from",
"from_file",
type=click.File(),
help="Load a list of URLs from a plaintext file.",
)
@click.option("-a", "--author", help="Name of the author", type=str, required=True)
@click.option("-t", "--title", help="Title of the story", type=str, required=True)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("url_list", nargs=-1)
def cli_html(
from_file: click.File,
author: str,
title: str,
url_list: Tuple[str, ...],
verbose: bool = False,
):
urls = [URL(furl(x)) for x in url_list]
if from_file:
urls += [
URL(furl(x.strip("\n"))) for x in from_file.readlines() if not x.startswith("#")
]
if not urls:
click.echo("You must provide at least one URL to download.")
return
story = HTMLStory(
chapters=[x.url.tostr() for x in urls],
author=author,
title=title,
url=furl("http://httpbin.org/status/200"),
)
story.verbose = verbose
story.run()
@cli.command( # noqa: unused-function
"update", help="Update an existing .epub fanfiction file."
)
@click.option(
"-f",
"--force",
is_flag=True,
default=False,
help="Completely refresh the ebook file.",
)
@click.option(
"-b", "--backup", is_flag=True, default=False, help="Backup the original file."
)
@click.option("-v", "--verbose", is_flag=True)
@click.argument("filenames", type=click.Path(dir_okay=False, exists=True), nargs=-1)
def cli_update(
force: bool, backup: bool, filenames: List[click.Path], verbose: bool = False
) -> None:
if backup:
for filename in filenames:
shutil.copy(f"{filename}", f"{filename}.bck")
stories = [
URL(get_url_from_file(x), str(x) if not force else None) for x in filenames
]
download(stories, verbose, force)
| mit | 876,202,144,005,720,600 | 27.848684 | 116 | 0.601596 | false |
libvirt/libvirt-python | examples/domstart.py | 1 | 1239 | #!/usr/bin/env python3
"""
Check that the domain described by DOMAIN.XML is running.
If the domain is not running, create it.
"""
import libvirt
import libxml2
from argparse import ArgumentParser
from typing import Tuple
# Parse the XML description of domU from FNAME
# and return a tuple (name, xmldesc) where NAME
# is the name of the domain, and xmldesc is the content of FNAME
def read_domain(fname: str) -> Tuple[str, str]:
fp = open(fname, "r")
xmldesc = fp.read()
fp.close()
doc = libxml2.parseDoc(xmldesc)
name = doc.xpathNewContext().xpathEval("/domain/name")[0].content
return (name, xmldesc)
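# A minimal DOMAIN.XML (illustrative) only needs a <name> element for read_domain():
#   <domain type='qemu'>
#     <name>example-domU</name>
#     ...
#   </domain>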
parser = ArgumentParser(description=__doc__)
parser.add_argument("file", metavar="DOMAIN.XML", help="XML configuration of the domain in libvirt's XML format")
args = parser.parse_args()
(name, xmldesc) = read_domain(args.file)
try:
conn = libvirt.open(None)
except libvirt.libvirtError:
print('Failed to open connection to the hypervisor')
exit(1)
try:
dom = conn.lookupByName(name)
except libvirt.libvirtError:
print("Starting domain %s ... " % name)
dom = conn.createLinux(xmldesc, 0)
if dom is None:
print("failed")
exit(1)
else:
print("done")
| lgpl-2.1 | -1,861,244,689,678,241,000 | 25.361702 | 113 | 0.688458 | false |
nburn42/tensorflow | tensorflow/python/keras/models_test.py | 1 | 4439 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `models.py` (model cloning, mainly)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.platform import test
class TestModelCloning(test.TestCase):
def test_clone_sequential_model(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_out = np.random.random((10, 4))
model = keras.models.Sequential()
model.add(keras.layers.Dense(4, input_shape=(4,)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(4))
# Everything should work in a new session.
keras.backend.clear_session()
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new tensor
input_a = keras.Input(shape=(4,))
new_model = keras.models.clone_model(
model, input_tensors=input_a)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(val_a, val_out)
# On top of new, non-Keras tensor
input_a = keras.backend.variable(val_a)
new_model = keras.models.clone_model(
model, input_tensors=input_a)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_clone_functional_model(self):
with self.test_session():
val_a = np.random.random((10, 4))
val_b = np.random.random((10, 4))
val_out = np.random.random((10, 4))
input_a = keras.Input(shape=(4,))
input_b = keras.Input(shape=(4,))
dense_1 = keras.layers.Dense(4,)
dense_2 = keras.layers.Dense(4,)
x_a = dense_1(input_a)
x_a = keras.layers.Dropout(0.5)(x_a)
x_b = dense_1(input_b)
x_a = dense_2(x_a)
outputs = keras.layers.add([x_a, x_b])
model = keras.models.Model([input_a, input_b], outputs)
# Everything should work in a new session.
keras.backend.clear_session()
with self.test_session():
# With placeholder creation
new_model = keras.models.clone_model(model)
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new tensors
input_a = keras.Input(shape=(4,), name='a')
input_b = keras.Input(shape=(4,), name='b')
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch([val_a, val_b], val_out)
# On top of new, non-Keras tensors
input_a = keras.backend.variable(val_a)
input_b = keras.backend.variable(val_b)
new_model = keras.models.clone_model(
model, input_tensors=[input_a, input_b])
new_model.compile('rmsprop', 'mse')
new_model.train_on_batch(None, val_out)
def test_model_cloning_invalid_use_cases(self):
seq_model = keras.models.Sequential()
seq_model.add(keras.layers.Dense(4, input_shape=(4,)))
x = keras.Input((4,))
y = keras.layers.Dense(4)(x)
fn_model = keras.models.Model(x, y)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(seq_model)
with self.assertRaises(ValueError):
keras.models._clone_functional_model(None)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(fn_model)
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=[x, x])
with self.assertRaises(ValueError):
keras.models._clone_sequential_model(seq_model, input_tensors=y)
if __name__ == '__main__':
test.main()
| apache-2.0 | -5,862,570,881,873,089,000 | 33.952756 | 80 | 0.644289 | false |
semente/django-smuggler | tests/test_app/tests/test_auth.py | 1 | 3232 | from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
class TestSmugglerViewsRequireAuthentication(TestCase):
def test_dump_data(self):
url = reverse('dump-data')
response = self.client.get(url)
self.assertRedirects(
response, '/admin/login/?next=/admin/dump/')
def test_dump_app_data(self):
url = reverse('dump-app-data', kwargs={'app_label': 'sites'})
response = self.client.get(url)
self.assertRedirects(
response, '/admin/login/?next=/admin/sites/dump/')
def test_dump_model_data(self):
url = reverse('dump-model-data', kwargs={
'app_label': 'sites',
'model_label': 'site'
})
response = self.client.get(url)
self.assertRedirects(
response, '/admin/login/?next=/admin/sites/site/dump/')
def test_load_data(self):
url = reverse('load-data')
response = self.client.get(url, follow=True)
self.assertRedirects(
response, '/admin/login/?next=/admin/load/')
class TestSmugglerViewsDeniesNonSuperuser(TestCase):
def setUp(self):
staff = User(username='staff')
staff.set_password('test')
staff.is_staff = True
staff.save()
self.client.login(username='staff', password='test')
def test_dump_data(self):
url = reverse('dump-data')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_dump_app_data(self):
url = reverse('dump-app-data', kwargs={'app_label': 'sites'})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_dump_model_data(self):
url = reverse('dump-model-data', kwargs={
'app_label': 'sites',
'model_label': 'site'
})
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def test_load_data(self):
url = reverse('load-data')
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
class TestSmugglerViewsAllowsSuperuser(TestCase):
def setUp(self):
superuser = User(username='superuser')
superuser.set_password('test')
superuser.is_staff = True
superuser.is_superuser = True
superuser.save()
self.client.login(username='superuser', password='test')
def test_dump_data(self):
url = reverse('dump-data')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dump_app_data(self):
url = reverse('dump-app-data', kwargs={'app_label': 'sites'})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_dump_model_data(self):
url = reverse('dump-model-data', kwargs={
'app_label': 'sites',
'model_label': 'site'
})
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_load_data(self):
url = reverse('load-data')
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
| lgpl-3.0 | -7,871,784,510,331,345,000 | 32.319588 | 69 | 0.60953 | false |
liuwill-projects/flask-server-scaffold | main.py | 1 | 1505 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
from flask import Flask, jsonify # , request, current_app
from flask_cors import CORS, cross_origin
from flask_socketio import SocketIO, emit, send
from chat.utils.jsonp import jsonp
from chat.controllers.mock import Mock
import logging
from logging.config import fileConfig
fileConfig('logging_config.ini')
logger = logging.getLogger()
#logger = logging.getLogger('api')
app = Flask(__name__)
cors = CORS(app, resources={r"/api/users/*": {"origins": "*"}})
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)
mockController = Mock(app)
@app.route("/")
def hello():
print os.environ.get('PYTHONSTARTUP')
return "Hello World!"
@app.route("/api/jsonp/getCookie.js")
@jsonp
def getCookie():
return mockController.getCookie()
@app.route("/api/users/me")
@cross_origin()
def me():
return mockController.me()
@socketio.on('message', namespace='/chat')
def handle_message(message):
send(message)
@socketio.on('json', namespace='/chat')
def handle_json(json):
send(json, json=True)
@socketio.on('my event', namespace='/chat')
def test_message(message):
emit('my response', {'data': 'got it!'})
@socketio.on('connect', namespace='/chat')
def test_connect():
emit('my response', {'data': 'Connected'})
@socketio.on('disconnect', namespace='/chat')
def test_disconnect():
print('Client disconnected')
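# Illustrative client interaction (hypothetical client, not part of this file): a
# Socket.IO client connected to the '/chat' namespace that emits 'my event' with a
# JSON payload receives a 'my response' event back from test_message(); plain
# 'message' and 'json' events are simply echoed by handle_message() / handle_json()
# via send().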
if __name__ == "__main__":
#app.run('0.0.0.0', 5000)
socketio.run(app, host='0.0.0.0', port=5000)
| mit | 7,558,739,674,957,527,000 | 24.508475 | 63 | 0.682392 | false |
theY4Kman/infusionsoft-client | setup.py | 1 | 1956 | import os
from setuptools import setup, find_packages
def build_install_requires(path):
"""Support pip-type requirements files"""
basedir = os.path.dirname(path)
with open(path) as f:
reqs = []
for line in f:
line = line.strip()
if not line:
continue
if line[0] == '#':
continue
elif line.startswith('-r '):
nested_req = line[3:].strip()
nested_path = os.path.join(basedir, nested_req)
reqs += build_install_requires(nested_path)
elif line[0] == '-':
continue
else:
reqs.append(line)
return reqs
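# Illustrative input (hypothetical file contents): a requirements.txt containing
#   requests>=2.0
#   -r base.txt
#   # pinned for CI
# yields ['requests>=2.0'] plus whatever base.txt (resolved relative to the
# including file) contributes; comment lines and other '-' options are skipped.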
pkg = 'infusionsoft'
root = os.path.dirname(__file__)
from_root = lambda *p: os.path.join(root, *p)
pkg_root = lambda *p: from_root(pkg, *p)
with open(from_root('README.rst')) as fp:
long_description = fp.read()
with open(pkg_root('version.py')) as fp:
context = {}
exec(fp.read(), None, context)
version = context['__version__']
setup(
name='infusionsoft-client',
version=version,
url='https://github.com/theY4Kman/infusionsoft-client',
author='Zach "theY4Kman" Kanzler',
author_email='[email protected]',
description='Sexy Infusionsoft XML-RPC API client',
long_description=long_description,
packages=find_packages(include=(pkg, pkg + '.*')),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
],
install_requires=build_install_requires(from_root('requirements.txt')),
extras_requires=build_install_requires(from_root('django-requirements.txt')),
include_package_data=True,
)
| apache-2.0 | 1,362,176,675,255,124,500 | 30.047619 | 82 | 0.593047 | false |
brigittebigi/proceed | proceed/src/term/textprogress.py | 1 | 5853 | #!/usr/bin/env python2
# -*- coding: UTF-8 -*-
# ---------------------------------------------------------------------------
# ___ __ __ __ ___
# / | \ | \ | \ / Automatic
# \__ |__/ |__/ |___| \__ Annotation
# \ | | | | \ of
# ___/ | | | | ___/ Speech
# =============================
#
# http://sldr.org/sldr000800/preview/
#
# ---------------------------------------------------------------------------
# developed at:
#
# Laboratoire Parole et Langage
#
# Copyright (C) 2011-2015 Brigitte Bigi
#
# Use of this software is governed by the GPL, v3
# This banner notice must not be removed
# ---------------------------------------------------------------------------
#
# SPPAS is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SPPAS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SPPAS. If not, see <http://www.gnu.org/licenses/>.
#
# ---------------------------------------------------------------------------
# File: textprogress.py
# ----------------------------------------------------------------------------
__docformat__ = """epytext"""
__authors__ = """Brigitte Bigi ([email protected])"""
__copyright__ = """Copyright (C) 2011-2015 Brigitte Bigi"""
# ----------------------------------------------------------------------------
import sys
import re
import math
from terminalcontroller import TerminalController
# ----------------------------------------------------------------------------
# Constants
# ----------------------------------------------------------------------------
WIDTH = 74
BAR = '%3d%% ${GREEN}[${BOLD}%s%s${NORMAL}${GREEN}]${NORMAL}\n'
HEADER = '${BOLD}${CYAN}%s${NORMAL}\n\n'
# ----------------------------------------------------------------------------
class TextProgress:
"""
@authors: Brigitte Bigi
@contact: brigitte.bigi((AATT))lpl-aix.fr
@license: GPL
    @summary: A 3-line progress bar.
It looks like::
Header
20% [===========----------------------------------]
progress message
    The progress bar is colored if the terminal supports color
    output, and it adjusts to the width of the terminal.
"""
def __init__(self):
"""
Constructor.
"""
try:
self.term = TerminalController()
except:
self.term = None
if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
self.term = None
self.bar = BAR
if self.term:
self.bar = self.term.render(BAR)
        self.cleared = 1 #: true if we haven't drawn the bar yet.
self.percent = 0
self.text = ""
# End __init__
# ------------------------------------------------------------------
def update(self, percent, message):
"""
Update the progress.
        @param percent: progress bar value, in the range [0, 1]
        @param message: progress bar text
"""
n = int((WIDTH-10)*percent)
if self.term:
sys.stdout.write(
self.term.BOL + self.term.UP + self.term.CLEAR_EOL +
(self.bar % (100*percent, '='*n, '-'*(WIDTH-10-n))) +
self.term.CLEAR_EOL + message.center(WIDTH))
else:
sys.stdout.write( ' => ' + message + " \n")
self.percent = percent
self.text = message
# End update
# ------------------------------------------------------------------
def clear(self):
"""
Clear.
"""
if not self.cleared:
if self.term:
sys.stdout.write(self.term.BOL + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL +
self.term.UP + self.term.CLEAR_EOL)
else:
sys.stdout.write('\n'*50)
self.cleared = 1
# End clear
# ------------------------------------------------------------------
def set_fraction(self, percent):
"""
Set a new progress value.
        @param percent: new progress value
"""
self.update(percent,self.text)
# End set_fraction
# ------------------------------------------------------------------
def set_text(self,text):
"""
Set a new progress text.
@param text: new progress text
"""
self.update(self.percent,text)
# End set_text
# ------------------------------------------------------------------
def set_header(self,header):
"""
        Set a new progress header.
        @param header: new progress header text
"""
if self.term:
self.header = self.term.render(HEADER % header.center(WIDTH))
else:
self.header = " " + header
sys.stdout.write(self.header)
# End set_header
# ------------------------------------------------------------------
def set_new(self):
"""
Initialize a new progress line.
"""
sys.stdout.write('\n')
self.clear()
self.text = ""
self.percent = 0
# End set_new
# ------------------------------------------------------------------
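# Illustrative usage (hypothetical values):
#   p = TextProgress()
#   p.set_header("Processing files")
#   p.update(0.25, "reading input")
#   p.update(1.0, "done")
#   p.set_new()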
| gpl-3.0 | -6,311,342,072,019,640,000 | 27.832512 | 78 | 0.411584 | false |
chenzhengchen200821109/github-python | socketerror.py | 1 | 1117 | #!/bin/python
# Error handling
import socket, sys
host = sys.argv[1]
textport = sys.argv[2]
filename = sys.argv[3]
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, e:
print "Strange error creating socket: %s" % e
sys.exit(1)
try:
port = int(textport)
except ValueError:
    # That didn't work, so it's probably a protocol name
# Look it up instead.
try:
port = socket.getservbyname(textport, 'tcp')
except socket.error, e:
print "Couldn't find your port: %s" % e
sys.exit(1)
try:
s.connect((host, port))
except socket.gaierror, e:
print "Address-related error connecting to server: %s" % e
sys.exit(1)
except socket.error, e:
print "Connection error: %s" % e
sys.exit(1)
try:
s.sendall("GET %s HTTP/1.0\r\n\r\n" % filename)
except socket.error, e:
print "Error sending data: %s" % e
sys.exit(1)
while 1:
try:
buf = s.recv(2048)
except socket.error, e:
print "Error receiving data: %s" % e
sys.exit(1)
if not len(buf):
break
sys.stdout.write(buf)
| mit | 3,929,081,061,703,721,000 | 20.901961 | 62 | 0.615936 | false |
bachiraoun/fullrmc | Examples/explore/createSystem.py | 1 | 1476 | import os
from pdbparser.pdbparser import pdbparser
from pdbparser.Utilities.Collection import get_path
from pdbparser.Utilities.Construct import AmorphousSystem
from pdbparser.Utilities.Geometry import get_satisfactory_records_indexes, translate, get_geometric_center
from pdbparser.Utilities.Modify import delete_records_and_models_records, reset_records_serial_number, reset_sequence_number_per_residue
from pdbparser.Utilities.Database import __WATER__
# read thf molecule and translate to the center
thfNAGMA = pdbparser(os.path.join(get_path("pdbparser"),"Data/NAGMA.pdb" ) )
center = get_geometric_center(thfNAGMA.indexes, thfNAGMA)
translate(thfNAGMA.indexes, thfNAGMA, -center)
# create pdbWATER
pdbWATER = pdbparser()
pdbWATER.records = __WATER__
pdbWATER.set_name("water")
# create amorphous
pdbWATER = AmorphousSystem(pdbWATER, boxSize=[40,40,40], density = 0.75).construct().get_pdb()
center = get_geometric_center(pdbWATER.indexes, pdbWATER)
translate(pdbWATER.indexes, pdbWATER, -center)
# make hollow
hollowIndexes = get_satisfactory_records_indexes(pdbWATER.indexes, pdbWATER, "np.sqrt(x**2 + y**2 + z**2) <= 10")
delete_records_and_models_records(hollowIndexes, pdbWATER)
# concatenate
thfNAGMA.concatenate(pdbWATER, pdbWATER.boundaryConditions)
# reset numbering
reset_sequence_number_per_residue(thfNAGMA.indexes, thfNAGMA)
reset_records_serial_number(thfNAGMA)
# export and visualize
thfNAGMA.export_pdb("nagma_in_water.pdb")
thfNAGMA.visualize()
| agpl-3.0 | -1,953,033,512,038,257,700 | 38.891892 | 136 | 0.797425 | false |
hasibi/TAGME-Reproducibility | scripts/evaluator_annot.py | 1 | 6793 | """
This script computes Topic metrics for the end-to-end performance.
Precision and recall are macro-averaged.
Matching condition: entities should match and mentions should be equal or contained in each other.
@author: Faegheh Hasibi ([email protected])
"""
from __future__ import division
import sys
from collections import defaultdict
class EvaluatorAnnot(object):
def __init__(self, qrels, results, score_th, null_qrels=None):
self.qrels_dict = self.__group_by_queries(qrels)
self.results_dict = self.__group_by_queries(results, res=True, score_th=score_th)
self.null_qrels = self.__group_by_queries(null_qrels) if null_qrels else None
@staticmethod
def __group_by_queries(file_lines, res=False, score_th=None):
"""
Groups the lines by query id.
:param file_lines: list of lines [[qid, score, en_id, mention, page_id], ...]
:return: {qid: {(men0, en0), (men1, en01), ..}, ..};
"""
grouped_inters = defaultdict(set)
for cols in file_lines:
if len(cols) > 2:
if res and (float(cols[1]) < score_th):
continue
grouped_inters[cols[0]].add((cols[3].lower(), cols[2].lower()))
return grouped_inters
def rm_nulls_from_res(self):
"""
Removes mentions that not linked to an entity in the qrel.
There are some entities in the qrel with "*NONE*" as id. We remove the related mentions from the result file.
Null entities are generated due to the inconsistency between TAGME Wikipedia dump (2009) and our dump (2010).
"""
print "Removing mentions with null entities ..."
new_results_dict = defaultdict(set)
for qid in self.results_dict:
# easy case: the query does not have any null entity.
if qid not in set(self.null_qrels.keys()):
new_results_dict[qid] = self.results_dict[qid]
continue
qrel_null_mentions = [item[0] for item in self.null_qrels[qid]]
# check null mentions with results mentions
for men, en in self.results_dict[qid]:
is_null = False
for qrel_null_men in qrel_null_mentions:
# results mention does not match null qrel mention
if mention_match(qrel_null_men, men):
is_null = True
break
if not is_null:
new_results_dict[qid].add((men, en))
self.results_dict = new_results_dict
def eval(self, eval_query_func):
"""
Evaluates all queries and calculates total precision, recall and F1 (macro averaging).
:param eval_query_func: A function that takes qrel and results for a query and returns evaluation metrics
:return Total precision, recall, and F1 for all queries
"""
self.rm_nulls_from_res()
queries_eval = {}
total_prec, total_rec, total_f = 0, 0, 0
for qid in sorted(self.qrels_dict):
queries_eval[qid] = eval_query_func(self.qrels_dict[qid], self.results_dict.get(qid, {}))
total_prec += queries_eval[qid]['prec']
total_rec += queries_eval[qid]['rec']
n = len(self.qrels_dict) # number of queries
total_prec /= n
total_rec /= n
total_f = 2 * total_prec * total_rec / (total_prec + total_rec)
log = "\n----------------" + "\nEvaluation results:\n" + \
"Prec: " + str(round(total_prec, 4)) + "\n" +\
"Rec: " + str(round(total_rec, 4)) + "\n" + \
"F1: " + str(round(total_f, 4)) + "\n" + \
"all: " + str(round(total_prec, 4)) + ", " + str(round(total_rec, 4)) + ", " + str(round(total_f, 4))
print log
metrics = {'prec': total_prec, 'rec': total_rec, 'f': total_f}
return metrics
def erd_eval_query(query_qrels, query_results):
"""
Evaluates a single query.
:param query_qrels: Query interpretations from Qrel [{en1, en2, ..}, ..]
:param query_results: Query interpretations from result file [{en1, en2, ..}, ..]
:return: precision, recall, and F1 for a query
"""
tp = 0 # correct
fn = 0 # missed
fp = 0 # incorrectly returned
# ----- Query has at least an interpretation set. -----
# Iterate over qrels to calculate TP and FN
for qrel_item in query_qrels:
if find_item(qrel_item, query_results):
tp += 1
else:
fn += 1
# Iterate over results to calculate FP
for res_item in query_results:
if not find_item(res_item, query_qrels): # Finds the result in the qrels
fp += 1
prec = tp / (tp+fp) if tp+fp != 0 else 0
rec = tp / (tp+fn) if tp+fn != 0 else 0
f = (2 * prec * rec) / (prec + rec) if prec + rec != 0 else 0
metrics = {'prec': prec, 'rec': rec, 'f': f}
return metrics
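# Hypothetical example: qrel {("obama", "barack_obama"), ("usa", "united_states")}
# against results {("obama", "barack_obama"), ("nyc", "new_york_city")} gives
# tp=1, fn=1, fp=1 and therefore prec = rec = f = 0.5.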
def find_item(item_to_find, items_list):
"""
Returns True if an item is found in the item list.
:param item_to_find: item to be found
:param items_list: list of items to search in
:return boolean
"""
is_found = False
for item in items_list:
if (item[1] == item_to_find[1]) and mention_match(item[0], item_to_find[0]):
is_found = True
return is_found
def mention_match(mention1, mention2):
"""
Checks if two mentions matches each other.
Matching condition: One of the mentions is sub-string of the other one.
"""
match = ((mention1 in mention2) or (mention2 in mention1))
return match
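# Illustrative checks: mention_match("barack obama", "obama") is True because one
# string contains the other; mention_match("obama", "bush") is False.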
def parse_file(file_name, res=False):
"""
Parses file and returns the positive instances for each query.
:param file_name: Name of file to be parsed
:return lists of lines [[qid, label, en_id, ...], ...], lines with null entities are separated
"""
null_lines = []
file_lines = []
efile = open(file_name, "r")
for line in efile.readlines():
if line.strip() == "":
continue
cols = line.strip().split("\t")
if (not res) and (cols[2].strip() == "*NONE*"):
null_lines.append(cols)
else:
file_lines.append(cols)
return file_lines, null_lines
def main(args):
if len(args) < 2:
print "\tUsage: <qrel_file> <result_file>"
exit(0)
print "parsing qrel ..."
qrels, null_qrels = parse_file(args[0]) # here qrel does not contain null entities
print "parsing results ..."
results = parse_file(args[1], res=True)[0]
print "evaluating ..."
evaluator = EvaluatorAnnot(qrels, results, float(args[2]), null_qrels=null_qrels)
evaluator.eval(erd_eval_query)
if __name__ == '__main__':
main(sys.argv[1:])
| mit | 5,499,083,128,206,597,000 | 35.718919 | 117 | 0.580892 | false |
conda/kapsel | conda_kapsel/internal/toposort.py | 1 | 2834 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import collections
class CycleError(Exception):
def __init__(self, involving):
message = "Cycle in graph involving {involving}".format(involving=involving)
super(CycleError, self).__init__(message)
self.involving = involving
def toposort(nodes, get_next_nodes):
"""Sort list of graph nodes.
Returns a new list, does not modify input list.
Args:
nodes (iterable): iterable of some kind of node
get_next_nodes (function): takes a node and returns iterable of next nodes
Returns:
new sorted list of nodes
"""
traversing = set()
traversed = set()
result = collections.deque()
def traverse(node):
if node in traversing:
raise CycleError(node)
if node in traversed:
return # not a cycle but we already saw this
traversing.add(node)
for next in get_next_nodes(node):
traverse(next)
traversed.add(node)
traversing.remove(node)
result.appendleft(node)
for node in nodes:
traverse(node)
return list(result)
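# Illustrative usage (hypothetical graph): with edges a -> b -> c,
#   toposort(['c', 'a', 'b'], lambda n: {'a': ['b'], 'b': ['c'], 'c': []}[n])
# returns ['a', 'b', 'c'], i.e. every node comes before the nodes it points to.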
def toposort_from_dependency_info(nodes, get_node_key, get_dependency_keys, can_ignore_dependency=None):
"""Sort list of nodes that depend on other nodes in dependency-first order.
All dependencies must be in the list of nodes.
Returns a new list, does not modify input list.
Args:
nodes (iterable): iterable of some kind of node
get_node_key (function): get identifier for a node
get_dependency_keys (function): get iterable of node identifiers a node depends on
Returns:
new sorted list of nodes
"""
nodes_by_key = dict()
node_depended_on_by = dict()
for node in nodes:
key = get_node_key(node)
if key in nodes_by_key:
raise ValueError("two nodes with the same key %r" % key)
nodes_by_key[key] = node
node_depended_on_by[key] = set()
for node in nodes:
dep_keys = get_dependency_keys(node)
for dep_key in dep_keys:
if dep_key not in nodes_by_key:
if can_ignore_dependency is None or not can_ignore_dependency(dep_key):
raise ValueError("Dependency %r was not in the list of nodes %r" % (dep_key, nodes))
else:
node_depended_on_by[dep_key].add(node)
return toposort(nodes, lambda n: node_depended_on_by[get_node_key(n)])
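# Illustrative usage (hypothetical packages): if 'app' depends on 'lib' and 'lib'
# depends on nothing, the sorted result is ['lib', 'app'] - dependencies first.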
| bsd-3-clause | 6,041,369,235,106,766,000 | 31.563218 | 104 | 0.5976 | false |
demianw/tract_querier | tract_querier/tests/test_query_eval.py | 1 | 10072 | from .. import query_processor
from nose.tools import assert_true, assert_equal
from numpy import random
import ast
# One hundred tracts traversing random labels
another_set = True
while (another_set):
tracts_labels = dict([(i, set(random.randint(100, size=2))) for i in range(100)])
labels_tracts = query_processor.labels_for_tracts(tracts_labels)
another_set = 0 not in labels_tracts.keys() or 1 not in labels_tracts.keys()
tracts_in_0 = set().union(*[labels_tracts[label] for label in labels_tracts if label == 0])
tracts_in_all_but_0 = set().union(*[labels_tracts[label] for label in labels_tracts if label != 0])
tract_in_label_0_uniquely = labels_tracts[0].difference(tracts_in_all_but_0)
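# Fixture summary (descriptive note): tracts_labels maps each of the 100 tracts to
# up to two random labels, labels_tracts is the inverse mapping, and the loop above
# retries until both label 0 and label 1 occur so the tests below always have
# tracts to work with.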
class DummySpatialIndexing:
def __init__(
self,
crossing_tracts_labels, crossing_labels_tracts,
ending_tracts_labels, ending_labels_tracts,
label_bounding_boxes, tract_bounding_boxes
):
self.crossing_tracts_labels = crossing_tracts_labels
self.crossing_labels_tracts = crossing_labels_tracts
self.ending_tracts_labels = ending_tracts_labels
self.ending_labels_tracts = ending_labels_tracts
self.label_bounding_boxes = label_bounding_boxes
self.tract_bounding_boxes = tract_bounding_boxes
dummy_spatial_indexing = DummySpatialIndexing(tracts_labels, labels_tracts, ({}, {}), ({}, {}), {}, {})
empty_spatial_indexing = DummySpatialIndexing({}, {}, ({}, {}), ({}, {}), {}, {})
def test_assign():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_assign_attr():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("a.left=0"))
assert_true((
'a.left' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['a.left'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['a.left'].labels == set((0,))
))
def test_assign_side():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_labels = {
'a.left': set([3, 6]),
'a.right': set([4, 5]),
'b.left': set([3]),
'b.right': set([4]),
'c.left': set([5]),
'c.right': set([6])
}
queries_tracts = {
'a.left': set([]),
'a.right': set([]),
'b.left': set([]),
'b.right': set([]),
'c.left': set([]),
'c.right': set([])
}
query = r"""
b.left=3 ;
b.right = 4;
c.left = 5;
c.right = 6;
a.side = b.side or c.opposite
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.labels for k, v in query_evaluator.evaluated_queries_info.items()}, queries_labels)
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_assign_str():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_labels = {
'b.left': set([3]),
'b.right': set([4]),
'c.left': set([5]),
'c.right': set([6]),
'h': set([3, 5])
}
queries_tracts = {
'b.left': set([]),
'b.right': set([]),
'c.left': set([]),
'c.right': set([]),
'h': set([])
}
query = """
b.left=3
b.right = 4
c.left = 5
c.right = 6
h = '*left'
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.labels for k, v in query_evaluator.evaluated_queries_info.items()}, queries_labels)
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_for_list():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_tracts = {
'a.left': set([]),
'a.right': set([]),
'b.left': set([]),
'b.right': set([]),
'c.left': set([]),
'c.right': set([]),
'd.left': set([]),
'd.right': set([]),
'e.left': set([]),
'e.right': set([])
}
query = """
a.left= 0
b.left= 1
c.left= 2
d.left= 3
e.left= 4
for i in [a,b,c,d,e]: i.right = i.left
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_for_str():
query_evaluator = query_processor.EvaluateQueries(empty_spatial_indexing)
queries_tracts = {
'a.left': set([]),
'a.left.right': set([]),
'b.left': set([]),
'b.left.right': set([]),
'c.left': set([]),
'c.left.right': set([]),
'd.left': set([]),
'd.left.right': set([]),
'e.left': set([]),
'e.left.right': set([])
}
query = """
a.left= 0
b.left= 1
c.left= 2
d.left= 3
e.left= 4
for i in '*left': i.right = i
"""
query_evaluator.visit(ast.parse(query))
assert_equal({k: v.tracts for k, v in query_evaluator.evaluated_queries_info.items()}, queries_tracts)
def test_add():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0+1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].union(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_mult():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 * 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].intersection(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_sub():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=(0 + 1) - 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].difference(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_or():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 or 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].union(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_and():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 and 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].intersection(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0, 1))
))
def test_not_in():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0 or 1 not in 1"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0].difference(labels_tracts[1]) and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_only_sign():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=~0"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tract_in_label_0_uniquely and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_only():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=only(0)"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tract_in_label_0_uniquely and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_unsaved_query():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A|=0"))
assert_true((
'A' not in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['A'].labels == set((0,))
))
def test_symbolic_assignment():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A=0; B=A"))
assert_true((
'B' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['B'].tracts == labels_tracts[0] and
query_evaluator.evaluated_queries_info['B'].labels == set((0,))
))
def test_unarySub():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("B=0; A=-B"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tracts_in_all_but_0 and
query_evaluator.evaluated_queries_info['A'].labels == set(labels_tracts.keys()).difference((0,))
))
def test_not():
query_evaluator = query_processor.EvaluateQueries(dummy_spatial_indexing)
query_evaluator.visit(ast.parse("A= not 0"))
assert_true((
'A' in query_evaluator.queries_to_save and
query_evaluator.evaluated_queries_info['A'].tracts == tracts_in_all_but_0 and
query_evaluator.evaluated_queries_info['A'].labels == set(labels_tracts.keys()).difference((0,))
))
| bsd-3-clause | -3,546,840,993,955,329,000 | 32.131579 | 113 | 0.629468 | false |
morganmeliment/Calculate-Pi | calculatepi.py | 1 | 1034 | """
calculatepi.py
Author: Morgan Meliment
Credit: none
Assignment:
Write and submit a Python program that computes an approximate value of π by calculating the following sum:
(see: https://github.com/HHS-IntroProgramming/Calculate-Pi/blob/master/README.md)
This sum approaches the true value of π as n approaches ∞.
Your program must ask the user how many terms to use in the estimate of π, how many decimal places,
then print the estimate using that many decimal places. Exactly like this:
I will estimate pi. How many terms should I use? 100
How many decimal places should I use in the result? 7
The approximate value of pi is 3.1315929
Note: remember that the printed value of pi will be an estimate!
"""
import math
num = int(input("I will estimate pi. How many terms should I use? "))
dec = int(input("How many decimal places should I use in the result? "))
func = lambda n: (((-1) ** n)/((2 * n) + 1))
m = map(func, range(0,num))
pi = 4 * sum(m)
print("The approximate value of pi is {0:.{1}f}".format(pi, dec))
| mit | -4,700,008,651,961,431,000 | 32.193548 | 107 | 0.729835 | false |
gh4w/some | web/diego/pronostix/scripts/load_database.py | 1 | 1473 | # coding: utf8
#! /usr/bin/env python3
import json
import re
import iso8601 as iso
from pronostix.models import Club, Rencontre
def get_entity_id(entity):
return get_url_id(entity['_links']['self']['href'])
def get_url_id(url):
regex = re.compile('http://api.football-data.org/v1/[^/]+/(?P<id>\d+)$')
m = regex.match(url)
return m.group("id")
def charger_clubs(teams):
clubs = []
for t in teams:
c, created = Club.objects.get_or_create(nom = t['name'], json_id = get_entity_id(t))
if created: c.save()
clubs.append(c)
return clubs
def charger_rencontres(fixtures, clubs):
team2club = { c.json_id: c for c in clubs }
rencontres = []
for f in fixtures:
status = f['status']
id1 = get_url_id(f['_links']['homeTeam']['href'])
id2 = get_url_id(f['_links']['awayTeam']['href'])
d = iso.parse_date(f['date'])
r, created = Rencontre.objects.get_or_create(date = d, club1 = team2club[id1], club2 = team2club[id2], json_id = get_entity_id(f))
if created: r.save()
rencontres.append(r)
return rencontres
def charger():
teams_file = 'teams.json'
with open(teams_file, 'r') as fs: teams = json.load(fs)
clubs = charger_clubs(teams['teams'])
fixtures_file = 'fixtures.json'
with open(fixtures_file, 'r') as fs: fixtures = json.load(fs)
rencontres = charger_rencontres(fixtures['fixtures'], clubs)
if __name__ == '__main__': charger()
| mit | -5,689,127,188,113,624,000 | 30.340426 | 138 | 0.610998 | false |
magnusmorton/nest | nest/main.py | 1 | 1314 | #!/usr/bin/env python
# encoding: utf-8
"""
Created by Magnus Morton on 2012-03-14.
(c) Copyright 2012 Magnus Morton.
This file is part of Nest.
Nest is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Nest is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with Nest. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
import os
import argparse
import nest.translate
import nest.transformer
import ast
from nest.loop import get_safe_loops
def main():
parser = argparse.ArgumentParser(description='implicitly parallelising Python')
parser.add_argument('file')
args = parser.parse_args()
source_file = args.file
translator = nest.translate.Translator(source_file, get_safe_loops, nest.transformer.ForTransformer)
with open(source_file, 'r') as the_file:
translator.translate(the_file.read())
if __name__ == '__main__':
main()
| agpl-3.0 | 6,048,209,914,957,177,000 | 29.55814 | 104 | 0.750381 | false |
bazz-erp/erpnext | erpnext/accounts/doctype/eventual_purchase_invoice/eventual_purchase_invoice.py | 1 | 3890 | # -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from erpnext.accounts.general_ledger import make_gl_entries
from frappe.utils import nowdate, flt
from frappe import _
class EventualPurchaseInvoice(Document):
def validate(self):
self.validate_dates()
self.check_mandatory()
self.validate_cuit()
self.set_total_amount()
# Removes 'Draft' transition, submit document directly
self._action = "submit"
self.docstatus = 1
self.set_status()
def validate_dates(self):
if not self.issue_date:
self.issue_date = nowdate()
if not self.iva_date:
self.iva_date = nowdate()
def validate_cuit(self):
if not self.cuit.isdigit():
frappe.throw(_("{0} field must contain only digits"))
if len(self.cuit) > 11:
frappe.throw (_("CUIT has 11 numbers as maximum"))
def set_status(self, update = False):
if self.is_new():
self.status = 'Draft'
# None value in outstanding amount indicates that document is new
elif self.docstatus == 1 and (self.outstanding_amount > 0 or self.outstanding_amount is None):
self.status = 'Unpaid'
elif self.docstatus == 1 and self.outstanding_amount <= 0:
self.status = 'Paid'
if update:
self.db_set("status", self.status)
def check_mandatory(self):
for field in ["supplier_name", "cuit", "iva_type", "taxed_amount_21", "taxed_amount_10",
"taxed_amount_27", "iva_21", "iva_10", "iva_27"]:
if self.get(field) == None:
frappe.throw(_("{0} in Eventual Purchase Invoice is mandatory").format(self.meta.get_label(field)))
def set_total_amount(self):
total_amount = 0
for field in ["taxed_amount_21", "taxed_amount_10",
"taxed_amount_27", "iva_21", "iva_10", "iva_27", "exempts", "others", "iva_perception", "ibb_perception"]:
if self.get(field):
total_amount += flt(self.get(field))
self.total_amount = total_amount
def on_submit(self):
self.make_gl_entries()
self.set_status(update = True)
def make_gl_entries(self):
gl_entries = []
self.make_supplier_gl_entry(gl_entries)
make_gl_entries(gl_entries)
def make_supplier_gl_entry(self, gl_entries):
default_payable_account = frappe.get_doc("Company", self.company).default_payable_account
stock_received_but_not_billed = frappe.get_doc("Company", self.company).stock_received_but_not_billed
gl_entries.append(
frappe._dict({
'company': self.company,
'posting_date': nowdate(),
"account": default_payable_account,
"party_type": "Supplier",
"credit": self.total_amount,
"credit_in_account_currency": self.total_amount,
"voucher_no": self.name,
"voucher_type": self.doctype,
"against_voucher": self.name,
"against_voucher_type": self.doctype,
"against": self.supplier_name
})
)
gl_entries.append(
frappe._dict({
"party_type": "Supplier",
"posting_date": nowdate(),
"account": stock_received_but_not_billed,
"debit": self.total_amount,
"debit_in_account_currency": self.total_amount,
"voucher_no": self.name,
"voucher_type": self.doctype,
"against": default_payable_account
})
)
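        # Illustrative posting (hypothetical amount): for total_amount = 1210 the two
        # entries above credit the company's default payable account by 1210 against
        # the supplier and debit "stock received but not billed" by the same 1210,
        # keeping the double entry balanced.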
| gpl-3.0 | -2,652,611,756,018,956,000 | 31.416667 | 128 | 0.569409 | false |
mtbc/openmicroscopy | components/tools/OmeroWeb/omeroweb/webstart/views.py | 1 | 4134 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
#
#
# Copyright (c) 2008-2014 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>, 2008.
#
# Version: 1.0
#
import os
import sys
import traceback
from glob import glob
from django.conf import settings
from django.template import loader as template_loader
from django.template import RequestContext as Context
from django.core.urlresolvers import reverse
from django.views.decorators.cache import never_cache
from omeroweb.http import HttpJNLPResponse
from omero_version import omero_version
from omeroweb.webclient.decorators import render_response
@never_cache
@render_response()
def custom_index(request, conn=None, **kwargs):
context = {"version": omero_version}
if settings.INDEX_TEMPLATE is not None:
try:
template_loader.get_template(settings.INDEX_TEMPLATE)
context['template'] = settings.INDEX_TEMPLATE
except Exception, e:
context['template'] = 'webstart/start.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webstart/start.html'
insight_url = None
if settings.WEBSTART:
context['insight_url'] = request.build_absolute_uri(reverse("webstart_insight"))
return context
@never_cache
@render_response()
def index(request, conn=None, **kwargs):
context = {"version": omero_version}
if settings.WEBSTART_TEMPLATE is not None:
try:
template_loader.get_template(settings.WEBSTART_TEMPLATE)
context['template'] = settings.WEBSTART_TEMPLATE
except Exception, e:
context['template'] = 'webstart/index.html'
context["error"] = traceback.format_exception(*sys.exc_info())[-1]
else:
context['template'] = 'webstart/index.html'
insight_url = None
if settings.WEBSTART:
context['insight_url'] = request.build_absolute_uri(reverse("webstart_insight"))
return context
@never_cache
def insight(request):
t = template_loader.get_template('webstart/insight.xml')
codebase = request.build_absolute_uri(settings.STATIC_URL+'webstart/jars/')
href = request.build_absolute_uri(reverse("webstart_insight"))
pattern = os.path.abspath(os.path.join(settings.OMERO_HOME, "lib", "insight", "*.jar").replace('\\','/'))
jarlist = glob(pattern)
jarlist = [os.path.basename(x) for x in jarlist]
# ticket:9478 put insight jar at the start of the list if available
# This can be configured via omero.web.webstart_jar to point to a
# custom value.
idx = jarlist.index(settings.WEBSTART_JAR)
if idx > 0:
jarlist.pop(idx)
jarlist.insert(0, settings.WEBSTART_JAR)
idy = jarlist.index(settings.NANOXML_JAR)
if idy > 0:
jarlist.pop(idy)
jarlist.insert(len(jarlist)-1, settings.NANOXML_JAR)
context = {'codebase': codebase, 'href': href, 'jarlist': jarlist,
'icon': settings.WEBSTART_ICON,
'heap': settings.WEBSTART_HEAP,
'host': settings.WEBSTART_HOST,
'port': settings.WEBSTART_PORT,
'class': settings.WEBSTART_CLASS,
'title': settings.WEBSTART_TITLE,
'vendor': settings.WEBSTART_VENDOR,
'homepage': settings.WEBSTART_HOMEPAGE,
}
c = Context(request, context)
return HttpJNLPResponse(t.render(c))
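# Illustrative reordering (hypothetical jar names): with
#   jarlist = ['a.jar', 'b.jar', WEBSTART_JAR, NANOXML_JAR]
# the code above moves the configured insight jar to the front and re-inserts the
# nanoxml jar just before the last entry, giving
#   [WEBSTART_JAR, 'a.jar', NANOXML_JAR, 'b.jar'].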
| gpl-2.0 | 4,398,770,162,860,684,000 | 33.739496 | 110 | 0.676343 | false |
MrChoclate/optim | tsp.py | 1 | 3352 | import itertools
import math
import functools
import time
import random
import copy
def timer(func):
def with_time(*args, **kwargs):
t = time.time()
res = func(*args, **kwargs)
print("{} took {} sec".format(func.__name__, time.time() - t))
return res
return with_time
def read():
n = int(input())
return [tuple(float(x) for x in input().split()) for _ in range(n)]
@functools.lru_cache(maxsize=1024)
def distance(src, dest):
return math.sqrt(sum((x - y) ** 2 for x, y in zip(src, dest)))
def cost(sol, cities):
dst = sum(distance(cities[x], cities[y]) for x, y in zip(sol[:-1], sol[1:]))
    dst += distance(cities[sol[-1]], cities[sol[0]])  # close the tour back to the start
return dst
def random_sol(cities):
sol = list(range(1, len(cities)))
random.shuffle(sol)
return [0] + sol
def neighboor(sol):
assert(sol[0] == 0)
i = random.randint(1, len(sol) - 1)
j = i
while j == i:
j = random.randint(1, len(sol) - 1)
res = copy.copy(sol)
res[i], res[j] = res[j], res[i]
return res
@timer
def random_search(cities):
res = float('inf')
best_sol = None
for _ in range(len(cities)):
sol = random_sol(cities)
current_cost = cost(sol, cities)
if res > current_cost:
best_sol = sol
res = current_cost
return res, best_sol
@timer
def stochastic_hill_climbing(cities, kmax=1000):
best_sol = random_sol(cities)
best_cost = cost(best_sol, cities)
k = 0
while k < kmax:
k += 1
current_sol = neighboor(best_sol)
current_cost = cost(current_sol, cities)
if current_cost < best_cost:
best_sol = current_sol
best_cost = current_cost
k = 0
return best_cost, best_sol
@timer
def simulated_annealing(cities):
current_sol = best_sol = random_sol(cities)
current_cost = best_cost = cost(best_sol, cities)
T = 1000 * best_cost / len(cities)
T_min = best_cost / len(cities) / 1000.
k = 0
while T > T_min:
k += 1
new_sol = neighboor(current_sol)
new_cost = cost(new_sol, cities)
if new_cost < best_cost:
best_sol = new_sol
best_cost = new_cost
k = 0
if new_cost < current_cost or random.random() <= math.exp((current_cost - new_cost) / T):
current_sol = new_sol
current_cost = new_cost
if k > 100:
T *= 0.99999
return best_cost, best_sol
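# Acceptance rule sketch (illustrative numbers): a worse neighbour is kept with
# probability exp((current_cost - new_cost) / T); e.g. a tour longer by 10 with
# T = 100 is still accepted with probability exp(-0.1) ~ 0.90, which lets the
# search escape local minima while the temperature is high.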
@timer
def brute_solve(cities):
best_cost = float('inf')
best_sol = None
for sol in itertools.permutations(range(len(cities))):
current_cost = cost(sol, cities)
if current_cost < best_cost:
best_cost = current_cost
best_sol = sol
return best_cost, best_sol
@timer
def greedy_solve(cities, fn=min):
sol = [0]
i = 0
while i != len(cities) - 1:
remaining = set(range(len(cities))) - set(sol)
        _, pick = fn((distance(cities[sol[-1]], cities[x]), x) for x in remaining)  # nearest/farthest from the last visited city
sol.append(pick)
i += 1
return cost(sol, cities), sol
if __name__ == '__main__':
cities = read()
print(greedy_solve(cities, fn=min))
print(greedy_solve(cities, fn=max))
print(random_search(cities))
print(stochastic_hill_climbing(cities))
print(simulated_annealing(cities))
| gpl-3.0 | -7,064,214,220,862,310,000 | 25.1875 | 97 | 0.571599 | false |
nkolban/Espruino | scripts/common.py | 1 | 16586 | #!/bin/false
# This file is part of Espruino, a JavaScript interpreter for Microcontrollers
#
# Copyright (C) 2013 Gordon Williams <[email protected]>
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# ----------------------------------------------------------------------------------------
# Reads board information from boards/BOARDNAME.py - used by build_board_docs,
# build_pininfo, and build_platform_config
# ----------------------------------------------------------------------------------------
import subprocess;
import re;
import json;
import sys;
import os;
import importlib;
silent = os.getenv("SILENT");
if silent:
class Discarder(object):
def write(self, text):
pass # do nothing
# now discard everything coming out of stdout
sys.stdout = Discarder()
# http://stackoverflow.com/questions/4814970/subprocess-check-output-doesnt-seem-to-exist-python-2-6-5
if "check_output" not in dir( subprocess ):
def f(*popenargs, **kwargs):
if 'stdout' in kwargs:
raise ValueError('stdout argument not allowed, it will be overridden.')
process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
output, unused_err = process.communicate()
retcode = process.poll()
if retcode:
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
raise subprocess.CalledProcessError(retcode, cmd)
return output
subprocess.check_output = f
# Scans files for comments of the form /*JSON......*/
#
# Comments look like:
#
#/*JSON{ "type":"staticmethod|staticproperty|constructor|method|property|function|variable|class|library|idle|init|kill",
# // class = built-in class that does not require instantiation
# // library = built-in class that needs require('classname')
# // idle = function to run on idle regardless
# // init = function to run on initialisation
# // kill = function to run on deinitialisation
# "class" : "Double", "name" : "doubleToIntBits",
# "needs_parentName":true, // optional - if for a method, this makes the first 2 args parent+parentName (not just parent)
# "generate_full|generate|wrap" : "*(JsVarInt*)&x",
# "description" : " Convert the floating point value given into an integer representing the bits contained in it",
# "params" : [ [ "x" , "float|int|int32|bool|pin|JsVar|JsVarName|JsVarArray", "A floating point number"] ],
# // float - parses into a JsVarFloat which is passed to the function
# // int - parses into a JsVarInt which is passed to the function
# // int32 - parses into a 32 bit int
# // bool - parses into a boolean
# // pin - parses into a pin
# // JsVar - passes a JsVar* to the function (after skipping names)
# // JsVarArray - parses this AND ANY SUBSEQUENT ARGUMENTS into a JsVar of type JSV_ARRAY. THIS IS ALWAYS DEFINED, EVEN IF ZERO LENGTH. Currently it must be the only parameter
# "return" : ["int|float|JsVar", "The integer representation of x"],
# "return_object" : "ObjectName", // optional - used for tern's code analysis - so for example we can do hints for openFile(...).yyy
# "no_create_links":1 // optional - if this is set then hyperlinks are not created when this name is mentioned (good example = bit() )
# "not_real_object" : "anything", // optional - for classes, this means we shouldn't treat this as a built-in object, as internally it isn't stored in a JSV_OBJECT
# "prototype" : "Object", // optional - for classes, this is what their prototype is. It's particlarly helpful if not_real_object, because there is no prototype var in that case
# "check" : "jsvIsFoo(var)", // for classes - this is code that returns true if 'var' is of the given type
# "ifndef" : "SAVE_ON_FLASH", // if the given preprocessor macro is defined, don't implement this
# "ifdef" : "USE_LCD_FOO", // if the given preprocessor macro isn't defined, don't implement this
# "#if" : "A>2", // add a #if statement in the generated C file (ONLY if type==object)
#}*/
#
# description can be an array of strings as well as a simple string (in which case each element is separated by a newline),
# and adding ```sometext``` in the description surrounds it with HTML code tags
#
def get_jsondata(is_for_document, parseArgs = True, board = False):
scriptdir = os.path.dirname (os.path.realpath(__file__))
print("Script location "+scriptdir)
os.chdir(scriptdir+"/..")
jswraps = []
defines = []
if board and ("build" in board.info) and ("defines" in board.info["build"]):
for i in board.info["build"]["defines"]:
print("Got define from board: " + i);
defines.append(i)
if parseArgs and len(sys.argv)>1:
print("Using files from command line")
for i in range(1,len(sys.argv)):
arg = sys.argv[i]
if arg[0]=="-":
if arg[1]=="D":
defines.append(arg[2:])
elif arg[1]=="B":
board = importlib.import_module(arg[2:])
if "usart" in board.chip: defines.append("USART_COUNT="+str(board.chip["usart"]));
if "spi" in board.chip: defines.append("SPI_COUNT="+str(board.chip["spi"]));
if "i2c" in board.chip: defines.append("I2C_COUNT="+str(board.chip["i2c"]));
if "USB" in board.devices: defines.append("defined(USB)=True");
else: defines.append("defined(USB)=False");
elif arg[1]=="F":
"" # -Fxxx.yy in args is filename xxx.yy, which is mandatory for build_jswrapper.py
else:
print("Unknown command-line option")
exit(1)
else:
jswraps.append(arg)
else:
print("Scanning for jswrap.c files")
jswraps = subprocess.check_output(["find", ".", "-name", "jswrap*.c"]).strip().split("\n")
if len(defines)>1:
print("Got #DEFINES:")
for d in defines: print(" "+d)
jsondatas = []
for jswrap in jswraps:
# ignore anything from archives
if jswrap.startswith("./archives/"): continue
# now scan
print("Scanning "+jswrap)
code = open(jswrap, "r").read()
if is_for_document and "DO_NOT_INCLUDE_IN_DOCS" in code:
print("FOUND 'DO_NOT_INCLUDE_IN_DOCS' IN FILE "+jswrap)
continue
for comment in re.findall(r"/\*JSON.*?\*/", code, re.VERBOSE | re.MULTILINE | re.DOTALL):
charnumber = code.find(comment)
linenumber = 1+code.count("\n", 0, charnumber)
# Strip off /*JSON .. */ bit
comment = comment[6:-2]
endOfJson = comment.find("\n}")+2;
jsonstring = comment[0:endOfJson];
description = comment[endOfJson:].strip();
# print("Parsing "+jsonstring)
try:
jsondata = json.loads(jsonstring)
if len(description): jsondata["description"] = description;
jsondata["filename"] = jswrap
jsondata["include"] = jswrap[:-2]+".h"
jsondata["githublink"] = "https://github.com/espruino/Espruino/blob/master/"+jswrap+"#L"+str(linenumber)
dropped_prefix = "Dropped "
if "name" in jsondata: dropped_prefix += jsondata["name"]+" "
elif "class" in jsondata: dropped_prefix += jsondata["class"]+" "
drop = False
if not is_for_document:
if ("ifndef" in jsondata) and (jsondata["ifndef"] in defines):
print(dropped_prefix+" because of #ifndef "+jsondata["ifndef"])
drop = True
if ("ifdef" in jsondata) and not (jsondata["ifdef"] in defines):
print(dropped_prefix+" because of #ifdef "+jsondata["ifdef"])
drop = True
if ("#if" in jsondata):
expr = jsondata["#if"]
for defn in defines:
if defn.find('=')!=-1:
dname = defn[:defn.find('=')]
dkey = defn[defn.find('=')+1:]
expr = expr.replace(dname, dkey);
try:
r = eval(expr)
except:
print("WARNING: error evaluating '"+expr+"' - from '"+jsondata["#if"]+"'")
r = True
if not r:
print(dropped_prefix+" because of #if "+jsondata["#if"]+ " -> "+expr)
drop = True
if not drop:
jsondatas.append(jsondata)
except ValueError as e:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+ str(e) + "\n")
exit(1)
except:
sys.stderr.write( "JSON PARSE FAILED for " + jsonstring + " - "+str(sys.exc_info()[0]) + "\n" )
exit(1)
print("Scanning finished.")
return jsondatas
# Takes the data from get_jsondata and restructures it in prepartion for output as JS
#
# Results look like:,
#{
# "Pin": {
# "desc": [
# "This is the built-in class for Pins, such as D0,D1,LED1, or BTN",
# "You can call the methods on Pin, or you can use Wiring-style functions such as digitalWrite"
# ],
# "methods": {
# "read": {
# "desc": "Returns the input state of the pin as a boolean",
# "params": [],
# "return": [
# "bool",
# "Whether pin is a logical 1 or 0"
# ]
# },
# "reset": {
# "desc": "Sets the output state of the pin to a 0",
# "params": [],
# "return": []
# },
# ...
# },
# "props": {},
# "staticmethods": {},
# "staticprops": {}
# },
# "print": {
# "desc": "Print the supplied string",
# "return": []
# },
# ...
#}
#
def get_struct_from_jsondata(jsondata):
context = {"modules": {}}
def checkClass(details):
cl = details["class"]
if not cl in context:
context[cl] = {"type": "class", "methods": {}, "props": {}, "staticmethods": {}, "staticprops": {}, "desc": details.get("description", "")}
return cl
def addConstructor(details):
cl = checkClass(details)
context[cl]["constructor"] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addMethod(details, type = ""):
cl = checkClass(details)
context[cl][type + "methods"][details["name"]] = {"params": details.get("params", []), "return": details.get("return", []), "desc": details.get("description", "")}
def addProp(details, type = ""):
cl = checkClass(details)
context[cl][type + "props"][details["name"]] = {"return": details.get("return", []), "desc": details.get("description", "")}
def addFunc(details):
context[details["name"]] = {"type": "function", "return": details.get("return", []), "desc": details.get("description", "")}
def addObj(details):
context[details["name"]] = {"type": "object", "instanceof": details.get("instanceof", ""), "desc": details.get("description", "")}
def addLib(details):
context["modules"][details["class"]] = {"desc": details.get("description", "")}
def addVar(details):
return
for data in jsondata:
type = data["type"]
if type=="class":
checkClass(data)
elif type=="constructor":
addConstructor(data)
elif type=="method":
addMethod(data)
elif type=="property":
addProp(data)
elif type=="staticmethod":
addMethod(data, "static")
elif type=="staticproperty":
addProp(data, "static")
elif type=="function":
addFunc(data)
elif type=="object":
addObj(data)
elif type=="library":
addLib(data)
elif type=="variable":
addVar(data)
else:
print(json.dumps(data, sort_keys=True, indent=2))
return context
def get_includes_from_jsondata(jsondatas):
includes = []
for jsondata in jsondatas:
include = jsondata["include"]
if not include in includes:
includes.append(include)
return includes
def is_property(jsondata):
return jsondata["type"]=="property" or jsondata["type"]=="staticproperty" or jsondata["type"]=="variable"
def is_function(jsondata):
return jsondata["type"]=="function" or jsondata["type"]=="method"
def get_prefix_name(jsondata):
if jsondata["type"]=="event": return "event"
if jsondata["type"]=="constructor": return "constructor"
if jsondata["type"]=="function": return "function"
if jsondata["type"]=="method": return "function"
if jsondata["type"]=="variable": return "variable"
if jsondata["type"]=="property": return "property"
return ""
def get_ifdef_description(d):
if d=="SAVE_ON_FLASH": return "devices with low flash memory"
if d=="STM32F1": return "STM32F1 devices (including Original Espruino Board)"
if d=="USE_LCD_SDL": return "Linux with SDL support compiled in"
if d=="USE_TLS": return "devices with TLS and SSL support (Espruino Pico and Espruino WiFi only)"
if d=="RELEASE": return "release builds"
if d=="LINUX": return "Linux-based builds"
if d=="USE_USB_HID": return "devices that support USB HID (Espruino Pico and Espruino WiFi)"
if d=="USE_AES": return "devices that support AES (Espruino Pico, Espruino WiFi or Linux)"
if d=="USE_CRYPTO": return "devices that support Crypto Functionality (Espruino Pico, Espruino WiFi, Linux or ESP8266)"
print("WARNING: Unknown ifdef '"+d+"' in common.get_ifdef_description")
return d
def get_script_dir():
return os.path.dirname(os.path.realpath(__file__))
def get_version():
# Warning: the same release label derivation is also in the Makefile
scriptdir = get_script_dir()
jsutils = scriptdir+"/../src/jsutils.h"
version = re.compile("^.*JS_VERSION.*\"(.*)\"");
alt_release = os.getenv("ALT_RELEASE")
if alt_release == None:
# Default release labeling based on commits since last release tag
latest_release = subprocess.check_output('git tag | grep RELEASE_ | sort | tail -1', shell=True).strip()
commits_since_release = subprocess.check_output('git log --oneline '+latest_release.decode("utf-8")+'..HEAD | wc -l', shell=True).decode("utf-8").strip()
else:
# Alternate release labeling with fork name (in ALT_RELEASE env var) plus branch
# name plus commit SHA
sha = subprocess.check_output('git rev-parse --short HEAD', shell=True).strip()
branch = subprocess.check_output('git name-rev --name-only HEAD', shell=True).strip()
commits_since_release = alt_release + '_' + branch + '_' + sha
for line in open(jsutils):
match = version.search(line);
if (match != None):
v = match.group(1);
if commits_since_release=="0": return v
else: return v+"."+commits_since_release
return "UNKNOWN"
def get_name_or_space(jsondata):
if "name" in jsondata: return jsondata["name"]
return ""
def get_bootloader_size(board):
if board.chip["family"]=="STM32F4": return 16*1024; # 16kb Pages, so we have no choice
return 10*1024;
# On normal chips this is 0x00000000
# On boards with bootloaders it's generally + 10240
# On F401, because of the setup of pages we put the bootloader in the first 16k, then in the 16+16+16 we put the saved code, and then finally we put the binary somewhere else
def get_espruino_binary_address(board):
if "place_text_section" in board.chip:
return board.chip["place_text_section"]
if "bootloader" in board.info and board.info["bootloader"]==1:
return get_bootloader_size(board);
return 0;
def get_board_binary_name(board):
return board.info["binary_name"].replace("%v", get_version());
| mpl-2.0 | -2,976,484,380,835,921,000 | 43.347594 | 205 | 0.574882 | false |
centrofermi/e3pipe | display/E3EventCanvas.py | 1 | 5968 | #!/usr/bin/env python
# *********************************************************************
# * Copyright (C) 2015 Luca Baldini ([email protected]) *
# * *
# * For the license terms see the file LICENCE, distributed *
# * along with this software. *
# *********************************************************************
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
X_MIN = 0.
X_MAX = 158.0
NUM_STRIPS = 24
STRIP_PITCH = 3.2
Y_MIN = 0.5*STRIP_PITCH
Y_MAX = STRIP_PITCH*(NUM_STRIPS - 0.5)
DX = X_MAX - X_MIN
DY = Y_MAX - Y_MIN
from e3pipe.display.E3PhysicalCanvas import E3PhysicalCanvas
from e3pipe.display.__geometry2d__ import *
class E3EventCanvas(E3PhysicalCanvas):
"""
"""
NAME = 'cdisplay'
TITLE = 'EEE event display'
WPX = 1200
PLANE_THICKNESS = 2.
def __init__(self, z = [0, 40, 80], padding = 30., **kwargs):
""" Constructor.
"""
self.__Z = z
self.__Pad = padding
dz = max(z) - min(z)
self.__W = self.__Pad*4 + DX + DY
self.__H = self.__Pad*2 + dz
E3PhysicalCanvas.__init__(self, self.NAME, self.__W, self.__H,
self.WPX, title = self.TITLE, logo = False)
self.setup()
def setup(self):
""" Setup the canvas to display an event.
"""
self.Clear()
self.drawTelescope()
self.drawReference()
self.drawAnnotations()
def xz2canvas(self, x, z):
""" Convert from physical units to canvas coordinates (x-z plane).
"""
_x = x + self.__Pad - 0.5*self.__W
_z = z + self.__Pad - 0.5*self.__H
return (_x, _z)
def yz2canvas(self, y, z):
""" Convert from physical units to canvas coordinates (y-z plane).
"""
_y = y + 3*self.__Pad + DX - 0.5*self.__W
_z = z + self.__Pad - 0.5*self.__H
return (_y, _z)
def drawTelescope(self):
""" Draw the three planes of the telescope.
"""
for z in self.__Z:
box(0.5*DX - 0.5*self.__W + self.__Pad,
z - 0.5*self.__H + self.__Pad,
DX, self.PLANE_THICKNESS)
for i in range(NUM_STRIPS):
box((1 + i)*STRIP_PITCH - 0.5*self.__W + DX + 3*self.__Pad,
z - 0.5*self.__H + self.__Pad,
STRIP_PITCH, self.PLANE_THICKNESS)
def drawReference(self):
""" Draw the reference system.
"""
_l = 0.4*self.__Pad
_x, _z = self.xz2canvas(-0.5*self.__Pad, -0.5*self.__Pad)
arrow(_x, _z, _x + _l, _z)
annotate(_x + _l, _z, ' x', align = 13)
arrow(_x, _z, _x, _z + _l)
annotate(_x, _z + _l, 'z ', align = 31)
_y, _z = self.yz2canvas(-0.5*self.__Pad, -0.5*self.__Pad)
arrow(_y, _z, _y + _l, _z)
annotate(_y + _l, _z, ' y', align = 13)
arrow(_y, _z, _y, _z + _l)
annotate(_y, _z + _l, 'z ', align = 31)
def drawAnnotations(self):
""" Draw some annotations.
"""
_x, _z = self.xz2canvas(DX + self.__Pad, self.__Z[0])
annotate(_x, _z, 'bot', align = 22)
_x, _z = self.xz2canvas(DX + self.__Pad, self.__Z[1])
annotate(_x, _z, 'mid', align = 22)
_x, _z = self.xz2canvas(DX + self.__Pad, self.__Z[2])
annotate(_x, _z, 'top', align = 22)
_x, _z1 = self.xz2canvas(-0.5*self.__Pad, self.__Z[1])
_x, _z2 = self.xz2canvas(-0.5*self.__Pad, self.__Z[2])
vquote(_z1, _z2, _x)
def drawMarker(self, x, y, z, **kwargs):
""" Draw a three-dimensional point.
"""
_x, _z = self.xz2canvas(x, z)
marker(_x, _z, **kwargs)
_y, _z = self.yz2canvas(y, z)
marker(_y, _z, **kwargs)
self.Update()
def drawLine(self, x0, y0, z0, xdir, ydir, zdir, top = 100, bot = 100,
**kwargs):
""" Draw a line.
"""
_x0, _z0 = self.xz2canvas(x0, z0)
_x1 = _x0 - bot*xdir
_z1 = _z0 - bot*zdir
_x2 = _x0 + top*xdir
_z2 = _z0 + top*zdir
line(_x1, _z1, _x2, _z2, **kwargs)
_y0, _z0 = self.yz2canvas(y0, z0)
_y1 = _y0 - bot*ydir
_z1 = _z0 - bot*zdir
_y2 = _y0 + top*ydir
_z2 = _z0 + top*zdir
line(_y1, _z1, _y2, _z2, **kwargs)
self.Update()
def drawEventInfo(self, fileName, run, evt):
"""
"""
annotate(0.02, 0.94, '%s [%d - %d]' % (fileName, run, evt),
ndc = True, align = 12)
self.Update()
def annotateXZ(self, x, z, text, size = 1, ndc = False,
align = 22, color = ROOT.kBlack, angle = 0):
"""
"""
_x, _z = self.xz2canvas(x, z)
annotate(_x, _z, text, size, ndc, align, color, angle)
self.Update()
def annotateYZ(self, y, z, text, size = 1, ndc = False,
align = 22, color = ROOT.kBlack, angle = 0):
"""
"""
_y, _z = self.yz2canvas(y, z)
annotate(_y, _z, text, size, ndc, align, color, angle)
self.Update()
if __name__ == '__main__':
c = E3EventCanvas()
c.Draw()
    c.drawMarker(100, 34, 40)
| gpl-3.0 | 3,696,403,357,255,309,000 | 32.909091 | 78 | 0.487936 | false |
nithintech/google-python-exercises | babynames/babynames.py | 2 | 2533 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
import os
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
"""
Given a file name for baby.html, returns a list starting with the year string
followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
"""
# +++your code here+++
dic={}
f=open(filename,'rU')
d=f.read()
m=re.search("Popularity\sin\s(\d\d\d\d)",d)
print m.group(1)
n=re.findall("<td>(\d*)</td><td>(\w+)</td><td>(\w+)</td>",d)
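  # each tuple is (rank, boy name, girl name); both names get mapped to the shared rank below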
for i in n:
dic[i[1]]=i[0]
dic[i[2]]=i[0]
res=sorted(dic.items())
lis=[]
lis.append(m.group(1))
for i in res:
s=i[0]+" "+i[1]
lis.append(s)
return lis
#for i in n:
# print i[0], i[1]
def main():
# This command-line parsing code is provided.
# Make a list of command line arguments, omitting the [0] element
# which is the script itself.
args = sys.argv[1:]
if not args:
print 'usage: [--summaryfile] file [file ...]'
sys.exit(1)
# Notice the summary flag and remove it from args if it is present.
summary = False
if args[0] == '--summaryfile':
summary = True
del args[0]
for i in args:
names=extract_names(i)
if summary:
j=os.path.basename(i)
f=open(j+"summary.txt",'w')
for i in names:
f.write(i+"\n")
f.close()
else:
print names
# +++your code here+++
# For each filename, get the names, then either print the text output
# or write it to a summary file
if __name__ == '__main__':
main()
| apache-2.0 | 3,653,605,300,787,971,600 | 22.896226 | 79 | 0.629293 | false |
pbs/django-cms | cms/forms/widgets.py | 1 | 9439 | # -*- coding: utf-8 -*-
from cms.forms.utils import get_site_choices, get_page_choices
from cms.models import Page, PageUser, Placeholder
from cms.plugin_pool import plugin_pool
from cms.utils import get_language_from_request, cms_static_url
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.auth import get_permission_codename
from django.forms.widgets import Select, MultiWidget, Widget
from django.template.context import RequestContext
from django.template.loader import render_to_string
from django.utils.encoding import force_unicode
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
import copy
from cms.templatetags.cms_admin import CMS_ADMIN_ICON_BASE
class LazySelect(Select):
def __init__(self, *args, **kwargs):
choices = kwargs['choices']
from cms.forms.fields import SuperLazyIterator
if isinstance(choices, SuperLazyIterator):
self.choices = kwargs.pop('choices')
super(Select, self).__init__(*args, **kwargs)
else:
super(LazySelect, self).__init__(*args, **kwargs)
class PageSelectWidget(MultiWidget):
"""A widget that allows selecting a page by first selecting a site and then
a page on that site in a two step process.
"""
def __init__(self, site_choices=None, page_choices=None, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
if site_choices is None or page_choices is None:
from cms.forms.fields import SuperLazyIterator
site_choices = SuperLazyIterator(get_site_choices)
page_choices = SuperLazyIterator(get_page_choices)
self.site_choices = site_choices
self.choices = page_choices
widgets = (LazySelect(choices=site_choices ),
LazySelect(choices=[('', '----')]),
LazySelect(choices=self.choices, attrs={'style': "display:none;"} ),
)
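        # widget 0 picks the site, widget 1 lists the pages of the selected site, and the hidden
        # widget 2 keeps every page grouped by site so the JavaScript emitted in render() can clone the optgroups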
super(PageSelectWidget, self).__init__(widgets, attrs)
def decompress(self, value):
"""
receives a page_id in value and returns the site_id and page_id
of that page or the current site_id and None if no page_id is given.
"""
if value:
page = Page.objects.get(pk=value)
site = page.site
return [site.pk, page.pk, page.pk]
site = Site.objects.get_current()
return [site.pk,None,None]
def _has_changed(self, initial, data):
# THIS IS A COPY OF django.forms.widgets.Widget._has_changed()
# (except for the first if statement)
"""
Return True if data differs from initial.
"""
# For purposes of seeing whether something has changed, None is
        # the same as an empty string, if the data or initial value we get
# is None, replace it w/ u''.
if data is None or (len(data)>=2 and data[1] in [None,'']):
data_value = u''
else:
data_value = data
if initial is None:
initial_value = u''
else:
initial_value = initial
if force_unicode(initial_value) != force_unicode(data_value):
return True
return False
def render(self, name, value, attrs=None):
# THIS IS A COPY OF django.forms.widgets.MultiWidget.render()
# (except for the last line)
# value is a list of values, each corresponding to a widget
# in self.widgets.
if not isinstance(value, list):
value = self.decompress(value)
output = []
final_attrs = self.build_attrs(attrs)
id_ = final_attrs.get('id', None)
for i, widget in enumerate(self.widgets):
try:
widget_value = value[i]
except IndexError:
widget_value = None
if id_:
final_attrs = dict(final_attrs, id='%s_%s' % (id_, i))
output.append(widget.render(name + '_%s' % i, widget_value, final_attrs))
output.append(r'''<script type="text/javascript">
(function($) {
var handleSiteChange = function(site_name, selected_id) {
$("#id_%(name)s_1 optgroup").remove();
var myOptions = $("#id_%(name)s_2 optgroup[label='" + site_name.replace(/(\'|\")/g, '\\$1') + "']").clone();
$("#id_%(name)s_1").append(myOptions);
$("#id_%(name)s_1").change();
};
var handlePageChange = function(page_id) {
if (page_id) {
$("#id_%(name)s_2 option").removeAttr('selected');
$("#id_%(name)s_2 option[value=" + page_id + "]").attr('selected','selected');
} else {
$("#id_%(name)s_2 option[value=]").attr('selected','selected');
};
};
$("#id_%(name)s_0").change(function(){
var site_label = $("#id_%(name)s_0").children(":selected").text();
handleSiteChange( site_label );
});
$("#id_%(name)s_1").change(function(){
var page_id = $(this).find('option:selected').val();
handlePageChange( page_id );
});
$(function(){
handleSiteChange( $("#id_%(name)s_0").children(":selected").text() );
$("#add_id_%(name)s").hide();
});
})(django.jQuery);
</script>''' % {'name': name})
return mark_safe(self.format_output(output))
def format_output(self, rendered_widgets):
return u' '.join(rendered_widgets)
class PluginEditor(Widget):
def __init__(self, attrs=None):
if attrs is not None:
self.attrs = attrs.copy()
else:
self.attrs = {}
class Media:
js = [cms_static_url(path) for path in (
'js/libs/jquery.ui.core.js',
'js/libs/jquery.ui.sortable.js',
'js/plugin_editor.js',
)]
css = {
'all': [cms_static_url(path) for path in (
'css/plugin_editor.css',
)]
}
def render(self, name, value, attrs=None):
context = {
'plugin_list': self.attrs['list'],
'installed_plugins': self.attrs['installed'],
'copy_languages': self.attrs['copy_languages'],
'language': self.attrs['language'],
'show_copy': self.attrs['show_copy'],
'placeholder': self.attrs['placeholder'],
}
return mark_safe(render_to_string(
'admin/cms/page/widgets/plugin_editor.html', context))
class UserSelectAdminWidget(Select):
"""Special widget used in page permission inlines, because we have to render
an add user (plus) icon, but point it somewhere else - to special user creation
    view, which is accessible only if the user has "add user" permissions.
    The current user should be assigned to the widget in the form constructor as a user
attribute.
"""
def render(self, name, value, attrs=None, choices=()):
output = [super(UserSelectAdminWidget, self).render(name, value, attrs, choices)]
opts = PageUser._meta
if hasattr(self, 'user') and (self.user.is_superuser or \
self.user.has_perm(opts.app_label + '.' + get_permission_codename('add', opts))):
# append + icon
add_url = '../../../cms/pageuser/add/'
output.append(u'<a href="%s" class="add-another" id="add_id_%s" onclick="return showAddAnotherPopup(this);"> ' % \
(add_url, name))
output.append(u'<img src="%sicon_addlink.gif" width="10" height="10" alt="%s"/></a>' % (CMS_ADMIN_ICON_BASE, _('Add Another')))
return mark_safe(u''.join(output))
class PlaceholderPluginEditorWidget(PluginEditor):
attrs = {}
def __init__(self, request, filter_func):
self.request = request
self.filter_func = filter_func
def __deepcopy__(self, memo):
obj = copy.copy(self)
obj.request = copy.copy(self.request)
obj.filter_func = self.filter_func
memo[id(self)] = obj
return obj
def render(self, name, value, attrs=None):
try:
ph = Placeholder.objects.get(pk=value)
except Placeholder.DoesNotExist:
ph = None
context = {'add':True}
if ph:
plugin_list = ph.cmsplugin_set.filter(parent=None).order_by('position')
plugin_list = self.filter_func(self.request, plugin_list)
language = get_language_from_request(self.request)
copy_languages = []
if ph.actions.can_copy:
copy_languages = ph.actions.get_copy_languages(
placeholder=ph,
model=ph._get_attached_model(),
fieldname=ph._get_attached_field_name()
)
context = {
'plugin_list': plugin_list,
'installed_plugins': plugin_pool.get_all_plugins(ph.slot, include_page_only=False),
'copy_languages': copy_languages,
'language': language,
'show_copy': bool(copy_languages) and ph.actions.can_copy,
'urloverride': True,
'placeholder': ph,
}
#return mark_safe(render_to_string(
# 'admin/cms/page/widgets/plugin_editor.html', context))
return mark_safe(render_to_string(
'admin/cms/page/widgets/placeholder_editor.html', context, RequestContext(self.request)))
| bsd-3-clause | -3,840,572,163,296,305,700 | 38.827004 | 139 | 0.580888 | false |
nvbn/thefuck | tests/rules/test_remove_shell_prompt_literal.py | 1 | 1168 | import pytest
from thefuck.rules.remove_shell_prompt_literal import match, get_new_command
from thefuck.types import Command
@pytest.fixture
def output():
return "$: command not found"
@pytest.mark.parametrize(
"script",
[
"$ cd newdir",
" $ cd newdir",
"$ $ cd newdir"
" $ $ cd newdir",
],
)
def test_match(script, output):
assert match(Command(script, output))
@pytest.mark.parametrize(
"command",
[
Command("$", "$: command not found"),
Command(" $", "$: command not found"),
Command("$?", "127: command not found"),
Command(" $?", "127: command not found"),
Command("", ""),
],
)
def test_not_match(command):
assert not match(command)
@pytest.mark.parametrize(
"script, new_command",
[
("$ cd newdir", "cd newdir"),
("$ $ cd newdir", "cd newdir"),
("$ python3 -m virtualenv env", "python3 -m virtualenv env"),
(" $ $ $ python3 -m virtualenv env", "python3 -m virtualenv env"),
],
)
def test_get_new_command(script, new_command, output):
assert get_new_command(Command(script, output)) == new_command
| mit | 7,313,102,935,219,183,000 | 23.333333 | 76 | 0.578767 | false |
gihankarunarathne/udp | EXTRACTFLO2DWATERLEVEL.py | 1 | 22923 | #!/usr/bin/python3
import csv
import getopt
import json
import os
import sys
import traceback
import copy
from datetime import datetime, timedelta
from os.path import join as pjoin
from curwmysqladapter import MySQLAdapter
import Constants
from LIBFLO2DWATERLEVELGRID import getWaterLevelOfChannels
from Util.LibForecastTimeseries import extractForecastTimeseries
from Util.LibForecastTimeseries import extractForecastTimeseriesInDays
from Util.Utils import getUTCOffset
def usage():
usageText = """
    Usage: ./EXTRACTFLO2DWATERLEVEL.py [-d YYYY-MM-DD] [-t HH:MM:SS] [-p -o -h] [-S YYYY-MM-DD] [-T HH:MM:SS]
-h --help Show usage
-f --forceInsert Force Insert into the database. May override existing values.
-F --flo2d_config Configuration for FLO2D model run
-d --date Model State Date in YYYY-MM-DD. Default is current date.
-t --time Model State Time in HH:MM:SS. If -d passed, then default is 00:00:00. Otherwise Default is current time.
-S --start_date Base Date of FLO2D model output in YYYY-MM-DD format. Default is same as -d option value.
-T --start_time Base Time of FLO2D model output in HH:MM:SS format. Default is set to 00:00:00
-p --path FLO2D model path which include HYCHAN.OUT
-o --out Suffix for 'water_level-<SUFFIX>' and 'water_level_grid-<SUFFIX>' output directories.
Default is 'water_level-<YYYY-MM-DD>' and 'water_level_grid-<YYYY-MM-DD>' same as -d option value.
-n --name Name field value of the Run table in Database. Use time format such as 'Cloud-1-<%H:%M:%S>' to replace with time(t).
-u --utc_offset UTC offset of current timestamps. "+05:30" or "-10:00". Default value is "+00:00".
"""
print(usageText)
def isfloat(value):
try:
float(value)
return True
except ValueError:
return False
def save_forecast_timeseries(my_adapter, my_timeseries, my_model_date, my_model_time, my_opts):
print('EXTRACTFLO2DWATERLEVEL:: save_forecast_timeseries >>', my_opts)
# Convert date time with offset
date_time = datetime.strptime('%s %s' % (my_model_date, my_model_time), Constants.COMMON_DATE_TIME_FORMAT)
if 'utcOffset' in my_opts:
date_time = date_time + my_opts['utcOffset']
my_model_date = date_time.strftime('%Y-%m-%d')
my_model_time = date_time.strftime('%H:%M:%S')
# If there is an offset, shift by offset before proceed
forecast_timeseries = []
if 'utcOffset' in my_opts:
        print('Shift by utcOffset:', my_opts['utcOffset'].resolution)
for item in my_timeseries:
forecast_timeseries.append(
[datetime.strptime(item[0], Constants.COMMON_DATE_TIME_FORMAT) + my_opts['utcOffset'], item[1]])
forecast_timeseries = extractForecastTimeseries(forecast_timeseries, my_model_date, my_model_time, by_day=True)
else:
forecast_timeseries = extractForecastTimeseries(my_timeseries, my_model_date, my_model_time, by_day=True)
# print(forecast_timeseries[:10])
extracted_timeseries = extractForecastTimeseriesInDays(forecast_timeseries)
# for ll in extractedTimeseries :
# print(ll)
# Check whether existing station
force_insert = my_opts.get('forceInsert', False)
station = my_opts.get('station', '')
is_station_exists = adapter.get_station({'name': station})
if is_station_exists is None:
        print('WARNING: Station %s does not exist. Continuing with others.' % station)
return
# TODO: Create if station does not exists.
run_name = my_opts.get('run_name', 'Cloud-1')
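    # run_name may embed a strftime pattern between '<' and '>', e.g. 'Cloud-1-<%H:%M:%S>'
    # becomes 'Cloud-1-14:30:00' when the model time is 14:30:00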
less_char_index = run_name.find('<')
greater_char_index = run_name.find('>')
    if less_char_index > -1 and greater_char_index > -1:
start_str = run_name[:less_char_index]
date_format_str = run_name[less_char_index + 1:greater_char_index]
end_str = run_name[greater_char_index + 1:]
try:
date_str = date_time.strftime(date_format_str)
run_name = start_str + date_str + end_str
except ValueError:
raise ValueError("Incorrect data format " + date_format_str)
types = [
'Forecast-0-d',
'Forecast-1-d-after',
'Forecast-2-d-after',
'Forecast-3-d-after',
'Forecast-4-d-after',
'Forecast-5-d-after',
'Forecast-6-d-after',
'Forecast-7-d-after',
'Forecast-8-d-after',
'Forecast-9-d-after',
'Forecast-10-d-after',
'Forecast-11-d-after',
'Forecast-12-d-after',
'Forecast-13-d-after',
'Forecast-14-d-after'
]
meta_data = {
'station': station,
'variable': 'WaterLevel',
'unit': 'm',
'type': types[0],
'source': 'FLO2D',
'name': run_name
}
for i in range(0, min(len(types), len(extracted_timeseries))):
meta_data_copy = copy.deepcopy(meta_data)
meta_data_copy['type'] = types[i]
event_id = my_adapter.get_event_id(meta_data_copy)
if event_id is None:
event_id = my_adapter.create_event_id(meta_data_copy)
print('HASH SHA256 created: ', event_id)
else:
print('HASH SHA256 exists: ', event_id)
if not force_insert:
print('Timeseries already exists. User --force to update the existing.\n')
continue
# for l in timeseries[:3] + timeseries[-2:] :
# print(l)
row_count = my_adapter.insert_timeseries(event_id, extracted_timeseries[i], force_insert)
print('%s rows inserted.\n' % row_count)
# -- END OF SAVE_FORECAST_TIMESERIES
try:
CONFIG = json.loads(open('CONFIG.json').read())
CWD = os.getcwd()
HYCHAN_OUT_FILE = 'HYCHAN.OUT'
BASE_OUT_FILE = 'BASE.OUT'
WATER_LEVEL_FILE = 'water_level.txt'
WATER_LEVEL_DIR = 'water_level'
OUTPUT_DIR = 'OUTPUT'
RUN_FLO2D_FILE = 'RUN_FLO2D.json'
UTC_OFFSET = '+00:00:00'
MYSQL_HOST = "localhost"
MYSQL_USER = "root"
MYSQL_DB = "curw"
MYSQL_PASSWORD = ""
if 'HYCHAN_OUT_FILE' in CONFIG:
HYCHAN_OUT_FILE = CONFIG['HYCHAN_OUT_FILE']
if 'BASE_OUT_FILE' in CONFIG:
BASE_OUT_FILE = CONFIG['BASE_OUT_FILE']
if 'WATER_LEVEL_FILE' in CONFIG:
WATER_LEVEL_FILE = CONFIG['WATER_LEVEL_FILE']
if 'OUTPUT_DIR' in CONFIG:
OUTPUT_DIR = CONFIG['OUTPUT_DIR']
if 'MYSQL_HOST' in CONFIG:
MYSQL_HOST = CONFIG['MYSQL_HOST']
if 'MYSQL_USER' in CONFIG:
MYSQL_USER = CONFIG['MYSQL_USER']
if 'MYSQL_DB' in CONFIG:
MYSQL_DB = CONFIG['MYSQL_DB']
if 'MYSQL_PASSWORD' in CONFIG:
MYSQL_PASSWORD = CONFIG['MYSQL_PASSWORD']
adapter = MySQLAdapter(host=MYSQL_HOST, user=MYSQL_USER, password=MYSQL_PASSWORD, db=MYSQL_DB)
# TODO: Pass source name as a paramter to script
flo2d_source = adapter.get_source(name='FLO2D')
try:
flo2d_source = json.loads(flo2d_source.get('parameters', "{}"))
except Exception as e:
print(e)
traceback.print_exc()
CHANNEL_CELL_MAP = {}
if 'CHANNEL_CELL_MAP' in flo2d_source:
CHANNEL_CELL_MAP = flo2d_source['CHANNEL_CELL_MAP']
FLOOD_PLAIN_CELL_MAP = {}
if 'FLOOD_PLAIN_CELL_MAP' in flo2d_source:
FLOOD_PLAIN_CELL_MAP = flo2d_source['FLOOD_PLAIN_CELL_MAP']
"""
{
"CHANNEL_CELL_MAP": {
"179": "Wellawatta",
"221": "Dehiwala",
"592": "Torington",
"616": "N'Street-Canal",
"618": "N'Street-River",
"684": "Dematagoda-Canal",
"814": "Heen Ela",
"1062": "Kolonnawa-Canal",
"991": "kittampahuwa-Out",
"1161": "Kittampahuwa-River",
"1515": "Parliament Lake Bridge-Kotte Canal",
"2158": "Parliament Lake-Out",
"2396": "Salalihini-River",
"2496": "Salalihini-Canal",
"3580": "Madiwela-Out",
"3673": "Ambathale"
},
"FLOOD_PLAIN_CELL_MAP": {
"2265": "Parliament Lake",
"3559": "Madiwela-US"
}
}
"""
ELEMENT_NUMBERS = CHANNEL_CELL_MAP.keys()
FLOOD_ELEMENT_NUMBERS = FLOOD_PLAIN_CELL_MAP.keys()
SERIES_LENGTH = 0
MISSING_VALUE = -999
date = ''
time = ''
path = ''
output_suffix = ''
start_date = ''
start_time = ''
flo2d_config = ''
run_name_default = 'Cloud-1'
runName = ''
utc_offset = ''
forceInsert = False
try:
opts, args = getopt.getopt(sys.argv[1:], "hF:d:t:p:o:S:T:fn:u:",
["help", "flo2d_config=", "date=", "time=", "path=", "out=", "start_date=",
"start_time=", "name=", "forceInsert", "utc_offset="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-F", "--flo2d_config"):
flo2d_config = arg
elif opt in ("-d", "--date"):
date = arg
elif opt in ("-t", "--time"):
time = arg
elif opt in ("-p", "--path"):
path = arg.strip()
elif opt in ("-o", "--out"):
output_suffix = arg.strip()
elif opt in ("-S", "--start_date"):
start_date = arg.strip()
elif opt in ("-T", "--start_time"):
start_time = arg.strip()
elif opt in ("-n", "--name"):
runName = arg.strip()
elif opt in ("-f", "--forceInsert"):
forceInsert = True
elif opt in ("-u", "--utc_offset"):
utc_offset = arg.strip()
appDir = pjoin(CWD, date + '_Kelani')
if path:
appDir = pjoin(CWD, path)
# Load FLO2D Configuration file for the Model run if available
FLO2D_CONFIG_FILE = pjoin(appDir, RUN_FLO2D_FILE)
if flo2d_config:
FLO2D_CONFIG_FILE = pjoin(CWD, flo2d_config)
FLO2D_CONFIG = json.loads('{}')
# Check FLO2D Config file exists
if os.path.exists(FLO2D_CONFIG_FILE):
FLO2D_CONFIG = json.loads(open(FLO2D_CONFIG_FILE).read())
# Default run for current day
now = datetime.now()
if 'MODEL_STATE_DATE' in FLO2D_CONFIG and len(
FLO2D_CONFIG['MODEL_STATE_DATE']): # Use FLO2D Config file data, if available
now = datetime.strptime(FLO2D_CONFIG['MODEL_STATE_DATE'], '%Y-%m-%d')
if date:
now = datetime.strptime(date, '%Y-%m-%d')
date = now.strftime("%Y-%m-%d")
if 'MODEL_STATE_TIME' in FLO2D_CONFIG and len(
FLO2D_CONFIG['MODEL_STATE_TIME']): # Use FLO2D Config file data, if available
now = datetime.strptime('%s %s' % (date, FLO2D_CONFIG['MODEL_STATE_TIME']), '%Y-%m-%d %H:%M:%S')
if time:
now = datetime.strptime('%s %s' % (date, time), '%Y-%m-%d %H:%M:%S')
time = now.strftime("%H:%M:%S")
if start_date:
start_date = datetime.strptime(start_date, '%Y-%m-%d')
start_date = start_date.strftime("%Y-%m-%d")
elif 'TIMESERIES_START_DATE' in FLO2D_CONFIG and len(
FLO2D_CONFIG['TIMESERIES_START_DATE']): # Use FLO2D Config file data, if available
start_date = datetime.strptime(FLO2D_CONFIG['TIMESERIES_START_DATE'], '%Y-%m-%d')
start_date = start_date.strftime("%Y-%m-%d")
else:
start_date = date
if start_time:
start_time = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
start_time = start_time.strftime("%H:%M:%S")
elif 'TIMESERIES_START_TIME' in FLO2D_CONFIG and len(
FLO2D_CONFIG['TIMESERIES_START_TIME']): # Use FLO2D Config file data, if available
start_time = datetime.strptime('%s %s' % (start_date, FLO2D_CONFIG['TIMESERIES_START_TIME']),
'%Y-%m-%d %H:%M:%S')
start_time = start_time.strftime("%H:%M:%S")
else:
start_time = datetime.strptime(start_date, '%Y-%m-%d') # Time is set to 00:00:00
start_time = start_time.strftime("%H:%M:%S")
# Run Name of DB
if 'RUN_NAME' in FLO2D_CONFIG and len(FLO2D_CONFIG['RUN_NAME']): # Use FLO2D Config file data, if available
runName = FLO2D_CONFIG['RUN_NAME']
if not runName:
runName = run_name_default
# UTC Offset
if 'UTC_OFFSET' in FLO2D_CONFIG and len(FLO2D_CONFIG['UTC_OFFSET']): # Use FLO2D Config file data, if available
UTC_OFFSET = FLO2D_CONFIG['UTC_OFFSET']
if utc_offset:
UTC_OFFSET = utc_offset
utcOffset = getUTCOffset(UTC_OFFSET, default=True)
    print('Extract Water Level Result of FLO2D on', date, '@', time, 'with Base time of', start_date, '@', start_time)
print('With UTC Offset of ', str(utcOffset), ' <= ', UTC_OFFSET)
OUTPUT_DIR_PATH = pjoin(CWD, OUTPUT_DIR)
HYCHAN_OUT_FILE_PATH = pjoin(appDir, HYCHAN_OUT_FILE)
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, date))
if 'FLO2D_OUTPUT_SUFFIX' in FLO2D_CONFIG and len(
FLO2D_CONFIG['FLO2D_OUTPUT_SUFFIX']): # Use FLO2D Config file data, if available
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, FLO2D_CONFIG['FLO2D_OUTPUT_SUFFIX']))
if output_suffix:
WATER_LEVEL_DIR_PATH = pjoin(OUTPUT_DIR_PATH, "%s-%s" % (WATER_LEVEL_DIR, output_suffix))
print('Processing FLO2D model on', appDir)
# Check BASE.OUT file exists
if not os.path.exists(HYCHAN_OUT_FILE_PATH):
print('Unable to find file : ', HYCHAN_OUT_FILE_PATH)
sys.exit()
# Create OUTPUT Directory
if not os.path.exists(OUTPUT_DIR_PATH):
os.makedirs(OUTPUT_DIR_PATH)
# Calculate the size of time series
bufsize = 65536
with open(HYCHAN_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
isCounting = False
countSeriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines or SERIES_LENGTH:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
isWaterLevelLines = True
elif isWaterLevelLines:
cols = line.split()
if len(cols) > 0 and cols[0].replace('.', '', 1).isdigit():
countSeriesSize += 1
isCounting = True
elif isWaterLevelLines and isCounting:
SERIES_LENGTH = countSeriesSize
break
print('Series Length is :', SERIES_LENGTH)
bufsize = 65536
#################################################################
# Extract Channel Water Level elevations from HYCHAN.OUT file #
#################################################################
    print('Extract Channel Water Level Result of FLO2D HYCHAN.OUT on', date, '@', time, 'with Base time of', start_date,
'@', start_time)
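    # Walk HYCHAN.OUT looking for 'CHANNEL HYDROGRAPH FOR ELEMENT NO:' headers of monitored cells,
    # gather SERIES_LENGTH (time step, water level) rows for each, and convert the hourly time steps
    # into timestamps relative to the base date/time before saving.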
with open(HYCHAN_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
isSeriesComplete = False
waterLevelLines = []
seriesSize = 0 # HACK: When it comes to the end of file, unable to detect end of time series
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if line.startswith('CHANNEL HYDROGRAPH FOR ELEMENT NO:', 5):
seriesSize = 0
elementNo = line.split()[5]
if elementNo in ELEMENT_NUMBERS:
isWaterLevelLines = True
waterLevelLines.append(line)
else:
isWaterLevelLines = False
elif isWaterLevelLines:
cols = line.split()
if len(cols) > 0 and isfloat(cols[0]):
seriesSize += 1
waterLevelLines.append(line)
if seriesSize == SERIES_LENGTH:
isSeriesComplete = True
if isSeriesComplete:
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
timeseries = []
elementNo = waterLevelLines[0].split()[5]
print('Extracted Cell No', elementNo, CHANNEL_CELL_MAP[elementNo])
for ts in waterLevelLines[1:]:
v = ts.split()
if len(v) < 1:
continue
# Get flood level (Elevation)
value = v[1]
# Get flood depth (Depth)
# value = v[2]
if not isfloat(value):
value = MISSING_VALUE
continue # If value is not present, skip
if value == 'NaN':
continue # If value is NaN, skip
timeStep = float(v[0])
currentStepTime = baseTime + timedelta(hours=timeStep)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
timeseries.append([dateAndTime, value])
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
ModelTime = float(waterLevelLines[1].split()[3])
fileModelTime = datetime.strptime(date, '%Y-%m-%d')
fileModelTime = fileModelTime + timedelta(hours=ModelTime)
dateAndTime = fileModelTime.strftime("%Y-%m-%d_%H-%M-%S")
# Create files
fileName = WATER_LEVEL_FILE.rsplit('.', 1)
stationName = CHANNEL_CELL_MAP[elementNo].replace(' ', '_')
fileTimestamp = "%s_%s" % (date, time.replace(':', '-'))
fileName = "%s-%s-%s.%s" % (fileName[0], stationName, fileTimestamp, fileName[1])
WATER_LEVEL_FILE_PATH = pjoin(WATER_LEVEL_DIR_PATH, fileName)
csvWriter = csv.writer(open(WATER_LEVEL_FILE_PATH, 'w'), delimiter=',', quotechar='|')
csvWriter.writerows(timeseries)
# Save Forecast values into Database
opts = {
'forceInsert': forceInsert,
'station': CHANNEL_CELL_MAP[elementNo],
'run_name': runName
}
print('>>>>>', opts)
if utcOffset != timedelta():
opts['utcOffset'] = utcOffset
save_forecast_timeseries(adapter, timeseries, date, time, opts)
isWaterLevelLines = False
isSeriesComplete = False
waterLevelLines = []
# -- END for loop
# -- END while loop
#################################################################
# Extract Flood Plain water elevations from BASE.OUT file #
#################################################################
BASE_OUT_FILE_PATH = pjoin(appDir, BASE_OUT_FILE)
    print('Extract Flood Plain Water Level Result of FLO2D on', date, '@', time, 'with Base time of', start_date, '@',
start_time)
with open(BASE_OUT_FILE_PATH) as infile:
isWaterLevelLines = False
waterLevelLines = []
waterLevelSeriesDict = dict.fromkeys(FLOOD_ELEMENT_NUMBERS, [])
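        # Each 'MODEL TIME =' block is buffered until the '***CHANNEL RESULTS***' marker, then one
        # [timestamp, level] pair is appended for every flood plain cell of interest.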
while True:
lines = infile.readlines(bufsize)
if not lines:
break
for line in lines:
if line.startswith('MODEL TIME =', 5):
isWaterLevelLines = True
elif isWaterLevelLines and line.startswith('***CHANNEL RESULTS***', 17):
waterLevels = getWaterLevelOfChannels(waterLevelLines, FLOOD_ELEMENT_NUMBERS)
# Create Directory
if not os.path.exists(WATER_LEVEL_DIR_PATH):
os.makedirs(WATER_LEVEL_DIR_PATH)
# Get Time stamp Ref:http://stackoverflow.com/a/13685221/1461060
ModelTime = float(waterLevelLines[0].split()[3])
baseTime = datetime.strptime('%s %s' % (start_date, start_time), '%Y-%m-%d %H:%M:%S')
currentStepTime = baseTime + timedelta(hours=ModelTime)
dateAndTime = currentStepTime.strftime("%Y-%m-%d %H:%M:%S")
for elementNo in FLOOD_ELEMENT_NUMBERS:
tmpTS = waterLevelSeriesDict[elementNo][:]
if elementNo in waterLevels:
tmpTS.append([dateAndTime, waterLevels[elementNo]])
else:
tmpTS.append([dateAndTime, MISSING_VALUE])
waterLevelSeriesDict[elementNo] = tmpTS
isWaterLevelLines = False
# for l in waterLevelLines :
# print(l)
waterLevelLines = []
if isWaterLevelLines:
waterLevelLines.append(line)
# -- END for loop
# -- END while loop
# Create files
for elementNo in FLOOD_ELEMENT_NUMBERS:
fileName = WATER_LEVEL_FILE.rsplit('.', 1)
stationName = FLOOD_PLAIN_CELL_MAP[elementNo].replace(' ', '_')
fileTimestamp = "%s_%s" % (date, time.replace(':', '-'))
fileName = "%s-%s-%s.%s" % \
(fileName[0], FLOOD_PLAIN_CELL_MAP[elementNo].replace(' ', '_'), fileTimestamp, fileName[1])
WATER_LEVEL_FILE_PATH = pjoin(WATER_LEVEL_DIR_PATH, fileName)
csvWriter = csv.writer(open(WATER_LEVEL_FILE_PATH, 'w'), delimiter=',', quotechar='|')
csvWriter.writerows(waterLevelSeriesDict[elementNo])
# Save Forecast values into Database
opts = {
'forceInsert': forceInsert,
'station': FLOOD_PLAIN_CELL_MAP[elementNo],
'run_name': runName
}
if utcOffset != timedelta():
opts['utcOffset'] = utcOffset
save_forecast_timeseries(adapter, waterLevelSeriesDict[elementNo], date, time, opts)
print('Extracted Cell No', elementNo, FLOOD_PLAIN_CELL_MAP[elementNo], 'into -> ', fileName)
except Exception as e:
traceback.print_exc()
print(e)
finally:
print('Completed processing', HYCHAN_OUT_FILE_PATH, ' to ', WATER_LEVEL_FILE_PATH)
| apache-2.0 | 7,930,331,980,908,002,000 | 41.06055 | 136 | 0.551019 | false |
adalke/rdkit | rdkit/Chem/Subshape/SubshapeBuilder.py | 1 | 4360 | # $Id$
#
# Copyright (C) 2007 by Greg Landrum
# All rights reserved
#
from __future__ import print_function
from rdkit import Chem,Geometry
from rdkit.Chem import AllChem
from rdkit.Chem.Subshape import SubshapeObjects
from rdkit.Chem.Subshape import BuilderUtils
from rdkit.six.moves import cPickle
import time
#-----------------------------------------------------------------------------
class SubshapeCombineOperations(object):
UNION=0
SUM=1
INTERSECT=2
#-----------------------------------------------------------------------------
class SubshapeBuilder(object):
gridDims=(20,15,10)
gridSpacing=0.5
winRad=3.0
nbrCount=7
terminalPtRadScale=0.75
fraction=0.25
stepSize=1.0
featFactory=None
def SampleSubshape(self,subshape1,newSpacing):
ogrid=subshape1.grid
rgrid = Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
newSpacing)
for idx in range(rgrid.GetSize()):
l = rgrid.GetGridPointLoc(idx)
v = ogrid.GetValPoint(l)
rgrid.SetVal(idx,v)
res = SubshapeObjects.ShapeWithSkeleton()
res.grid = rgrid
return res;
def GenerateSubshapeShape(self,cmpd,confId=-1,addSkeleton=True,**kwargs):
shape = SubshapeObjects.ShapeWithSkeleton()
shape.grid=Geometry.UniformGrid3D(self.gridDims[0],self.gridDims[1],self.gridDims[2],
self.gridSpacing)
AllChem.EncodeShape(cmpd,shape.grid,ignoreHs=False,confId=confId)
if addSkeleton:
conf = cmpd.GetConformer(confId)
      self.GenerateSubshapeSkeleton(shape,conf,**kwargs)
return shape
def __call__(self,cmpd,**kwargs):
return self.GenerateSubshapeShape(cmpd,**kwargs)
def GenerateSubshapeSkeleton(self,shape,conf=None,terminalPtsOnly=False,skelFromConf=True):
if conf and skelFromConf:
pts = BuilderUtils.FindTerminalPtsFromConformer(conf,self.winRad,self.nbrCount)
else:
pts = BuilderUtils.FindTerminalPtsFromShape(shape,self.winRad,self.fraction)
pts = BuilderUtils.ClusterTerminalPts(pts,self.winRad,self.terminalPtRadScale)
BuilderUtils.ExpandTerminalPts(shape,pts,self.winRad)
if len(pts)<3:
raise ValueError('only found %d terminals, need at least 3'%len(pts))
if not terminalPtsOnly:
pts = BuilderUtils.AppendSkeletonPoints(shape.grid,pts,self.winRad,self.stepSize)
for i,pt in enumerate(pts):
BuilderUtils.CalculateDirectionsAtPoint(pt,shape.grid,self.winRad)
if conf and self.featFactory:
BuilderUtils.AssignMolFeatsToPoints(pts,conf.GetOwningMol(),self.featFactory,self.winRad)
shape.skelPts=pts
def CombineSubshapes(self,subshape1,subshape2,operation=SubshapeCombineOperations.UNION):
import copy
cs = copy.deepcopy(subshape1)
if operation==SubshapeCombineOperations.UNION:
cs.grid |= subshape2.grid
elif operation==SubshapeCombineOperations.SUM:
cs.grid += subshape2.grid
elif operation==SubshapeCombineOperations.INTERSECT:
cs.grid &= subshape2.grid
else:
raise ValueError('bad combination operation')
return cs
if __name__=='__main__':
from rdkit.Chem import AllChem,ChemicalFeatures
from rdkit.Chem.PyMol import MolViewer
#cmpd = Chem.MolFromSmiles('CCCc1cc(C(=O)O)ccc1')
#cmpd = Chem.AddHs(cmpd)
if 1:
cmpd = Chem.MolFromSmiles('C1=CC=C1C#CC1=CC=C1')
cmpd = Chem.AddHs(cmpd)
AllChem.EmbedMolecule(cmpd)
AllChem.UFFOptimizeMolecule(cmpd)
AllChem.CanonicalizeMol(cmpd)
print(Chem.MolToMolBlock(cmpd), file=file('testmol.mol','w+'))
else:
cmpd = Chem.MolFromMolFile('testmol.mol')
builder=SubshapeBuilder()
if 1:
shape=builder.GenerateSubshapeShape(cmpd)
v = MolViewer()
if 1:
import tempfile
tmpFile = tempfile.mktemp('.grd')
v.server.deleteAll()
Geometry.WriteGridToFile(shape.grid,tmpFile)
time.sleep(1)
v.ShowMol(cmpd,name='testMol',showOnly=True)
v.server.loadSurface(tmpFile,'testGrid','',2.5)
v.server.resetCGO('*')
cPickle.dump(shape,file('subshape.pkl','w+'))
for i,pt in enumerate(shape.skelPts):
v.server.sphere(tuple(pt.location),.5,(1,0,1),'Pt-%d'%i)
if not hasattr(pt,'shapeDirs'): continue
momBeg = pt.location-pt.shapeDirs[0]
momEnd = pt.location+pt.shapeDirs[0]
v.server.cylinder(tuple(momBeg),tuple(momEnd),.1,(1,0,1),'v-%d'%i)
| bsd-3-clause | -3,022,887,758,401,373,000 | 34.447154 | 95 | 0.68555 | false |
bbc/ebu-tt-live-toolkit | ebu_tt_live/adapters/base.py | 1 | 3499 | import logging
import weakref
from abc import abstractmethod, abstractproperty
from ebu_tt_live.utils import AutoRegisteringABCMeta, AbstractStaticMember, validate_types_only
log = logging.getLogger(__name__)
# Interfaces
# ==========
class IDocumentDataAdapter(object, metaclass=AutoRegisteringABCMeta):
"""
This adapter is used to do various conversions on the payload between the carriage and the node
"""
__impl_registry = {}
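    # maps an 'expects' data type to a WeakValueDictionary of 'provides' data type -> registered adapter class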
_expects = AbstractStaticMember(validate_types_only)
_provides = AbstractStaticMember(validate_types_only)
@classmethod
def auto_register_impl(cls, impl_class):
impl_expects = impl_class.expects()
provides_map = cls.__impl_registry.setdefault(impl_expects, weakref.WeakValueDictionary())
impl_provides = impl_class.provides()
if impl_provides in list(provides_map.keys()):
log.warning(
'({} -> {}) adapter already registered: {}. Ignoring: {} '.format(
impl_expects,
impl_provides,
provides_map[impl_provides],
impl_class
)
)
else:
log.debug(
'Registering ({} -> {}) adapter: {}'.format(
impl_expects,
impl_provides,
impl_class
)
)
provides_map[impl_provides] = impl_class
@classmethod
def get_registered_impl(cls, expects, provides):
impl_class = cls.__impl_registry.get(expects, {}).get(provides, None)
if impl_class is None:
raise ValueError('No adapter found for: {} -> {}'.format(
expects, provides
))
return impl_class
@classmethod
def expects(cls):
"""
Data type expected
:return:
"""
if isinstance(cls._expects, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_expects\'')
return cls._expects
@classmethod
def provides(cls):
"""
Data type provided
:return:
"""
if isinstance(cls._provides, AbstractStaticMember):
raise TypeError('Classmethod relies on abstract property: \'_provides\'')
return cls._provides
@abstractmethod
def convert_data(self, data, **kwargs):
"""
Subclasses must implement this method
:param data:
:param kwargs: Extra parameters
:return:
"""
raise NotImplementedError()
class INodeCarriageAdapter(object, metaclass=AutoRegisteringABCMeta):
"""
    This adapter wraps the DocumentDataAdapter conversion logic and shows a dual interface. Its responsibility is
to facilitate direct communication between incompatible carriage mechanisms and processing nodes.
This is a tricky business because this class does not have a hardcoded expects-provides interface contract.
It works it out as it goes forward from the parameters.
"""
@abstractproperty
def data_adapters(self):
"""
Data conversion adapters
:return: list of DocumentDataAdapter instances
"""
@abstractmethod
def convert_data(self, data, **kwargs):
"""
This executes a conversion by looping through the data adapters.
:param data: Input data format
:param kwargs: Extra parameters
:return: Output data format
"""
| bsd-3-clause | 7,567,805,828,722,318,000 | 31.700935 | 112 | 0.606745 | false |
qtproject/qt-creator | src/libs/3rdparty/syntax-highlighting/data/generators/generate-cmake-syntax.py | 1 | 5026 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Generate Kate syntax file for CMake
#
# Copyright (c) 2017-2019 Alex Turbov <[email protected]>
#
# To install prerequisites:
#
# $ pip install --user click jinja2 yaml
#
# To use:
#
# $ ./generate-cmake-syntax.py cmake.yaml > ../syntax/cmake.xml
#
import click
import jinja2
import pathlib
import re
import yaml
import pprint
_TEMPLATED_NAME = re.compile('<[^>]+>')
_PROPERTY_KEYS = [
'global-properties'
, 'directory-properties'
, 'target-properties'
, 'source-properties'
, 'test-properties'
, 'cache-properties'
, 'install-properties'
]
_KW_RE_LIST = ['kw', 're']
_VAR_KIND_LIST = ['variables', 'environment-variables']
def try_transform_placeholder_string_to_regex(name):
'''
NOTE Some placeholders are not IDs, but numbers...
`CMAKE_MATCH_<N>` 4 example
'''
m = _TEMPLATED_NAME.split(name)
if 'CMAKE_MATCH_' in m:
return '\\bCMAKE_MATCH_[0-9]+\\b'
if 'CMAKE_ARGV' in m:
return '\\bCMAKE_ARGV[0-9]+\\b'
return '\\b{}\\b'.format('&id_re;'.join(list(m))) if 1 < len(m) else name
def partition_iterable(fn, iterable):
true, false = [], []
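    # int(fn(i)) picks the target list from the (false, true) pair, so the items matching fn
    # come back as the first element of the returned tuple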
for i in iterable:
(false, true)[int(fn(i))].append(i)
return true, false
def _transform_command_set(cmd, list_name):
args, args_re = partition_iterable(lambda x: _TEMPLATED_NAME.search(x) is None, cmd[list_name])
del cmd[list_name]
list_name = list_name.replace('-', '_')
cmd[list_name] = {k: sorted(set(v)) for k, v in zip(_KW_RE_LIST, [args, args_re])}
cmd[list_name]['re'] = [*map(lambda x: try_transform_placeholder_string_to_regex(x), args_re)]
return cmd
def transform_command(cmd):
can_be_nulary = True
if 'name' not in cmd:
raise RuntimeError('Command have no name')
if 'named-args' in cmd:
new_cmd = _transform_command_set(cmd, 'named-args')
assert new_cmd == cmd
can_be_nulary = False
if 'special-args' in cmd:
new_cmd = _transform_command_set(cmd, 'special-args')
assert new_cmd == cmd
can_be_nulary = False
if 'property-args' in cmd:
new_cmd = _transform_command_set(cmd, 'property-args')
assert new_cmd == cmd
can_be_nulary = False
cmd['nested_parentheses'] = cmd['nested-parentheses?'] if 'nested-parentheses?' in cmd else False
if 'nulary?' in cmd and cmd['nulary?'] and not can_be_nulary:
raise RuntimeError('Command `{}` w/ args declared nulary!?'.format(cmd['name']))
return cmd
#BEGIN Jinja filters
def cmd_is_nulary(cmd):
assert not ('named-args' in cmd or 'special-args' in cmd or 'property-args' in cmd)
return 'nulary?' in cmd and cmd['nulary?']
#END Jinja filters
@click.command()
@click.argument('input_yaml', type=click.File('r'))
@click.argument('template', type=click.File('r'), default='./cmake.xml.tpl')
def cli(input_yaml, template):
data = yaml.load(input_yaml)
# Partition `variables` and `environment-variables` lists into "pure" (key)words and regexes to match
for var_key in _VAR_KIND_LIST:
data[var_key] = {
k: sorted(set(v)) for k, v in zip(
_KW_RE_LIST
, [*partition_iterable(lambda x: _TEMPLATED_NAME.search(x) is None, data[var_key])]
)
}
data[var_key]['re'] = [
*map(
lambda x: try_transform_placeholder_string_to_regex(x)
, data[var_key]['re']
)
]
# Transform properties and make all-properties list
data['properties'] = {}
for prop in _PROPERTY_KEYS:
python_prop_list_name = prop.replace('-', '_')
props, props_re = partition_iterable(lambda x: _TEMPLATED_NAME.search(x) is None, data[prop])
del data[prop]
data['properties'][python_prop_list_name] = {
k: sorted(set(v)) for k, v in zip(_KW_RE_LIST, [props, props_re])
}
data['properties'][python_prop_list_name]['re'] = [
*map(lambda x: try_transform_placeholder_string_to_regex(x), props_re)
]
data['properties']['kinds'] = [*map(lambda name: name.replace('-', '_'), _PROPERTY_KEYS)]
# Make all commands list
data['commands'] = [
*map(
lambda cmd: transform_command(cmd)
, data['scripting-commands'] + data['project-commands'] + data['ctest-commands'])
]
# Fix node names to be accessible from Jinja template
data['generator_expressions'] = data['generator-expressions']
data['environment_variables'] = data['environment-variables']
del data['generator-expressions']
del data['environment-variables']
env = jinja2.Environment(
keep_trailing_newline=True
)
# Register convenience filters
env.tests['nulary'] = cmd_is_nulary
tpl = env.from_string(template.read())
result = tpl.render(data)
print(result)
if __name__ == '__main__':
cli()
    # TODO Handle exceptions and show errors
| gpl-3.0 | 3,560,213,243,156,102,700 | 27.885057 | 105 | 0.606446 | false |
annoviko/pyclustering | pyclustering/nnet/examples/hysteresis_examples.py | 1 | 2173 | """!
@brief Examples of usage and demonstration of abilities of Hysteresis Oscillatory Network.
@authors Andrei Novikov ([email protected])
@date 2014-2020
@copyright BSD-3-Clause
"""
from pyclustering.nnet.hysteresis import hysteresis_network, hysteresis_visualizer;
from pyclustering.nnet import *;
def template_dynamic(num_osc, own_weight = -3, neigh_weight = -1, initial_states = None, initial_outputs = None, steps = 1000, time = 10):
network = hysteresis_network(num_osc, own_weight, neigh_weight);
if (initial_states is not None):
network.states = initial_states;
if (initial_outputs is not None):
network.outputs = initial_outputs;
output_dynamic = network.simulate(steps, time);
hysteresis_visualizer.show_output_dynamic(output_dynamic);
ensembles = output_dynamic.allocate_sync_ensembles(tolerance = 0.5, threshold_steps = 5);
print("Allocated synchronous ensembles ( amout:", len(ensembles), "):", ensembles);
def one_oscillator_weight_2():
template_dynamic(1, -2);
def one_oscillator_weight_4():
template_dynamic(1, -4);
def two_oscillators_sync():
"Comment: Different initial state - state of sync. will be reached."
template_dynamic(2, -4, 1, [1, 0], [1, 1]);
def two_oscillators_desync():
"Note: if initial state is the same for both oscillators then desync. will not be exist. It is very important to set different values if desync. is required."
template_dynamic(2, -4, -1, [1, 0], [1, 1]);
def five_oscillators_positive_conn():
"Note: Oscillations are dead in this case (sync. should be in ideal case)"
template_dynamic(5, -4, 1, [1, 0.5, 0, -0.5, -1], [1, 1, 1, 1, 1]);
template_dynamic(5, -4, 1, [1, 0.8, 0.6, 0.4, 0.2], [-1, -1, -1, -1, -1]);
def five_oscillators_negative_conn():
"Comment: Full desync."
template_dynamic(5, -4, -1, [1, 0.5, 0, -0.5, -1], [1, 1, 1, 1, 1]);
one_oscillator_weight_2();
one_oscillator_weight_4();
two_oscillators_sync();
two_oscillators_desync();
five_oscillators_positive_conn();
five_oscillators_negative_conn(); | gpl-3.0 | 8,405,484,669,327,025,000 | 34.25 | 162 | 0.650253 | false |
rbaravalle/imfractal | imfractal/Algorithm/MFS_3D.py | 1 | 11780 | """
Copyright (c) 2013 Rodrigo Baravalle
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from Algorithm import *
import numpy as np
from math import log10
import scipy.signal
import scipy.io as sio
from scipy.stats import norm
class MFS_3D (Algorithm):
"""
:3D implementation of MFS through holder exponents f(alpha)
:version: 1.0
:author: Rodrigo Baravalle
"""
def __init__(self):
pass
def setDef(self, ind, f, ite, filename, file_mask, params):
# parameters: ind -> determines how many levels are used when computing the density
# choose 1 for using directly the image measurement im or
# >= 6 for computing the density of im (quite stable for >=5)
# f ----> determines the dimension of MFS vector
        # ite ---> determines how many levels (box sizes) are used when computing MFS for each level set
self.ind_num = ind # number of pixels for averaging
self.f_num = f # window
self.ite_num = ite
self.filename = filename
self.file_mask = file_mask
self.params = params
def gauss_kern(self,size_x, size_y, size_z):
""" Returns a normalized 3D gauss kernel array for convolutions """
m = np.float32(size_x)
n = np.float32(size_y)
o = np.float32(size_z)
sigma = 2; # ???
if(size_x <= 3): sigma = 1.5;
if(size_x == 5): sigma = 2.5;
z, y, x = np.mgrid[-(m-1)/2:(m-1)/2+1, -(n-1)/2:(n-1)/2+1, -(o-1)/2:(o-1)/2+1]
b = 2*(sigma**2)
square = lambda i : i**2
fm = lambda i: map(square, i)
x2 = map(fm, x)
y2 = map(fm, y)
z2 = map(fm, z)
        g = np.sum([x2, y2, z2], axis=0).astype(np.float32)
        # Gaussian weights exp(-(x^2 + y^2 + z^2) / (2*sigma^2)), normalized below to sum to 1
        g = np.exp(-g / b).astype(np.float32)
return g / g.sum()
def determine_threshold(self, arr):
# compute histogram of values
bins = range(np.min(arr), np.max(arr) + 1)
h = np.histogram(arr, bins=bins)
threshold = np.min(arr)
        # find the smallest intensity whose cumulative histogram covers 75% of the voxels
assert (len(arr.shape) == 3)
total_pixels = arr.shape[0] * arr.shape[1] * arr.shape[2]
for i in range(len(bins) + 1):
# compute sum of h(x) from x = 0 to x = i
partial_sum_vector = np.cumsum(h[0][: (i + 1)])
partial_sum = partial_sum_vector[len(partial_sum_vector) - 1]
percentage = (float)(partial_sum) / (float)(total_pixels)
if percentage > 0.75:
threshold = np.min(arr) + i
break
return threshold
def openMatlab(self, name, filename, greyscale):
import scipy.io as sio
arr = np.array(sio.loadmat(filename)[name]).astype(np.int32)
if greyscale:
return arr
if name == "S":
threshold = self.determine_threshold(arr)
arr = arr > threshold
a_v = arr.cumsum()
print "Amount of white pixels: ", a_v[len(a_v) - 1]
# debug - to see the spongious structure
# plt.imshow((arr[:,:,50]), cmap=plt.gray())
# plt.show()
return arr
def gradient(self, data):
Nx, Ny, Nz = data.shape
basic_fx = np.array([[-1, 0, 1], [0, 0, 0], [0, 0, 0]])
basic_fy = basic_fx.T
basic_fxy = [[-1, 0, 0], [0, 0, 0], [0, 0, 1]]
basic_fyx = [[0, 0, -1], [0, 0, 0], [1, 0, 0]]
fx = np.float32(0.5) * np.array([basic_fx, basic_fx, basic_fx])
fy = np.float32(0.5) * np.array([basic_fy, basic_fy, basic_fy])
fxy = np.float32(0.5) * np.array([basic_fxy, basic_fxy, basic_fxy])
fyx = np.float32(0.5) * np.array([basic_fyx, basic_fyx, basic_fyx])
a = scipy.signal.convolve(data, fx, mode="full")
Nx, Ny, Nz = a.shape
a = a[0:Nx - 2, 1:Ny - 1, 1:Nz - 1] # fix me, check z indices!
b = scipy.signal.convolve(data, fy, mode="full")
Nx, Ny, Nz = b.shape
b = b[1:Nx - 1, 0:Ny - 2, 1:Nz - 1]
c = scipy.signal.convolve(data, fxy, mode="full")
Nx, Ny, Nz = c.shape
c = c[1:Nx - 1, 1:Ny - 1, 1:Nz - 1]
d = scipy.signal.convolve(data, fyx, mode="full")
Nx, Ny, Nz = d.shape
d = d[1:Nx - 1, 1:Ny - 1, 1:Nz - 1]
data = a ** 2 + b ** 2 + c ** 2 + d ** 2
data = np.sqrt(data)
data = np.floor(data)
return data
def laplacian(self, data): # MFS of Laplacion
# 3d, octave:
# f1 = fspecial3('gaussian', 5, 1);
# f2 = -ones(3,3,3);
# f2(2,2,2) = 26;
# f = convn(f1, f2);
laplacian_kernel = np.load('exps/data/laplacian_kernel.npy')
print "SHAPES: !"
print laplacian_kernel.shape
print data.shape
a = scipy.signal.convolve(data, laplacian_kernel, mode="full")
Nx, Ny, Nz = a.shape
a = a[3:Nx - 3, 3:Ny - 3, 3:Nz - 3]
a = np.floor((a < 0).choose(a, 0))
return a
def getFDs(self, data = []):
"""
@param string filename : volume location
@param string file_mask : mask volume location
        @return [float] : 3D multifractal dimensions
@author: Rodrigo Baravalle. Code ported from Matlab and extended to 3D
"""
if len(data) == 0:
# data is a 3D grayscale volume
data = self.openMatlab('S', self.filename, True)
data_mask = self.openMatlab('M', self.file_mask, True)
# Masking
data = data * (data_mask > 0)
# Other multifractal measures
if self.params['gradient'] == True:
data = self.gradient(data)
else:
if self.params['laplacian'] == True:
print "laplacian!"
data = self.laplacian(data)
#Using [0..255] to denote the intensity profile of the image
grayscale_box = [0, 255]
#sigmoid function
#data = norm.cdf(data, loc=200.0, scale=100.0);
#Preprocessing: default intensity value of image ranges from 0 to 255
if abs(data).max()< 1:
data = data * grayscale_box[1]
else:
# put every value into [0, 255]
data = (data - data.min()) * 255 / (data.max() - data.min())
#######################
#DEBUG
print data.max(), data.min(), data.sum()
### Estimating density function of the volume
### by solving least squares for D in the equation
### log10(bw) = D*log10(c) + b
r = 1.0 / max(data.shape)
c = np.dot(range(1, self.ind_num+1), r)
c = map(lambda i: log10(i), c)
bw = np.zeros((self.ind_num, data.shape[0], data.shape[1], data.shape[2])).astype(np.float32)
bw[0] = data + 1
# DEBUG
#print "BW: ", bw.shape
k = 1
if(self.ind_num > 1):
bw[1] = scipy.signal.convolve(bw[0], self.gauss_kern(k+1, k+1, k+1), mode="full")[1:,1:]*((k+1)**2)
for k in range(2,self.ind_num):
temp = scipy.signal.convolve(bw[0], self.gauss_kern(k+1, k+1, k+1), mode="full")*((k+1)**2)
if(k==4):
bw[k] = temp[k - 1 - 1 : temp.shape[0] - (k / 2),
k - 1 - 1 : temp.shape[1] - (k / 2),
k - 1 - 1 : temp.shape[2] - (k / 2)]
else:
bw[k] = temp[k - 1 : temp.shape[0] - (1),
k - 1 : temp.shape[1] - (1),
k - 1 : temp.shape[2] - (1)]
#print bw.min(), bw.max()
bw = np.log10(bw)
n1 = c[0] * c[0]
n2 = bw[0] * c[0]
for k in range(1,self.ind_num):
n1 = n1 + c[k]*c[k]
n2 = n2 + bw[k]*c[k]
sum3 = bw[0]
for i in range(1,self.ind_num):
sum3 = sum3 + bw[i]
if(self.ind_num >1):
D = (n2*self.ind_num-sum(c)*sum3)/(n1*self.ind_num -sum(c)*sum(c));
if (self.ind_num > 1):
max_D = np.float32(4)
min_D = np.float32(1)
D = grayscale_box[1]*(D-min_D)/(max_D - min_D)+grayscale_box[0]
else:
D = data
#Partition the density
# throw away the boundary
D = D[self.ind_num - 1 : D.shape[0] - self.ind_num + 1,
self.ind_num - 1 : D.shape[1] - self.ind_num + 1,
self.ind_num - 1 : D.shape[2] - self.ind_num + 1]
IM = np.zeros(D.shape)
gap = np.ceil((grayscale_box[1] - grayscale_box[0])/np.float32(self.f_num));
center = np.zeros(self.f_num);
for k in range(1,self.f_num+1):
bin_min = (k-1) * gap;
bin_max = k * gap - 1;
center[k-1] = round((bin_min + bin_max) / 2);
D = ((D <= bin_max) & (D >= bin_min)).choose(D, center[k-1])
D = ((D >= bin_max)).choose(D,0)
D = ((D < 0)).choose(D,0)
IM = D
# Constructing the filter for approximating log fitting
r = max(IM.shape)
c = np.zeros(self.ite_num)
c[0] = 1;
for k in range(1,self.ite_num):
c[k] = c[k-1]/(k+1)
c = c / sum(c);
# Construct level sets
Idx_IM = np.zeros(IM.shape);
for k in range(0, self.f_num):
IM = (IM == center[k]).choose(IM,k+1)
Idx_IM = IM
IM = np.zeros(IM.shape)
#Estimate MFS by box-counting
num = np.zeros(self.ite_num)
MFS = np.zeros(self.f_num)
for k in range(1, self.f_num+1):
#print k, self.f_num
IM = np.zeros(IM.shape)
IM = (Idx_IM == k).choose(Idx_IM, 255 + k)
IM = (IM<255 + k).choose(IM, 0)
IM = (IM > 0).choose(IM, 1)
temp = max(IM.sum(), 1)
num[0] = log10(temp)/log10(r);
for j in range(2, self.ite_num+1):
mask = np.ones((j, j, j))
bw = scipy.signal.convolve(IM, mask, mode = "full")[1:, 1:, 1:]
ind_x = np.arange(0, IM.shape[0], j)
ind_y = np.arange(0, IM.shape[1], j)
ind_z = np.arange(0, IM.shape[2], j)
bw = bw[np.ix_(ind_x, ind_y, ind_z)]
idx = (bw > 0 ).sum()
temp = max(idx, 1)
num[j-1] = log10( temp ) / log10( r / j )
MFS[k-1] = sum(c*num)
return MFS
| bsd-3-clause | 1,529,415,426,108,300,800 | 32.276836 | 111 | 0.519864 | false |
karec/oct-browser | octbrowser/browser.py | 1 | 14578 | """This file contain the main class for the octbrowser
It represent a simple browser object with all methods
"""
import re
import os
import lxml.html as lh
import requests
from lxml.cssselect import CSSSelector
from octbrowser.exceptions import FormNotFoundException, NoUrlOpen, LinkNotFound, NoFormWaiting, HistoryIsNone
from octbrowser.history.base import BaseHistory
from octbrowser.history.cached import CachedHistory
class Browser(object):
"""This class represent a minimal browser. Build on top of lxml awesome library it let you write script for accessing
or testing website with python scripts
:param session: The session object to use. If set to None will use requests.Session
:type session: requests.Session
:param base_url: The base url for the website, will append it for every link without a full url
:type base_url: str
:param history: The history object to use. If set to None no history will be stored.
    :type history: octbrowser.history.base.BaseHistory
"""
def __init__(self, session=None, base_url='', **kwargs):
self._sess_bak = session
self._history = kwargs.get('history', CachedHistory())
# check history class
if self._history is not None:
assert isinstance(self._history, BaseHistory)
self._response = None
self._base_url = base_url
self.form = None
self.form_data = None
self.session = session or requests.Session()
def clean_browser(self):
"""Clears browser history, session, current page, and form state
self._base_url is unmodified
:return: None
"""
self.clean_session()
self._response = None
self.form = None
self.form_data = None
try:
self.clear_history()
except HistoryIsNone:
pass
def add_header(self, name, value):
"""Allow you to add custom header, one by one.
Specify existing name for update
Headers will be used by every request
:param name: the key of the header
:type name: str
:param value: the associated value
:type value: str
:return: None
"""
self.session.headers[name] = value
def del_header(self, key):
"""Try to delete the 'key' of headers property
:param key: the key to delete
:type key: mixed
:return: None
"""
self.session.headers.pop(key, None)
def set_headers(self, headers):
"""Setter for headers property
:param headers: a dict containing all headers
:type headers: dict
:return: None
"""
self.session.headers.clear()
self.session.headers.update(headers)
def clean_session(self):
"""This function is called by the core of multi-mechanize. It cleans the session for avoiding cache or cookies
errors, or giving false results based on cache
:return: None
"""
del self.session
self.session = self._sess_bak or requests.Session()
@property
def _url(self):
"""Url of the current page or None if there isn't one
:return: url of current page or None if there isn't one
"""
try:
return self._response.url
except AttributeError:
return None
@property
def _html(self):
"""Parsed html of the current page or None if there isn't any
:return: html of current page or None if there isn't any
"""
try:
return self._response.html
except AttributeError:
return None
@property
def _form_waiting(self):
"""Check if a form is actually on hold or not
:return: True or False
"""
if self.form is not None:
return True
return False
def _process_response(self, response):
"""Update the response object with parsed html and browser properties
        The html property is an lxml.Html object, needed for parsing the content, getting elements like forms, etc...
        If you want the raw html, you can use either::
response.read() # or .content for urllib response objects
Or use lxml::
lxml.html.tostring(response.html)
:param response: requests.Response or urllib.Response object
:return: the updated Response object
"""
if not hasattr(response, 'html'):
try:
html = response.content
except AttributeError:
html = response.read()
response.content = html
tree = lh.fromstring(html)
tree.make_links_absolute(base_url=self._base_url)
response.html = tree
self._response = response
return response
def get_form(self, selector=None, nr=0, at_base=False):
"""Get the form selected by the selector and / or the nr param
Raise:
* oct.core.exceptions.FormNotFoundException
* oct.core.exceptions.NoUrlOpen
:param selector: A css-like selector for finding the form
:type selector: str
        :param nr: the index of the form; if selector is set to None, it will search the whole page
        :type nr: int
        :param at_base: must be set to True in case the form action is on the base_url page
:type at_base: bool
:return: None
"""
if self._html is None:
raise NoUrlOpen('No url open')
if selector is None:
self.form = self._html.forms[nr]
self.form_data = dict(self._html.forms[nr].fields)
else:
sel = CSSSelector(selector)
for el in sel(self._html):
if el.forms:
self.form = el.forms[nr]
self.form_data = dict(el.forms[nr].fields)
if self.form is None:
raise FormNotFoundException('Form not found with selector {0} and nr {1}'.format(selector, nr))
# common case where action was empty before make_link_absolute call
if (self.form.action == self._base_url and
                self._url != self._base_url and
not at_base):
self.form.action = self._url
def get_select_values(self):
"""Get the available values of all select and select multiple fields in form
:return: a dict containing all values for each fields
:raises: NoFormWaiting
"""
if not self._form_waiting:
raise NoFormWaiting('No form waiting')
data = {}
for i in self.form.inputs:
if isinstance(i, lh.SelectElement):
data[i.name] = i.value_options
return data
def submit_form(self):
"""Submit the form filled with form_data property dict
Raise:
oct.core.exceptions.NoFormWaiting
:return: Response object after the submit
"""
if not self._form_waiting:
raise NoFormWaiting('No form waiting to be send')
self.form.fields = self.form_data
r = lh.submit_form(self.form, open_http=self._open_session_http)
resp = self._process_response(r)
if self._history is not None:
self._history.append_item(resp)
self.form_data = None
self.form = None
return resp
def _open_session_http(self, method, url, values):
"""Custom method for form submission, send to lxml submit form method
:param method: the method of the form (POST, GET, PUT, DELETE)
:type method: str
:param url: the url of the action of the form7
:type url: str
:param values: the values of the form
:type values: dict
:return: Response object from requests.request method
"""
return self.session.request(method, url, None, values)
def open_url(self, url, data=None, **kwargs):
"""Open the given url
:param url: The url to access
:type url: str
:param data: Data to send. If data is set, the browser will make a POST request
:type data: dict
:return: The Response object from requests call
"""
if data:
response = self.session.post(url, data, **kwargs)
else:
response = self.session.get(url, **kwargs)
response = self._process_response(response)
if self._history is not None:
self._history.append_item(response)
response.connection.close()
return response
def back(self):
"""Go to the previous url in the history
:return: the Response object
:rtype: requests.Response
:raises: NoPreviousPage, HistoryIsNone
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
response = self._history.back()
return self._process_response(response)
def forward(self):
"""Go to the next url in the history
:return: the Response object
:rtype: requests.Response
:raises: EndOfHistory, HistoryIsNone
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
response = self._history.forward()
return self._process_response(response)
def refresh(self):
"""Refresh the current page by resending the request
:return: the Response object
:rtype: requests.Response
:raises: NoUrlOpen
"""
if self._response is None:
raise NoUrlOpen("Can't perform refresh. No url open")
response = self.session.send(self._response.request)
return self._process_response(response)
def clear_history(self):
"""Re initialise the history
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
self._history.clear_history()
@property
def history(self):
"""Return the actual history list
:return: the history list
:rtype: list
:raises: HistoryIsNone
"""
if self._history is None:
raise HistoryIsNone("You must set history if you need to use historic methods")
return self._history.history
@property
def history_object(self):
"""Return the actual history object
:return: the _history property
:rtype: History
"""
return self._history
def follow_link(self, selector, url_regex=None):
"""Will access the first link found with the selector
Raise:
oct.core.exceptions.LinkNotFound
:param selector: a string representing a css selector
:type selector: str
:param url_regex: regex for finding the url, can represent the href attribute or the link content
:type url_regex: str
:return: Response object
"""
sel = CSSSelector(selector)
resp = None
if self._html is None:
raise NoUrlOpen
for e in sel(self._html):
if url_regex:
r = re.compile(url_regex)
if r.match(e.get('href')) or r.match(e.xpath('string()')):
return self.open_url(e.get('href'))
else:
return self.open_url(e.get('href'))
if resp is None:
raise LinkNotFound('Link not found')
def get_html_element(self, selector):
"""Return a html element as string. The element will be find using the `selector` param
Use this method for get single html elements, if you want to get a list of elements,
please use `get_html_elements`
:param selector: a string representing a css selector
:type selector: str
        :return: a string containing the element; if multiple elements are found, they will be concatenated
:rtype: str
"""
if self._html is None:
raise NoUrlOpen()
elements = self._html.cssselect(selector)
ret = ""
for elem in elements:
ret += lh.tostring(elem, encoding='unicode', pretty_print=True)
return ret
def get_html_elements(self, selector):
"""Return a list of lxml.html.HtmlElement matching the `selector` argument
:param selector: a string representing a css selector
:type selector: str
        :return: a list of lxml.html.HtmlElement objects for the found elements
:rtype: list
"""
if self._html is None:
raise NoUrlOpen()
return self._html.cssselect(selector)
def get_resource(self, selector, output_dir, source_attribute='src'):
"""Get a specified ressource and write it to the output dir
Raise:
OSError
:param selector: a string representing a css selector
:type selector: str
        :param output_dir: the directory where the resources will be written
        :type output_dir: str
        :param source_attribute: the attribute from which to retrieve the url needed for downloading the resource
        :type source_attribute: str
        :return: number of resources successfully saved (zero for failure)
"""
if self._html is None:
raise NoUrlOpen()
elements = self._html.cssselect(selector)
cnt = 0
if not elements or len(elements) == 0:
return cnt
for elem in elements:
src = elem.get(source_attribute)
if not src:
continue
response = requests.get(src, stream=True)
if not response.ok:
continue
# Save resource to file
filename = os.path.basename(response.url)
path = os.path.join(output_dir, filename)
with open(path, 'wb') as f:
for block in response.iter_content(1024):
if not block:
break
f.write(block)
cnt += 1
return cnt
@staticmethod
def open_in_browser(response):
"""Provide a simple interface for `lxml.html.open_in_browser` function.
Be careful, use this function only for debug purpose
:param response: the response object to open in the browser
:type response: requests.Response
:return:
"""
lh.open_in_browser(response.html)
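# A minimal usage sketch of the Browser class above (the URL and form field
# names are assumptions, not part of this module):
#
#   browser = Browser(base_url='http://example.com')
#   browser.open_url('http://example.com/login')
#   browser.get_form('#login-form')
#   browser.form_data['username'] = 'alice'
#   browser.form_data['password'] = 'secret'
#   response = browser.submit_form()
#   browser.follow_link('a', url_regex='.*logout.*')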
| mit | -6,376,803,414,716,263,000 | 32.131818 | 121 | 0.599465 | false |
DarthMaulware/EquationGroupLeaks | Leak #5 - Lost In Translation/windows/Resources/Ops/PyScripts/lib/ops/pprint.py | 1 | 4263 |
from __future__ import print_function
from __future__ import division
import dsz
import dsz.ui
import ops.data
ALIGN_LEFT = '<'
ALIGN_CENTER = '_'
ALIGN_RIGHT = '>'
def pprint(data, header=None, dictorder=None, echocodes=None, align=None, print_handler=print):
if ((data is None) or (len(data) == 0)):
return
if ((dict is type(data[0])) and (dictorder is None)):
dictorder = data[0].keys()
if ((dict is type(data[0])) and (header is None)):
header = dictorder
if isinstance(data[0], ops.data.OpsObject):
newdata = list()
for item in data:
newdata.append(item.__dict__)
data = newdata
if (dictorder is None):
raise Exception('You must specify a dictorder (set of keys) when pprinting an ops.data object')
if (header is None):
header = dictorder
(sdata, align) = makeStrings(data, dictorder, align)
(widths, percents) = calcSize(sdata, header)
output = ''
if header:
for i in range(len(header)):
output += ((('|' + (' ' * (((widths[i] - len(header[i])) // 2) + 1))) + header[i]) + (' ' * (((widths[i] - len(header[i])) // 2) + 1)))
if ((widths[i] - len(header[i])) % 2):
output += ' '
if percents[i]:
output += (' ' * (percents[i] - header[i].count('%')))
output += '|'
if echocodes:
dsz.ui.Echo(output)
output = ''
else:
output += '\n'
for i in range(len(widths)):
output += ('+-' + ('-' * ((widths[i] + 1) + percents[i])))
output += '+'
if echocodes:
dsz.ui.Echo(output)
output = ''
else:
output += '\n'
for j in range(len(sdata)):
d = sdata[j]
a = align[j]
for i in range(len(d)):
if (a[i] == ALIGN_RIGHT):
output += ((('|' + (' ' * ((widths[i] - len(d[i])) + 1))) + d[i]) + ' ')
elif (a[i] == ALIGN_CENTER):
output += ((('|' + (' ' * (((widths[i] - len(d[i])) // 2) + 1))) + d[i]) + (' ' * (((widths[i] - len(d[i])) // 2) + 1)))
if ((widths[i] - len(d[i])) % 2):
output += ' '
else:
output += (('| ' + d[i]) + (' ' * ((widths[i] - len(d[i])) + 1)))
if percents[i]:
output += (' ' * (percents[i] - d[i].count('%')))
output += '|'
if echocodes:
dsz.ui.Echo((output.encode('utf8') if isinstance(output, unicode) else output), echocodes[j])
output = ''
else:
output += '\n'
if (echocodes is None):
print_handler(output, end='')
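# A minimal usage sketch, assuming the DSZ environment this module targets
# (with echocodes left as None the table goes to print_handler):
#
#   rows = [{'name': 'alice', 'uid': 1000}, {'name': 'bob', 'uid': 1001}]
#   pprint(rows, header=['Name', 'UID'], dictorder=['name', 'uid'])
#
# which renders an ASCII table roughly like:
#
#   | Name  | UID  |
#   +-------+------+
#   | alice | 1000 |
#   | bob   | 1001 |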
def makeStrings(data, dictOrder, align):
r = []
a = ([] if (align is None) else None)
for i in data:
c = []
ac = []
if dictOrder:
for k in dictOrder:
c += ([i[k]] if (unicode is type(i[k])) else [(str(i[k]) if (i[k] is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(i[k])) or (float is type(i[k])) or (long is type(i[k]))) else [ALIGN_LEFT])
else:
for k in i:
c += ([k] if (unicode is type(k)) else [(str(k) if (k is not None) else '')])
if (a is not None):
ac += ([ALIGN_RIGHT] if ((int is type(k)) or (float is type(k)) or (long is type(k))) else [ALIGN_LEFT])
r += [c]
if (a is not None):
a += [ac]
return (r, (a if (a is not None) else align))
def calcSize(data, header):
widths = range(len(data[0]))
percents = range(len(data[0]))
for i in widths:
widths[i] = 0
percents[i] = 0
if header:
for i in range(len(header)):
r = len(header[i])
if (r > widths[i]):
widths[i] = r
r = header[i].count('%')
if (r > percents[i]):
percents[i] = r
for d in data:
for i in range(len(d)):
r = len(d[i])
if (r > widths[i]):
widths[i] = r
r = d[i].count('%')
if (r > percents[i]):
percents[i] = r
return (widths, percents) | unlicense | -8,518,540,465,188,391,000 | 35.444444 | 147 | 0.451325 | false |
lem9/weblate | weblate/permissions/templatetags/permissions.py | 1 | 5965 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2017 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
from django import template
import weblate.permissions.helpers
register = template.Library()
@register.assignment_tag
def can_upload_translation(user, translation):
return weblate.permissions.helpers.can_upload_translation(
user, translation
)
@register.assignment_tag
def can_translate(user, translation):
return weblate.permissions.helpers.can_translate(
user, translation
)
@register.assignment_tag
def can_suggest(user, translation):
return weblate.permissions.helpers.can_suggest(
user, translation
)
@register.assignment_tag
def can_accept_suggestion(user, translation):
return weblate.permissions.helpers.can_accept_suggestion(
user, translation
)
@register.assignment_tag
def can_delete_suggestion(user, translation, suggestion):
return weblate.permissions.helpers.can_delete_suggestion(
user, translation, suggestion
)
@register.assignment_tag
def can_vote_suggestion(user, translation):
return weblate.permissions.helpers.can_vote_suggestion(
user, translation
)
@register.assignment_tag
def can_use_mt(user, translation):
return weblate.permissions.helpers.can_use_mt(user, translation)
@register.assignment_tag
def can_see_repository_status(user, project):
return weblate.permissions.helpers.can_see_repository_status(user, project)
@register.assignment_tag
def can_commit_translation(user, project):
return weblate.permissions.helpers.can_commit_translation(user, project)
@register.assignment_tag
def can_update_translation(user, project):
return weblate.permissions.helpers.can_update_translation(user, project)
@register.assignment_tag
def can_push_translation(user, project):
return weblate.permissions.helpers.can_push_translation(user, project)
@register.assignment_tag
def can_reset_translation(user, project):
return weblate.permissions.helpers.can_reset_translation(user, project)
@register.assignment_tag
def can_lock_translation(user, project):
return weblate.permissions.helpers.can_lock_translation(user, project)
@register.assignment_tag
def can_lock_subproject(user, project):
return weblate.permissions.helpers.can_lock_subproject(user, project)
@register.assignment_tag
def can_edit_flags(user, project):
return weblate.permissions.helpers.can_edit_flags(user, project)
@register.assignment_tag
def can_edit_priority(user, project):
return weblate.permissions.helpers.can_edit_priority(user, project)
@register.assignment_tag
def can_ignore_check(user, project):
return weblate.permissions.helpers.can_ignore_check(user, project)
@register.assignment_tag
def can_delete_comment(user, comment):
return weblate.permissions.helpers.can_delete_comment(user, comment)
@register.assignment_tag
def can_manage_acl(user, project):
return weblate.permissions.helpers.can_manage_acl(user, project)
@register.assignment_tag
def can_download_changes(user, project):
return weblate.permissions.helpers.can_download_changes(user, project)
@register.assignment_tag
def can_view_reports(user, project):
return weblate.permissions.helpers.can_view_reports(user, project)
@register.assignment_tag
def can_add_translation(user, project):
return weblate.permissions.helpers.can_add_translation(user, project)
@register.assignment_tag
def can_remove_translation(user, project):
return weblate.permissions.helpers.can_remove_translation(user, project)
@register.assignment_tag
def can_edit_subproject(user, project):
return weblate.permissions.helpers.can_edit_subproject(user, project)
@register.assignment_tag
def can_edit_project(user, project):
return weblate.permissions.helpers.can_edit_project(user, project)
@register.assignment_tag
def can_upload_dictionary(user, project):
return weblate.permissions.helpers.can_upload_dictionary(user, project)
@register.assignment_tag
def can_delete_dictionary(user, project):
return weblate.permissions.helpers.can_delete_dictionary(user, project)
@register.assignment_tag
def can_change_dictionary(user, project):
return weblate.permissions.helpers.can_change_dictionary(user, project)
@register.assignment_tag
def can_add_dictionary(user, project):
return weblate.permissions.helpers.can_add_dictionary(user, project)
@register.assignment_tag
def can_add_comment(user, project):
return weblate.permissions.helpers.can_add_comment(user, project)
@register.assignment_tag
def can_overwrite_translation(user, project):
return weblate.permissions.helpers.can_overwrite_translation(user, project)
@register.assignment_tag
def can_see_git_repository(user, project):
return weblate.permissions.helpers.can_see_git_repository(user, project)
@register.assignment_tag
def can_add_screenshot(user, project):
return weblate.permissions.helpers.can_add_screenshot(user, project)
@register.assignment_tag
def can_change_screenshot(user, project):
return weblate.permissions.helpers.can_change_screenshot(user, project)
@register.assignment_tag
def can_delete_screenshot(user, project):
return weblate.permissions.helpers.can_delete_screenshot(user, project)
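# A minimal template-side usage sketch (Django assignment tags; the template
# below is illustrative only and assumes the view provides ``user`` and
# ``translation`` in the context):
#
#   {% load permissions %}
#   {% can_translate user translation as user_can_translate %}
#   {% if user_can_translate %}
#     ... render the edit form ...
#   {% endif %}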
| gpl-3.0 | -306,578,692,451,510,400 | 27.390476 | 79 | 0.77256 | false |
lmarent/network_agents_ver2_python | simulation_site/simulation/forms.py | 1 | 4022 | from django import forms
from django.core.exceptions import ValidationError
from django.forms.models import inlineformset_factory
import inspect
import os
import re
import sys
from simulation.models import ProbabilityDistribution
from simulation.models import DiscreteProbabilityDistribution
from simulation.models import CostFunction
from simulation.models import ContinuousCostFunction
class ProbabilityDistributionForm(forms.ModelForm):
class Meta:
model = ProbabilityDistribution
fields = ('name', 'class_name', 'domain')
def formfield_for_choice_field(self, available_choices):
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(currentdir)
file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
dir_path = file_path.split('/')
dir_path.pop() # remove ./simulation from the list
dir_path.pop() # remove ./simulation_site from the list
probability_directory = '/'.join(dir_path)
probability_directory += '/agents/probabilities'
black_list = ['__init__','ProbabilityDistribution',
'ProbabilityDistributionFactory',
'ProbabilityDistributionException']
for filename in os.listdir (probability_directory):
# Ignore subfolders
if os.path.isdir (os.path.join(probability_directory, filename)):
continue
else:
if re.match(r".*?\.py$", filename):
classname = re.sub(r".py", r"", filename)
if (classname not in black_list):
available_choices.append((classname, classname))
def __init__(self, *args, **kwargs):
available_choices = []
self.formfield_for_choice_field(available_choices)
print available_choices
#self.fields['class_name'].choices = available_choices
return super(ProbabilityDistributionForm, self).__init__(*args, **kwargs)
# inlineformset_factory creates a Class from a parent model (ProbabilityDistribution)
# to a child model (DiscreteProbabilityDistribution)
DiscreteProbabilityFormSet = inlineformset_factory(
ProbabilityDistribution,
DiscreteProbabilityDistribution, fields=('value','label','probability')
)
class CostFunctionForm(forms.ModelForm):
class Meta:
model = CostFunction
fields = ('name', 'class_name', 'range_function')
def formfield_for_choice_field(self, available_choices):
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
sys.path.append(currentdir)
file_path = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
dir_path = file_path.split('/')
dir_path.pop() # remove ./simulation from the list
dir_path.pop() # remove ./simulation_site from the list
costfunction_directory = '/'.join(dir_path)
costfunction_directory += '/agents/costfunction'
black_list = ['__init__','CostFunction', 'CostFunctionFactory']
for filename in os.listdir (costfunction_directory):
# Ignore subfolders
if os.path.isdir (os.path.join(costfunction_directory, filename)):
continue
else:
if re.match(r".*?\.py$", filename):
classname = re.sub(r".py", r"", filename)
if (classname not in black_list):
available_choices.append((classname, classname))
def __init__(self, *args, **kwargs):
available_choices = []
self.formfield_for_choice_field(available_choices)
print available_choices
#self.fields['class_name'].choices = available_choices
return super(CostFunctionForm, self).__init__(*args, **kwargs)
# inlineformset_factory creates a Class from a parent model (CostFunction)
# to a child model (ContinuousCostFunction)
ConstinousCostFunctionFormSet = inlineformset_factory(
CostFunction,
ContinuousCostFunction, fields=('parameter', 'value')
)
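# A minimal usage sketch for the inline formsets above (the view wiring and the
# primary key are assumptions, not part of this module):
#
#   distribution = ProbabilityDistribution.objects.get(pk=1)
#   formset = DiscreteProbabilityFormSet(request.POST or None, instance=distribution)
#   if formset.is_valid():
#       formset.save()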
| mit | -7,739,133,508,648,501,000 | 39.22 | 94 | 0.663352 | false |
USStateDept/geonode | geonode/documents/models.py | 1 | 6056 | import logging
import os
import uuid
from django.db import models
from django.db.models import signals
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.contrib.contenttypes import generic
from django.contrib.staticfiles import finders
from django.utils.translation import ugettext_lazy as _
from geonode.layers.models import Layer
from geonode.base.models import ResourceBase, resourcebase_post_save
from geonode.maps.signals import map_changed_signal
from geonode.maps.models import Map
IMGTYPES = ['jpg', 'jpeg', 'tif', 'tiff', 'png', 'gif']
logger = logging.getLogger(__name__)
class Document(ResourceBase):
"""
A document is any kind of information that can be attached to a map such as pdf, images, videos, xls...
"""
# Relation to the resource model
content_type = models.ForeignKey(ContentType, blank=True, null=True)
object_id = models.PositiveIntegerField(blank=True, null=True)
resource = generic.GenericForeignKey('content_type', 'object_id')
doc_file = models.FileField(upload_to='documents',
null=True,
blank=True,
verbose_name=_('File'))
extension = models.CharField(max_length=128, blank=True, null=True)
doc_type = models.CharField(max_length=128, blank=True, null=True)
doc_url = models.URLField(
blank=True,
null=True,
help_text=_('The URL of the document if it is external.'),
verbose_name=_('URL'))
def __unicode__(self):
return self.title
def get_absolute_url(self):
return reverse('document_detail', args=(self.id,))
@property
def name_long(self):
if not self.title:
return str(self.id)
else:
return '%s (%s)' % (self.title, self.id)
def _render_thumbnail(self):
from cStringIO import StringIO
size = 200, 150
try:
from PIL import Image, ImageOps
except ImportError, e:
logger.error(
'%s: Pillow not installed, cannot generate thumbnails.' %
e)
return None
try:
# if wand is installed, than use it for pdf thumbnailing
from wand import image
except:
wand_available = False
else:
wand_available = True
if wand_available and self.extension and self.extension.lower(
) == 'pdf' and self.doc_file:
logger.debug(
'Generating a thumbnail for document: {0}'.format(
self.title))
with image.Image(filename=self.doc_file.path) as img:
img.sample(*size)
return img.make_blob('png')
elif self.extension and self.extension.lower() in IMGTYPES and self.doc_file:
img = Image.open(self.doc_file.path)
img = ImageOps.fit(img, size, Image.ANTIALIAS)
else:
filename = finders.find('documents/{0}-placeholder.png'.format(self.extension), False) or \
finders.find('documents/generic-placeholder.png', False)
if not filename:
return None
img = Image.open(filename)
imgfile = StringIO()
img.save(imgfile, format='PNG')
return imgfile.getvalue()
@property
def class_name(self):
return self.__class__.__name__
class Meta(ResourceBase.Meta):
pass
def get_related_documents(resource):
if isinstance(resource, Layer) or isinstance(resource, Map):
ct = ContentType.objects.get_for_model(resource)
return Document.objects.filter(content_type=ct, object_id=resource.pk)
else:
return None
def pre_save_document(instance, sender, **kwargs):
base_name, extension, doc_type = None, None, None
if instance.doc_file:
base_name, extension = os.path.splitext(instance.doc_file.name)
instance.extension = extension[1:]
doc_type_map = settings.DOCUMENT_TYPE_MAP
if doc_type_map is None:
doc_type = 'other'
else:
if instance.extension in doc_type_map:
doc_type = doc_type_map[''+instance.extension]
else:
doc_type = 'other'
instance.doc_type = doc_type
elif instance.doc_url:
if len(instance.doc_url) > 4 and instance.doc_url[-4] == '.':
instance.extension = instance.doc_url[-3:]
if not instance.uuid:
instance.uuid = str(uuid.uuid1())
instance.csw_type = 'document'
if instance.abstract == '' or instance.abstract is None:
instance.abstract = 'No abstract provided'
if instance.title == '' or instance.title is None:
instance.title = instance.doc_file.name
if instance.resource:
instance.csw_wkt_geometry = instance.resource.geographic_bounding_box.split(
';')[-1]
instance.bbox_x0 = instance.resource.bbox_x0
instance.bbox_x1 = instance.resource.bbox_x1
instance.bbox_y0 = instance.resource.bbox_y0
instance.bbox_y1 = instance.resource.bbox_y1
else:
instance.bbox_x0 = -180
instance.bbox_x1 = 180
instance.bbox_y0 = -90
instance.bbox_y1 = 90
def create_thumbnail(sender, instance, created, **kwargs):
from geonode.tasks.update import create_document_thumbnail
create_document_thumbnail.delay(object_id=instance.id)
def update_documents_extent(sender, **kwargs):
model = 'map' if isinstance(sender, Map) else 'layer'
ctype = ContentType.objects.get(model=model)
for document in Document.objects.filter(content_type=ctype, object_id=sender.id):
document.save()
signals.pre_save.connect(pre_save_document, sender=Document)
signals.post_save.connect(create_thumbnail, sender=Document)
signals.post_save.connect(resourcebase_post_save, sender=Document)
map_changed_signal.connect(update_documents_extent)
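# A minimal usage sketch (assumes an existing Layer or Map instance named
# ``resource``; the Documents attached to it are returned):
#
#   for document in get_related_documents(resource) or []:
#       logger.debug(document.name_long)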
| gpl-3.0 | -3,836,716,759,410,032,600 | 31.735135 | 107 | 0.63177 | false |
rodrigozc/mockatron | mockatron_core/utils.py | 1 | 6477 | from django.http import HttpResponse
from django.template import Context
from .models import *
from .constants import *
from .classes import *
from xml.etree import ElementTree
import hashlib, json, xmltodict, re, urllib.request, logging
logger = logging.getLogger("django")
def extract_agent_data_from_request(request):
result = {}
if "HTTP_X_FORWARDED_PROTO" in request.META:
result['protocol'] = request.META["HTTP_X_FORWARDED_PROTO"]
else:
result['protocol'] = request.scheme
logger.info(request.META)
if 'HTTP_X_MOCKATRON_ORIGINAL_HOST' in request.META:
result['host'] = request.META["HTTP_X_MOCKATRON_ORIGINAL_HOST"].split(":")[0]
result['port'] = request.META["HTTP_X_MOCKATRON_ORIGINAL_HOST"].split(":")[1]
else:
result['host'] = request.META["HTTP_HOST"].split(":")[0]
if 'HTTP_X_FORWARDED_PORT' in request.META:
result['port'] = request.META["HTTP_X_FORWARDED_PORT"]
else:
result['port'] = request.META["SERVER_PORT"]
result['path'] = request.path
result['method'] = request.method
result['content_type'] = request.META["CONTENT_TYPE"]
if result['content_type'] != None:
result['content_type'] = result['content_type'].split(";")[0]
return result
def create_and_return_agent(agent_data):
agent = Agent(protocol=agent_data['protocol'], host=agent_data['host'], port=agent_data['port'], path=agent_data['path'], method=agent_data['method'], content_type=agent_data['content_type'])
agent.save()
if agent.content_type == CONTENT_TYPE_XML:
try:
req = urllib.request.Request(agent.wsdl_url())
content = urllib.request.urlopen(req).read()
root = ElementTree.fromstring(content.decode(encoding='UTF-8'))
for operation_wsdl in root.findall('.//{http://schemas.xmlsoap.org/wsdl/}portType/{http://schemas.xmlsoap.org/wsdl/}operation'):
# Define input message
input_element = operation_wsdl.find('{http://schemas.xmlsoap.org/wsdl/}input')
input_element_str = input_element.attrib['message'][input_element.attrib['message'].find(':')+1:]
input_message_element = root.find('.//{http://schemas.xmlsoap.org/wsdl/}message[@name="' + input_element_str + '"]/{http://schemas.xmlsoap.org/wsdl/}part')
input_message_element_str = input_message_element.attrib['element'][input_message_element.attrib['element'].find(':')+1:]
# Define output message
output_element = operation_wsdl.find('{http://schemas.xmlsoap.org/wsdl/}output')
if output_element != None:
output_element_str = output_element.attrib['message'][output_element.attrib['message'].find(':')+1:]
output_message_element = root.find('.//{http://schemas.xmlsoap.org/wsdl/}message[@name="' + output_element_str + '"]/{http://schemas.xmlsoap.org/wsdl/}part')
output_message_element_str = output_message_element.attrib['element'][output_message_element.attrib['element'].find(':')+1:]
else:
output_message_element_str = None
operation = Operation(agent=agent, name=operation_wsdl.attrib['name'], input_message=input_message_element_str, output_message=output_message_element_str)
operation.save()
create_default_response(operation)
except Exception:
create_default_response(agent)
else:
create_default_response(agent)
return agent
def create_default_response(provider):
parent_key = re.sub(r'class (.+\.)+', '', re.sub('[\'<>]', '', str(type(provider)))).lower()
if provider.get_content_type() == CONTENT_TYPE_XML:
default_label = XML_DEFAULT_LABEL
default_response = XML_DEFAULT_RESPONSE
elif provider.get_content_type() == CONTENT_TYPE_JSON:
default_label = JSON_DEFAULT_LABEL
default_response = JSON_DEFAULT_RESPONSE
else:
default_label = UNKNOWN_DEFAULT_LABEL
default_response = UNKNOWN_DEFAULT_RESPONSE
response_args = {parent_key: provider, 'label': default_label, 'content': default_response}
response = Response(**response_args)
response.save()
def responder(agent, request):
logger.debug("Starting responder...")
response_method = None
    # Evaluate the request against the agent's Operations, if any exist
logger.debug("Evaluate request against operations to get response method...")
if agent.operations.count() > 0:
for operation in agent.operations.all():
if operation.belongs_to(request):
response_method = MockResponderFactory.get_mock_responder(operation)
break
    # Gets response_method based on the Agent, if no Operation matched the request before
    logger.debug("Get response method based on agent, if no operation matches the request...")
if response_method == None:
response_method = MockResponderFactory.get_mock_responder(agent)
logger.debug("Get response based on mock responder type...")
response = response_method.get() if isinstance(response_method, SimpleMockResponder) else response_method.get(request)
context = Context()
context['request'] = request
logger.debug("Build response based on agent content type...")
if request.body != b'':
body = request.body.decode(encoding='UTF-8')
if agent.content_type == CONTENT_TYPE_XML:
context['body'] = xmltodict.parse(body, process_namespaces=True)
elif agent.content_type == CONTENT_TYPE_JSON:
context['body'] = json.loads(body)
else:
context['body'] = body
logger.debug("Replies apllying django template...")
return HttpResponse(response.template().render(context), status=response.http_code, content_type=agent.content_type)
def json_agent_locator(agent_data):
url = '{}://{}:{}{}'.format(agent_data['protocol'], agent_data['host'], agent_data['port'], agent_data['path'])
agents = Agent.objects.filter(method=agent_data['method'], protocol=agent_data['protocol'], host=agent_data['host'], port=agent_data['port'])
path_list = agent_data['path'].split('/')
while len(path_list) > 1:
agents_list = agents.filter(path__startswith='/'.join(path_list))
for a in agents_list:
if a.match(url):
return a
path_list.pop()
return None
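# A minimal usage sketch of how these helpers chain together in a catch-all
# Django view (the view itself is an assumption, not part of this module):
#
#   def mock_endpoint(request):
#       agent_data = extract_agent_data_from_request(request)
#       agent = json_agent_locator(agent_data)
#       if agent is None:
#           agent = create_and_return_agent(agent_data)
#       return responder(agent, request)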
| apache-2.0 | 4,762,224,527,407,931,000 | 48.068182 | 195 | 0.64428 | false |
hradec/gaffer | python/GafferUI/PlugLayout.py | 1 | 28367 | ##########################################################################
#
# Copyright (c) 2014, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2014, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import re
import sys
import functools
import collections
import Gaffer
import GafferUI
from Qt import QtWidgets
## A class for laying out widgets to represent all the plugs held on a particular parent.
#
# Per-plug metadata support :
#
# - "<layoutName>:index" controls ordering of plugs within the layout
# - "<layoutName>:section" places the plug in a named section of the layout
# - "<layoutName>:divider" specifies whether or not a plug should be followed by a divider
# - "<layoutName>:activator" the name of an activator to control editability
# - "<layoutName>:visibilityActivator" the name of an activator to control visibility
# - "<layoutName>:accessory" groups as an accessory to the previous widget
# - "<layoutName>:width" gives a specific width to the plug's widget
#
# Per-parent metadata support :
#
# - "<layoutName>:section:sectionName:summary" dynamic metadata entry returning a
#   string to be used as a summary for the section.
# - "<layoutName>:section:sectionName:collapsed" boolean indicating whether or
# not a section should be collapsed initially.
# - "<layoutName>:activator:activatorName" a dynamic boolean metadata entry to control
# the activation of plugs within the layout
# - "<layoutName>:activators" a dynamic metadata entry returning a CompoundData of booleans
# for several named activators.
#
# ## Custom widgets
#
# Custom widgets unassociated with any specific plugs may also be added to plug layouts.
# This can be useful when customising user interfaces for a particular facility - for instance
# to display asset management information for each node.
#
# A custom widget is specified using parent metadata entries starting with
# "<layoutName>:customWidget:Name:" prefixes, where "Name" is a unique identifier for the
# custom widget :
#
# - "<layoutName>:customWidget:Name:widgetType" specifies a string containing the fully qualified
# name of a python callable which will be used to create the widget. This callable will be passed
# the same parent GraphComponent (node or plug) that the PlugLayout is being created for.
# - "<layoutName>:customWidget:Name:*" as for the standard per-plug "<layoutName>:*" metadata, so custom
# widgets may be assigned to a section, reordered, given activators etc.
#
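# A minimal sketch of driving the layout from metadata (MyNode and the plug
# names below are illustrative only) :
#
#   Gaffer.Metadata.registerNode(
#       MyNode,
#       "layout:activator:clampEnabled", lambda node : node["enableClamp"].getValue(),
#       plugs = {
#           "clamp" : [
#               "layout:section", "Settings.Advanced",
#               "layout:activator", "clampEnabled",
#           ],
#       }
#   )
#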
class PlugLayout( GafferUI.Widget ) :
# We use this when we can't find a ScriptNode to provide the context.
__fallbackContext = Gaffer.Context()
def __init__( self, parent, orientation = GafferUI.ListContainer.Orientation.Vertical, layoutName = "layout", rootSection = "", embedded = False, **kw ) :
assert( isinstance( parent, ( Gaffer.Node, Gaffer.Plug ) ) )
# embedded indicates that the PlugLayout is embedded in another layout
# which affects how the widget is built
self.__embedded = embedded
self.__layout = _TabLayout( orientation, embedded = embedded ) if isinstance( parent, Gaffer.Node ) and not rootSection else _CollapsibleLayout( orientation )
GafferUI.Widget.__init__( self, self.__layout, **kw )
self.__parent = parent
self.__readOnly = False
self.__layoutName = layoutName
# not to be confused with __rootSection, which holds an actual _Section object
self.__rootSectionName = rootSection
# we need to connect to the childAdded/childRemoved signals on
# the parent so we can update the ui when plugs are added and removed.
parent.childAddedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
parent.childRemovedSignal().connect( Gaffer.WeakMethod( self.__childAddedOrRemoved ), scoped = False )
# since our layout is driven by metadata, we must respond dynamically
# to changes in that metadata.
Gaffer.Metadata.plugValueChangedSignal( self.__node() ).connect( Gaffer.WeakMethod( self.__plugMetadataChanged ), scoped = False )
# and since our activations are driven by plug values, we must respond
# when the plugs are dirtied.
self.__node().plugDirtiedSignal().connect( Gaffer.WeakMethod( self.__plugDirtied ), scoped = False )
# frequently events that trigger a ui update come in batches, so we
# perform the update lazily using a LazyMethod. the dirty variables
# keep track of the work we'll need to do in the update.
self.__layoutDirty = True
self.__activationsDirty = True
self.__summariesDirty = True
# mapping from layout item to widget, where the key is either a plug or
# the name of a custom widget (as returned by layoutOrder()).
self.__widgets = {}
self.__rootSection = _Section( self.__parent )
# set up an appropriate default context in which to view the plugs.
scriptNode = self.__node() if isinstance( self.__node(), Gaffer.ScriptNode ) else self.__node().scriptNode()
self.setContext( scriptNode.context() if scriptNode is not None else self.__fallbackContext )
# Build the layout
self.__update()
def getReadOnly( self ) :
return self.__readOnly
def setReadOnly( self, readOnly ) :
if readOnly == self.getReadOnly() :
return
self.__readOnly = readOnly
for widget in self.__widgets.values() :
self.__applyReadOnly( widget, self.__readOnly )
def getContext( self ) :
return self.__context
def setContext( self, context ) :
self.__context = context
self.__contextChangedConnection = self.__context.changedSignal().connect( Gaffer.WeakMethod( self.__contextChanged ) )
for widget in self.__widgets.values() :
self.__applyContext( widget, context )
## Returns a PlugValueWidget representing the specified child plug.
def plugValueWidget( self, childPlug ) :
self.__updateLazily.flush( self )
w = self.__widgets.get( childPlug, None )
if w is None :
return w
elif isinstance( w, GafferUI.PlugValueWidget ) :
return w
else :
return w.plugValueWidget()
## Returns the custom widget registered with the specified name.
def customWidget( self, name ) :
self.__updateLazily.flush( self )
return self.__widgets.get( name )
## Returns the list of section names that will be used when laying
# out the plugs of the specified parent. The sections are returned
# in the order in which they will be created.
@classmethod
def layoutSections( cls, parent, includeCustomWidgets = False, layoutName = "layout" ) :
d = collections.OrderedDict()
for item in cls.layoutOrder( parent, includeCustomWidgets, layoutName = layoutName ) :
sectionPath = cls.__staticSectionPath( item, parent, layoutName )
sectionName = ".".join( sectionPath )
d[sectionName] = 1
return list( d.keys() )
## Returns the child plugs of the parent in the order in which they
# will be laid out, based on "<layoutName>:index" Metadata entries. If
# includeCustomWidgets is True, then the positions of custom widgets
# are represented by the appearance of the names of the widgets as
# strings within the list. If a section name is specified, then the
# result will be filtered to include only items in that section.
@classmethod
def layoutOrder( cls, parent, includeCustomWidgets = False, section = None, layoutName = "layout", rootSection = "" ) :
items = parent.children( Gaffer.Plug )
items = [ plug for plug in items if not plug.getName().startswith( "__" ) ]
if includeCustomWidgets :
for name in Gaffer.Metadata.registeredValues( parent ) :
m = re.match( layoutName + ":customWidget:(.+):widgetType", name )
if m and cls.__metadataValue( parent, name ) :
items.append( m.group( 1 ) )
itemsAndIndices = [ list( x ) for x in enumerate( items ) ]
for itemAndIndex in itemsAndIndices :
index = cls.__staticItemMetadataValue( itemAndIndex[1], "index", parent, layoutName )
if index is not None :
index = index if index >= 0 else sys.maxsize + index
itemAndIndex[0] = index
itemsAndIndices.sort( key = lambda x : x[0] )
if section is not None :
sectionPath = section.split( "." ) if section else []
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName ) == sectionPath ]
if rootSection :
rootSectionPath = rootSection.split( "." if rootSection else [] )
itemsAndIndices = [ x for x in itemsAndIndices if cls.__staticSectionPath( x[1], parent, layoutName )[:len(rootSectionPath)] == rootSectionPath ]
return [ x[1] for x in itemsAndIndices ]
@GafferUI.LazyMethod()
def __updateLazily( self ) :
self.__update()
def __update( self ) :
if self.__layoutDirty :
self.__updateLayout()
self.__layoutDirty = False
if self.__activationsDirty :
self.__updateActivations()
self.__activationsDirty = False
if self.__summariesDirty :
self.__updateSummariesWalk( self.__rootSection )
self.__summariesDirty = False
# delegate to our layout class to create a concrete
# layout from the section definitions.
self.__layout.update( self.__rootSection )
def __updateLayout( self ) :
# get the items to lay out - these are a combination
# of plugs and strings representing custom widgets.
items = self.layoutOrder( self.__parent, includeCustomWidgets = True, layoutName = self.__layoutName, rootSection = self.__rootSectionName )
# ditch widgets we don't need any more
itemsSet = set( items )
self.__widgets = { k : v for k, v in self.__widgets.items() if k in itemsSet }
# ditch widgets whose metadata type has changed - we must recreate these.
self.__widgets = {
k : v for k, v in self.__widgets.items()
if isinstance( k, str ) or v is not None and Gaffer.Metadata.value( k, "plugValueWidget:type" ) == v.__plugValueWidgetType
}
# make (or reuse existing) widgets for each item, and sort them into
# sections.
rootSectionDepth = self.__rootSectionName.count( "." ) + 1 if self.__rootSectionName else 0
self.__rootSection.clear()
for item in items :
if item not in self.__widgets :
if isinstance( item, Gaffer.Plug ) :
widget = self.__createPlugWidget( item )
else :
widget = self.__createCustomWidget( item )
self.__widgets[item] = widget
else :
widget = self.__widgets[item]
if widget is None :
continue
section = self.__rootSection
for sectionName in self.__sectionPath( item )[rootSectionDepth:] :
section = section.subsection( sectionName )
if len( section.widgets ) and self.__itemMetadataValue( item, "accessory" ) :
if isinstance( section.widgets[-1], _AccessoryRow ) :
section.widgets[-1].append( widget )
else :
row = _AccessoryRow()
row.append( section.widgets[-1] )
row.append( widget )
section.widgets[-1] = row
else :
section.widgets.append( widget )
if self.__itemMetadataValue( item, "divider" ) :
section.widgets.append( GafferUI.Divider(
GafferUI.Divider.Orientation.Horizontal if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Vertical else GafferUI.Divider.Orientation.Vertical
) )
def __updateActivations( self ) :
with self.getContext() :
# Must scope the context when getting activators, because they are typically
# computed from the plug values, and may therefore trigger a compute.
activators = self.__metadataValue( self.__parent, self.__layoutName + ":activators" ) or {}
activators = { k : v.value for k, v in activators.items() } # convert CompoundData of BoolData to dict of booleans
def active( activatorName ) :
result = True
if activatorName :
result = activators.get( activatorName )
if result is None :
with self.getContext() :
result = self.__metadataValue( self.__parent, self.__layoutName + ":activator:" + activatorName )
result = result if result is not None else False
activators[activatorName] = result
return result
for item, widget in self.__widgets.items() :
if widget is not None :
widget.setEnabled( active( self.__itemMetadataValue( item, "activator" ) ) )
widget.setVisible( active( self.__itemMetadataValue( item, "visibilityActivator" ) ) )
def __updateSummariesWalk( self, section ) :
with self.getContext() :
# Must scope the context because summaries are typically
# generated from plug values, and may therefore trigger
# a compute.
section.summary = self.__metadataValue( self.__parent, self.__layoutName + ":section:" + section.fullName + ":summary" ) or ""
section.valuesChanged = False
for subsection in section.subsections.values() :
self.__updateSummariesWalk( subsection )
# If one of our subsections has changed, we don't need to
# check any of our own plugs, we just propagate the flag.
if subsection.valuesChanged :
section.valuesChanged = True
if not section.valuesChanged :
# Check our own widgets, this is a little icky, the alternative
# would be to iterate our items, reverse engineer the section
# then update that, but this allows us to early-out much sooner.
for widget in section.widgets :
if self.__widgetPlugValuesChanged( widget ) :
section.valuesChanged = True
break
@staticmethod
def __widgetPlugValuesChanged( widget ) :
plugs = []
if isinstance( widget, GafferUI.PlugWidget ) :
widget = widget.plugValueWidget()
if hasattr( widget, 'getPlugs' ) :
plugs = widget.getPlugs()
for plug in plugs :
if PlugLayout.__plugValueChanged( plug ) :
return True
return False
@staticmethod
def __plugValueChanged( plug ) :
## \todo This mirrors LabelPlugValueWidget. This doesn't handle child plug defaults/connections
# properly. We need to improve NodeAlgo when we have the next API break.
valueChanged = plug.getInput() is not None
if not valueChanged and isinstance( plug, Gaffer.ValuePlug ) :
if Gaffer.NodeAlgo.hasUserDefault( plug ) :
valueChanged = not Gaffer.NodeAlgo.isSetToUserDefault( plug )
else :
valueChanged = not plug.isSetToDefault()
return valueChanged
def __import( self, path ) :
path = path.split( "." )
result = __import__( path[0] )
for n in path[1:] :
result = getattr( result, n )
return result
def __createPlugWidget( self, plug ) :
result = GafferUI.PlugValueWidget.create( plug )
if result is None :
return result
width = self.__itemMetadataValue( plug, "width" )
if width is not None :
result._qtWidget().setFixedWidth( width )
if result._qtWidget().layout() is not None :
result._qtWidget().layout().setSizeConstraint( QtWidgets.QLayout.SetDefaultConstraint )
if isinstance( result, GafferUI.PlugValueWidget ) and not result.hasLabel() and self.__itemMetadataValue( plug, "label" ) != "" :
result = GafferUI.PlugWidget( result )
if self.__layout.orientation() == GafferUI.ListContainer.Orientation.Horizontal :
# undo the annoying fixed size the PlugWidget has applied
# to the label.
## \todo Shift all the label size fixing out of PlugWidget and just fix the
# widget here if we're in a vertical orientation.
QWIDGETSIZE_MAX = 16777215 # qt #define not exposed by PyQt or PySide
result.labelPlugValueWidget().label()._qtWidget().setFixedWidth( QWIDGETSIZE_MAX )
self.__applyReadOnly( result, self.getReadOnly() )
self.__applyContext( result, self.getContext() )
# Store the metadata value that controlled the type created, so we can compare to it
# in the future to determine if we can reuse the widget.
result.__plugValueWidgetType = Gaffer.Metadata.value( plug, "plugValueWidget:type" )
return result
def __createCustomWidget( self, name ) :
widgetType = self.__itemMetadataValue( name, "widgetType" )
widgetClass = self.__import( widgetType )
result = widgetClass( self.__parent )
self.__applyContext( result, self.getContext() )
return result
def __node( self ) :
return self.__parent if isinstance( self.__parent, Gaffer.Node ) else self.__parent.node()
@classmethod
def __metadataValue( cls, plugOrNode, name ) :
return Gaffer.Metadata.value( plugOrNode, name )
@classmethod
def __staticItemMetadataValue( cls, item, name, parent, layoutName ) :
if isinstance( item, Gaffer.Plug ) :
v = Gaffer.Metadata.value( item, layoutName + ":" + name )
if v is None and name in ( "divider", "label" ) :
# Backwards compatibility with old unprefixed metadata names.
v = Gaffer.Metadata.value( item, name )
return v
else :
return cls.__metadataValue( parent, layoutName + ":customWidget:" + item + ":" + name )
def __itemMetadataValue( self, item, name ) :
return self.__staticItemMetadataValue( item, name, parent = self.__parent, layoutName = self.__layoutName )
@classmethod
def __staticSectionPath( cls, item, parent, layoutName ) :
m = None
if isinstance( parent, Gaffer.Node ) :
# Backwards compatibility with old metadata entry
## \todo Remove
m = cls.__staticItemMetadataValue( item, "nodeUI:section", parent, layoutName )
if m == "header" :
m = ""
if m is None :
m = cls.__staticItemMetadataValue( item, "section", parent, layoutName )
return m.split( "." ) if m else []
def __sectionPath( self, item ) :
return self.__staticSectionPath( item, parent = self.__parent, layoutName = self.__layoutName )
def __childAddedOrRemoved( self, *unusedArgs ) :
# typically many children are added and removed at once, so
		# we do a lazy update so we can batch up several changes into one
		# update once the upheaval is over.
self.__layoutDirty = True
self.__updateLazily()
def __applyReadOnly( self, widget, readOnly ) :
if widget is None :
return
if hasattr( widget, "setReadOnly" ) :
widget.setReadOnly( readOnly )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setReadOnly( readOnly )
widget.plugValueWidget().setReadOnly( readOnly )
elif hasattr( widget, "plugValueWidget" ) :
widget.plugValueWidget().setReadOnly( readOnly )
def __applyContext( self, widget, context ) :
if hasattr( widget, "setContext" ) :
widget.setContext( context )
elif isinstance( widget, GafferUI.PlugWidget ) :
widget.labelPlugValueWidget().setContext( context )
widget.plugValueWidget().setContext( context )
elif hasattr( widget, "plugValueWidget" ) :
widget.plugValueWidget().setContext( context )
def __plugMetadataChanged( self, plug, key, reason ) :
if plug != self.__parent and plug.parent() != self.__parent :
return
if key in (
"divider",
self.__layoutName + ":divider",
self.__layoutName + ":index",
self.__layoutName + ":section",
self.__layoutName + ":accessory",
"plugValueWidget:type"
) :
# we often see sequences of several metadata changes - so
# we schedule a lazy update to batch them into one ui update.
self.__layoutDirty = True
self.__updateLazily()
elif re.match( self.__layoutName + ":section:.*:summary", key ) :
self.__summariesDirty = True
self.__updateLazily()
def __plugDirtied( self, plug ) :
if not self.visible() or plug.direction() != plug.Direction.In :
return
self.__activationsDirty = True
self.__summariesDirty = True
self.__updateLazily()
def __contextChanged( self, context, name ) :
self.__activationsDirty = True
self.__summariesDirty = True
self.__updateLazily()
class _AccessoryRow( GafferUI.ListContainer ) :
def __init__( self, **kw ) :
GafferUI.ListContainer.__init__( self, GafferUI.ListContainer.Orientation.Horizontal, spacing = 4, **kw )
# The _Section class provides a simple abstract representation of a hierarchical
# layout. Each section contains a list of widgets to be displayed in that section,
# and an OrderedDict of named subsections.
class _Section( object ) :
def __init__( self, _parent, _fullName = "" ) :
self.__parent = _parent
self.fullName = _fullName
self.clear()
def subsection( self, name ) :
result = self.subsections.get( name )
if result is not None :
return result
result = _Section(
self.__parent,
self.fullName + "." + name if self.fullName else name
)
self.subsections[name] = result
return result
def clear( self ) :
self.widgets = []
self.subsections = collections.OrderedDict()
self.summary = ""
self.valuesChanged = False
def saveState( self, name, value ) :
Gaffer.Metadata.registerValue( self.__parent, self.__stateName( name ), value, persistent = False )
def restoreState( self, name ) :
return Gaffer.Metadata.value( self.__parent, self.__stateName( name ) )
def __stateName( self, name ) :
return "layout:section:" + self.fullName + ":" + name
# The PlugLayout class deals with all the details of plugs, metadata and
# signals to define an abstract layout in terms of _Sections. It then
# delegates to the _Layout classes to create an actual layout in terms
# of Widgets. This allows us to present different layouts based on whether
# or the parent is a node (tabbed layout) or a plug (collapsible layout).
class _Layout( GafferUI.Widget ) :
def __init__( self, topLevelWidget, orientation, **kw ) :
GafferUI.Widget.__init__( self, topLevelWidget, **kw )
self.__orientation = orientation
def orientation( self ) :
return self.__orientation
def update( self, section ) :
raise NotImplementedError
class _TabLayout( _Layout ) :
def __init__( self, orientation, embedded = False, **kw ) :
self.__embedded = embedded
self.__mainColumn = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Vertical )
_Layout.__init__( self, self.__mainColumn, orientation, **kw )
with self.__mainColumn :
self.__widgetsColumn = GafferUI.ListContainer( self.orientation(), spacing = 4, borderWidth = 4 )
self.__tabbedContainer = GafferUI.TabbedContainer()
# if the TabLayout is embedded, we want to restrict the maximum width/height depending on the orientation
if self.__embedded :
if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Maximum ) )
else :
self.__tabbedContainer._qtWidget().setSizePolicy( QtWidgets.QSizePolicy( QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Expanding ) )
self.__currentTabChangedConnection = self.__tabbedContainer.currentChangedSignal().connect(
Gaffer.WeakMethod( self.__currentTabChanged )
)
def update( self, section ) :
self.__section = section
self.__widgetsColumn[:] = section.widgets
existingTabs = collections.OrderedDict()
for tab in self.__tabbedContainer[:] :
existingTabs[self.__tabbedContainer.getLabel( tab )] = tab
updatedTabs = collections.OrderedDict()
for name, subsection in section.subsections.items() :
tab = existingTabs.get( name )
if tab is None :
# Use scroll bars only when the TabLayout is not embedded
if self.__embedded :
tab = GafferUI.Frame( borderWidth = 0, borderStyle = GafferUI.Frame.BorderStyle.None_ )
else :
tab = GafferUI.ScrolledContainer( borderWidth = 8 )
if self.orientation() == GafferUI.ListContainer.Orientation.Vertical :
tab.setHorizontalMode( GafferUI.ScrollMode.Never )
else :
tab.setVerticalMode( GafferUI.ScrollMode.Never )
tab.setChild( _CollapsibleLayout( self.orientation() ) )
tab.getChild().update( subsection )
updatedTabs[name] = tab
if existingTabs.keys() != updatedTabs.keys() :
with Gaffer.BlockedConnection( self.__currentTabChangedConnection ) :
del self.__tabbedContainer[:]
for name, tab in updatedTabs.items() :
self.__tabbedContainer.append( tab, label = name )
for index, subsection in enumerate( section.subsections.values() ) :
## \todo Consider how/if we should add a public tooltip API to TabbedContainer.
self.__tabbedContainer._qtWidget().setTabToolTip( index, subsection.summary )
if not len( existingTabs ) :
currentTabIndex = self.__section.restoreState( "currentTab" ) or 0
if currentTabIndex < len( self.__tabbedContainer ) :
self.__tabbedContainer.setCurrent( self.__tabbedContainer[currentTabIndex] )
self.__widgetsColumn.setVisible( len( section.widgets ) )
self.__tabbedContainer.setVisible( len( self.__tabbedContainer ) )
def __currentTabChanged( self, tabbedContainer, currentTab ) :
self.__section.saveState( "currentTab", tabbedContainer.index( currentTab ) )
class _CollapsibleLayout( _Layout ) :
def __init__( self, orientation, **kw ) :
self.__column = GafferUI.ListContainer( orientation, spacing = 4 )
_Layout.__init__( self, self.__column, orientation, **kw )
self.__collapsibles = {} # Indexed by section name
def update( self, section ) :
widgets = list( section.widgets )
for name, subsection in section.subsections.items() :
collapsible = self.__collapsibles.get( name )
if collapsible is None :
collapsible = GafferUI.Collapsible( name, _CollapsibleLayout( self.orientation() ), collapsed = True )
# Hack to add margins at the top and bottom but not at the sides.
## \todo This is exposed in the public API via the borderWidth
# parameter to the Collapsible. That parameter sucks because a) it
# makes a margin rather than a border, and b) it doesn't allow per-edge
# control. Either make that make sense, or remove it and find a way
# of deferring all this to the style.
collapsible._qtWidget().layout().setContentsMargins( 0, 2, 0, 2 )
collapsible.setCornerWidget( GafferUI.Label(), True )
## \todo This is fighting the default sizing applied in the Label constructor. Really we need a standard
# way of controlling size behaviours for all widgets in the public API.
collapsible.getCornerWidget()._qtWidget().setSizePolicy( QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Fixed )
if subsection.restoreState( "collapsed" ) is False :
collapsible.setCollapsed( False )
collapsible.stateChangedSignal().connect(
functools.partial( Gaffer.WeakMethod( self.__collapsibleStateChanged ), subsection = subsection ),
scoped = False
)
self.__collapsibles[name] = collapsible
collapsible.getChild().update( subsection )
collapsible.getCornerWidget().setText(
"<small>" + " ( " + subsection.summary + " )</small>" if subsection.summary else ""
)
currentValueChanged = collapsible._qtWidget().property( "gafferValueChanged" )
if subsection.valuesChanged != currentValueChanged :
collapsible._qtWidget().setProperty( "gafferValueChanged", GafferUI._Variant.toVariant( subsection.valuesChanged ) )
collapsible._repolish()
widgets.append( collapsible )
self.__column[:] = widgets
def __collapsibleStateChanged( self, collapsible, subsection ) :
subsection.saveState( "collapsed", collapsible.getCollapsed() )
| bsd-3-clause | -3,389,370,072,250,530,000 | 35.936198 | 165 | 0.706913 | false |
UCSBarchlab/PyRTL | tests/rtllib/test_barrel.py | 1 | 2448 | import unittest
import random
import pyrtl
from pyrtl.rtllib import barrel
class TestBarrel(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# # this is to ensure reproducibility
# random.seed(777906374)
def setUp(self):
pyrtl.reset_working_block()
self.inp_val = pyrtl.Input(8, 'inp_val')
self.inp_shift = pyrtl.Input(2, 'inp_shift')
self.out_zeros = pyrtl.Output(18, 'out_zeros')
self.out_ones = pyrtl.Output(18, 'out_ones')
def test_shift_left(self):
random.seed(777906373)
zero = pyrtl.Const(0, 1)
one = pyrtl.Const(1, 1)
self.out_zeros <<= barrel.barrel_shifter(self.inp_val, zero, one, self.inp_shift)
self.out_ones <<= barrel.barrel_shifter(self.inp_val, one, one, self.inp_shift)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
vals = [random.randint(0, 20) for v in range(20)]
shifts = [random.randint(0, 3) for s in range(20)]
for i in range(len(vals)):
sim.step({
self.inp_val: vals[i],
self.inp_shift: shifts[i]
})
base_sum = vals[i] * pow(2, shifts[i])
self.assertEqual(sim.inspect(self.out_zeros), base_sum)
self.assertEqual(sim.inspect(self.out_ones), base_sum + pow(2, shifts[i]) - 1)
def test_shift_right(self):
random.seed(777906374)
zero = pyrtl.Const(0, 1)
one = pyrtl.Const(1, 1)
self.out_zeros <<= barrel.barrel_shifter(self.inp_val, zero, zero, self.inp_shift)
self.out_ones <<= barrel.barrel_shifter(self.inp_val, one, zero, self.inp_shift)
sim_trace = pyrtl.SimulationTrace()
sim = pyrtl.Simulation(tracer=sim_trace)
vals = [random.randint(0, 20) for v in range(20)]
shifts = [random.randint(0, 3) for s in range(20)]
for i in range(len(vals)):
sim.step({
self.inp_val: vals[i],
self.inp_shift: shifts[i]
})
base_sum = int(vals[i] / pow(2, shifts[i]))
self.assertEqual(sim.inspect(self.out_zeros), base_sum, "failed on value %d" % vals[i])
extra_sum = sum([pow(2, len(self.inp_val) - b - 1) for b in range(shifts[i])])
self.assertEqual(sim.inspect(self.out_ones), base_sum + extra_sum,
"failed on value %d" % vals[i])
| bsd-3-clause | -2,724,166,392,009,105,000 | 40.491525 | 99 | 0.574346 | false |
leth/nose2 | nose2/plugins/loader/discovery.py | 1 | 8390 | """
Discovery-based test loader.
This plugin implements nose2's automatic test module discovery. It
looks for test modules in packages and directories whose names start
with 'test', then fires the :func:`loadTestsFromModule` hook for each
one to allow other plugins to load the actual tests.
It also fires :func:`handleFile` for every file that it sees, and
:func:`matchPath` for every python module, to allow other plugins to
load tests from other kinds of files and to influence which modules
are examined for tests.
"""
# Adapted from unittest2/loader.py from the unittest2 plugins branch.
# This module contains some code copied from unittest2/loader.py and other
# code developed in reference to that module and others within unittest2.
# unittest2 is Copyright (c) 2001-2010 Python Software Foundation; All
# Rights Reserved. See: http://docs.python.org/license.html
from fnmatch import fnmatch
import logging
import os
import sys
from nose2 import events, util
__unittest = True
log = logging.getLogger(__name__)
class DiscoveryLoader(events.Plugin):
"""Loader plugin that can discover tests"""
alwaysOn = True
configSection = 'discovery'
def registerInSubprocess(self, event):
event.pluginClasses.append(self.__class__)
def loadTestsFromName(self, event):
"""Load tests from module named by event.name"""
# turn name into path or module name
# fire appropriate hooks (handle file or load from module)
if event.module:
return
name = event.name
module = None
_, top_level_dir = self._getStartDirs()
try:
# try name as a dotted module name first
__import__(name)
module = sys.modules[name]
except ImportError:
# if that fails, try it as a file or directory
event.extraTests.extend(
self._find_tests(event, name, top_level_dir))
else:
event.extraTests.extend(
self._find_tests_in_module(event, module, top_level_dir))
def loadTestsFromNames(self, event):
"""Discover tests if no test names specified"""
log.debug("Received event %s", event)
if event.names or event.module:
return
event.handled = True # I will handle discovery
return self._discover(event)
def _getStartDirs(self):
start_dir = self.session.startDir
top_level_dir = self.session.topLevelDir
if start_dir is None:
start_dir = '.'
if top_level_dir is None:
top_level_dir = start_dir
if not os.path.isdir(os.path.abspath(start_dir)):
raise OSError("%s is not a directory" % os.path.abspath(start_dir))
is_not_importable = False
start_dir = os.path.abspath(start_dir)
top_level_dir = os.path.abspath(top_level_dir)
if start_dir != top_level_dir:
is_not_importable = not os.path.isfile(
os.path.join(start_dir, '__init__.py'))
if is_not_importable:
raise ImportError(
'Start directory is not importable: %r' % start_dir)
# this is redundant in some cases, but that's ok
self.session.prepareSysPath()
return start_dir, top_level_dir
def _discover(self, event):
loader = event.loader
try:
start_dir, top_level_dir = self._getStartDirs()
except (OSError, ImportError):
_, ev, _ = sys.exc_info()
return loader.suiteClass(
loader.failedLoadTests(self.session.startDir, ev))
log.debug("_discover in %s (%s)", start_dir, top_level_dir)
tests = list(self._find_tests(event, start_dir, top_level_dir))
return loader.suiteClass(tests)
def _find_tests(self, event, start, top_level):
"""Used by discovery. Yields test suites it loads."""
log.debug('_find_tests(%r, %r)', start, top_level)
if start == top_level:
full_path = start
else:
full_path = os.path.join(top_level, start)
if os.path.isdir(start):
for test in self._find_tests_in_dir(
event, full_path, top_level):
yield test
elif os.path.isfile(start):
for test in self._find_tests_in_file(
event, start, full_path, top_level):
yield test
def _find_tests_in_dir(self, event, full_path, top_level):
log.debug("find in dir %s (%s)", full_path, top_level)
dirname = os.path.basename(full_path)
pattern = self.session.testFilePattern
evt = events.HandleFileEvent(
event.loader, dirname, full_path, pattern, top_level)
result = self.session.hooks.handleDir(evt)
if evt.extraTests:
for test in evt.extraTests:
yield test
if evt.handled:
if result:
yield result
return
evt = events.MatchPathEvent(dirname, full_path, pattern)
result = self.session.hooks.matchDirPath(evt)
if evt.handled and not result:
return
for path in os.listdir(full_path):
entry_path = os.path.join(full_path, path)
if os.path.isfile(entry_path):
for test in self._find_tests_in_file(
event, path, entry_path, top_level):
yield test
elif os.path.isdir(entry_path):
if ('test' in path.lower()
or util.ispackage(entry_path)
or path in self.session.libDirs):
for test in self._find_tests(event, entry_path, top_level):
yield test
def _find_tests_in_file(self, event, filename, full_path, top_level):
log.debug("find in file %s (%s)", full_path, top_level)
pattern = self.session.testFilePattern
loader = event.loader
evt = events.HandleFileEvent(
loader, filename, full_path, pattern, top_level)
result = self.session.hooks.handleFile(evt)
if evt.extraTests:
yield loader.suiteClass(evt.extraTests)
if evt.handled:
if result:
yield result
return
if not util.valid_module_name(filename):
# valid Python identifiers only
return
evt = events.MatchPathEvent(filename, full_path, pattern)
result = self.session.hooks.matchPath(evt)
if evt.handled:
if not result:
return
elif not self._match_path(filename, full_path, pattern):
return
# if the test file matches, load it
name = util.name_from_path(full_path)
try:
module = util.module_from_name(name)
except:
yield loader.failedImport(name)
else:
mod_file = os.path.abspath(
getattr(module, '__file__', full_path))
realpath = os.path.splitext(mod_file)[0]
fullpath_noext = os.path.splitext(full_path)[0]
if realpath.lower() != fullpath_noext.lower():
module_dir = os.path.dirname(realpath)
mod_name = os.path.splitext(os.path.basename(full_path))[0]
expected_dir = os.path.dirname(full_path)
msg = ("%r module incorrectly imported from %r. "
"Expected %r. Is this module globally installed?"
)
raise ImportError(
msg % (mod_name, module_dir, expected_dir))
yield loader.loadTestsFromModule(module)
def _find_tests_in_module(self, event, module, top_level_dir):
# only called from loadTestsFromName
yield event.loader.loadTestsFromModule(module)
# may be a package; recurse into __path__ if so
pkgpath = getattr(module, '__path__', None)
if pkgpath:
for entry in pkgpath:
full_path = os.path.abspath(os.path.join(top_level_dir, entry))
for test in self._find_tests_in_dir(
event, full_path, top_level_dir):
yield test
def _match_path(self, path, full_path, pattern):
# override this method to use alternative matching strategy
return fnmatch(path, pattern)
| bsd-2-clause | -5,359,058,218,203,558,000 | 36.792793 | 79 | 0.590942 | false |
edm1/error-aware-demultiplexer | extras/install_pypy3-2.4.0.py | 1 | 4815 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# The MIT License (MIT)
#
# Copyright (c) 2014 Edward Mountjoy
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import argparse
import os
from sys import platform as _platform
import subprocess
from shutil import copyfile
def main():
""" Installs pypy3.
"""
# Parse the command line args
args = parse_arguments()
print("Installing...")
install_pypy(args)
return 0
def install_pypy(args):
""" Function get and install pypy3 binary.
"""
# Make input python 2.7 compatible
if sys.version_info[0] >= 3:
get_input = input
else:
get_input = raw_input
# Confirm paths
exit_msg = "\nExiting. Use --help to view install options."
for msg in ["> Install path: {0} [y/n] ".format(args.dir),
"> Bashrc path: {0} [y/n] ".format(args.bashrc)]:
ret = get_input(msg)
if not ret == "y":
sys.exit(exit_msg)
# Make output folder
make_folders(args.dir)
# Get and extract pypy3
temp_pypy = "pypy3_2.4.0_download.tar.bz2"
cmd = []
if _platform == "linux" or _platform == "linux2":
url = "https://bitbucket.org/pypy/pypy/downloads/pypy3-2.4.0-linux64.tar.bz2"
cmd.append('wget {0} -O {1}'.format(url, temp_pypy))
elif _platform == "darwin":
url = "https://bitbucket.org/pypy/pypy/downloads/pypy3-2.4.0-osx64.tar.bz2"
# OS X
cmd.append('curl -o {0} -L {1}'.format(temp_pypy, url))
# Unzip file
cmd.append('tar -jxvf {0} --strip 1 -C {1}'.format(temp_pypy, args.dir))
# Run command
ret = subprocess.call(";".join(cmd), shell=True)
if not ret == 0:
sys.exit("There was a problem downloading or extracting pypy. Exiting.")
# Remove download
os.remove(temp_pypy)
# Create backup of bashrc
bashrc_backup = "{0}_backup".format(args.bashrc)
if os.path.exists(args.bashrc):
copyfile(args.bashrc, bashrc_backup)
print("\nCreated backup for of {0} at {1}.".format(args.bashrc, bashrc_backup))
# Add pypy3 bin to PATH
pypy_bin = os.path.join(args.dir, "bin")
lines = ["\n# PyPy3 2.4.0 bin PATH - created by aware-demultiplexer",
"export PATH=$PATH:{0}\n".format(pypy_bin)]
with open(args.bashrc, 'a') as out_h:
for line in lines:
out_h.write(line + "\n")
print("Finished installing PyPy3")
def make_folders(outDir):
# Get list of folders that need checking
check_dirs = []
check_dir = outDir
while True: #not :
        # Check that it's not the home dir
try:
if os.path.samefile(check_dir, os.getenv("HOME")):
break
except FileNotFoundError:
pass
# Append file
check_dirs.append(check_dir)
check_dir = os.path.split(check_dir)[0]
# Check those folders
for check_dir in check_dirs[::-1]:
if not os.path.exists(check_dir):
os.makedirs(check_dir)
return 0
def parse_arguments():
""" Will parse the command line arguments arnd return the arg object.
"""
home_dir = os.getenv("HOME")
parser = argparse.ArgumentParser(
description="Installs PyPy3 2.4.0 in user's home directory")
parser.add_argument("--dir", metavar='<installDir>',
help="Directory to install PyPy3 to. (Default: ~/programs/pypy3-2.4.0)",
default=os.path.join(*[home_dir, "programs", "pypy3-2.4.0"]))
parser.add_argument("--bashrc", metavar='<bashrc>',
help=("Location of basrc file (or equivalent) to append pypy3 bin path "
"to. (Default: ~/.bashrc)"),
default=os.path.join(home_dir, ".bashrc"))
# Parse the arguments
return parser.parse_args()
if __name__ == '__main__':
main()
| mit | -2,608,913,167,537,939,000 | 33.640288 | 87 | 0.637799 | false |
Liakoni/pgn2csv | pgn2csv.py | 1 | 8232 | #pgn2csv is free software: you can redistribute it
#and/or modify it under the terms of the GNU General Public License
#as published by the Free Software Foundation, either version 3
#of the License, or (at your option) any later version.
#You should have received a copy of the GNU General Public License
#along with pgn2csv. If not, see <http://www.gnu.org/licenses/>.
#Copyleft 2012 - Author: [email protected]
#version 1.1
import sys
import os
import argparse
from collections import OrderedDict
default_dir = os.getcwd()
'''Process a pgn's tag lines, separating tags from their values and removing characters like quotes, commas, etc.'''
def process_line(line, tags):
tag, value = line.split(' ', 1) #split each line to its 1st whitespace (2nd arg means: 1 split only)
tag = tag[1:] #remove '[' (1st character)
value = value.replace( ',' , '' ) #name fields may contain name and lastname seperated by comma (remove it to keep csv fields intact)
value = value.rstrip( '\r\n' ) #remove newline chars
if tags.has_key(tag): #do not add arbitrary tags
tags[tag] = value[1:-2] #also remove last two chars : "] and the 1st one : "
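#Illustrative example (not part of the original script), assuming tags holds an
#'Event' key the way process_file() initialises it:
# tags = OrderedDict([('Event', ' ')])
# process_line('[Event "FIDE World Cup, Baku"]\r\n', tags)
# tags['Event'] -> 'FIDE World Cup Baku' (brackets, quotes and the comma removed)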
def write_to_file(tags, fout):
global id
for v in tags.values():
fout.write(str(v)+', ')
def initDic(dict):
for key in dict.keys():
dict[key] = ' '
'''sourceFile: the path of source file (pgn) --- outputDir: output directory for (csv files)'''
def process_file(sourceFile, outputDir=default_dir):
print 'proc file .... ', sourceFile
global id
#Creating the output Directory
if os.path.exists(outputDir) == False: #if directory doesn't exist create it
os.makedirs(outputDir) #also creates intermediate directories
#Opening files
sourceFileDirs = sourceFile.split('/') #in case an absolute path is provided
sourceFileName = sourceFileDirs[-1] #take the last part of the path which is the file's name
foutName = os.path.join(outputDir,sourceFileName)
print foutName
try:
fin = open(sourceFile, 'r')
fout = open(foutName, 'w')
#Reading - Writing files
fout.write('Id, Event, Site, Date, Round, White, Black, Result, ECO, WhiteTitle, WhiteElo, WhiteFideId, '+
'BlackTitle, BlackElo, BlackFideId, EventDate, Opening, Variation, Title, Moves')
initItems = [('Id', ' '), ('Event',' '), ('Site',' '), ('Date',' '), ('Round',' '), ('White',' ') , ('Black',' '),
('Result',' '), ('ECO',' '), ('WhiteTitle', ' '), ('WhiteElo', ' ') , ('WhiteFideId',' '), ('BlackTitle', ' '),
('BlackElo',' '), ('BlackFideId',' ') , ('EventDate', ' '), ('Opening', ' '), ('Variation',' ')]
tags = OrderedDict(initItems) #ordered Dictionary creation
flag = True #helping flag to apply [pgn] , [/pgn] pgn4web flags only once for every game in moves section
firstLineFlag = True #helping flag to not apply /pgn tag in 1st line
for line in fin:
if line[0:7] == '[Event ': #previous line/row/entry/game is over go on (one pgn can contain multiple games)
#reaching here means line contains event info
if firstLineFlag == False: #every time we come up with a new game except the 1st time
fout.write(' [/pgn]') #close the pgn4web tag
firstLineFlag = False
flag = True
initDic(tags) #empty dictionary from previous game's values
tags['Id'] = id
id = id + 1
fout.write('\n')
process_line(line, tags) #now we are ready to write the tag's value like we do in every tag
elif line[0].isdigit(): #moves section
write_to_file(tags, fout) #move the tags' values from dictionary to file before writing moves
#before the moves append the white-black info (not in the tags) - feature helping drupal site :P
fout.write(tags['White']+' - '+tags['Black']+', ')
while line not in ['\n', '\r\n'] : #read all the lines containing moves
if flag: #apply tag only before 1st move in each game
fout.write('[pgn] ') #apply tags for pgn4web automation board presentation software
flag = False #do not apply tag after every newline(from pgn file) with moves
a = line.rstrip('\r\n') #remove newline character and '\r' $MS$ b00l$h1t
fout.write( a+' ' ) #write all the moves in one cell
line = fin.next() #read next line
                    if len(line) == 0: #attempt to catch EOF; in practice StopIteration is raised and handled below
break
elif len(line) > 2 : #not empty remember \r\n make len(line) == 2
process_line(line, tags) #ordinary tag, write its value to dictionary(tags)
#end of external for loop
fout.write('[/pgn]') #last tag outside the loop
#Closing the files
fin.close()
fout.close()
except StopIteration:
fout.write('[/pgn]') #because when there is not an empty line at the End Of File we get that exception in line 76: line=fin.next()
fout.close()
fin.close()
except IOError:
print "Sth wrong with Input file: ", sourceFile, " or output directory: ", outputDir
fout.close()
fin.close()
'''sourceDir: the path of the directory containing src files --- outputDir: output directory for (csv files)'''
def process_dir(sourceDir=default_dir, outputDir=default_dir):
for x in os.listdir(sourceDir):
if x == "csvFiles":
continue
path = os.path.join(sourceDir, x)
if os.path.isdir(path): # directory - recursive call
            folderName = str(os.path.basename(path)) # last path component is the folder name
if folderName == "csvFiles":
continue
outDir = os.path.join(outputDir, folderName)
process_dir(path, outDir ) #recursive call to the new path but output Directory is kept to outDir
elif path[-4:] == '.pgn': #if we find a pgn file then we call the process_file func
process_file(path, outputDir)
if __name__ == "__main__":
global id #counter for the 1st column of csv
parser = argparse.ArgumentParser(description='usage: >>python -f file or >>python -d directory')
parser.add_argument('-f', '--file', help='path of the pgn file')
parser.add_argument('-d', '--directory', help='path of the pgn directory(multiple source files)-(default: current directory', default=default_dir)
parser.add_argument('-o', '--outputdir', help='path of output directory (default: current directory)', default=default_dir)
parser.add_argument('-i', '--id', help='starting id counter (default = 1)', default=1)
args = parser.parse_args()
id = int(args.id)
if args.file == None: #no specific file specified
outDir = os.path.join(args.outputdir, 'csvFiles')
if os.path.exists(outDir) == False: #if directory doesn't exist create it
os.mkdir(outDir)
process_dir(args.directory, outDir ) #work with directory
else:
process_file(args.file, args.outputdir) #work with file
print "Conversion completed successfully"
| gpl-3.0 | 1,060,888,842,327,421,300 | 54.248322 | 151 | 0.565233 | false |
L5hunter/TestCoin | qa/rpc-tests/mempool_coinbase_spends.py | 1 | 3854 | #!/usr/bin/env python2
# Copyright (c) 2014 The Testcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test re-org scenarios with a mempool that contains transactions
# that spend (directly or indirectly) coinbase transactions.
#
from test_framework import TestcoinTestFramework
from Testcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
import os
import shutil
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(TestcoinTestFramework):
alert_filename = None # Set by setup_network
def setup_network(self):
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.nodes.append(start_node(1, self.options.tmpdir, args))
connect_nodes(self.nodes[1], 0)
self.is_network_split = False
self.sync_all
def create_tx(self, from_txid, to_address, amount):
inputs = [{ "txid" : from_txid, "vout" : 0}]
outputs = { to_address : amount }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signresult = self.nodes[0].signrawtransaction(rawtx)
assert_equal(signresult["complete"], True)
return signresult["hex"]
def run_test(self):
start_count = self.nodes[0].getblockcount()
# Mine three blocks. After this, nodes[0] blocks
# 101, 102, and 103 are spend-able.
new_blocks = self.nodes[1].setgenerate(True, 4)
self.sync_all()
node0_address = self.nodes[0].getnewaddress()
node1_address = self.nodes[1].getnewaddress()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_101
# 2. Indirect (coinbase spend in chain, child in mempool) : spend_102 and spend_102_1
# 3. Indirect (coinbase and child both in chain) : spend_103 and spend_103_1
# Use invalidatblock to make all of the above coinbase spends invalid (immature coinbase),
# and make sure the mempool code behaves correctly.
b = [ self.nodes[0].getblockhash(n) for n in range(102, 105) ]
coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ]
spend_101_raw = self.create_tx(coinbase_txids[0], node1_address, 50)
spend_102_raw = self.create_tx(coinbase_txids[1], node0_address, 50)
spend_103_raw = self.create_tx(coinbase_txids[2], node0_address, 50)
# Broadcast and mine spend_102 and 103:
spend_102_id = self.nodes[0].sendrawtransaction(spend_102_raw)
spend_103_id = self.nodes[0].sendrawtransaction(spend_103_raw)
self.nodes[0].setgenerate(True, 1)
# Create 102_1 and 103_1:
spend_102_1_raw = self.create_tx(spend_102_id, node1_address, 50)
spend_103_1_raw = self.create_tx(spend_103_id, node1_address, 50)
# Broadcast and mine 103_1:
spend_103_1_id = self.nodes[0].sendrawtransaction(spend_103_1_raw)
self.nodes[0].setgenerate(True, 1)
# ... now put spend_101 and spend_102_1 in memory pools:
spend_101_id = self.nodes[0].sendrawtransaction(spend_101_raw)
spend_102_1_id = self.nodes[0].sendrawtransaction(spend_102_1_raw)
self.sync_all()
assert_equal(set(self.nodes[0].getrawmempool()), set([ spend_101_id, spend_102_1_id ]))
# Use invalidateblock to re-org back and make all those coinbase spends
# immature/invalid:
for node in self.nodes:
node.invalidateblock(new_blocks[0])
self.sync_all()
# mempool should be empty.
assert_equal(set(self.nodes[0].getrawmempool()), set())
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| mit | -2,353,289,480,723,677,000 | 40 | 98 | 0.656201 | false |
dragly/conan | conans/test/download_test.py | 1 | 4502 | import unittest
from conans.test.tools import TestClient, TestServer
from conans.test.utils.test_files import hello_source_files
from conans.client.manager import CONANFILE
import os
from conans.model.ref import ConanFileReference, PackageReference
from conans.paths import CONAN_MANIFEST, CONANINFO
from conans.util.files import save
from conans.model.manifest import FileTreeManifest
from conans.client.proxy import ConanProxy
myconan1 = """
from conans import ConanFile
import platform
class HelloConan(ConanFile):
name = "Hello"
version = "1.2.1"
"""
class DownloadTest(unittest.TestCase):
def complete_test(self):
""" basic installation of a new conans
"""
servers = {}
        # All can write (to avoid authentication until we mock user_io)
test_server = TestServer([("*/*@*/*", "*")], [("*/*@*/*", "*")])
servers["default"] = test_server
conan_digest = FileTreeManifest(123123123, {})
client = TestClient(servers=servers)
client.init_dynamic_vars()
conan_ref = ConanFileReference.loads("Hello/1.2.1@frodo/stable")
reg_folder = client.paths.export(conan_ref)
files = hello_source_files()
client.save(files, path=reg_folder)
client.save({CONANFILE: myconan1,
CONAN_MANIFEST: str(conan_digest),
"include/math/lib1.h": "//copy",
"my_lib/debug/libd.a": "//copy",
"my_data/readme.txt": "//copy"}, path=reg_folder)
package_ref = PackageReference(conan_ref, "fakeid")
package_folder = client.paths.package(package_ref)
save(os.path.join(package_folder, CONANINFO), "info")
save(os.path.join(package_folder, CONAN_MANIFEST), "manifest")
save(os.path.join(package_folder, "include", "lib1.h"), "//header")
save(os.path.join(package_folder, "lib", "my_lib", "libd.a"), "//lib")
save(os.path.join(package_folder, "res", "shares", "readme.txt"),
"//res")
digest_path = client.client_cache.digestfile_package(package_ref)
expected_manifest = FileTreeManifest.create(os.path.dirname(digest_path))
save(os.path.join(package_folder, CONAN_MANIFEST), str(expected_manifest))
client.run("upload %s" % str(conan_ref))
client.run("upload %s -p %s" % (str(conan_ref), package_ref.package_id))
client2 = TestClient(servers=servers)
client2.init_dynamic_vars()
installer = ConanProxy(client2.paths, client2.user_io, client2.remote_manager, "default")
installer.get_recipe(conan_ref)
installer.get_package(package_ref, force_build=False, short_paths=False)
# Check that the output is done in order
lines = [line.strip() for line in str(client2.user_io.out).splitlines()
if line.startswith("Downloading")]
self.assertEqual(lines, ["Downloading conanmanifest.txt",
"Downloading conanfile.py",
"Downloading conan_export.tgz",
"Downloading conanmanifest.txt",
"Downloading conaninfo.txt",
"Downloading conan_package.tgz"
])
reg_path = client2.paths.export(ConanFileReference.loads("Hello/1.2.1/frodo/stable"))
pack_folder = client2.paths.package(package_ref)
# Test the file in the downloaded conans
files = ['CMakeLists.txt',
'my_lib/debug/libd.a',
'hello.cpp',
'hello0.h',
CONANFILE,
CONAN_MANIFEST,
'main.cpp',
'include/math/lib1.h',
'my_data/readme.txt']
for _file in files:
self.assertTrue(os.path.exists(os.path.join(reg_path, _file)))
self.assertTrue(os.path.exists(pack_folder))
# Test the file in the downloaded package
self.assertTrue(os.path.exists(pack_folder))
self.assertTrue(os.path.exists(os.path.join(pack_folder, "include",
"lib1.h")))
self.assertTrue(os.path.exists(os.path.join(pack_folder, "lib",
"my_lib/libd.a")))
self.assertTrue(os.path.exists(os.path.join(pack_folder, "res",
"shares/readme.txt")))
| mit | -1,512,268,174,114,705,200 | 41.074766 | 97 | 0.581964 | false |
hasgeek/boxoffice | boxoffice/mailclient.py | 1 | 6437 | from decimal import Decimal
from flask import render_template
from flask_mail import Message
from html2text import html2text
from premailer import transform as email_transform
from baseframe import __
from . import app, mail, rq
from .models import CURRENCY_SYMBOL, LINE_ITEM_STATUS, Assignee, LineItem, Order
@rq.job('boxoffice')
def send_receipt_mail(
order_id,
subject="Thank you for your order!",
template='order_confirmation_mail.html.jinja2',
):
"""Send buyer a link to fill attendee details and get cash receipt."""
with app.test_request_context():
order = Order.query.get(order_id)
msg = Message(
subject=subject,
recipients=[order.buyer_email],
bcc=[order.organization.contact_email],
)
line_items = (
LineItem.query.filter(
LineItem.order == order, LineItem.status == LINE_ITEM_STATUS.CONFIRMED
)
.order_by(LineItem.line_item_seq.asc())
.all()
)
html = email_transform(
render_template(
template,
order=order,
org=order.organization,
line_items=line_items,
base_url=app.config['BASE_URL'],
currency_symbol=CURRENCY_SYMBOL['INR'],
)
)
msg.html = html
msg.body = html2text(html)
mail.send(msg)
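# Illustrative usage sketch (not part of this module): with the Flask-RQ2 style
# `rq` object imported above, callers would normally enqueue the job instead of
# calling it inline; the order below is hypothetical.
#
#     send_receipt_mail.queue(order.id, subject="Thank you for your order!")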
@rq.job('boxoffice')
def send_participant_assignment_mail(
order_id, item_collection_title, team_member, subject="Please tell us who's coming!"
):
with app.test_request_context():
order = Order.query.get(order_id)
msg = Message(
subject=subject,
recipients=[order.buyer_email],
bcc=[order.organization.contact_email],
)
html = email_transform(
render_template(
'participant_assignment_mail.html.jinja2',
base_url=app.config['BASE_URL'],
order=order,
org=order.organization,
item_collection_title=item_collection_title,
team_member=team_member,
)
)
msg.html = html
msg.body = html2text(html)
mail.send(msg)
@rq.job('boxoffice')
def send_line_item_cancellation_mail(
line_item_id, refund_amount, subject="Ticket Cancellation"
):
with app.test_request_context():
line_item = LineItem.query.get(line_item_id)
item_title = line_item.item.title
order = line_item.order
is_paid = line_item.final_amount > Decimal('0')
msg = Message(
subject=subject,
recipients=[order.buyer_email],
bcc=[order.organization.contact_email],
)
# Only INR is supported as of now
html = email_transform(
render_template(
'line_item_cancellation_mail.html.jinja2',
base_url=app.config['BASE_URL'],
order=order,
line_item=line_item,
item_title=item_title,
org=order.organization,
is_paid=is_paid,
refund_amount=refund_amount,
currency_symbol=CURRENCY_SYMBOL['INR'],
)
)
msg.html = html
msg.body = html2text(html)
mail.send(msg)
@rq.job('boxoffice')
def send_order_refund_mail(order_id, refund_amount, note_to_user):
with app.test_request_context():
order = Order.query.get(order_id)
subject = __(
"{item_collection_title}: Refund for receipt no. {invoice_no}".format(
item_collection_title=order.item_collection.title,
invoice_no=order.invoice_no,
)
)
msg = Message(
subject=subject,
recipients=[order.buyer_email],
bcc=[order.organization.contact_email],
)
# Only INR is supported as of now
html = email_transform(
render_template(
'order_refund_mail.html.jinja2',
base_url=app.config['BASE_URL'],
order=order,
org=order.organization,
note_to_user=note_to_user.html,
refund_amount=refund_amount,
currency_symbol=CURRENCY_SYMBOL['INR'],
)
)
msg.html = html
msg.body = html2text(html)
mail.send(msg)
@rq.job('boxoffice')
def send_ticket_assignment_mail(line_item_id):
"""Send a confirmation email when ticket has been assigned."""
with app.test_request_context():
line_item = LineItem.query.get(line_item_id)
order = line_item.order
subject = order.item_collection.title + ": Here's your ticket"
msg = Message(
subject=subject,
recipients=[line_item.current_assignee.email],
bcc=[order.buyer_email],
)
html = email_transform(
render_template(
'ticket_assignment_mail.html.jinja2',
order=order,
org=order.organization,
line_item=line_item,
base_url=app.config['BASE_URL'],
)
)
msg.html = html
msg.body = html2text(html)
mail.send(msg)
@rq.job('boxoffice')
def send_ticket_reassignment_mail(line_item_id, old_assignee_id, new_assignee_id):
"""Send notice of reassignment of ticket."""
with app.test_request_context():
line_item = LineItem.query.get(line_item_id)
order = line_item.order
old_assignee = Assignee.query.get(old_assignee_id)
new_assignee = Assignee.query.get(new_assignee_id)
subject = (
order.item_collection.title
+ ": Your ticket has been transfered to someone else"
)
msg = Message(
subject=subject, recipients=[old_assignee.email], bcc=[order.buyer_email]
)
html = email_transform(
render_template(
'ticket_reassignment_mail.html.jinja2',
old_assignee=old_assignee,
new_assignee=new_assignee,
order=order,
org=order.organization,
line_item=line_item,
base_url=app.config['BASE_URL'],
)
)
msg.html = html
msg.body = html2text(html)
mail.send(msg)
| agpl-3.0 | -475,470,740,108,540,700 | 31.675127 | 88 | 0.555538 | false |
jricardo27/holiday_planner | holiday_planner/holiday_place/models/place.py | 1 | 4516 | """Place representing a geographical point on the map."""
from django.db import models
from model_utils import Choices
class PlaceTypeMixin:
"""Place type definitions."""
PLACE_TYPES = Choices(
('city', 'City'),
('town', 'Town'),
('beach', 'Beach'),
('cafe', 'Cafe'),
('bar', 'Bar'),
('zoo', 'Zoo'),
('market', 'Market'),
('restaurant', 'Restaurant'),
('island', 'Island'),
('museum', 'Museum'),
('shop', 'Shop'),
('winery', 'Winery'),
('natural_lookout', 'Natural Look Out'),
('man_made_lookout', 'Man Made Look Out'),
('national_park', 'National Park'),
('farmers_market', 'Farmer\'s Market'),
('art_gallery', 'Art Gallery'),
('accommodation_available', 'Accommodation Available'),
('accommodation_booked', 'Accommodation Booked'),
('amusement_park', 'Amusement Park'),
('interactive_park', 'Interactive Park'),
('thematic_park', 'Thematic Park'),
('big_thing', 'Australia\'s Big Thing'),
('botanic_garden', 'Botanic Garden'),
('chinese_garden', 'Chinese Garden'),
('coral_reef', 'Coral Reef'),
('indigenous_centre', 'Indigeneous Centre'),
('neighborhood', 'City Neighborhood'),
('scenic_drive', 'Scenic Drive'),
)
GEOPOLITICAL_PLACES = [
PLACE_TYPES.city,
PLACE_TYPES.town,
]
NATURAL_PLACES = [
PLACE_TYPES.beach,
PLACE_TYPES.natural_lookout,
PLACE_TYPES.national_park,
PLACE_TYPES.coral_reef,
PLACE_TYPES.island,
PLACE_TYPES.scenic_drive,
]
CITY_ATTRACTIONS = [
PLACE_TYPES.restaurant,
PLACE_TYPES.bar,
PLACE_TYPES.cafe,
PLACE_TYPES.shop,
PLACE_TYPES.farmers_market,
PLACE_TYPES.market,
PLACE_TYPES.amusement_park,
PLACE_TYPES.interactive_park,
PLACE_TYPES.thematic_park,
PLACE_TYPES.botanic_garden,
PLACE_TYPES.chinese_garden,
PLACE_TYPES.art_gallery,
PLACE_TYPES.museum,
PLACE_TYPES.man_made_lookout,
PLACE_TYPES.neighborhood,
]
LOOK_OUTS = [
PLACE_TYPES.natural_lookout,
PLACE_TYPES.man_made_lookout,
]
ANIMAL_RELATED = [
PLACE_TYPES.zoo,
]
NATURE_RELATED = [
PLACE_TYPES.national_park,
PLACE_TYPES.botanic_garden,
PLACE_TYPES.chinese_garden,
PLACE_TYPES.coral_reef,
]
ACCOMMODATION_RELATED = [
PLACE_TYPES.accommodation_available,
PLACE_TYPES.accommodation_booked,
]
OTHER = [
PLACE_TYPES.big_thing,
PLACE_TYPES.indigenous_centre,
PLACE_TYPES.winery,
]
class Place(PlaceTypeMixin, models.Model):
"""A place could be a city, a town, an attraction..."""
class Meta:
unique_together = ("name", "type")
name = models.CharField(
verbose_name="Name",
help_text="Name of the place.",
max_length=255,
blank=False,
null=False,
)
longitude = models.FloatField(
verbose_name="Longitude",
)
latitude = models.FloatField(
verbose_name="Latitude",
)
type = models.CharField(
verbose_name="Main Type",
help_text="A type that describe this site.",
choices=PlaceTypeMixin.PLACE_TYPES,
max_length=60,
default=PlaceTypeMixin.PLACE_TYPES.city,
)
short_description = models.TextField(
verbose_name="Short Description",
max_length=500,
blank=True,
)
long_description = models.TextField(
verbose_name="Long Description",
blank=True,
)
located_in = models.ForeignKey(
"self",
verbose_name="City/Town",
help_text="City/Town this place is located",
related_name='children',
on_delete=models.SET_NULL,
blank=True,
null=True,
limit_choices_to={
'type__in': PlaceTypeMixin.GEOPOLITICAL_PLACES,
},
)
def __str__(self):
return '{}[{}]'.format(self.name, self.type_str)
@property
def type_str(self):
"""Display the human readable form for the type."""
return self.PLACE_TYPES[self.type]
@property
def located_in_str(self):
"""Display the human readable form for the location in."""
if self.located_in:
return self.PLACE_TYPES[self.located_in]
return ''
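# Illustrative usage sketch (not part of the app; the names and coordinates are
# made up), using the standard Django ORM:
#
#     sydney = Place.objects.create(
#         name="Sydney", type=Place.PLACE_TYPES.city,
#         latitude=-33.87, longitude=151.21,
#     )
#     Place.objects.create(
#         name="Taronga Zoo", type=Place.PLACE_TYPES.zoo,
#         latitude=-33.84, longitude=151.24, located_in=sydney,
#     )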
| bsd-3-clause | 3,256,515,929,308,662,000 | 25.409357 | 66 | 0.568423 | false |
paveu/api_mocker | apimocker/settings/components/logging.py | 1 | 3197 | from __future__ import absolute_import
import logging
from logstash.formatter import LogstashFormatterVersion1
class SuppressDeprecated(logging.Filter):
def filter(self, record):
WARNINGS_TO_SUPPRESS = [
'RemovedInDjango110Warning',
'RemovedInDjango20Warning',
]
# Return false to suppress message.
return not any([warn in record.getMessage() for warn in WARNINGS_TO_SUPPRESS])
class LogstashFormatter(LogstashFormatterVersion1):
def _stringify(self, s):
if isinstance(s, unicode):
            s = s.encode('utf-8', 'ignore')  # encode (not decode) unicode so str() below cannot raise
return str(s)
def format(self, record):
# Create message dict
message = {
'@timestamp': self.format_timestamp(record.created),
'@version': '1',
'host': self.host,
'pathname': record.pathname,
'tags2': self.tags,
'message': record.getMessage(),
# Extra Fields
'level': record.levelname,
'logger_name': record.name,
'ex': {k: self._stringify(v) for k, v in self.get_extra_fields(record).iteritems()},
}
# If exception, add debug info
if record.exc_info:
message.update(self.get_debug_fields(record))
return self.serialize(message)
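# Illustrative sketch (not part of this module): extra fields passed to a logger
# call are collected by get_extra_fields() and end up under the 'ex' key of the
# message built above; the logger name and fields here are hypothetical.
#
#     import logging
#     logging.getLogger('apimocker.requests').info(
#         'mock served', extra={'mock_id': 42, 'latency_ms': 3.1}
#     )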
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'INFO',
'handlers': ['main', 'sentry'],
},
'formatters': {
'logstash': {
'()': LogstashFormatter,
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
},
'handlers': {
'main': {
'level': 'INFO',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.handlers.SentryHandler',
},
},
'loggers': {
'apimocker.utils.middlewares': {
'handlers': ['main'],
'level': 'INFO',
'propagate': False,
},
'django.db.backends': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': False,
},
'django.request': {
'handlers': ['sentry'],
'level': 'ERROR',
'propagate': False,
},
'raven': {
'level': 'DEBUG',
'handlers': ['sentry'],
'propagate': False,
},
'sentry.errors': {
'level': 'DEBUG',
'handlers': ['sentry'],
'propagate': False,
},
'celery': {
'level': 'WARNING',
'handlers': ['sentry'],
'propagate': False,
},
},
'filters': {
'suppress_deprecated': {
'()': SuppressDeprecated,
}
},
}
if ENVIRONMENT == 'production': # noqa
LOGGING['handlers']['main'] = {
'level': 'INFO',
'class': 'logging.FileHandler',
'filename': '/var/log/app.log',
'formatter': 'logstash',
'filters': ['suppress_deprecated'],
}
| mit | -1,602,555,381,508,861,700 | 26.09322 | 96 | 0.487645 | false |
gpersistence/tstop | python/persistence/CrossValidation.py | 1 | 16269 | #TSTOP
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#!/usr/bin/python
import sys
import argparse
import importlib
from copy import copy
import itertools
import multiprocessing
import numpy
import signal
import Queue
import random
import time
import traceback
from struct import pack, unpack
from persistence.Datatypes.JSONObject import JSONObject, load_data, save_data, cond_get
from persistence.Datatypes.Configuration import Configuration, parse_range, get_filename
from persistence.Datatypes.Segments import Segments, SegmentInfo, max_label
from persistence.Datatypes.PersistenceDiagrams import PersistenceDiagrams
from persistence.Datatypes.TrainTestPartitions import TrainTestPartitions, TrainTestPartition
from Datatypes.Learning import Learning
import persistence.KernelLearning
class Validator :
"""
Callable class to hold state for multiprocessing.Pool invocation
"""
def __init__(self, config,
kernel_class, kernel_args, distances_class, distances_args, learning_class,
kernel_objects, distances_object, input,
partitions, cv_partitions) :
self.config = config
self.kernel_class = kernel_class
self.kernel_args = kernel_args
self.distances_class = distances_class
self.distances_args = distances_args
self.learning_class = learning_class
self.kernel_objects = kernel_objects
self.distances_object = distances_object
self.input = input
self.partitions = partitions
self.cv_partitions = cv_partitions
def __call__(self, (kernel_arg, distances_arg, learning_arg, partition)) :
learning_scale = self.learning_class.get_scale_arg()
config = copy(self.config)
kernel_scale = None
distances_scale = None
if learning_scale != None :
config[learning_scale] = learning_arg
if self.kernel_class != None and self.kernel_args != None :
kernel_scale = self.kernel_class.get_scale_arg()
if kernel_scale != None :
config[kernel_scale] = kernel_arg
learning_object = self.learning_class(config, self.kernel_objects[self.kernel_args.index(kernel_arg)],
self.cv_partitions[self.partitions.cross_validation.index(partition)])
        elif self.distances_class != None and self.distances_args != None :
distances_scale = self.distances_class.get_scale_arg()
if distances_scale != None :
config[distances_scale] = distances_arg
            learning_object = self.learning_class(config, self.distances_object[self.distances_args.index(distances_arg)],
self.cv_partitions[self.partitions.cross_validation.index(partition)])
else :
learning_object = self.learning_class(config, self.input, partition)
learning_object.train()
result = learning_object.test()
return (kernel_arg, distances_arg, learning_arg, result)
class CrossValidation(JSONObject) :
"""
    Run cross validation using a kernel generator / kernel learning or a
    distances generator / k-NN learning, using the partitions in the
    cross_validation field of the supplied TrainTestPartitions.
"""
fields = ['config',
'kernel_module',
'kernel_arg',
'distances_module',
'distances_arg',
'learning_module',
'learning_arg',
'partitions']
def __init__(self, input_json, config=None,
kernel_module=None, kernel_arg=None,
distances_module=None, distances_arg=None,
learning_module=None, learning_arg=None,
partitions=None,
pool=None, timeout=600) :
self.input_json = input_json
self.config = config
self.kernel_module = kernel_module
self.kernel_arg = kernel_arg
self.distances_module = distances_module
self.distances_arg = distances_arg
self.learning_module = learning_module
self.learning_arg = learning_arg
self.partitions = partitions
self.pool = pool
self.timeout = timeout # optional way to die if things are taking to long
@classmethod
def fromJSONDict(cls, json):
return cls(None,
config=Configuration.fromJSONDict(json['config']),
kernel_module=cond_get(json, 'kernel_module'),
kernel_arg=cond_get(json, 'kernel_arg'),
distances_module=cond_get(json, 'distances_module'),
distances_arg=cond_get(json, 'distances_arg'),
learning_module=cond_get(json, 'learning_module'),
learning_arg=cond_get(json, 'learning_arg'),
partitions=cond_get(json, 'partitions'))
def cross_validate(self,) :
cv_input = None
# Make a mapping for just the segments / diagrams / whatever we need for cross validation
cv_indices = list(set(itertools.chain.from_iterable([cv.train + cv.test for cv in self.partitions.cross_validation])))
cv_indices.sort()
cv_partitions = [TrainTestPartition(train=[cv_indices.index(i) for i in cv.train],
test=[cv_indices.index(i) for i in cv.test],
state=cv.state) for cv in self.partitions.cross_validation]
learning_class = None
kernel_class = None
distances_class = None
if self.kernel_module != None :
print self.kernel_module
kernel_module = importlib.import_module("persistence." + self.kernel_module)
kernel_class = getattr(kernel_module, self.kernel_module)
kernel_input_type = kernel_class.get_input_type()
kernel_input_module = importlib.import_module("persistence.Datatypes." + kernel_input_type)
kernel_input_class = getattr(kernel_input_module, kernel_input_type)
cv_input = kernel_input_class.fromJSONDict(self.input_json)
field = kernel_input_class.get_iterable_field()
# narrow the input to only the cross validation inputs
cv_input[field] = [cv_input[field][i] for i in cv_indices]
elif self.distances_module != None :
distances_module = importlib.import_module("persistence." + self.distances_module)
distances_class = getattr(distances_module, self.distances_module)
distances_input_type = distances_class.get_input_type()
distances_input_module = importlib.import_module("persistence.Datatypes." + distances_input_type)
distances_input_class = getattr(distances_input_module, distances_input_type)
cv_input = distances_input_class.fromJSONDict(self.input_json)
field = distances_input_class.get_iterable_field()
# narrow the input to only the cross validation inputs
cv_input[field] = [cv_input[field][i] for i in cv_indices]
learning_module = importlib.import_module("persistence." + self.learning_module)
learning_class = getattr(learning_module, self.learning_module)
learning_input_type = learning_class.get_input_type()
learning_input_module = importlib.import_module("persistence.Datatypes." + learning_input_type)
learning_input_class = getattr(learning_input_module, learning_input_type)
# Cross validation only using the learning_arg value
if self.kernel_module == None and self.distances_module == None:
cv_input = learning_input_class.fromJSONDict(self.input_json)
learning_results = []
if isinstance(self.kernel_arg, list) :
kernel_args = self.kernel_arg
else :
kernel_args = [self.kernel_arg]
if self.kernel_module != None :
# Precompute kernel objects
def computed_kernel(arg) :
config = copy(self.config)
scale_arg = kernel_class.get_scale_arg()
if scale_arg != None :
config[scale_arg] = arg
kernel = kernel_class(config, cv_input, pool=self.pool)
print "Computing %s for %s of %s" % ( self.kernel_module, scale_arg, arg )
kernel.compute_kernel()
kernel.pool = None
return kernel
kernel_objects = [computed_kernel(arg) for arg in kernel_args]
else :
kernel_objects = None
if isinstance(self.distances_arg, list) :
distances_args = self.distances_arg
else :
distances_args = [self.distances_arg]
if self.distances_module != None :
# Precompute distances objects
def computed_distances(arg) :
config = copy(self.config)
scale_arg = distances_class.get_scale_arg()
if scale_arg != None :
config[scale_arg] = arg
distances = distances_class(config, cv_input, pool=self.pool)
print "Computing %s for %s of %s" % ( self.distances_module, scale_arg, arg )
distances.compute_distances()
distances.pool = None
return distances
distances_objects = [computed_distances(arg) for arg in distances_args]
else :
distances_objects = None
if isinstance(self.learning_arg, list) :
learning_args = self.learning_arg
else :
learning_args = [self.learning_arg]
validator = Validator(self.config,
kernel_class, kernel_args, distances_class, distances_args, learning_class,
kernel_objects, distances_objects, cv_input,
self.partitions, cv_partitions)
if self.pool == None :
print "single thread computations"
results = itertools.imap(validator,
itertools.product(kernel_args, distances_args, learning_args,
self.partitions.cross_validation))
results = list(results)
else :
results = self.pool.imap(validator,
itertools.product(kernel_args, distances_args, learning_args,
self.partitions.cross_validation),
1)
final_results = []
try:
while True:
if self.timeout > 0 :
result = results.next(self.timeout)
else :
result = results.next()
final_results.append(result)
except StopIteration:
pass
except multiprocessing.TimeoutError as e:
self.pool.terminate()
print traceback.print_exc()
sys.exit(1)
results = final_results
results = list(results)
best_result = (None, 0.0)
learning_scale = None
kernel_scale = None
distances_scale = None
for (kernel_arg, distances_arg, learning_arg) in itertools.product(kernel_args, distances_args, learning_args) :
these_results = [result for (_kernel_arg, _distances_arg, _learning_arg, result) in results if kernel_arg == _kernel_arg and distances_arg == _distances_arg and learning_arg == _learning_arg]
config = copy(self.config)
learning_scale = learning_class.get_scale_arg()
if learning_scale != None :
config[learning_scale] = learning_arg
if self.kernel_module != None and kernel_args != None :
kernel_scale = kernel_class.get_scale_arg()
if kernel_scale != None :
config[kernel_scale] = kernel_arg
elif self.distances_module != None and distances_args != None :
distances_scale = distances_class.get_scale_arg()
if distances_scale != None :
config[distances_scale] = distances_arg
correct = Learning(config, these_results).get_average_correct()
if correct > best_result[1]:
best_result = (config, correct)
self.config = best_result[0]
print "Best result %02.2f%% %s%s%s" % \
(best_result[1] * 100.0,
("%s %s " % (kernel_scale, self.config[kernel_scale])) if kernel_scale != None else "",
("%s %s " % (distances_scale, self.config[distances_scale])) if distances_scale != None else "",
("%s %s " % (learning_scale, self.config[learning_scale])) if learning_scale != None else "")
self.config.status = 'CrossValidation'
@staticmethod
def get_cross_validation_filename(config, gz=False):
fields = Configuration.fields
return get_filename(config, fields, 'CrossValidation', gz)
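# Illustrative invocation sketch (not from the original project; the module
# names and the range syntax accepted by parse_range are assumptions):
#
#   python CrossValidation.py -i diagrams.json -t partitions.json \
#       -K PersistenceKernel -k "0.1;1.0;10" -L KernelLearning -l "1;100;10"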
def main(argv) :
parser = argparse.ArgumentParser(description="General purpose cross validation tool")
parser.add_argument("--kernel-module", "-K")
parser.add_argument("--kernel-arg", "-k")
parser.add_argument("--distances-module", "-D")
parser.add_argument("--distances-arg", "-d")
parser.add_argument("--learning-module", "-L")
parser.add_argument("--learning-arg", "-l")
parser.add_argument("--infile", "-i")
parser.add_argument("--outfile", "-o")
parser.add_argument("--train-test-partitions", "-t")
parser.add_argument("--pool", "-p", type=int, default=max(1,multiprocessing.cpu_count()-2))
parser.add_argument("--timeout", type=int, default=0)
args = parser.parse_args(argv[1:])
input_json = load_data(args.infile, "input", None, None, argv[0] + ":")
partitions_json = load_data(args.train_test_partitions, "input", None, None, argv[0] + ":")
partitions = TrainTestPartitions.fromJSONDict(partitions_json)
if args.pool > 1 :
pool = multiprocessing.Pool(args.pool)
else :
pool = None
if args.kernel_arg != None :
kernel_arg = parse_range(args.kernel_arg, t=float)
else :
kernel_arg = None
if args.distances_arg != None :
distances_arg = parse_range(args.distances_arg, t=float)
else :
distances_arg = None
if args.learning_arg != None :
learning_arg = parse_range(args.learning_arg, t=float)
else :
learning_arg = None
print "Kernel %s distance %s learning %s" % (kernel_arg, distances_arg, learning_arg)
cv = CrossValidation(input_json,
config=Configuration.fromJSONDict(input_json['config']),
kernel_module=args.kernel_module,
kernel_arg=kernel_arg,
distances_module=args.distances_module,
distances_arg=distances_arg,
learning_module=args.learning_module,
learning_arg=learning_arg,
partitions=partitions,
pool=pool,
timeout=args.timeout)
cv.cross_validate()
if args.outfile == None :
args.outfile = CrossValidation.get_cross_validation_filename(cv.config)
print "Writing %s" % args.outfile
save_data(args.outfile, cv.toJSONDict())
if __name__=="__main__" :
main(sys.argv)
| gpl-3.0 | 1,937,170,279,775,606,000 | 44.957627 | 203 | 0.597209 | false |
par2/lamana | lamana/output_.py | 1 | 29238 | # -----------------------------------------------------------------------------
'''Classes and functions for handling visualizations, plots and exporting data. BETA'''
# _distribplot(): independent plots of single and multiple geometries
# _multiplot(): aggregates severa; distribplots into a grid of subplots
# flake8 output_.py --ignore E265,E501,E701,F841,N802,N803,N806
'''Plot single and multiple LaminateModels.
Plots objects found within a list of LMs. Assumes Laminate objects are
in the namespace. Calls `_distribplot()` for single/multiple geometries.
Parameters
----------
title : str; default None
Suptitle; convenience keyword
subtitle : str; default None
        Subtitle; convenience keyword. Uses ax.text().
x, y : str; default None
        DataFrame column names.  Users can manually pass in other column names.
normalized : bool; default None
If true, plots y = k_; else plots y = d_ unless specified otherwise.
halfplot : str; default None
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
extrema : bool; default True
Plot minima and maxima only; equivalent to p=2.
separate : bool; default False
Plot each geometry in separate subplots.
legend_on : bool; default True
        Turn the plot legend on/off.
colorblind : bool; default False
Set line and marker colors as colorblind-safe.
grayscale : bool; default False
Set everything to grayscale; overrides colorblind.
annotate : bool; default False
Annotate names of layer types.
inset: bool; default None
Unnormalized plot of single geometry in upper right corner.
ax : matplotlib axes; default None
An axes containing the plots.
{subplots, suptitle}_kw : dict; default None
Default keywords are initialed to set up the distribution plots.
- subplots: |ncols=1|figsize=(12,8)|dpi=300|
- suptitle: |fontsize=15|fontweight='bold'|
Notes
-----
    See `_distribplot()` for more kwargs. Here are some preferred idioms:
>>> case.LM.plot() # geometries in case
Case Plotted. Data Written. Image Saved.
>>> case.LM[4:-1].plot() # handle slicing
Case Plotted. Data Written. Image Saved.
Examples
--------
Plot Single Geometry
--------------------
Unnormalized stress distribution for single geometry (default):
.. plot::
:context: close-figs
>>> import lamana as la
    >>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> case = la.distributions.Case(dft.load_params, dft.mat_props)
>>> case.apply('400-[200]-800')
>>> case.plot()
Normalized stress distribution for single geometry:
.. plot::
:context: close-figs
>>> case.plot(normalized=True)
Normalized stress distribution (base) with an unnormalized inset:
.. plot::
:context: close-figs
>>> case.plot(inset=True)
Stress distribution plot with layer annotations:
.. plot::
:context: close-figs
    >>> case.plot(annotate=True)
Custom markerstyles and kwarg passing.
.. plot::
:context: close-figs
    >>> case.plot(markerstyles=['D'])
Colorblind-safe color palette.
.. plot::
:context: close-figs
    >>> case.plot(colorblind=True)
Grayscale color palette.
.. plot::
:context: close-figs
    >>> case.plot(grayscale=True)
Plot Multiple Geometries
------------------------
Normalized stress distributions for multiple geometries (default):
.. plot::
:context: close-figs
>>> case.apply('400-200-800', '350-400-500', '200-100-1400')
>>> case.plot()
Tensile stress distribution:
.. plot::
:context: close-figs
>>> case.plot(halfplot='tensile')
Insets are not implemented for multiple geometries:
.. plot::
:context: close-figs
>>> case.plot(inset=True)
    NotImplementedError: Unable to superimpose multiple, unnormalized plots.
See Also
--------
lamana.constructs.Laminate : builds the `LaminateModel` object.
lamana.output_._distribplot : generic handler for stress distribution plots.
lamana.output_._multiplot : plots multiple cases as subplots (caselets).
lamana.distributions.Case.plot : makes call to `_distribplot()`.
lamana.distributions.Cases.plot : makes call to `_multiplot()`.
'''
import math
import logging
import itertools as it
import matplotlib as mpl
import matplotlib.pyplot as plt
from lamana.lt_exceptions import InputError, PlottingError
# TODO: Replace with config.LAMANA_PALETTES
# colorblind palette from seaborn; grayscale is web-safe
LAMANA_PALETTES = dict(
#bold=['#FC0D00','#FC7700','#018C99','#00C318','#6A07A9','#009797','#CF0069'],
bold=['#EB0C00', '#FC7700', '#018C99', '#00C318', '#6A07A9', '#009797', '#CF0069'],
colorblind=['#0072B2', '#009E73', '#D55E00', '#CC79A7', '#F0E442', '#56B4E9'],
grayscale=['#FFFFFF', '#999999', '#666666', '#333333', '#000000'],
HAPSu=['#E7940E', '#F5A9A9', '#FCEB00', '#0B4EA5'],
)
# =============================================================================
# PLOTS -----------------------------------------------------------------------
# =============================================================================
# Process plotting figures of single and multiple subplots
#def _cycle_depth(iterable, n=None):
# '''Return a cycler that iterates n items into an iterable.'''
# if n is None:
# n = len(iterable)
# return it.cycle(it.islice(iterable, n))
def _cycle_depth(iterable, depth=None):
'''Return an itertools.cycle that slices the iterable by a given depth.
Parameters
----------
iterable : iterable
A container of infinite length.
depth : int
A index value; if None, cycle the entire iterable.
Examples
--------
>>> # Depth: 1 2 3 4 5 6
>>> iter_ = ['A', 'B', 'C', 'D', 'E', 'F']
>>> _cycle_depth(iter_, depth=2)
itertools.cycle # ['A', 'B', 'A', 'B', 'A' ...]
>>> # Depth: 1 2 3 4 5 6
>>> iter_ = ['A', 'B', 'C', 'D', 'E', 'F']
>>> _cycle_depth(iter_, depth=3)
itertools.cycle # ['A', 'B', 'C', 'A', 'B', 'C' ...]
Returns
-------
itertools.cycle
An infinite generator.
'''
if depth is None:
depth = len(iterable)
return it.cycle(it.islice(iterable, depth))
# TODO: Abstract to Distribplot and PanelPlot classes
def _distribplot(
LMs, x=None, y=None, normalized=True, halfplot=None, extrema=True,
legend_on=True, colorblind=False, grayscale=False, annotate=False, ax=None,
linestyles=None, linecolors=None, markerstyles=None, layercolors=None,
plot_kw=None, patch_kw=None, annotate_kw=None, legend_kw=None,
sublabel_kw=None, **kwargs
):
'''Return an axes plot of stress distributions.
Some characteristics
- multiplot: plot multiple geometries
- halfplot: plots only compressive or tensile side
- annotate: write layer type names
Users can override kwargs normal mpl style.
Parameters
----------
LMs : list of LaminateModel objects
Container for LaminateModels.
x, y : str
        DataFrame column names. Users can pass in other column names.
normalized : bool
If true, plots y = k_; else plots y = d_ unless specified otherwise.
halfplot : str
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
extrema : bool
Plot minima and maxima only; equivalent to p=2.
legend_on : bool
        Turn the legend on/off. Default: True.
colorblind : bool
Set line and marker colors as colorblind-safe.
grayscale : bool
Set everything to grayscale. Overrides colorblind.
annotate : bool
Annotate names of layer types.
ax : matplotlib axes
An axes containing the plots.
These keywords control general plotting aesthetics.
{lines, marker, layer}_styles/colors : dict
Processes cycled iterables for matplotlib keywords.
- linestyles: ["-","--","-.",":"]
- linecolors: LAMANA_PALETTES['bold']
- markerstyles: mpl.lines.Line2D.filled_markers
- layercolors: LAMANA_PALETTES['HAPSu']
{plot, patch, annotate, legend, sublabel}_kw : dict
Default keywords are initialized to set up the distribution plots.
- plot: |linewidth=1.8|markersize=8|alpha=1.0|clip_on=False|
- patch: |linewidth=1.0|alpha=0.15|
- annotate: write layer types |fontsize=20|alpha=.7|ha='left'|va='center'|
- legend: |loc=1|fontsize='large'|
- sublabel: default is lower case alphabet
|x=0.12|y=0.94|s=''|fontsize=20|weight='bold'|ha='center'|va='center'|
Returns
-------
matplotlib axes
A plot of k or d (height) versus stress.
Raises
------
InputError
If no stress column is found.
PlottingError
If multiple geometries try an unnormalized plot; cannot superimpose.
Notes
-----
Since this function pulls from existing axes with `gca`, it is currently up
to the coder to manage axes cleanup, particularly when making consecutive plot
instances. The following example uses the clear axes f(x) to remedy this issue:
>>> # Plot consecutive instances
>>> case = ut.laminator(['400-200-800'])[0]
>>> LMs = case.LMs
>>> plot1 = la.output_._distribplot(LMs, normalized=True)
>>> plot1.cla() # clear last plot, otherwise prevents infinite loop of gca from old plot
>>> plot2 = la.output_._distribplot(LMs, normalized=False)
If you want to keep your old axes, consider passing in a new axes.
>>> fig, new_ax = plt.subplots()
>>> plot3 = la.output_._distribplot(LMs, normalized=False, ax=new_ax)
Examples
--------
>>> # Plot a single geometry
>>> import lamana as la
>>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> case = la.distributions.Case(dft.load_params, dft.mat_props)
>>> case.apply(['400-200-800'])
>>> la.output_._distribplot(case.LMs)
<matplotlib.axes._subplots.AxesSubplot>
'''
# -------------------------------------------------------------------------
'''Make cyclers colorblind and grayscale friendly'''
if ax is None:
ax = plt.gca()
# Default axis labels and DataFrame columns for normalized plots
if x is None:
# 'stress_f (MPa/N)' is in Wilson_LT; so the following triggers handling
##x = 'stress_f (MPa/N)'
x = 'stress'
if normalized:
y = 'k'
elif not normalized and y is None:
y = 'd(m)'
# NOTE: Will have trouble standardizing the name of the stress column.
# NOTE: Need to de-hard-code x label since changes with model
# TODO: Try looking for stress columns, and select last one, else look for strain.
# see loop on handling stress column
# Plot Defaults -----------------------------------------------------------
# Set defaults for plotting keywords with dicts
# If no kwd found, make an empty dict; only update keys not passed in
plot_kw = {} if plot_kw is None else plot_kw
plot_dft = dict(linewidth=1.8, markersize=8, alpha=1.0, clip_on=False,)
plot_kw.update({k: v for k, v in plot_dft.items() if k not in plot_kw})
#print('plot_kw (pre-loop): ', plot_kw)
patch_kw = {} if patch_kw is None else patch_kw
patch_dft = dict(linewidth=1.0, alpha=0.15,)
patch_kw.update({k: v for k, v in patch_dft.items() if k not in patch_kw})
#print('patch_kw: ', patch_kw)
annotate_kw = {} if annotate_kw is None else annotate_kw
annotate_dft = dict(fontsize=20, alpha=.7, ha='left', va='center',)
annotate_kw.update({k: v for k, v in annotate_dft.items() if k not in annotate_kw})
#print('annotate_kw: ', annotate_kw)
legend_kw = {} if legend_kw is None else legend_kw
legend_dft = dict(loc=1, fontsize='large',)
legend_kw.update({k: v for k, v in legend_dft.items()
if k not in legend_kw and legend_on})
#print('legend_kw: ', legend_kw)
sublabel_kw = {} if sublabel_kw is None else sublabel_kw
sublabel_dft = dict(
x=0.12, y=0.94, s='', fontsize=20, weight='bold', ha='center',
va='center', transform=ax.transAxes
)
sublabel_kw.update({k: v for k, v in sublabel_dft.items()
if k not in sublabel_kw})
#print('sublabel_kw: ', sublabel_kw)
# Style Cyclers -----------------------------------------------------------
# Set defaults for the line/marker styles, colors and layer patch colors
if linestyles is None:
linestyles = it.cycle(["-", "--", "-.", ":"])
if linecolors is None:
linecolors = LAMANA_PALETTES['bold']
if markerstyles is None:
markerstyles = [mrk for mrk in mpl.lines.Line2D.filled_markers
if mrk not in ('None', None)]
if layercolors is None:
layercolors = LAMANA_PALETTES['HAPSu']
##layercolors = ['#E7940E', '#F5A9A9', '#FCEB00', '#0B4EA5']
if colorblind:
linecolors = LAMANA_PALETTES['colorblind']
'''Add special color blind to layers'''
if grayscale:
linecolors = ['#000000']
layercolors = reversed(LAMANA_PALETTES['grayscale'][:-1]) # exclude black
patch_kw.update(dict(alpha=0.5))
if colorblind:
print('Grayscale has overriden the colorblind option.')
marker_cycle = it.cycle(markerstyles)
##marker_cycle = it.cycle(reversed(markerstyles))
line_cycle = it.cycle(linestyles)
color_cycle = it.cycle(linecolors)
# Plotting ----------------------------------------------------------------
minX, maxX = (0, 0)
for i, LM in enumerate(LMs):
if extrema:
df = LM.extrema # plots p=2
else:
df = LM.LMFrame
#nplies = LM.nplies # unused
materials = LM.materials
lbl = LM.Geometry.string
stack_order = LM.stack_order
# Handle arbitrary name of x column by
# selecting last 'stress' column; assumes 'stress_f (MPa)' for Wilson_LT
# if none found, exception is raised. user should input x value
#logging.debug('x: {}'.format(x))
x_col = x
y_col = y
try:
df[x_col]
except KeyError:
try:
# Try to discern if input wants a stress column.
stress_names = df.columns.str.startswith('stress')
stress_cols = df.loc[:, stress_names]
##stress_cols = df.loc[stress_names]
x_series = stress_cols.iloc[:, -1]
x_col = x_series.name
logging.info(
"Stress column '{}' not found."
" Using '{}' column.".format(x, x_col)
)
# TODO: unable to test without configuring model. Make mock model for test.
except KeyError:
raise InputError(
"Stress column '{}' not found."
                    ' Specify `x` column in plot() method.'.format(x_col)
)
x_series, y_series = df[x_col], df[y_col]
xs, ys = x_series.tolist(), y_series.tolist()
# Update plot boundaries
if min(xs) < minX:
minX = float(min(xs))
if max(xs) > maxX:
maxX = float(max(xs))
#print(minX, maxX)
# Keyword Updates;
# Use the cycler if plot_kw is empty, otherwise let the user manually change plot_kw
plot_kw.update({
'label': lbl,
#'marker': 'o',
#'color': 'b',
'marker': next(marker_cycle),
'color': next(color_cycle),
'linestyle': next(line_cycle)
})
'''Put following into info.'''
#print(LM.Geometry, LM.Geometry.string, LM.name, LM.nplies, LM.p)
# Label caselets with sublabels, e.g. a,b,c, i,ii,iii...
ax.tick_params(axis='x', pad=10)
ax.tick_params(axis='y', pad=10)
ax.plot(xs, ys, **plot_kw)
width = maxX - minX # sets rectangle width
minY = y_series.min()
maxY = y_series.max()
# Smart-cycle layer colors list; slice iterable the length of materials
# Draw layers only for # y = {k_ and d_(if nplies=1)}
layer_cycle = _cycle_depth(layercolors, depth=len(materials)) # assumes all Cases materials equiv.
# -------------------------------------------------------------------------
# Annotations anchored to layers instead of plot; iterates layers
incrementer = 0
for layer_, (type_, t_, matl_) in stack_order.items():
if normalized:
ypos, thick = layer_, 1 # thick is a unit thick (k-k_1)
elif (not normalized and len(LMs) == 1):
thick = t_ / 1e6
ypos = incrementer
else:
raise PlottingError(
'Unnormalized plots (i.e. y=d(m)) are visually cumbersome for'
' geometries > 1. Consider using the `normalized=True` keyword'
' for displaying simultaneous multi-geometry data.'
)
# NOTE: Replaced with raise in 0.4.11.dev0
#'''Add this to warning.'''
#print('CAUTION: Unnormalized plots (y=d(m)) are cumbersome for '
# 'geometries > 1. Consider normalized=True for multi-geometry '
# 'plots.')
#return None
patch_kw.update({'facecolor': next(layer_cycle)}) # adv. cyclers
rect = mpl.patches.Rectangle((minX, ypos), width, thick, **patch_kw)
ax.add_artist(rect)
'''add these to a kw dict somehow.. preferably to annotate_kw'''
xpad = 0.02
ypad_layer = 0.15
ypad_plot = 0.03
if normalized:
ypad = (rect.get_height() * ypad_layer) # relative to layers
elif not normalized:
#print(ax.get_ylim()[1])
ypad = ax.get_ylim()[1] * ypad_plot # relative to plot
#print(ypad)
rx, ry = rect.get_xy()
cx = rx + (rect.get_width() * xpad)
cy = ry + ypad
if annotate:
ax.annotate(type_, (cx, cy), **annotate_kw)
incrementer += thick
# -------------------------------------------------------------------------
# Set plot limits
#ax.axis([minX, maxX, minY, maxY])
if halfplot is None:
ax.axis([minX, maxX, minY, maxY])
elif halfplot is not None:
if halfplot.lower().startswith('comp'):
ax.set_xlim([minX, 0.0])
ax.set_ylim([minY, maxY])
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
else: # default tensile
ax.set_xlim([0.0, maxX])
ax.set_ylim([minY, maxY])
ax.xaxis.set_major_locator(mpl.ticker.MaxNLocator(5))
# '''Fix overlapping; no way to do automatically'''
# major_ticks = np.arange(0.0, maxX, 0.1)
# ax.set_xticks(major_ticks)
# Set legend parameters and axes labels
if legend_kw is not None and legend_on:
ax.legend(**legend_kw)
ax.text(**sublabel_kw) # figure sublabel
# TODO: Refactor for less limited parameter-setting of axes labels.
axtitle = kwargs.get('label', '')
xlabel = kwargs.get('xlabel', x)
ylabel = kwargs.get('ylabel', y)
ax.set_title(axtitle)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
##ax.xaxis.labelpad = 20
##ax.yaxis.labelpad = 20
return ax
# TODO: Needs to return an axes or figure plot
# TODO: caselets are defined as containers of str, lists of str or cases, in LPEP 003.
# Here caseslets are an LM, LMs or cases; list of cases(?) or cases object.
def _multiplot(
caselets, x=None, y=None, title=None, normalized=True, extrema=False,
halfplot='tensile', colorblind=False, grayscale=False, annotate=False,
labels_off=False, suptitle_kw=None, subplots_kw=None, patch_kw=None,
plot_kw=None, legend_kw=None, labels_kw=None, **kwargs
):
'''Return figure of axes containing several plots.
Characteristics:
- multiple plots
- kwarg/arg passing
- global labels and titles
- delete remaining subplots if less than remaining axes.
Parameters
----------
caselets : LM, LMs or cases
Should be a container of str, lists of str or cases; however, accepting
LM, LMs or cases. Refactoring required.
x, y : str
        DataFrame column names. Users can pass in other column names.
title : str
Figure title.
normalized : bool
If true, plots y = k_; else plots y = d_ unless specified otherwise.
extrema : bool, default: False
Plot minima and maxima only; equivalent to p=2.
Forced off for clarity in separate plots.
halfplot : str
Trim the DataFrame to read either |'tensile'|'compressive'|None|.
colorblind : bool
Set line and marker colors as colorblind-safe.
grayscale : bool
Set everything to grayscale. Overrides colorblind.
annotate : bool
Annotate names of layer types.
labels_off : bool
Toggle labels.
labels_kw : dict
One stop for custom labels and annotated text passed in from user.
axestitle, sublabels, legendtitles are lists of labels for each caselet.
These keywords control general plotting aesthetics.
{subplot, patch, plot, legend, suptitle}_kw : dict
Default keywords are initialized to set up the distribution plots.
- subplots: |ncols=4|
- patch: None
- plot: |clip_on=True|
- legend: |loc=1|fontsize='small'|
- suptitle: |t=''|fontsize=22|fontweight='bold'|
Returns
-------
matplotlib figure
A figure of subplots.
Examples
--------
>>> # Plot a set of caselets (subplots)
>>> import lamana as la
>>> from lamana.models import Wilson_LT as wlt
>>> dft = wlt.Defaults()
>>> const_total = ['350-400-500', '400-200-800']
>>> cases = la.distributions.Cases(
... const_total, load_params=dft.load_params, mat_props=dft.mat_props,
... model='Wilson_LT', ps=[2, 3]
... )
>>> la.output_._multiplot(cases)
'''
# DEFAULTS ----------------------------------------------------------------
title = '' if title is None else title
if labels_off:
kwargs['xlabel'], kwargs['ylabel'] = ('', '') # turn off axes labels
subplots_kw = {} if subplots_kw is None else subplots_kw
subplots_dft = dict(ncols=4)
subplots_kw.update({k: v for k, v in subplots_dft.items() if k not in subplots_kw})
#print('subplots_kw: ', subplots_kw)
patch_kw = {} if patch_kw is None else patch_kw
#print('patch_kw: ', patch_kw)
plot_kw = {} if plot_kw is None else plot_kw
plot_dft = dict(clip_on=True) # needed in halfplots; else BUG
plot_kw.update({k: v for k, v in plot_dft.items() if k not in plot_kw})
#print('plot_kw: ', plot_kw)
legend_kw = {} if legend_kw is None else legend_kw
legend_dft = dict(loc=1, fontsize='small')
legend_kw.update({k: v for k, v in legend_dft.items() if k not in legend_kw})
#print('legend_kw: ', legend_kw)
suptitle_kw = {} if suptitle_kw is None else suptitle_kw
suptitle_dft = dict(t='', fontsize=22, fontweight='bold')
if title:
suptitle_dft.update(dict(t=title))
suptitle_kw.update({k: v for k, v in suptitle_dft.items() if k not in suptitle_kw})
#print('suptitle_kw: ', suptitle_kw)
# Main dict to handle all text
# sublabels defaults to no labels after letter 'z'.
    # Will auto label subplots from a to z. After that, the user must supply labels.
labels_kw = {} if labels_kw is None else labels_kw
alphabet = map(chr, range(97, 123)) # to label subplots; REF 037
labels_dft = dict(suptitle=None, sublabels=list(alphabet),
axes_titles=None, legend_titles=None,)
if title:
labels_dft.update(suptitle=title) # compliment convenience kw arg
labels_kw.update({k: v for k, v in labels_dft.items() if k not in labels_kw})
if labels_kw['suptitle']:
suptitle_kw.update(t=labels_kw['suptitle'])
# if labels_kw['subtitle']: subtitle=labels_kw['subtitle']
# if labels_kw['xlabel']: kwargs['xlabel'] = '' # remove axlabels; use text()
# if labels_kw['ylabel']: kwargs['ylabel'] = '' # remove axlabels; use text()
#print('labels_kw: ', labels_kw)
'''Consider cycling linecolors for each single geo, multiplot.'''
# FIGURE ------------------------------------------------------------------
# Reset figure dimensions
ncaselets = len(caselets)
ncols_dft = subplots_kw['ncols']
nrows = int(math.ceil(ncaselets / ncols_dft)) # Fix "can't mult. seq. by non-int..." error; nrows should always be int
##nrows = math.ceil(ncaselets / ncols_dft)
subplots_kw['figsize'] = (24, 8 * nrows)
if ncaselets < ncols_dft:
ncols_dft = ncaselets
subplots_kw['ncols'] = ncaselets
# Set defaults for lists of titles/labels
for key in ['axes_titles', 'legend_titles', 'sublabels']:
if labels_kw[key] is None:
labels_kw[key] = [''] * ncaselets
if ncaselets > len(labels_kw['sublabels']):
labels_kw['sublabels'] = [' '] * ncaselets
print('There are more cases than sublabels. Bypassing default... '
"Consider adding custom labels to 'axestext_kw'.")
fig, axes = plt.subplots(nrows=nrows, **subplots_kw)
#print('args: {}'.format(args))
#print('kwargs:{} '.format(kwargs))
#print('nrows: {}, ncols: {}'.format(nrows, ncols_dft))
# NOTE: does not return ax. Fix?
def plot_caselets(i, ax):
'''Iterate axes of the subplots; apply a small plot ("caselet").
Caselets could contain cases (iterable) or LaminateModels (not iterable).
'''
try:
caselet, axtitle, ltitle, sublabel = (
caselets[i],
labels_kw['axes_titles'][i],
labels_kw['legend_titles'][i],
labels_kw['sublabels'][i]
)
# Plot LMs on each axes per case (and legend notes if there)
#print(ltitle, axsub)
kwargs.update(label=axtitle)
legend_kw.update(title=ltitle)
sublabel_kw = dict(s=sublabel)
# TODO: Refactor
# Caselet could be a case or LM, but distribplot needs a list of LMs
try:
# Case
LMs = caselet.LMs
except (AttributeError):
# Single LaminateModel
LMs = [caselet]
#print('Exception was caught; not a case')
# NOTE: what about LMs?
_distribplot(
LMs, x=x, y=y, halfplot=halfplot, extrema=extrema, annotate=annotate,
normalized=normalized, ax=ax, colorblind=colorblind,
grayscale=grayscale, plot_kw=plot_kw, patch_kw=patch_kw,
legend_kw=legend_kw, sublabel_kw=sublabel_kw, **kwargs
)
except(IndexError, KeyError):
# Cleanup; remove the remaining plots
fig.delaxes(ax)
def iter_vector():
'''Return axes for nrow=1; uses single loop.'''
for i, ax in enumerate(axes):
plot_caselets(i, ax)
def iter_matrix():
'''Return axes for nrow>1; uses nested loop.'''
i = 0
for ax_row in axes:
for ax in ax_row:
plot_caselets(i, ax)
i += 1
if nrows == 1:
iter_vector()
else:
iter_matrix()
# Common Figure Labels
fig.suptitle(**suptitle_kw)
plt.rcParams.update({'font.size': 18})
# NOTE: Add a figure return and show deprecation in 0.4.11.dev0
return fig
#plt.show()
# -----------------------------------------------------------------------------
# AXES-LEVEL ------------------------------------------------------------------
# -----------------------------------------------------------------------------
class AxesPlot():
    '''Return a matplotlib axes.
See Also
--------
- _distribplot()
- singleplot()
- halfplot()
- quarterplot()
- predictplot()
'''
pass
# -----------------------------------------------------------------------------
# FIGURE-LEVEL ----------------------------------------------------------------
# -----------------------------------------------------------------------------
class FigurePlot():
'''Return a matplotlib figure.
This class sets up a figure to accept data for multiple plots.
Attributes
-----------
nrows, ncols = int, int
Figure rows and columns.
Notes
-----
Each subplot is a separate axes.
See Also
--------
- _multiplot()
- ratioplot()
'''
#figsize = (ncols * size * aspect, nrows * size)
pass
| bsd-3-clause | 8,377,580,359,115,972,000 | 34.743276 | 131 | 0.569157 | false |
jaeilepp/eggie | mne/viz/topo.py | 1 | 27382 | """Functions to plot M/EEG data on topo (one axes per channel)
"""
from __future__ import print_function
# Authors: Alexandre Gramfort <[email protected]>
# Denis Engemann <[email protected]>
# Martin Luessi <[email protected]>
# Eric Larson <[email protected]>
#
# License: Simplified BSD
import warnings
from itertools import cycle
from functools import partial
import numpy as np
from scipy import ndimage
# XXX : don't import pyplot here or you will break the doc
from ..baseline import rescale
from ..utils import deprecated
from ..io.pick import channel_type, pick_types
from ..fixes import normalize_colors
from ..utils import _clean_names
from .utils import _mutable_defaults, _check_delayed_ssp, COLORS
from .utils import _draw_proj_checkbox
def iter_topography(info, layout=None, on_pick=None, fig=None,
fig_facecolor='k', axis_facecolor='k',
axis_spinecolor='k', layout_scale=None,
colorbar=False):
""" Create iterator over channel positions
This function returns a generator that unpacks into
a series of matplotlib axis objects and data / channel
indices, both corresponding to the sensor positions
of the related layout passed or inferred from the channel info.
`iter_topography`, hence, allows to conveniently realize custom
topography plots.
Parameters
----------
info : instance of mne.io.meas_info.Info
The measurement info.
layout : instance of mne.layout.Layout | None
The layout to use. If None, layout will be guessed
on_pick : callable | None
The callback function to be invoked on clicking one
        of the axes. It should accept the signature
        `function(axis, channel_index)`.
fig : matplotlib.figure.Figure | None
The figure object to be considered. If None, a new
figure will be created.
fig_facecolor : str | obj
The figure face color. Defaults to black.
axis_facecolor : str | obj
The axis face color. Defaults to black.
axis_spinecolor : str | obj
The axis spine color. Defaults to black. In other words,
the color of the axis' edge lines.
layout_scale: float | None
Scaling factor for adjusting the relative size of the layout
on the canvas. If None, nothing will be scaled.
Returns
-------
A generator that can be unpacked into
ax : matplotlib.axis.Axis
The current axis of the topo plot.
    ch_idx : int
The related channel index.
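    Examples
    --------
    Minimal illustrative sketch (``raw`` and ``data`` are hypothetical
    variables): assumes ``raw`` is an already loaded Raw instance and
    ``data`` is an array with one row per channel.
    >>> import matplotlib.pyplot as plt
    >>> for ax, ch_idx in iter_topography(raw.info,
    ...                                   fig_facecolor='white',
    ...                                   axis_facecolor='white'):
    ...     ax.plot(data[ch_idx], color='red')
    >>> plt.show()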
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.figure()
fig.set_facecolor(fig_facecolor)
if layout is None:
from ..layouts import find_layout
layout = find_layout(info)
if on_pick is not None:
callback = partial(_plot_topo_onpick, show_func=on_pick)
fig.canvas.mpl_connect('button_press_event', callback)
pos = layout.pos.copy()
if layout_scale:
pos[:, :2] *= layout_scale
ch_names = _clean_names(info['ch_names'])
iter_ch = [(x, y) for x, y in enumerate(layout.names) if y in ch_names]
for idx, name in iter_ch:
ax = plt.axes(pos[idx])
ax.patch.set_facecolor(axis_facecolor)
plt.setp(list(ax.spines.values()), color=axis_spinecolor)
ax.set_xticklabels([])
ax.set_yticklabels([])
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
ch_idx = ch_names.index(name)
vars(ax)['_mne_ch_name'] = name
vars(ax)['_mne_ch_idx'] = ch_idx
vars(ax)['_mne_ax_face_color'] = axis_facecolor
yield ax, ch_idx
def _plot_topo(info=None, times=None, show_func=None, layout=None,
decim=None, vmin=None, vmax=None, ylim=None, colorbar=None,
border='none', cmap=None, layout_scale=None, title=None,
x_label=None, y_label=None, vline=None):
"""Helper function to plot on sensor layout"""
import matplotlib.pyplot as plt
# prepare callbacks
tmin, tmax = times[[0, -1]]
on_pick = partial(show_func, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim, x_label=x_label,
y_label=y_label, colorbar=colorbar)
fig = plt.figure()
if colorbar:
norm = normalize_colors(vmin=vmin, vmax=vmax)
sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
sm.set_array(np.linspace(vmin, vmax))
ax = plt.axes([0.015, 0.025, 1.05, .8], axisbg='k')
cb = fig.colorbar(sm, ax=ax)
cb_yticks = plt.getp(cb.ax.axes, 'yticklabels')
plt.setp(cb_yticks, color='w')
my_topo_plot = iter_topography(info, layout=layout, on_pick=on_pick,
fig=fig, layout_scale=layout_scale,
axis_spinecolor=border,
colorbar=colorbar)
for ax, ch_idx in my_topo_plot:
if layout.kind == 'Vectorview-all' and ylim is not None:
this_type = {'mag': 0, 'grad': 1}[channel_type(info, ch_idx)]
ylim_ = [v[this_type] if _check_vlim(v) else v for v in ylim]
else:
ylim_ = ylim
show_func(ax, ch_idx, tmin=tmin, tmax=tmax, vmin=vmin,
vmax=vmax, ylim=ylim_)
if ylim_ and not any(v is None for v in ylim_):
plt.ylim(*ylim_)
if title is not None:
plt.figtext(0.03, 0.9, title, color='w', fontsize=19)
return fig
def _plot_topo_onpick(event, show_func=None, colorbar=False):
"""Onpick callback that shows a single channel in a new figure"""
# make sure that the swipe gesture in OS-X doesn't open many figures
orig_ax = event.inaxes
if event.inaxes is None:
return
import matplotlib.pyplot as plt
try:
ch_idx = orig_ax._mne_ch_idx
face_color = orig_ax._mne_ax_face_color
fig, ax = plt.subplots(1)
plt.title(orig_ax._mne_ch_name)
ax.set_axis_bgcolor(face_color)
# allow custom function to override parameters
show_func(plt, ch_idx)
except Exception as err:
# matplotlib silently ignores exceptions in event handlers,
# so we print
# it here to know what went wrong
print(err)
raise err
def _imshow_tfr(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None, tfr=None,
freq=None, vline=None, x_label=None, y_label=None,
colorbar=False, picker=True, cmap=None):
""" Aux function to show time-freq map on topo """
import matplotlib.pyplot as plt
if cmap is None:
cmap = plt.cm.jet
extent = (tmin, tmax, freq[0], freq[-1])
ax.imshow(tfr[ch_idx], extent=extent, aspect="auto", origin="lower",
vmin=vmin, vmax=vmax, picker=picker, cmap=cmap)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _plot_timeseries(ax, ch_idx, tmin, tmax, vmin, vmax, ylim, data, color,
times, vline=None, x_label=None, y_label=None,
colorbar=False):
""" Aux function to show time series on topo """
import matplotlib.pyplot as plt
picker_flag = False
for data_, color_ in zip(data, color):
if not picker_flag:
# use large tol for picker so we can click anywhere in the axes
ax.plot(times, data_[ch_idx], color_, picker=1e9)
picker_flag = True
else:
ax.plot(times, data_[ch_idx], color_)
if vline:
[plt.axvline(x, color='w', linewidth=0.5) for x in vline]
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def _check_vlim(vlim):
"""AUX function"""
    return not np.isscalar(vlim) and vlim is not None
def plot_topo(evoked, layout=None, layout_scale=0.945, color=None,
border='none', ylim=None, scalings=None, title=None, proj=False,
vline=[0.0]):
"""Plot 2D topography of evoked responses.
Clicking on the plot of an individual sensor opens a new figure showing
the evoked response for the selected sensor.
Parameters
----------
evoked : list of Evoked | Evoked
The evoked response to plot.
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
layout_scale: float
Scaling factor for adjusting the relative size of the layout
on the canvas
color : list of color objects | color object | None
Everything matplotlib accepts to specify colors. If not list-like,
the color specified will be repeated. If None, colors are
automatically drawn.
border : str
matplotlib borders style to be used for each sensor plot.
scalings : dict | None
        The scalings of the channel types to be applied for plotting. If None,
defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
ylim : dict | None
ylim for plots. The value determines the upper and lower subplot
        limits. e.g. ylim = dict(eeg=[-200e-6, 200e-6]). Valid keys are eeg,
mag, grad, misc. If None, the ylim parameter for each channel is
determined by the maximum absolute peak.
proj : bool | 'interactive'
If true SSP projections are applied before display. If 'interactive',
a check box for reversible selection of SSP projection vectors will
be shown.
title : str
Title of the figure.
vline : list of floats | None
The values at which to show a vertical line.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of evoked responses at sensor locations
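    Examples
    --------
    Illustrative sketch only; assumes ``evoked`` (or a list of Evoked objects
    such as ``[evoked_a, evoked_b]``) already exists in the namespace.
    >>> fig = plot_topo(evoked, title='Evoked responses')
    >>> fig = plot_topo([evoked_a, evoked_b], color=['y', 'g'])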
"""
if not type(evoked) in (tuple, list):
evoked = [evoked]
if type(color) in (tuple, list):
if len(color) != len(evoked):
raise ValueError('Lists of evoked objects and colors'
' must have the same length')
elif color is None:
colors = ['w'] + COLORS
stop = (slice(len(evoked)) if len(evoked) < len(colors)
else slice(len(colors)))
color = cycle(colors[stop])
if len(evoked) > len(colors):
warnings.warn('More evoked objects than colors available.'
'You should pass a list of unique colors.')
else:
color = cycle([color])
times = evoked[0].times
if not all([(e.times == times).all() for e in evoked]):
raise ValueError('All evoked.times must be the same')
info = evoked[0].info
ch_names = evoked[0].ch_names
if not all([e.ch_names == ch_names for e in evoked]):
raise ValueError('All evoked.picks must be the same')
ch_names = _clean_names(ch_names)
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(info)
# XXX. at the moment we are committed to 1- / 2-sensor-types layouts
chs_in_layout = set(layout.names) & set(ch_names)
types_used = set(channel_type(info, ch_names.index(ch))
for ch in chs_in_layout)
# one check for all vendors
meg_types = ['mag'], ['grad'], ['mag', 'grad'],
is_meg = any(types_used == set(k) for k in meg_types)
if is_meg:
types_used = list(types_used)[::-1] # -> restore kwarg order
picks = [pick_types(info, meg=kk, ref_meg=False, exclude=[])
for kk in types_used]
else:
types_used_kwargs = dict((t, True) for t in types_used)
picks = [pick_types(info, meg=False, **types_used_kwargs)]
assert isinstance(picks, list) and len(types_used) == len(picks)
scalings = _mutable_defaults(('scalings', scalings))[0]
evoked = [e.copy() for e in evoked]
for e in evoked:
for pick, t in zip(picks, types_used):
e.data[pick] = e.data[pick] * scalings[t]
if proj is True and all([e.proj is not True for e in evoked]):
evoked = [e.apply_proj() for e in evoked]
elif proj == 'interactive': # let it fail early.
for e in evoked:
_check_delayed_ssp(e)
if ylim is None:
set_ylim = lambda x: np.abs(x).max()
ylim_ = [set_ylim([e.data[t] for e in evoked]) for t in picks]
ymax = np.array(ylim_)
ylim_ = (-ymax, ymax)
elif isinstance(ylim, dict):
ylim_ = _mutable_defaults(('ylim', ylim))[0]
ylim_ = [ylim_[kk] for kk in types_used]
ylim_ = zip(*[np.array(yl) for yl in ylim_])
else:
        raise ValueError('ylim must be None or a dict')
plot_fun = partial(_plot_timeseries, data=[e.data for e in evoked],
color=color, times=times, vline=vline)
fig = _plot_topo(info=info, times=times, show_func=plot_fun, layout=layout,
decim=1, colorbar=False, ylim=ylim_, cmap=None,
layout_scale=layout_scale, border=border, title=title,
x_label='Time (s)', vline=vline)
if proj == 'interactive':
for e in evoked:
_check_delayed_ssp(e)
params = dict(evokeds=evoked, times=times,
plot_update_proj_callback=_plot_update_evoked_topo,
projs=evoked[0].info['projs'], fig=fig)
_draw_proj_checkbox(None, params)
return fig
def _plot_update_evoked_topo(params, bools):
"""Helper function to update topo sensor plots"""
evokeds, times, fig = [params[k] for k in ('evokeds', 'times', 'fig')]
projs = [proj for ii, proj in enumerate(params['projs'])
if ii in np.where(bools)[0]]
params['proj_bools'] = bools
evokeds = [e.copy() for e in evokeds]
for e in evokeds:
e.info['projs'] = []
e.add_proj(projs)
e.apply_proj()
# make sure to only modify the time courses, not the ticks
axes = fig.get_axes()
n_lines = len(axes[0].lines)
n_diff = len(evokeds) - n_lines
ax_slice = slice(abs(n_diff)) if n_diff < 0 else slice(n_lines)
for ax in axes:
lines = ax.lines[ax_slice]
for line, evoked in zip(lines, evokeds):
line.set_data(times, evoked.data[ax._mne_ch_idx])
fig.canvas.draw()
@deprecated('`plot_topo_tfr` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_tfr(epochs, tfr, freq, layout=None, colorbar=True, vmin=None,
vmax=None, cmap='RdBu_r', layout_scale=0.945, title=None):
"""Plot time-frequency data on sensor layout
Clicking on the time-frequency map of an individual sensor opens a
new figure showing the time-frequency map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the power
tfr : 3D-array shape=(n_sensors, n_freqs, n_times)
The time-frequency data. Must have the same channels as Epochs.
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap | str
Colors to be mapped to the values. Default 'RdBu_r'.
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of time-frequency data at sensor locations
"""
if vmin is None:
vmin = tfr.min()
if vmax is None:
vmax = tfr.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
tfr_imshow = partial(_imshow_tfr, tfr=tfr.copy(), freq=freq, cmap=cmap)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=tfr_imshow, layout=layout, border='w',
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
@deprecated('`plot_topo_power` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_power(epochs, power, freq, layout=None, baseline=None,
mode='mean', decim=1, colorbar=True, vmin=None, vmax=None,
cmap=None, layout_scale=0.945, dB=True, title=None):
"""Plot induced power on sensor layout
Clicking on the induced power map of an individual sensor opens a
new figure showing the induced power map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the power
power : 3D-array
First return value from mne.time_frequency.induced_power
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
If None, baseline no correction will be performed.
decim : integer
Increment for selecting each nth time slice
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas
dB : bool
If True, log10 will be applied to the data.
title : str
Title of the figure.
Returns
-------
fig : Instance of matplotlib.figure.Figure
Images of induced power at sensor locations
"""
times = epochs.times[::decim].copy()
if mode is not None:
if baseline is None:
baseline = epochs.baseline
power = rescale(power.copy(), times, baseline, mode)
times *= 1e3
if dB:
power = 20 * np.log10(power)
if vmin is None:
vmin = power.min()
if vmax is None:
vmax = power.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
power_imshow = partial(_imshow_tfr, tfr=power.copy(), freq=freq)
fig = _plot_topo(info=epochs.info, times=times,
show_func=power_imshow, layout=layout, decim=decim,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
@deprecated('`plot_topo_phase_lock` is deprecated and will be removed in '
'MNE 0.9. Use `plot_topo` method on TFR objects.')
def plot_topo_phase_lock(epochs, phase, freq, layout=None, baseline=None,
mode='mean', decim=1, colorbar=True, vmin=None,
vmax=None, cmap=None, layout_scale=0.945,
title=None):
"""Plot phase locking values (PLV) on sensor layout
Clicking on the PLV map of an individual sensor opens a new figure
showing the PLV map of the selected sensor.
Parameters
----------
epochs : instance of Epochs
The epochs used to generate the phase locking value
phase_lock : 3D-array
Phase locking value, second return value from
mne.time_frequency.induced_power.
freq : array-like
Frequencies of interest as passed to induced_power
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout is
inferred from the data.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent' | None
Do baseline correction with ratio (phase is divided by mean
phase during baseline) or z-score (phase is divided by standard
deviation of phase during baseline after subtracting the mean,
phase = [phase - mean(phase_baseline)] / std(phase_baseline)).
If None, baseline no correction will be performed.
decim : integer
Increment for selecting each nth time slice
colorbar : bool
If true, colorbar will be added to the plot
vmin : float
Minimum value mapped to lowermost color
vmax : float
        Maximum value mapped to uppermost color
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
Returns
-------
    fig : Instance of matplotlib.figure.Figure
Phase lock images at sensor locations
"""
times = epochs.times[::decim] * 1e3
if mode is not None:
if baseline is None:
baseline = epochs.baseline
phase = rescale(phase.copy(), times, baseline, mode)
if vmin is None:
vmin = phase.min()
if vmax is None:
vmax = phase.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
phase_imshow = partial(_imshow_tfr, tfr=phase.copy(), freq=freq)
fig = _plot_topo(info=epochs.info, times=times,
show_func=phase_imshow, layout=layout, decim=decim,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border='w',
x_label='Time (s)', y_label='Frequency (Hz)')
return fig
def _erfimage_imshow(ax, ch_idx, tmin, tmax, vmin, vmax, ylim=None,
data=None, epochs=None, sigma=None,
order=None, scalings=None, vline=None,
x_label=None, y_label=None, colorbar=False):
"""Aux function to plot erfimage on sensor topography"""
import matplotlib.pyplot as plt
this_data = data[:, ch_idx, :].copy()
ch_type = channel_type(epochs.info, ch_idx)
    if ch_type not in scalings:
raise KeyError('%s channel type not in scalings' % ch_type)
this_data *= scalings[ch_type]
if callable(order):
order = order(epochs.times, this_data)
if order is not None:
this_data = this_data[order]
this_data = ndimage.gaussian_filter1d(this_data, sigma=sigma, axis=0)
ax.imshow(this_data, extent=[tmin, tmax, 0, len(data)], aspect='auto',
origin='lower', vmin=vmin, vmax=vmax, picker=True)
if x_label is not None:
plt.xlabel(x_label)
if y_label is not None:
plt.ylabel(y_label)
if colorbar:
plt.colorbar()
def plot_topo_image_epochs(epochs, layout=None, sigma=0.3, vmin=None,
vmax=None, colorbar=True, order=None, cmap=None,
layout_scale=.95, title=None, scalings=None):
"""Plot Event Related Potential / Fields image on topographies
Parameters
----------
epochs : instance of Epochs
The epochs.
layout: instance of Layout
System specific sensor positions.
sigma : float
The standard deviation of the Gaussian smoothing to apply along
the epoch axis to apply in the image.
vmin : float
The min value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
vmax : float
The max value in the image. The unit is uV for EEG channels,
fT for magnetometers and fT/cm for gradiometers.
colorbar : bool
Display or not a colorbar.
order : None | array of int | callable
If not None, order is used to reorder the epochs on the y-axis
of the image. If it's an array of int it should be of length
the number of good epochs. If it's a callable the arguments
passed are the times vector and the data as 2d array
(data.shape[1] == len(times)).
cmap : instance of matplotlib.pyplot.colormap
Colors to be mapped to the values.
layout_scale: float
scaling factor for adjusting the relative size of the layout
on the canvas.
title : str
Title of the figure.
scalings : dict | None
The scalings of the channel types to be applied for plotting. If
None, defaults to `dict(eeg=1e6, grad=1e13, mag=1e15)`.
Returns
-------
fig : instance of matplotlib figure
Figure distributing one image per channel across sensor topography.
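    Examples
    --------
    Illustrative sketch only; assumes ``epochs`` is an existing Epochs object.
    >>> fig = plot_topo_image_epochs(epochs, sigma=0.5, colorbar=True)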
"""
scalings = _mutable_defaults(('scalings', scalings))[0]
data = epochs.get_data()
if vmin is None:
vmin = data.min()
if vmax is None:
vmax = data.max()
if layout is None:
from ..layouts.layout import find_layout
layout = find_layout(epochs.info)
erf_imshow = partial(_erfimage_imshow, scalings=scalings, order=order,
data=data, epochs=epochs, sigma=sigma)
fig = _plot_topo(info=epochs.info, times=epochs.times,
show_func=erf_imshow, layout=layout, decim=1,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title,
border='w', x_label='Time (s)', y_label='Epoch')
return fig
| bsd-2-clause | -3,022,529,476,795,081,000 | 36.768276 | 79 | 0.616281 | false |
jodogne/OrthancMirror | OrthancServer/Resources/Samples/Python/ArchiveStudiesInTimeRange.py | 1 | 3416 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Orthanc - A Lightweight, RESTful DICOM Store
# Copyright (C) 2012-2016 Sebastien Jodogne, Medical Physics
# Department, University Hospital of Liege, Belgium
# Copyright (C) 2017-2021 Osimis S.A., Belgium
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import os.path
import sys
import RestToolbox
def PrintHelp():
print('Download ZIP archives for all the studies generated '
'during a given time range (according to the StudyDate tag)\n')
print('Usage: %s <URL> <StartDate> <EndDate> <TargetFolder>\n' % sys.argv[0])
print('Example: %s http://127.0.0.1:8042/ 20150101 20151231 /tmp/\n' % sys.argv[0])
exit(-1)
def CheckIsDate(date):
if len(date) != 8 or not date.isdigit():
print '"%s" is not a valid date!\n' % date
exit(-1)
if len(sys.argv) != 5:
PrintHelp()
URL = sys.argv[1]
START = sys.argv[2]
END = sys.argv[3]
TARGET = sys.argv[4]
CheckIsDate(START)
CheckIsDate(END)
def GetTag(tags, key):
if key in tags:
return tags[key]
else:
return 'No%s' % key
# Loop over the studies
for studyId in RestToolbox.DoGet('%s/studies' % URL):
# Retrieve the DICOM tags of the current study
study = RestToolbox.DoGet('%s/studies/%s' % (URL, studyId))['MainDicomTags']
# Retrieve the DICOM tags of the parent patient of this study
# Case 1: Baseline version
patient = RestToolbox.DoGet('%s/studies/%s/patient' % (URL, studyId))['MainDicomTags']
# Case 2: Tweaked version that can be used if several patients
# share the same "Patient ID", but have different "Patient Name"
# (which is invalid according to the DICOM standard).
# https://groups.google.com/d/msg/orthanc-users/58AxIkxFbZs/N6Knub8MAgAJ
# patient = RestToolbox.DoGet('%s/studies/%s' % (URL, studyId)) ['PatientMainDicomTags']
# Check that the StudyDate tag lies within the given range
studyDate = study['StudyDate'][:8]
if studyDate >= START and studyDate <= END:
# Create a filename
filename = '%s - %s %s - %s.zip' % (GetTag(study, 'StudyDate'),
GetTag(patient, 'PatientID'),
GetTag(patient, 'PatientName'),
GetTag(study, 'StudyDescription'))
# Remove any non-ASCII character in the filename
filename = filename.encode('ascii', errors = 'replace').translate(None, r"'\/:*?\"<>|!=").strip()
# Download the ZIP archive of the study
print('Downloading %s' % filename)
zipContent = RestToolbox.DoGet('%s/studies/%s/archive' % (URL, studyId))
# Write the ZIP archive at the proper location
with open(os.path.join(TARGET, filename), 'wb') as f:
f.write(zipContent)
| gpl-3.0 | -516,811,386,712,827,460 | 35.731183 | 105 | 0.650761 | false |
HugoMMRabson/fonsa | src/test/old/backend/svrtools/net/netwrk/testFirewall.py | 1 | 1579 | '''
Created on Mar 5, 2018
@author: johnrabsonjr
Functions:
generate_clearnet_firewall_configuration_file
generate_darknet_firewall_configuration_file
activate_firewall_configuration_file
configure_firewall
Classes:
_BackendNetworkFirewall
'''
import unittest
class Test_generate_clearnet_firewall_configuration_file(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_generate_clearnet_firewall_configuration_file(self):
# generate_clearnet_firewall_configuration_file
pass
class Test_generate_darknet_firewall_configuration_file(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_generate_darknet_firewall_configuration_file(self):
# generate_darknet_firewall_configuration_file
pass
class Test_activate_firewall_configuration_file(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_activate_firewall_configuration_file(self):
# activate_firewall_configuration_file
pass
class Test_configure_firewall(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_configure_firewall(self):
pass
class Test_BackendNetworkFirewall(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_BackendNetworkFirewall(self):
pass
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| gpl-3.0 | -8,070,229,525,640,989,000 | 18.024096 | 76 | 0.673211 | false |
anuragpapineni/Hearthbreaker-evolved-agent | hearthbreaker/agents/trade_agent.py | 1 | 4738 | # from hearthbreaker.agents.basic_agents import RandomAgent
from hearthbreaker.agents.trade.possible_play import PlayMixin
from hearthbreaker.agents.trade.trade import TradeMixin, AttackMixin
from hearthbreaker.agents.trade.util import Util
import hearthbreaker.cards.battlecries
class BattlecryType:
@staticmethod
def buff_battlecries():
res = []
res.append(hearthbreaker.cards.battlecries.heal_two)
res.append(hearthbreaker.cards.battlecries.heal_three)
res.append(hearthbreaker.cards.battlecries.give_divine_shield)
res.append(hearthbreaker.cards.battlecries.give_stealth)
res.append(hearthbreaker.cards.battlecries.give_three_health)
res.append(hearthbreaker.cards.battlecries.two_temp_attack)
res.append(hearthbreaker.cards.battlecries.give_windfury)
return res
@staticmethod
def damage_battlecries():
res = []
res.append(hearthbreaker.cards.battlecries.silence)
res.append(hearthbreaker.cards.battlecries.deal_one_damage)
res.append(hearthbreaker.cards.battlecries.deal_two_damage)
res.append(hearthbreaker.cards.battlecries.deal_three_damage)
res.append(hearthbreaker.cards.battlecries.change_attack_to_one)
res.append(hearthbreaker.cards.battlecries.take_control_of_minion)
return res
@staticmethod
def target_type(cry):
if cry in BattlecryType.buff_battlecries():
return "Friendly"
elif cry in BattlecryType.damage_battlecries():
return "Enemy"
else:
return None
def target_type_for_card(card):
res = None
minion = card.create_minion(None)
if hasattr(minion, "battlecry"):
res = BattlecryType.target_type(minion.battlecry)
return res
class ChooseTargetMixin:
def choose_target_enemy(self, all_targets):
if len(all_targets) == 0:
raise Exception("No targets")
targets = self.prune_targets(all_targets, False)
if len(targets) == 0:
return Util.rand_el(all_targets)
if not self.current_trade:
return Util.rand_prefer_minion(targets)
# raise Exception("No current trade")
for target in targets:
if self.current_trade.opp_minion == target:
return target
# raise Exception("Could not find target {}".format(target))
return Util.rand_prefer_minion(targets)
def choose_target_friendly(self, targets):
pruned = self.prune_targets(targets, True)
if len(pruned) == 0:
return Util.rand_el(targets)
return Util.rand_el(pruned)
def prune_targets(self, targets, get_friendly):
res = []
for target in targets:
is_friendly_minion = any(map(lambda c: c == target, self.player.minions))
is_friendly_hero = target == self.player.hero
is_friendly = is_friendly_minion or is_friendly_hero
if is_friendly == get_friendly:
res.append(target)
return res
def has_friendly_targets(self, targets):
return len(self.prune_targets(targets, True)) > 0
def should_target_self(self, targets):
cry_type = BattlecryType.target_type_for_card(self.last_card_played)
if cry_type == "Friendly":
return True
elif cry_type == "Enemy":
return False
elif self.last_card_played.name == "Elven Archerzzz":
return False
elif self.has_friendly_targets(targets):
return True
else:
return False
def choose_target_inner(self, targets):
if len(targets) == 0:
return None
if self.should_target_self(targets):
return self.choose_target_friendly(targets)
else:
return self.choose_target_enemy(targets)
def choose_target(self, targets):
res = self.choose_target_inner(targets)
# print("Target {}".format(res))
return res
class NullCard:
def __init__(self):
self.name = "Null Card"
def create_minion(self, player):
return None
class TradeAgent(TradeMixin, AttackMixin, PlayMixin, ChooseTargetMixin):
def __init__(self):
super().__init__()
self.current_trade = None
self.last_card_played = NullCard()
def do_turn(self, player, game):
self.player = player
self.play_cards(player)
self.attack(player)
if not player.game.game_ended:
self.play_cards(player)
return
def do_card_check(self, cards):
return [True, True, True, True]
def choose_index(self, card, player):
return 0
| mit | -1,693,280,705,883,882,800 | 31.231293 | 85 | 0.632967 | false |
fabiobalzano/LED | maingui.py | 1 | 5125 | """
Copyright (c) 2012, [email protected] All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, self
list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, self list of conditions and
the following disclaimer in the documentation and/or other materials
provided with the distribution. Neither the name of [email protected] nor
the names of its contributors may be used to endorse or promote products
derived from self software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
[email protected]
"""
import sys
import pygame
from pgu import gui
from dialogs import EditProgram
from dialogs import EditOptions
from dialogs import Confirm
########################################################################################
# STATIC VALUES
########################################################################################
# Main GUI Text color
FG_COLOR = (255,255,255)
########################################################################################
class LEDControl(gui.Table):
""" MAIN control GUI """
def __init__(self, datastore, timemanager, form, **params):
gui.Table.__init__(self,**params)
def accell_changed():
self.multiplier = form['accelerator'].value
accell_indicator.set_text('Accelleratore: %sX' % self.multiplier)
datastore.db['options']['accelerator'] = self.multiplier
datastore.save()
self.tr()
accell_indicator = gui.Label("Accelleratore: 0X",color=FG_COLOR)
self.td(accell_indicator, colspan=2)
self.tr()
e = gui.HSlider(0,-10,10,size=20,width=200,height=16,name='accelerator')
e.connect(gui.CHANGE, accell_changed)
e.value = datastore.db['options']['accelerator']
self.td(e, colspan=2)
self.tr()
self.td(gui.Label("PAUSA",color=FG_COLOR))
self.td(gui.Switch(value=False,name='pause'))
self.tr()
self.td(gui.Label("BlackOut!",color=FG_COLOR))
self.td(gui.Switch(value=False,name='blackout'))
self.tr()
self.td(gui.Label("Go Flash!",color=FG_COLOR))
self.td(gui.Switch(value=False,name='flash'))
dlg = EditProgram(datastore)
#Hook on closing dialog window
def dialog_close():
#refresh the datastore for changes
datastore.load()
form['pause'].value = False
def dialog_open(arg):
#pause the game
form['pause'].value = True
#reset of the dialog window
dlg.__init__(datastore, arg)
dlg.connect(gui.CLOSE, dialog_close)
dlg.open()
btn_conf = gui.Button("Programma Principale", width=200, height=40)
btn_conf.connect(gui.CLICK, dialog_open, 'contents')
self.tr()
self.td(btn_conf, colspan=2)
btn_fla = gui.Button("Programma Flash", width=200, height=40)
btn_fla.connect(gui.CLICK, dialog_open, 'flash')
self.tr()
self.td(btn_fla, colspan=2)
opt = EditOptions(datastore)
#Hook on closing options window
def options_close():
            #refresh the datastore for changes
timemanager.init_options(datastore)
self.send(gui.CHANGE)
form['pause'].value = False
def options_open():
#pause the game
form['pause'].value = True
            #hook the close handler and show the options dialog
opt.connect(gui.CLOSE, options_close)
opt.open()
btn_conf = gui.Button("Impostazione Orari", width=200, height=40)
btn_conf.connect(gui.CLICK, options_open)
self.tr()
self.td(btn_conf, colspan=2)
def openconfirmquit():
confirm = Confirm()
confirm.connect(gui.CHANGE, sendquit)
#confirm.connect(gui.CLOSE,closingme)
confirm.open()
def sendquit():
pygame.quit()
sys.exit()
btn_exit = gui.Button("ESCI", width=200, height=40)
btn_exit.connect(gui.CLICK,openconfirmquit)
self.tr()
self.td(btn_exit, colspan=2)
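# Hedged usage sketch (not part of the original file): LEDControl is a pgu
# Table, so the application entry point would typically create the pgu Form it
# reads from and run it inside a Desktop. The wiring below is an assumption
# for illustration only; `datastore` and `timemanager` stand for the objects
# supplied by the real main module.
#
#   form = gui.Form()
#   panel = LEDControl(datastore, timemanager, form)
#   app = gui.Desktop()
#   app.run(panel)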
| bsd-3-clause | -911,846,867,217,889,300 | 34.10274 | 88 | 0.618341 | false |
jboomer/python-isobus | setup.py | 1 | 2170 | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='isobus',
version='0.0.1',
description='ISO11873 implementation',
long_description=long_description,
url='https://github.com/jboomer/python-isobus',
author='jboomer',
#author_email='EMAIL HERE',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: CAN',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='can j1939 isobus',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
install_requires=['python-can'],
extras_require={
'dev': ['check-manifest'],
'test': ['coverage'],
},
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
entry_points={
'console_scripts': [
'vtclient=isobus.bin.vtclient:main',
],
},
)
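# Hedged usage note (not part of the original setup script): after installing
# the package (e.g. `pip install .`), the console_scripts entry point above is
# expected to expose a `vtclient` command dispatching to
# isobus.bin.vtclient:main, e.g.
#
#   $ pip install .
#   $ vtclient --help
#
# The actual CLI flags of vtclient are defined elsewhere in the package.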
| mit | 1,452,935,112,713,547,000 | 28.324324 | 94 | 0.62765 | false |
acbraith/crossfit_scraper | data_analysis.py | 1 | 23756 | from crossfit_api import get_analysis_dataframe
import numpy as np
import pandas as pd
from memoize import persistent_memoize, memoize
from functools import partial
from sklearn.pipeline import Pipeline
from sklearn.feature_selection import SelectFromModel, RFECV
from sklearn.linear_model import Lasso, RANSACRegressor, LinearRegression
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, IsolationForest
from sklearn.tree import DecisionTreeRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import RobustScaler, StandardScaler
from multiprocessing import Pool
import itertools, random, os, sys, time
from warnings import warn
import fancyimpute
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from datetime import datetime
from fancyimpute import Solver
class ScaleKNeighborsRegressor(KNeighborsRegressor):
def predict(self, X):
# standardise X
X = self.scaler.transform(X)
return super().predict(X)
def fit(self, X, y):
# standardise X
self.scaler = RobustScaler().fit(X)
X = self.scaler.transform(X)
return super().fit(X,y)
class RecursiveKNN(Solver):
def __init__(self, k=5, verbose=0,
min_value=None,
max_value=None,
normalizer=None,
feature_selector=None,
regressor=partial(ScaleKNeighborsRegressor, weights='distance'),
n_jobs=1):
Solver.__init__(
self,
min_value=min_value,
max_value=max_value,
normalizer=normalizer)
self.k = k
self.verbose = verbose
self.feature_selector = feature_selector
self.regressor = regressor
self.n_jobs = n_jobs
def _transform(self, feature_selector, X):
# alternative feature selector transform to remove some NaN checks
mask = feature_selector.get_support()
if not mask.any():
warn("No features were selected: either the data is"
" too noisy or the selection test too strict.",
UserWarning)
return np.empty(0).reshape((X.shape[0], 0))
if len(mask) != X.shape[1]:
raise ValueError("X has a different shape than during fitting.")
return X[:, mask]
def _get_reg(self):
if self.feature_selector != None:
reg = Pipeline([
('feature_selection', SelectFromModel(self.feature_selector())),
('regression', ScaleKNeighborsRegressor(algorithm='brute'))
])
else:
reg = ScaleKNeighborsRegressor()
return reg
def _impute_row(self, i):
row = self.X[i,:]
known_idx = np.where(~np.isnan(row))[0]
unknown_idx = np.where(np.isnan(row))[0]
# todo make this do one col at a time
X_ = self.X[:,known_idx]
y_ = self.X[:,unknown_idx]
y_pred = np.zeros_like(unknown_idx)
if unknown_idx.size > 0:
reg = self.regressor()
full_rows = np.logical_and(~np.isnan(X_).any(axis=1), ~np.isnan(y_).any(axis=1))
X_ = X_[full_rows]
y_ = y_[full_rows]
reg.fit(X_, y_)
y_pred = reg.predict(row[known_idx].reshape(1,-1))
return (i, unknown_idx, y_pred)
    def _impute_unknown_idx(self, unknown_idx):
known_idx = [x for x in range(self.X.shape[1]) if x not in unknown_idx]
row_idxs = np.argwhere(np.logical_and(
np.isnan(self.X[:,unknown_idx]).all(axis=1),
~np.isnan(self.X[:,known_idx]).any(axis=1)))
y_pred = np.zeros((len(row_idxs),len(unknown_idx)))
if len(row_idxs) > 0:
reg = self.regressor()
selector = SelectFromModel(self.feature_selector())
# predict 1 feature at a time
for i,idx in enumerate(unknown_idx):
full_rows = np.argwhere(np.logical_and(
~np.isnan(self.X[:,known_idx]).any(axis=1),
~np.isnan(self.X[:,[idx]]).any(axis=1)))
# use these rows to perform feature selection
selector.fit(
self.X[full_rows,known_idx],
self.X[full_rows,[idx]])
# now recalculate full rows based on selected features
full_rows = np.argwhere(np.logical_and(
~np.isnan(self._transform(selector, self.X[:,known_idx])).any(axis=1),
~np.isnan(self.X[:,[idx]]).any(axis=1)))
# and fit regression model, then predict
reg.fit(
self._transform(selector, self.X[full_rows,known_idx]),
self.X[full_rows,[idx]])
# memory error for predicting too many at once
# so split into chunks
chunksize = 10000
for chunk_idx in range(0, len(row_idxs), chunksize):
y_pred[chunk_idx:chunk_idx+chunksize, [i]] = \
reg.predict(
self._transform(selector,
self.X[row_idxs[chunk_idx:chunk_idx+chunksize], known_idx]))
if self.verbose > 1:
print("Imputed",len(unknown_idx),"features in",len(row_idxs),"rows\n",
"\tUsing data from", len(full_rows),"rows")
#y_pred[:,[i]] = reg.predict(self.X[row_idxs,known_idx])
return (row_idxs, unknown_idx, y_pred)
def solve(self, X, missing_mask):
self.X = np.where(~missing_mask, X, np.nan)
imputed_X = np.where(~missing_mask, X, np.nan)
# do rows based on what's missing
pool = Pool(processes=self.n_jobs)
cols = np.argwhere(np.isnan(self.X).any(axis=0)).flatten()
num_combs = [j * len(list(itertools.combinations(cols,j))) for j in range(1,len(cols))]
cum_num_combs = np.cumsum(num_combs)
t0 = time.time()
for j in range(1,len(cols)):
np.savetxt(str(j)+'.csv', imputed_X, delimiter=',')
if self.verbose > 0:
if j > 1:
print("\tTime elapsed:", time.time()-t0)
print("\t", round(100*cum_num_combs[j-1]/cum_num_combs[-1],1),"% complete")
print("\tEstimated total time:", (time.time()-t0)/cum_num_combs[j-1] * \
cum_num_combs[-1])
print("Imputing",len(list(itertools.combinations(cols,j))),
"feature combinations of size",j,"/",len(cols)-1)
for i, unknown_idx, y_pred in \
                pool.imap(self._impute_unknown_idx, itertools.combinations(cols,j), chunksize=100):
imputed_X[i,unknown_idx] = y_pred
return imputed_X
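# Hedged usage sketch (illustration only; mirrors how RecursiveKNN is used in
# _get_imputed_dataframe further below):
#
#   imputer = RecursiveKNN(verbose=1, n_jobs=4,
#                          feature_selector=DecisionTreeRegressor)
#   completed = pd.DataFrame(imputer.complete(df),
#                            index=df.index, columns=df.columns)
#
# where `df` stands for any all-numeric DataFrame with NaN gaps (the name is
# an assumption for this example).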
# check for extreme values (eg 666 pullups, 10sec 400m...)
def clear_outliers(data):
data = data.copy()
cols = [
'Age','Height','Weight',
'Back Squat','Clean and Jerk','Snatch',
'Deadlift','Fight Gone Bad','Max Pull-ups',
'Fran','Grace','Helen',
'Filthy 50','Sprint 400m','Run 5k']
ranges = [
(16,80),(100,250),(30,150),
(20,300),(20,250),(20,200),
(20,400),(20,750),(0,150),
(1.5,30),(1,60),(3,60),
(10,120),(0.72,3),(12.5,60)
]
'''ranges = [
(16,80),(100,250),(30,150),
(20,300),(20,250),(20,200),
(20,400),(20,600),(0,120),
(1.5,10),(1,15),(3,15),
(10,60),(0.72,3),(12.5,45)
]'''
for col,valid_range in zip(cols, ranges):
outliers = (valid_range[0] > data[col]) | (data[col] > valid_range[1])
i = 0
for idx in np.argwhere(outliers==True).flatten():
i += 1
print(i, "outliers in", col)
data[col] = data[col].where(~outliers, np.nan)
# check for other outliers
# this doesn't work so well
'''clf = IsolationForest(contamination=1/1000)
clf.fit(data.dropna())
outliers = clf.predict(data.fillna(data.mean()))
outliers = outliers == -1
for idx in np.argwhere(outliers==True).flatten():
print(pd.DataFrame(pd.DataFrame(data.loc[idx]).transpose()))
raise Exception'''
return data
@persistent_memoize('get_imputed_dataframe')
def _get_imputed_dataframe(*args, **kwargs):
def impute_rows(data, X_cols, y_cols):
rows_idx = np.argwhere(np.logical_and(
np.isnan(data[:,y_cols]).all(axis=1),
~np.isnan(data[:,X_cols]).any(axis=1)))
y_pred = np.zeros((len(rows_idx),len(y_cols)))
if len(rows_idx) > 0:
print("\tImputing",len(rows_idx),"rows")
full_rows = np.argwhere(np.logical_and(
~np.isnan(data[:,X_cols]).any(axis=1),
~np.isnan(data[:,y_cols]).any(axis=1)))
reg = RANSACRegressor()
reg.fit(
data[full_rows,X_cols],
data[full_rows,y_cols])
y_pred = reg.predict(data[rows_idx,X_cols]).clip(min=0)
return (rows_idx, y_cols, y_pred)
def impute_update_data(data, X_cols, y_cols):
print(X_cols,"predicting",y_cols)
cols = list(data)
X_cols = [cols.index(x) for x in X_cols]
y_cols = [cols.index(y) for y in y_cols]
matrix = data.as_matrix()
rows_idx, y_cols, y_pred = impute_rows(matrix, X_cols, y_cols)
matrix[rows_idx,y_cols] = y_pred
return pd.DataFrame(matrix, index=data.index, columns=data.columns)
data = get_analysis_dataframe(*args, **kwargs)
data = data.astype(float)
data = clear_outliers(data)
Xys = [
#(['Height'],['Weight']),
#(['Weight'],['Height']),
(['Snatch'],['Clean and Jerk']),
(['Clean and Jerk'],['Snatch']),
(['Snatch','Clean and Jerk'],['Back Squat']),
(['Snatch','Clean and Jerk','Back Squat'],['Deadlift']),
(['Back Squat'],['Deadlift']),
(['Deadlift'],['Back Squat']),
#(['Run 5k'],['Sprint 400m']),
#(['Sprint 400m'],['Run 5k']),
(['Weight','Snatch','Clean and Jerk','Back Squat','Deadlift'],['Max Pull-ups']),
(['Weight','Back Squat','Deadlift'],['Max Pull-ups']),
(['Weight','Snatch','Clean and Jerk'],['Max Pull-ups']),
#(['Filthy 50'],['Fight Gone Bad']),
#(['Fight Gone Bad'],['Filthy 50']),
(['Max Pull-ups', 'Clean and Jerk'],['Fran']),
(['Clean and Jerk', 'Fran'],['Grace']),
(['Max Pull-ups', 'Sprint 400m', 'Run 5k'],['Helen']),
#(['Max Pull-ups', 'Grace'],['Fran']),
]
for x,y in Xys:
data = impute_update_data(data, x, y)
data = clear_outliers(data)
imputer = RecursiveKNN(verbose=1,n_jobs=4,
feature_selector=DecisionTreeRegressor)
data = pd.DataFrame(imputer.complete(data), index=data.index, columns=data.columns)
return data
def get_imputed_dataframe(competition='open', year=2017, division='men',
sort='overall', fittest_in='region', region='worldwide'):
return _get_imputed_dataframe(competition, year, division, sort, fittest_in, region)
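# Hedged example call (illustration only): the analysis functions below fetch
# the imputed Open leaderboard for a single division like this, leaving the
# remaining keyword arguments at their defaults:
#
#   men_open = get_imputed_dataframe(division='men', competition='open')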
# ANALYSIS
def box_plots(data, title='Open'):
plt.suptitle(title + " Box Plots")
kwargs = {'showfliers':False}
stats = ['Age', 'Height', 'Weight']
weights = ['Deadlift','Back Squat', 'Clean and Jerk', 'Snatch']
reps = ['Fight Gone Bad', 'Max Pull-ups']
times = ['Fran', 'Grace', 'Helen', 'Filthy 50', 'Sprint 400m', 'Run 5k']
for i,x in enumerate(stats):
plt.subplot(4,3,i+1)
plt.boxplot(list(data[x].dropna()),labels=[x], **kwargs)
plt.subplot(4,1,2)
plt.boxplot([list(data[x].dropna()) for x in weights], labels=weights, vert=False, **kwargs)
for i,x in enumerate(reps):
plt.subplot(4,2,5+i)
plt.boxplot(list(data[x].dropna()),labels=[x], vert=False, **kwargs)
plt.subplot(4,1,4)
for i,x in enumerate(times):
plt.subplot(4,6,19+i)
plt.boxplot(list(data[x].dropna()),labels=[x], **kwargs)
plt.show()
def box_plots_all(open_data, regionals_data, games_data, title, my_data, metric):
def mouse_click(event):
ax = event.inaxes
stat = ''
if ax in ax_stats:
stat = stats[ax_stats.index(ax)]
val = event.ydata
elif ax in ax_weights:
stat = weights[ax_weights.index(ax)]
val = event.ydata
elif ax in ax_reps:
stat = reps[ax_reps.index(ax)]
val = event.xdata
elif ax in ax_times:
stat = times[ax_times.index(ax)]
val = event.ydata
if event.button == 1:
my_data[stat]=val
elif event.button == 2:
nonlocal box_plots_on, ax_stats, ax_weights, ax_reps, ax_times
box_plots_on = not(box_plots_on)
ax_stats, ax_weights, ax_reps, ax_times = draw_plots()
if event.button == 3:
if stat in my_data:
del my_data[stat]
plot_my_data()
def plot_my_data():
nonlocal lines
for l in lines:
try:
l.remove()
except:
l.pop(0).remove()
lines = []
for x,ax in zip(stats, ax_stats):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([0,4],[my_data[x],my_data[x]])
lines += [l]
for x,ax in zip(weights, ax_weights):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([0,4],[my_data[x],my_data[x]])
lines += [l]
for x,ax in zip(reps, ax_reps):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([my_data[x],my_data[x]], [0,4])
lines += [l]
for x,ax in zip(times, ax_times):
if x in my_data:
ax.set_prop_cycle(None)
l = ax.plot([0,4],[my_data[x],my_data[x]])
lines += [l]
rank,pcntile = predict_ranking(open_data, my_data)
filled_my_data = {}
for k,v in my_data.items(): filled_my_data[k] = v
for k in stats+weights+reps+times:
if k not in filled_my_data:
filled_my_data[k] = np.nan
table_text = []
table_text.append([
'Age',fmt_age(filled_my_data['Age'],0),
'Height',fmt_height(filled_my_data['Height'],0),
'Weight',fmt_weight(filled_my_data['Weight'],0),
'',''])
table_text.append([
'Back Squat',fmt_weight(filled_my_data['Back Squat'],0),
'Deadlift',fmt_weight(filled_my_data['Deadlift'],0),
'Fran',fmt_time(filled_my_data['Fran'],0),
'Filthy 50',fmt_time(filled_my_data['Filthy 50'],0)])
table_text.append([
'Clean and Jerk',fmt_weight(filled_my_data['Clean and Jerk'],0),
'Fight Gone Bad',fmt_reps(filled_my_data['Fight Gone Bad'],0),
'Grace',fmt_time(filled_my_data['Grace'],0),
'Sprint 400m',fmt_time(filled_my_data['Sprint 400m'],0)])
table_text.append([
'Snatch',fmt_weight(filled_my_data['Snatch'],0),
'Max Pull-ups',fmt_reps(filled_my_data['Max Pull-ups'],0),
'Helen',fmt_time(filled_my_data['Helen'],0),
'Run 5k',fmt_time(filled_my_data['Run 5k'],0)])
table_text.append([
'','',
'','',
'Estimated Ranking', str(round(rank,0)),
'Percentile', str(round(pcntile,2))])
font = {
'family': 'monospace',
'color': 'k',
'weight': 'heavy',
'size': 12,
}
ax = plt.subplot(5,1,5)
tab = ax.table(cellText=table_text, loc='center', bbox=[0, -.5, 1, 1.25], fontsize=12,
colWidths=[1.5,1] * 4)
cells = tab.properties()['celld']
for i in range(5):
for j in range(4):
cells[i,2*j]._loc = 'right'
cells[i,2*j+1]._loc = 'left'
cells[i,2*j].set_linewidth(0)
cells[i,2*j+1].set_linewidth(0)
ax.axis('tight')
ax.axis('off')
lines += [tab]
plt.gcf().canvas.draw_idle()
box_plots_on = True
lines = []
plt.figure().canvas.mpl_connect('button_press_event', mouse_click)
maintitle = dict(fontsize=18, fontweight='bold')
subtitle = dict(fontsize=12, fontweight='bold')
plt.suptitle(title + " Box Plots", **maintitle)
plt.rcParams['axes.facecolor'] = 'whitesmoke'
boxprops = dict(linewidth=1, alpha=0.8)
medianprops = dict(linewidth=2, color='k', alpha=0.8)
whiskerprops = dict(linewidth=1, color='k', linestyle='-')
kwargs = dict(sym='', whis=[1,99], patch_artist=True, widths=0.5, #notch=True, bootstrap=1000,
medianprops=medianprops, boxprops=boxprops, whiskerprops=whiskerprops)
stats = ['Age', 'Height', 'Weight']
weights = ['Deadlift','Back Squat', 'Clean and Jerk', 'Snatch']
reps = ['Fight Gone Bad', 'Max Pull-ups']
times = ['Fran', 'Grace', 'Helen', 'Filthy 50', 'Sprint 400m', 'Run 5k']
colors = ['steelblue', 'olivedrab', 'indianred']
def add_colors(bplot):
for patch,color in zip(bplot['boxes'],colors):
patch.set_facecolor(color)
def fmt_age(x, pos):
if np.isnan(x): return ''
x = round(x)
return str(x)
def fmt_height(x, pos):
if np.isnan(x): return ''
if metric:
return str(int(x))+" cm"
ft, inches = divmod(round(x), 12)
ft, inches = map(int, [ft, inches])
return ('{}\''.format(ft) if not inches
else '{}\'{}"'.format(ft, inches) if ft
else '{}"'.format(inches))
def fmt_weight(x, pos):
if np.isnan(x): return ''
x = int(x)
if metric:
return str(x)+" kg"
return str(x)+" lbs"
def fmt_reps(x, pos):
if np.isnan(x): return ''
x = int(x)
return str(x)+" reps"
def fmt_time(x, pos):
if np.isnan(x): return ''
m, s = divmod(round(x*60), 60)
m, s = map(int, [m, s])
return (str(m)+':'+str(s).zfill(2))
def draw_plots():
        def get_cols(x):
            # return the [open, regionals, games] series for stat x,
            # converted to imperial units when metric is False
if metric:
return [
list(open_data[x].dropna()),
list(regionals_data[x].dropna()),
list(games_data[x].dropna())]
else:
if x == 'Height': scaler = 1/2.54
elif x in ['Weight']+weights: scaler = 2.2
else: scaler = 1
return [
list(open_data[x].dropna()*scaler),
list(regionals_data[x].dropna()*scaler),
list(games_data[x].dropna()*scaler)]
labels = ['Open','Regionals','Games']
ax_stats = []
for i,x in enumerate(stats):
ax = plt.subplot(5,3,i+1)
ax_stats += [ax]
plt.title(x, **subtitle)
plt.grid(axis='y',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, **kwargs)
add_colors(bplot)
if x == 'Height':
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_height))
elif x == 'Weight':
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_weight))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
ax_weights = []
for i,x in enumerate(weights):
ax = plt.subplot(5,4,5+i)
ax_weights += [ax]
plt.title(x, **subtitle)
plt.grid(axis='y',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, **kwargs)
add_colors(bplot)
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_weight))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
ax_reps = []
for i,x in enumerate(reps):
ax = plt.subplot(5,2,5+i)
ax_reps += [ax]
plt.title(x, **subtitle)
plt.grid(axis='x',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, vert=False, **kwargs)
add_colors(bplot)
plt.gca().xaxis.set_major_formatter(ticker.FuncFormatter(fmt_reps))
plt.gca().xaxis.set_major_locator(plt.MaxNLocator(5))
ax_times = []
for i,x in enumerate(times):
ax = plt.subplot(5,6,19+i)
ax_times += [ax]
plt.title(x, **subtitle)
plt.grid(axis='y',linestyle='dotted')
bplot = plt.boxplot(get_cols(x),labels=labels, **kwargs)
add_colors(bplot)
plt.gca().set_yscale('log')
plt.gca().yaxis.set_major_formatter(ticker.FuncFormatter(fmt_time))
plt.gca().yaxis.set_major_locator(plt.MaxNLocator(5))
plt.minorticks_off()
plt.subplots_adjust(left=0.125,right=0.9,
bottom=0.1,top=0.9,wspace=0.3,hspace=0.4)
return ax_stats, ax_weights, ax_reps, ax_times
ax_stats, ax_weights, ax_reps, ax_times = draw_plots()
plot_my_data()
plt.show()
def ANALYSIS_open(division='men'):
open_data = get_analysis_dataframe(competition='open', division=division)
open_data = clear_outliers(open_data)
box_plots(open_data, 'Open')
def ANALYSIS_regionals(division='men'):
regionals_data = get_analysis_dataframe(competition='regionals', division=division)
regionals_data = clear_outliers(regionals_data)
box_plots(regionals_data, 'Regionals')
def ANALYSIS_games(division='men'):
games_data = get_analysis_dataframe(competition='games', division=division)
games_data = clear_outliers(games_data)
box_plots(games_data, 'Games')
def ANALYSIS_all(division='men', my_data={}, metric=True):
open_data = get_analysis_dataframe(competition='open', division=division)
open_data = clear_outliers(open_data)
regionals_data = get_analysis_dataframe(competition='regionals', division=division)
regionals_data = clear_outliers(regionals_data)
games_data = get_analysis_dataframe(competition='games', division=division)
games_data = clear_outliers(games_data)
box_plots_all(open_data, regionals_data, games_data, division.title(), my_data, metric)
def ANALYSIS_all_imputed(division='men', my_data={}, metric=True):
open_data = get_imputed_dataframe(division = division, competition='open')
regionals_data = get_analysis_dataframe(division = division, competition='regionals')
games_data = get_analysis_dataframe(division = division, competition='games')
# use imputed values from open data to fill in athlete stats for regionals/games data
regionals_data = pd.merge(
open_data.drop(['overallrank','overallscore'],axis=1),
regionals_data[['userid','overallrank','overallscore']],
on='userid', how='inner')
games_data = pd.merge(
open_data.drop(['overallrank','overallscore'],axis=1),
games_data[['userid','overallrank','overallscore']],
on='userid', how='inner')
box_plots_all(open_data, regionals_data, games_data, "Imputed " + division.title(), my_data, metric)
alex = {'Age':23,'Height':165,'Weight':70,
'Back Squat':175, 'Clean and Jerk':133, 'Snatch':108, 'Deadlift':220,
'Max Pull-ups':25,
'Fran': 5}
pan = {'Age':22,'Height':158,'Weight':53,
'Back Squat':57, 'Clean and Jerk':35, 'Snatch':28, 'Deadlift':70,
'Max Pull-ups':0}
fraser = get_analysis_dataframe(division='men', competition='games').iloc[0].dropna().drop(['overallscore','userid','overallrank']).to_dict()
tct = get_analysis_dataframe(division='women', competition='games').iloc[0].dropna().drop(['overallscore','userid','overallrank']).to_dict()
sara = get_analysis_dataframe(division='women', competition='open').iloc[0].dropna().drop(['overallscore','userid','overallrank']).to_dict()
import xgboost as xgb
@memoize()
def get_fitted_model(data):
reg = xgb.XGBRegressor(missing=np.nan)
X = data.drop(['userid', 'overallrank', 'overallscore'], axis=1).as_matrix()
y = data['overallrank'].as_matrix()
reg.fit(X,np.log1p(y))
return reg
def predict_ranking(data, my_data):
cols = list(data.drop(['userid', 'overallrank', 'overallscore'], axis=1))
X_pred = []
for i,col in enumerate(cols):
if col in my_data:
X_pred += [my_data[col]]
else:
X_pred += [np.nan]
reg = get_fitted_model(data)
known_cols = list(my_data)
y_pred = np.expm1(reg.predict(X_pred))
return y_pred[0], (y_pred / data['overallrank'].max()*100)[0]
ANALYSIS_all_imputed(division='men',metric=True, my_data=alex)
#ANALYSIS_all_imputed(division='men',metric=True, my_data=fraser)
# deliberate early stop: everything below is exploratory model/imputer testing
raise Exception()
def to_imperial(stats):
stats['Height'] /= 2.54
stats['Weight'] *= 2.2
stats['Back Squat'] *= 2.2
stats['Clean and Jerk'] *= 2.2
stats['Snatch'] *= 2.2
stats['Deadlift'] *= 2.2
return stats
# lets test some models
data = get_analysis_dataframe()
data = clear_outliers(data)
#data = data[:1000]
X = data.drop(['userid', 'overallrank', 'overallscore'], axis=1)
y = pd.to_numeric(data['overallrank'])
y = np.log1p(y)
from sklearn.preprocessing import Imputer, StandardScaler, RobustScaler, FunctionTransformer
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import ElasticNetCV, RidgeCV
from sklearn.neural_network import MLPRegressor
from sklearn.svm import SVR
from scipy.stats import skew
def log_skewed_cols(X):
X = X.apply(lambda x: np.log1p(x) if abs(skew(x.dropna()))>1 else x, axis=0)
return X
get_score = lambda model: cross_val_score(model, X, y, n_jobs=1).mean()
for reg in [
LinearRegression(),
RidgeCV(),
#ElasticNetCV(),
#MLPRegressor(hidden_layer_sizes=(100,10,5,)),
#KNeighborsRegressor(),
#RandomForestRegressor(),
SVR(),
xgb.XGBRegressor(),
#KerasRegressor(build_fn=create_model, verbose=0),
]:
print(reg)
pipeline = Pipeline([
('logtransform', FunctionTransformer(log_skewed_cols, validate=False)),
('imputer', Imputer()),
('scaler', RobustScaler()),
('regressor', reg)
])
try:
t = time.time()
print("\tScore:",get_score(pipeline))
print("\t",time.time()-t,"seconds")
    except Exception as e:
        raise e
# xgb easily outperforms others
# now look at imputing methods
class SKLearnFancyImputer(Imputer):
def __init__(self,imputer):
self.imputer = imputer
def fit(self, X, y=None):
self.X = X
return self
def transform(self, X):
if np.array_equal(np.nan_to_num(self.X),np.nan_to_num(X)):
return self.imputer.complete(X)
else:
return self.imputer.complete(X.append(self.X))[:len(X)]
'''for imp in [
Imputer(), Imputer(strategy='median'),
SKLearnFancyImputer(fancyimpute.SoftImpute(verbose=0)),
SKLearnFancyImputer(fancyimpute.IterativeSVD(verbose=0)),
#SKLearnFancyImputer(fancyimpute.MICE(verbose=0)),
#SKLearnFancyImputer(fancyimpute.MatrixFactorization(verbose=False)),
#SKLearnFancyImputer(fancyimpute.NuclearNormMinimization(verbose=0)),
#SKLearnFancyImputer(fancyimpute.BiScaler(verbose=0)),
]:
print(imp)
pipeline = Pipeline([
('imputer', imp),
('regressor', xgb.XGBRegressor())
])
try:
t = time.time()
print("\tScore:",get_score(pipeline))
print("\t",time.time()-t,"seconds")
except Exception as e:
print(e)'''
| mit | 18,189,834,306,985,480 | 31.233379 | 141 | 0.658655 | false |
browseinfo/odoo_saas3_nicolas | addons/project_mrp/project_procurement.py | 1 | 5722 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class procurement_order(osv.osv):
_name = "procurement.order"
_inherit = "procurement.order"
_columns = {
'task_id': fields.many2one('project.task', 'Task'),
'sale_line_id': fields.many2one('sale.order.line', 'Sales order line')
}
def action_check_finished(self, cr, uid, ids):
res = super(procurement_order, self).action_check_finished(cr, uid, ids)
return res and self.check_task_done(cr, uid, ids)
def check_task_done(self, cr, uid, ids, context=None):
""" Checks if task is done or not.
@return: True or False.
"""
for p in self.browse(cr, uid, ids, context=context):
if (p.product_id.type == 'service') and (p.procure_method == 'make_to_order') and p.task_id and (p.task_id.stage_id and not p.task_id.stage_id.closed):
return False
return True
def check_produce_service(self, cr, uid, procurement, context=None):
return True
def _convert_qty_company_hours(self, cr, uid, procurement, context=None):
product_uom = self.pool.get('product.uom')
company_time_uom_id = self.pool.get('res.users').browse(cr, uid, uid).company_id.project_time_mode_id
if procurement.product_uom.id != company_time_uom_id.id and procurement.product_uom.category_id.id == company_time_uom_id.category_id.id:
planned_hours = product_uom._compute_qty(cr, uid, procurement.product_uom.id, procurement.product_qty, company_time_uom_id.id)
else:
planned_hours = procurement.product_qty
return planned_hours
def _get_project(self, cr, uid, procurement, context=None):
project_project = self.pool.get('project.project')
project = procurement.product_id.project_id
if not project and procurement.sale_line_id:
# find the project corresponding to the analytic account of the sales order
account = procurement.sale_line_id.order_id.project_id
project_ids = project_project.search(cr, uid, [('analytic_account_id', '=', account.id)])
projects = project_project.browse(cr, uid, project_ids, context=context)
project = projects and projects[0] or False
return project
def action_produce_assign_service(self, cr, uid, ids, context=None):
if not context:
context = {}
project_task = self.pool.get('project.task')
for procurement in self.browse(cr, uid, ids, context=context):
project = self._get_project(cr, uid, procurement, context=context)
planned_hours = self._convert_qty_company_hours(cr, uid, procurement, context=context)
manager = procurement.product_id.product_manager
partner = procurement.sale_line_id and procurement.sale_line_id.order_id.partner_id or None
lang = (manager and manager.lang) or (partner and partner.lang) or False
if not lang:
lang = self.pool['res.users'].browse(cr, uid, uid, context=context).lang
product = self.pool['product.product'].browse(cr, uid, procurement.product_id.id, context=dict(context, lang=lang))
task_id = project_task.create(cr, uid, {
'name': '%s:%s' % (procurement.origin or '', product.name),
'date_deadline': procurement.date_planned,
'planned_hours': planned_hours,
'remaining_hours': planned_hours,
'partner_id': procurement.sale_line_id and procurement.sale_line_id.order_id.partner_id.id or False,
'user_id': procurement.product_id.product_manager.id,
'procurement_id': procurement.id,
'description': procurement.sale_line_id and procurement.sale_line_id.name or procurement.name,
'project_id': project and project.id or False,
'company_id': procurement.company_id.id,
},context=context)
self.write(cr, uid, [procurement.id], {'task_id': task_id, 'state': 'running', 'message':_('Task created.')}, context=context)
self.project_task_create_note(cr, uid, ids, context=context)
return task_id
def project_task_create_note(self, cr, uid, ids, context=None):
for procurement in self.browse(cr, uid, ids, context=context):
body = _("Task created")
self.message_post(cr, uid, [procurement.id], body=body, context=context)
if procurement.sale_line_id and procurement.sale_line_id.order_id:
procurement.sale_line_id.order_id.message_post(body=body)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | 3,890,371,232,505,141,000 | 52.981132 | 163 | 0.627753 | false |
jalaziz/django-cms-grappelli-old | cms/middleware/multilingual.py | 1 | 6166 | # -*- coding: utf-8 -*-
from cms.utils.i18n import get_default_language
from django.conf import settings
from django.core.urlresolvers import reverse
from django.middleware.locale import LocaleMiddleware
from django.utils import translation
import re
import urllib
SUPPORTED = dict(settings.CMS_LANGUAGES)
HAS_LANG_PREFIX_RE = re.compile(r"^/(%s)/.*" % "|".join(map(lambda l: re.escape(l[0]), settings.CMS_LANGUAGES)))
def has_lang_prefix(path):
check = HAS_LANG_PREFIX_RE.match(path)
if check is not None:
return check.group(1)
else:
return False
def patch_response(content, pages_root, language):
# Customarily user pages are served from http://the.server.com/~username/
# When a user uses django-cms for his pages, the '~' of the url appears quoted in href links.
# We have to quote pages_root for the regular expression to match.
#
# The used regex is quite complex. The exact pattern depends on the used settings.
# The regex extracts the path of the url without the leading page root, but only matches urls
# that don't already contain a language string or aren't considered multilingual.
#
# Here is an annotated example pattern (_r_ is a shorthand for the value of pages_root):
# pattern: <a([^>]+)href=("|\')(?=_r_)(?!(/fr/|/de/|/en/|/pt-br/|/media/|/media/admin/))(_r_(.*?))("|\')(.*?)>
# |-\1--| |-\2-| |---------------------\3---------------------| | |-\5--|||-\6-||-\7-|
# |---\4---|
# input (_r_=/): <a href="/admin/password_change/" class="foo">
# matched groups: (u' ', None, u'/admin/password_change/', u'admin/password_change/', u' class="foo"')
#
# Notice that (?=...) and (?!=...) do not consume input or produce a group in the match object.
# If the regex matches, the extracted path we want is stored in the fourth group (\4).
quoted_root = urllib.quote(pages_root)
HREF_URL_FIX_RE = re.compile(ur'<a([^>]+)href=("|\')(?=%s)(?!(%s|%s|%s))(%s(.*?))("|\')(.*?)>' % (
quoted_root,
"|".join(map(lambda l: quoted_root + l[0] + "/" , settings.CMS_LANGUAGES)),
settings.MEDIA_URL,
settings.ADMIN_MEDIA_PREFIX,
quoted_root
))
    # Unlike in href links, the '~' (see above) in form actions appears unquoted.
#
# For understanding this regex, please read the documentation for HREF_URL_FIX_RE above.
FORM_URL_FIX_RE = re.compile(ur'<form([^>]+)action=("|\')(?=%s)(?!(%s|%s|%s))(%s(.*?))("|\')(.*?)>' % (
pages_root,
"|".join(map(lambda l: pages_root + l[0] + "/" , settings.CMS_LANGUAGES)),
settings.MEDIA_URL,
settings.ADMIN_MEDIA_PREFIX,
pages_root
))
content = HREF_URL_FIX_RE.sub(ur'<a\1href=\2/%s%s\5\6\7>' % (language, pages_root), content)
content = FORM_URL_FIX_RE.sub(ur'<form\1action=\2%s%s/\5\6\7>' % (pages_root, language), content).encode("utf8")
return content
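# Worked example (illustration only), matching the annotated pattern above with
# pages_root='/' and language='en':
#
#   patch_response(u'<a href="/admin/password_change/" class="foo">', '/', 'en')
#
# rewrites the href to '/en/admin/password_change/' (and encodes the result as
# UTF-8), while media, admin-media and already-prefixed URLs are left alone.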
class MultilingualURLMiddleware:
def get_language_from_request (self,request):
changed = False
prefix = has_lang_prefix(request.path_info)
if prefix:
request.path = "/" + "/".join(request.path.split("/")[2:])
request.path_info = "/" + "/".join(request.path_info.split("/")[2:])
t = prefix
if t in SUPPORTED:
lang = t
if hasattr(request, "session") and \
request.session.get("django_language", None) != lang:
request.session["django_language"] = lang
changed = True
else:
lang = translation.get_language_from_request(request)
if not changed:
if hasattr(request, "session"):
lang = request.session.get("django_language", None)
if lang in SUPPORTED and lang is not None:
return lang
elif "django_language" in request.COOKIES.keys():
lang = request.COOKIES.get("django_language", None)
if lang in SUPPORTED and lang is not None:
return lang
if not lang:
lang = translation.get_language_from_request(request)
lang = get_default_language(lang)
return lang
def process_request(self, request):
language = self.get_language_from_request(request)
translation.activate(language)
request.LANGUAGE_CODE = language
def process_response(self, request, response):
language = getattr(request, 'LANGUAGE_CODE', self.get_language_from_request(request))
local_middleware = LocaleMiddleware()
        response = local_middleware.process_response(request, response)
path = unicode(request.path)
# note: pages_root is assumed to end in '/'.
# testing this and throwing an exception otherwise, would probably be a good idea
if not path.startswith(settings.MEDIA_URL) and \
not path.startswith(settings.ADMIN_MEDIA_PREFIX) and \
response.status_code == 200 and \
response._headers['content-type'][1].split(';')[0] == "text/html":
pages_root = urllib.unquote(reverse("pages-root"))
try:
decoded_response = response.content.decode('utf-8')
except UnicodeDecodeError:
decoded_response = response.content
response.content = patch_response(
decoded_response,
pages_root,
request.LANGUAGE_CODE
)
if (response.status_code == 301 or response.status_code == 302 ):
location = response['Location']
if not has_lang_prefix(location) and location.startswith("/") and \
not location.startswith(settings.MEDIA_URL) and \
not location.startswith(settings.ADMIN_MEDIA_PREFIX):
response['Location'] = "/%s%s" % (language, location)
response.set_cookie("django_language", language)
return response
| bsd-3-clause | -1,841,472,630,449,027,300 | 46.430769 | 123 | 0.57444 | false |
olivierdalang/stdm | third_party/reportlab/graphics/renderPDF.py | 1 | 14984 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/renderPDF.py
# renderPDF - draws Drawings onto a canvas
__version__=''' $Id$ '''
__doc__="""Render Drawing objects within others PDFs or standalone
Usage::
import renderpdf
renderpdf.draw(drawing, canvas, x, y)
Execute the script to see some test drawings.
changed
"""
from reportlab.graphics.shapes import *
from reportlab.pdfgen.canvas import Canvas
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.lib.utils import getStringIO
from reportlab import rl_config
from renderbase import Renderer, StateTracker, getStateDelta, renderScaledDrawing
# the main entry point for users...
def draw(drawing, canvas, x, y, showBoundary=rl_config._unset_):
"""As it says"""
R = _PDFRenderer()
R.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
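# Minimal usage sketch (not part of the original module), following the module
# docstring: build a Drawing, then render it onto an open canvas. Names below
# are illustrative.
#
#   from reportlab.graphics.shapes import Drawing, String
#   from reportlab.pdfgen.canvas import Canvas
#   d = Drawing(200, 50)
#   d.add(String(10, 20, 'Hello PDF'))
#   c = Canvas('example.pdf')
#   draw(d, c, 80, 700)
#   c.showPage(); c.save()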
class _PDFRenderer(Renderer):
"""This draws onto a PDF document. It needs to be a class
rather than a function, as some PDF-specific state tracking is
needed outside of the state info in the SVG model."""
def __init__(self):
self._stroke = 0
self._fill = 0
self._tracker = StateTracker()
def drawNode(self, node):
"""This is the recursive method called for each node
in the tree"""
#print "pdf:drawNode", self
#if node.__class__ is Wedge: stop
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.restoreState()
def drawRect(self, rect):
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.width, rect.height,
stroke=self._stroke,
fill=self._fill
)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.width, rect.height, rect.rx,
fill=self._fill,
stroke=self._stroke
)
def drawImage(self, image):
path = image.path
# currently not implemented in other renderers
if path and (hasattr(path,'mode') or os.path.exists(image.path)):
self._canvas.drawInlineImage(
path,
image.x, image.y,
image.width, image.height
)
def drawLine(self, line):
if self._stroke:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle(
circle.cx, circle.cy, circle.r,
fill=self._fill,
stroke=self._stroke
)
def drawPolyLine(self, polyline):
if self._stroke:
assert len(polyline.points) >= 2, 'Polyline must have 2 or more points'
head, tail = polyline.points[0:2], polyline.points[2:],
path = self._canvas.beginPath()
path.moveTo(head[0], head[1])
for i in range(0, len(tail), 2):
path.lineTo(tail[i], tail[i+1])
self._canvas.drawPath(path)
def drawWedge(self, wedge):
centerx, centery, radius, startangledegrees, endangledegrees = \
wedge.centerx, wedge.centery, wedge.radius, wedge.startangledegrees, wedge.endangledegrees
yradius, radius1, yradius1 = wedge._xtraRadii()
if yradius is None: yradius = radius
angle = endangledegrees-startangledegrees
path = self._canvas.beginPath()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None):
path.moveTo(centerx, centery)
path.arcTo(centerx-radius, centery-yradius, centerx+radius, centery+yradius,
startangledegrees, angle)
else:
path.arc(centerx-radius, centery-yradius, centerx+radius, centery+yradius,
startangledegrees, angle)
path.arcTo(centerx-radius1, centery-yradius1, centerx+radius1, centery+yradius1,
endangledegrees, -angle)
path.close()
self._canvas.drawPath(path,
fill=self._fill,
stroke=self._stroke)
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2,fill=self._fill,stroke=self._stroke)
def drawPolygon(self, polygon):
assert len(polygon.points) >= 2, 'Polyline must have 2 or more points'
head, tail = polygon.points[0:2], polygon.points[2:],
path = self._canvas.beginPath()
path.moveTo(head[0], head[1])
for i in range(0, len(tail), 2):
path.lineTo(tail[i], tail[i+1])
path.close()
self._canvas.drawPath(
path,
stroke=self._stroke,
fill=self._fill
)
def drawString(self, stringObj):
if self._fill:
S = self._tracker.getState()
text_anchor, x, y, text, enc = S['textAnchor'], stringObj.x,stringObj.y,stringObj.text, stringObj.encoding
if not text_anchor in ['start','inherited']:
font, font_size = S['fontName'], S['fontSize']
textLen = stringWidth(text, font, font_size, enc)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen*0.5
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,text,textLen,font,font_size,enc)
else:
raise ValueError, 'bad value for textAnchor '+str(text_anchor)
t = self._canvas.beginText(x,y)
t.textLine(text)
self._canvas.drawText(t)
def drawPath(self, path):
from reportlab.graphics.shapes import _renderPath
pdfPath = self._canvas.beginPath()
drawFuncs = (pdfPath.moveTo, pdfPath.lineTo, pdfPath.curveTo, pdfPath.close)
isClosed = _renderPath(path, drawFuncs)
if isClosed:
fill = self._fill
else:
fill = 0
if path.isClipPath:
self._canvas.clipPath(pdfPath, fill=fill, stroke=self._stroke)
else:
self._canvas.drawPath(pdfPath,
fill=fill,
stroke=self._stroke)
def setStrokeColor(self,c):
self._canvas.setStrokeColor(c)
def setFillColor(self,c):
self._canvas.setFillColor(c)
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the PDF operators
needed to set those properties"""
for key, value in delta.items():
if key == 'transform':
self._canvas.transform(value[0], value[1], value[2],
value[3], value[4], value[5])
elif key == 'strokeColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
if value is None:
self._stroke = 0
else:
self._stroke = 1
self.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': #0,1,2
self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
# elif key == 'stroke_dasharray':
# self._canvas.setDash(array=value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
elif key == 'fillColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
if value is None:
self._fill = 0
else:
self._fill = 1
self.setFillColor(value)
elif key in ['fontSize', 'fontName']:
# both need setting together in PDF
# one or both might be in the deltas,
# so need to get whichever is missing
fontname = delta.get('fontName', self._canvas._fontname)
fontsize = delta.get('fontSize', self._canvas._fontsize)
self._canvas.setFont(fontname, fontsize)
elif key=='fillOpacity':
if value is not None:
self._canvas.setFillAlpha(value)
elif key=='strokeOpacity':
if value is not None:
self._canvas.setStrokeAlpha(value)
elif key=='fillOverprint':
self._canvas.setFillOverprint(value)
elif key=='strokeOverprint':
self._canvas.setStrokeOverprint(value)
elif key=='overprintMask':
self._canvas.setOverprintMask(value)
from reportlab.platypus import Flowable
class GraphicsFlowable(Flowable):
"""Flowable wrapper around a Pingo drawing"""
def __init__(self, drawing):
self.drawing = drawing
self.width = self.drawing.width
self.height = self.drawing.height
def draw(self):
draw(self.drawing, self.canv, 0, 0)
def drawToFile(d, fn, msg="", showBoundary=rl_config._unset_, autoSize=1):
"""Makes a one-page PDF with just the drawing.
If autoSize=1, the PDF will be the same size as
the drawing; if 0, it will place the drawing on
an A4 page with a title above it - possibly overflowing
if too big."""
d = renderScaledDrawing(d)
c = Canvas(fn)
if msg:
c.setFont(rl_config.defaultGraphicsFontName, 36)
c.drawString(80, 750, msg)
c.setTitle(msg)
if autoSize:
c.setPageSize((d.width, d.height))
draw(d, c, 0, 0, showBoundary=showBoundary)
else:
#show with a title
c.setFont(rl_config.defaultGraphicsFontName, 12)
y = 740
i = 1
y = y - d.height
draw(d, c, 80, y, showBoundary=showBoundary)
c.showPage()
c.save()
if sys.platform=='mac' and not hasattr(fn, "write"):
try:
import macfs, macostools
macfs.FSSpec(fn).SetCreatorType("CARO", "PDF ")
macostools.touched(fn)
except:
pass
def drawToString(d, msg="", showBoundary=rl_config._unset_,autoSize=1):
"Returns a PDF as a string in memory, without touching the disk"
s = getStringIO()
drawToFile(d, s, msg=msg, showBoundary=showBoundary,autoSize=autoSize)
return s.getvalue()
#########################################################
#
# test code. First, define a bunch of drawings.
# Routine to draw them comes at the end.
#
#########################################################
def test():
from reportlab.graphics.shapes import _baseGFontName, _baseGFontNameBI
c = Canvas('renderPDF.pdf')
c.setFont(_baseGFontName, 36)
c.drawString(80, 750, 'Graphics Test')
# print all drawings and their doc strings from the test
# file
#grab all drawings from the test module
from reportlab.graphics import testshapes
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
drawing = eval('testshapes.' + funcname + '()') #execute it
docstring = eval('testshapes.' + funcname + '.__doc__')
drawings.append((drawing, docstring))
#print in a loop, with their doc strings
c.setFont(_baseGFontName, 12)
y = 740
i = 1
for (drawing, docstring) in drawings:
assert (docstring is not None), "Drawing %d has no docstring!" % i
if y < 300: #allows 5-6 lines of text
c.showPage()
y = 740
# draw a title
y = y - 30
c.setFont(_baseGFontNameBI,12)
c.drawString(80, y, 'Drawing %d' % i)
c.setFont(_baseGFontName,12)
y = y - 14
textObj = c.beginText(80, y)
textObj.textLines(docstring)
c.drawText(textObj)
y = textObj.getY()
y = y - drawing.height
draw(drawing, c, 80, y)
i = i + 1
if y!=740: c.showPage()
c.save()
print 'saved renderPDF.pdf'
##def testFlowable():
## """Makes a platypus document"""
## from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
## from reportlab.lib.styles import getSampleStyleSheet
## styles = getSampleStyleSheet()
## styNormal = styles['Normal']
##
## doc = SimpleDocTemplate('test_flowable.pdf')
## story = []
## story.append(Paragraph("This sees is a drawing can work as a flowable", styNormal))
##
## import testdrawings
## drawings = []
##
## for funcname in dir(testdrawings):
## if funcname[0:10] == 'getDrawing':
## drawing = eval('testdrawings.' + funcname + '()') #execute it
## docstring = eval('testdrawings.' + funcname + '.__doc__')
## story.append(Paragraph(docstring, styNormal))
## story.append(Spacer(18,18))
## story.append(drawing)
## story.append(Spacer(36,36))
##
## doc.build(story)
## print 'saves test_flowable.pdf'
if __name__=='__main__':
test()
#testFlowable()
| gpl-2.0 | 5,012,156,805,755,201,000 | 36.51928 | 118 | 0.545782 | false |
nuobit/odoo-addons | connector_sage/models/payroll_sage_payslip.py | 1 | 1319 | # Copyright NuoBiT Solutions, S.L. (<https://www.nuobit.com>)
# Eric Antones <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl)
from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError
class Payslip(models.Model):
_inherit = 'payroll.sage.payslip'
@api.multi
def action_paysplip_import(self):
for rec in self:
backend = self.env['sage.backend'].search([('company_id', '=', rec.company_id.id)])
if len(backend) != 1:
raise UserError(_("Expected 1 backend for the current company, found %i" % len(backend)))
# import lines and checks
backend.import_payslip_line_id = rec
if rec.type == 'transfer':
self.env['sage.payroll.sage.payslip.line.transfer'].with_delay().import_payslip_lines(rec, backend)
backend.import_payslip_check_id = rec
self.env['sage.payroll.sage.payslip.check'].with_delay().import_payslip_checks(rec, backend)
elif rec.type == 'payroll':
self.env['sage.payroll.sage.payslip.line.payroll'].with_delay().import_payslip_lines(rec, backend)
else:
raise UserError(_("Unexpected payslip type %s!") % rec.type)
return True
| agpl-3.0 | 8,481,821,566,376,713,000 | 42.966667 | 115 | 0.620925 | false |
rueckstiess/jiratopic | onlineldavb/lookup_topic.py | 1 | 1908 | import sys, os, re, random, math, urllib2, time, cPickle
import numpy
import argparse
import onlineldavb
from operator import itemgetter
topics_30 = [
"NETWORKING / CONNECTIONS",
"HARDWARE / RESOURCES",
"DRIVERS",
"MMS",
"?1",
"JIRA",
"QUERY",
"REPLICATION",
"REPLICATION",
"STORAGE???",
"NETWORKING / SETUP / LIMITS",
"CHUNKS",
"NETWORKING / PROBLEMS",
"SHARDING / CONFIG SERVER",
"SHARDING / BALANCING",
"DIAGNOSIS",
"SHELL",
"AUTH/SECURITY",
"QUERY / DOCUMENTS",
"OPS / RESTART",
"STORAGE / OPS",
"STORAGE",
"CHUNKS",
"INDEXING",
"UPGRADING",
"INITIAL DIAGNOSIS",
"INDEXING / OPTIMIZATION",
"REPLICASET CONFIGURATION",
"BACKUPS",
"NETWORKING / DNS"
]
def main():
# The number of documents to analyze each iteration
batchsize = 64
# The total number of documents in the CS project
D = 14617
# argparse arguments
argparser = argparse.ArgumentParser()
argparser.add_argument('-v', '--vocabulary', action='store', default="../manual_vocab.txt", help='provide vocabulary file')
argparser.add_argument('-l', '--lambda', action='store', default="./lambda-79-30.dat", help='provide lambda parameter file')
argparser.add_argument('-s', '--string', action='store', nargs='*', help='string to evaluate')
args = vars(argparser.parse_args())
vocab = str.split(file(args['vocabulary']).read())
init_lambda = numpy.loadtxt(args['lambda'])
K = init_lambda.shape[0]
olda = onlineldavb.OnlineLDA(vocab, K, D, 1./K, 1./K, 1024., 0.7, init_lambda)
gamma, _ = olda.do_e_step( args['string'] )
gamma = gamma.flatten()
sorted_ids = sorted ( [(i,g) for i,g in enumerate(gamma) if g > 1.0], key=itemgetter(1), reverse=True)
scores = map(itemgetter(1), sorted_ids)
topics = map(lambda x: topics_30[x[0]], sorted_ids)
print ", ".join( map(lambda x: "%s (%.2f)" % (x[0], x[1]), zip (topics, scores)) )
if __name__ == '__main__':
main()
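# Example invocation (paths are the argparse defaults above; the query string
# is illustrative):
#
#   python lookup_topic.py -v ../manual_vocab.txt -l ./lambda-79-30.dat \
#       -s "replica set sync lag"
#
# This prints every topic whose gamma weight exceeds 1.0, formatted like
# "REPLICATION (12.34), DIAGNOSIS (3.21)" (values illustrative).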
| apache-2.0 | 4,325,715,177,712,102,400 | 26.652174 | 128 | 0.649895 | false |
ericzundel/pants | tests/python/pants_test/backend/jvm/tasks/jvm_compile/base_compile_integration_test.py | 1 | 3488 | # coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
from collections import defaultdict
from contextlib import contextmanager
import six
from pants.util.contextutil import temporary_dir
from pants_test.pants_run_integration_test import PantsRunIntegrationTest
class BaseCompileIT(PantsRunIntegrationTest):
"""
:API: public
"""
_EXTRA_TASK_ARGS=[]
@contextmanager
def do_test_compile(self, target, expected_files=None, iterations=2, expect_failure=False,
extra_args=None, workdir_outside_of_buildroot=False):
"""Runs a configurable number of iterations of compilation for the given target.
:API: public
By default, runs twice to shake out errors related to noops.
"""
if not workdir_outside_of_buildroot:
workdir_generator = self.temporary_workdir()
else:
workdir_generator = temporary_dir(suffix='.pants.d')
with workdir_generator as workdir:
with self.temporary_cachedir() as cachedir:
for i in six.moves.xrange(0, iterations):
pants_run = self.run_test_compile(workdir, cachedir, target,
clean_all=(i == 0),
extra_args=extra_args)
if expect_failure:
self.assert_failure(pants_run)
else:
self.assert_success(pants_run)
found = defaultdict(set)
workdir_files = []
if expected_files:
to_find = set(expected_files)
for root, _, files in os.walk(workdir):
for file in files:
workdir_files.append(os.path.join(root, file))
if file in to_find:
found[file].add(os.path.join(root, file))
to_find.difference_update(found)
if not expect_failure:
self.assertEqual(set(), to_find,
'Failed to find the following compiled files: {} in {}'.format(
to_find, '\n'.join(sorted(workdir_files))))
yield found
def run_test_compile(self, workdir, cacheurl, target, clean_all=False, extra_args=None, test=False):
"""
:API: public
"""
global_args = [
'--cache-write',
'--cache-write-to=[\'{}\']'.format(cacheurl),
] + self._EXTRA_TASK_ARGS
task = 'test' if test else 'compile'
args = [task, target] + (extra_args if extra_args else [])
# Clean-all on the first iteration.
if clean_all:
args.insert(0, 'clean-all')
return self.run_pants_with_workdir(global_args + args, workdir)
def get_only(self, found, name):
files = found[name]
self.assertEqual(1, len(files))
return files.pop()
def do_test_success_and_failure(self, target, success_args, failure_args, shared_args=None):
"""Ensure that a target fails to build when one arg set is passed, and succeeds for another.
:API: public
"""
shared_args = shared_args if shared_args else []
# Check that success_args succeed.
with self.do_test_compile(target, extra_args=(shared_args + success_args)):
pass
# Check that failure_args fail.
with self.do_test_compile(target, extra_args=(shared_args + failure_args), expect_failure=True):
pass
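# Hedged illustration (not part of the original class): a concrete integration
# test would subclass BaseCompileIT and drive do_test_compile against a real
# target address, for example:
#
#   class ExampleCompileIT(BaseCompileIT):
#     def test_example_compiles(self):
#       with self.do_test_compile('examples/src/scala/org/pantsbuild/example/hello/welcome'):
#         pass
#
# The target spec above is an assumption used only for illustration.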
| apache-2.0 | -4,696,149,196,501,012,000 | 34.591837 | 102 | 0.624713 | false |
battlemidget/shipit | shipit/git.py | 1 | 1605 | # -*- coding: utf-8 -*-
"""
shipit.git
~~~~~~~~~~
Operations on git repositories.
"""
import os
import tempfile
import subprocess
def get_remotes():
"""
    Get the GitHub remote URLs for this repository.
    Return a dictionary mapping remote names to URL strings (empty if no
    GitHub remotes are configured).
    Return ``None`` if the ``git remote -v`` command fails.
"""
tmp_file = tempfile.NamedTemporaryFile(mode='w+', delete=False)
retcode = subprocess.call(['git', 'remote', '-v'], stdout=tmp_file.file)
if retcode != 0:
return
# Store the output of the command and delete temporary file
tmp_file.file.seek(0)
raw_remotes = tmp_file.read()
os.remove(tmp_file.name)
# Get the GitHub remote strings
nonempty_remotes = (r for r in raw_remotes.split('\n') if 'github' in r.lower())
return {remote_name(r): remote_url(r) for r in nonempty_remotes}
def remote_name(remotestring):
return remotestring.split(' ')[0].split('\t')[0]
def remote_url(remotestring):
return remotestring.split(' ')[0].split('\t')[1]
def extract_user_and_repo_from_remote(remote_url):
# TODO: name slices
if remote_url.startswith('git://'):
# Git remote
user_repo = remote_url.split('/')[3:]
user, repo = user_repo[0], user_repo[1][:-4]
elif remote_url.startswith('http'):
# HTTP[S] remote
user_repo = remote_url.split('/')[3:]
user, repo = user_repo[0], user_repo[1][:-4]
else:
# SSH remote
user_repo = remote_url.split(':')[1][:-4]
user, repo = tuple(user_repo.split('/'))
return user, repo
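if __name__ == '__main__':
    # Hedged demo (not part of the original module): exercise the pure URL
    # parser on the example remotes quoted in the docstrings above. The
    # get_remotes() call is left commented out because it needs a git checkout.
    for example_url in ('https://github.com/NoviceLive/unish.git',
                        '[email protected]:NoviceLive/unish.git'):
        print(extract_user_and_repo_from_remote(example_url))
    # print(get_remotes())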
| gpl-3.0 | 6,923,824,859,608,695,000 | 24.078125 | 84 | 0.611838 | false |
NoviceLive/unish | py/gh.py | 1 | 2560 | #!/usr/bin/env python3
from os.path import basename, splitext
from logging import basicConfig, DEBUG
import click
from plumbum import local, FG
__author__ = 'Gu Zhengxiong'
__version__ = '0.1.0'
PROGRAM_NAME = 'GH'
PACKAGE_NAME = PROGRAM_NAME.lower()
VERSION_PROMPT = (
'{version}\n\nCopyright 2015-2016 {author} '
'<[email protected]>\n\n'
'This is free software; see the source for '
'copying conditions.\nThere is NO warranty; '
'not even for MERCHANTABILITY nor \nFITNESS FOR '
'A PARTICULAR PURPOSE.'.format(
version=__version__, author=__author__)
)
@click.group(
context_settings=dict(help_option_names=['-h', '--help']))
@click.version_option(VERSION_PROMPT,
'-V', '--version', prog_name=PROGRAM_NAME)
def main():
"""Simplified & Unfied Interface Of Mercurial & Git."""
basicConfig(level=DEBUG)
@main.command()
@click.argument('url', required=False)
def cl(url):
"""Clone a repository in a simplified manner."""
from pyperclip import paste
url = url if url else paste().strip()
SCM(url).doer.clone()
class SCM(object):
def __init__(self, url):
suffix = self.get_suffix(url)
if suffix == Hg.suffix:
self.doer = Hg(url)
elif suffix == Git.suffix:
self.doer = Git(url)
else:
            raise RuntimeError('No handler for URL: %s' % url)
@staticmethod
def get_suffix(url):
"""Determine the suffix for the URL.
Example Git URL:
1. https://github.com/NoviceLive/unish.git
2. [email protected]:NoviceLive/unish.git
3. https://[email protected]/novicelive/good.git
4. [email protected]:novicelive/good.git
Example Mercurial URL:
1. ssh://[email protected]/novicelive/unish
"""
suffix = splitext(basename(url))[1]
return Git.suffix if suffix == Git.suffix else Hg.suffix
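    # e.g. (assumed): a URL ending in '.git' maps to Git.suffix, while an
    # extension-less Mercurial ssh:// URL maps to Hg.suffix; anything not
    # ending in '.git' is treated as Mercurial.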
class Git(object):
suffix = '.git'
git = local['git']
def __init__(self, url):
self.url = url
self.base = basename(url)
def clone(self, dest=None):
if dest is None:
dest = self.base
self.git['clone', self.url, dest, '--recursive'] & FG
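        # plumbum's ``& FG`` runs the bound command in the foreground, streaming
        # output to the terminal, roughly ``git clone <url> <dest> --recursive``.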
class Hg(object):
suffix = '.hg'
hg = local['hg']
def __init__(self, url):
self.url = url
self.base = basename(url)
def clone(self, dest=None):
if dest is None:
dest = self.base + self.suffix
self.hg['clone', self.url, dest] & FG
if __name__ == '__main__':
main()
| gpl-3.0 | 6,983,805,230,766,400,000 | 24.346535 | 64 | 0.596484 | false |
tonnrueter/pymca_devel | PyMca/SpsDataSource.py | 1 | 12025 | #/*##########################################################################
# Copyright (C) 2004-2012 European Synchrotron Radiation Facility
#
# This file is part of the PyMca X-ray Fluorescence Toolkit developed at
# the ESRF by the Software group.
#
# This toolkit is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# PyMca is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# PyMca; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# PyMca follows the dual licensing model of Riverbank's PyQt and cannot be
# used as a free plugin for a non-free program.
#
# Please contact the ESRF industrial unit ([email protected]) if this license
# is a problem for you.
#############################################################################*/
import types
from PyMca import DataObject
from PyMca import spswrap as sps
DEBUG = 0
SOURCE_TYPE = 'SPS'
class SpsDataSource(object):
def __init__(self, name):
if type(name) not in types.StringTypes:
raise TypeError("Constructor needs string as first argument")
self.name = name
self.sourceName = name
self.sourceType = SOURCE_TYPE
def refresh(self):
pass
def getSourceInfo(self):
"""
        Returns information about the Spec version held in self.name,
        so the application can inspect what is available before loading.
        Returns a dictionary with the key "KeyList" (list of all available keys
        in this source). Each element in "KeyList" is a shared memory
        array name.
"""
return self.__getSourceInfo()
def getKeyInfo(self, key):
if key in self.getSourceInfo()['KeyList']:
return self.__getArrayInfo(key)
else:
return {}
def getDataObject(self, key_list, selection=None):
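        # ``selection`` (when given) is a dict describing what to extract; for
        # scan-type arrays the code below expects keys such as 'cntlist'
        # (counter names) and optional 'x'/'y'/'m' lists of indices into that
        # counter list (shape inferred from the usage below, not from upstream
        # documentation).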
if type(key_list) != types.ListType:
nolist = True
key_list = [key_list]
else:
output = []
nolist = False
if self.name in sps.getspeclist():
sourcekeys = self.getSourceInfo()['KeyList']
for key in key_list:
#a key corresponds to an array name
if key not in sourcekeys:
raise KeyError("Key %s not in source keys" % key)
#array = key
#create data object
data = DataObject.DataObject()
data.info = self.__getArrayInfo(key)
data.info['selection'] = selection
data.data = sps.getdata(self.name, key)
if nolist:
if selection is not None:
scantest = (data.info['flag'] &
sps.TAG_SCAN) == sps.TAG_SCAN
if ((key in ["SCAN_D"]) or scantest) \
and 'cntlist' in selection:
data.x = None
data.y = None
data.m = None
if 'nopts' in data.info['envdict']:
nopts = int(data.info['envdict']['nopts']) + 1
else:
nopts = data.info['rows']
if not 'LabelNames' in data.info:
data.info['LabelNames'] =\
selection['cntlist'] * 1
if 'x' in selection:
for labelindex in selection['x']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.x is None: data.x = []
data.x.append(data.data[:nopts, index])
if 'y' in selection:
for labelindex in selection['y']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.y is None: data.y = []
data.y.append(data.data[:nopts, index])
if 'm' in selection:
for labelindex in selection['m']:
label = data.info['LabelNames'][labelindex]
if label not in data.info['LabelNames']:
raise ValueError("Label %s not in scan labels" % label)
index = data.info['LabelNames'].index(label)
if data.m is None: data.m = []
data.m.append(data.data[:nopts, index])
data.info['selectiontype'] = "1D"
data.info['scanselection'] = True
data.data = None
return data
if (key in ["XIA_DATA"]) and 'XIA' in selection:
if selection["XIA"]:
if 'Detectors' in data.info:
for i in range(len(selection['rows']['y'])):
selection['rows']['y'][i] = \
data.info['Detectors'].index(selection['rows']['y'][i]) + 1
del selection['XIA']
return data.select(selection)
else:
if data.data is not None:
data.info['selectiontype'] = "%dD" % len(data.data.shape)
if data.info['selectiontype'] == "2D":
data.info["imageselection"] = True
return data
else:
output.append(data.select(selection))
return output
else:
return None
def __getSourceInfo(self):
arraylist = []
sourcename = self.name
for array in sps.getarraylist(sourcename):
arrayinfo = sps.getarrayinfo(sourcename, array)
arraytype = arrayinfo[2]
arrayflag = arrayinfo[3]
if arraytype != sps.STRING:
if (arrayflag & sps.TAG_ARRAY) == sps.TAG_ARRAY:
arraylist.append(array)
continue
if DEBUG:
print("array not added %s" % array)
source_info = {}
source_info["Size"] = len(arraylist)
source_info["KeyList"] = arraylist
return source_info
def __getArrayInfo(self, array):
info = {}
info["SourceType"] = SOURCE_TYPE
info["SourceName"] = self.name
info["Key"] = array
arrayinfo = sps.getarrayinfo(self.name, array)
info["rows"] = arrayinfo[0]
info["cols"] = arrayinfo[1]
info["type"] = arrayinfo[2]
info["flag"] = arrayinfo[3]
counter = sps.updatecounter(self.name, array)
info["updatecounter"] = counter
envdict = {}
keylist = sps.getkeylist(self.name, array + "_ENV")
for i in keylist:
val = sps.getenv(self.name, array + "_ENV", i)
envdict[i] = val
info["envdict"] = envdict
scantest = (info['flag'] & sps.TAG_SCAN) == sps.TAG_SCAN
if (array in ["SCAN_D"]) or scantest:
if 'axistitles' in info["envdict"]:
info["LabelNames"] = self._buildLabelsList(info['envdict']['axistitles'])
if 'H' in info["envdict"]:
if 'K' in info["envdict"]:
if 'L' in info["envdict"]:
info['hkl'] = [envdict['H'],
envdict['K'],
envdict['L']]
calibarray = array + "_PARAM"
if calibarray in sps.getarraylist(self.name):
try:
data = sps.getdata(self.name, calibarray)
updc = sps.updatecounter(self.name, calibarray)
info["EnvKey"] = calibarray
# data is an array
info["McaCalib"] = data.tolist()[0]
info["env_updatecounter"] = updc
except:
# Some of our C modules return NULL without setting
# an exception ...
pass
if array in ["XIA_DATA", "XIA_BASELINE"]:
envarray = "XIA_DET"
if envarray in sps.getarraylist(self.name):
try:
data = sps.getdata(self.name, envarray)
updc = sps.updatecounter(self.name, envarray)
info["EnvKey"] = envarray
info["Detectors"] = data.tolist()[0]
info["env_updatecounter"] = updc
except:
pass
return info
def _buildLabelsList(self, instr):
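        # Splits a Spec "axistitles" string into individual labels: labels are
        # separated by whitespace, and a label containing spaces is wrapped in
        # braces, e.g. (assumed input) 'Energy {Det 1} Monitor'
        # -> ['Energy', 'Det 1', 'Monitor'].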
if DEBUG:
print('SpsDataSource : building counter list')
state = 0
llist = ['']
for letter in instr:
if state == 0:
if letter == ' ':
state = 1
elif letter == '{':
state = 2
else:
llist[-1] = llist[-1] + letter
elif state == 1:
if letter == ' ':
pass
elif letter == '{':
state = 2
llist.append('')
else:
llist.append(letter)
state = 0
elif state == 2:
if letter == '}':
state = 0
else:
llist[-1] = llist[-1] + letter
try:
llist.remove('')
except ValueError:
pass
return llist
def isUpdated(self, sourceName, key):
if sps.specrunning(sourceName):
if sps.isupdated(sourceName, key):
return True
#return True if its environment is updated
envkey = key + "_ENV"
if envkey in sps.getarraylist(sourceName):
if sps.isupdated(sourceName, envkey):
return True
return False
source_types = {SOURCE_TYPE: SpsDataSource}
# TODO: the 'object' parameter below shadows the built-in name
def DataSource(name="", object=None, copy=True, source_type=SOURCE_TYPE):
try:
sourceClass = source_types[source_type]
except KeyError:
# ERROR invalid source type
raise TypeError("Invalid Source Type, source type should be one of %s" % source_types.keys())
return sourceClass(name, object, copy)
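# Note (editorial assumption): SpsDataSource.__init__ above only accepts ``name``,
# so the three-argument call in DataSource() presumably mirrors the other PyMca
# DataSource factories and would raise a TypeError for this class as written.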
def main():
import sys
try:
specname = sys.argv[1]
arrayname = sys.argv[2]
obj = DataSource(specname)
        data = obj.getDataObject(arrayname)
print("info = ", data.info)
except:
# give usage instructions
print("Usage: SpsDataSource <specversion> <arrayname>")
sys.exit()
if __name__ == "__main__":
main()
| gpl-2.0 | 4,840,702,036,057,973,000 | 39.762712 | 103 | 0.47526 | false |