max_stars_repo_path (stringlengths 3-269) | max_stars_repo_name (stringlengths 4-119) | max_stars_count (int64 0-191k) | id (stringlengths 1-7) | content (stringlengths 6-1.05M) | score (float64 0.23-5.13) | int_score (int64 0-5) |
---|---|---|---|---|---|---|
practicepython/ex3/ex3_list_less_than_10.py | drofp/pypractice | 0 | 12791251 | #!/usr/bin/env python3
"""Example 3 from https://www.practicepython.org/exercise/2014/02/15/03-list-less-than-ten.html
==========================
GIVEN: A list of numbers
- Ask user for a number.
- In one line, print a new list that contains all elements from the original list that are less than
the input number
"""
def main(someList):
chosenVal = int(input("Enter a number.\n" +
"All values in a given list larger than or equal to this number will be filtered out.\n" +
"--> "))
print("The given list is:", someList)
print("The new list is: ", list(val for val in someList if val < chosenVal))
if __name__ == "__main__":
testList = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]
main(testList)
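# Worked example (illustrative, not part of the original exercise file): with
# the sample list above and an input of 10, the printed new list is
# [1, 1, 2, 3, 5, 8] -- equivalent to [val for val in testList if val < 10].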
| 4.46875 | 4 |
abupy/TLineBu/ABuTLGolden.py | luqin/firefly | 1 | 12791252 | <reponame>luqin/firefly
# -*- encoding:utf-8 -*-
"""
Example module for golden-section and ratio splits
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from collections import namedtuple
import matplotlib.pyplot as plt
from ..TLineBu import ABuTLExecute
from ..UtilBu.ABuDTUtil import plt_show
__author__ = '阿布'
__weixin__ = 'abu_quant'
def calc_golden(kl_pd, show=True):
"""
只针对金融时间序列的收盘价格close序列,进行黄金分割及比例分割
数值结果分析以及可视化
:param kl_pd: 金融时间序列,pd.DataFrame对象
:param show: 是否可视化黄金分割及比例分割结果
:return: 黄金分割及比例分割结果组成的namedtuple数值对象
"""
kl_close = kl_pd.close
if not hasattr(kl_pd, 'name'):
# If the financial time series is unexpectedly missing its name attribute, fill in 'unknown'
kl_pd.name = 'unknown'
# Compute the visual golden-section levels
gd_382, gd_500, gd_618 = ABuTLExecute.find_golden_point(kl_pd.index, kl_close)
# Compute the statistical golden-section levels
gex_382, gex_500, gex_618 = ABuTLExecute.find_golden_point_ex(kl_pd.index, kl_close)
# Determine the below/above values for 382 and 618, i.e. the lower and upper bounds of the 382 and 618 bands
below618, above618 = ABuTLExecute.below_above_gen(gd_618, gex_618)
below382, above382 = ABuTLExecute.below_above_gen(gd_382, gex_382)
# Then use the ratio sequence percents with find_percent_point to get pts_dict, the positions for each ratio
percents = [0.20, 0.25, 0.30, 0.70, 0.80, 0.90, 0.95]
pts_dict = ABuTLExecute.find_percent_point(percents, kl_close)
# For 0.20, 0.25, 0.30 only the lowest value is kept, i.e. the bottom levels only need the low
below200, _ = ABuTLExecute.below_above_gen(*pts_dict[0.20])
below250, _ = ABuTLExecute.below_above_gen(*pts_dict[0.25])
below300, _ = ABuTLExecute.below_above_gen(*pts_dict[0.30])
# For 0.70, 0.80, 0.90, 0.95 only the highest value is kept, i.e. the top levels only need the high
_, above700 = ABuTLExecute.below_above_gen(*pts_dict[0.70])
_, above800 = ABuTLExecute.below_above_gen(*pts_dict[0.80])
_, above900 = ABuTLExecute.below_above_gen(*pts_dict[0.90])
_, above950 = ABuTLExecute.below_above_gen(*pts_dict[0.95])
if show:
with plt_show():
# Visualize the golden-section and ratio-split results
plt.axes([0.025, 0.025, 0.95, 0.95])
plt.plot(kl_close)
# 0.70, 0.80, 0.90, 0.95: line width (lw) decreases
plt.axhline(above950, lw=3.5, color='c')
plt.axhline(above900, lw=3.0, color='y')
plt.axhline(above800, lw=2.5, color='k')
plt.axhline(above700, lw=2.5, color='m')
# The middle 618 level is a band with a lower and an upper bound
plt.axhline(above618, lw=2, color='r')
plt.axhline(below618, lw=1.5, color='r')
plt.fill_between(kl_pd.index, above618, below618,
alpha=0.1, color="r")
# The middle 382 level is a band with a lower and an upper bound
plt.axhline(above382, lw=1.5, color='g')
plt.axhline(below382, lw=2, color='g')
plt.fill_between(kl_pd.index, above382, below382,
alpha=0.1, color="g")
# 0.20, 0.25, 0.30: line width (lw) increases
plt.axhline(below300, lw=2.5, color='k')
plt.axhline(below250, lw=3.0, color='y')
plt.axhline(below200, lw=3.5, color='c')
_ = plt.setp(plt.gca().get_xticklabels(), rotation=30)
plt.legend([kl_pd.name, 'above950', 'above900', 'above800', 'above700', 'above618', 'below618',
'above382', 'below382', 'below300', 'below250', 'below200'], bbox_to_anchor=(1.05, 1), loc=2,
borderaxespad=0.)
plt.title('between golden')
return namedtuple('golden', ['g382', 'gex382', 'g500', 'gex500', 'g618',
'gex618', 'above618', 'below618', 'above382', 'below382',
'above950', 'above900', 'above800', 'above700', 'below300', 'below250', 'below200'])(
gd_382, gex_382,
gd_500, gex_500, gd_618, gex_618, above618, below618, above382, below382,
above950, above900, above800, above700, below300, below250, below200)
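# A minimal usage sketch (added illustration, not from the original abupy
# module; the synthetic DataFrame below is an assumption): calc_golden only
# needs a DataFrame with a `close` column, but abupy itself must still be
# importable because the function delegates to ABuTLExecute internally.
if __name__ == '__main__':
    import numpy as np
    import pandas as pd
    demo_kl = pd.DataFrame({'close': 100 + np.cumsum(np.random.randn(252))})
    golden = calc_golden(demo_kl, show=False)
    print(golden.g382, golden.g500, golden.g618)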
| 2.34375 | 2 |
model_command.py | danielcorreaeng/jarvis | 0 | 12791253 | import time
import json
import sys,os
import subprocess
import argparse
import unittest
VALUES_INPUT = {}
VALUES_OUTPUT = {}
class TestCases(unittest.TestCase):
def test_case_000(self):
self.assertEqual('foo'.upper(), 'FOO')
def test_case_001(self):
self.assertEqual('foo'.upper(), 'FOO')
def Run(command, parameters=None):
if(parameters != None):
subprocess.Popen([command, parameters], shell=True)
else:
subprocess.Popen(command, shell=True)
def OpenFolder(path):
if sys.platform == 'win32':
Run('explorer.exe', path)
def Main():
'''No describe'''
global VALUES_INPUT
global VALUES_OUTPUT
VALUES_OUTPUT = VALUES_INPUT
#OpenFolder(r'C:\Windows')
#Run(r'Calc')
#Run(r'C:\Program Files\Google\Chrome\Application\chrome.exe','-incognito www.google.com.br')
#VALUES_OUTPUT['vartest'] = 'test'
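# Illustrative invocation (file names and JSON contents are assumptions):
#   python model_command.py -i input.json -o output.json
# The -i file is loaded into VALUES_INPUT before Main() runs, Main() copies it
# into VALUES_OUTPUT, and the -o file receives VALUES_OUTPUT serialized as
# JSON; -u runs the two placeholder unit tests instead.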
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=Main.__doc__)
parser.add_argument('-d','--description', help='Description of program', action='store_true')
parser.add_argument('-u','--tests', help='Execute tests', action='store_true')
parser.add_argument('-i','--file_input', help='data entry via file (path)')
parser.add_argument('-o','--file_output', help='output data via file (path)')
args, unknown = parser.parse_known_args()
args = vars(args)
if args['description'] == True:
print(Main.__doc__)
sys.exit()
if args['tests'] == True:
suite = unittest.TestSuite()
suite.addTest(TestCases("test_case_000"))
suite.addTest(TestCases("test_case_001"))
runner = unittest.TextTestRunner()
runner.run(suite)
sys.exit()
if args['file_input']:
with open(args['file_input']) as json_file:
VALUES_INPUT = json.load(json_file)
param = ' '.join(unknown)
Main()
if args['file_output']:
with open(args['file_output'], "w") as outfile:
json_string = json.dumps(VALUES_OUTPUT, default=lambda o: o.__dict__, sort_keys=True, indent=2)
outfile.write(json_string) | 2.5625 | 3 |
get_whitelist.py | alexliyu/fdslight | 0 | 12791254 | #!/usr/bin/env python3
"""从apnic获取中国IP范围"""
import urllib.request, os
URL = "http://ftp.apnic.net/apnic/stats/apnic/delegated-apnic-latest"
TMP_PATH = "./whitelist.tmp"
# Path of the generated final whitelist
RESULT_FILE_PATH = "./fdslight_etc/whitelist.txt"
def get_remote_file():
tmpfile = open(TMP_PATH, "wb")
response = urllib.request.urlopen(URL)
rdata = response.read()
tmpfile.write(rdata)
tmpfile.close()
def is_ipv4(line):
"""检查是否是IPv4"""
if line.find("ipv4") < 6: return False
return True
def is_cn_ipv4(line):
if line.find("CN") < 6: return False
return True
def get_subnet(line):
tmplist = line.split("|")
if len(tmplist) != 7: return None
if tmplist[6] != "allocated": return None
base_net = tmplist[3]
n = int(tmplist[4]) - 1
msize = 32 - len(bin(n)) + 2
return "%s/%s" % (base_net, msize,)
def main():
print("downloading...")
get_remote_file()
print("parsing...")
fdst = open(TMP_PATH, "r")
rfdst = open(RESULT_FILE_PATH, "w")
rfdst.write("# %s\n" % URL)
rfdst.write("# China IP address\n")
for line in fdst:
line = line.replace("\r", "")
line = line.replace("\n", "")
if line[0:6] != "apnic|": continue
if not is_ipv4(line): continue
if not is_cn_ipv4(line): continue
subnet = get_subnet(line)
if not subnet: continue
sts = "%s\n" % subnet
rfdst.write(sts)
print("parse ok")
rfdst.close()
fdst.close()
os.remove(TMP_PATH)
if __name__ == '__main__':
main()
| 2.8125 | 3 |
Data Structures/IMP-BST.py | itsrohanvj/Data-Structures-Algorithms-in-Python | 1 | 12791255 | # Returns true if a binary tree is a binary search tree
def IsBST3(root):
if root == None:
return 1
# false if the max of the left is > than root
if (root.getLeft() != None and FindMax(root.getLeft()) > root.get_data()):
return 0
# false if the min of the right is <= than root
if (root.getRight() != None and FindMin(root.getRight()) < root.get_data()):
return 0
# false if, recursively, the left or right is not a BST
if (not IsBST3(root.getLeft()) or not IsBST3(root.getRight())):
return 0
# passing all that, it's a BST
return 1
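# IsBST3 above relies on FindMax/FindMin helpers that are not included in this
# snippet. A minimal sketch (an assumption), using the same node interface
# (getLeft/getRight/get_data) and no BST ordering assumption:
def FindMax(root):
    # Maximum key anywhere in a general binary tree.
    if root is None:
        return float("-inf")
    return max(root.get_data(), FindMax(root.getLeft()), FindMax(root.getRight()))
def FindMin(root):
    # Minimum key anywhere in a general binary tree.
    if root is None:
        return float("inf")
    return min(root.get_data(), FindMin(root.getLeft()), FindMin(root.getRight()))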
#METHOD 2
def isBST4(root, previousValue=[float("-inf")]):
if root is None:
return 1
if not isBST4(root.getLeft(), previousValue):
return 0
if root.get_data() < previousValue[0]:
return 0
previousValue[0] = root.get_data()
return isBST4(root.getRight(), previousValue)
#-----------------------------------------------------------------------
def DLLtoBalancedBST(head):
if(not head or not head.next):
return head
temp = FindMiddleNode(head) # Refer Linked Lists chapter for this function.
p = head #We can use two-pointer logic to find the middle node
while(p.next != temp):
p = p.next
p.next = None
q = temp.next
temp.next = None
temp.prev = DLLtoBalancedBST(head)
temp.next = DLLtoBalancedBST(q)
return temp
#----------------------------------------------------------
def BuildBST(A, left, right) :
if(left > right):
return None
newNode = Node()
if(not newNode) :
print("Memory Error")
return
if(left == right):
newNode.data = A[left]
newNode.left = None
newNode.right = None
else :
mid = left + (right - left) // 2
newNode.data = A[mid]
newNode.left = BuildBST(A, left, mid - 1)
newNode.right = BuildBST(A, mid + 1, right)
return newNode
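# printBST is called in the demo below but not defined in this snippet; a
# minimal in-order sketch (an assumption), using the data/left/right fields
# that BuildBST sets:
def printBST(root):
    # In-order traversal prints a BST's keys in ascending order.
    if root is None:
        return
    printBST(root.left)
    print(root.data, end=" ")
    printBST(root.right)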
if __name__ == "__main__":
# create the sample BST
A = [2, 3, 4, 5, 6, 7]
root = BuildBST(A, 0, len(A) - 1)
print "\ncreating BST"
printBST(root)
#-------------------------------------------------------
count = 0
def kthSmallestInBST(root, k):
global count
if (not root):
return None
left = kthSmallestInBST(root.left, k)
if (left):
return left
count += 1
if (count == k):
return root
return kthSmallestInBST(root.right, k)
#-------------------------------------------------------
def SortedListToBST(ll, start, end):
if(start > end):
return None
# same as (start+end)/2, avoids overflow
mid = start + (end - start) // 2
left = SortedListToBST(ll, start, mid - 1)
root = BSTNode(ll.head.data)
ll.deleteBeg()
root.left = left
root.right = SortedListToBST(ll, mid + 1, end)
return root
def convertSortedListToBST(ll, n) :
return SortedListToBST(ll, 0, n - 1)
#------------------------------------------------------
#PROBLEM 96 : narasimha
class Answer:
def maxPathSum(self, root):
self.maxValue = float("-inf")
self.maxPathSumRec(root)
return self.maxValue
def maxPathSumRec(self, root):
if root == None:
return 0
leftSum = self.maxPathSumRec(root.left)
rightSum = self.maxPathSumRec(root.right)
if leftSum < 0 and rightSum < 0:
self.maxValue = max(self.maxValue, root.val)
return root.val
if leftSum > 0 and rightSum > 0:
self.maxValue = max(self.maxValue, root.val + leftSum + rightSum)
maxValueUp = max(leftSum, rightSum) + root.val
self.maxValue = max(self.maxValue, maxValueUp)
return maxValueUp
| 3.890625 | 4 |
Chapter 2/computational_graph.py | shantam21/Deep-Learning-with-TensorFlow-2-and-Keras | 267 | 12791256 | <reponame>shantam21/Deep-Learning-with-TensorFlow-2-and-Keras<gh_stars>100-1000
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
in_a = tf.placeholder(dtype=tf.float32, shape=(2))
def model(x):
with tf.variable_scope("matmul"):
W = tf.get_variable("W", initializer=tf.ones(shape=(2,2)))
b = tf.get_variable("b", initializer=tf.zeros(shape=(2)))
return x * W + b
out_a = model(in_a)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
outs = sess.run([out_a],
feed_dict={in_a: [1, 0]})
writer = tf.summary.FileWriter("./logs/example", sess.graph) | 3.046875 | 3 |
tests/data_interface/test_tess_transit_disposition_metadata_manager.py | golmschenk/ramjet | 3 | 12791257 | import pandas as pd
from unittest.mock import patch, Mock, PropertyMock
import ramjet.data_interface.tess_transit_metadata_manager as module
from ramjet.data_interface.tess_transit_metadata_manager import TessTransitMetadataManager, Disposition
from ramjet.data_interface.tess_toi_data_interface import ToiColumns
class TestTessTransitMetadata:
@patch.object(module, 'metadatabase')
@patch.object(module, 'TessTransitMetadata')
def test_table_building_creates_rows_based_on_toi_dispositions(self, mock_tess_target_transit_disposition,
mock_metadatabase):
tess_transit_disposition_metadata_manager = TessTransitMetadataManager()
toi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [1, 2, 3],
ToiColumns.disposition.value: ['KP', '', 'FP']})
ctoi_dispositions = pd.DataFrame({ToiColumns.tic_id.value: [],
ToiColumns.disposition.value: []})
with patch.object(module.TessToiDataInterface, 'toi_dispositions', new_callable=PropertyMock
) as mock_toi_dispositions:
with patch.object(module.TessToiDataInterface, 'ctoi_dispositions', new_callable=PropertyMock
) as mock_ctoi_dispositions:
mock_toi_dispositions.return_value = toi_dispositions
mock_ctoi_dispositions.return_value = ctoi_dispositions
tess_transit_disposition_metadata_manager.build_table()
call_args_list = mock_tess_target_transit_disposition.call_args_list
assert len(call_args_list) == 3
assert call_args_list[0][1] == {'tic_id': 1, 'disposition': Disposition.CONFIRMED.value}
assert call_args_list[1][1] == {'tic_id': 2, 'disposition': Disposition.CANDIDATE.value}
assert call_args_list[2][1] == {'tic_id': 3, 'disposition': Disposition.FALSE_POSITIVE.value}
| 2.109375 | 2 |
slurmJob.py | wckdouglas/hack_slurm | 0 | 12791258 | #!/usr/bin/env python
from __future__ import print_function
from builtins import str, bytes
import fileinput
import argparse
import os
import sys
import subprocess
python_path = subprocess.check_output(['which' ,'python']).decode('utf-8')
system_path = os.path.dirname(python_path)
def writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job):
commandFiles = 'command_%i.bash' %commandRank
options = \
"#!/bin/bash \n" +\
"#SBATCH -J %s # Job name \n" %(jobname) +\
"#SBATCH -N %i # Total number of nodes \n" %(numberOfNode)+\
"#SBATCH -n 24 # Total number of tasks %i\n" %(numberOfJob)+\
"#SBATCH -p %s # Queue name \n" %(queue)+\
"#SBATCH -o %s.o%s # Name of stdout output file \n" %(jobname,'%j')+ \
"#SBATCH -t %s # Run time (hh:mm:ss) \n" %time +\
"#SBATCH -A %s \nmodule load gcc\nmodule load java\n" %(allocation) +\
'ulimit -c unlimited\n' +\
"export PATH=%s:$PATH" %system_path
with open('launcher_%i.slurm' %(commandRank), 'w') as slurmFile:
print(options, file = slurmFile)
if concurrent_job == 1:
print('bash %s' %(commandFiles), file = slurmFile)
else:
print('parallel -j%i :::: %s \n' %(concurrent_job,commandFiles), file = slurmFile)
with open(commandFiles,'w') as commandFile:
print('\n'.join(commandlist) + '\n', file = commandFile)
return 0
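# Illustrative invocation (all values are assumptions):
#   python slurmJob.py -c commands.txt -j align -N 1 -n 24 -t 02:00:00 -q normal -p 24
# Each call to writeJob() above emits a launcher_<rank>.slurm plus a matching
# command_<rank>.bash holding up to numberOfCmd command lines; the .slurm file
# either runs the .bash file directly or feeds it to GNU parallel.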
def main(args):
commandFile = args.cmdlst
jobname = args.jobname
numberOfJob = args.numberOfCmd
numberOfNode = args.numberOfNode
allocation = args.allocation
queue = args.queue
time = args.time
concurrent_job = args.processes
with open(commandFile,'r') as f:
commands = f.readlines()
commandlist = []
i = 0
commandRank = 0
for command in commands:
commandlist.append(str(command).strip())
i += 1
if i % numberOfJob == 0:
writeJob(commandlist, jobname, commandRank, numberOfJob, numberOfNode, allocation, queue, time, concurrent_job)
commandRank += 1
i = 0
commandlist=[]
if commandlist:
writeJob(commandlist, jobname, commandRank, i, numberOfNode, allocation, queue, time, concurrent_job)
commandRank += 1
print('Written %i scripts' %commandRank, file = sys.stdout)
return 0
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='A script to create slurm scripts from list of commands')
parser.add_argument('-c', '--cmdlst', help='A list of command, each line is a command', required=True)
parser.add_argument('-j', '--jobname', default='job',help='Jobname (default: job)')
parser.add_argument('-N', '--numberOfNode', default=1, type=int, help='Number of node for each job (default: 1)')
parser.add_argument('-n', '--numberOfCmd', default=1, type=int, help='Number of command per node (default: 1)')
parser.add_argument('-A', '--allocation', default = '2013lambowitz',
help= 'Account (default: 2013lambowitz)',
choices = {'tRNA-profiling-and-b', '2013lambowitz', 'Exosome-RNA-seq'})
parser.add_argument('-t', '--time', default='01:00:00', help='Run time (hh:mm:ss) default: 1:00:00')
parser.add_argument('-q','--queue', default='normal',help='Queue (default: normal)')
parser.add_argument('-p','--processes', default=24,help='How many process to run in the same time (default: 24)', type=int)
args = parser.parse_args()
main(args)
| 2.203125 | 2 |
koans/about_lists.py | uottawapython/UOPy-koans-day-1 | 0 | 12791259 | <gh_stars>0
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Based on AboutArrays in the Ruby Koans
#
from runner.koan import *
class AboutLists(Koan):
def test_accessing_list_elements(self):
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[0])
self.assertEqual(__, noms[3])
self.assertEqual(__, noms[-1])
self.assertEqual(__, noms[-3])
def test_slicing_lists(self):
"""
Use a colon to slice a list
# list = [start:<end:step]
"""
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[0:1])
self.assertEqual(__, noms[0:2])
self.assertEqual(__, noms[2:2])
self.assertEqual(__, noms[2:20])
self.assertEqual(__, noms[4:0])
self.assertEqual(__, noms[4:100])
self.assertEqual(__, noms[5:0])
def test_slicing_to_the_edge(self):
"""
# list = [start:<end:step]
"""
noms = ['peanut', 'butter', 'and', 'jelly']
self.assertEqual(__, noms[2:])
self.assertEqual(__, noms[:2])
| 3.703125 | 4 |
user/models.py | Hy-Oy/Swipter | 0 | 12791260 | import datetime
from django.db import models
from libs.orm import ModelToDicMiXin
SEXS = (
(0, '未知'),
(1, '男'),
(2, '女'),
)
LOCATIONS = (
('bj', '北京'),
('sh', '上海'),
('hz', '杭州'),
('sz', '深圳'),
('cd', '成都'),
('gz', '广州'),
)
class User(models.Model):
"""
phonenum 手机号
nickname 昵称
sex 性别
birth_year 出生年
birth_month 出生月
birth_day 出生日
avatar 个人形象
location 常居地
"""
phonenum = models.CharField(max_length=11, unique=True)
nickname = models.CharField(max_length=16)
sex = models.IntegerField(choices=SEXS, default=0)
birth_year = models.IntegerField(default=2000)
birth_month = models.IntegerField(default=1)
birth_day = models.IntegerField(default=1)
avater = models.CharField(max_length=256)
location = models.CharField(choices=LOCATIONS,max_length=32,default='gz')
@property
def age(self):
date = datetime.date.today()
age = date.year - self.birth_year
age = age if (date.month, date.day) >= (self.birth_month, self.birth_day) else age - 1
return age
@property
def profile(self):
if not hasattr(self, '_profile'):
self._profile, _ = Profile.objects.get_or_create(pk=self.id)
return self._profile
@property
def to_dic(self):
return {
'uid': self.id,
'phonenum': self.phonenum,
'nickname': self.nickname,
'sex': self.sex,
'avater': self.avater,
'location': self.location,
'age': self.age,
}
class Meta:
db_table = 'users'
# def get_or_create_token(self):
# """
# 为用户生成唯一的 token
# :return:
# """
# key = 'token:{}'.format(self.id)
#
# token = cache.get(key)
#
# if not token:
# token = 'token........<PASSWORD>'
# cache.set(key, token, 24 * 60 * 60)
#
# return token
class Profile(models.Model, ModelToDicMiXin):
"""
location 目标城市
min_distance 最小查找范围
max_distance 最大查找范围
min_dating_age 最小交友年龄
max_dating_age 最大交友年龄
dating_sex 匹配的性别
auto_play 视频自动播放
user.profile.location
"""
location = models.CharField(max_length=32, choices=LOCATIONS, default='gz')
min_distance = models.IntegerField(default=0)
max_distance = models.IntegerField(default=10)
min_dating_age = models.IntegerField(default=18)
max_dating_age = models.IntegerField(default=81)
dating_sex = models.IntegerField(choices=SEXS, default=0)
auto_play = models.BooleanField(default=True)
class Meta:
db_table = 'profiles'
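# A minimal usage sketch (commented out; illustrative values, assumes a
# configured Django project with migrations applied for `users`/`profiles`):
# user = User.objects.create(phonenum='13800000000', nickname='demo', sex=1,
#                            birth_year=1995, birth_month=6, birth_day=15,
#                            avater='http://example.com/a.png')
# print(user.age, user.profile.location, user.to_dic)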
| 2.390625 | 2 |
ion-channel-models/mcmc.py | sanmitraghosh/fickleheart-method-tutorials | 0 | 12791261 | #!/usr/bin/env python3
from __future__ import print_function
import sys
sys.path.append('./method')
import os
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import pints
import pints.io
import pints.plot
import model as m
import parametertransform
import priors
"""
Run fit.
"""
model_list = ['A', 'B', 'C']
try:
which_model = sys.argv[1]
except:
print('Usage: python %s [str:which_model]' % os.path.basename(__file__))
sys.exit()
if which_model not in model_list:
raise ValueError('Input model %s is not available in the model list' \
% which_model)
# Get all input variables
import importlib
sys.path.append('./mmt-model-files')
info_id = 'model_%s' % which_model
info = importlib.import_module(info_id)
data_dir = './data'
savedir = './out/mcmc-' + info_id
if not os.path.isdir(savedir):
os.makedirs(savedir)
data_file_name = 'data-sinewave.csv'
print('Fitting to ', data_file_name)
print('Temperature: ', info.temperature)
saveas = info_id + '-' + data_file_name[5:][:-4]
# Protocol
protocol = np.loadtxt('./protocol-time-series/sinewave.csv', skiprows=1,
delimiter=',')
protocol_times = protocol[:, 0]
protocol = protocol[:, 1]
# Control fitting seed
# fit_seed = np.random.randint(0, 2**30)
fit_seed = 542811797
print('Fit seed: ', fit_seed)
np.random.seed(fit_seed)
# Set parameter transformation
transform_to_model_param = parametertransform.log_transform_to_model_param
transform_from_model_param = parametertransform.log_transform_from_model_param
# Load data
data = np.loadtxt(data_dir + '/' + data_file_name,
delimiter=',', skiprows=1) # headers
times = data[:, 0]
data = data[:, 1]
noise_sigma = np.std(data[:500])
print('Estimated noise level: ', noise_sigma)
# Model
model = m.Model(info.model_file,
variables=info.parameters,
current_readout=info.current_list,
set_ion=info.ions_conc,
transform=transform_to_model_param,
temperature=273.15 + info.temperature, # K
)
LogPrior = {
'model_A': priors.ModelALogPrior,
'model_B': priors.ModelBLogPrior,
}
# Update protocol
model.set_fixed_form_voltage_protocol(protocol, protocol_times)
# Create Pints stuffs
problem = pints.SingleOutputProblem(model, times, data)
loglikelihood = pints.GaussianLogLikelihood(problem)
logmodelprior = LogPrior[info_id](transform_to_model_param,
transform_from_model_param)
lognoiseprior = pints.UniformLogPrior([0.1 * noise_sigma], [10. * noise_sigma])
logprior = pints.ComposedLogPrior(logmodelprior, lognoiseprior)
logposterior = pints.LogPosterior(loglikelihood, logprior)
# Check logposterior is working fine
priorparams = np.copy(info.base_param)
transform_priorparams = transform_from_model_param(priorparams)
priorparams = np.append(priorparams, noise_sigma)
transform_priorparams = np.append(transform_priorparams, noise_sigma)
print('Posterior at prior parameters: ',
logposterior(transform_priorparams))
for _ in range(10):
assert(logposterior(transform_priorparams) ==\
logposterior(transform_priorparams))
# Load fitting results
calloaddir = './out/' + info_id
load_seed = 542811797
fit_idx = [1, 2, 3]
transform_x0_list = []
print('MCMC starting point: ')
for i in fit_idx:
f = '%s/%s-solution-%s-%s.txt' % (calloaddir, 'sinewave', load_seed, i)
p = np.loadtxt(f)
transform_x0_list.append(np.append(transform_from_model_param(p),
noise_sigma))
print(transform_x0_list[-1])
print('Posterior: ', logposterior(transform_x0_list[-1]))
# Run
mcmc = pints.MCMCController(logposterior, len(transform_x0_list),
transform_x0_list, method=pints.PopulationMCMC)
n_iter = 100000
mcmc.set_max_iterations(n_iter)
mcmc.set_initial_phase_iterations(int(0.05 * n_iter))
mcmc.set_parallel(False)
mcmc.set_chain_filename('%s/%s-chain.csv' % (savedir, saveas))
mcmc.set_log_pdf_filename('%s/%s-pdf.csv' % (savedir, saveas))
chains = mcmc.run()
# De-transform parameters
chains_param = np.zeros(chains.shape)
for i, c in enumerate(chains):
c_tmp = np.copy(c)
chains_param[i, :, :-1] = transform_to_model_param(c_tmp[:, :-1])
chains_param[i, :, -1] = c_tmp[:, -1]
del(c_tmp)
# Save (de-transformed version)
pints.io.save_samples('%s/%s-chain.csv' % (savedir, saveas), *chains_param)
# Plot
# burn in and thinning
chains_final = chains[:, int(0.5 * n_iter)::5, :]
chains_param = chains_param[:, int(0.5 * n_iter)::5, :]
transform_x0 = transform_x0_list[0]
x0 = np.append(transform_to_model_param(transform_x0[:-1]), transform_x0[-1])
pints.plot.pairwise(chains_param[0], kde=False, ref_parameters=x0)
plt.savefig('%s/%s-fig1.png' % (savedir, saveas))
plt.close('all')
pints.plot.trace(chains_param, ref_parameters=x0)
plt.savefig('%s/%s-fig2.png' % (savedir, saveas))
plt.close('all') | 2.296875 | 2 |
myriad/game/shell/grammar.py | oubiwann/myriad-worlds | 3 | 12791262 | <filename>myriad/game/shell/grammar.py
import random
from pyparsing import alphas, empty, oneOf, replaceWith
from pyparsing import CaselessLiteral, OneOrMore, Optional, ParseException
from pyparsing import CaselessKeyword, LineEnd, MatchFirst, Word
from myriad.game.shell import command
from myriad.item import Item
class ShellParseException(ParseException):
pass
class ShellParser(object):
def __init__(self, session=None):
self.session = session
self.bnf = self.makeBNF()
def makeCommandParseAction(self, cls):
def cmdParseAction(s, l, tokens):
return cls(tokens)
return cmdParseAction
def makeBNF(self):
makeCmd = lambda s: MatchFirst(map(CaselessKeyword, s.split()))
invVerb = makeCmd("INV INVENTORY I")
mapVerb = makeCmd("MAP M")
dropVerb = makeCmd("DROP LEAVE")
takeVerb = makeCmd("TAKE PICKUP") | \
(CaselessLiteral("PICK") + CaselessLiteral("UP"))
moveVerb = makeCmd("MOVE GO") | empty
useVerb = makeCmd("USE U")
openVerb = makeCmd("OPEN O")
quitVerb = makeCmd("QUIT Q")
lookVerb = makeCmd("LOOK L")
doorsVerb = CaselessKeyword("DOORS")
helpVerb = makeCmd("H HELP ?")
readVerb = CaselessKeyword("READ")
itemRef = OneOrMore(Word(alphas)).setParseAction(self.validateItemName)
makeDir = lambda s : makeCmd(s).setParseAction(
replaceWith(s.split()[0]))
nDir = makeDir("N NORTH")
sDir = makeDir("S SOUTH")
eDir = makeDir("E EAST")
wDir = makeDir("W WEST")
neDir = makeDir("NE NORTHEAST")
seDir = makeDir("SE SOUTHEAST")
swDir = makeDir("SW SOUTHWEST")
nwDir = makeDir("NW NORTHWEST")
moveDirection = nDir | sDir | eDir | wDir | neDir | seDir | swDir \
| nwDir
invCommand = invVerb
mapCommand = mapVerb
dropCommand = dropVerb + itemRef("item")
takeCommand = takeVerb + itemRef("item")
useCommand = useVerb + itemRef("usedObj") + \
Optional(oneOf("IN ON", caseless=True)) + \
Optional(itemRef, default=None)("targetObj")
openCommand = openVerb + itemRef("item")
moveCommand = moveVerb + moveDirection("direction")
quitCommand = quitVerb
lookCommand = lookVerb
doorsCommand = doorsVerb
helpCommand = helpVerb
readCommand = readVerb + itemRef("subjectObj")
invCommand.setParseAction(command.InventoryCommand)
mapCommand.setParseAction(command.MapCommand)
dropCommand.setParseAction(command.DropCommand)
takeCommand.setParseAction(command.TakeCommand)
useCommand.setParseAction(command.UseCommand)
openCommand.setParseAction(command.OpenCommand)
moveCommand.setParseAction(command.MoveCommand)
quitCommand.setParseAction(command.QuitCommand)
lookCommand.setParseAction(command.LookCommand)
doorsCommand.setParseAction(command.DoorsCommand)
helpCommand.setParseAction(command.HelpCommand)
readCommand.setParseAction(command.ReadCommand)
return (invCommand |
mapCommand |
useCommand |
openCommand |
dropCommand |
takeCommand |
moveCommand |
lookCommand |
doorsCommand |
helpCommand |
quitCommand |
readCommand).setResultsName("command") + LineEnd()
def validateItemName(self, s, l, t):
iname = " ".join(t)
if iname not in Item.items:
raise ShellParseException(s, l, "No such item '%s'." % iname)
return iname
def parseCmd(self, cmdstr):
try:
return self.bnf.parseString(cmdstr)
except ShellParseException, parseError:
print "ShellParseException: %s" % parseError.msg
except ParseException, parseError:
return random.choice(["Sorry, I don't understand that.",
"Say what?",
"Whatchyoo talkin' 'bout, Willis?",
"Huh?",
"Garbage in, garbage out. Try again.",
"What was the middle part again?",
"Excuse me?",
"Wtf?",
"Uh... what?"])
| 2.5625 | 3 |
src/bets/model/stats/ratio_stats.py | nachereshata/bets-cli | 0 | 12791263 | from bets.model.stats.constants import RANKS, OUTCOMES
from bets.model.stats.abstract_stats import AbstractStats
class RatioStats(AbstractStats):
KEYS = ["date", "country", "tournament", "host_team", "guest_team",
"ratio_1", "ratio_X", "ratio_2",
"rank_1", "rank_X", "rank_2",
"ratio_min", "ratio_med", "ratio_max",
"outcome_min", "outcome_med", "outcome_max",
"ratio_perc_1_X", "ratio_perc_X_2", "ratio_perc_1_2",
"ratio_perc_min_med", "ratio_perc_med_max", "ratio_perc_min_max",
"ratio_mean", "ratio_geometric_mean", "ratio_perc_mean_geometric_mean"]
def __init__(self, ratio_1, ratio_X, ratio_2, host_team="", guest_team="", date="", country="", tournament=""):
self.host_team = host_team
self.guest_team = guest_team
self.date = date
self.country = country
self.tournament = tournament
self.ratio_1 = round(float(ratio_1), 2)
self.ratio_X = round(float(ratio_X), 2)
self.ratio_2 = round(float(ratio_2), 2)
self.ratios = (self.ratio_1, self.ratio_X, self.ratio_2)
self.ratios_sorted = tuple(sorted(self.ratios))
self.ratio_min = self.ratios_sorted[0]
self.ratio_med = self.ratios_sorted[1]
self.ratio_max = self.ratios_sorted[2]
outcomes_by_rank = {rank: [] for rank in RANKS}
ranks_by_outcome = {outcome: [] for outcome in OUTCOMES}
for outcome in OUTCOMES:
for rank in RANKS:
if self[f"ratio_{outcome}"] == self[f"ratio_{rank}"]:
outcomes_by_rank[rank].append(outcome)
ranks_by_outcome[outcome].append(rank)
self.rank_1 = "/".join(ranks_by_outcome["1"])
self.rank_X = "/".join(ranks_by_outcome["X"])
self.rank_2 = "/".join(ranks_by_outcome["2"])
self.outcome_min = "/".join(outcomes_by_rank["min"])
self.outcome_med = "/".join(outcomes_by_rank["med"])
self.outcome_max = "/".join(outcomes_by_rank["max"])
self.ratio_perc_1_X = round(((self.ratio_1 / self.ratio_X) * 100), 2)
self.ratio_perc_X_2 = round(((self.ratio_X / self.ratio_2) * 100), 2)
self.ratio_perc_1_2 = round(((self.ratio_1 / self.ratio_2) * 100), 2)
self.ratio_perc_min_med = round(((self.ratio_min / self.ratio_med) * 100), 2)
self.ratio_perc_med_max = round(((self.ratio_med / self.ratio_max) * 100), 2)
self.ratio_perc_min_max = round(((self.ratio_min / self.ratio_max) * 100), 2)
self.ratio_mean = round(((self.ratio_1 + self.ratio_X + self.ratio_2) / 3), 2)
self.ratio_geometric_mean = round(((self.ratio_1 * self.ratio_X * self.ratio_2) ** (1 / 3)), 2)
self.ratio_perc_mean_geometric_mean = round(((self.ratio_mean / self.ratio_geometric_mean) * 100), 2)
def is_having_similar_ratios_to(self, other: "RatioStats", delta=0.05) -> bool:
if isinstance(other, RatioStats):
if abs(self.ratio_1 - other.ratio_1) <= delta:
if abs(self.ratio_X - other.ratio_X) <= delta:
if abs(self.ratio_2 - other.ratio_2) <= delta:
return True
return False
def is_having_similar_outcome_ratio_percentages_to(self, other: "RatioStats", delta=0.05) -> bool:
if isinstance(other, RatioStats):
if abs(self.ratio_perc_1_X - other.ratio_perc_1_X) <= delta:
if abs(self.ratio_perc_X_2 - other.ratio_perc_X_2) <= delta:
if abs(self.ratio_perc_1_2 - other.ratio_perc_1_2) <= delta:
return True
return False
def is_having_similar_rank_ratio_percentages_to(self, other: "RatioStats", delta=0.05) -> bool:
if isinstance(other, RatioStats):
if abs(self.ratio_perc_min_med - other.ratio_perc_min_med) <= delta:
if abs(self.ratio_perc_med_max - other.ratio_perc_med_max) <= delta:
if abs(self.ratio_perc_min_max - other.ratio_perc_min_max) <= delta:
return True
return False
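# A minimal usage sketch (illustrative odds, not part of the original module;
# assumes the bets package and AbstractStats are importable):
if __name__ == "__main__":
    rs = RatioStats(2.10, 3.30, 3.60, host_team="Home", guest_team="Away")
    # Odds of 2.10/3.30/3.60 make "1" the min-ranked outcome and "2" the max-ranked one.
    assert rs.ratio_min == 2.1 and rs.outcome_min == "1"
    assert rs.ratio_mean == 3.0
    print(rs.rank_1, rs.rank_X, rs.rank_2, rs.ratio_geometric_mean)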
| 2.53125 | 3 |
run.py | zding5/Microblog-Flask | 0 | 12791264 | #!flask/bin/python
# This file is for starting up the server!
from app import myapp
myapp.run(debug=True) | 1.75 | 2 |
nlpsc/test/test_vboard.py | BSlience/nlpsc | 4 | 12791265 | # encoding:utf-8
from nlpsc.dataset import Dataset
from nlpsc.vboard.dataset import DatasetVBoard
class TestVBoard(object):
def test_dataset_vboard(self):
# from nlpsc.vboard.dataset import index
from ..vboard import bottle
bottle.TEMPLATE_PATH.append('../vboard/views/')
dataset = Dataset(name='测试数据集')
dataset.add_header('F-no(int) F-text_a F-text_b L-label1(list) L-label2')
DatasetVBoard(dataset).serve()
| 2.09375 | 2 |
Week of Code 35/Triple Recursion-Week Of Code-35 2.py | anirudhkannanvp/HACKERRANK | 3 | 12791266 | <filename>Week of Code 35/Triple Recursion-Week Of Code-35 2.py
n,m,k=map(int,input().split())
a=[[0]*n for i in range(n)]
for i in range(n):
for j in range(n):
if(i==0 and j==0):
a[i][j]=m
elif(i==j):
a[i][j]=a[i-1][j-1]+k
elif(i>j):
a[i][j]=a[i-1][j]-1
else:
a[i][j]=a[i][j-1]-1
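# Worked example (illustrative): for n=3, m=5, k=2 the recurrence above yields
#   5 4 3
#   4 7 6
#   3 6 9
# (each diagonal cell adds k to the previous diagonal cell, cells below the
#  diagonal copy the cell above minus 1, cells above it copy the cell to the
#  left minus 1).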
for i in range(n):
for j in range(n):
print(a[i][j],sep=" ",end=" ")
print() | 3.140625 | 3 |
markwiki/authn/__init__.py | cabalamat/markwiki | 1 | 12791267 | <filename>markwiki/authn/__init__.py
# Copyright (c) 2016, <NAME>
'''A package to support MarkWiki authentication'''
| 1.117188 | 1 |
kitti_example.py | JarnoRalli/python_camera_library | 1 | 12791268 | <gh_stars>1-10
__author__ = "<NAME>"
__copyright__ = "Copyright, 2021, <NAME>"
__license__ = "3-Clause BSD License"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
#INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
#IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
#OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
#OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cv2
import cameralib
import numpy as np
import open3d as o3d
# Source: https://github.com/utiasSTARS/pykitti/blob/master/pykitti/utils.py
def load_velo_scan(file):
"""Load and parse a velodyne binary file."""
scan = np.fromfile(file, dtype=np.float32)
return scan.reshape((-1, 4))
def readConfFile(fileName):
"""Reads a Kitti camera/velodyne configuration file.
The parameter name and the data are separated by ':', e.g. 'T: 0.0 0.0 0.0'.
Parameters
----------
fileName : str
Name of the file to be read.
Returns
-------
dictionary
a dictionary containing the configuration data.
"""
conf_dict = dict()
try:
with open(fileName) as f:
for line in f:
data = line.split(":")
conf_dict[data[0]] = data[1]
return conf_dict
except Exception as e:
raise
def extractMatrix(input_str, matrix_shape=None):
"""Convert a str into a matrix/vector.
Parameters
----------
input_str : str
String to be converted into numpy matrix.
matrix_shape : tuple
Tuple defining the output shape.
Returns
-------
numpy.array
Numpy array that has the shape matrix_shape
"""
try:
if matrix_shape is None:
output = np.fromstring(input_str, dtype=float, sep=' ').tolist()
else:
output = np.fromstring(input_str, dtype=float, sep=' ').reshape(matrix_shape)
return output
except Exception as e:
raise
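# Illustrative calls (values are placeholders, not taken from the KITTI files):
# extractMatrix("1 0 0 0 1 0 0 0 1", (3, 3)) returns a 3x3 identity matrix,
# while extractMatrix("1 0 0 0 1 0 0 0 1") returns the same numbers as a flat list.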
#--------------
# Test program
#--------------
# Read configuration files
cam_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_cam_to_cam.txt')
lidar_conf = readConfFile('./test_data/kitti/2011_09_26_calib/2011_09_26/calib_velo_to_cam.txt')
lidar_data = np.transpose(load_velo_scan(
'./test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/velodyne_points/data/0000000000.bin')[
:, :3])
image_data = np.array(cv2.imread(
'./test_data/kitti/2011_09_26_drive_0001_sync/2011_09_26/2011_09_26_drive_0001_sync/image_02/data/0000000000.png'))
# Rotation and traslation from velodyne to camera 0
RvelTocam0 = extractMatrix(lidar_conf['R'], (3, 3))
TvelTocam0 = extractMatrix(lidar_conf['T'], (3, 1))
# Trans_velTocam0 = transform_from_rot_trans(RvelTocam0, TvelTocam0)
Trans_velTocam0 = cameralib.concatenateRt(RvelTocam0, TvelTocam0)
# Rotation and traslation from camera 0 to camera 2
Rcam0Tocam2 = extractMatrix(cam_conf['R_02'], (3, 3))
Tcam0Tocam2 = extractMatrix(cam_conf['T_02'], (3, 1))
Trans_cam0Tocam2 = cameralib.concatenateRt(Rcam0Tocam2, Tcam0Tocam2)
# Projection matrix from camera 2 to rectified camera 2
Pcam2 = extractMatrix(cam_conf['P_rect_02'], (3, 4))
Kcam2 = extractMatrix(cam_conf['K_02'], (3, 3))
Rcam2rect = extractMatrix(cam_conf['R_rect_02'], (3, 3))
im_size_rcam2 = extractMatrix(cam_conf['S_rect_02'])
im_size_rcam2.reverse()
# Extract K-matrix from the projection matrix P = K[R | t]
Kcam2rect = np.matmul(Pcam2[:3, :3], Rcam2rect.transpose())
#print("Kcam 2: " + str(Kcam2))
#print("Kcam rectified 2: " + str(Kcam2rect))
# Transform lidar points to camera 0 coordinate frame
lidar_data_cam0 = cameralib.transform(Trans_velTocam0, lidar_data)
# Transform lidar points from camera0 to camera 2 coordinate frame
lidar_data_cam2 = cameralib.transform(Trans_cam0Tocam2, lidar_data_cam0)
# Project lidar points into rectified camera 2
cam2_lidar, uv, RGB_lidar, depth_map = cameralib.forwardprojectP(lidar_data_cam2, Pcam2, im_size_rcam2, image_data)
# Write original lidar points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(lidar_data.transpose())
o3d.io.write_point_cloud("3d_lidar.ply", pcd)
# Write lidar points in cam0 coordinate frame points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(lidar_data_cam0.transpose())
o3d.io.write_point_cloud("3d_cam0.ply", pcd)
# Write lidar points in cam2 coordinate frame points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(lidar_data_cam2.transpose())
o3d.io.write_point_cloud("3d_cam2.ply", pcd)
# Write "filtered" points into ply-file
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(cam2_lidar.transpose())
pcd.colors = o3d.utility.Vector3dVector(RGB_lidar / 255)
o3d.io.write_point_cloud("3d_proj_cam2.ply", pcd)
| 2.03125 | 2 |
docqa/triviaqa/build_complete_vocab.py | Willyoung2017/doc-qa | 422 | 12791269 | import argparse
from os.path import exists
from docqa.triviaqa.build_span_corpus import TriviaQaOpenDataset
from docqa.triviaqa.evidence_corpus import get_evidence_voc
"""
Build vocab of all words in the triviaqa dataset, including
all documents and all train questions.
"""
def main():
parser = argparse.ArgumentParser()
parser.add_argument("output")
parser.add_argument("-m", "--min_count", type=int, default=1)
parser.add_argument("-n", "--n_processes", type=int, default=1)
args = parser.parse_args()
if exists(args.output):
raise ValueError()
data = TriviaQaOpenDataset()
corpus_voc = get_evidence_voc(data.evidence, args.n_processes)
print("Adding question voc...")
train = data.get_train()
for q in train:
corpus_voc.update(q.question)
print("Saving...")
with open(args.output, "w") as f:
for word, c in corpus_voc.items():
if c >= args.min_count:
f.write(word)
f.write("\n")
if __name__ == "__main__":
main() | 3.125 | 3 |
launchkey/entities/validation.py | bgroveben/launchkey-python | 1 | 12791270 | from formencode import Schema, validators, FancyValidator, Invalid, ForEach
from dateutil.parser import parse
class ValidateISODate(FancyValidator):
@staticmethod
def _to_python(value, state):
try:
val = parse(value)
except ValueError:
raise Invalid("Date/time format is invalid, it must be ISO 8601 formatted "
"for UTZ with no offset (i.e. 2010-01-01T01:01:01Z)", value, state)
return val
class PublicKeyValidator(Schema):
id = validators.String()
active = validators.Bool()
date_created = ValidateISODate()
date_expires = ValidateISODate()
public_key = validators.String()
allow_extra_fields = True
class DirectoryUserDeviceLinkResponseValidator(Schema):
qrcode = validators.String() # URL
code = validators.String(min=7)
allow_extra_fields = True
class DirectoryGetDeviceResponseValidator(Schema):
id = validators.String()
name = validators.String()
status = validators.Int()
type = validators.String()
allow_extra_fields = True
class DirectoryGetSessionsValidator(Schema):
auth_request = validators.String()
date_created = ValidateISODate()
service_icon = validators.String()
service_id = validators.String()
service_name = validators.String()
allow_extra_fields = True
class DirectoryValidator(Schema):
id = validators.String()
service_ids = ForEach(validators.String())
sdk_keys = ForEach(validators.String())
premium = validators.Bool()
name = validators.String()
android_key = validators.String()
ios_certificate_fingerprint = validators.String()
active = validators.Bool()
allow_extra_fields = True
class AuthorizationResponseValidator(Schema):
auth = validators.String()
service_user_hash = validators.String()
org_user_hash = validators.String()
user_push_id = validators.String()
public_key_id = validators.String()
allow_extra_fields = True
class AuthorizationResponsePackageValidator(Schema):
service_pins = ForEach()
auth_request = validators.String() # UUID
response = validators.Bool()
device_id = validators.String()
allow_extra_fields = True
class AuthorizeValidator(Schema):
auth_request = validators.String(not_empty=True)
push_package = validators.String(if_missing=None, not_empty=True)
allow_extra_fields = True
class AuthorizeSSEValidator(Schema):
service_user_hash = validators.String()
api_time = validators.String()
allow_extra_fields = True
class ServiceValidator(Schema):
id = validators.String()
icon = validators.String()
name = validators.String()
description = validators.String()
active = validators.Bool()
callback_url = validators.String()
allow_extra_fields = True
class ServiceSecurityPolicyValidator(Schema):
allow_extra_fields = True
| 2.421875 | 2 |
www.py | KirtoXX/segment | 5 | 12791271 | import tensorflow as tf
import numpy as np
input = tf.placeholder(dtype=tf.float32, shape=[1, 5, 5, 3])  # atrous_conv2d expects a 4-D NHWC tensor
filter = tf.constant(value=1, shape=[3,3,3,5], dtype=tf.float32)
conv0 = tf.nn.atrous_conv2d(input,filters=filter,rate=2,padding='VALID')
with tf.Session() as sess:
img = np.ones([1, 5, 5, 3], dtype=np.float32)  # dummy image matching the placeholder shape
out = sess.run(conv0,feed_dict={input:img})
print(out.shape)
| 2.984375 | 3 |
snake_main.py | jprevc/SnakeGame | 0 | 12791272 | import sys
import pygame
import random
from snake_utility import Snake, Cherry, SnakeGameStatusFlags
import json
def set_new_cherry_pos(snake_lst):
"""
Sets new cherry position.
:param snake_lst: List, containing all snake instances present in the game. This is needed
to check that cherry will not be placed onto a snake.
:type snake_lst: list of Snake
"""
new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size)
# check if new cherry position is within any of the snakes and set new one
for snk in snake_lst:
while new_cherry_pos in snk.block_pos_lst:
new_cherry_pos = random.randrange(0, width, Snake.block_size), random.randrange(0, height, Snake.block_size)
return new_cherry_pos
def init_game(config_data):
"""
Initializes the game with configuration, defined in config_data.
:param config_data: Dictionary, which contains configuration for the game, such as
game window dimensions, number of snakes, keyboard keys, etc.
:type config_data: dict
:return: Lists of initialized snakes and cherries.
:rtype: tuple of list
"""
# colors for snakes
snake_colors = [(0, 255, 0), # player 1 is green
(0, 0, 255), # player 2 is blue
(255, 255, 50), # player 3 is yellow
(205, 0, 205)] # player 4 is purple
# create snake instances
init_snake_lst = []
for i in range(config_data["num_snakes"]):
keys = config_data["keys"][i]
snake = Snake(start_pos=config_data["start_pos"][i],
move_keys={'up': pygame.__getattribute__(keys[0]),
'right': pygame.__getattribute__(keys[1]),
'down': pygame.__getattribute__(keys[2]),
'left': pygame.__getattribute__(keys[3])},
color=snake_colors[i],
block_size=config_data["block_size"],
num_of_start_blocks=config_data["initial_snake_length"])
init_snake_lst.append(snake)
# create cherry instances
init_cherry_lst = []
for i in range(config_data["num_cherries"]):
cherry = Cherry(block_size)
cherry.set_new_random_position(init_snake_lst, config_data["main_window_size"])
init_cherry_lst.append(cherry)
return init_snake_lst, init_cherry_lst
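# Illustrative config.json (all values are assumptions; the keys mirror the
# lookups above and in the __main__ block):
# {
#   "main_window_size": [800, 600],
#   "refresh_rate": 150,
#   "block_size": 20,
#   "num_snakes": 2,
#   "num_cherries": 3,
#   "initial_snake_length": 4,
#   "start_pos": [[100, 100], [700, 500]],
#   "keys": [["K_UP", "K_RIGHT", "K_DOWN", "K_LEFT"],
#            ["K_w", "K_d", "K_s", "K_a"]]
# }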
def redraw_screen(snake_lst, cherry_lst, block_size):
"""
Redraws screen with updated snake and cherry positions.
:param snake_lst: List of all snakes in the game.
:type snake_lst: list of Snake
:param cherry_lst: List of all cherries in the game.
:type cherry_lst: list of Cherry
:param block_size: Size of one block of snake or cherry in pixels.
:type block_size: int
"""
# clear screen
screen.fill(BLACK)
# draw snakes
for snake in snake_lst:
for block_pos in snake.block_pos_lst:
pygame.draw.rect(screen,
snake.color,
(block_pos[0], block_pos[1], block_size, block_size))
# draw cherries
for cherry in cherry_lst:
pygame.draw.rect(screen,
(255, 0, 0),
(cherry.position[0], cherry.position[1], block_size, block_size))
# update display
pygame.display.update()
def main_loop(snake_list, cherry_list):
"""
Main loop of the game. This function returns only if snake collision occured.
"""
while True:
# capture events
for event in pygame.event.get():
if event.type == pygame.QUIT:
# happens when user tries to close window
sys.exit() # exit from game
elif event.type == pygame.KEYDOWN:
# happens on key pressed
# check which snake's key was pressed and add it to key stack
for snake in snake_list:
if event.key in [val for _, val in snake.move_keys.items()]:
snake.key_stack.append(event.key)
elif event.type == pygame.USEREVENT: # happens on each timer tick
for snake in snake_list:
snake.get_dir_from_keystack()
snake.set_new_state(size, snake_list)
# check if there is collision
if snake.collision:
return SnakeGameStatusFlags.COLLISION_OCCURENCE
# check if any of the cherries was eaten by the current snake
for cherry in cherry_list:
if snake.block_pos_lst[0] == cherry.position:
# append new block to snake that ate the cherry
snake.block_pos_lst.append(snake.block_pos_lst[-1])
# set new random position for the eaten cherry
cherry.set_new_random_position(snake_list, size)
# redraw screen with updated snake and cherry positions
redraw_screen(snake_list, cherry_list, block_size)
if __name__ == '__main__':
pygame.init()
# load configuration data
with open('config.json', 'r') as config_file:
configuration_data = json.load(config_file)
size = width, height = configuration_data["main_window_size"]
BLACK = 0, 0, 0
refresh_rate = configuration_data["refresh_rate"]
start_pos = configuration_data["start_pos"]
block_size = configuration_data["block_size"]
# set display
screen = pygame.display.set_mode(size)
# set timer
pygame.time.set_timer(pygame.USEREVENT, refresh_rate)
timer = pygame.time.get_ticks()
while True:
# initialize new game
snake_lst, cherry_pos = init_game(configuration_data)
# main loop will exit only if collision occurs
main_loop(snake_lst, cherry_pos)
| 3.578125 | 4 |
replace_text/__init__.py | jakeogh/replace-text | 1 | 12791273 | #from .replace_text import replace_text
from .replace_text import append_unique_bytes_to_file
from .replace_text import remove_comments_from_bytes
from .replace_text import replace_text_in_file
| 1.3125 | 1 |
src/calculations_plot_obs.py | danOSU/emulator-validation | 4 | 12791274 | #!/usr/bin/env python3
import numpy as np
import matplotlib
#matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys, os, glob
import re
# Output data format
from configurations import *
design_pt_to_plot=2
#################################################################################
#### Try to figure out semi-automatically what observables to group together ####
#################################################################################
# This is the input:
# Specifies how observables are grouped according to these regular expression
# Also specify if they should be plotted on a linear or a log scale
regex_obs_to_group_list=[
(r'$\pi$/K/p dN/dy',"dN_dy_(pion|kaon|proton)",'log'),
(r'$\pi$/K/p $\langle p_T \rangle$',"mean_pT_(pion|kaon|proton)",'linear'),
(r'$\Lambda/\Omega/\Xi$ dN/dy',"dN_dy_(Lambda|Omega|Xi)",'log'),
(r'$v_n\{2\}$',"v[2-5+]2",'linear'),
(r'$dN_{ch}/d\eta$',"dNch_deta",'log'),
(r'$dE_T/d\eta$',"dET_deta",'log'),
(r'$\langle p_T \rangle$ fluct',"pT_fluct",'linear'),
]
# This parts figures out how to group observables based on the regular expressions
obs_to_group={}
# Loop over observables to see which ones to group
for system in system_strs:
obs_to_group[system]={}
for obs_name in obs_cent_list[system]:
found_match=False
for regex_id, (regex_label, regex_obs_to_group, plot_scale) in enumerate(regex_obs_to_group_list):
r = re.compile(regex_obs_to_group)
match=r.match(obs_name)
# No match means nothing to group
if (match is not None):
if (found_match):
print("Non-exclusive grouping. Can't work...")
exit(1)
else:
found_match=True
obs_to_group[system][obs_name]=(regex_id, regex_label, plot_scale)
if (not found_match):
obs_to_group[system][obs_name]=None
# Parse the previous list to make something useful out of it
final_obs_grouping = {}
#
for system in system_strs:
final_obs_grouping[system]={}
for n, (key, value) in enumerate(obs_to_group[system].items()):
if (value is None):
newvalue=(n,key)
else:
newvalue=value
final_obs_grouping[system].setdefault(newvalue, []).append(key)
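# Worked example (illustrative): an observable named "dN_dy_pion" matches
# "dN_dy_(pion|kaon|proton)" and is grouped under the pi/K/p dN/dy panel with a
# log scale, while "v22" matches "v[2-5+]2" and lands in the $v_n\{2\}$ group
# on a linear scale.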
##############
#### Plot ####
##############
def plot(calcs):
for system in system_strs:
# Count how many observables to plot
nb_obs=len(final_obs_grouping[system])
# Decide how many columns we want the plot to have
nb_of_cols=4
# Count how many rows are needed
nb_of_rows=int(np.ceil(nb_obs/nb_of_cols))
# Prepare figure
fig = plt.figure(figsize=(2*nb_of_cols,2*nb_of_rows))
line_list=[]
#Loop over grouped observables
#for n, (obs, cent) in enumerate(obs_cent_list.items()):
for n, ((regex_id, obs_name, plot_scale), obs_list) in enumerate(final_obs_grouping[system].items()):
plt.subplot(nb_of_rows,nb_of_cols,n+1)
plt.xlabel(r'Centrality (%)', fontsize=10)
plt.ylabel(obs_name, fontsize=10)
plt.yscale(plot_scale)
# Loop over observable group
for obs, color in zip(obs_list,'rgbrgbrgb'):
cent=obs_cent_list[system][obs]
mid_centrality=[(low+up)/2. for low,up in cent]
#Loop over delta-f
idf_list=[0,1,2,3]
idf_sym=['D','o','^','.']
for idf, line in zip(idf_list, idf_sym):
mean_values=calcs[system][obs]['mean'][:,idf][design_pt_to_plot]
stat_uncert=calcs[system][obs]['err'][:,idf][design_pt_to_plot]
line_type,_,_ = plt.errorbar(mid_centrality, mean_values, yerr=stat_uncert, fmt=line, color=color, markersize=4)
line_list.append(line_type)
if (plot_scale != "log"):
plt.ylim(ymin=0)
# Plot legend in first subplot only
if (0 == n):
plt.legend(line_list,["idf="+str(idf) for idf in idf_list],loc="upper right",fontsize=10)
plt.tight_layout(True)
#plt.savefig("obs.pdf")
plt.show()
if __name__ == '__main__':
results = []
for file in glob.glob(sys.argv[1]):
# Load calculations
calcs = np.fromfile(file, dtype=np.dtype(bayes_dtype))
entry = plot(calcs)
| 2.4375 | 2 |
testdata/PyFEM-master/install.py | Konstantin8105/py4go | 3 | 12791275 | <reponame>Konstantin8105/py4go
############################################################################
# This Python file is part of PyFEM, the code that accompanies the book: #
# #
# 'Non-Linear Finite Element Analysis of Solids and Structures' #
# <NAME>, <NAME>, <NAME> and <NAME> #
# <NAME> and Sons, 2012, ISBN 978-0470666449 #
# #
# The code is written by <NAME>, <NAME> and <NAME>. #
# #
# The latest stable version can be downloaded from the web-site: #
# http://www.wiley.com/go/deborst #
# #
# A github repository, with the most up to date version of the code, #
# can be found here: #
# https://github.com/jjcremmers/PyFEM #
# #
# The code is open source and intended for educational and scientific #
# purposes only. If you use PyFEM in your research, the developers would #
# be grateful if you could cite the book. #
# #
# Disclaimer: #
# The authors reserve all rights but do not guarantee that the code is #
# free from errors. Furthermore, the authors shall not be liable in any #
# event caused by the use of the program. #
############################################################################
import os,sys,numpy,scipy,matplotlib
from PyQt5.Qt import PYQT_VERSION_STR
print("\n ===============================================================\n")
# get operating system
osName = sys.platform
# check python version
versionLong = sys.version.split(' ')
version = versionLong[0].split('.')
print(" Python version detected %10s : " %(versionLong[0]) , end=' ' )
if int(version[0]) == 3 and int(version[1]) >= 6:
print(" OK")
elif int(version[0]) == 2:
print(" Please note that PyFEM has been migrated to Python 3.x\n")
print(" Install Pyhon 3.x\n")
else:
print(" Not OK\n\n Please install Python 2.6.x or 2.7.x\n")
# check numpy version
versionLong = numpy.__version__
version = versionLong.split('.')
print(" Numpy version detected %10s : " %(versionLong) , end=' ' )
if int(version[0]) == 1 and int(version[1]) >= 6:
print(" OK")
else:
print(" Not OK\n\n Please install Numpy 1.6.x or higher\n")
# check scipy version
versionLong = scipy.__version__
version = versionLong.split('.')
print(" Scipy version detected %10s : " %(versionLong) , end=' ' )
if int(version[0]) == 0 and int(version[1]) >= 9:
print(" OK")
elif int(version[0]) >= 1 and int(version[1]) >= 0:
print(" OK")
else:
print(" Not OK\n\n Please install Scipy 0.9.x or higher\n")
versionLong = matplotlib.__version__
version = versionLong.split('.')
print(" Matplotlib version detected %10s : " %(versionLong) , end=' ' )
if int(version[0]) >= 1 and int(version[1]) >= 0:
print(" OK")
else:
print(" Not OK\n\n Please install Matplotlib 1.0.x or higher\n")
versionLong = PYQT_VERSION_STR
version = versionLong.split('.')
print(" PyQt version detected %10s : " %(versionLong) , end=' ' )
if int(version[0]) >= 5:
print(" OK")
else:
print(" Not OK\n\n Please install PyQt 5.x or higher\n")
# get current path
path = os.getcwd()
if osName[:5] == "linux":
print("\n LINUX INSTALLATION")
print(" ===============================================================\n")
print(" When using a bash shell, add the following line")
print(" to ~/.bashrc :\n")
print(' export PYTHONPATH="'+path+'"')
print(" alias pyfem='python3 "+path+"/PyFEM.py'\n")
print(" When using csh or tcsh add the following lines to")
print(" ~/.cshrc or ~/.tcshrc :\n")
print(" setenv PYTHONPATH "+path)
print(" alias pyfem 'python3 "+path+"/PyFEM.py'\n")
print(" ===============================================================\n")
print(" Installation succesful")
print(" See the user manual for further instructions.\n\n")
elif osName[:6] == "darwin":
print("\n MAC-OS INSTALLATION")
print(" ===============================================================\n")
print(" Add the following line to ~/.bashrc :\n")
#print(' export PYTHONPATH="'+path+'"')
print(" alias pyfem='python3 "+path+"/PyFEM.py'\n")
print(" ===============================================================\n")
print(" Installation succesful")
print(" See the user manual for further instructions.\n\n")
elif osName[:3] == "win":
batfile = open( 'pyfem.bat' , 'w' )
fexec = sys.executable
if fexec[-5:] == "w.exe":
fexec = fexec[:-5] + ".exe"
print(fexec)
batfile.write(fexec+' '+path+'\PyFEM.py %1')
batfile.close()
print("\n WINDOWS INSTALLATION")
print(" ===============================================================\n")
#print(" Add the following path to PYTHONPATH and PATH:\n")
#print(" ",path,"\n")
print(" ===============================================================\n")
print(" Installation successful!")
print(" See the user manual for instructions.\n\n")
else:
print("Operating system ",osName," not known.")
input(" Press Enter to continue...")
| 1.765625 | 2 |
hahomematic/parameter_visibility.py | SukramJ/hahomematic | 0 | 12791276 | """ Module about parameter visibility within hahomematic """
from __future__ import annotations
import logging
import os
from typing import Final
import hahomematic.central_unit as hm_central
from hahomematic.const import (
DEFAULT_ENCODING,
EVENT_CONFIG_PENDING,
EVENT_ERROR,
EVENT_STICKY_UN_REACH,
EVENT_UN_REACH,
EVENT_UPDATE_PENDING,
FILE_CUSTOM_UN_IGNORE_PARAMETERS,
PARAM_CHANNEL_OPERATION_MODE,
PARAMSET_KEY_MASTER,
PARAMSET_KEY_VALUES,
)
from hahomematic.helpers import check_or_create_directory
_LOGGER = logging.getLogger(__name__)
# {device_type: channel_no}
_RELEVANT_MASTER_PARAMSETS_BY_DEVICE: dict[str, tuple[set[int], str]] = {
"HmIPW-DRBL4": ({1, 5, 9, 13}, PARAM_CHANNEL_OPERATION_MODE),
"HmIP-DRBLI4": ({9, 13, 17, 21}, PARAM_CHANNEL_OPERATION_MODE),
}
HIDDEN_PARAMETERS: set[str] = {
EVENT_CONFIG_PENDING,
EVENT_ERROR,
EVENT_STICKY_UN_REACH,
EVENT_UN_REACH,
EVENT_UPDATE_PENDING,
PARAM_CHANNEL_OPERATION_MODE,
"ACTIVITY_STATE",
"DIRECTION",
}
# Parameters within the VALUES paramset for which we don't create entities.
_IGNORED_PARAMETERS: set[str] = {
"AES_KEY",
"BOOST_TIME",
"BOOT",
"BURST_LIMIT_WARNING",
"CLEAR_WINDOW_OPEN_SYMBOL",
"COMBINED_PARAMETER",
"DATE_TIME_UNKNOWN",
"DECISION_VALUE",
"DEVICE_IN_BOOTLOADER",
"DEW_POINT_ALARM",
"EMERGENCY_OPERATION",
"EXTERNAL_CLOCK",
"FROST_PROTECTION",
"HUMIDITY_LIMITER",
"IDENTIFICATION_MODE_LCD_BACKLIGHT",
"INCLUSION_UNSUPPORTED_DEVICE",
"INHIBIT",
"INSTALL_MODE",
"LEVEL_COMBINED",
"LEVEL_REAL",
"OLD_LEVEL",
"PARTY_SET_POINT_TEMPERATURE",
"PARTY_TIME_END",
"PARTY_TIME_START",
"PROCESS",
"QUICK_VETO_TIME",
"RAMP_STOP",
"RELOCK_DELAY",
"SECTION",
"SELF_CALIBRATION",
"SENSOR_ERROR",
"SET_SYMBOL_FOR_HEATING_PHASE",
"SMOKE_DETECTOR_COMMAND",
"STATE_UNCERTAIN",
"SWITCH_POINT_OCCURED",
"TEMPERATURE_LIMITER",
"TEMPERATURE_OUT_OF_RANGE",
"TIME_OF_OPERATION",
"WOCHENPROGRAMM",
}
# Ignore Parameter that end with
_IGNORED_PARAMETERS_WILDCARDS_END: set[str] = {
"OVERFLOW",
"OVERHEAT",
"OVERRUN",
"REPORTING",
"RESULT",
"STATUS",
"SUBMIT",
"WORKING",
}
# Ignore Parameter that start with
_IGNORED_PARAMETERS_WILDCARDS_START: set[str] = {
"ADJUSTING",
"ERR_TTM",
"ERROR",
"IDENTIFICATION_MODE_KEY_VISUAL",
"IDENTIFY_",
"PARTY_START",
"PARTY_STOP",
"STATUS_FLAG",
"WEEK_PROGRAM",
}
# Parameters within the paramsets for which we create entities.
_UN_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {
"DLD": ["ERROR_JAMMED"], # HmIP-DLD
"SD": ["SMOKE_DETECTOR_ALARM_STATUS"], # HmIP-SWSD
"HM-Sec-Win": ["DIRECTION", "WORKING", "ERROR", "STATUS"], # HM-Sec-Win*
"HM-Sec-Key": ["DIRECTION", "ERROR"], # HM-Sec-Key*
"HmIP-PCBS-BAT": [
"OPERATING_VOLTAGE",
"LOW_BAT",
], # To override ignore for HmIP-PCBS
}
# Parameters by device within the VALUES paramset for which we don't create entities.
_IGNORE_PARAMETERS_BY_DEVICE: dict[str, list[str]] = {
"LOWBAT": [
"HM-LC-Sw1-FM",
"HM-LC-Sw1PBU-FM",
"HM-LC-Sw1-Pl-DN-R1",
"HM-LC-Sw1-PCB",
"HM-LC-Sw4-DR",
"HM-SwI-3-FM",
],
"LOW_BAT": ["HmIP-BWTH", "HmIP-PCBS"],
"OPERATING_VOLTAGE": [
"HmIP-BDT",
"HmIP-BSL",
"HmIP-BSM",
"HmIP-BWTH",
"HmIP-DR",
"HmIP-FDT",
"HmIP-FSM",
"HmIP-MOD-OC8",
"HmIP-PCBS",
"HmIP-PDT",
"HmIP-PS",
"HmIP-SFD",
],
}
_ACCEPT_PARAMETER_ONLY_ON_CHANNEL: dict[str, int] = {"LOWBAT": 0}
class ParameterVisibilityCache:
"""Cache for parameter visibility."""
def __init__(
self,
central: hm_central.CentralUnit,
):
self._central: Final = central
self._storage_folder: Final = self._central.central_config.storage_folder
# paramset_key, parameter
self._un_ignore_parameters_general: dict[str, set[str]] = {
PARAMSET_KEY_MASTER: set(),
PARAMSET_KEY_VALUES: set(),
}
self._ignore_parameters_by_device_lower: dict[str, list[str]] = {
parameter: [device_type.lower() for device_type in device_types]
for parameter, device_types in _IGNORE_PARAMETERS_BY_DEVICE.items()
}
self._un_ignore_parameters_by_device_lower: dict[str, list[str]] = {
device_type.lower(): parameters
for device_type, parameters in _UN_IGNORE_PARAMETERS_BY_DEVICE.items()
}
# device_type, channel_no, paramset_key, list[parameter]
self._un_ignore_parameters_by_device_paramset_key: dict[
str, dict[int, dict[str, set[str]]]
] = {}
# device_type, channel_no
self._relevant_master_paramsets_by_device: dict[str, set[int]] = {}
self._init()
def _init(self) -> None:
"""Init relevant_master_paramsets_by_device and un_ignore_parameters_by_device from const"""
for (
device_type,
channels_parameter,
) in _RELEVANT_MASTER_PARAMSETS_BY_DEVICE.items():
device_type_l = device_type.lower()
channel_nos, parameter = channels_parameter
if device_type_l not in self._relevant_master_paramsets_by_device:
self._relevant_master_paramsets_by_device[device_type_l] = set()
if device_type_l not in self._un_ignore_parameters_by_device_paramset_key:
self._un_ignore_parameters_by_device_paramset_key[device_type_l] = {}
for channel_no in channel_nos:
self._relevant_master_paramsets_by_device[device_type_l].add(channel_no)
if (
channel_no
not in self._un_ignore_parameters_by_device_paramset_key[
device_type_l
]
):
self._un_ignore_parameters_by_device_paramset_key[device_type_l][
channel_no
] = {PARAMSET_KEY_MASTER: set()}
self._un_ignore_parameters_by_device_paramset_key[device_type_l][
channel_no
][PARAMSET_KEY_MASTER].add(parameter)
def get_un_ignore_parameters(
self, device_type: str, device_channel: int
) -> dict[str, set[str]]:
"""Return un_ignore_parameters"""
device_type_l = device_type.lower()
un_ignore_parameters: dict[str, set[str]] = {}
if device_type_l is not None and device_channel is not None:
un_ignore_parameters = (
self._un_ignore_parameters_by_device_paramset_key.get(
device_type_l, {}
).get(device_channel, {})
)
for (
paramset_key,
un_ignore_params,
) in self._un_ignore_parameters_general.items():
if paramset_key not in un_ignore_parameters:
un_ignore_parameters[paramset_key] = set()
un_ignore_parameters[paramset_key].update(un_ignore_params)
return un_ignore_parameters
def ignore_parameter(
self,
device_type: str,
sub_type: str | None,
device_channel: int,
paramset_key: str,
parameter: str,
) -> bool:
"""Check if parameter can be ignored."""
device_type_l = device_type.lower()
sub_type_l = sub_type.lower() if sub_type else None
if paramset_key == PARAMSET_KEY_VALUES:
if self.parameter_is_un_ignored(
device_type=device_type,
sub_type=sub_type,
device_channel=device_channel,
paramset_key=paramset_key,
parameter=parameter,
):
return False
if (
parameter in _IGNORED_PARAMETERS
or parameter.endswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_END))
or parameter.startswith(tuple(_IGNORED_PARAMETERS_WILDCARDS_START))
or device_type_l.startswith(
tuple(self._ignore_parameters_by_device_lower.get(parameter, []))
)
or sub_type_l
in self._ignore_parameters_by_device_lower.get(parameter, [])
):
return True
if (
accept_channel := _ACCEPT_PARAMETER_ONLY_ON_CHANNEL.get(parameter)
) is not None:
if accept_channel != device_channel:
return True
if paramset_key == PARAMSET_KEY_MASTER:
if parameter not in self._un_ignore_parameters_by_device_paramset_key.get(
device_type_l, {}
).get(device_channel, {}).get(PARAMSET_KEY_MASTER, []):
return True
return False
def parameter_is_un_ignored(
self,
device_type: str,
sub_type: str | None,
device_channel: int,
paramset_key: str,
parameter: str,
) -> bool:
"""Return if parameter is on un_ignore list"""
device_type_l = device_type.lower()
sub_type_l = sub_type.lower() if sub_type else None
if parameter in self._un_ignore_parameters_general[paramset_key]:
return True
if parameter in self._un_ignore_parameters_by_device_paramset_key.get(
device_type_l, {}
).get(device_channel, {}).get(paramset_key, set()):
return True
if sub_type_l:
if parameter in self._un_ignore_parameters_by_device_paramset_key.get(
sub_type_l, {}
).get(device_channel, {}).get(paramset_key, set()):
return True
if sub_type_l and sub_type_l in self._un_ignore_parameters_by_device_lower:
un_ignore_parameters = self._un_ignore_parameters_by_device_lower[
sub_type_l
]
if parameter in un_ignore_parameters:
return True
if device_type_l.startswith(tuple(self._un_ignore_parameters_by_device_lower)):
for (
device_t,
un_ignore_parameters,
) in self._un_ignore_parameters_by_device_lower.items():
if device_type_l.startswith(device_t):
if parameter in un_ignore_parameters:
return True
return False
def _add_line_to_cache(self, line: str) -> None:
"""
        Add a line from the un-ignore file to the cache.
Add data to relevant_master_paramsets_by_device and un_ignore_parameters_by_device from file.
"""
try:
line = line.strip()
if "@" in line:
# add parameter@devicetype:channel_no:paramset_key
data = line.split("@")
if len(data) != 2:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache. Only one @ expected.",
line,
)
return
parameter = data[0]
device_data = data[1].split(":")
if len(device_data) != 3:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache. 4 arguments expected: e.g. TEMPERATURE@HmIP-BWTH:1:VALUES.",
line,
)
return
device_type = device_data[0].lower()
channel_no = int(device_data[1])
paramset_key = device_data[2]
if device_type not in self._un_ignore_parameters_by_device_paramset_key:
self._un_ignore_parameters_by_device_paramset_key[device_type] = {}
if (
channel_no
not in self._un_ignore_parameters_by_device_paramset_key[
device_type
]
):
self._un_ignore_parameters_by_device_paramset_key[device_type][
channel_no
] = {}
if (
paramset_key
not in self._un_ignore_parameters_by_device_paramset_key[
device_type
][channel_no]
):
self._un_ignore_parameters_by_device_paramset_key[device_type][
channel_no
][paramset_key] = set()
self._un_ignore_parameters_by_device_paramset_key[device_type][
channel_no
][paramset_key].add(parameter)
if paramset_key == PARAMSET_KEY_MASTER:
if device_type not in self._relevant_master_paramsets_by_device:
self._relevant_master_paramsets_by_device[device_type] = set()
self._relevant_master_paramsets_by_device[device_type].add(
channel_no
)
elif ":" in line:
                # add paramset_key:parameter
data = line.split(":")
if len(data) != 2:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache. 2 arguments expected: e.g. TEMPERATURE:VALUES.",
line,
)
return
paramset_key = data[0]
parameter = data[1]
if paramset_key in (PARAMSET_KEY_VALUES, PARAMSET_KEY_MASTER):
self._un_ignore_parameters_general[paramset_key].add(parameter)
else:
# add parameter
self._un_ignore_parameters_general[PARAMSET_KEY_VALUES].add(line)
except Exception:
_LOGGER.warning(
"add_line_to_cache: Could not add line '%s' to un ignore cache.", line
)
def is_relevant_paramset(
self,
device_type: str,
sub_type: str | None,
paramset_key: str,
device_channel: int,
) -> bool:
"""Return if a paramset is relevant."""
device_type_l = device_type.lower()
sub_type_l = sub_type.lower() if sub_type else None
if paramset_key == PARAMSET_KEY_VALUES:
return True
if device_channel is not None and paramset_key == PARAMSET_KEY_MASTER:
for (
d_type,
channel_nos,
) in self._relevant_master_paramsets_by_device.items():
if device_channel in channel_nos and (
device_type_l == d_type.lower()
or (sub_type_l and sub_type_l == d_type.lower())
or device_type_l.startswith(d_type.lower())
):
return True
return False
async def load(self) -> None:
"""Load custom un ignore parameters from disk."""
def _load() -> None:
if not check_or_create_directory(self._storage_folder):
return
if not os.path.exists(
os.path.join(self._storage_folder, FILE_CUSTOM_UN_IGNORE_PARAMETERS)
):
_LOGGER.debug(
"load: No file found in %s",
self._storage_folder,
)
return
try:
with open(
file=os.path.join(
self._storage_folder,
FILE_CUSTOM_UN_IGNORE_PARAMETERS,
),
mode="r",
encoding=DEFAULT_ENCODING,
) as fptr:
for line in fptr.readlines():
self._add_line_to_cache(line)
except Exception as ex:
_LOGGER.warning(
"load: Could not read unignore file %s",
ex.args,
)
await self._central.async_add_executor_job(_load)
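# Minimal usage sketch (illustrative only; `central` stands for an already constructed
# hm_central.CentralUnit instance and is not defined in this module):
#
#   cache = ParameterVisibilityCache(central=central)
#   await cache.load()
#   if not cache.ignore_parameter(device_type="HmIP-BWTH", sub_type=None, device_channel=1,
#                                 paramset_key=PARAMSET_KEY_VALUES, parameter="TEMPERATURE"):
#       ...  # create an entity for this parameter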
| 2.0625 | 2 |
lazy/api/client.py | trisongz/lazycls | 2 | 12791277 | <reponame>trisongz/lazycls
from __future__ import annotations
from lazy.types import *
from lazy.models import BaseCls
from .config import *
from .types import *
from .utils import convert_to_cls
from .base_imports import _httpx_available, _ensure_api_reqs
if _httpx_available:
from httpx import Client as _Client
from httpx import AsyncClient as _AsyncClient
from httpx import Response as HttpResponse
else:
_Client, _AsyncClient, HttpResponse = object, object, object
class Client:
_web: _Client = None
_async: _AsyncClient = None
@classmethod
def create_client(cls, base_url: str = "", config: Dict[str, Any] = None, **kwargs) -> Type[_Client]:
"""Creates a Sync httpx Client"""
_ensure_api_reqs()
configz = HttpConfigz()
if config: configz.update_config(**config)
client_config = configz.httpx_config
if 'headers' in kwargs:
headers = kwargs.pop('headers')
if headers: client_config['headers'] = headers
return _Client(base_url = base_url, **client_config, **kwargs)
@classmethod
def create_async_client(cls, base_url: str = "", config: Dict[str, Any] = None, **kwargs) -> Type[_AsyncClient]:
""" Creates an async httpx Client"""
_ensure_api_reqs()
configz = AsyncHttpConfigz()
if config: configz.update_config(**config)
client_config = configz.httpx_config
if 'headers' in kwargs:
headers = kwargs.pop('headers')
if headers: client_config['headers'] = headers
return _AsyncClient(base_url = base_url, **client_config, **kwargs)
@classproperty
def client(cls) -> Type[_Client]:
if not cls._web: cls._web = cls.create_client()
return cls._web
@classproperty
def async_client(cls) -> Type[_AsyncClient]:
if not cls._async: cls._async = cls.create_async_client()
return cls._async
class ApiClient:
def __init__(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs):
_ensure_api_reqs()
self.base_url = ""
self.headers = {}
self.config = None
self.async_config = None
self._module_name = None
self._kwargs = {}
self._web = None
self._async = None
self._default_mode = False
self.set_configs(base_url = base_url, headers = headers, config = config, async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs)
def set_configs(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs):
self.base_url = base_url or self.base_url
self.headers = headers or self.headers
self.config = config or self.config
self.async_config = async_config or self.async_config
self._module_name = module_name or self._module_name
self._default_mode = default_resp or self._default_mode
self._kwargs = kwargs or self._kwargs
def reset_clients(self, base_url: str = HttpConfigz.base_url or AsyncHttpConfigz.base_url, headers: DictAny = {}, config: DictAny = None, async_config: DictAny = None, module_name: str = HttpConfigz.module_name or AsyncHttpConfigz.module_name, default_resp: bool = False, **kwargs):
self.set_configs(base_url = base_url, headers = headers, config = config, async_config = async_config, module_name = module_name, default_resp = default_resp, **kwargs)
self._web = None
self._async = None
@property
def client(self):
if not self._web: self._web = Client.create_client(base_url=self.base_url, config=self.config, headers=self.headers, **self._kwargs)
return self._web
@property
def aclient(self):
if not self._async: self._async = Client.create_async_client(base_url=self.base_url, config=self.async_config, headers=self.headers, **self._kwargs)
return self._async
#############################################################################
# Base REST APIs #
#############################################################################
def delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = self.client.delete(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'sync', method = 'delete')
def get(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = self.client.get(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'sync', method = 'get')
def head(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = self.client.head(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'sync', method = 'head')
def patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = self.client.patch(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'sync', method = 'patch')
def put(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = self.client.put(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'sync', method = 'put')
def post(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = self.client.post(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'sync', method = 'post')
#############################################################################
# Async REST Methods #
#############################################################################
async def async_delete(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = await self.aclient.delete(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'async', method = 'delete')
async def async_get(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = await self.aclient.get(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'async', method = 'get')
async def async_head(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = await self.aclient.head(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'async', method = 'head')
async def async_patch(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = await self.aclient.patch(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'async', method = 'patch')
async def async_put(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = await self.aclient.put(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'async', method = 'put')
async def async_post(self, path: str, **kwargs) -> Union[Response, HttpResponse]:
resp = await self.aclient.post(url=path, **kwargs)
if self._default_mode: return resp
return Response(resp = resp, client_type = 'async', method = 'post')
#############################################################################
# Supplementary Helpful Callers #
#############################################################################
def ping(self, path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool:
""" Returns a bool of whether response code is great/within range/less than an int
Can be used as a health check """
        res = self.get(path=path, **kwargs)
if min_status_code and max_status_code:
return bool(res.status_code in range(min_status_code, max_status_code))
if min_status_code:
return bool(res.status_code > min_status_code)
return bool(res.status_code < max_status_code)
def get_data(self, path: str, key: str = 'data', **kwargs) -> DataType:
""" Expects to get data in JSON. If does not get the key, returns None. """
        resp = self.get(path=path, **kwargs)
return resp.data.get(key, None)
def get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]:
"""
        Expects the response data as JSON. Returns None if the key is missing.
        Returns the data from a GET request to the path as a LazyCls
"""
data = self.get_data(path=path, key=key, **kwargs)
if not data: return None
return convert_to_cls(resp=data, module_name=self._module_name, base_key=key)
#############################################################################
# Async Supplementary Helpful Callers #
#############################################################################
async def async_ping(self, path: str, max_status_code: int = 300, min_status_code: int = None, **kwargs) -> bool:
""" Returns a bool of whether response code is great/within range/less than an int
Can be used as a health check """
        res = await self.async_get(path=path, **kwargs)
if min_status_code and max_status_code:
return bool(res.status_code in range(min_status_code, max_status_code))
if min_status_code:
return bool(res.status_code > min_status_code)
return bool(res.status_code < max_status_code)
async def async_get_data(self, path: str, key: str = 'data', **kwargs) -> DataType:
""" Expects to get data in JSON. If does not get the key, returns None. """
        resp = await self.async_get(path=path, **kwargs)
return resp.data.get(key, None)
async def async_get_lazycls(self, path: str, key: str = 'data', **kwargs) -> Type[BaseCls]:
"""
        Expects the response data as JSON. Returns None if the key is missing.
        Returns the data from a GET request to the path as a LazyCls
"""
data = await self.async_get_data(path=path, key=key, **kwargs)
if not data: return None
return convert_to_cls(resp=data, module_name=self._module_name, base_key=key)
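# Minimal usage sketch (illustrative; the base URL and paths are placeholders and the optional
# httpx dependency must be installed):
#
#   api = ApiClient(base_url='https://api.example.com', headers={'Accept': 'application/json'})
#   if api.ping('/health'):
#       items = api.get_data('/items', key='data')
#   # async variant:
#   #   items = await api.async_get_data('/items', key='data')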
APIClient = ApiClient
__all__ = [
'Client',
'HttpResponse',
'ApiClient',
'APIClient',
'_Client',
'_AsyncClient'
]
| 2.234375 | 2 |
faculty_sync/screens/loading.py | Matt-Haugh/faculty-sync | 6 | 12791278 | <gh_stars>1-10
SEQUENCE = ["|", "/", "-", "\\", "|", "/", "-", "\\"]
class LoadingIndicator(object):
def __init__(self):
self._index = 0
def current(self):
return SEQUENCE[self._index]
def next(self):
self._index = (self._index + 1) % len(SEQUENCE)
return self.current()
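# Example (illustrative): drive the spinner from a redraw loop.
#
#   spinner = LoadingIndicator()
#   frame = spinner.next()  # first call returns '/', then cycles through SEQUENCE: '-', '\\', '|', ...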
| 3.09375 | 3 |
CTF/new_file.py | mark0519/CTFplatform | 9 | 12791279 | <filename>CTF/new_file.py
# -*- coding: utf-8 -*-
import os
from werkzeug.utils import secure_filename
from CTF import db,new,login
from CTF.models import que,user
import uuid
def random_filename(filename):  # rename the uploaded file
ext = os.path.splitext(filename)[1]
print(type(ext))
print(ext)
    # os.path.splitext() returns only the final suffix, so '.tar.gz' is matched on the full filename
    if ext in ('.rar', '.7z', '.zip', '.tar') or filename.endswith('.tar.gz'):
new_filename = uuid.uuid4().hex + ext
return new_filename
else:
return None
from flask import (
Blueprint, flash, g, redirect, render_template, request, url_for, jsonify,session
)
from werkzeug.exceptions import abort
bp = Blueprint('/admin/new_file', __name__)
@bp.route('/admin/new_file', methods=['POST'])
def challenges_list():
if 'id' not in session or user.query.filter(user.user_id == session.get('id')).first().user_teamid !=1:
return redirect('../auth/login')
if request.method == 'POST':
file = request.files['file']
print(request.files)
if not file:
new_que = que.query.filter(que.que_id == new.new_que_id).first()
new_que.que_address=None
db.session.add(new_que)
try:
db.session.commit()
except:
db.session.rollback()
raise
finally:
db.session.close()
return redirect('challenges_list')
filename=random_filename(file.filename)
        if not que.query.filter(que.que_id == new.new_que_id).first():  # duplicate question name
return jsonify({'code': 0}),200
        elif not filename:  # invalid file type
q = que.query.filter(que.que_id == new.new_que_id).first()
new.new_que_id -= 1
db.session.delete(q)
try:
db.session.commit()
except:
db.session.rollback()
raise
finally:
db.session.close()
return '''
<script>
alert("请上传压缩包格式文件");
window.location.href="/admin/new";
</script>
'''
else:
file.save(os.path.join('CTF/upload', secure_filename(filename)))
path = '/upload/'+str(filename)
print(new.new_que_id)
new_que = que.query.filter(que.que_id == new.new_que_id).first()
new_que.que_address=path
db.session.add(new_que)
try:
db.session.commit()
except:
db.session.rollback()
raise
finally:
db.session.close()
return redirect('challenges_list') | 2.328125 | 2 |
train_frcnn_v2.py | vadisala123/tf-fasterrcnn | 0 | 12791280 | from __future__ import division
import random
import pprint
import sys
import time
import numpy as np
from optparse import OptionParser
import pickle
import os
import re
import shutil
import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from keras_frcnn import config, data_generators
from keras_frcnn import losses as losses
import keras_frcnn.roi_helpers as roi_helpers
from tensorflow.python.keras.utils import generic_utils
sys.setrecursionlimit(40000)
from tensorflow.python.ops.numpy_ops import np_config
np_config.enable_numpy_behavior()
# if Logs path directory exists, it will delete the directory
if os.path.exists('logs'):
shutil.rmtree('logs')
parser = OptionParser()
parser.add_option("-p", "--path", dest="train_path", help="Path to training data.")
parser.add_option("-v", "--valid_path", dest="valid_path", help="Path to validation data.")
parser.add_option("-o", "--parser", dest="parser",
help="Parser to use. One of simple or pascal_voc", default="pascal_voc")
parser.add_option("-n", "--num_rois", type="int", dest="num_rois",
help="Number of RoIs to process at once.", default=32)
parser.add_option("--network", dest="network",
help="Base network to use. Supports vgg or resnet50.", default='resnet50')
parser.add_option("--hf", dest="horizontal_flips",
help="Augment with horizontal flips in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--vf", dest="vertical_flips",
help="Augment with vertical flips in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--rot", "--rot_90", dest="rot_90",
help="Augment with 90 degree rotations in training. (Default=false).",
action="store_true", default=False)
parser.add_option("--num_epochs", type="int",
dest="num_epochs", help="Number of epochs.", default=2000)
parser.add_option("--config_filename", dest="config_filename",
help="Location to store all the metadata related to "
"the training (to be used when testing).",
default="config.pickle")
parser.add_option("--output_weight_path", dest="output_weight_path",
help="Output path for weights.", default='./model_frcnn.hdf5')
parser.add_option("--input_weight_path", dest="input_weight_path",
help="Input path for weights. If not specified, will try to"
" load default weights provided by keras.")
(options, args) = parser.parse_args()
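# Example invocation (illustrative; the annotation files and option values are placeholders):
#   python train_frcnn_v2.py --path train_annotations.txt --valid_path val_annotations.txt \
#       --parser simple --network resnet50 --num_rois 32 --num_epochs 50 --hf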
if not options.train_path: # if filename is not given
parser.error('Error: path to training data must be specified. Pass --path to command line')
if options.parser == 'pascal_voc':
from keras_frcnn.pascal_voc_parser import get_data
elif options.parser == 'simple':
from keras_frcnn.simple_parser import get_data
else:
raise ValueError("Command line option parser must be one of 'pascal_voc' or 'simple'")
# pass the settings from the command line, and persist them in the config object
C = config.Config()
C.use_horizontal_flips = bool(options.horizontal_flips)
C.use_vertical_flips = bool(options.vertical_flips)
C.rot_90 = bool(options.rot_90)
C.model_path = options.output_weight_path
model_path_regex = re.match(r"^(.+)(\.hdf5)$", C.model_path)
if model_path_regex is None:
print('Output weights must have .hdf5 filetype')
exit(1)
C.num_rois = int(options.num_rois)
if options.network == 'vgg':
C.network = 'vgg'
from keras_frcnn import vgg as nn
elif options.network == 'resnet50':
from keras_frcnn import resnet as nn
C.network = 'resnet50'
else:
print('Not a valid model')
raise ValueError
# check if weight path was passed via command line
if options.input_weight_path:
C.base_net_weights = options.input_weight_path
else:
# set the path to weights based on backend and model
C.base_net_weights = nn.get_weight_path()
train_imgs, classes_count, class_mapping = get_data(options.train_path)
val_imgs, _, _ = get_data(options.valid_path)
if 'bg' not in classes_count:
classes_count['bg'] = 0
class_mapping['bg'] = len(class_mapping)
C.class_mapping = class_mapping
inv_map = {v: k for k, v in class_mapping.items()}
print('Training images per class:')
pprint.pprint(classes_count)
print(f'Num classes (including bg) = {len(classes_count)}')
config_output_filename = options.config_filename
with open(config_output_filename, 'wb') as config_f:
pickle.dump(C, config_f)
print(f'Config has been written to {config_output_filename}, '
f'and can be loaded when testing to ensure correct results')
num_imgs = len(train_imgs)
num_valid_imgs = len(val_imgs)
print(f'Num train samples {len(train_imgs)}')
print(f'Num val samples {len(val_imgs)}')
data_gen_train = data_generators.get_anchor_gt(train_imgs, classes_count, C,
nn.get_img_output_length,
K.image_data_format(), mode='train')
data_gen_val = data_generators.get_anchor_gt(val_imgs, classes_count, C, nn.get_img_output_length,
K.image_data_format(), mode='val')
if K.image_data_format() == 'channels_first':
input_shape_img = (3, None, None)
else:
input_shape_img = (None, None, 3)
img_input = Input(shape=input_shape_img)
roi_input = Input(shape=(None, 4))
shared_layers = nn.nn_base(img_input, trainable=True)
# define the RPN, built on the base layers
num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)
rpn = nn.rpn(shared_layers, num_anchors)
classifier = nn.classifier(shared_layers, roi_input, C.num_rois,
nb_classes=len(classes_count), trainable=True)
model_rpn = Model(img_input, rpn[:2])
model_classifier = Model([img_input, roi_input], classifier)
# this is a model that holds both the RPN and the classifier,
# used to load/save weights for the models
model_all = Model([img_input, roi_input], rpn[:2] + classifier)
# Defining optimizers for all models
optimizer_rpn = Adam(learning_rate=1e-5)
optimizer_classifier = Adam(learning_rate=1e-5)
optimizer_all = SGD(learning_rate=0.01)
# Accuracy metrics for Fast RCNN model
train_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
val_classifier_metric = tf.keras.metrics.CategoricalAccuracy()
# Loss function of RPN model and Fast RCNN model
rpn_class_loss_fn = losses.RpnClassificationLoss(num_anchors)
rpn_reg_loss_fn = losses.RpnRegressionLoss(num_anchors)
fast_rcnn_class_loss_fn = losses.FastrcnnClassLoss()
fast_rcnn_reg_loss_fn = losses.FastrcnnRegLoss(len(classes_count) - 1)
# tensorboard writer, automatically creates directory and writes logs
train_writer = tf.summary.create_file_writer('logs/train/')
valid_writer = tf.summary.create_file_writer('logs/valid/')
@tf.function
def rpn_train_step(step, x_batch_train, y_batch_train):
with tf.GradientTape() as rpn_tape:
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=True)
rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
rpn_grads = rpn_tape.gradient([rpn_class_loss, rpn_reg_loss],
model_rpn.trainable_weights)
optimizer_rpn.apply_gradients(zip(rpn_grads, model_rpn.trainable_weights))
# write training loss and accuracy to the tensorboard
with train_writer.as_default():
tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss
@tf.function
def frcnn_train_step(step, x_batch_train, X2, Y1, Y2):
with tf.GradientTape() as frcnn_tape:
rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2],
training=True)
fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
frcnn_grads = frcnn_tape.gradient([fast_rcnn_class_loss, fast_rcnn_reg_loss],
model_classifier.trainable_weights)
optimizer_classifier.apply_gradients(zip(frcnn_grads, model_classifier.trainable_weights))
train_classifier_metric.update_state(Y1, rcnn_class_pred)
fast_rcnn_class_acc = train_classifier_metric.result()
# write training loss and accuracy to the tensorboard
with train_writer.as_default():
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc
@tf.function
def rpn_valid_step(step, x_batch_train, y_batch_train):
with tf.GradientTape() as rpn_tape:
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
y_rpn_cls_pred, y_rpn_regr_pred = model_rpn(x_batch_train, training=False)
rpn_class_loss = rpn_class_loss_fn(y_rpn_cls_true, y_rpn_cls_pred)
rpn_reg_loss = rpn_reg_loss_fn(y_rpn_regr_true, y_rpn_regr_pred)
# write training loss and accuracy to the tensorboard
with valid_writer.as_default():
tf.summary.scalar('rpn_class_loss', rpn_class_loss, step=step)
tf.summary.scalar('rpn_reg_loss', rpn_reg_loss, step=step)
return y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss
@tf.function
def frcnn_valid_step(step, x_batch_train, X2, Y1, Y2):
with tf.GradientTape() as frcnn_tape:
rcnn_class_pred, rcnn_reg_pred = model_classifier([x_batch_train, X2],
training=False)
fast_rcnn_class_loss = fast_rcnn_class_loss_fn(Y1, rcnn_class_pred)
fast_rcnn_reg_loss = fast_rcnn_reg_loss_fn(Y2, rcnn_reg_pred)
val_classifier_metric.update_state(Y1, rcnn_class_pred)
fast_rcnn_class_acc = val_classifier_metric.result()
# write training loss and accuracy to the tensorboard
with valid_writer.as_default():
tf.summary.scalar('fast_rcnn_class_loss', fast_rcnn_class_loss, step=step)
tf.summary.scalar('fast_rcnn_reg_loss', fast_rcnn_reg_loss, step=step)
tf.summary.scalar('fast_rcnn_class_acc', fast_rcnn_class_acc, step=step)
return fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc
def get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch):
    """Select a balanced mix of positive and negative RoI indices (up to C.num_rois) for the detector."""
neg_samples = np.where(Y1[0, :, -1] == 1)
pos_samples = np.where(Y1[0, :, -1] == 0)
if len(neg_samples) > 0:
neg_samples = neg_samples[0]
else:
neg_samples = []
if len(pos_samples) > 0:
pos_samples = pos_samples[0]
else:
pos_samples = []
rpn_accuracy_rpn_monitor.append(len(pos_samples))
rpn_accuracy_for_epoch.append((len(pos_samples)))
if C.num_rois > 1:
if len(pos_samples) < C.num_rois // 2:
selected_pos_samples = pos_samples.tolist()
else:
selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2,
replace=False).tolist()
try:
selected_neg_samples = np.random.choice(neg_samples,
C.num_rois - len(selected_pos_samples),
replace=False).tolist()
except:
selected_neg_samples = np.random.choice(neg_samples,
C.num_rois - len(selected_pos_samples),
replace=True).tolist()
sel_samples = selected_pos_samples + selected_neg_samples
else:
# in the extreme case where num_rois = 1, we pick a random pos or neg sample
selected_pos_samples = pos_samples.tolist()
selected_neg_samples = neg_samples.tolist()
if np.random.randint(0, 2):
sel_samples = random.choice(neg_samples)
else:
sel_samples = random.choice(pos_samples)
return sel_samples
n_epochs = options.num_epochs
BATCH_SIZE = 1
n_steps = num_imgs // BATCH_SIZE
n_valid_steps = num_valid_imgs // BATCH_SIZE
# Note: this array shadows the `losses` module imported above; the loss objects it provides were already built
losses = np.zeros((n_steps, 5))
rpn_accuracy_rpn_monitor = []
rpn_accuracy_for_epoch = []
valid_losses = np.zeros((n_valid_steps, 5))
rpn_accuracy_rpn_monitor_valid = []
rpn_accuracy_for_epoch_valid = []
best_loss = np.Inf
start_time = time.time()
class_mapping_inv = {v: k for k, v in class_mapping.items()}
global_step = tf.convert_to_tensor(0, tf.int64)
one_step = tf.convert_to_tensor(1, tf.int64)
print("Training started for %d epochs" % n_epochs)
for epoch in range(n_epochs):
print("\nStart of epoch %d" % (epoch + 1,))
progbar = generic_utils.Progbar(n_steps)
# Iterate over the batches of the dataset.
for step, (x_batch_train, y_batch_train, img_data) in enumerate(data_gen_train):
# print(step, img_data['filepath'])
y_rpn_cls_true, y_rpn_regr_true = y_batch_train
step = tf.cast(step, dtype=tf.int64)
global_step = tf.add(global_step, one_step)
y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_train_step(
global_step, x_batch_train, y_batch_train)
R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor.append(0)
rpn_accuracy_for_epoch.append(0)
continue
sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor, rpn_accuracy_for_epoch)
x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_train_step(
global_step, x_batch_train, x2_tensor, y1_tensor, y2_tensor)
losses[step, 0] = rpn_class_loss
losses[step, 1] = rpn_reg_loss
losses[step, 2] = fast_rcnn_class_loss
losses[step, 3] = fast_rcnn_reg_loss
losses[step, 4] = fast_rcnn_class_acc
progbar.update(step + 1,
[('rpn_cls', rpn_class_loss),
('rpn_regr', rpn_reg_loss),
('detector_cls', fast_rcnn_class_loss),
('detector_regr', fast_rcnn_reg_loss)])
if step == n_steps - 1 and C.verbose:
mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)
) / len(rpn_accuracy_rpn_monitor)
rpn_accuracy_rpn_monitor = []
print(f'\nAverage number of overlapping bounding boxes '
f'from RPN = {mean_overlapping_bboxes} for {step} previous iterations')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
' Check RPN settings or keep training.')
loss_rpn_cls = np.mean(losses[:, 0])
loss_rpn_regr = np.mean(losses[:, 1])
loss_class_cls = np.mean(losses[:, 2])
loss_class_regr = np.mean(losses[:, 3])
class_acc = np.mean(losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch)) / len(
rpn_accuracy_for_epoch)
rpn_accuracy_for_epoch = []
if C.verbose:
print(
f'\nMean number of bounding boxes from RPN overlapping '
f'ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
print("Total Loss: %.4f" % curr_loss)
start_time = time.time()
if curr_loss < best_loss:
if C.verbose:
print(
f'Total loss decreased from {best_loss} to {curr_loss}, saving weights')
best_loss = curr_loss
model_all.save_weights(model_path_regex.group(1) + "_" + '{:04d}'.format(
epoch) + model_path_regex.group(2))
break
# # Log every 10 steps.
# if step % 10 == 0:
# print("Step %d, RPN Cls Loss: %.4f RPN reg Loss: %.4f "
# "FRCNN Cls Loss: %.4f FRCNN reg Loss: %.4f" % (
# step, float(rpn_class_loss), float(rpn_reg_loss), float(fast_rcnn_class_loss),
# float(fast_rcnn_reg_loss)))
# Reset training metrics at the end of each epoch
train_classifier_metric.reset_states()
progbar = generic_utils.Progbar(n_valid_steps)
# Iterate over the batches of the dataset.
for step, (x_batch_val, y_batch_val, img_data) in enumerate(data_gen_val):
y_rpn_cls_true, y_rpn_regr_true = y_batch_val
y_rpn_cls_pred, y_rpn_regr_pred, rpn_class_loss, rpn_reg_loss = rpn_valid_step(
global_step, x_batch_val, y_batch_val)
R = roi_helpers.rpn_to_roi(y_rpn_cls_pred, y_rpn_regr_pred, C, K.image_data_format(),
use_regr=True, overlap_thresh=0.7, max_boxes=300)
# note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
X2, Y1, Y2, IouS = roi_helpers.calc_iou(R, img_data, C, class_mapping)
if X2 is None:
rpn_accuracy_rpn_monitor_valid.append(0)
rpn_accuracy_for_epoch_valid.append(0)
continue
sel_samples = get_selected_samples(Y1, rpn_accuracy_rpn_monitor_valid,
rpn_accuracy_for_epoch_valid)
x2_tensor = tf.convert_to_tensor(X2[:, sel_samples, :], tf.float32)
y1_tensor = tf.convert_to_tensor(Y1[:, sel_samples, :], tf.float32)
y2_tensor = tf.convert_to_tensor(Y2[:, sel_samples, :], tf.float32)
fast_rcnn_class_loss, fast_rcnn_reg_loss, fast_rcnn_class_acc = frcnn_valid_step(
global_step, x_batch_val, x2_tensor, y1_tensor, y2_tensor)
valid_losses[step, 0] = rpn_class_loss
valid_losses[step, 1] = rpn_reg_loss
valid_losses[step, 2] = fast_rcnn_class_loss
valid_losses[step, 3] = fast_rcnn_reg_loss
valid_losses[step, 4] = fast_rcnn_class_acc
progbar.update(step + 1,
[('rpn_cls', rpn_class_loss),
('rpn_regr', rpn_reg_loss),
('detector_cls', fast_rcnn_class_loss),
('detector_regr', fast_rcnn_reg_loss)])
if step == n_valid_steps - 1 and C.verbose:
            mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor_valid)
                                            ) / len(rpn_accuracy_rpn_monitor_valid)
rpn_accuracy_rpn_monitor_valid = []
print(f'\nValidation: Average number of overlapping bounding boxes '
f'from RPN = {mean_overlapping_bboxes}')
if mean_overlapping_bboxes == 0:
print('RPN is not producing bounding boxes that overlap the ground truth boxes.'
' Check RPN settings or keep training.')
loss_rpn_cls = np.mean(valid_losses[:, 0])
loss_rpn_regr = np.mean(valid_losses[:, 1])
loss_class_cls = np.mean(valid_losses[:, 2])
loss_class_regr = np.mean(valid_losses[:, 3])
class_acc = np.mean(valid_losses[:, 4])
mean_overlapping_bboxes = float(sum(rpn_accuracy_for_epoch_valid)
) / len(rpn_accuracy_for_epoch_valid)
rpn_accuracy_for_epoch_valid = []
if C.verbose:
print("Validation Metrics: ")
print(
f'Mean number of bounding boxes from RPN overlapping '
f'ground truth boxes: {mean_overlapping_bboxes}')
print(f'Classifier accuracy for bounding boxes from RPN: {class_acc}')
print(f'Loss RPN classifier: {loss_rpn_cls}')
print(f'Loss RPN regression: {loss_rpn_regr}')
print(f'Loss Detector classifier: {loss_class_cls}')
print(f'Loss Detector regression: {loss_class_regr}')
print(f'Elapsed time: {time.time() - start_time}')
curr_loss = loss_rpn_cls + loss_rpn_regr + loss_class_cls + loss_class_regr
print("Total validation loss: %.4f" % curr_loss)
start_time = time.time()
break
val_classifier_metric.reset_states()
| 2.078125 | 2 |
tests/conftest.py | primal100/stripe-subscriptions | 0 | 12791281 | <reponame>primal100/stripe-subscriptions
import os
import sys
import pytest
import stripe
from stripe.error import InvalidRequestError
from datetime import datetime, timedelta
import subscriptions
from subscriptions import UserProtocol, User
from typing import Optional, Any, List, Dict
api_key = ''
python_version = sys.version_info
ci_string = f'{os.name}-{python_version.major}{python_version.minor}'
def pytest_addoption(parser):
parser.addoption("--apikey", action="store", default=os.environ.get('STRIPE_TEST_SECRET_KEY'))
@pytest.fixture(scope="session")
def stripe_subscription_product_url() -> str:
return "http://localhost/paywall"
@pytest.fixture(scope="session")
def stripe_unsubscribed_product_url() -> str:
return "http://localhost/second_paywall"
@pytest.fixture(scope="session", autouse=True)
def setup_stripe(pytestconfig):
stripe.api_key = pytestconfig.getoption("apikey")
@pytest.fixture(scope="session")
def checkout_success_url() -> str:
return "http://localhost"
@pytest.fixture(scope="session")
def checkout_cancel_url() -> str:
return "http://localhost/cancel"
@pytest.fixture(scope="session")
def payment_method_types() -> List[str]:
return ["card"]
@pytest.fixture
def user_email() -> str:
    return f'<EMAIL>-{ci_string}'
@pytest.fixture
def user(user_email) -> UserProtocol:
user = User(user_id=1, email=user_email)
yield user
if user.stripe_customer_id:
try:
subscriptions.delete_customer(user)
except InvalidRequestError:
pass
@pytest.fixture(params=[None, "user"])
def none_or_user(request, user) -> Optional[UserProtocol]:
if not request.param:
return None
return user
@pytest.fixture
def wrong_customer_id() -> UserProtocol:
user = User(
2,
"<EMAIL>",
'cus_1234567890ABCD'
)
return user
@pytest.fixture
def user_with_customer_id(user, user_email) -> UserProtocol:
customers = stripe.Customer.list(email=user_email)
for customer in customers:
stripe.Customer.delete(customer)
subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
return user
@pytest.fixture(params=["no-customer-id", "with-customer-id"])
def user_with_and_without_customer_id(request, user) -> UserProtocol:
if request.param == "no-customer-id":
return user
subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
return user
@pytest.fixture(params=["no-user", "no-customer-id", "with-customer-id"])
def no_user_and_user_with_and_without_customer_id(request, user) -> Optional[UserProtocol]:
if request.param == "no-user":
return None
elif request.param == "no-customer-id":
return user
subscriptions.create_customer(user, description="stripe-subscriptions test runner user")
return user
@pytest.fixture
def payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod:
return subscriptions.tests.create_payment_method_for_customer(user_with_customer_id)
@pytest.fixture
def default_payment_method_for_customer(user_with_customer_id) -> stripe.PaymentMethod:
return subscriptions.tests.create_default_payment_method_for_customer(user_with_customer_id)
@pytest.fixture
def payment_method_saved(user_with_customer_id, payment_method_for_customer) -> stripe.PaymentMethod:
payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id
payment_method_for_customer['card']['checks']['cvc_check'] = "pass"
return payment_method_for_customer
@pytest.fixture
def default_payment_method_saved(user_with_customer_id, default_payment_method_for_customer) -> stripe.PaymentMethod:
default_payment_method_for_customer['customer'] = user_with_customer_id.stripe_customer_id
default_payment_method_for_customer['card']['checks']['cvc_check'] = "pass"
return default_payment_method_for_customer
@pytest.fixture
def subscription(user_with_customer_id, default_payment_method_for_customer, stripe_price_id) -> stripe.Subscription:
return subscriptions.create_subscription(user_with_customer_id, stripe_price_id)
@pytest.fixture
def non_existing_payment_method_id() -> str:
return "pm_ABCDEFGH123456"
@pytest.fixture
def non_existing_subscription_id() -> str:
return "sub_ABCDEFGH123456"
@pytest.fixture(scope="session")
def subscribed_product_name() -> str:
return 'Gold'
@pytest.fixture(scope="session")
def stripe_subscription_product_id(stripe_subscription_product_url, subscribed_product_name) -> str:
products = stripe.Product.list(url=stripe_subscription_product_url, active=True, limit=1)
if products:
product = products['data'][0]
else:
product = stripe.Product.create(name=subscribed_product_name, url=stripe_subscription_product_url)
return product['id']
@pytest.fixture(scope="session")
def stripe_price_currency() -> str:
return "usd"
@pytest.fixture(scope="session")
def unsubscribed_product_name() -> str:
return 'Silver'
@pytest.fixture(scope="session")
def stripe_unsubscribed_product_id(unsubscribed_product_name, stripe_unsubscribed_product_url) -> str:
products = stripe.Product.list(url=stripe_unsubscribed_product_url, active=True, limit=1)
if products:
product = products['data'][0]
else:
product = stripe.Product.create(name=unsubscribed_product_name, url=stripe_unsubscribed_product_url)
return product['id']
@pytest.fixture(scope="session")
def stripe_price_id(stripe_subscription_product_id) -> str:
prices = stripe.Price.list(product=stripe_subscription_product_id, active=True, limit=1)
if prices:
price = prices.data[0]
else:
price = stripe.Price.create(
unit_amount=129,
currency="usd",
recurring={"interval": "month"},
product=stripe_subscription_product_id,
)
return price['id']
@pytest.fixture(scope="session")
def stripe_unsubscribed_price_id(stripe_unsubscribed_product_id) -> str:
prices = stripe.Price.list(product=stripe_unsubscribed_product_id, active=True, limit=1)
if prices:
price = prices.data[0]
else:
price = stripe.Price.create(
unit_amount=9999,
currency="usd",
recurring={"interval": "year"},
product=stripe_unsubscribed_product_id,
)
return price['id']
@pytest.fixture
def subscription_id(subscription):
return subscription['id']
@pytest.fixture
def subscription_current_period_end(subscription):
return subscription['current_period_end']
@pytest.fixture
def expected_subscription_prices(stripe_subscription_product_id, stripe_price_id, stripe_price_currency,
subscription_id, subscription_current_period_end) -> List:
return [
{'id': stripe_price_id,
'recurring': {
"aggregate_usage": None,
"interval": "month",
"interval_count": 1,
"trial_period_days": None,
"usage_type": "licensed",
},
'type': 'recurring',
'currency': stripe_price_currency,
'unit_amount': 129,
'unit_amount_decimal': '129',
'nickname': None,
'metadata': {},
'product': stripe_subscription_product_id,
'subscription_info': {'sub_id': subscription_id, 'cancel_at': None,
'current_period_end': subscription_current_period_end}}]
@pytest.fixture
def expected_subscription_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id,
stripe_price_currency) -> List:
return [
{'id': stripe_price_id,
'recurring': {
"aggregate_usage": None,
"interval": "month",
"interval_count": 1,
"trial_period_days": None,
"usage_type": "licensed",
},
'type': 'recurring',
'currency': stripe_price_currency,
'unit_amount': 129,
'unit_amount_decimal': '129',
'nickname': None,
'metadata': {},
'product': stripe_subscription_product_id,
'subscription_info': {'sub_id': None, 'cancel_at': None, 'current_period_end': None}}]
@pytest.fixture
def expected_subscription_products_and_prices(stripe_subscription_product_id, stripe_price_id,
subscribed_product_name, stripe_unsubscribed_product_id,
unsubscribed_product_name, stripe_unsubscribed_price_id,
stripe_subscription_product_url,
stripe_unsubscribed_product_url,
stripe_price_currency, subscription_id,
subscription_current_period_end) -> List:
return [
{'id': stripe_unsubscribed_product_id,
'images': [],
'metadata': {},
'name': unsubscribed_product_name,
'prices': [{'currency': stripe_price_currency,
'id': stripe_unsubscribed_price_id,
'metadata': {},
'nickname': None,
'recurring': {'aggregate_usage': None,
'interval': 'year',
'interval_count': 1,
'trial_period_days': None,
'usage_type': 'licensed'},
'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
'type': 'recurring',
'unit_amount': 9999,
'unit_amount_decimal': '9999'}],
'shippable': None,
'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
'type': 'service',
'unit_label': None,
'url': stripe_unsubscribed_product_url},
{'id': stripe_subscription_product_id,
'images': [],
'type': 'service',
'name': subscribed_product_name,
'shippable': None,
'unit_label': None,
'url': stripe_subscription_product_url,
'metadata': {},
'prices': [{'id': stripe_price_id,
'recurring': {
"aggregate_usage": None,
"interval": "month",
"interval_count": 1,
"trial_period_days": None,
"usage_type": "licensed"
},
'type': 'recurring',
'currency': stripe_price_currency,
'unit_amount': 129,
'unit_amount_decimal': '129',
'nickname': None,
'metadata': {},
'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}],
'subscription_info': {'sub_id': subscription_id, 'current_period_end': subscription_current_period_end, 'cancel_at': None}}
]
@pytest.fixture
def expected_subscription_products_and_prices_unsubscribed(stripe_subscription_product_id, stripe_price_id,
subscribed_product_name, stripe_unsubscribed_product_id,
unsubscribed_product_name, stripe_unsubscribed_price_id,
stripe_subscription_product_url,
stripe_unsubscribed_product_url,
stripe_price_currency) -> List:
return [
{'id': stripe_unsubscribed_product_id,
'images': [],
'metadata': {},
'name': unsubscribed_product_name,
'prices': [{'currency': stripe_price_currency,
'id': stripe_unsubscribed_price_id,
'metadata': {},
'nickname': None,
'recurring': {'aggregate_usage': None,
'interval': 'year',
'interval_count': 1,
'trial_period_days': None,
'usage_type': 'licensed'},
'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
'type': 'recurring',
'unit_amount': 9999,
'unit_amount_decimal': '9999'}],
'shippable': None,
'subscription_info': {'cancel_at': None, 'current_period_end': None, 'sub_id': None},
'type': 'service',
'unit_label': None,
'url': stripe_unsubscribed_product_url},
{'id': stripe_subscription_product_id,
'images': [],
'type': 'service',
'name': subscribed_product_name,
'shippable': None,
'unit_label': None,
'url': stripe_subscription_product_url,
'metadata': {},
'prices': [{'id': stripe_price_id,
'recurring': {
"aggregate_usage": None,
"interval": "month",
"interval_count": 1,
"trial_period_days": None,
"usage_type": "licensed"
},
'type': 'recurring',
'currency': stripe_price_currency,
'unit_amount': 129,
'unit_amount_decimal': '129',
'nickname': None,
'metadata': {},
'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}],
'subscription_info': {'sub_id': None, 'current_period_end': None,'cancel_at': None}}
]
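# Illustrative test sketch (hypothetical test module; shows how the fixtures above are meant to
# compose; the asserted prefix follows Stripe's subscription id convention):
#
#   def test_create_subscription(user_with_customer_id, default_payment_method_for_customer,
#                                stripe_price_id):
#       sub = subscriptions.create_subscription(user_with_customer_id, stripe_price_id)
#       assert sub['id'].startswith('sub_')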
| 2.078125 | 2 |
main.py | xfgryujk/blivetts | 8 | 12791282 | <gh_stars>1-10
# -*- coding: utf-8 -*-
import asyncio
import pyttsx3
import translate
import blivedm.blivedm as blivedm
class BLiveTts(blivedm.BLiveClient):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
        # Translation
self._translator = translate.Translator(from_lang='zh', to_lang='ja')
# TTS
self._tts = None
def start(self):
self._loop.run_in_executor(None, self._tts_loop)
return super().start()
def _tts_loop(self):
self._tts = pyttsx3.init()
# voice = self._tts.getProperty('voice')
# print('cur voice', voice)
# voices = self._tts.getProperty('voices')
# for voice in voices:
# print(voice)
self._tts.setProperty('voice', r'HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\TTS_MS_JA-JP_HARUKA_11.0')
self._tts.startLoop()
async def _on_receive_danmaku(self, danmaku: blivedm.DanmakuMessage):
self._say(danmaku.msg)
async def _on_super_chat(self, message: blivedm.SuperChatMessage):
self._say(message.message)
def _say(self, text):
self._loop.create_task(self._do_say(text))
async def _do_say(self, text):
        # TODO: cache frequently used phrases?
translated_text = await self._loop.run_in_executor(None, self._translator.translate, text)
print(f'{text} - {translated_text}')
        # TODO: add to a queue
self._tts.say(translated_text)
async def main():
client = BLiveTts(213)
await client.start()
if __name__ == '__main__':
asyncio.get_event_loop().run_until_complete(main())
| 2.375 | 2 |
checker.py | Stefania12/Router | 0 | 12791283 | <reponame>Stefania12/Router<gh_stars>0
#!/usr/bin/env python3
import argparse
import os
import shutil
import sys
import traceback
from scapy.sendrecv import sendp, sniff
import info
import tests
def capture(interface, output_file="test"):
cap = sniff(iface=interface, timeout=info.TIMEOUT)
# FIXME
packets = []
for i in range(len(cap)):
packets.append(cap[i])
return packets
def passive(host, testname):
iface = info.get("host_if_name", host)
packets = capture(iface)
test = tests.TESTS[testname]
if host == test.host_r:
fn = test.passive_fn
elif host == test.host_s:
fn = tests.sender_default
else:
fn = tests.check_nothing
try:
status = fn(testname, packets)
except AssertionError as e:
traceback.print_tb(e.__traceback__)
status = False
if (status):
print("PASS")
else:
print("FAIL")
def send_packets(packets, iface):
for packet in packets:
sendp(packet, iface=iface)
def active(host, testname):
test = tests.TESTS[testname]
iface = info.get("host_if_name", host)
packets = test.active_fn(testname)
send_packets(packets, iface)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--passive", action="store_true")
parser.add_argument("--active", action="store_true")
parser.add_argument("--testname", type=str)
# Technically we *could* determine this, but this is simpler
parser.add_argument("--host", type=int)
args = parser.parse_args()
assert(args.passive ^ args.active)
if args.passive:
passive(args.host, args.testname)
else:
active(args.host, args.testname)
if __name__ == "__main__":
main()
| 2.515625 | 3 |
commands/dataFileComplete/entry.py | tapnair/ImportAndShare | 0 | 12791284 | <reponame>tapnair/ImportAndShare
import csv
import json
import time
import adsk.core
from ... import config
from ...lib import fusion360utils as futil
app = adsk.core.Application.get()
ui = app.userInterface
NAME1 = 'Data_Handler'
NAME2 = "Custom Import Event"
NAME3 = "Custom Save Event"
NAME4 = "Custom Close Event"
# Local list of event handlers used to maintain a reference so
# they are not released and garbage collected.
local_handlers = []
my_data_handlers = []
my_custom_handlers = []
# Executed when add-in is run. Create custom events so we don't disrupt the main application loop.
def start():
app.unregisterCustomEvent(config.custom_event_id_import)
custom_event_import = app.registerCustomEvent(config.custom_event_id_import)
custom_event_handler_import = futil.add_handler(custom_event_import, handle_import, name=NAME2)
my_custom_handlers.append({
'custom_event_id': config.custom_event_id_import,
'custom_event': custom_event_import,
'custom_event_handler': custom_event_handler_import
})
app.unregisterCustomEvent(config.custom_event_id_save)
custom_event_save = app.registerCustomEvent(config.custom_event_id_save)
custom_event_handler_save = futil.add_handler(custom_event_save, handle_save, name=NAME3)
my_custom_handlers.append({
'custom_event_id': config.custom_event_id_save,
'custom_event': custom_event_save,
'custom_event_handler': custom_event_handler_save
})
app.unregisterCustomEvent(config.custom_event_id_close)
custom_event_close = app.registerCustomEvent(config.custom_event_id_close)
custom_event_handler_close = futil.add_handler(custom_event_close, handle_close, name=NAME4)
my_custom_handlers.append({
'custom_event_id': config.custom_event_id_close,
'custom_event': custom_event_close,
'custom_event_handler': custom_event_handler_close
})
# Create the event handler for when data files are complete.
my_data_handlers.append(
futil.add_handler(app.dataFileComplete, handle_data_file_complete, local_handlers=local_handlers,
name=NAME1))
futil.log(f'**********local_handlers added: {len(local_handlers)}')
futil.log(f'**********my_data_handlers added: {len(my_data_handlers)}')
# Executed when add-in is stopped. Remove events.
def stop():
futil.log(f'**********local_handlers stop: {len(local_handlers)}')
futil.log(f'**********my_data_handlers stop: {len(my_data_handlers)}')
for custom_item in my_custom_handlers:
custom_item['custom_event'].remove(custom_item['custom_event_handler'])
app.unregisterCustomEvent(custom_item['custom_event_id'])
for data_handler in my_data_handlers:
app.dataFileComplete.remove(data_handler)
# Import a document from the list
def handle_import(args: adsk.core.CustomEventArgs):
event_data = json.loads(args.additionalInfo)
file_name = event_data['file_name']
file_path = event_data['file_path']
futil.log(f'**********Importing: {file_name}')
# Execute the Fusion 360 import into a new document
import_manager = app.importManager
step_options = import_manager.createSTEPImportOptions(file_path)
new_document = import_manager.importToNewDocument(step_options)
# Keep track of imported files
config.imported_documents[file_name] = new_document
config.imported_filenames.append(file_name)
# Fire event to save the document
event_data = {
'file_name': file_name,
'file_path': file_path
}
additional_info = json.dumps(event_data)
app.fireCustomEvent(config.custom_event_id_save, additional_info)
# Save a specific Document
def handle_save(args: adsk.core.CustomEventArgs):
event_data = json.loads(args.additionalInfo)
file_name = event_data['file_name']
futil.log(f'**********Saving: {file_name}')
new_document = config.imported_documents[file_name]
new_document.saveAs(file_name, config.target_data_folder, 'Imported from script', 'tag')
# Close a specific document
def handle_close(args: adsk.core.CustomEventArgs):
event_data = json.loads(args.additionalInfo)
file_name = event_data['file_name']
futil.log(f'**********Closing: {file_name}')
new_document = config.imported_documents.pop(file_name, False)
if new_document:
new_document.close(False)
# Function to be executed by the dataFileComplete event.
def handle_data_file_complete(args: adsk.core.DataEventArgs):
futil.log(f'***In application_data_file_complete event handler for: {args.file.name}')
# Get the dataFile and process it
# data_file: adsk.core.DataFile = args.file
# process_data_file(data_file)
document: adsk.core.Document
for file_name, document in config.imported_documents.items():
if document.isValid:
if document.dataFile.isComplete:
process_data_file(document.dataFile)
# document.close(False)
def process_data_file(data_file: adsk.core.DataFile):
# Make sure we are processing a file imported from this script
if data_file.name in config.imported_filenames:
try:
# Create the public link for the data file
public_link = data_file.publicLink
futil.log(f"**********Created public link for {data_file.name}: {public_link}")
# Store the result of this file
config.results.append({
'Name': data_file.name,
'URN': data_file.versionId,
'Link': public_link
})
config.imported_filenames.remove(data_file.name)
# Fire close event for this Document
event_data = {
'file_name': data_file.name,
}
additional_info = json.dumps(event_data)
app.fireCustomEvent(config.custom_event_id_close, additional_info)
except:
futil.handle_error('process_data_file')
    # If all documents have been processed, finalize the results
if len(config.imported_filenames) == 0:
if not config.run_finished:
config.run_finished = True
write_results()
else:
# futil.log(f"**********Already processed: {data_file.name}")
...
# After all files are processed, write the results
def write_results():
futil.log(f"Writing CSV")
with open(config.csv_file_name, mode='w') as csv_file:
fieldnames = ['Name', 'URN', 'Link']
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for row in config.results:
writer.writerow(row)
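
# Illustrative sketch (not part of the add-in) of how an import is kicked off from
# elsewhere in this project. handle_import() above consumes exactly this JSON
# payload; the file name and path below are hypothetical:
#
#     event_data = {'file_name': 'bracket', 'file_path': 'C:/temp/bracket.step'}
#     app.fireCustomEvent(config.custom_event_id_import, json.dumps(event_data))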
| 1.84375 | 2 |
src/data/utils.py | HemuManju/integrated-gradients-weighted-ica | 0 | 12791285 | <reponame>HemuManju/integrated-gradients-weighted-ica
import collections
from pathlib import Path
import deepdish as dd
import pandas as pd
def nested_dict():
return collections.defaultdict(nested_dict)
def save_dataset(path, dataset, save):
"""save the dataset.
Parameters
----------
path : str
path to save.
dataset : dataset
pytorch dataset.
save : Bool
"""
save_path = Path(__file__).parents[2] / path
if save:
dd.io.save(save_path, dataset)
return None
def compress_dataset(path):
"""compress the dataset.
Parameters
----------
path : str
path to save.
dataset : dataset
pytorch dataset.
save : Bool
"""
dataset = dd.io.load(path)
# New name
file_name = path.split('.')
file_name[-2] = file_name[-2] + '_compressed.'
save_path = ''.join(file_name)
dd.io.save(save_path, dataset, compression=('blosc', 5))
return None
def save_dataframe(path, dataframe, save):
save_path = Path(__file__).parents[2] / path
if save:
dataframe.to_csv(save_path, index=False)
return None
def read_dataframe(path):
read_path = Path(__file__).parents[2] / path
df = pd.read_csv(read_path)
return df
def read_dataset(path):
"""Read the dataset.
Parameters
----------
path : str
path to save.
dataset : dataset
pytorch dataset.
save : Bool
"""
read_path = Path(__file__).parents[2] / path
data = dd.io.load(read_path)
return data
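
# Minimal usage sketch (file names are illustrative). save_dataset/read_dataset
# resolve their path relative to the project root (see Path(__file__).parents[2]
# above), while compress_dataset takes the path as given:
#
#     save_dataset('data/processed/features.h5', dataset, save=True)
#     dataset = read_dataset('data/processed/features.h5')
#     compress_dataset('/absolute/path/to/features.h5')  # writes features_compressed.h5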
| 2.625 | 3 |
Exercicios/Extras/RainbowCircle.py | RicardoMart922/estudo_Python | 0 | 12791286 | <reponame>RicardoMart922/estudo_Python
import turtle
t = turtle.Turtle()
screen = turtle.Screen()
screen.bgcolor('black')
t.pensize(2)
t.speed(0)
while True:
for i in range(6):
for colors in ['red', 'blue', 'magenta', 'green', 'yellow', 'white']:
t.color(colors)
t.circle(100)
t.left(10)
    t.hideturtle() | 3.96875 | 4 |
autokeras/constant.py | chosungsu/autokeras | 1 | 12791287 | from collections import namedtuple
GoogleDriveFile = namedtuple('GoogleDriveFile', ['google_drive_id', 'local_name'])
class Constant:
BACKEND = 'torch'
# Data
VALIDATION_SET_SIZE = 0.08333
CUTOUT_HOLES = 1
CUTOUT_RATIO = 0.5
# Searcher
MAX_MODEL_NUM = 1000
BETA = 2.576
KERNEL_LAMBDA = 1.0
T_MIN = 0.0001
N_NEIGHBOURS = 8
MAX_MODEL_SIZE = (1 << 25)
MAX_LAYER_WIDTH = 4096
MAX_LAYERS = 200
# Grid Dimensions
LENGTH_DIM = 0
WIDTH_DIM = 1
# Default Search Space
DEFAULT_LENGTH_SEARCH = [50, 75, 100]
DEFAULT_WIDTH_SEARCH = [64, 128, 256]
# Model Defaults
DENSE_DROPOUT_RATE = 0.5
CONV_DROPOUT_RATE = 0.25
MLP_DROPOUT_RATE = 0.25
CONV_BLOCK_DISTANCE = 2
DENSE_BLOCK_DISTANCE = 1
MODEL_LEN = 3
MLP_MODEL_LEN = 3
MLP_MODEL_WIDTH = 5
MODEL_WIDTH = 64
POOLING_KERNEL_SIZE = 2
# ModelTrainer
DATA_AUGMENTATION = True
MAX_ITER_NUM = 200
MIN_LOSS_DEC = 1e-4
MAX_NO_IMPROVEMENT_NUM = 5
MAX_BATCH_SIZE = 128
LIMIT_MEMORY = False
SEARCH_MAX_ITER = 200
# Text Classifier
BERT_TRAINER_EPOCHS = 4
BERT_TRAINER_BATCH_SIZE = 32
# text preprocessor
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 400
MAX_NB_WORDS = 5000
EXTRACT_PATH = "glove/"
STORE_PATH = ''
# Download file name
PRETRAINED_VOCAB_BERT_BASE_UNCASED = \
GoogleDriveFile(google_drive_id='1hlPkUSPeT5ZQBYZ1Z734BbnHIvpx2ZLj', local_name='vbbu.txt')
PRETRAINED_VOCAB_BERT_BASE_CASED = \
GoogleDriveFile(google_drive_id='1FLytUhOIF0mTfA4A9MtE3aQ1kJr96oTR', local_name='vbbc.txt')
PRETRAINED_MODEL_BERT_BASE_UNCASED = \
GoogleDriveFile(google_drive_id='1rp1rVBoQwqgvg-JE8JwLL-adgLE07oTG', local_name='mbbu.pth')
PRETRAINED_MODEL_BERT_BASE_CASED = \
GoogleDriveFile(google_drive_id='1YKoGj-e4zoyTabt5dYpgEPe-PAmjOTDV', local_name='mbbc.pth')
# Image Resize
MAX_IMAGE_SIZE = 128 * 128
# SYS Constant
SYS_LINUX = 'linux'
SYS_WINDOWS = 'windows'
SYS_GOOGLE_COLAB = 'goog_colab'
# Google drive downloader
CHUNK_SIZE = 32768
DOWNLOAD_URL = "https://docs.google.com/uc?export=download"
| 2.328125 | 2 |
dfvfs/vfs/file_entry.py | Defense-Cyber-Crime-Center/dfvfs | 2 | 12791288 | # -*- coding: utf-8 -*-
"""The Virtual File System (VFS) file entry object interface.
The file entry can be various file system elements like a regular file,
a directory or file system metadata.
"""
import abc
from dfvfs.resolver import resolver
class Directory(object):
"""Class that implements the VFS directory object interface."""
def __init__(self, file_system, path_spec):
"""Initializes the directory object.
Args:
file_system: the file system object (instance of vfs.FileSystem).
path_spec: the path specification object (instance of path.PathSpec).
"""
super(Directory, self).__init__()
self._entries = None
self._file_system = file_system
self.path_spec = path_spec
@abc.abstractmethod
def _EntriesGenerator(self):
"""Retrieves directory entries.
Since a directory can contain a vast number of entries using
a generator is more memory efficient.
Yields:
A path specification (instance of path.PathSpec).
"""
@property
def entries(self):
"""The entries (generator of instance of path.OSPathSpec)."""
for entry in self._EntriesGenerator():
yield entry
class FileEntry(object):
"""Class that implements the VFS file entry object interface."""
def __init__(
self, resolver_context, file_system, path_spec, is_root=False,
is_virtual=False):
"""Initializes the file entry object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
file_system: the file system object (instance of vfs.FileSystem).
path_spec: the path specification object (instance of path.PathSpec).
is_root: optional boolean value to indicate if the file entry is
the root file entry of the corresponding file system.
The default is False.
is_virtual: optional boolean value to indicate if the file entry is
a virtual file entry emulated by the corresponding file
system. The default is False.
"""
super(FileEntry, self).__init__()
self._directory = None
self._file_system = file_system
self._is_root = is_root
self._is_virtual = is_virtual
self._resolver_context = resolver_context
self._stat_object = None
self.path_spec = path_spec
self._file_system.Open(path_spec=path_spec)
def __del__(self):
"""Cleans up the file entry object."""
self._file_system.Close()
self._file_system = None
@abc.abstractmethod
def _GetDirectory(self):
"""Retrieves the directory object (instance of vfs.Directory)."""
@abc.abstractmethod
def _GetStat(self):
"""Retrieves the stat object (instance of vfs.VFSStat)."""
@property
def link(self):
"""The full path of the linked file entry."""
return u''
@abc.abstractproperty
def name(self):
"""The name of the file entry, which does not include the full path."""
@property
def number_of_sub_file_entries(self):
"""The number of sub file entries."""
if self._directory is None:
self._directory = self._GetDirectory()
if self._directory is None:
return 0
# We cannot use len(self._directory.entries) since entries is a generator.
return sum(1 for path_spec in self._directory.entries)
@abc.abstractproperty
def sub_file_entries(self):
"""The sub file entries (generator of instance of vfs.FileEntry)."""
@property
def type_indicator(self):
"""The type indicator."""
type_indicator = getattr(self, u'TYPE_INDICATOR', None)
if type_indicator is None:
raise NotImplementedError(
          u'Invalid file system: missing type indicator.')
return type_indicator
def GetFileObject(self):
"""Retrieves the file-like object (instance of file_io.FileIO)."""
return resolver.Resolver.OpenFileObject(
self.path_spec, resolver_context=self._resolver_context)
def GetFileSystem(self):
"""Retrieves the file system (instance of vfs.FileSystem)."""
return self._file_system
def GetLinkedFileEntry(self):
"""Retrieves the linked file entry, e.g. for a symbolic link."""
return
@abc.abstractmethod
def GetParentFileEntry(self):
"""Retrieves the parent file entry."""
def GetSubFileEntryByName(self, name, case_sensitive=True):
"""Retrieves a sub file entry by name."""
name_lower = name.lower()
matching_sub_file_entry = None
for sub_file_entry in self.sub_file_entries:
if sub_file_entry.name == name:
return sub_file_entry
if not case_sensitive and sub_file_entry.name.lower() == name_lower:
if not matching_sub_file_entry:
matching_sub_file_entry = sub_file_entry
return matching_sub_file_entry
def GetStat(self):
"""Retrieves the stat object (instance of vfs.VFSStat)."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object
def IsAllocated(self):
"""Determines if the file entry is allocated."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.is_allocated
def IsDevice(self):
"""Determines if the file entry is a device."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_DEVICE
def IsDirectory(self):
"""Determines if the file entry is a directory."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_DIRECTORY
def IsFile(self):
"""Determines if the file entry is a file."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_FILE
def IsLink(self):
"""Determines if the file entry is a link."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_LINK
def IsPipe(self):
"""Determines if the file entry is a pipe."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_PIPE
def IsRoot(self):
"""Determines if the file entry is the root file entry."""
return self._is_root
def IsSocket(self):
"""Determines if the file entry is a socket."""
if self._stat_object is None:
self._stat_object = self._GetStat()
return self._stat_object.type == self._stat_object.TYPE_SOCKET
def IsVirtual(self):
"""Determines if the file entry is virtual (emulated by dfVFS)."""
return self._is_virtual
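
# Usage sketch: concrete subclasses are normally obtained through the path
# specification resolver rather than instantiated directly (OpenFileEntry is
# assumed to be available on resolver.Resolver, mirroring OpenFileObject above):
#
#   file_entry = resolver.Resolver.OpenFileEntry(path_spec)
#   if file_entry.IsDirectory():
#     for sub_file_entry in file_entry.sub_file_entries:
#       print(sub_file_entry.name)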
| 3.15625 | 3 |
ca_qc_saguenay/__init__.py | tor-councilmatic/scrapers-ca | 2 | 12791289 | <filename>ca_qc_saguenay/__init__.py
from __future__ import unicode_literals
from utils import CanadianJurisdiction
class Saguenay(CanadianJurisdiction):
classification = 'legislature'
division_id = 'ocd-division/country:ca/csd:2494068'
division_name = 'Saguenay'
name = 'Conseil municipal de Saguenay'
url = 'http://ville.saguenay.ca'
| 1.523438 | 2 |
cellphonedb/utils/dataframe_functions.py | chapuzzo/cellphonedb | 278 | 12791290 | <gh_stars>100-1000
import pandas as pd
from cellphonedb.utils import dataframe_format
def dataframes_has_same_data(dataframe1: pd.DataFrame, dataframe2: pd.DataFrame,
                             round_decimals: bool = False) -> bool:
dataframe1 = dataframe1.copy(deep=True)
dataframe2 = dataframe2.copy(deep=True)
columns_names_1 = list(dataframe1.columns.values)
columns_names_1.sort()
dataframe1 = dataframe_format.bring_columns_to_end(columns_names_1, dataframe1)
columns_names_2 = list(dataframe2.columns.values)
columns_names_2.sort()
dataframe2 = dataframe_format.bring_columns_to_end(columns_names_2, dataframe2)
if not dataframe1.empty:
dataframe1 = dataframe1.sort_values(columns_names_1).reset_index(drop=True)
if round_decimals:
dataframe1 = dataframe1.round(5)
if not dataframe2.empty:
dataframe2 = dataframe2.sort_values(columns_names_2).reset_index(drop=True)
if round_decimals:
dataframe2 = dataframe2.round(5)
if dataframe1.empty and dataframe2.empty:
return pd.Series(dataframe1.columns.values).equals(pd.Series(dataframe2.columns.values))
return dataframe1.equals(dataframe2)
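
# Usage sketch: row and column order are ignored, only the data must match.
# The frames below are toy examples, not CellPhoneDB result tables:
#
#     df1 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
#     df2 = pd.DataFrame({'b': [4.0, 3.0], 'a': [2, 1]})
#     dataframes_has_same_data(df1, df2)  # expected: True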
| 2.671875 | 3 |
tests/components/mqtt/test_camera.py | pcaston/Open-Peer-Power | 0 | 12791291 | <reponame>pcaston/Open-Peer-Power
"""The tests for mqtt camera component."""
import json
from unittest.mock import ANY
from openpeerpower.components import camera, mqtt
from openpeerpower.components.mqtt.discovery import async_start
from openpeerpower.setup import async_setup_component
from tests.common import (
MockConfigEntry,
async_fire_mqtt_message,
async_mock_mqtt_component,
mock_registry,
)
async def test_run_camera_setup(opp, aiohttp_client):
"""Test that it fetches the given payload."""
topic = "test/camera"
await async_mock_mqtt_component(opp)
await async_setup_component(
opp,
"camera",
{"camera": {"platform": "mqtt", "topic": topic, "name": "Test Camera"}},
)
url = opp.states.get("camera.test_camera").attributes["entity_picture"]
async_fire_mqtt_message(opp, topic, "beer")
client = await aiohttp_client(opp.http.app)
resp = await client.get(url)
assert resp.status == 200
body = await resp.text()
assert body == "beer"
async def test_unique_id(opp):
"""Test unique id option only creates one camera per unique_id."""
await async_mock_mqtt_component(opp)
await async_setup_component(
opp,
"camera",
{
"camera": [
{
"platform": "mqtt",
"name": "Test Camera 1",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test Camera 2",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
},
)
async_fire_mqtt_message(opp, "test-topic", "payload")
assert len(opp.states.async_all()) == 1
async def test_discovery_removal_camera(opp, mqtt_mock, caplog):
"""Test removal of discovered camera."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(opp, "openpeerpower", {}, entry)
data = '{ "name": "Beer",' ' "topic": "test_topic"}'
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
await opp.async_block_till_done()
state = opp.states.get("camera.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", "")
await opp.async_block_till_done()
state = opp.states.get("camera.beer")
assert state is None
async def test_discovery_update_camera(opp, mqtt_mock, caplog):
"""Test update of discovered camera."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(opp, "openpeerpower", {}, entry)
data1 = '{ "name": "Beer",' ' "topic": "test_topic"}'
data2 = '{ "name": "Milk",' ' "topic": "test_topic"}'
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data1)
await opp.async_block_till_done()
state = opp.states.get("camera.beer")
assert state is not None
assert state.name == "Beer"
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data2)
await opp.async_block_till_done()
state = opp.states.get("camera.beer")
assert state is not None
assert state.name == "Milk"
state = opp.states.get("camera.milk")
assert state is None
async def test_discovery_broken(opp, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
await async_start(opp, "openpeerpower", {}, entry)
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk",' ' "topic": "test_topic"}'
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data1)
await opp.async_block_till_done()
state = opp.states.get("camera.beer")
assert state is None
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data2)
await opp.async_block_till_done()
state = opp.states.get("camera.milk")
assert state is not None
assert state.name == "Milk"
state = opp.states.get("camera.beer")
assert state is None
async def test_entity_id_update(opp, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
registry = mock_registry(opp, {})
mock_mqtt = await async_mock_mqtt_component(opp)
assert await async_setup_component(
opp,
camera.DOMAIN,
{
camera.DOMAIN: [
{
"platform": "mqtt",
"name": "beer",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
}
]
},
)
state = opp.states.get("camera.beer")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 1
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, None)
mock_mqtt.async_subscribe.reset_mock()
registry.async_update_entity("camera.beer", new_entity_id="camera.milk")
await opp.async_block_till_done()
state = opp.states.get("camera.beer")
assert state is None
state = opp.states.get("camera.milk")
assert state is not None
assert mock_mqtt.async_subscribe.call_count == 1
mock_mqtt.async_subscribe.assert_any_call("test-topic", ANY, 0, None)
async def test_entity_device_info_with_identifier(opp, mqtt_mock):
"""Test MQTT camera device registry integration."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_opp(opp)
await async_start(opp, "openpeerpower", {}, entry)
registry = await opp.helpers.device_registry.async_get_registry()
data = json.dumps(
{
"platform": "mqtt",
"name": "<NAME>",
"topic": "test-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
)
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.identifiers == {("mqtt", "helloworld")}
assert device.connections == {("mac", "02:5b:26:a8:dc:12")}
assert device.manufacturer == "Whatever"
assert device.name == "Beer"
assert device.model == "Glass"
assert device.sw_version == "0.1-beta"
async def test_entity_device_info_update(opp, mqtt_mock):
"""Test device registry update."""
entry = MockConfigEntry(domain=mqtt.DOMAIN)
entry.add_to_opp(opp)
await async_start(opp, "openpeerpower", {}, entry)
registry = await opp.helpers.device_registry.async_get_registry()
config = {
"platform": "mqtt",
"name": "<NAME>",
"topic": "test-topic",
"device": {
"identifiers": ["helloworld"],
"connections": [["mac", "02:5b:26:a8:dc:12"]],
"manufacturer": "Whatever",
"name": "Beer",
"model": "Glass",
"sw_version": "0.1-beta",
},
"unique_id": "veryunique",
}
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Beer"
config["device"]["name"] = "Milk"
data = json.dumps(config)
async_fire_mqtt_message(opp, "openpeerpower/camera/bla/config", data)
await opp.async_block_till_done()
device = registry.async_get_device({("mqtt", "helloworld")}, set())
assert device is not None
assert device.name == "Milk"
| 2.34375 | 2 |
mil_common/ros_alarms/nodes/alarm_server.py | naveenmaan/mil | 0 | 12791292 | <reponame>naveenmaan/mil
#!/usr/bin/env python
import rospy
from ros_alarms import HandlerBase
from ros_alarms.msg import Alarm as AlarmMsg
from ros_alarms.srv import AlarmGet, AlarmSet
from ros_alarms import Alarm
import inspect
class AlarmServer(object):
def __init__(self):
# Maps alarm name to Alarm objects
self.alarms = {}
# Handler classes for overwriting default alarm functionality
self.handlers = {}
# Maps meta alarm names to predicate Handler functions
self.meta_alarms = {}
msg = "Expecting at most the following alarms: {}"
rospy.loginfo(msg.format(rospy.get_param("/known_alarms", [])))
self._alarm_pub = rospy.Publisher("/alarm/updates", AlarmMsg, latch=True, queue_size=100)
self._create_meta_alarms()
self._create_alarm_handlers()
# Outside interface to the alarm system. Usually you don't want to
# interface with these directly.
rospy.Service("/alarm/set", AlarmSet, self._on_set_alarm)
rospy.Service("/alarm/get", AlarmGet, self._on_get_alarm)
def set_alarm(self, alarm):
''' Sets or updates the alarm
        Updating the alarm triggers all of the alarm's callbacks
'''
if alarm.alarm_name in self.handlers:
res = self.handlers[alarm.alarm_name].on_set(alarm)
if res is False:
return False
if alarm.alarm_name in self.alarms:
self.alarms[alarm.alarm_name].update(alarm)
else:
self.alarms[alarm.alarm_name] = Alarm.from_msg(alarm)
        if isinstance(alarm, Alarm):
alarm = alarm.as_msg()
self._alarm_pub.publish(alarm)
return True
def _on_set_alarm(self, srv):
self.set_alarm(srv.alarm)
return True
def _on_get_alarm(self, srv):
        ''' Returns the requested alarm if it exists, otherwise a blank alarm '''
rospy.logdebug("Got request for alarm: {}".format(srv.alarm_name))
return self.alarms.get(srv.alarm_name, Alarm.blank(srv.alarm_name)).as_srv_resp()
def make_tagged_alarm(self, name):
'''
Makes a blank alarm with the node_name of the alarm_server so that users know it is the
initial state
'''
alarm = Alarm.blank(name)
alarm.node_name = 'alarm_server'
return alarm
def _handle_meta_alarm(self, meta_alarm, sub_alarms):
'''
        Calls the meta_predicate callback for an alarm handler when one of its child alarms has changed.
        Then, updates the status of the parent alarm, if necessary.
'''
alarms = {name: alarm for name, alarm in self.alarms.items() if name in sub_alarms}
meta = self.alarms[meta_alarm]
        # Check the predicate; it should return either an Alarm object or a boolean
        # indicating whether the meta alarm should be raised
        res = self.meta_alarms[meta_alarm](meta, alarms)
        # If it is an Alarm instance, send it out as is
if isinstance(res, Alarm):
alarm = res
alarm.alarm_name = meta_alarm # Ensure alarm name is correct
elif type(res) == bool:
# If it is a boolean, only update if it changes the raised status
raised_status = res
if raised_status == meta.raised:
return
alarm = meta.as_msg()
alarm.raised = bool(raised_status)
if alarm.raised: # If it is raised, set problem description
alarm.problem_description = 'Raised by meta alarm'
else:
rospy.logwarn('Meta alarm callback for {} failed to return an Alarm or boolean'.format(meta_alarm))
return
self.set_alarm(alarm)
def _create_alarm_handlers(self):
'''
Alarm handlers are classes imported by the alarm server and run code upon a change of state
of their respective alarms.
Handlers should be in a python module (directory with an __init__.py) and in the python path.
They will be loaded from the module specified with the ~handler_module param to the alarm server.
'''
# If the param exists, load it here
handler_module = rospy.get_param("~handler_module", None)
if handler_module is None:
return
# Give handlers access to alarm server
HandlerBase._init(self)
# Import the module where the handlers are stored
alarm_handlers = __import__(handler_module, fromlist=[""])
for handler in [cls for name, cls in inspect.getmembers(alarm_handlers)
if inspect.isclass(cls) and issubclass(cls, HandlerBase) and
                        hasattr(cls, "alarm_name") and name != "HandlerBase"]:
            # Have to instantiate so the class exists
h = handler()
alarm_name = handler.alarm_name
# Set initial state if necessary (could have already been added while creating metas)
if hasattr(h, 'initial_alarm'):
if alarm_name in self.alarms:
self.alarms[alarm_name].update(h.initial_alarm)
else:
self.alarms[alarm_name] = h.initial_alarm # Update even if already added to server
elif alarm_name not in self.alarms: # Add default initial if not there already
self.alarms[alarm_name] = self.make_tagged_alarm(alarm_name)
else:
pass
# If a handler exists for a meta alarm, we need to save the predicate
if alarm_name in self.meta_alarms:
self.meta_alarms[alarm_name] = h.meta_predicate
self.handlers[alarm_name] = h
rospy.loginfo("Loaded handler: {}".format(h.alarm_name))
def _create_meta_alarms(self, namespace="meta_alarms/"):
''' Adds meta alarms to the alarm server
Meta alarms are special in that they are not directly raised or cleared but are instead triggered
by a change of state of their child alarms.
        The /meta_alarms parameter defines the structure of a meta alarm. It has the following form:
{meta_alarm_name : [list of child alarm names], ...}
Users can also provide more complex triggering mechanisms by providing an alarm handler class with
a 'meta_predicate' method.
'''
meta_alarms_dict = rospy.get_param(namespace, {})
for meta, alarms in meta_alarms_dict.iteritems():
# Add the meta alarm
if meta not in self.alarms:
self.alarms[meta] = self.make_tagged_alarm(meta)
def default(meta, alarms):
'''
If no predicate for a meta-alarm is provided, then the meta-alarm will be raised
if any of the child alarms are raised
'''
                return any(alarm.raised for alarm in alarms.values())
self.meta_alarms[meta] = default
def cb(alarm, meta_name=meta, sub_alarms=alarms):
return self._handle_meta_alarm(meta_name, sub_alarms)
for alarm in alarms:
if alarm not in self.alarms:
self.alarms[alarm] = self.make_tagged_alarm(alarm)
self.alarms[alarm].add_callback(cb)
if __name__ == "__main__":
rospy.init_node("alarm_server")
a = AlarmServer()
rospy.spin()
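
# Illustrative parameter layout consumed by this node (alarm and module names are
# made up; the handler module must expose HandlerBase subclasses, see
# _create_alarm_handlers above):
#
#     known_alarms: [thruster-out, odom-kill]
#     meta_alarms:
#       all-systems: [thruster-out, odom-kill]
#     alarm_server:
#       handler_module: alarm_handlers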
| 2.28125 | 2 |
triphecta/phenotype_compare.py | martinghunt/triphecta | 0 | 12791293 | class PhenotypeCompare:
def __init__(self, constraints, count_unknown_as_diff=True):
self.compare_functions = {
"equal": PhenotypeCompare._compare_method_equal,
"range": PhenotypeCompare._compare_method_range,
"abs_distance": PhenotypeCompare._compare_method_abs_distance,
"percent_distance": PhenotypeCompare._compare_method_percent_distance,
}
self.constraints = constraints
errors = self._sanity_check_constraints()
if len(errors):
raise RuntimeError("Errors in constraints:\n" + "\n".join(errors))
self.count_unknown_as_diff = count_unknown_as_diff
self.required_diff_keys = {
k for k in self.constraints if not self.constraints[k]["must_be_same"]
}
def _sanity_check_constraints(self):
errors = []
for d in self.constraints.values():
if d["method"] not in self.compare_functions:
errors.append(f"Unknown method {d}")
continue
if d["method"] == "equal":
if "params" not in d:
d["params"] = {}
elif len(d["params"]) > 0:
errors.append(f"method is 'equal', params supplied: {d}")
if d["method"] == "range" and (
"low" not in d["params"] or "high" not in d["params"]
):
errors.append(f"method is 'range', low and high not supplied: {d}")
if d["method"] == "abs_distance" and ("max_dist" not in d["params"]):
errors.append(f"method is 'abs_distance', max_dist not supplied: {d}")
if d["method"] == "percent_distance" and ("max_percent" not in d["params"]):
errors.append(
f"method is 'percent_distance', max_percent not supplied: {d}"
)
return errors
def satisfy_required_differences(self, pheno1, pheno2):
for key in self.required_diff_keys:
if PhenotypeCompare._phenos_equal_account_for_none(
pheno1[key],
pheno2[key],
self.compare_functions[self.constraints[key]["method"]],
False,
**self.constraints[key]["params"],
):
return False
return True
@staticmethod
def _compare_method_equal(p1, p2):
return p1 == p2
@staticmethod
def _compare_method_range(p1, p2, low=None, high=None):
return (low <= p1 <= high) == (low <= p2 <= high)
@staticmethod
def _compare_method_abs_distance(p1, p2, max_dist=None):
return abs(p1 - p2) <= max_dist
@staticmethod
def _compare_method_percent_distance(p1, p2, max_percent=None):
if p1 == p2 == 0:
return True
return 100 * abs(p1 - p2) / max(abs(p1), abs(p2)) <= max_percent
@classmethod
def _phenos_equal_account_for_none(
cls, p1, p2, compare_function, count_unknown_as_diff, **kwargs
):
if p1 is None or p2 is None:
return not count_unknown_as_diff
else:
return compare_function(p1, p2, **kwargs)
def phenos_agree_on_one_feature(self, pheno1, pheno2, key):
return PhenotypeCompare._phenos_equal_account_for_none(
pheno1[key],
pheno2[key],
self.compare_functions[self.constraints[key]["method"]],
self.count_unknown_as_diff,
**self.constraints[key]["params"],
)
def phenos_agree_on_features(self, pheno1, pheno2, keys):
for key in keys:
if not self.phenos_agree_on_one_feature(pheno1, pheno2, key):
return False
return True
def differences(self, pheno1, pheno2):
"""Returns number of differences between the two phenotypes.
Assumes that satisfy_required_differences(pheno1, pheno2) is True.
(Or at least doesn't care if it's True or False.)
        Counts the differences only from the constraints where
        'must_be_same' is True."""
differences = 0
for key, constraint in self.constraints.items():
if constraint["must_be_same"] is False:
continue
elif not self.phenos_agree_on_one_feature(pheno1, pheno2, key):
differences += 1
return differences
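
# Usage sketch (constraint names, methods and thresholds are illustrative):
#
#     constraints = {
#         'mic': {'must_be_same': True, 'method': 'abs_distance',
#                 'params': {'max_dist': 2.0}},
#         'resistant': {'must_be_same': False, 'method': 'equal', 'params': {}},
#     }
#     comparer = PhenotypeCompare(constraints)
#     pheno1 = {'mic': 1.0, 'resistant': True}
#     pheno2 = {'mic': 2.5, 'resistant': False}
#     comparer.satisfy_required_differences(pheno1, pheno2)  # True: 'resistant' differs
#     comparer.differences(pheno1, pheno2)                   # 0: 'mic' within max_dist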
| 2.734375 | 3 |
lobster.py | khurtado/lobster | 1 | 12791294 | <gh_stars>1-10
#!/usr/bin/env python
from lobster.ui import boil
boil()
| 1.125 | 1 |
c2vqa-verbs/dataset/dataset-editable.py | andeeptoor/qar-qae | 0 | 12791295 | <reponame>andeeptoor/qar-qae
import pandas as pd
import os
import spacy
from spacy.symbols import VERB, NOUN
import random
from pattern.en import conjugate, PROGRESSIVE, INDICATIVE
from utils import read_json
from common import save_data
print "Loading feature extractors..."
nlp = spacy.load('en')
dataset_dir = '/sb-personal/cvqa/data/visual-genome/8-29-2016/source-data/'
output_dir = os.path.join('/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/')
dataset_output_file = output_dir + 'question_action_data-v2.csv'
editable_dataset_output_file = output_dir + 'editable_and_not_editable_actions_vg_expanded_dataset-v3.csv'
output_dir = '/sb-personal/cvqa/data/visual-genome/8-26-2017/generated-data/'
output_actions_file = output_dir + 'action_image_data-v2.csv'
actions_df = pd.read_csv(output_actions_file)
# print df
all_action_names = set(actions_df['action'].tolist())
exclude = ['basketball','baseball','with','wear', 'show','look','use','dress','build','help','soccer']
exclude += ['be','remove','get','frisbee','object','clear','separate','feed','tennis','building']
exclude += ['picture','position','remote','paint',"photograph","smile"]
exclude += ['wear', 'show','use','dress','build','tennis','basketball','golf','baseball','building']
exclude_actions = set(exclude)
all_action_names = all_action_names - exclude_actions
# print all_action_names
df = pd.read_csv(dataset_output_file)
editable_questions = []
i = 0
total = len(df)
for _,row in df.iterrows():
if i % 1000 == 0:
print "Question: [%d/%d]" % (i,total)
i += 1
# print row
image_file = row['image_file']
image_actions = actions_df[actions_df['image_file'] == image_file]['action'].unique().tolist()
image_actions.sort()
question = row['question']
doc = nlp(unicode(question))
question_action = row['original_question_action']
actions_not_in_image = list(all_action_names - set(image_actions))
replacement_action = random.choice(actions_not_in_image)
replacement_action_conjugated = conjugate(replacement_action, tense = "present", mood=INDICATIVE, aspect=PROGRESSIVE)
editable_question = ' '.join([replacement_action_conjugated if w == question_action else w for w in question.split()])
question_action_conjugated = conjugate(question_action, tense = "present", mood=INDICATIVE, aspect=PROGRESSIVE)
data = {}
data['image_file'] = image_file
data['original_question'] = question
data['question'] = editable_question
data['answer'] = 'edit to ' + question_action_conjugated
data['original_answer_tense'] = question_action
data['replacement_action'] = replacement_action_conjugated
data['relevant'] = 0
data['image_id'] = row['image_id']
data['qa_id'] = -1 * row['qa_id']
data['image_actions'] = ','.join(image_actions)
editable_questions.append(data)
noedit_data = {}
noedit_data['image_file'] = image_file
noedit_data['original_question'] = question
noedit_data['question'] = question
noedit_data['answer'] = 'no edit because ' + question_action_conjugated
noedit_data['original_answer_tense'] = question_action
noedit_data['replacement_action'] = question_action
noedit_data['relevant'] = 1
noedit_data['image_id'] = row['image_id']
noedit_data['qa_id'] = row['qa_id']
noedit_data['image_actions'] = data['image_actions']
editable_questions.append(noedit_data)
editable_df = save_data(editable_questions, editable_dataset_output_file)
# print editable_df
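
# Each row written to editable_dataset_output_file carries the columns built in the
# dicts above: image_file, original_question, question, answer, original_answer_tense,
# replacement_action, relevant, image_id, qa_id, image_actions. One edited row
# (relevant=0) and one unedited row (relevant=1) are produced per source question.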
| 2.4375 | 2 |
mnist_sync_sharding_greedy/model/model.py | epikjjh/DIstributed-Deep-Learning | 1 | 12791296 | <gh_stars>1-10
import tensorflow as tf
import pickle
import numpy as np
import pandas as pd
class Model:
def __init__(self):
# Data: mnist dataset
with open('data/mnist.pkl', 'rb') as f:
train_set, _, test_set = pickle.load(f, encoding='latin1')
self.x_train, y_train = train_set
self.x_test, y_test = test_set
self.y_train = pd.get_dummies(y_train)
self.y_test = pd.get_dummies(y_test)
# CNN model
with tf.compat.v1.variable_scope("mnist", reuse=tf.compat.v1.AUTO_REUSE):
self.x = tf.compat.v1.placeholder(tf.float32, [None, 784])
self.x_image = tf.reshape(self.x, [-1,28,28,1])
self.y_ = tf.compat.v1.placeholder(tf.float32, [None, 10])
'''First Conv layer'''
# shape: [5,5,1,32]
self.w_conv1 = tf.compat.v1.get_variable("v0", shape=[5,5,1,32], dtype=tf.float32)
# shape: [32]
self.b_conv1 = tf.compat.v1.get_variable("v1", shape=[32], dtype=tf.float32)
# conv layer
self.conv1 = tf.nn.conv2d(self.x_image, self.w_conv1, strides=[1,1,1,1], padding='SAME')
# activation layer
self.h_conv1 = tf.nn.relu(self.conv1 + self.b_conv1)
self.h_pool1 = tf.nn.max_pool2d(self.h_conv1, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
'''Second Conv layer'''
# shape: [5,5,32,64]
self.w_conv2 = tf.compat.v1.get_variable("v2", shape=[5,5,32,64], dtype=tf.float32)
# shape: [64]
self.b_conv2 = tf.compat.v1.get_variable("v3", shape=[64], dtype=tf.float32)
# conv layer
self.conv2 = tf.nn.conv2d(self.h_pool1, self.w_conv2, strides=[1,1,1,1], padding='SAME')
# activation layer
self.h_conv2 = tf.nn.relu(self.conv2 + self.b_conv2)
self.h_pool2 = tf.nn.max_pool2d(self.h_conv2, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
'''Third Conv layer'''
# shape: [5,5,64,128]
self.w_conv3 = tf.compat.v1.get_variable("v4", shape=[5,5,64,128], dtype=tf.float32)
# shape: [128]
self.b_conv3 = tf.compat.v1.get_variable("v5", shape=[128], dtype=tf.float32)
# conv layer
self.conv3 = tf.nn.conv2d(self.h_pool2, self.w_conv3, strides=[1,1,1,1], padding='SAME')
# activation layer
self.h_conv3 = tf.nn.relu(self.conv3 + self.b_conv3)
self.h_pool3 = tf.nn.max_pool2d(self.h_conv3, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
'''Forth Conv layer'''
# shape: [5,5,128,256]
self.w_conv4 = tf.compat.v1.get_variable("v6", shape=[5,5,128,256], dtype=tf.float32)
# shape: [256]
self.b_conv4 = tf.compat.v1.get_variable("v7", shape=[256], dtype=tf.float32)
# conv layer
self.conv4 = tf.nn.conv2d(self.h_pool3, self.w_conv4, strides=[1,1,1,1], padding='SAME')
# activation layer
self.h_conv4 = tf.nn.relu(self.conv4 + self.b_conv4)
self.h_pool4 = tf.nn.max_pool2d(self.h_conv4, ksize=[1,2,2,1], strides=[1,2,2,1], padding='SAME')
'''FC layer1'''
self.w_fc1 = tf.compat.v1.get_variable("v8", shape=[2*2*256, 1024], dtype=tf.float32)
self.b_fc1 = tf.compat.v1.get_variable("v9", shape=[1024], dtype=tf.float32)
self.h_pool4_flat = tf.reshape(self.h_pool4, [-1, 2*2*256])
self.h_fc1 = tf.nn.relu(tf.matmul(self.h_pool4_flat, self.w_fc1) + self.b_fc1)
'''Dropout'''
self.keep_prob = tf.compat.v1.placeholder(tf.float32)
self.h_fc1_drop = tf.nn.dropout(self.h_fc1, rate=1.0-self.keep_prob)
'''FC layer2'''
self.w_fc2 = tf.compat.v1.get_variable("v10", shape=[1024, 512], dtype=tf.float32)
self.b_fc2 = tf.compat.v1.get_variable("v11", shape=[512], dtype=tf.float32)
self.h_fc2 = tf.matmul(self.h_fc1_drop, self.w_fc2) + self.b_fc2
'''Dropout'''
self.h_fc2_drop = tf.nn.dropout(self.h_fc2, rate=1.0-self.keep_prob)
'''Softmax layer'''
self.w_fc3 = tf.compat.v1.get_variable("v12", shape=[512, 10], dtype=tf.float32)
self.b_fc3 = tf.compat.v1.get_variable("v13", shape=[10], dtype=tf.float32)
self.logits = tf.matmul(self.h_fc2_drop, self.w_fc3) + self.b_fc3
self.y = tf.nn.softmax(self.logits)
'''Cost function & optimizer'''
self.loss = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.y_)
self.cost = tf.reduce_mean(self.loss)
self.optimizer = tf.compat.v1.train.AdamOptimizer(1e-4)
# Variables
self.var_bucket = tf.compat.v1.trainable_variables()
self.var_size = len(self.var_bucket)
self.var_shape = [var.shape for var in self.var_bucket]
# Gradients
self.grads = self.optimizer.compute_gradients(self.cost, self.var_bucket)
# For evaluating
self.prediction = tf.equal(tf.argmax(self.y,1), tf.argmax(self.y_, 1))
self.accuracy = tf.reduce_mean(tf.cast(self.prediction, tf.float32))
self.train_step = self.optimizer.minimize(self.cost)
# Create session
self.sess = tf.compat.v1.Session()
# Initialize variables
self.sess.run(tf.compat.v1.global_variables_initializer())
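
# Minimal local usage sketch (the 64-sample batch slice is illustrative; the class
# also exposes grads/var_bucket for the distributed workers):
#
#     m = Model()
#     xs, ys = m.x_train[:64], m.y_train[:64].values
#     m.sess.run(m.train_step, feed_dict={m.x: xs, m.y_: ys, m.keep_prob: 0.5})
#     m.sess.run(m.accuracy,
#                feed_dict={m.x: m.x_test, m.y_: m.y_test.values, m.keep_prob: 1.0})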
| 2.5625 | 3 |
tutorial/de.digits.mg/graph.py | matomatical/memograph | 13 | 12791297 | <filename>tutorial/de.digits.mg/graph.py
from mg.graph import Node
D = ['null','eins','zwei','drei','vier','fünf','sechs','sieben','acht','neun']
def graph():
for i, n in enumerate(D):
yield (
Node(i, speak_str=i, speak_voice="en"),
Node(n, speak_str=n, speak_voice="de"),
)
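
# Usage sketch: iterating graph() yields (digit Node, German word Node) pairs,
# e.g. the first pair links 0 with 'null'; the speak_* arguments only attach
# text-to-speech metadata to each Node.
#
#     pairs = list(graph())   # 10 (digit, word) Node pairs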
| 2.984375 | 3 |
tests/test_utils.py | audeering/audformat | 4 | 12791298 | <gh_stars>1-10
from io import StringIO
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import audeer
import audformat
from audformat import utils
from audformat import define
@pytest.mark.parametrize(
'objs, overwrite, expected',
[
# empty
(
[],
False,
pd.Series([], audformat.filewise_index(), dtype='object'),
),
(
[pd.Series([], audformat.filewise_index(), dtype='object')],
False,
pd.Series([], audformat.filewise_index(), dtype='object')
),
(
[pd.Series([], audformat.segmented_index(), dtype='object')],
False,
pd.Series([], audformat.segmented_index(), dtype='object')
),
(
[pd.DataFrame([], audformat.segmented_index(), dtype='object')],
False,
pd.DataFrame([], audformat.segmented_index(), dtype='object')
),
# combine series with same name
(
[
pd.Series([], audformat.filewise_index(), dtype=float),
pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
],
False,
pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
),
(
[
pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
],
False,
pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
),
(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([2.], audformat.filewise_index('f2')),
],
False,
pd.Series([1., 2.], audformat.filewise_index(['f1', 'f2'])),
),
(
[
pd.Series([1.], audformat.segmented_index('f1')),
pd.Series([2.], audformat.segmented_index('f2')),
],
False,
pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])),
),
(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([2.], audformat.segmented_index('f2')),
],
False,
pd.Series([1., 2.], audformat.segmented_index(['f1', 'f2'])),
),
# combine values in same location
(
[
pd.Series([np.nan], audformat.filewise_index('f1')),
pd.Series([np.nan], audformat.filewise_index('f1')),
],
False,
pd.Series([np.nan], audformat.filewise_index('f1')),
),
(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([np.nan], audformat.filewise_index('f1')),
],
False,
pd.Series([1.], audformat.filewise_index('f1')),
),
(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([1.], audformat.filewise_index('f1')),
],
False,
pd.Series([1.], audformat.filewise_index('f1')),
),
# combine series and overwrite values
(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([np.nan], audformat.filewise_index('f1')),
],
True,
pd.Series([1.], audformat.filewise_index('f1')),
),
(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([2.], audformat.filewise_index('f1')),
],
True,
pd.Series([2.], audformat.filewise_index('f1')),
),
# combine values with matching dtype
(
[
pd.Series(
[1, 2],
audformat.filewise_index(['f1', 'f2']),
dtype='int64',
),
pd.Series(
[1, 2],
audformat.filewise_index(['f1', 'f2']),
dtype='Int64',
),
],
False,
pd.Series(
[1, 2],
audformat.filewise_index(['f1', 'f2']),
dtype='Int64',
),
),
(
[
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
dtype='float32',
),
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
dtype='float64',
),
],
False,
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
dtype='float64',
),
),
(
[
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
dtype='float32',
),
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
dtype='float64',
),
],
False,
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
dtype='float64',
),
),
(
[
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
),
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
),
],
False,
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
)
),
(
[
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
),
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
),
],
False,
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
)
),
# combine series with non-nullable dtype
(
[
pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])),
pd.Series([1, 2], audformat.filewise_index(['f1', 'f2'])),
],
False,
pd.Series(
[1, 2],
audformat.filewise_index(['f1', 'f2']),
dtype='Int64'
),
),
(
[
pd.Series(
True,
audformat.filewise_index('f1'),
dtype='bool',
),
pd.Series(
True,
audformat.filewise_index('f2'),
dtype='bool',
),
],
False,
pd.Series(
True,
audformat.filewise_index(['f1', 'f2']),
dtype='boolean',
),
),
(
[
pd.Series(
1,
audformat.filewise_index('f1'),
dtype='int64',
),
pd.Series(
2,
audformat.filewise_index('f2'),
dtype='int64',
),
],
False,
pd.Series(
[1, 2],
audformat.filewise_index(['f1', 'f2']),
dtype='Int64',
),
),
# combine series with different names
(
[
pd.Series([1.], audformat.filewise_index('f1'), name='c1'),
pd.Series([2.], audformat.filewise_index('f1'), name='c2'),
],
False,
pd.DataFrame(
{
'c1': [1.],
'c2': [2.],
},
audformat.filewise_index('f1'),
),
),
(
[
pd.Series([1.], audformat.filewise_index('f1'), name='c1'),
pd.Series([2.], audformat.filewise_index('f2'), name='c2'),
],
False,
pd.DataFrame(
{
'c1': [1., np.nan],
'c2': [np.nan, 2.],
},
audformat.filewise_index(['f1', 'f2']),
),
),
(
[
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
name='c1',
),
pd.Series(
[2.],
audformat.filewise_index('f2'),
name='c2',
),
],
False,
pd.DataFrame(
{
'c1': [1., 2.],
'c2': [np.nan, 2.],
},
audformat.filewise_index(['f1', 'f2']),
),
),
(
[
pd.Series(
[1.],
audformat.filewise_index('f1'),
name='c1'),
pd.Series(
[2.],
audformat.segmented_index('f1', 0, 1),
name='c2',
),
],
False,
pd.DataFrame(
{
'c1': [1., np.nan],
'c2': [np.nan, 2.],
},
audformat.segmented_index(
['f1', 'f1'],
[0, 0],
[None, 1],
),
),
),
# combine series and data frame
(
[
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
name='c',
),
pd.DataFrame(
{
'c': [2., 3.]
},
audformat.filewise_index(['f2', 'f3']),
),
],
False,
pd.DataFrame(
{
'c': [1., 2., 3.],
},
audformat.filewise_index(['f1', 'f2', 'f3']),
),
),
(
[
pd.Series(
[1., 2.],
audformat.filewise_index(['f1', 'f2']),
name='c1',
),
pd.Series(
['a', np.nan, 'd'],
audformat.filewise_index(['f1', 'f2', 'f4']),
name='c2',
),
pd.DataFrame(
{
'c1': [np.nan, 3.],
'c2': ['b', 'c'],
},
audformat.segmented_index(['f2', 'f3']),
),
],
False,
pd.DataFrame(
{
'c1': [1., 2., 3., np.nan],
'c2': ['a', 'b', 'c', 'd']
},
audformat.segmented_index(['f1', 'f2', 'f3', 'f4']),
),
),
# error: dtypes do not match
pytest.param(
[
pd.Series([1], audformat.filewise_index('f1')),
pd.Series([1.], audformat.filewise_index('f1')),
],
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
[
pd.Series(
[1, 2, 3],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
),
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
),
],
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
[
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
),
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
),
],
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
[
pd.Series(
['a', 'b', 'a'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
),
pd.Series(
['a', 'b', 'c'],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
dtype='category',
),
],
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
# error: values do not match
pytest.param(
[
pd.Series([1.], audformat.filewise_index('f1')),
pd.Series([2.], audformat.filewise_index('f1')),
],
False,
None,
marks=pytest.mark.xfail(raises=ValueError),
),
],
)
def test_concat(objs, overwrite, expected):
obj = utils.concat(objs, overwrite=overwrite)
if isinstance(obj, pd.Series):
pd.testing.assert_series_equal(obj, expected)
else:
pd.testing.assert_frame_equal(obj, expected)
@pytest.mark.parametrize(
'obj, expected_duration',
[
(
audformat.segmented_index(),
pd.Timedelta(0, unit='s'),
),
(
audformat.segmented_index(['f1'], [0], [2]),
pd.Timedelta(2, unit='s'),
),
(
audformat.segmented_index(['f1'], [0.1], [2]),
pd.Timedelta(1.9, unit='s'),
),
(
audformat.segmented_index(['f1', 'f2'], [0, 1], [2, 2]),
pd.Timedelta(3, unit='s'),
),
(
pd.Series(
index=audformat.segmented_index(['f1'], [1], [2]),
dtype='category',
),
pd.Timedelta(1, unit='s'),
),
(
pd.DataFrame(index=audformat.segmented_index(['f1'], [1], [2])),
pd.Timedelta(1, unit='s'),
),
# filewise index, but file is missing
pytest.param(
audformat.filewise_index(['f1']),
None,
marks=pytest.mark.xfail(raises=FileNotFoundError),
),
# segmented index with NaT, but file is missing
pytest.param(
audformat.segmented_index(['f1'], [0]),
None,
marks=pytest.mark.xfail(raises=FileNotFoundError),
),
]
)
def test_duration(obj, expected_duration):
duration = audformat.utils.duration(obj)
if pd.isnull(expected_duration):
assert pd.isnull(duration)
else:
assert duration == expected_duration
@pytest.mark.parametrize(
'index, root, expected',
[
(
audformat.filewise_index(),
None,
audformat.filewise_index(),
),
(
audformat.segmented_index(),
None,
audformat.segmented_index(),
),
(
audformat.filewise_index(['f1', 'f2']),
'.',
audformat.filewise_index(
[
audeer.safe_path('f1'),
audeer.safe_path('f2'),
]
),
),
(
audformat.filewise_index(['f1', 'f2']),
os.path.join('some', 'where'),
audformat.filewise_index(
[
audeer.safe_path(os.path.join('some', 'where', 'f1')),
audeer.safe_path(os.path.join('some', 'where', 'f2')),
]
),
),
(
audformat.filewise_index(['f1', 'f2']),
os.path.join('some', 'where') + os.path.sep,
audformat.filewise_index(
[
audeer.safe_path(os.path.join('some', 'where', 'f1')),
audeer.safe_path(os.path.join('some', 'where', 'f2')),
]
),
),
(
audformat.filewise_index(['f1', 'f2']),
audeer.safe_path(os.path.join('some', 'where')),
audformat.filewise_index(
[
audeer.safe_path(os.path.join('some', 'where', 'f1')),
audeer.safe_path(os.path.join('some', 'where', 'f2')),
]
),
),
(
audformat.filewise_index(
[
audeer.safe_path('f1'),
audeer.safe_path('f2'),
]
),
audeer.safe_path(os.path.join('some', 'where')),
audformat.filewise_index(
[
audeer.safe_path(os.path.join('some', 'where'))
+ os.path.sep
+ audeer.safe_path('f1'),
audeer.safe_path(os.path.join('some', 'where'))
+ os.path.sep
+ audeer.safe_path('f2'),
]
),
),
(
audformat.segmented_index(
['f1', 'f2'],
['1s', '3s'],
['2s', '4s'],
),
'.',
audformat.segmented_index(
[
audeer.safe_path('f1'),
audeer.safe_path('f2'),
],
['1s', '3s'],
['2s', '4s'],
),
)
]
)
def test_expand_file_path(tmpdir, index, root, expected):
expanded_index = audformat.utils.expand_file_path(index, root)
pd.testing.assert_index_equal(expanded_index, expected)
@pytest.mark.parametrize(
'obj, expected',
[
(
audformat.filewise_index(),
'0',
),
(
audformat.segmented_index(),
'0',
),
(
audformat.filewise_index(['f1', 'f2']),
'-4231615416436839963',
),
(
audformat.segmented_index(['f1', 'f2']),
'-2363261461673824215',
),
(
audformat.segmented_index(['f1', 'f2']),
'-2363261461673824215',
),
(
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
'-3831446135233514455',
),
(
pd.Series([0, 1], audformat.filewise_index(['f1', 'f2'])),
'-8245754232361677810',
),
(
pd.DataFrame(
{'a': [0, 1], 'b': [2, 3]},
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
),
'-103439349488189352',
),
]
)
def test_hash(obj, expected):
assert utils.hash(obj) == expected
assert utils.hash(obj[::-1]) == expected
@pytest.mark.parametrize(
'objs, expected',
[
(
[],
audformat.filewise_index(),
),
(
[
audformat.filewise_index(),
],
audformat.filewise_index(),
),
(
[
audformat.filewise_index(),
audformat.filewise_index(),
],
audformat.filewise_index(),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f1', 'f2']),
],
audformat.filewise_index(['f1', 'f2']),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f2', 'f3']),
],
audformat.filewise_index('f2'),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index('f3'),
],
audformat.filewise_index(),
),
(
[
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(),
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(['f1', 'f2']),
audformat.segmented_index(['f1', 'f2']),
],
audformat.segmented_index(['f1', 'f2']),
),
(
[
audformat.segmented_index(['f1', 'f2']),
audformat.segmented_index(['f3', 'f4']),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
],
audformat.segmented_index('f2', 0, 1),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]),
],
audformat.segmented_index(),
),
(
[
audformat.filewise_index(),
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.filewise_index(),
audformat.segmented_index(['f1', 'f2']),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
audformat.filewise_index(['f1', 'f2']),
],
audformat.segmented_index('f2', 0, 1),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
audformat.filewise_index('f1'),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f2', 'f3']),
],
audformat.segmented_index('f2', 0, 1),
),
]
)
def test_intersect(objs, expected):
pd.testing.assert_index_equal(
audformat.utils.intersect(objs),
expected,
)
@pytest.mark.parametrize(
'labels, expected',
[
(
[],
[],
),
(
(['a'], ['b']),
['a', 'b'],
),
(
(['a'], ['b', 'c']),
['a', 'b', 'c'],
),
(
(['a'], ['a']),
['a'],
),
(
[{'a': 0}],
{'a': 0},
),
(
[{'a': 0}, {'b': 1}],
{'a': 0, 'b': 1},
),
(
[{'a': 0}, {'b': 1, 'c': 2}],
{'a': 0, 'b': 1, 'c': 2},
),
(
[{'a': 0, 'b': 1}, {'b': 1, 'c': 2}],
{'a': 0, 'b': 1, 'c': 2},
),
(
[{'a': 0, 'b': 1}, {'b': 2, 'c': 2}],
{'a': 0, 'b': 2, 'c': 2},
),
(
[{'a': 0}, {'a': 1}, {'a': 2}],
{'a': 2},
),
pytest.param(
['a', 'b', 'c'],
[],
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
('a', 'b', 'c'),
[],
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
[{'a': 0, 'b': 1}, ['c']],
[],
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
[['a', 'b'], ['b', 'c'], 'd'],
[],
marks=pytest.mark.xfail(raises=ValueError),
),
pytest.param(
[{0: {'age': 20}}, {'0': {'age': 30}}],
[],
marks=pytest.mark.xfail(raises=ValueError),
),
]
)
def test_join_labels(labels, expected):
assert utils.join_labels(labels) == expected
def test_join_schemes():
# Empty list
audformat.utils.join_schemes([], 'scheme_id')
# One database
db1 = audformat.Database('db1')
scheme1 = audformat.Scheme(labels={'a': [1, 2]})
db1.schemes['scheme_id'] = scheme1
audformat.utils.join_schemes([db1], 'scheme_id')
assert db1.schemes['scheme_id'] == scheme1
# Two databases
db2 = audformat.Database('db2')
scheme2 = audformat.Scheme(labels={'b': [3]})
db2.schemes['scheme_id'] = scheme2
expected = audformat.Scheme(labels={'a': [1, 2], 'b': [3]})
audformat.utils.join_schemes([db1, db2], 'scheme_id')
assert db1.schemes['scheme_id'] == expected
assert db2.schemes['scheme_id'] == expected
# Three database
db3 = audformat.Database('db3')
scheme3 = audformat.Scheme(labels={'a': [4]})
db3.schemes['scheme_id'] = scheme3
expected = audformat.Scheme(labels={'a': [4], 'b': [3]})
audformat.utils.join_schemes([db1, db2, db3], 'scheme_id')
# Fail for schemes without labels
with pytest.raises(ValueError):
db = audformat.Database('db')
db.schemes['scheme_id'] = audformat.Scheme('str')
audformat.utils.join_schemes([db], 'scheme_id')
@pytest.mark.parametrize(
'language, expected',
[
('en', 'eng'),
('en', 'eng'),
('english', 'eng'),
('English', 'eng'),
pytest.param(
'xx', None,
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
'xxx', None,
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
'Bad language', None,
marks=pytest.mark.xfail(raises=ValueError)
)
]
)
def test_map_language(language, expected):
assert utils.map_language(language) == expected
@pytest.mark.parametrize('csv,result', [
(
StringIO('''file
f1
f2
f3'''),
pd.Index(
['f1', 'f2', 'f3'],
name='file',
),
),
(
StringIO('''file,value
f1,0.0
f2,1.0
f3,2.0'''),
pd.Series(
[0.0, 1.0, 2.0],
index=audformat.filewise_index(['f1', 'f2', 'f3']),
name='value',
),
),
(
StringIO('''file,value1,value2
f1,0.0,a
f2,1.0,b
f3,2.0,c'''),
pd.DataFrame(
{
'value1': [0.0, 1.0, 2.0],
'value2': ['a', 'b', 'c'],
},
index=audformat.filewise_index(['f1', 'f2', 'f3']),
columns=['value1', 'value2'],
),
),
(
StringIO('''file,start,value
f1,00:00:00,0.0
f1,00:00:01,1.0
f2,00:00:02,2.0'''),
pd.Series(
[0.0, 1.0, 2.0],
index=audformat.segmented_index(
['f1', 'f1', 'f2'],
starts=['0s', '1s', '2s'],
ends=pd.to_timedelta([pd.NaT, pd.NaT, pd.NaT]),
),
name='value',
),
),
(
StringIO('''file,end,value
f1,00:00:01,0.0
f1,00:00:02,1.0
f2,00:00:03,2.0'''),
pd.Series(
[0.0, 1.0, 2.0],
index=audformat.segmented_index(
['f1', 'f1', 'f2'],
starts=['0s', '0s', '0s'],
ends=['1s', '2s', '3s'],
),
name='value',
),
),
(
StringIO('''file,start,end
f1,00:00:00,00:00:01
f1,00:00:01,00:00:02
f2,00:00:02,00:00:03'''),
pd.MultiIndex.from_arrays(
[
['f1', 'f1', 'f2'],
pd.to_timedelta(['0s', '1s', '2s']),
pd.to_timedelta(['1s', '2s', '3s']),
],
names=['file', 'start', 'end'],
),
),
(
StringIO('''file,start,end,value
f1,00:00:00,00:00:01,0.0
f1,00:00:01,00:00:02,1.0
f2,00:00:02,00:00:03,2.0'''),
pd.Series(
[0.0, 1.0, 2.0],
index=audformat.segmented_index(
['f1', 'f1', 'f2'],
starts=['0s', '1s', '2s'],
ends=['1s', '2s', '3s'],
),
name='value',
),
),
(
StringIO('''file,start,end,value1,value2
f1,00:00:00,00:00:01,0.0,a
f1,00:00:01,00:00:02,1.0,b
f2,00:00:02,00:00:03,2.0,c'''),
pd.DataFrame(
{
'value1': [0.0, 1.0, 2.0],
'value2': ['a', 'b', 'c'],
},
index=audformat.segmented_index(
['f1', 'f1', 'f2'],
starts=['0s', '1s', '2s'],
ends=['1s', '2s', '3s'],
),
columns=['value1', 'value2'],
),
),
pytest.param(
StringIO('''value
0.0
1.0
2.0'''),
None,
marks=pytest.mark.xfail(raises=ValueError)
)
])
def test_read_csv(csv, result):
obj = audformat.utils.read_csv(csv)
if isinstance(result, pd.Index):
pd.testing.assert_index_equal(obj, result)
elif isinstance(result, pd.Series):
pd.testing.assert_series_equal(obj, result)
else:
pd.testing.assert_frame_equal(obj, result)
@pytest.mark.parametrize(
'index, extension, pattern, expected_index',
[
(
audformat.filewise_index(),
'mp3',
None,
audformat.filewise_index(),
),
(
audformat.segmented_index(),
'mp3',
None,
audformat.segmented_index(),
),
(
audformat.filewise_index(['f1.wav', 'f2.wav']),
'mp3',
None,
audformat.filewise_index(['f1.mp3', 'f2.mp3']),
),
(
audformat.segmented_index(['f1.wav', 'f2.wav']),
'mp3',
None,
audformat.segmented_index(['f1.mp3', 'f2.mp3']),
),
(
audformat.filewise_index(['f1.WAV', 'f2.WAV']),
'MP3',
None,
audformat.filewise_index(['f1.MP3', 'f2.MP3']),
),
(
audformat.filewise_index(['f1', 'f2.wv']),
'mp3',
None,
audformat.filewise_index(['f1', 'f2.mp3']),
),
(
audformat.filewise_index(['f1.wav', 'f2.wav']),
'',
None,
audformat.filewise_index(['f1', 'f2']),
),
(
audformat.filewise_index(['f1.ogg', 'f2.wav']),
'mp3',
'.ogg',
audformat.filewise_index(['f1.mp3', 'f2.wav']),
),
]
)
def test_replace_file_extension(index, extension, pattern, expected_index):
index = audformat.utils.replace_file_extension(
index,
extension,
pattern=pattern,
)
pd.testing.assert_index_equal(index, expected_index)
@pytest.mark.parametrize(
'obj, allow_nat, files_duration, root, expected',
[
# empty
(
audformat.filewise_index(),
True,
None,
None,
audformat.segmented_index(),
),
(
audformat.filewise_index(),
False,
None,
None,
audformat.segmented_index(),
),
(
audformat.segmented_index(),
True,
None,
None,
audformat.segmented_index(),
),
(
audformat.segmented_index(),
False,
None,
None,
audformat.segmented_index(),
),
# allow nat
(
audformat.filewise_index(pytest.DB.files[:2]),
True,
None,
None,
audformat.segmented_index(pytest.DB.files[:2]),
),
(
audformat.segmented_index(pytest.DB.files[:2]),
True,
None,
None,
audformat.segmented_index(pytest.DB.files[:2]),
),
(
audformat.segmented_index(
pytest.DB.files[:2],
[0.1, 0.5],
[0.2, pd.NaT],
),
True,
None,
None,
audformat.segmented_index(
pytest.DB.files[:2],
[0.1, 0.5],
[0.2, pd.NaT],
),
),
# forbid nat
(
audformat.filewise_index(pytest.DB.files[:2]),
False,
None,
pytest.DB_ROOT,
audformat.segmented_index(
pytest.DB.files[:2],
[0, 0],
[pytest.FILE_DUR, pytest.FILE_DUR]
),
),
(
audformat.segmented_index(pytest.DB.files[:2]),
False,
None,
pytest.DB_ROOT,
audformat.segmented_index(
pytest.DB.files[:2],
[0, 0],
[pytest.FILE_DUR, pytest.FILE_DUR]
),
),
(
audformat.segmented_index(
pytest.DB.files[:2],
[0.1, 0.5],
[0.2, pd.NaT],
),
False,
None,
pytest.DB_ROOT,
audformat.segmented_index(
pytest.DB.files[:2],
[0.1, 0.5],
[0.2, pytest.FILE_DUR],
),
),
# provide file durations
(
audformat.filewise_index(pytest.DB.files[:2]),
False,
{
os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):
pytest.FILE_DUR * 2,
},
pytest.DB_ROOT,
audformat.segmented_index(
pytest.DB.files[:2],
[0.0, 0.0],
[pytest.FILE_DUR, pytest.FILE_DUR * 2],
),
),
(
audformat.segmented_index(
pytest.DB.files[:2],
[0.1, 0.5],
[pd.NaT, pd.NaT],
),
False,
{
os.path.join(pytest.DB_ROOT, pytest.DB.files[1]):
pytest.FILE_DUR * 2,
},
pytest.DB_ROOT,
audformat.segmented_index(
pytest.DB.files[:2],
[0.1, 0.5],
[pytest.FILE_DUR, pytest.FILE_DUR * 2],
),
),
# file not found
pytest.param(
audformat.filewise_index(pytest.DB.files[:2]),
False,
None,
None,
None,
marks=pytest.mark.xfail(raises=FileNotFoundError),
),
# series and frame
(
pd.Series(
[1, 2],
index=audformat.filewise_index(pytest.DB.files[:2]),
),
True,
None,
None,
audformat.segmented_index(pytest.DB.files[:2]),
),
(
pd.DataFrame(
{'int': [1, 2], 'str': ['a', 'b']},
index=audformat.filewise_index(pytest.DB.files[:2]),
),
True,
None,
None,
audformat.segmented_index(pytest.DB.files[:2]),
),
]
)
def test_to_segmented_index(obj, allow_nat, files_duration, root, expected):
result = audformat.utils.to_segmented_index(
obj,
allow_nat=allow_nat,
files_duration=files_duration,
root=root,
)
if not isinstance(result, pd.Index):
result = result.index
pd.testing.assert_index_equal(result, expected)
if files_duration and not allow_nat:
# for filewise tables we expect a duration for every file
# for segmented only where end == NaT
files = result.get_level_values(audformat.define.IndexField.FILE)
if audformat.index_type(obj) == audformat.define.IndexType.SEGMENTED:
            mask = result.get_level_values(
                audformat.define.IndexField.END
            ).isna()
files = files[mask]
for file in files:
file = os.path.join(root, file)
assert file in files_duration
@pytest.mark.parametrize(
'output_folder,table_id,expected_file_names',
[
pytest.param(
'.',
'segments',
None,
marks=pytest.mark.xfail(raises=ValueError)
),
pytest.param(
os.path.abspath(''),
'segments',
None,
marks=pytest.mark.xfail(raises=ValueError)
),
(
'tmp',
'segments',
[
str(i).zfill(3) + f'_{j}'
for i in range(1, 11)
for j in range(10)
]
),
(
'tmp',
'files',
[str(i).zfill(3) for i in range(1, 101)]
)
]
)
def test_to_filewise(output_folder, table_id, expected_file_names):
has_existed = os.path.exists(output_folder)
frame = utils.to_filewise_index(
obj=pytest.DB[table_id].get(),
root=pytest.DB_ROOT,
output_folder=output_folder,
num_workers=3,
)
assert audformat.index_type(frame) == define.IndexType.FILEWISE
pd.testing.assert_frame_equal(
pytest.DB[table_id].get().reset_index(drop=True),
frame.reset_index(drop=True),
)
files = frame.index.get_level_values(define.IndexField.FILE).values
    if table_id == 'segments':  # the already filewise table ('files') is left unprocessed
assert os.path.isabs(output_folder) == os.path.isabs(files[0])
if table_id == 'files':
# files of unprocessed frame are relative to `root`
files = [os.path.join(pytest.DB_ROOT, f) for f in files]
assert all(os.path.exists(f) for f in files)
file_names = [f.split(os.path.sep)[-1].rsplit('.', 1)[0] for f in files]
assert file_names == expected_file_names
# clean-up
if not has_existed: # output folder was created and can be removed
if os.path.exists(output_folder):
shutil.rmtree(output_folder)
else:
if table_id == 'segments':
for f in frame.index.get_level_values(
define.IndexField.FILE):
if os.path.exists(f):
os.remove(f)
@pytest.mark.parametrize(
'objs, expected',
[
(
[],
audformat.filewise_index(),
),
(
[
audformat.filewise_index(),
],
audformat.filewise_index(),
),
(
[
audformat.filewise_index(),
audformat.filewise_index(),
],
audformat.filewise_index(),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f1', 'f2']),
],
audformat.filewise_index(['f1', 'f2']),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f2', 'f3']),
],
audformat.filewise_index(['f1', 'f2', 'f3']),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index('f3'),
],
audformat.filewise_index(['f1', 'f2', 'f3']),
),
(
[
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(),
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.segmented_index(['f1', 'f2']),
audformat.segmented_index(['f1', 'f2']),
],
audformat.segmented_index(['f1', 'f2']),
),
(
[
audformat.segmented_index(['f1', 'f2']),
audformat.segmented_index(['f3', 'f4']),
],
audformat.segmented_index(['f1', 'f2', 'f3', 'f4']),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
],
audformat.segmented_index(
['f1', 'f2', 'f3'],
[0, 0, 0],
[1, 1, 1],
),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f1'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [1, 1], [2, 2]),
],
audformat.segmented_index(
['f1', 'f2', 'f2', 'f3'],
[0, 0, 1, 1],
[1, 1, 2, 2],
),
),
(
[
audformat.filewise_index(),
audformat.segmented_index(),
],
audformat.segmented_index(),
),
(
[
audformat.filewise_index(['f1', 'f2']),
audformat.segmented_index(),
],
audformat.segmented_index(['f1', 'f2']),
),
(
[
audformat.filewise_index(),
audformat.segmented_index(['f1', 'f2']),
],
audformat.segmented_index(['f1', 'f2']),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
audformat.filewise_index(['f1', 'f2']),
],
audformat.segmented_index(
['f1', 'f1', 'f2', 'f2', 'f3'],
[0, 0, 0, 0, 0],
[pd.NaT, 1, pd.NaT, 1, 1],
),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.segmented_index(['f2', 'f3'], [0, 0], [1, 1]),
audformat.filewise_index('f1'),
],
audformat.segmented_index(
['f1', 'f1', 'f2', 'f3'],
[0, 0, 0, 0],
[pd.NaT, 1, 1, 1],
),
),
(
[
audformat.segmented_index(['f1', 'f2'], [0, 0], [1, 1]),
audformat.filewise_index(['f1', 'f2']),
audformat.filewise_index(['f2', 'f3']),
],
audformat.segmented_index(
['f1', 'f1', 'f2', 'f2', 'f3'],
[0, 0, 0, 0, 0],
[pd.NaT, 1, pd.NaT, 1, pd.NaT],
),
),
]
)
def test_union(objs, expected):
pd.testing.assert_index_equal(
audformat.utils.union(objs),
expected,
)
| 2.046875 | 2 |
misago/threads/api/postingendpoint/attachments.py | HenryChenV/iJiangNan | 1 | 12791299 | from rest_framework import serializers
from django.utils.translation import ugettext as _
from django.utils.translation import ungettext
from misago.acl import add_acl
from misago.conf import settings
from misago.threads.serializers import AttachmentSerializer
from . import PostingEndpoint, PostingMiddleware
class AttachmentsMiddleware(PostingMiddleware):
def use_this_middleware(self):
return bool(self.user.acl_cache['max_attachment_size'])
def get_serializer(self):
return AttachmentsSerializer(
data=self.request.data,
context={
'mode': self.mode,
'user': self.user,
'post': self.post,
}
)
def save(self, serializer):
serializer.save()
class AttachmentsSerializer(serializers.Serializer):
attachments = serializers.ListField(child=serializers.IntegerField(), required=False)
def validate_attachments(self, ids):
self.update_attachments = False
self.removed_attachments = []
self.final_attachments = []
ids = list(set(ids))
validate_attachments_count(ids)
attachments = self.get_initial_attachments(
self.context['mode'], self.context['user'], self.context['post']
)
new_attachments = self.get_new_attachments(self.context['user'], ids)
if not attachments and not new_attachments:
return [] # no attachments
# clean existing attachments
for attachment in attachments:
if attachment.pk in ids:
self.final_attachments.append(attachment)
else:
if attachment.acl['can_delete']:
self.update_attachments = True
self.removed_attachments.append(attachment)
else:
message = _(
"You don't have permission to remove \"%(attachment)s\" attachment."
)
raise serializers.ValidationError(
message % {'attachment': attachment.filename}
)
if new_attachments:
self.update_attachments = True
self.final_attachments += new_attachments
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
def get_initial_attachments(self, mode, user, post):
attachments = []
if mode == PostingEndpoint.EDIT:
queryset = post.attachment_set.select_related('filetype')
attachments = list(queryset)
add_acl(user, attachments)
return attachments
def get_new_attachments(self, user, ids):
if not ids:
return []
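        # only attachments that are not yet bound to any post can be claimed here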
queryset = user.attachment_set.select_related('filetype').filter(
post__isnull=True,
id__in=ids,
)
return list(queryset)
def save(self):
if not self.update_attachments:
return
if self.removed_attachments:
for attachment in self.removed_attachments:
attachment.delete_files()
self.context['post'].attachment_set.filter(
id__in=[a.id for a in self.removed_attachments]
).delete()
if self.final_attachments:
# sort final attachments by id, descending
self.final_attachments.sort(key=lambda a: a.pk, reverse=True)
self.context['user'].attachment_set.filter(
id__in=[a.id for a in self.final_attachments]
).update(post=self.context['post'])
self.sync_attachments_cache(self.context['post'], self.final_attachments)
def sync_attachments_cache(self, post, attachments):
if attachments:
post.attachments_cache = AttachmentSerializer(attachments, many=True).data
for attachment in post.attachments_cache:
del attachment['acl']
del attachment['post']
del attachment['uploader_ip']
else:
post.attachments_cache = None
post.update_fields.append('attachments_cache')
def validate_attachments_count(data):
total_attachments = len(data)
if total_attachments > settings.MISAGO_POST_ATTACHMENTS_LIMIT:
        message = ungettext(
            "You can't attach more than %(limit_value)s file to single post (added %(show_value)s).",
            "You can't attach more than %(limit_value)s files to single post (added %(show_value)s).",
settings.MISAGO_POST_ATTACHMENTS_LIMIT,
)
raise serializers.ValidationError(
message % {
'limit_value': settings.MISAGO_POST_ATTACHMENTS_LIMIT,
'show_value': total_attachments,
}
)
| 1.90625 | 2 |
vida/vida/migrations/0017_form_color.py | smesdaghi/vida | 1 | 12791300 | <filename>vida/vida/migrations/0017_form_color.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('vida', '0016_auto_20160203_1355'),
]
operations = [
migrations.AddField(
model_name='form',
name='color',
field=models.CharField(blank=True, max_length=10, null=True, choices=[(b'#001F3F', b'Navy'), (b'#0074D9', b'Blue'), (b'#7FDBFF', b'Aqua'), (b'#39CCCC', b'Teal'), (b'#3D9970', b'Olive'), (b'#2ECC40', b'Green'), (b'#01FF70', b'Lime'), (b'#FFDC00', b'Yellow'), (b'#FF851B', b'Orange'), (b'#FF4136', b'Red'), (b'#F012BE', b'Fuchsia'), (b'#B10DC9', b'Purple'), (b'#85144B', b'Maroon'), (b'#FFFFFF', b'White'), (b'#DDDDDD', b'Silver'), (b'#AAAAAA', b'Gray'), (b'#111111', b'Black')]),
),
]
| 1.835938 | 2 |
tests/test_isosurface.py | TormodLandet/Ocellaris | 1 | 12791301 | # Copyright (C) 2017-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
import numpy
from ocellaris import Simulation, setup_simulation
import pytest
from helpers import skip_in_parallel
ISO_INPUT = """
ocellaris:
type: input
version: 1.0
mesh:
type: Rectangle
Nx: 4
Ny: 4
probes:
- name: free_surface
enabled: yes
type: IsoSurface
value: 0.5
field: c
custom_hook: MultiPhaseModelUpdated
multiphase_solver:
type: BlendedAlgebraicVOF
function_space_colour: DG
polynomial_degree_colour: 0
solver: {type: AnalyticalSolution}
boundary_conditions: [{'name': 'all', 'selector': 'code', 'inside_code': 'on_boundary'}]
physical_properties: {nu0: 1.0, nu1: 1, rho0: 1, rho1: 1}
output: {log_enabled: no}
"""
@pytest.mark.parametrize("degree", [0, 1, 2])
def test_isoline_horizontal(degree):
sim = Simulation()
sim.input.read_yaml(yaml_string=ISO_INPUT)
sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
setup_simulation(sim)
probe = sim.probes['free_surface']
# Initial value with sharp interface at x[1] == 0.5
Vc = sim.data['Vc']
c = sim.data['c']
dm = Vc.dofmap()
arr = c.vector().get_local()
for cell in dolfin.cells(sim.data['mesh']):
cell_value = 1 if cell.midpoint().y() < 0.5 else 0
for dof in dm.cell_dofs(cell.index()):
arr[dof] = cell_value
c.vector().set_local(arr)
c.vector().apply('insert')
lines = probe.run(force_active=True)
print('\nDegree:', degree, 'Vcdim:', Vc.dim())
print(probe.name, probe.field_name, probe.value)
print(len(lines))
if sim.ncpu > 1:
raise pytest.skip()
for x, y in lines:
print('x', x, '\ny', y)
assert all(abs(y - 0.5) < 1e-12)
# Results should be in sorted order
xdx = numpy.diff(x)
assert all(xdx > 0) or all(xdx < 0)
assert len(lines) == 1
@pytest.mark.parametrize("degree", [1])
def test_isoline_circle(degree):
sim = Simulation()
sim.input.read_yaml(yaml_string=ISO_INPUT)
sim.input.set_value('multiphase_solver/polynomial_degree_colour', degree)
sim.input.set_value('mesh/Nx', 10)
sim.input.set_value('mesh/Ny', 10)
sim.input.set_value(
'initial_conditions/cp/cpp_code', '1.1*pow(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2), 0.5)'
)
setup_simulation(sim)
sim.data['c'].assign(sim.data['cp'])
probe = sim.probes['free_surface']
lines = probe.run(force_active=True)
if False:
from matplotlib import pyplot
c = dolfin.plot(sim.data['c'])
pyplot.colorbar(c)
for x, y in lines:
pyplot.plot(x, y)
pyplot.savefig('test_isoline_circle_%d.png' % degree)
pyplot.close()
print(probe.name, probe.field_name, probe.value)
print(len(lines))
for x, y in lines:
# Check that the radius is constant
r = ((x - 0.5) ** 2 + (y - 0.5) ** 2) ** 0.5
print('x', x)
print('y', y)
print('dr', r - 0.5 / 1.1)
assert all(abs(r - 0.5 / 1.1) < 5e-3)
# Check that the line is clockwise or counter clockwise
# for all segments, no going back and forth
theta = numpy.arctan2(y - 0.5, x - 0.5) * 180 / numpy.pi
theta[theta < 0] += 360
tdt = numpy.diff(theta)
tdt2 = tdt[abs(tdt) < 340]
print('dt', tdt)
assert all(tdt2 > 0) or all(tdt2 < 0)
if sim.ncpu == 1:
# The iso surface code is not written for full parallel support
assert len(lines) == 1
assert x[0] == x[-1] and y[0] == y[-1], "The loop should be closed"
| 2 | 2 |
DifferentialExpression/05_Volcano_Plots.py | LewisLabUCSD/CHOSecretoryKO | 1 | 12791302 | <filename>DifferentialExpression/05_Volcano_Plots.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
csv_files = os.listdir(os.getcwd())
csv_files = [f for f in csv_files if "Line" in f and ".csv" in f]
# Function to determine significance
def isSignificant(xval,yval, xthr = 1, ythr = 2):
if abs(xval) >= xthr and abs(yval) >= ythr:
return True
else:
return False
# Read Entrez -> Name map
entrezToName = pd.read_csv("EntrezToNameMap.csv", header=0)
for csv_file in csv_files:
print("Processing file {}".format(csv_file))
df = pd.read_csv(csv_file, header=0)
df = df.rename(columns={"Unnamed: 0":"gename"})
x = df['log2FoldChange'].values
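    # a small epsilon keeps adjusted p-values of exactly 0 from producing -inf in the log10 below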
y = df['padj'].values + 1e-5
y = -np.log10(y)
significant_idx = [i for i in range(len(x)) if isSignificant(x[i],y[i])]
nonsignificant_idx = [i for i in range(len(x)) if not isSignificant(x[i],y[i])]
# Plot Volcano Plot
plt.figure(figsize=(8,8))
plt.scatter(x[significant_idx], y[significant_idx], c='red', alpha=0.35, label='Significant')
plt.scatter(x[nonsignificant_idx], y[nonsignificant_idx], c='blue', alpha=0.35, label='Nonsignificant')
plt.vlines(-1, 0, 5, linestyles='dashed')
plt.vlines(1, 0, 5, linestyles='dashed')
plt.hlines(2, min(x), max(x), linestyles='dashed')
plt.xlabel('Log2 Fold Change')
plt.ylabel('-log10 (adjusted p-value)')
plt.legend()
plt.savefig(csv_file.replace(".csv","_volcanoPlot.pdf"))
# Save names of significant differentially expressed genes
tmp_df = df.iloc[significant_idx,:].reset_index(drop=True)
final_df = pd.merge(entrezToName, tmp_df, on="gename")
final_df['keggGeneName'] = ["cge:" + str(id) for id in list(final_df['geneid'])] # Required for pathway analysis with ROntoTools
    final_df.to_csv(csv_file.replace(".csv","_SignificantGenes.csv"), index=False)
| 2.796875 | 3 |
S4/S4 Library/simulation/gsi_handlers/club_handlers.py | NeonOcean/Environment | 1 | 12791303 | from clubs.club_enums import ClubHangoutSetting
from sims4.gsi.dispatcher import GsiHandler
from sims4.gsi.schema import GsiGridSchema, GsiFieldVisualizers
import services
import sims4.resources
club_schema = GsiGridSchema(label='Club Info')
club_schema.add_field('name', label='Name', type=GsiFieldVisualizers.STRING)
club_schema.add_field('club_id', label='Club ID', type=GsiFieldVisualizers.STRING, unique_field=True)
club_schema.add_field('hangout', label='Hangout Location', type=GsiFieldVisualizers.STRING)
club_schema.add_field('associated_color', label='Associated Color', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_male_child', label='Male Child Uniform', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_female_child', label='Female Child Uniform', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_male_adult', label='Male Adult Uniform', type=GsiFieldVisualizers.STRING)
club_schema.add_field('uniform_female_adult', label='Female Adult Uniform', type=GsiFieldVisualizers.STRING)
def generate_all_club_seeds():
instance_manager = services.get_instance_manager(sims4.resources.Types.CLUB_SEED)
if instance_manager.all_instances_loaded:
return [cls.__name__ for cls in instance_manager.types.values()]
return []
def add_club(manager):
with club_schema.add_view_cheat('clubs.create_club_from_seed', label='Create Club') as cheat:
cheat.add_token_param('club_seed', dynamic_token_fn=generate_all_club_seeds)
services.get_instance_manager(sims4.resources.Types.CLUB_SEED).add_on_load_complete(add_club)
with club_schema.add_view_cheat('clubs.remove_club_by_id', label='Remove Club') as cheat:
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.remove_sim_from_club_by_id', label='Remove Sim From Club') as cheat:
cheat.add_token_param('sim_id')
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.end_gathering_by_club_id', label='End Club Gathering') as cheat:
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.start_gathering_by_club_id', label='Start Gathering') as cheat:
cheat.add_token_param('club_id')
with club_schema.add_view_cheat('clubs.refresh_safe_seed_data_for_club', label='Refresh Safe Data') as cheat:
cheat.add_token_param('club_id')
def get_buck_amounts():
return (1, 10, 100, 1000)
with club_schema.add_view_cheat('bucks.update_bucks_by_amount', label='Add Club Bucks') as cheat:
cheat.add_static_param('ClubBucks')
cheat.add_token_param('amount', dynamic_token_fn=get_buck_amounts)
cheat.add_token_param('club_id')
with club_schema.add_has_many('club_members', GsiGridSchema, label='Club Members') as sub_schema:
sub_schema.add_field('sim_id', label='Sim ID', width=0.35)
sub_schema.add_field('sim_name', label='Sim Name', width=0.4)
sub_schema.add_field('is_leader', label='Is Leader')
with club_schema.add_has_many('club_recent_members', GsiGridSchema, label='Recent Members') as sub_schema:
sub_schema.add_field('sim_id', label='Sim ID', width=0.35)
sub_schema.add_field('sim_name', label='Sim Name', width=0.4)
with club_schema.add_has_many('club_rules', GsiGridSchema, label='Club Rules') as sub_schema:
sub_schema.add_field('rule', label='Rule')
with club_schema.add_has_many('membership_criteria', GsiGridSchema, label='Membership Criteria') as sub_schema:
sub_schema.add_field('criteria', label='Criteria')
@GsiHandler('club_info', club_schema)
def generate_club_info_data():
club_service = services.get_club_service()
if club_service is None:
return
sim_info_manager = services.sim_info_manager()
club_info = []
for club in club_service.clubs:
if club.hangout_setting == ClubHangoutSetting.HANGOUT_VENUE:
club_hangout_str = 'Venue: {}'.format(str(club.hangout_venue))
elif club.hangout_setting == ClubHangoutSetting.HANGOUT_LOT:
club_hangout_str = 'Zone: {}'.format(club.hangout_zone_id)
else:
club_hangout_str = 'None'
entry = {'name': str(club), 'club_id': str(club.club_id), 'hangout': club_hangout_str, 'associated_color': str(club.associated_color) if club.associated_color else 'None', 'uniform_male_child': str(bool(club.uniform_male_child)), 'uniform_female_child': str(bool(club.uniform_female_child)), 'uniform_male_adult': str(bool(club.uniform_male_adult)), 'uniform_female_adult': str(bool(club.uniform_female_adult))}
members_info = []
entry['club_members'] = members_info
for sim in club.members:
group_members_entry = {'sim_id': str(sim.id), 'sim_name': sim.full_name, 'is_leader': str(sim is club.leader)}
members_info.append(group_members_entry)
entry['club_recent_members'] = [{'sim_id': str(sim_id), 'sim_name': str(sim_info_manager.get(sim_id))} for sim_id in club._recent_member_ids]
rules_info = []
entry['club_rules'] = rules_info
if club.rules:
for rule in club.rules:
rules_entry = {'rule': str(rule)}
rules_info.append(rules_entry)
criteria_info = []
entry['membership_criteria'] = criteria_info
if club.membership_criteria:
for criteria in club.membership_criteria:
criteria_entry = {'criteria': str(criteria)}
criteria_info.append(criteria_entry)
club_info.append(entry)
return club_info
| 2.03125 | 2 |
Connector/rpcutils/error.py | bridgedragon/NodeChain | 0 | 12791304 | <gh_stars>0
#!/usr/bin/python
from .constants import *
from json import JSONEncoder
from httputils import error
class RpcError(Exception):
def __init__(self, id, message, code):
self._message = message
self._code = code
self._id = id
super().__init__(self.message)
@property
def code(self):
return self._code
@code.setter
def code(self, value):
self._code = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def message(self):
return self._message
@message.setter
def message(self, value):
self._message = value
def parseToHttpError(self):
return error.Error(
message=self.message,
code=self.code
)
def jsonEncode(self):
return RpcErrorEncoder().encode(self)
class RpcBadRequestError(RpcError):
def __init__(self, id, message):
super().__init__(
id=id,
message=message,
code=BAD_REQUEST_CODE
)
def parseToHttpError(self):
return error.BadRequestError(message=self.message)
class RpcMethodNotAllowedError(RpcError):
def __init__(self, id, message):
super().__init__(
id=id,
message=message,
code=METHOD_NOT_ALLOWED_CODE
)
def parseToHttpError(self):
return error.MethodNotAllowedError(message=self.message)
class RpcInternalServerError(RpcError):
def __init__(self, id, message):
super().__init__(
id=id,
message=message,
code=INTERNAL_SERVER_ERROR_CODE
)
def parseToHttpError(self):
return error.InternalServerError(message=self.message)
class RpcNotFoundError(RpcError):
def __init__(self, id, message):
super().__init__(
id=id,
message=message,
code=NOT_FOUND_CODE
)
def parseToHttpError(self):
return error.NotFoundError(message=self.message)
class RpcErrorEncoder(JSONEncoder):
def encode(self, o):
return {
ID: o.id,
JSON_RPC: JSON_RPC_VERSION,
ERROR: {
CODE: o.code,
MESSAGE: o.message
}
}
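# Minimal usage sketch (illustrative, not part of the original module). The exact key names
# depend on the imported constants; they are assumed here to map to the usual JSON-RPC
# fields ('id', 'jsonrpc', 'error', 'code', 'message'):
#
#   err = RpcBadRequestError(id=1, message='missing parameter')
#   err.jsonEncode()
#   # -> {'id': 1, 'jsonrpc': <JSON_RPC_VERSION>, 'error': {'code': <BAD_REQUEST_CODE>, 'message': 'missing parameter'}}
#
# parseToHttpError() converts the same error into its httputils counterpart for HTTP responses.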
| 2.4375 | 2 |
X_airbnb_revisited/airbnb_pricer/airbnb/compile_airbnb_data.py | djsegal/metis | 1 | 12791305 | import pandas as pd
import geopandas
def compile_airbnb_data(cur_link_table):
cur_tables = []
for cur_row in cur_link_table.itertuples():
tmp_table = cur_row.table.copy()
tmp_table["month"] = cur_row.month
tmp_table["year"] = cur_row.year
tmp_table["datetime"] = cur_row.datetime
cur_tables.append(tmp_table)
cur_data = pd.concat(cur_tables)
cur_data = cur_data.sort_values(by=["id", "datetime"], ascending=False).reset_index(drop=True)
cur_data = cur_data.drop(columns=["host_id", "first_review", "last_review"])
print(len(cur_data))
cur_selector = cur_data.groupby("id")["zipcode"].nunique()
cur_selector = cur_selector[ cur_selector == 1 ]
cur_data = cur_data[cur_data.id.isin(cur_selector.index)]
print(len(cur_data))
cur_data = cur_data[cur_data.room_type == "Entire home/apt"]
cur_data = cur_data.drop(columns = ["room_type"])
print(len(cur_data))
cur_data = cur_data[cur_data.property_type == "Apartment"]
cur_data = cur_data.drop(columns = ["property_type"])
print(len(cur_data))
cur_data = cur_data[cur_data.bed_type == "Real Bed"]
cur_data = cur_data.drop(columns = ["bed_type"])
print(len(cur_data))
cur_data = cur_data.dropna(subset=["zipcode", "beds", "bedrooms", "bathrooms"])
print(len(cur_data))
cur_data["price"] = cur_data.price.str.replace(r"[\$\,]", "").astype(float).round().astype(int)
cur_data = cur_data[cur_data["price"] < 1250]
cur_data = cur_data[cur_data["price"] > 25]
print(len(cur_data))
cur_selector = cur_data.groupby("id")["id"].count()
cur_selector = cur_selector[ cur_selector > 3 ]
cur_data = cur_data[cur_data.id.isin(cur_selector.index)]
print(len(cur_data))
replaced_columns = [
'neighbourhood_group_cleansed', 'latitude', 'longitude',
'accommodates', 'bathrooms', 'bedrooms', 'beds',
'number_of_reviews', 'review_scores_rating',
'reviews_per_month', 'is_location_exact', "datetime"
]
firsts_table = cur_data.groupby("id").first()[replaced_columns]
cur_data = cur_data.drop(columns=replaced_columns).merge(firsts_table, on="id", how="right")
cur_data = geopandas.GeoDataFrame(
cur_data,
geometry=geopandas.points_from_xy(
cur_data.longitude, cur_data.latitude
)
)
cur_data = cur_data.drop(columns=["longitude", "latitude"])
cur_data = cur_data.dropna(subset=["review_scores_rating", "reviews_per_month"])
print(len(cur_data))
cur_data = cur_data[cur_data.review_scores_rating > 60]
cur_data = cur_data.drop(columns=["review_scores_rating"])
print(len(cur_data))
cur_data = cur_data[cur_data.is_location_exact == "t"]
cur_data = cur_data.drop(columns=["is_location_exact"])
print(len(cur_data))
cur_data = cur_data[cur_data.neighbourhood_group_cleansed.isin(["Manhattan", "Brooklyn"])]
cur_data["is_brooklyn"] = cur_data.neighbourhood_group_cleansed == "Brooklyn"
cur_data = cur_data.drop(columns = ["neighbourhood_group_cleansed"])
print(len(cur_data))
cur_data = cur_data[cur_data.accommodates < 9]
print(len(cur_data))
cur_data = cur_data[cur_data.bathrooms >= 1]
print(len(cur_data))
cur_data = cur_data[ cur_data.bedrooms > 0 ]
cur_data = cur_data[ cur_data.bedrooms < 5 ]
print(len(cur_data))
cur_data = cur_data[ cur_data.beds > 0 ]
cur_data = cur_data[ cur_data.beds < 7 ]
print(len(cur_data))
cur_data = cur_data[ cur_data.number_of_reviews > 5 ]
cur_data = cur_data.drop(columns=["number_of_reviews"])
print(len(cur_data))
cur_data = cur_data[ cur_data.reviews_per_month > 1/8 ]
cur_data = cur_data.drop(columns=["reviews_per_month"])
print(len(cur_data))
cur_data = cur_data.drop(columns=["datetime"])
cur_data = cur_data.reset_index(drop=True)
cur_data["zipcode"] = cur_data["zipcode"].str.split("-").map(lambda work_list: work_list[0])
cur_data["zipcode"] = cur_data["zipcode"].astype("int")
return cur_data
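# Illustrative usage sketch (the variable names below are hypothetical; the expected columns of
# cur_link_table -- 'table', 'month', 'year', 'datetime' -- follow from the attribute access
# in the loop above):
#
#   link_table = pd.DataFrame({
#       "table": [listings_2019_06, listings_2019_07],   # monthly listings DataFrames
#       "month": [6, 7],
#       "year": [2019, 2019],
#       "datetime": pd.to_datetime(["2019-06-01", "2019-07-01"]),
#   })
#   cleaned = compile_airbnb_data(link_table)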
| 3.015625 | 3 |
vedastr_cstr/vedastr/models/bodies/sequences/transformer/__init__.py | bsm8734/formula-image-latex-recognition | 13 | 12791306 | <reponame>bsm8734/formula-image-latex-recognition
from .decoder import TransformerDecoder # noqa 401
from .encoder import TransformerEncoder # noqa 401
| 0.902344 | 1 |
app/main.py | cultivationdev/py-debian-conda-flask-template | 0 | 12791307 | import logging
from app.core.app import create_app
from app.core.cfg import cfg
__author__ = 'kclark'
logger = logging.getLogger(__name__)
app = create_app()
def run_app():
logger.info('App Server Initializing')
app.run(host='localhost', port=5000, threaded=True, debug=cfg.debug_mode)
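    # app.run() blocks until the development server stops, so the log line below fires on shutdown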
logger.info('App Server Running')
if __name__ == '__main__':
run_app()
| 2.078125 | 2 |
tests/test_encoding.py | kube-HPC/python-wrapper.hkube | 1 | 12791308 | <reponame>kube-HPC/python-wrapper.hkube
import os
import random
from hkube_python_wrapper.util.encoding import Encoding
size = 1 * 1024
def test_none_encoding():
encoding = Encoding('msgpack')
decoded = encoding.decode(header=None, value=None)
assert decoded is None
def test_json_encoding():
encoding = Encoding('json')
data = createObjectJson(size)
encoded = encoding.encode(data, plainEncode=True)
decoded = encoding.decode(value=encoded, plainEncode=True)
assert data == decoded
def test_bson_encoding():
encoding = Encoding('bson')
data = createObject(size, size)
(header, payload) = encoding.encode(data)
decoded = encoding.decode(header=header, value=payload)
assert data == decoded
def test_msgpack_encoding():
encoding = Encoding('msgpack')
data = create_bytearray(size)
(header, payload) = encoding.encode(data)
decoded = encoding.decode(header=header, value=payload)
assert data == decoded
def test_encoding_header_payload_bytes():
encoding = Encoding('msgpack')
data = create_bytearray(size)
(header, payload) = encoding.encode(data)
decoded = encoding.decode(header=header, value=payload)
assert data == decoded
def test_encoding_header_payload_object():
encoding = Encoding('msgpack')
data = createObject(size, size)
(header, payload) = encoding.encode(data)
decoded = encoding.decode(header=header, value=payload)
assert data == decoded
def test_encoding_no_header_bytes():
encoding = Encoding('msgpack')
data = create_bytearray(size)
(_, payload) = encoding.encode(data)
decoded = encoding.decode(header=None, value=payload)
assert data == decoded
def test_encoding_no_header_object():
encoding = Encoding('msgpack')
data = createObject(size, size)
(_, payload) = encoding.encode(data)
decoded = encoding.decode(header=None, value=payload)
assert data == decoded
def test_encoding_header_in_payload_bytes():
encoding = Encoding('msgpack')
data = create_bytearray(size)
(header, payload) = encoding.encode(data)
decoded = encoding.decode(header=None, value=header + payload)
assert data == decoded
def test_encoding_header_in_payload_object():
encoding = Encoding('msgpack')
data = createObject(size, size)
(header, payload) = encoding.encode(data)
decoded = encoding.decode(header=None, value=header + payload)
assert data == decoded
def create_bytearray(sizeBytes):
return b'\xdd' * (sizeBytes)
def randomString(n):
min_lc = ord(b'a')
len_lc = 26
ba = bytearray(os.urandom(n))
for i, b in enumerate(ba):
ba[i] = min_lc + b % len_lc # convert 0..255 to 97..122
return ba.decode("utf-8")
def randomInt(sizeBytes):
return random.sample(range(0, sizeBytes), sizeBytes)
def createObject(sizeBytes, sizeRandom):
obj = {
"bytesData": bytearray(b'\xdd' * (sizeBytes)),
"anotherBytesData": bytearray(sizeBytes),
"randomString": randomString(sizeRandom),
"randomIntArray": randomInt(sizeRandom),
"dataString": randomString(sizeRandom),
"bool": False,
"anotherBool": False,
"nestedObj": {
"dataString": randomString(sizeRandom),
"randomIntArray": randomInt(sizeRandom)
}
}
return obj
def createObjectJson(sizeRandom):
obj = {
"randomString": randomString(sizeRandom),
"randomIntArray": randomInt(sizeRandom),
"dataString": randomString(sizeRandom),
"bool": False,
"anotherBool": False,
"nestedObj": {
"dataString": randomString(sizeRandom),
"randomIntArray": randomInt(sizeRandom)
}
}
return obj
| 2.203125 | 2 |
pubmedextract/sex_utils/subdivide_table.py | allenai/pubmedextract | 8 | 12791309 | <reponame>allenai/pubmedextract<filename>pubmedextract/sex_utils/subdivide_table.py
from itertools import groupby
import numpy as np
from pubmedextract.sex_utils.regex_utils import categorize_cell_string
def subdivide(table):
"""
- Categorize each cell as string, value, or empty
- Figure out which of the top rows are column headers -> combine them
- Figure out which of the leftmost columns are row headers -> combine them
- Put the remaining subtable into a numpy array
TODO: Common problem: "n (%)" columns are often split up by Omnipage!
If two adjacent columns have column headers that end with 'n' and '%'/'(%)' respectively,
then they should be concatenated
"""
# first, categorize each cell
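    # Illustrative sketch (not from the original) of what the category grid looks like
    # for a small table such as
    #     ["Characteristic", "Male", "Female"]
    #     ["Age, years",     "34.2", "33.8" ]
    #     ["Smokers",        "12",   "9"    ]
    # categorize_cell_string would roughly yield
    #     S S S
    #     S V V
    #     S V V
    # so row 0 is detected as the column header row, column 0 as the row header column,
    # and the remaining 2x2 block of values becomes the numerical subtable.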
table_categories = np.zeros((table.nrow, table.ncol), dtype=np.unicode_)
for i in range(table.nrow):
for j in range(table.ncol):
table_categories[i, j] = categorize_cell_string(table[i, j])
# figure out how many of the top rows are column headers
column_header_rows = []
for i in range(0, table.nrow):
# sometimes the caption gets lobbed into the first column
# and splayed across many rows. detect that here:
all_rows_flag = (table[i, 0].indices[-1][1] + 1 == table.ncol)
# check if the number of strings is more than 2/3s of the entire row
s_count = np.sum(table_categories[i, :] == 'S')
v_count = np.sum(table_categories[i, :] == 'V')
if all_rows_flag or _row_or_col_is_header(s_count, v_count):
column_header_rows.append(i)
else:
break # as soon as this is false, we quit
# TODO: maybe find other rows that are not contiguous with the top rows?
# figure out how many of the leftmost columns are row headers
# excluding rows with column headers
first_non_header_row_ind = _get_and_increment_last(column_header_rows)
row_header_columns = []
for i in range(0, table.ncol):
s_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'S')
v_count = np.sum(table_categories[first_non_header_row_ind:, i] == 'V')
# TODO: maybe have a different condition because we have cut out some rows
if _row_or_col_is_header(s_count, v_count):
row_header_columns.append(i)
else:
break
# TODO: maybe find other columns that are not contiguous with the top columns?
# get headers
column_headers = _combine_omnipage_cell_list(table, column_header_rows, row_flag=True)
row_headers = _combine_omnipage_cell_list(table, row_header_columns, row_flag=False)
# edge case if there are no column header rows
if len(column_headers) == 0:
column_headers = ['col_' + str(i) for i in range(table.ncol)]
# get numerical_subtable
first_non_header_col_ind = _get_and_increment_last(row_header_columns)
numerical_columns = []
for col in range(first_non_header_col_ind, table.ncol):
# extract the part of the column that isn't the header
col = [str(i) for i in table[:, col]][first_non_header_row_ind:]
numerical_columns.append(col)
# we only care about the rows/columns that span the numerical subtable
column_headers = column_headers[first_non_header_col_ind:]
row_headers = row_headers[first_non_header_row_ind:]
# merge columns to previous one if the column is mostly empty
empty_cols = (table_categories == 'E').mean(0)[first_non_header_col_ind:]
empty_col_inds = np.where(empty_cols > 0.9)[0]
ind_ranges_to_merge = [[i - 1, i] for i in empty_col_inds if i > 0]
# merge columns if they have the same headers
i = 0
for k, g in groupby(column_headers):
g = list(g)
ind_ranges_to_merge.append(list(range(i, i + len(g))))
i += len(g)
# combine overlapping merging index ranges
ind_ranges_to_merge = _combine_ind_ranges(ind_ranges_to_merge)
# perform the merge
# note: only merge the cell contents if they are not identical
numerical_columns_merged = []
column_headers_merged = []
for ind_range_to_merge in ind_ranges_to_merge:
subcols = [numerical_columns[i] for i in ind_range_to_merge]
merged_cols = [' '.join(_unique_sorted(j)).strip() for j in zip(*subcols)]
numerical_columns_merged.append(merged_cols)
column_headers_merged.append(column_headers[ind_range_to_merge[0]])
numerical_subtable = np.array(numerical_columns_merged).T
# if rows of the numerical subtable are all empty
# then this row's header can be appended to all the subsequent row headers
# until the next empty set of rows
# also sometimes there are no row headers, so we have to ensure the lens match
if len(numerical_subtable) > 1 and len(numerical_subtable) == len(row_headers):
row_headers, numerical_subtable = _append_row_header_to_subsequent_rows(row_headers, numerical_subtable)
return column_headers_merged, row_headers, numerical_subtable
def _combine_omnipage_cell_list(table, inds, row_flag):
"""
Utility function for subdivide
"""
if row_flag:
row_or_col_list = [table[i, :] for i in inds]
else:
row_or_col_list = [table[:, i] for i in inds]
return [' '.join(_unique_sorted([str(k) for k in j])).strip()
for j in zip(*row_or_col_list)]
def _get_and_increment_last(l):
"""
Utility function for subdivide
"""
if len(l) > 0:
return l[-1] + 1
else:
return 0
def _row_or_col_is_header(s_count, v_count):
"""
Utility function for subdivide
Heuristic for whether a row/col is a header or not.
"""
if s_count == 1 and v_count == 1:
return False
else:
return (s_count + 1) / (v_count + s_count + 1) >= 2. / 3.
def _combine_ind_ranges(ind_ranges_to_merge):
"""
Utility function for subdivide
Function that combines overlapping integer ranges.
Example
[[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]]
"""
ind_ranges_to_merge = sorted(ind_ranges_to_merge)
stack = []
result = []
for curr in ind_ranges_to_merge:
if len(stack) == 0:
stack.append(curr)
elif stack[-1][-1] >= curr[0]:
prev = stack.pop()
merged = sorted(list(set(prev + curr)))
stack.append(merged)
else:
prev = stack.pop()
result.append(prev)
stack.append(curr)
result += stack
return result
def _unique_sorted(seq):
"""
Utility function for subdivide
Keeps unique values but preserves order
"""
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
def _append_row_header_to_subsequent_rows(row_headers, numerical_subtable):
"""
Utility function for subdivide
    Some row headers actually apply to subsequent rows.
E.g.:
Sex np.nan np.nan
Male 50 30
Female 30 20
    For this case, the string 'Sex' is prepended to 'Male' and 'Female' to get:
Sex - Male 50 30
Sex - Female 30 20
"""
empty_flag = (numerical_subtable == '').mean(1) == 1
empty_rows = list(np.where(empty_flag)[0])
non_empty_rows = np.where(~empty_flag)[0]
if len(empty_rows) > 0:
if empty_rows[-1] != len(row_headers):
empty_rows.append(len(row_headers))
all_append_rows = [list(range(empty_rows[i] + 1, empty_rows[i + 1])) for i in range(len(empty_rows) - 1)]
for i, append_rows in zip(empty_rows, all_append_rows):
for append_row in append_rows:
row_headers[append_row] = row_headers[i] + ' - ' + row_headers[append_row]
row_headers = [row_headers[i] for i in non_empty_rows]
numerical_subtable = numerical_subtable[non_empty_rows]
return row_headers, numerical_subtable
| 2.9375 | 3 |
logic.py | rakeshr99/2048-Game-AI-Based-Solver | 0 | 12791310 | #
# CS1010FC --- Programming Methodology
#
# Mission N Solutions
#
# Note that written answers are commented out to allow us to run your
# code easily while grading your problem set.
from random import *
from copy import deepcopy
import math
import random
#######
#Task 1a#
#######
# [Marking Scheme]
# Points to note:
# Matrix elements must be equal but not identical
# 1 mark for creating the correct matrix
def new_game(n):
matrix = []
for i in range(n):
matrix.append([0] * n)
return matrix
###########
# Task 1b #
###########
# [Marking Scheme]
# Points to note:
# Must ensure that it is created on a zero entry
# 1 mark for creating the correct loop
def new_tile(mat):
seq = [2] * 90 + [4]
newTile = choice(seq)
emptySquareList = empty_cells(mat)
emptySquare = choice(emptySquareList)
mat[emptySquare[0]][emptySquare[1]] = newTile
return mat
###########
# Task 1c #
###########
# [Marking Scheme]
# Points to note:
# Matrix elements must be equal but not identical
# 0 marks for completely wrong solutions
# 1 mark for getting only one condition correct
# 2 marks for getting two of the three conditions
# 3 marks for correct checking
def game_state(mat):
for i in range(len(mat)):
for j in range(len(mat[0])):
if mat[i][j]==2048:
return 'win'
for i in range(len(mat)-1): #intentionally reduced to check the row on the right and below
for j in range(len(mat[0])-1): #more elegant to use exceptions but most likely this will be their solution
if mat[i][j]==mat[i+1][j] or mat[i][j+1]==mat[i][j]:
return 'not over'
for i in range(len(mat)): #check for any zero entries
for j in range(len(mat[0])):
if mat[i][j]==0:
return 'not over'
for k in range(len(mat)-1): #to check the left/right entries on the last row
if mat[len(mat)-1][k]==mat[len(mat)-1][k+1]:
return 'not over'
for j in range(len(mat)-1): #check up/down entries on last column
if mat[j][len(mat)-1]==mat[j+1][len(mat)-1]:
return 'not over'
return 'lose'
###########
# Task 2a #
###########
# [Marking Scheme]
# Points to note:
# 0 marks for completely incorrect solutions
# 1 mark for solutions that show general understanding
# 2 marks for correct solutions that work for all sizes of matrices
def reverse(mat):
new=[]
for i in range(len(mat)):
new.append([])
for j in range(len(mat[0])):
new[i].append(mat[i][len(mat[0])-j-1])
return new
###########
# Task 2b #
###########
# [Marking Scheme]
# Points to note:
# 0 marks for completely incorrect solutions
# 1 mark for solutions that show general understanding
# 2 marks for correct solutions that work for all sizes of matrices
def transpose(mat):
new=[]
for i in range(len(mat[0])):
new.append([])
for j in range(len(mat)):
new[i].append(mat[j][i])
return new
##########
# Task 3 #
##########
# [Marking Scheme]
# Points to note:
# The way to do movement is compress -> merge -> compress again
# Basically if they can solve one side, and use transpose and reverse correctly they should
# be able to solve the entire thing just by flipping the matrix around
# No idea how to grade this one at the moment. I have it pegged to 8 (which gives you like,
# 2 per up/down/left/right?) But if you get one correct likely to get all correct so...
# Check the down one. Reverse/transpose if ordered wrongly will give you wrong result.
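# Illustrative example (not part of the original solution) of one left move on a single row
# using the compress -> merge -> compress pipeline implemented below:
#   [2, 2, 4, 0]  --cover_up-->  [2, 2, 4, 0]
#                 --merge----->  [4, 0, 4, 0]   (score += 4)
#                 --cover_up-->  [4, 4, 0, 0]
# Note the two 4s are not merged again within the same move, which matches the 2048 rules.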
def cover_up(mat):
new=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
done=False
for i in range(4):
count=0
for j in range(4):
if mat[i][j]!=0:
new[i][count]=mat[i][j]
if j!=count:
done=True
count+=1
return (new,done)
def merge(mat):
score = 0
done=False
for i in range(4):
for j in range(3):
if mat[i][j]==mat[i][j+1] and mat[i][j]!=0:
score += mat[i][j] * 2
mat[i][j]*=2
mat[i][j+1]=0
done=True
return (mat,done, score)
def empty_cells(mat):
"""
Return a list of empty cells.
"""
emptySquareList = []
for row in range(len(mat)):
for col in range(len(mat[0])):
if mat[row][col] == 0:
emptySquareList.append([row, col])
return emptySquareList
def getMaxTile(mat):
maxTile = 0
for x in range(len(mat)):
for y in range(len(mat[x])):
maxTile = max(maxTile, mat[x][y])
return maxTile
def heuristic_score(mat):
number_of_empty_cells = len(empty_cells(mat))
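    # blend corner-weighted monotonicity, the amount of free space and the current max tile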
    score = monotonicity(mat) * 1.5 + number_of_empty_cells * 2 + getMaxTile(mat)
return score
def monotonicity(grid):
grid_mask = [[2048, 1024, 256, 64],
[1024, 256, 64, 16],
[256, 64, 16, 4],
[64, 16, 4, 1]]
monotonicity_score = 0
for row in range(3):
for column in range(3):
monotonicity_score += grid[row][column] * grid_mask[row][column]
return monotonicity_score
def distance(mat, max_tile):
dis = None
for x in range(len(mat)):
if dis:
break
for y in range(len(mat)):
if max_tile == mat[x][y]:
if max_tile < 1024:
dis = -((abs(x - 0) + abs(y - 0)) * max_tile)
else:
dis = -((abs(x - 0) + abs(y - 0)) * (max_tile / 2))
break
return dis
def a_maximize(mat, alpha, beta, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
maxUtility = -float('inf')
d = ['up', 'down', 'left', 'right']
for direction in d:
c = deepcopy(mat)
try:
            c, done, _ = move(c, direction)
if done:
maxUtility = max(maxUtility, a_minimize(c, alpha, beta, depth-1 ))
except IndexError:
print("error-----------------------------------------------------------------------------")
continue
alpha = max(maxUtility, alpha)
if alpha >= beta:
break
return maxUtility
def alphaBeta(grid, max, startDepth):
if max:
return a_maximize(grid, -float('inf'), float('inf'), startDepth)
else:
return a_minimize(grid, -float('inf'), float('inf'), startDepth)
def minimax(grid, max, startDepth):
if max:
return maximize(grid, startDepth)
else:
return minimize(grid, startDepth)
def maximize(mat, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
maxUtility = -float('inf')
d = ['up', 'down', 'left', 'right']
for direction in d:
c = deepcopy(mat)
try:
            c, done, _ = move(c, direction)
if done:
maxUtility = max(maxUtility, minimize(c, depth - 1))
except IndexError:
continue
return maxUtility
def minimize(mat, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
minUtility = float('inf')
emptyCells = empty_cells(mat)
children = []
for c in emptyCells:
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 2)
children.append(gridCopy)
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 4)
children.append(gridCopy)
for child in children:
minUtility = min(minUtility, maximize(child, depth - 1))
# print minUtility
return minUtility
def a_minimize(mat, alpha, beta, depth):
if game_state(mat)=='lose' or depth == 0:
return heuristic_score(mat)
minUtility = float('inf')
emptyCells = empty_cells(mat)
children = []
for c in emptyCells:
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 2)
children.append(gridCopy)
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 4)
children.append(gridCopy)
for child in children:
minUtility = min(minUtility, a_maximize(child, alpha, beta, depth - 1))
if minUtility <= alpha:
break
beta = min(minUtility, beta)
# print minUtility
return minUtility
def montecarlo(mat, initialScore):
scores = []
for i in range(0, 100):
directions = ['up', 'down', 'left', 'right']
direction = directions[random.randint(0, len(directions) - 1)]
newMat = deepcopy(mat)
gameScore = initialScore
while game_state(newMat)!='lose':
try:
newMat, done, score = move(newMat, direction)
newMat = new_tile(newMat)
gameScore+=score+heuristic_score(mat)
except IndexError:
break
scores.append(gameScore)
return sum(scores)/len(scores)
def expectimax(mat, depth, maximizer):
if depth==0:
return heuristic_score(mat)
if maximizer:
currentValue = -1
d = ['up', 'down', 'left', 'right']
for direction in d:
newBoard = deepcopy(mat)
newBoard, done, score = move(newBoard, direction)
calculatedValue = expectimax(newBoard, depth - 1, False)
if calculatedValue > currentValue:
currentValue = calculatedValue
return currentValue
else:
number = 0
sum_value = 0
emptyCells = empty_cells(mat)
children = []
for c in emptyCells:
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 2)
children.append(gridCopy)
gridCopy = deepcopy(mat)
gridCopy = set_tile(gridCopy, c[0], c[1], 4)
children.append(gridCopy)
for child in children:
sum_value+= expectimax(child, depth-1, True)
number+=1
if number == 0:
return expectimax(mat, depth-1, True)
return (sum_value/number)
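# Illustrative driver sketch (not part of the original): pick the move whose resulting board
# has the best expectimax value at a fixed search depth.
#
#   best_dir, best_val = None, -float('inf')
#   for d in ['up', 'down', 'left', 'right']:
#       candidate, moved, _ = move(deepcopy(mat), d)
#       if moved:
#           val = expectimax(candidate, 3, False)  # a chance node follows the player's move
#           if val > best_val:
#               best_dir, best_val = d, val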
def set_tile(mat, row, col, value):
"""
Set the tile at position row, col to have the given value.
"""
mat[row][col] = value
return mat
def move(game, direction):
if(direction=="up"):
return up(game)
elif direction=="down":
return down(game)
# down(game)
elif direction == "left":
return left(game)
elif direction=="right":
return right(game)
def up(game):
# print("up")
# return matrix after shifting up
game=transpose(game)
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
done=done or temp[1]
score = temp[2]
game=cover_up(game)[0]
game=transpose(game)
return (game,done, score)
def down(game):
# print("down")
game=reverse(transpose(game))
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
score = temp[2]
done=done or temp[1]
game=cover_up(game)[0]
game=transpose(reverse(game))
return (game,done, score)
def left(game):
# print("left")
# return matrix after shifting left
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
score = temp[2]
done=done or temp[1]
game=cover_up(game)[0]
return (game,done, score)
def right(game):
# print("right")
# return matrix after shifting right
game=reverse(game)
game,done=cover_up(game)
temp=merge(game)
game=temp[0]
score = temp[2]
done=done or temp[1]
game=cover_up(game)[0]
game=reverse(game)
return (game,done, score)
| 3.625 | 4 |
archived/soc_038_monthly_asylum_requests/contents/src/__init__.py | Taufiq06/nrt-scripts | 6 | 12791311 | import os
import logging
import sys
from collections import OrderedDict, defaultdict
import datetime
import cartosql
import requests
import json
# Constants
LATEST_URL = 'http://popdata.unhcr.org/api/stats/asylum_seekers_monthly.json?year={year}'
CARTO_TABLE = 'soc_038_monthly_asylum_requests'
CARTO_SCHEMA = OrderedDict([
('_UID', 'text'),
('date', 'timestamp'),
('country', 'text'),
('value_type', 'text'),
('num_people', 'numeric'),
('some_stats_confidential', 'text')
])
UID_FIELD = '_UID'
TIME_FIELD = 'date'
DATA_DIR = 'data'
LOG_LEVEL = logging.INFO
DATE_FORMAT = '%Y-%m-%d'
CLEAR_TABLE_FIRST = False
# Limit 1M rows, drop older than 20yrs
MAXROWS = 1000000
MAXAGE = datetime.datetime.today().year - 20
DATASET_ID = 'de24a492-acee-4345-9073-bbbe991f6ede'
def lastUpdateDate(dataset, date):
apiUrl = 'http://api.resourcewatch.org/v1/dataset/{0}'.format(dataset)
headers = {
'Content-Type': 'application/json',
'Authorization': os.getenv('apiToken')
}
body = {
"dataLastUpdated": date.isoformat()
}
try:
r = requests.patch(url = apiUrl, json = body, headers = headers)
logging.info('[lastUpdated]: SUCCESS, '+ date.isoformat() +' status code '+str(r.status_code))
return 0
except Exception as e:
logging.error('[lastUpdated]: '+str(e))
def genUID(date, country, valuetype):
'''Generate unique id'''
return '{}_{}_{}'.format(country, date, valuetype)
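# e.g. genUID('2018-12-31', 'Syria', 'country_of_origin') -> 'Syria_2018-12-31_country_of_origin'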
def insertIfNew(data, year, valuetype,
existing_ids, new_ids, new_rows,
unknown_vals, date_format=DATE_FORMAT):
'''Loop over months in the data, add to new rows if new'''
last_day = [31,28,31,30,31,30,31,31,30,31,30,31]
for cntry in data:
for month, val in data[cntry].items():
date = datetime.datetime(year=year, month=month, day=last_day[month-1]).strftime(date_format)
UID = genUID(date, cntry, valuetype)
if UID not in existing_ids + new_ids:
new_ids.append(UID)
if month in unknown_vals[cntry]:
                    logging.debug('Some stats confidential for {} in {}-{}'.format(cntry, year, month))
values = [UID, date, cntry, valuetype, val, True]
else:
logging.debug('All known stats released for {} in {}-{}'.format(cntry, year, month))
values = [UID, date, cntry, valuetype, val, False]
new_rows.append(values)
def processNewData(existing_ids):
'''
Iterively fetch parse and post new data
'''
year = datetime.datetime.today().year
new_count = 1
new_ids = []
try:
while year > MAXAGE and new_count:
            # fetch and parse data for each year; stop when there are no new results or the year limit is reached
# 1. Fetch new data
logging.info("Fetching data for year {}".format(year))
r = requests.get(LATEST_URL.format(year=year))
data = r.json()
logging.debug('data: {}'.format(data))
# 2. Collect Totals
origins = defaultdict(lambda: defaultdict(int))
asylums = defaultdict(lambda: defaultdict(int))
unknown_vals_origins = defaultdict(list)
unknown_vals_asylums = defaultdict(list)
for obs in data:
try:
origins[obs['country_of_origin']][obs['month']] += obs['value']
except Exception as e:
                    logging.debug("Error processing value {} for country of origin {} in {}-{}. Value skipped (treated as 0). Error: {}".format(obs['value'],obs['country_of_origin'],year,obs['month'],e))
unknown_vals_origins[obs['country_of_origin']].append(obs['month'])
origins[obs['country_of_origin']][obs['month']] += 0
try:
asylums[obs['country_of_asylum']][obs['month']] += obs['value']
except Exception as e:
                    logging.debug("Error processing value {} for country of asylum {} in {}-{}. Value skipped (treated as 0). Error: {}".format(obs['value'],obs['country_of_asylum'],year,obs['month'],e))
unknown_vals_asylums[obs['country_of_asylum']].append(obs['month'])
asylums[obs['country_of_asylum']][obs['month']] += 0
# 3. Create Unique IDs, create new rows
new_rows = []
logging.debug('Create data about places of origin for year {}'.format(year))
insert_kwargs = {
'data':origins,'year':year,'valuetype':'country_of_origin',
'existing_ids':existing_ids,'new_ids':new_ids,'new_rows':new_rows,
'unknown_vals':unknown_vals_origins
}
insertIfNew(**insert_kwargs)
logging.debug('Create data about places of asylum for year {}'.format(year))
insert_kwargs.update(data=asylums,
valuetype='country_of_asylum',
unknown_vals=unknown_vals_asylums)
insertIfNew(**insert_kwargs)
# 4. Insert new rows
new_count = len(new_rows)
if new_count:
logging.info('Pushing {} new rows'.format(new_count))
cartosql.insertRows(CARTO_TABLE, CARTO_SCHEMA.keys(),
CARTO_SCHEMA.values(), new_rows)
# Decrement year
year -= 1
except json.decoder.JSONDecodeError:
logging.info('API is still down.')
num_new = len(new_ids)
return num_new
##############################################################
# General logic for Carto
# should be the same for most tabular datasets
##############################################################
def createTableWithIndex(table, schema, id_field, time_field=''):
'''Get existing ids or create table'''
cartosql.createTable(table, schema)
cartosql.createIndex(table, id_field, unique=True)
if time_field:
cartosql.createIndex(table, time_field)
def getIds(table, id_field):
'''get ids from table'''
r = cartosql.getFields(id_field, table, f='csv')
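    # The CSV response starts with a header row and ends with an empty string after the split, hence the [1:-1] slice.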
return r.text.split('\r\n')[1:-1]
def deleteExcessRows(table, max_rows, time_field, max_age=''):
'''Delete excess rows by age or count'''
num_dropped = 0
if isinstance(max_age, datetime.datetime):
max_age = max_age.isoformat()
# 1. delete by age
if max_age:
r = cartosql.deleteRows(table, "{} < '{}'".format(time_field, max_age))
num_dropped = r.json()['total_rows']
# 2. get sorted ids (old->new)
r = cartosql.getFields('cartodb_id', table, order='{}'.format(time_field),
f='csv')
ids = r.text.split('\r\n')[1:-1]
# 3. delete excess
if len(ids) > max_rows:
r = cartosql.deleteRowsByIDs(table, ids[:-max_rows])
num_dropped += r.json()['total_rows']
if num_dropped:
logging.info('Dropped {} old rows from {}'.format(num_dropped, table))
def get_most_recent_date(table):
r = cartosql.getFields(TIME_FIELD, table, f='csv', post=True)
dates = r.text.split('\r\n')[1:-1]
dates.sort()
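    # A plain string sort is chronological here because the timestamps are zero-padded '%Y-%m-%d %H:%M:%S' strings.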
most_recent_date = datetime.datetime.strptime(dates[-1], '%Y-%m-%d %H:%M:%S')
return most_recent_date
def main():
logging.basicConfig(stream=sys.stderr, level=LOG_LEVEL)
logging.info('STARTING')
if CLEAR_TABLE_FIRST:
logging.info('Clearing table')
cartosql.deleteRows(CARTO_TABLE, 'cartodb_id IS NOT NULL', user=os.getenv('CARTO_USER'),
key=os.getenv('CARTO_KEY'))
# 1. Check if table exists and create table
existing_ids = []
if cartosql.tableExists(CARTO_TABLE):
logging.info('Fetching existing ids')
existing_ids = getIds(CARTO_TABLE, UID_FIELD)
else:
logging.info('Table {} does not exist, creating'.format(CARTO_TABLE))
createTableWithIndex(CARTO_TABLE, CARTO_SCHEMA, UID_FIELD, TIME_FIELD)
    # 2. Iteratively fetch, parse and post new data
num_new = processNewData(existing_ids)
existing_count = num_new + len(existing_ids)
logging.info('Total rows: {}, New: {}, Max: {}'.format(
existing_count, num_new, MAXROWS))
# 3. Remove old observations
deleteExcessRows(CARTO_TABLE, MAXROWS, TIME_FIELD, datetime.datetime(year=MAXAGE, month=1, day=1))
# Get most recent update date
most_recent_date = get_most_recent_date(CARTO_TABLE)
lastUpdateDate(DATASET_ID, most_recent_date)
logging.info('SUCCESS')
| 2.484375 | 2 |
core/domain/feedback_jobs_one_off_test.py | bching/oppia | 1 | 12791312 | <filename>core/domain/feedback_jobs_one_off_test.py
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for feedback-related jobs."""
import ast
from core.domain import feedback_jobs_one_off
from core.domain import feedback_services
from core.domain import subscription_services
from core.platform import models
from core.tests import test_utils
(feedback_models,) = models.Registry.import_models([models.NAMES.feedback])
taskqueue_services = models.Registry.import_taskqueue_services()
class FeedbackThreadMessagesCountOneOffJobTest(test_utils.GenericTestBase):
"""Tests for the one-off feedback thread message counter job."""
EXP_ID_1 = 'eid1'
EXP_ID_2 = 'eid2'
EXPECTED_THREAD_DICT = {
'status': u'open',
'state_name': u'a_state_name',
'summary': None,
'original_author_username': None,
'subject': u'a subject'
}
USER_EMAIL = '<EMAIL>'
USER_USERNAME = 'user'
def setUp(self):
super(FeedbackThreadMessagesCountOneOffJobTest, self).setUp()
self.signup(self.USER_EMAIL, self.USER_USERNAME)
self.user_id = self.get_user_id_from_email(self.USER_EMAIL)
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.EXP_ID_1, self.owner_id, title='Bridges in England',
category='Architecture', language_code='en')
self.save_new_valid_exploration(
self.EXP_ID_2, self.owner_id, title='<NAME>',
category='Architecture', language_code='fi')
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.create_new() # pylint: disable=line-too-long
feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.enqueue(
job_id)
self.assertEqual(
self.count_jobs_in_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_tasks()
stringified_output = (
feedback_jobs_one_off.FeedbackThreadMessagesCountOneOffJob.get_output( # pylint: disable=line-too-long
job_id))
eval_output = [ast.literal_eval(stringified_item)
for stringified_item in stringified_output]
return eval_output
def test_message_count(self):
"""Test if the job returns the correct message count."""
feedback_services.create_thread(
self.EXP_ID_1, self.EXPECTED_THREAD_DICT['state_name'],
self.user_id, self.EXPECTED_THREAD_DICT['subject'],
'not used here')
feedback_services.create_thread(
self.EXP_ID_2, self.EXPECTED_THREAD_DICT['state_name'],
self.user_id, self.EXPECTED_THREAD_DICT['subject'],
'not used here')
thread_ids = subscription_services.get_all_threads_subscribed_to(
self.user_id)
self._run_one_off_job()
thread_summaries, _ = feedback_services.get_thread_summaries(
self.user_id, thread_ids)
        # Check that the first thread has only one message.
self.assertEqual(thread_summaries[0]['total_message_count'], 1)
        # Check that the second thread has only one message.
self.assertEqual(thread_summaries[1]['total_message_count'], 1)
feedback_services.create_message(
self.EXP_ID_1, thread_ids[0].split('.')[1], self.user_id, None,
None, 'editor message')
self._run_one_off_job()
thread_summaries, _ = feedback_services.get_thread_summaries(
self.user_id, thread_ids)
        # Check that the first thread now has two messages.
self.assertEqual(thread_summaries[0]['total_message_count'], 2)
# Get the first message so that we can delete it and check the error
# case.
first_message_model = (
feedback_models.FeedbackMessageModel.get(
self.EXP_ID_1, thread_ids[0].split('.')[1], 0))
first_message_model.delete()
output = self._run_one_off_job()
# Check if the quantities have the correct values.
self.assertEqual(output[0][1]['message_count'], 1)
self.assertEqual(output[0][1]['next_message_id'], 2)
| 1.789063 | 2 |
web_py/server.py | ovvladimir/Servers | 0 | 12791313 | <reponame>ovvladimir/Servers<gh_stars>0
# https://webpy.org/docs/0.3/tutorial
# https://iximiuz.com/ru/posts/over-9000-ways-to-make-web-server-in-python/
# https://www.pyimagesearch.com/2019/04/15/live-video-streaming-over-network-with-opencv-and-imagezmq/
# python server.py 1234
import web
urls = (
'/', 'index'
)
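# web.py routing: requests to '/' are dispatched to the index class below.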
class index:
def GET(self):
return "Hello, world!"
if __name__ == "__main__":
app = web.application(urls, globals())
app.run()
| 2.59375 | 3 |
tests/test_units.py | wiris/py-path-signature | 0 | 12791314 | import json
import os
import numpy as np
import pytest
from py_path_signature.data_models.stroke import Stroke
from py_path_signature.path_signature_extractor import PathSignatureExtractor
from .conftest import TEST_DATA_INPUT_DIR, TEST_DATA_REFERENCE_DIR
@pytest.mark.parametrize(
"input_strokes, expected_bounding_box",
[
(
[{"x": [1, 2, 3], "y": [1, 2, 3]}],
(1, 1, 2, 2),
),
(
[{"x": [0, 1, 2, 3], "y": [1, 2, 3, 4]}, {"x": [6, 8, 2, 3], "y": [0, 2, 3, 9]}],
(0, 0, 9, 8),
),
(
[
{"x": [714, 1], "y": [3, 4]},
{"x": [6, 8], "y": [0, 9]},
{"x": [100, 8], "y": [10, 9]},
],
(0, 1, 10, 713),
),
],
)
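# Judging by the expected tuples, the bounding box format appears to be (y_min, x_min, height, width).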
def test_bounding_box(input_strokes, expected_bounding_box):
strokes = [Stroke(**stroke) for stroke in input_strokes]
bounding_box = PathSignatureExtractor.calculate_bounding_box(strokes=strokes)
assert bounding_box == expected_bounding_box
def list_test_cases():
return [
os.path.splitext(case)[0]
for case in os.listdir(TEST_DATA_INPUT_DIR)
if os.path.isfile(os.path.join(TEST_DATA_INPUT_DIR, case))
]
@pytest.fixture(scope="function", params=list_test_cases())
def strokes_and_reference_signature(request):
test_case = request.param
with open(os.path.join(TEST_DATA_INPUT_DIR, f"{test_case}.json")) as f:
strokes = json.load(f)
with open(os.path.join(TEST_DATA_REFERENCE_DIR, f"{test_case}.json")) as f:
path_signature = np.array(json.load(f))
return (strokes, path_signature)
@pytest.fixture(scope="class")
def path_signature_extractor():
path_signature_extractor = PathSignatureExtractor(
order=2, rendering_size=(128, -1), min_rendering_dimension=5, max_aspect_ratio=30, delta=5
)
return path_signature_extractor
def test_image_signatures(path_signature_extractor, strokes_and_reference_signature):
input_strokes, path_signature_groundtruth = strokes_and_reference_signature
strokes = [Stroke(**stroke) for stroke in input_strokes]
path_signature = path_signature_extractor.extract_signature(strokes=strokes)
assert (path_signature == path_signature_groundtruth).all()
| 2.3125 | 2 |
ami/gunicorn.conf.py | NCKU-CCS/energy-blockchain | 0 | 12791315 | <reponame>NCKU-CCS/energy-blockchain
# pylint: skip-file
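# Gunicorn settings for the AMI uploader: bind to every interface on port 4000,
# run four workers, allow 120 s per request, and send error/access logs to stderr/stdout ("-").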
bind = "0.0.0.0:4000"
workers = 4
timeout = 120
proc_name = "AMI-Uploader"
errorlog = "-"
loglevel = "info"
accesslog = "-"
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
| 1.34375 | 1 |
tests/broker/test_del_building.py | ned21/aquilon | 7 | 12791316 | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2008,2009,2010,2011,2012,2013,2014,2015,2016,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing the del building command."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
class TestDelBuilding(TestBrokerCommand):
def test_100_del_bu(self):
self.dsdb_expect_del_campus_building("ny", "bu")
self.dsdb_expect("delete_building_aq -building bu")
command = "del building --building bu"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_100_del_ex(self):
self.dsdb_expect_del_campus_building("ta", "cards")
self.dsdb_expect("delete_building_aq -building cards")
command = "del building --building cards"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_100_del_tu(self):
self.dsdb_expect_del_campus_building("ln", "tu")
self.dsdb_expect("delete_building_aq -building tu")
command = "del building --building tu"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_110_del_bunotindsdb(self):
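        # A building that exists in the broker but is missing from DSDB should still be
        # deletable; the broker only logs a warning and proceeds.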
self.dsdb_expect("add_building_aq -building_name bz -city ex "
"-building_addr Nowhere")
self.dsdb_expect_add_campus_building("ta", "bz")
command = ["add", "building", "--building", "bz", "--city", "ex",
"--address", "Nowhere"]
self.noouttest(command)
self.dsdb_verify()
dsdb_command = "delete_building_aq -building bz"
errstr = "bldg bz doesn't exists"
self.dsdb_expect(dsdb_command, True, errstr)
self.dsdb_expect_del_campus_building("ta", "bz")
command = "del building --building bz"
err = self.statustest(command.split(" "))
self.matchoutput(err,
"DSDB does not have building bz defined, proceeding.",
command)
self.dsdb_verify()
def test_120_add_nettest_net(self):
self.net.allocate_network(self, "nettest_net", 24, "unknown",
"building", "nettest",
comments="Made-up network")
def test_121_del_nettest_fail(self):
# try delete building
command = "del building --building nettest"
err = self.badrequesttest(command.split(" "))
self.matchoutput(err,
"Bad Request: Could not delete building nettest, "
"networks were found using this location.",
command)
self.dsdb_verify(empty=True)
def test_122_cleanup_nettest_net(self):
self.net.dispose_network(self, "nettest_net")
def test_130_del_nettest(self):
self.dsdb_expect_del_campus_building("ny", "nettest")
self.dsdb_expect("delete_building_aq -building nettest")
command = "del building --building nettest"
self.noouttest(command.split(" "))
self.dsdb_verify()
def test_200_del_building_notexist(self):
command = "del building --building bldg-not-exist"
out = self.notfoundtest(command.split(" "))
self.matchoutput(out, "Building bldg-not-exist not found.",
command)
def test_300_verify_bu(self):
command = "show building --building bu"
self.notfoundtest(command.split(" "))
def test_300_verify_tu(self):
command = "show building --building tu"
self.notfoundtest(command.split(" "))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestDelBuilding)
unittest.TextTestRunner(verbosity=2).run(suite)
| 2.25 | 2 |
ocellaris/solver_parts/bdm.py | TormodLandet/Ocellaris | 1 | 12791317 | <reponame>TormodLandet/Ocellaris
# Copyright (C) 2015-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
from dolfin import FiniteElement, VectorElement, MixedElement, FunctionSpace, VectorFunctionSpace
from dolfin import FacetNormal, TrialFunction, TestFunction, TestFunctions, Function
from dolfin import dot, as_vector, dx, dS, ds, LocalSolver
class VelocityBDMProjection:
def __init__(
self,
simulation,
w,
incompressibility_flux_type='central',
D12=None,
degree=None,
use_bcs=True,
use_nedelec=True,
):
"""
Implement equation 4a and 4b in "Two new techniques for generating exactly
incompressible approximate velocities" by <NAME> (2009)
For each element K in the mesh:
<u⋅n, φ> = <û⋅n, φ> ∀ ϕ ∈ P_{k}(F) for any face F ∈ ∂K
(u, ϕ) = (w, ϕ) ∀ φ ∈ P_{k-2}(K)^2
(u, ϕ) = (w, ϕ) ∀ φ ∈ {ϕ ∈ P_{k}(K)^2 : ∇⋅ϕ = 0 in K, ϕ⋅n = 0 on ∂K}
Here w is the input velocity function in DG2 space and û is the flux at
each face. P_{x} is the space of polynomials of order k
The flux type can be 'central' or 'upwind'
"""
self.simulation = simulation
simulation.log.info(' Setting up velocity BDM projection')
V = w[0].function_space()
ue = V.ufl_element()
gdim = w.ufl_shape[0]
if degree is None:
pdeg = ue.degree()
Vout = V
else:
pdeg = degree
Vout = FunctionSpace(V.mesh(), 'DG', degree)
pg = (pdeg, gdim)
assert ue.family() == 'Discontinuous Lagrange'
assert incompressibility_flux_type in ('central', 'upwind')
if use_nedelec and pdeg > 1:
a, L, V = self._setup_projection_nedelec(
w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim
)
elif gdim == 2 and pdeg == 1:
a, L, V = self._setup_dg1_projection_2D(w, incompressibility_flux_type, D12, use_bcs)
elif gdim == 2 and pdeg == 2:
a, L, V = self._setup_dg2_projection_2D(w, incompressibility_flux_type, D12, use_bcs)
else:
raise NotImplementedError(
'VelocityBDMProjection does not support ' 'degree %d and dimension %d' % pg
)
# Pre-factorize matrices and store for usage in projection
self.local_solver = LocalSolver(a, L)
self.local_solver.factorize()
self.temp_function = Function(V)
self.w = w
# Create function assigners
self.assigners = []
for i in range(gdim):
self.assigners.append(dolfin.FunctionAssigner(Vout, V.sub(i)))
def _setup_dg1_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs):
"""
Implement the projection where the result is BDM embeded in a DG1 function
"""
sim = self.simulation
k = 1
gdim = 2
mesh = w[0].function_space().mesh()
V = VectorFunctionSpace(mesh, 'DG', k)
W = FunctionSpace(mesh, 'DGT', k)
n = FacetNormal(mesh)
v1 = TestFunction(W)
u = TrialFunction(V)
# The same fluxes that are used in the incompressibility equation
if incompressibility_flux_type == 'central':
u_hat_dS = dolfin.avg(w)
elif incompressibility_flux_type == 'upwind':
w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0
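            # Upwind switch: 1 where flow leaves the '+' side of the facet, 0 otherwise,
            # so u_hat takes the velocity from the upwind cell.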
switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0)
u_hat_dS = switch * w('+') + (1 - switch) * w('-')
if D12 is not None:
u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n)
# Equation 1 - flux through the sides
a = L = 0
for R in '+-':
a += dot(u(R), n(R)) * v1(R) * dS
L += dot(u_hat_dS, n(R)) * v1(R) * dS
# Eq. 1 cont. - flux through external boundaries
a += dot(u, n) * v1 * ds
if use_bcs:
for d in range(gdim):
dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d]
neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])
robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])
outlet_bcs = sim.data['outlet_bcs']
for dbc in dirichlet_bcs:
u_bc = dbc.func()
L += u_bc * n[d] * v1 * dbc.ds()
for nbc in neumann_bcs + robin_bcs + outlet_bcs:
if nbc.enforce_zero_flux:
pass # L += 0
else:
L += w[d] * n[d] * v1 * nbc.ds()
for sbc in sim.data['slip_bcs'].get('u', []):
pass # L += 0
else:
L += dot(w, n) * v1 * ds
# Equation 2 - internal shape : empty for DG1
# Equation 3 - BDM Phi : empty for DG1
return a, L, V
def _setup_dg2_projection_2D(self, w, incompressibility_flux_type, D12, use_bcs):
"""
Implement the projection where the result is BDM embeded in a DG2 function
"""
sim = self.simulation
k = 2
gdim = 2
mesh = w[0].function_space().mesh()
V = VectorFunctionSpace(mesh, 'DG', k)
n = FacetNormal(mesh)
# The mixed function space of the projection test functions
e1 = FiniteElement('DGT', mesh.ufl_cell(), k)
e2 = VectorElement('DG', mesh.ufl_cell(), k - 2)
e3 = FiniteElement('Bubble', mesh.ufl_cell(), 3)
em = MixedElement([e1, e2, e3])
W = FunctionSpace(mesh, em)
v1, v2, v3b = TestFunctions(W)
u = TrialFunction(V)
# The same fluxes that are used in the incompressibility equation
if incompressibility_flux_type == 'central':
u_hat_dS = dolfin.avg(w)
elif incompressibility_flux_type == 'upwind':
w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0
switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0)
u_hat_dS = switch * w('+') + (1 - switch) * w('-')
if D12 is not None:
u_hat_dS += dolfin.Constant([D12, D12]) * dolfin.jump(w, n)
# Equation 1 - flux through the sides
a = L = 0
for R in '+-':
a += dot(u(R), n(R)) * v1(R) * dS
L += dot(u_hat_dS, n(R)) * v1(R) * dS
# Eq. 1 cont. - flux through external boundaries
a += dot(u, n) * v1 * ds
if use_bcs:
for d in range(gdim):
dirichlet_bcs = sim.data['dirichlet_bcs']['u%d' % d]
neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])
robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])
outlet_bcs = sim.data['outlet_bcs']
for dbc in dirichlet_bcs:
u_bc = dbc.func()
L += u_bc * n[d] * v1 * dbc.ds()
for nbc in neumann_bcs + robin_bcs + outlet_bcs:
if nbc.enforce_zero_flux:
pass # L += 0
else:
L += w[d] * n[d] * v1 * nbc.ds()
for sbc in sim.data['slip_bcs'].get('u', []):
pass # L += 0
else:
L += dot(w, n) * v1 * ds
# Equation 2 - internal shape
a += dot(u, v2) * dx
L += dot(w, v2) * dx
# Equation 3 - BDM Phi
v3 = as_vector([v3b.dx(1), -v3b.dx(0)]) # Curl of [0, 0, v3b]
a += dot(u, v3) * dx
L += dot(w, v3) * dx
return a, L, V
def _setup_projection_nedelec(self, w, incompressibility_flux_type, D12, use_bcs, pdeg, gdim):
"""
Implement the BDM-like projection using Nedelec elements in the test function
"""
sim = self.simulation
k = pdeg
mesh = w[0].function_space().mesh()
V = VectorFunctionSpace(mesh, 'DG', k)
n = FacetNormal(mesh)
# The mixed function space of the projection test functions
e1 = FiniteElement('DGT', mesh.ufl_cell(), k)
e2 = FiniteElement('N1curl', mesh.ufl_cell(), k - 1)
em = MixedElement([e1, e2])
W = FunctionSpace(mesh, em)
v1, v2 = TestFunctions(W)
u = TrialFunction(V)
# The same fluxes that are used in the incompressibility equation
if incompressibility_flux_type == 'central':
u_hat_dS = dolfin.avg(w)
elif incompressibility_flux_type == 'upwind':
w_nU = (dot(w, n) + abs(dot(w, n))) / 2.0
switch = dolfin.conditional(dolfin.gt(w_nU('+'), 0.0), 1.0, 0.0)
u_hat_dS = switch * w('+') + (1 - switch) * w('-')
if D12 is not None:
u_hat_dS += dolfin.Constant([D12] * gdim) * dolfin.jump(w, n)
# Equation 1 - flux through the sides
a = L = 0
for R in '+-':
a += dot(u(R), n(R)) * v1(R) * dS
L += dot(u_hat_dS, n(R)) * v1(R) * dS
# Eq. 1 cont. - flux through external boundaries
a += dot(u, n) * v1 * ds
if use_bcs:
for d in range(gdim):
dirichlet_bcs = sim.data['dirichlet_bcs'].get('u%d' % d, [])
neumann_bcs = sim.data['neumann_bcs'].get('u%d' % d, [])
robin_bcs = sim.data['robin_bcs'].get('u%d' % d, [])
outlet_bcs = sim.data['outlet_bcs']
for dbc in dirichlet_bcs:
u_bc = dbc.func()
L += u_bc * n[d] * v1 * dbc.ds()
for nbc in neumann_bcs + robin_bcs + outlet_bcs:
if nbc.enforce_zero_flux:
pass # L += 0
else:
L += w[d] * n[d] * v1 * nbc.ds()
for sbc in sim.data['slip_bcs'].get('u', []):
pass # L += 0
else:
L += dot(w, n) * v1 * ds
# Equation 2 - internal shape using 'Nedelec 1st kind H(curl)' elements
a += dot(u, v2) * dx
L += dot(w, v2) * dx
return a, L, V
def run(self, w=None):
"""
Perform the projection based on the current state of the Function w
"""
# Find the projected velocity
self.local_solver.solve_local_rhs(self.temp_function)
# Assign to w
w = self.w if w is None else w
U = self.temp_function.split()
for i, a in enumerate(self.assigners):
a.assign(w[i], U[i])
| 2.1875 | 2 |
extractor_de_aspectos/tests/extractor/test_extractor_de_aspectos.py | XrossFox/maquina-de-aspectos | 0 | 12791318 | import sys
sys.path.append('../../extractor_de_aspectos')
import unittest
from extractor import extractor_de_aspectos
from cliente_corenlp import cliente_corenlp
from lematizador import lematizador
import nltk
class Test(unittest.TestCase):
def setUp(self):
self.ex = extractor_de_aspectos.ExtractorDeAspectos()
self.cliente = cliente_corenlp.ClienteCoreNLP()
self.lemas = lematizador.Lematizador()
def test_extractor_recibe_arbol_de_dependencias(self):
"""
Para poder extraer los aspectos, primero se necesita pasar como argumento el arbol de dependencias
que resuelve el Stanford CoreNLP.
Prueba que el método extraer levante una excepcion si no recibe el arbol de aspectos en fora de una lista
(la salida que ofrece cliente_corenlp.resolver_dependencias).
"""
com = "i am a valid comment."
diccionario = dict()
arbol = None
pos_lem = list()
with self.assertRaises(Exception):
self.ex.extraer(com, diccionario, arbol, pos_lem)
def test__buscar_en_tupla_pos_lem(self):
"""
Prueba el método auxiliar que es usado para buscar el lema o la palabra de una tupla pos_lem
dado una posición.
Se espera que de la tupla en la posición 1, devuelve el lema 'be'.
"""
tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),
('comment', 'NN', 'comment'), ('.', '.', None)]
indice = 1
resultado = self.ex._buscar_en_tupla_pos_lem(indice, tupla_pos_lem)
resultado_esperado = 'be'
self.assertEqual(resultado, resultado_esperado)
def test__buscar_en_tupla_pos_lem_2(self):
"""
Prueba el método auxiliar que es usado para buscar el lema o la palabra de una tupla pos_lem
dado una posición.
Se espera que de la tupla en la posición 3, devuelve la palabra 'a', ya que el lema es None.
"""
tupla_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),
('comment', 'NN', 'comment'), ('.', '.', None)]
indice = 3
resultado = self.ex._buscar_en_tupla_pos_lem(indice-1, tupla_pos_lem)
resultado_esperado = 'a'
self.assertEqual(resultado, resultado_esperado)
def test__es_aspecto_1(self):
"""
Prueba el método auxiliar que es usado para determinar si una palabra esta en el diccionario de aspectos.
Se espera que la palabra 'comment' sea determinado como aspecto 'comment'.
"""
palabra = 'comment'
diccionario = {"comment":["comment"]}
resultado = self.ex._es_aspecto(palabra, diccionario)
self.assertEqual("comment", resultado)
def test__es_aspecto_2(self):
"""
Prueba el método auxiliar que es usado para determinar si una palabra esta en el diccionario de aspectos.
Se espera que la palabra 'review' sea determinado como aspecto 'comment'.
"""
palabra = 'comment'
diccionario = {"comment":["comment", "review"]}
resultado = self.ex._es_aspecto(palabra, diccionario)
self.assertEqual("comment", resultado)
def test__es_aspecto_3(self):
"""
Prueba el método auxiliar que es usado para determinar si una palabra esta en el diccionario de aspectos.
Se espera que la palabra 'review' no sea determinado como aspecto y devuelva None.
"""
palabra = 'review'
diccionario = {"comment":["comment"]}
resultado = self.ex._es_aspecto(palabra, diccionario)
self.assertEqual(None, resultado)
def test__amod_1(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "amod".
Se espera una tupla ("comment", "valid")
"""
indice_raiz = 5
indice_nodo = 4
lista_pos_lem = [('i', 'LS', None), ('am', 'VBP', 'be'), ('a', 'DT', None), ('valid', 'JJ', 'valid'),
('comment', 'NN', 'comment'), ('.', '.', None)]
diccionario_de_aspectos = {"comment":["comment"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("comment", "valid")
self.assertEqual(res, res_esperado)
def test__amod_2(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "amod".
Se espera una tupla ("cyclone", "red")
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
diccionario_de_aspectos = {"cyclone":["cyclone"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("cyclone", "red")
self.assertEqual(res, res_esperado)
def test__amod_3(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "amod".
Se espera None
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
diccionario_de_aspectos = {"not":["ok"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = None
self.assertEqual(res, res_esperado)
def test__amod_4(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "amod".
Se espera None
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),
('dobj', 1, 4), ('punct', 1, 5)]
diccionario_de_aspectos = {"not":["ok"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,
arbol_de_dependencias=arbol_de_dependencias)
res_esperado = None
self.assertEqual(res, res_esperado)
def test__amod_5(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "amod".
Se espera una tupla ("cyclone", "red")
"""
indice_raiz = 4
indice_nodo = 3
lista_pos_lem = [('im', 'VB', None), ('the', 'DT', None), ('red', 'JJ', None),
('cyclone', 'NN', None), ('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 1), ('det', 4, 2), ('amod', 4, 3),
('dobj', 1, 4), ('punct', 1, 5)]
diccionario_de_aspectos = {"cyclone":["cyclone"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("cyclone", "red")
self.assertEqual(res, res_esperado)
def test__advmod_1(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "advmod".
Se espera que regrese el adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia').
"""
# ultimately, it's a sheep
indice_raiz = 6
indice_nodo = 1
lista_pos_lem = [('ultimately', 'RB', None), (',', ',', None), ('it', 'PRP', None),
("'s", 'VBZ', None), ('a', 'DT', None), ('sheep', 'NN', None)]
diccionario_de_aspectos = {"sheep": ["sheep"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("sheep","ultimately")
self.assertEqual(res_esperado, res)
def test__advmod_2(self):
"""
Prueba el método auxiliar _extraer_dependencia que se ejecuta cuando se encuentra una dependencia con la etiqueta "advmod".
Se espera que regrese el adverbio del sustantivo en una tupla: ('sustantivo', 'dependencia').
"""
# do you dream of perfectly electric sheep, lately?
indice_raiz = 3
indice_nodo = 9
lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
('?', '.', None)]
diccionario_de_aspectos = {"Dream": ["dream"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos)
res_esperado = ("Dream","lately")
self.assertEqual(res_esperado, res)
def test__amod_advmod(self):
"""
En algunas ocaciones, adjetivos de un sustantivo poseen su propio adverbio. Esta prueba espera que
al encontrar una dependencia amod que tiene su propio advmod, se devuelvan ambos en un solo string.
Se espera ("sheep", "perfectly electric")
"""
# do you dream of perfectly electric sheep, lately?
indice_raiz = 7
indice_nodo = 6
lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
('?', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),
('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8),
('advmod', 3, 9), ('punct', 3, 10)]
diccionario_de_aspectos = {"Sheep": ["sheep"]}
res = self.ex._extraer_dependencia(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos,
arbol_de_dependencias=arbol_de_dependencias)
res_esperado = ("Sheep","perfectly electric")
self.assertEqual(res_esperado, res)
def test_extraer_dependencia_doble_1(self):
"""
Prueba el método auxiliar que busca dependencias de dependencias. Debe encontrar el advmod
del adjetivo 'electric'. Se espera que devuelva 'perfectly'.
"""
indice_nodo = 6
lista_pos_lem = [('do', 'VB', None), ('you', 'PRP', None), ('dream', 'NN', None),
('of', 'IN', None), ('perfectly', 'RB', None), ('electric', 'JJ', None),
('sheep', 'NN', None), (',', ',', None), ('lately', 'RB', None),
('?', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 3), ('aux', 3, 1), ('nsubj', 3, 2), ('case', 7, 4),
('advmod', 6, 5), ('amod', 7, 6), ('nmod', 3, 7), ('punct', 3, 8),
('advmod', 3, 9), ('punct', 3, 10)]
res_esperado = "perfectly"
res = self.ex._extraer_dependencia_doble(indice_nodo, lista_pos_lem, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test__neg_1(self):
"""
Prueba el método auxiliar que busca negaciones. Debe encontrar la negacion
del sustantivos 'example'. Se espera que devuelva ('example','not').
"""
lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),
('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),
('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2),
('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
diccionario_de_aspectos = {"example": ["example"]}
indice_raiz = 6
indice_nodo = 3
res_esperado = ("example", "not")
res = self.ex._extraer_dependencia(indice_raiz=indice_raiz, indice_nodo=indice_nodo,
lista_pos_lem=lista_pos_lem,
diccionario_de_aspectos=diccionario_de_aspectos,
arbol_de_dependencias=arbol_de_dependencias)
self.assertEqual(res,res_esperado)
def test__nsub_1(self):
"""
Prueba el método auxiliar que busca sujetos nominales. Debe encontrar el adjetivo y adverbio
del sustantivo 'cats'. Se espera que devuelva ('cats', "really cute").
"""
lista_pos_lem = [('black', 'JJ', None), ('cats', 'NNS', None), ('are', 'VBP', None),
('really', 'RB', None),
('cute', 'JJ', None), ('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 5), ('amod', 2, 1), ('nsubj', 5, 2),
('cop', 5, 3), ('advmod', 5, 4), ('punct', 5, 6)]
diccionario_de_aspectos = {"cats":["cats"]}
indice_raiz = 5
indice_nodo = 2
res_esperado = ("cats", "really cute")
res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test__nsub_2(self):
"""
Prueba el método auxiliar que busca sujetos nominales. Como el sujeto nominas no va de un adjetivo
a un sustantivo, debe regresar None.
"""
lista_pos_lem = [('this', 'DT', None), ('is', 'VBZ', None), ('not', 'RB', None),
('a', 'DT', None), ('good', 'JJ', None), ('example', 'NN', None),
('.', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('cop', 6, 2),
('neg', 6, 3), ('det', 6, 4), ('amod', 6, 5), ('punct', 6, 7)]
diccionario_de_aspectos = {"example": ["example"]}
indice_raiz = 6
indice_nodo = 1
res_esperado = None
res = self.ex._extraer_nsubj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test_extractor_1(self):
"""
Dado el siguiente comentario: i am a valid comment.
Debe devolver el adjetivo 'valid' del aspecto 'comment'
"""
com = "i am a valid comment."
diccionario = {"comment":["comment"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"comment":["valid"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_2(self):
"""
Dado el siguiente comentario: im the red cyclone.
Debe devolver {"cyclone":["red"]}
"""
com = "im the red cyclone."
diccionario = {"cyclone":["cyclone"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"cyclone":["red"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_3(self):
"""
Dado el siguiente comentario: do you dream of perfectly electric sheep, lately?
Debe devolver {"dream":["dream"],"sheep":["sheep"]}
"""
com = "do you dream of perfectly electric sheep, lately?"
diccionario = {"dream":["dream"],
"sheep":["sheep"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"dream":["lately"], "sheep":["perfectly electric"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_4(self):
"""
Dado el siguiente comentario: ultimately, it's a sheep
Debe devolver {"sheep":["ultimately"]}
"""
com = "ultimately, it's a sheep"
diccionario = {"sheep":["sheep"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"sheep":["ultimately"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_5(self):
"""
Dado el siguiente comentario: black cats are really cute.
Debe devolver {"cats":["black"," really cute"]}
"""
com = "black cats are really cute."
diccionario = {"cats":["cat", "cats"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"cats":["black","really cute"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_6(self):
"""
Dado el siguiente comentario: i really love black cats.
Debe devolver {"cats":["black"}
"""
com = "i really love black cats."
diccionario = {"cats":["cat", "cats"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"cats":["black"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_7(self):
"""
Dado el siguiente comentario: this is not a good example.
Debe devolver {"example":["not good"]}
"""
com = "this is not a good example."
diccionario = {"example":["example"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"example":["not", "good"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_8(self):
"""
Dado el siguiente comentario: They sent him the same, wrong item.
Debe devolver {"item":["same","wrong"]}
"""
com = "They sent him the same, wrong item."
diccionario = {"item":["item", "items"]}
arbol = self.cliente.resolver_dependencias(com)
etiquetas_pos = self.cliente.etiquetar_texto(com)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
diccionario_esperado = {"item":["same","wrong"]}
dic_resultado = self.ex.extraer(diccionario, arbol, lista_pos_lem)
print(diccionario_esperado)
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_9(self):
"""
Pruebas con comentarios reales
"""
com = "Usually I have good experiences with Amazon and its customer service reps, but after todays online customer service chat I am horrified at some of the people Amazon employs. Enter employee Ruchitha. I was trying to get a print out label for my roommate since he doesn't have Prime and isn't really internet savvy. After he had bought a dvd that wasn't playable in the country, he called customer service and a rep said they were going to send him the correct one. They sent him the same, wrong item. So he had 2 returns to do."
diccionario = {"experience":["experiences","experience"],"Amazon":["Amazon","amazon"],
"item":["item","items"]}
sentencias = nltk.sent_tokenize(com)
dic_resultado = dict()
for sentencia in sentencias:
arbol = self.cliente.resolver_dependencias(sentencia)
etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
dic_resultado = self._combinar_dict(res, dic_resultado)
diccionario_esperado = {"experience":["good"],
"Amazon":[],
"item":["same","wrong"]
}
self.assertEqual(diccionario_esperado, dic_resultado)
def test_extractor_10(self):
"""
Pruebas con comentarios reales
"""
com = "There was a time I was a super-Amazon fan-boy, but those days are long past. If AMZ is good at one thing these days, it is finding new and innovated ways to anger their customers. I try to find the best deal with products all the time and use what discounts where I can. Apparently, AMZ does not like this and has taken to locking people out of their ability to comment on products if they feel you are not paying the top price. Today I had the simplest question about a feature on an item I bought on AMZ, but cannot ask the question as apparently, I am persona non grata these days. I got the product with a discount via research on the net."
diccionario = {"fan-boy":["fan-boy"],"Amazon":["Amazon","amazon","AMZ"],
"question":["question"], "thing":["thing", "things"],
"way":["way","ways"], "deal":["deal","deals"],
"price":["prices", "price"],}
sentencias = nltk.sent_tokenize(com)
dic_resultado = dict()
for sentencia in sentencias:
arbol = self.cliente.resolver_dependencias(sentencia)
etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
dic_resultado = self._combinar_dict(res, dic_resultado)
diccionario_esperado = {"fan-boy":["super-Amazon"],
"Amazon":["good"],
"question":["simple"],
"thing":["good"],
"way":["new"],
"deal":["best"],
"price":["top"]
}
self.assertEqual(diccionario_esperado, dic_resultado)
def test__conj_1(self):
"""
Método aúxiliar para manejar las conjunciones de un sustantivo a un adverbio/adjetivo
"""
lista_pos_lem = [('I', 'PRP', None), ('have', 'VBP', None), ('been', 'VBN', None),
('a', 'DT', None), ('Prime', 'JJ', None), ('member', 'NN', None),
('for', 'IN', None), ('years', 'NNS', None), ('and', 'CC', None),
('always', 'RB', None), ('received', 'VBD', None), ('my', 'PRP$', None),
('merchandise', 'NN', None), ('in', 'IN', None), ('the', 'DT', None),
('desired', 'JJ', None), ('time', 'NN', None), ('frame', 'NN', None),
(',', ',', None), ('but', 'CC', None), ('no', 'DT', None),
('more', 'JJR', None), ('!!', '.', None)]
arbol_de_dependencias = [('ROOT', 0, 6), ('nsubj', 6, 1), ('aux', 6, 2),
('cop', 6, 3), ('det', 6, 4), ('amod', 6, 5),
('case', 8, 7), ('nmod', 6, 8), ('cc', 6, 9),
('advmod', 11, 10), ('conj', 6, 11), ('nmod:poss', 13, 12),
('dobj', 11, 13), ('case', 18, 14), ('det', 18, 15),
('amod', 18, 16), ('compound', 18, 17), ('nmod', 11, 18),
('punct', 6, 19), ('cc', 6, 20), ('neg', 22, 21),
('conj', 6, 22), ('punct', 6, 23)]
diccionario_de_aspectos = {"Member":["member"]}
indice_raiz = 6
indice_nodo = 22
res_esperado = ("Member", "no more")
res = self.ex._extraer_conj(indice_raiz, indice_nodo, lista_pos_lem, diccionario_de_aspectos, arbol_de_dependencias)
self.assertEqual(res_esperado, res)
def test_extractor_11(self):
"""
Pruebas con comentarios reales
"""
com = "Prime 2 day shipping seems to be a thing of the past. I have been a Prime member for years and always received my merchandise in the desired time frame, but no more!! I have had numerous conversations with customer service and supervisors. All they do is give me the runaround and tell me their policy has not changed. \"Two day shipping starts when the item leaves the warehouse\". They can't ship if the items are not in their warehouses, seemly blaming the vendors. Shame on you Amazon for not telling the truth. To save money, Amazon no longer uses reliable trucking companies to move merchandise from vendors warehousing to Amazon warehouses. They can't ship what's not available. Nice way to save a buck. But keep taking our membership money for services you no longer can provide."
diccionario = {"Member":["member","Member"],
"Shipping":["shipping","Shipping"],
}
sentencias = nltk.sent_tokenize(com)
dic_resultado = dict()
for sentencia in sentencias:
arbol = self.cliente.resolver_dependencias(sentencia)
etiquetas_pos = self.cliente.etiquetar_texto(sentencia)
lista_pos_lem = self.lemas.lematizar_tuplas(etiquetas_pos)
res = self.ex.extraer(diccionario, arbol, lista_pos_lem)
dic_resultado = self._combinar_dict(res, dic_resultado)
diccionario_esperado = {"Member":["Prime", "no more"],
"Shipping":["day"],
}
self.assertEqual(diccionario_esperado, dic_resultado)
def test_quitar_palabras(self):
"""
Prueba el metodo quitar_palabras. Se espera que elimine toda palabra que no tenga una etiqueta POS
de adverbio, sustantivo o negacion.
"""
texto = "do you dream of perfectly electric sheep, lately?"
res = self.ex.quitar_palabras(texto)
texto_esperado = "perfectly electric lately"
self.assertEqual(res, texto_esperado)
def test_quitar_palabras_2(self):
"""
Prueba el metodo quitar_palabras. Se espera que elimine toda palabra que no tenga una etiqueta POS
de adverbio, sustantivo o negacion.
"""
texto = "don't say no to cookies, never again"
res = self.ex.quitar_palabras(texto)
texto_esperado = "n't no never again"
self.assertEqual(res, texto_esperado)
def test_quitar_palabras_3(self):
"""
Prueba el metodo quitar_palabras. Se espera que elimine toda palabra que no tenga una etiqueta POS
de adverbio, sustantivo o negacion.
"""
texto = "black cats are really cute."
res = self.ex.quitar_palabras(texto)
texto_esperado = "black really cute"
self.assertEqual(res, texto_esperado)
def test__purgar_palabras_pos(self):
"""
Método auxiliar que es el que recorre las lista de tuplas para eliminar las palabras innecesarias.
"""
texto = "don't say no to cookies, never again"
lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
res = self.ex._purgar_palabras_pos(lista_pos_lem)
tupla_esperada = [("n't", 'RB', "n't"),('no', 'DT', None),
('never', 'RB', "never"), ('again', 'RB', "again")]
self.assertEqual(res, tupla_esperada)
def test__unir_palabras(self):
"""
Método auxiliar que une las palabras de la lista de tuplas.
"""
texto = "don't say no to cookies, never again"
lista_pos_lem = self.lemas.lematizar_tuplas(self.cliente.etiquetar_texto(texto))
tupla_purgada = self.ex._purgar_palabras_pos(lista_pos_lem)
res = self.ex._unir_palabras(tupla_purgada)
texto_esperado = "n't no never again"
self.assertEqual(res, texto_esperado)
def _combinar_dict(self, dict1, dict2):
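        # Test helper: merge the per-aspect adjective lists from dict1 into dict2.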
for llave in dict1:
if llave in dict2.keys():
dict2[llave].extend(dict1[llave])
else:
dict2[llave] = dict1[llave]
return dict2
def tearDown(self):
self.cliente.cerrar_servicio()
self.ex.cerrar()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | 2.8125 | 3 |
rbm.py | JonasWechsler/NeuralNetsLab4 | 0 | 12791319 | <reponame>JonasWechsler/NeuralNetsLab4<gh_stars>0
import numpy as np
import csv
import plot
from sklearn.neural_network import BernoulliRBM
def error(a, b):
return (a != b).sum()
def percent_error(a, b):
return sum(error(a[i], b[i]) for i in range(len(a)))/float(len(a)*len(a[0]))
def gen_even_slices(n, n_packs, n_samples=None):
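    # Yield n_packs contiguous slices that together cover range(n); the first n % n_packs
    # slices get one extra element, and slices are optionally clipped to n_samples.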
start = 0
if n_packs < 1:
raise ValueError("gen_even_slices got n_packs=%s, must be >=1"
% n_packs)
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def reformat_data(data):
return data.reshape((28, 28))
def run(train_data, test_data):
batch_size=10
n_samples = np.array(train_data).shape[0]
n_batches = int(np.ceil(float(n_samples) / batch_size))
batch_slices = list(gen_even_slices(n_batches * batch_size, n_batches, n_samples))
nodes = [50, 75, 100, 150]
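    # For each hidden-unit count, train an RBM with partial_fit over the mini-batches for 20
    # passes and record the Gibbs reconstruction error on the test set after every pass.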
for item in nodes:
errors = []
model = BernoulliRBM(n_components=item, learning_rate=0.1, batch_size=10, n_iter=1,
random_state=None, verbose=1)
for _ in range(20):
for batch_slice in batch_slices:
model.partial_fit(train_data[batch_slice])
errors.append(percent_error(model.gibbs(test_data), test_data))
plot.plot_points(errors)
plot.plot_heatmap(reformat_data(test_data[0]))
plot.plot_heatmap(reformat_data(model.gibbs(test_data)[0]))
if item == 50 or item == 100:
plot.plot_heatmap(model.__dict__['components_'].reshape(item,784))
if __name__ == "__main__":
train_data = []
test_data = []
with open('binMNIST_data\\bindigit_trn.csv') as f:
reader = csv.reader(f)
for row in reader:
train_data.append(np.array([int(_) for _ in row]))
with open('binMNIST_data\\bindigit_tst.csv') as f:
reader = csv.reader(f)
for row in reader:
test_data.append(np.array([int(_) for _ in row]))
run(train_data, test_data) | 2.75 | 3 |
play_snake.py | Disi77/Snake | 1 | 12791320 | # SNAKE GAME
import pyglet
from pyglet import gl
from pyglet.window import key
from images_load import batch
from game_state import Game_state
from field import game_field
time_to_move = [0.7]
def on_key_press(symbol, modifiers):
'''
User press key for setting snake direction.
'''
if symbol == key.LEFT:
game_state.direction = (-1, 0)
if symbol == key.RIGHT:
game_state.direction = (1, 0)
if symbol == key.UP:
game_state.direction = (0, 1)
if symbol == key.DOWN:
game_state.direction = (0, -1)
if symbol == key.ENTER:
game_state.keys.append(('enter', 0))
def on_key_release(symbol, modifiers):
'''
On key release.
'''
if symbol == key.ENTER:
game_state.keys.clear()
def on_draw():
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glColor3f(1, 1, 1)
gl.glLineWidth(4)
x1 = game_field.origin_xy0_game_field[0]
y1 = game_field.origin_xy0_game_field[1]
x2 = game_field.origin_xy1_game_field[0]
y2 = game_field.origin_xy1_game_field[1]
draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1))
x1 = game_field.origin_xy0_menu[0]
y1 = game_field.origin_xy0_menu[1]
x2 = game_field.origin_xy1_menu[0]
y2 = game_field.origin_xy1_menu[1]
draw_polygon((x1, y1), (x1, y2), (x2, y2), (x2, y1))
batch.draw()
menu_text()
if game_state.state == 'dead':
dead_text()
if game_state.state == 'game_over':
game_over_text()
def dead_text():
draw_text('For continue set right direction',
x=game_field.size_window()[0]//2,
y=game_field.size_window()[1]//2-100,
size=30,
anchor_x='center')
def menu_text():
draw_text('in Python',
x=game_field.origin_xy0_menu[0]+25,
y=game_field.origin_xy1_menu[1]-130,
size=16,
anchor_x='left')
draw_text('Move with ← ↓ ↑ →',
x=game_field.origin_xy0_menu[0]+300,
y=game_field.origin_xy1_menu[1]-50,
size=16,
anchor_x='left')
draw_text('Eat Apples',
x=game_field.origin_xy0_menu[0]+300,
y=game_field.origin_xy1_menu[1]-80,
size=16,
anchor_x='left')
draw_text('Don\'t eat walls or yourself.',
x=game_field.origin_xy0_menu[0]+300,
y=game_field.origin_xy1_menu[1]-110,
size=16,
anchor_x='left')
draw_text(str(game_state.lifes),
x=game_field.origin_xy1_menu[0]-70,
y=game_field.origin_xy1_menu[1]-65,
size=30,
anchor_x='left')
draw_text(str(len(game_state.snake_xy)),
x=game_field.origin_xy1_menu[0]-70,
y=game_field.origin_xy1_menu[1]-115,
size=30,
anchor_x='left')
def game_over_text():
draw_text('GAME OVER',
x=game_field.size_window()[0]//2,
y=game_field.size_window()[1]//2-100,
size=30,
anchor_x='center')
draw_text('Press ENTER',
x=game_field.size_window()[0]//2,
y=game_field.size_window()[1]//2-140,
size=20,
anchor_x='center')
def move(t):
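    # Clock callback: count down until the next move, advance the game state when the timer
    # expires, and shorten the interval as the snake grows (never below 0.2 s).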
time_to_move[0] -= t
if time_to_move[0] < 0:
game_state.move(t)
if game_state.state == 'game_over' and ('enter', 0) in game_state.keys:
game_state.restart_conditions()
time = max(0.7 - 0.05 * int(len(game_state.snake_xy))/3, 0.2)
time_to_move[0] = time
def reset():
game_state = Game_state()
game_state.draw_snake_parts()
return game_state
def draw_polygon(xy1, xy2, xy3, xy4):
'''
Draw polygon.
'''
    gl.glBegin(gl.GL_LINE_LOOP)
    gl.glVertex2f(int(xy1[0]), int(xy1[1]))
    gl.glVertex2f(int(xy2[0]), int(xy2[1]))
    gl.glVertex2f(int(xy3[0]), int(xy3[1]))
    gl.glVertex2f(int(xy4[0]), int(xy4[1]))
    gl.glEnd()
def draw_text(text, x, y, size, anchor_x):
'''
Draw text in playfield.
'''
text = pyglet.text.Label(
text,
font_name='Arial',
font_size=size,
x=x, y=y, anchor_x=anchor_x)
text.draw()
window = pyglet.window.Window(game_field.size_window()[0], game_field.size_window()[1])
game_state = reset()
window.push_handlers(
on_draw=on_draw,
on_key_press=on_key_press,
)
pyglet.clock.schedule_interval(move, 1/30)
pyglet.clock.schedule_interval(game_state.add_food, 5)
pyglet.app.run()
| 2.609375 | 3 |
src/test/py/ltprg/game/snli/data/annotate_sua_nlp.py | forkunited/ltprg | 11 | 12791321 | <reponame>forkunited/ltprg
import sys
import mung.nlp.corenlp
input_data_dir = sys.argv[1]
output_data_dir = sys.argv[2]
annotator = mung.nlp.corenlp.CoreNLPAnnotator('$.[state, utterance]', 'contents', 'nlp')
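# Presumably: annotate the 'contents' field of each state/utterance object with CoreNLP output
# stored under 'nlp', writing annotated copies of the input directory in batches of 100
# (exact behaviour depends on the mung library).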
annotator.annotate_directory(input_data_dir, output_data_dir, id_key="id", batch=100)
| 2.03125 | 2 |
auto-brightness-service.py | sheinz/auto-brightness | 1 | 12791322 | #!/usr/bin/env python
import dbus
import dbus.service
import sys
import signal
from PyQt4 import QtCore
from dbus.mainloop.qt import DBusQtMainLoop
from notifier import Notifier
from als import AmbientLightSensor
from brightnessctrl import BrightnessCtrl
class AutoBrightnessService(dbus.service.Object):
def __init__(self):
path = '/com/github/sheinz/autobrightness'
bus_loop = DBusQtMainLoop(set_as_default=True)
self._bus = dbus.SessionBus(mainloop=bus_loop)
name = dbus.service.BusName('com.github.sheinz.autobrightness',
bus=self._bus)
dbus.service.Object.__init__(self, name, path)
self.notifier = Notifier(self._bus)
self._auto = False
self._als = AmbientLightSensor()
self._br_ctrl = BrightnessCtrl(self._bus)
self._process_timer = QtCore.QTimer()
self._process_timer.timeout.connect(self.process)
@property
def auto(self):
return self._auto
@auto.setter
def auto(self, value):
self._auto = value
self.notifier.auto_brightness(self._auto)
if self._auto:
self._als.start()
self._br_ctrl.start()
self._process_timer.start(1000)
else:
self._als.stop()
self._br_ctrl.stop()
self._process_timer.stop()
def process(self):
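        # Read the ambient light sensor, use the reading directly as the screen brightness,
        # and enable the keyboard backlight only when the reading drops below 5.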
value = self._als.get_value()
print('Light sensor: %d' % value)
if value == 0:
value = 1
self._br_ctrl.set_screen_brightness(value)
if value < 5:
self._br_ctrl.set_keyboard_light(True)
else:
self._br_ctrl.set_keyboard_light(False)
def stop(self):
self._process_timer.stop()
self._als.stop()
self._br_ctrl.stop()
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def up(self):
value = self._br_ctrl.screen_brightness_up()
self.notifier.brightness(value)
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def down(self):
value = self._br_ctrl.screen_brightness_down()
self.notifier.brightness(value)
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def auto_toggle(self):
self.auto = not self.auto
@dbus.service.method(dbus_interface='com.github.sheinz.autobrightness')
def exit(self):
sys.exit()
class Application(QtCore.QCoreApplication):
def __init__(self, argv):
super(Application, self).__init__(argv)
self._auto_br = AutoBrightnessService()
def event(self, e):
return super(Application, self).event(e)
def quit(self):
self._auto_br.stop()
super(Application, self).quit()
def main():
app = Application(sys.argv)
app.startTimer(1000)
signal.signal(signal.SIGINT, lambda *args: app.quit())
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 2.25 | 2 |
lecture_03_functional_programming/hw/task01.py | OlivkaFromHell/epam_python_autumn_2020 | 1 | 12791323 | """
In the previous homework (task 4), you wrote a cache function that remembers another function's output value.
Modify it to be a parametrized decorator, so that the following code::
@cache(times=3)
def some_function():
pass
Would give out the cached value up to `times` times only.
Example::
@cache(times=2)
def f():
return input('? ') # careful with input() in python2, use raw_input() instead
>> f()
? 1
'1'
>> f() # will remember previous value
'1'
>> f() # but use it up to two times only
'1'
>> f()
? 2
'2'
"""
import inspect
from typing import Callable
def cache(times: int) -> Callable:
"""Cache decorator which returns func result n times"""
cached_values = {}
def _cache(func: Callable) -> Callable:
def wrapper(*args, **kwargs):
bound = inspect.signature(func).bind(*args, **kwargs)
bound.apply_defaults()
key = str(bound.arguments)
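            # Normalize positional/keyword arguments into one key so equivalent calls share a cache entry.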
if key not in cached_values:
cached_values[key] = [func(*args, **kwargs), times+1]
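            # Serve the cached value while the countdown stays above 1; the final allowed call
            # returns it one last time and evicts the entry so the next call recomputes.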
if cached_values[key][1] > 1:
cached_values[key][1] -= 1
return cached_values[key][0]
result = cached_values[key][0]
del cached_values[key]
return result
return wrapper
return _cache
| 4.28125 | 4 |
osc_tui/imageGrid.py | outscale-mdr/osc-tui | 5 | 12791324 | <reponame>outscale-mdr/osc-tui
import npyscreen
import pyperclip
import time
import createVm
import main
import popup
import selectableGrid
import virtualMachine
class ImageGrid(selectableGrid.SelectableGrid):
def __init__(self, screen, *args, **keywords):
super().__init__(screen, *args, **keywords)
self.col_titles = ["Name", "Id", "Description", "Type", "Owner"]
def on_selection(line):
popup.editImage(self.form, line)
self.on_selection = on_selection
def refresh(self, name_filter=None):
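        # Build an ImageNames filter when a name is given, then rebuild the grid rows from the ReadImages response.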
filter = {'Filters' : {'ImageNames' : [name_filter]}} if name_filter is not None else {}
groups = main.GATEWAY.ReadImages(**filter)['Images']
values = list()
for g in groups:
values.append([g['ImageName'], g['ImageId'], g['Description'],
g['ImageType'], g['AccountAlias'] if 'AccountAlias' in g else "Me"])
self.values = values
| 2.203125 | 2 |
test_QandT.py | Jul-Tedyputro/python-sample-vscode-flask-tutorial | 0 | 12791325 | def test_eggplantGUI():
print ('Mr Moritz is in action')
assert False
| 1.265625 | 1 |
src/04_Mokaro/register_new_user.py | UltiRequiem/Basic-Selenium-whit-Python | 3 | 12791326 | <filename>src/04_Mokaro/register_new_user.py
import unittest
from selenium import webdriver
from api_data_mock import ApiDataMock
class RegisterNewUser(unittest.TestCase):
def setUp(self):
self.driver = webdriver.Chrome(executable_path='./../chromedriver')
driver = self.driver
driver.implicitly_wait(10)
driver.maximize_window()
driver.get('http://demo-store.seleniumacademy.com/customer/account/create')
def test_new_user(self):
driver = self.driver
self.assertEqual('Create New Customer Account', driver.title)
first_name = driver.find_element_by_id('firstname')
middle_name = driver.find_element_by_id('middlename')
last_name = driver.find_element_by_id('lastname')
email_address = driver.find_element_by_id('email_address')
password = driver.find_element_by_id('password')
confirm_password = driver.find_element_by_id('confirmation')
news_letter_subscription = driver.find_element_by_id('is_subscribed')
submit_button = driver.find_element_by_xpath('//*[@id="form-validate"]/div[2]/button/span/span')
self.assertTrue(first_name.is_enabled()
and middle_name.is_enabled()
and last_name.is_enabled()
and email_address.is_enabled()
and password.is_enabled()
and confirm_password.is_enabled()
and news_letter_subscription.is_enabled()
and submit_button.is_enabled())
first_name.send_keys(ApiDataMock.first_name)
middle_name.send_keys(ApiDataMock.middle_name)
last_name.send_keys(ApiDataMock.last_name)
email_address.send_keys(ApiDataMock.email_address)
password.send_keys(ApiDataMock.password)
        confirm_password.send_keys(ApiDataMock.password)
submit_button.click()
def tearDown(self):
self.driver.implicitly_wait(5)
self.driver.close()
if __name__ == '__main__':
unittest.main(verbosity=2)
| 2.625 | 3 |
backup_pca.py | Niels-vv/Safe-RL-With-DR | 1 | 12791327 | <gh_stars>1-10
import torch
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import IncrementalPCA as PCA
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class PCACompression:
def __init__(self, scalar, latent_space):
self.fileNames = []
self.pca_main = None
self.batch_size = 10000
self.pcaStatistic = PCA(batch_size = self.batch_size)
self.scaler = StandardScaler()
self.use_scalar = scalar
self.latent_space = latent_space
self.df = None
def create_pca(self, observations, get_statistics):
if self.use_scalar:
print("Fitting the scalar...")
self.scaler.fit(observations)
print("Transforming the scalar...")
self.df = self.scaler.transform(observations)
else:
self.df = observations
if get_statistics:
print("Fitting statistics PCA...")
self.pcaStatistic.fit(self.df)
def update_pca(self):
self.pca_main = PCA(n_components=self.latent_space, batch_size = self.batch_size)
print(f'Fitting final PCA on latent space {self.latent_space}')
self.pca_main.fit(self.df)
def state_dim_reduction(self, observation):
#state = []
#for obs in observation:
obs = observation.flatten()
if self.use_scalar:
obs = self.scaler.transform([obs])
else:
obs = [obs]
#state.append(self.pca_main.transform(obs)[0])
state = np.array(self.pca_main.transform(obs)[0])
return state
#return torch.tensor(state, dtype=torch.float, device=device)
def get_pca_dimension_info(self):
return np.cumsum(self.pcaStatistic.explained_variance_ratio_)
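# Illustrative usage sketch (not part of the original file); the random array below
# is a hypothetical stand-in for real flattened observations.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    observations = rng.normal(size=(100, 16))           # 100 observations, 16 features each
    pca = PCACompression(scalar=True, latent_space=4)
    pca.create_pca(observations, get_statistics=False)  # fits the scaler, skips the statistics PCA
    pca.update_pca()                                     # fits the 4-component IncrementalPCA
    reduced = pca.state_dim_reduction(observations[0].reshape(4, 4))
    print(reduced.shape)                                 # (4,)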
| 2.46875 | 2 |
Python/Doubly Linked List/DLNode.py | ooweihanchenoo/basicPrograms | 0 | 12791328 | class DLNode:
def __init__(self, init_data):
self.data = init_data
self.next = None
self.previous = None
def get_data(self):
return self.data
def get_next(self):
return self.next
def get_previous(self):
return self.previous
def set_data(self, new_data):
self.data = new_data
def set_next(self, new_next):
self.next = new_next
def set_previous(self, new_previous):
self.previous = new_previous
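# Illustrative usage sketch (not part of the original file): linking two nodes
# into a doubly linked pair and walking the links in both directions.
if __name__ == "__main__":
    head = DLNode(1)
    tail = DLNode(2)
    head.set_next(tail)
    tail.set_previous(head)
    print(head.get_next().get_data())      # 2
    print(tail.get_previous().get_data())  # 1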
| 2.9375 | 3 |
popupwindow_matplotlib.py | klincke/MicroWineBar | 4 | 12791329 | <reponame>klincke/MicroWineBar<gh_stars>1-10
import os, sys
from tkinter import *
from tkinter.ttk import *
from tkinter.filedialog import asksaveasfilename
import pandas as pd
import numpy as np
import tkinter.messagebox as tmb
from skbio.diversity.alpha import shannon
from .general_functions import *
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk
from matplotlib.figure import Figure
matplotlib.rcParams.update({'font.size': 10})
from scipy.spatial.distance import squareform
class PopUpIncludingMatplotlib():
def __init__(self, root, abundance_df, all_tax_levels):
self.root = root
self.abundance_df = abundance_df
self.all_tax_levels = all_tax_levels
self.HEIGHT = 400
self.COLOR_SCHEME = ['deepskyblue',
'forestgreen',
'navy',
'darkgoldenrod',
'steelblue4',
'blue2',
'seagreen',
'hotpink4',
'deeppink4',
'darkolivegreen4',
'turquoise4',
'gold3',
'dodger blue',
'turquoise3',
'mediumorchid4',
'royalblue1',
'red3',
'springgreen3',
'steelblue2',
'darkorange2',
'springgreen4',
'skyblue4',
'firebrick4']
def save_high_resolution_figure(self, fig, title, initialfile, defaultextension='.png'):
""" saves a figure in high resolution """
filename = asksaveasfilename(title=title, initialfile=initialfile, defaultextension=defaultextension, filetypes=(("PNG files","*.png"), ("EPS files","*.eps"), ("JPEG files","*.jpg"), ("TIFF files","*.tiff")))
fig.savefig(filename, dpi=600)
return filename
#def richness_groups(self, working_samples, samples_list, tax_level):
def richness_groups(self, working_samples, sample_names, tax_level, samples1, samples2, richness, samples1_label, samples2_label):
""" """
self.create_window()
self.top.title('Richness')
fig = Figure(figsize=(5,6), dpi=120)
ax = fig.add_subplot(111)
data = [richness[samples1].values, richness[samples2].values]
bp = ax.boxplot(data)
ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12)
ax.set_ylabel('richness', fontsize=12)
#add median text
medians = [med.get_ydata()[0] for med in bp['medians']]
median_labels = [str(np.round(med, 2)) for med in medians]
        # t-test (Welch's t-test does not assume equal variance)
from scipy.stats import ttest_ind
ttest_result = ttest_ind(richness[samples1].values, richness[samples2].values, equal_var=False)
ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))]
#fig.subplots_adjust(left=0.08, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2)
fig.set_tight_layout(True)
matplotlib_frame = Frame(self.frame)
matplotlib_frame.grid(row=2, column=0)
canvas = FigureCanvasTkAgg(fig, matplotlib_frame)
canvas.draw()
canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)
save_button = Button(self.frame, text="Save (high resolution)", command=lambda fig=fig, title='Richness', initialfile='richness_groups': self.save_high_resolution_figure(fig, title, initialfile))
save_button.grid(row=1, column=0)
return (median_labels, ttest_res)
def richness_all_samples(self, working_samples, samples_list, tax_level):
self.create_window()
self.top.title('Richness')
self.top.title('overview of richness of all samples on ' + tax_level + ' level')
self.inner_frame = Frame(self.frame)
self.inner_frame.grid(row=1, column=0, columnspan=4)
top_space = 20
width=600
if len(samples_list)> 20:
width = 1000
start_idx = len(self.all_tax_levels) - list(self.all_tax_levels).index(tax_level)
if self.abundance_df.groupAbsoluteSamples() is not None:
absolute_working_samples = self.abundance_df.groupAbsoluteSamples()
absolute_working_samples = absolute_working_samples[samples_list].astype('int')
richness = absolute_working_samples.astype(bool).sum(axis=0)
else:
richness = working_samples.astype(bool).sum(axis=0)[start_idx:-2]
fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True)
ax = fig.add_subplot(211)
bp = ax.boxplot(richness)
for val in richness:
x = np.random.normal(1, 0.04, 1)
ax.scatter(x, val, c='grey', marker='.', alpha=0.4)
ax.set_xticklabels([''])
ax.set_ylabel('number of ' + tax_level)
ax = fig.add_subplot(212)
for i,val in enumerate(richness):
ax.scatter(richness.index[i],val,marker='.')
ax.set_xticklabels(richness.index, fontsize=8, rotation='vertical')
ax.set_xlabel('samples')
ax.set_ylabel('number of ' + tax_level)
fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.2, wspace=0.2)
matplotlib_frame = Frame(self.frame)
matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2)
canvas = FigureCanvasTkAgg(fig, matplotlib_frame)
canvas.draw()
canvas.get_tk_widget().grid(row=1, column=0, columnspan=4)
save_button = Button(self.inner_frame, text="Save (high resolution)", command=lambda fig=fig, title='Richness', initialfile='richness_all_samples': self.save_high_resolution_figure(fig, title, initialfile))
save_button.grid(row=1, column=0)
def shannon_diversity_all_samples(self, working_samples, samples_list, tax_level):
from skbio.diversity.alpha import shannon
self.create_window()
self.top.title('Shannon diversity')
self.top.title('overview of Shannon index of all samples on ' + tax_level + ' level')
self.inner_frame = Frame(self.frame)
self.inner_frame.grid(row=1, column=0, columnspan=4)
top_space = 20
width=600
if len(samples_list)> 20:
width = 1000
#shannon index (alpha diversity)
if self.abundance_df.groupAbsoluteSamples() is not None:
absolut_working_samples = self.abundance_df.groupAbsoluteSamples()
absolut_working_samples = absolut_working_samples[samples_list].astype('int')
shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon)
else:
shannon0 = []
for sample in samples_list:
                shannon0.append(shannon_index(working_samples[sample].values))
shannon0 = pd.Series(shannon0, index=samples_list)
fig = Figure(figsize=(4,6), dpi=120)#, tight_layout=True)
ax = fig.add_subplot(211)
bp = ax.boxplot(shannon0)
        for val in shannon0:
            x = np.random.normal(1, 0.04, 1)
ax.scatter(x, val, c='grey', marker='.', alpha=0.4)
ax.set_xticklabels(['Shannon diversity'])
#ax.set_ylabel('number of species')
ax = fig.add_subplot(212)
for i,val in enumerate(shannon0):
ax.scatter(shannon0.index[i],val,marker='.')
ax.set_xticklabels(shannon0.index, fontsize=8, rotation='vertical')
ax.set_xlabel('samples')
ax.set_ylabel('Shannon diversity index')
fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.95, hspace=0.3, wspace=0.3)
matplotlib_frame = Frame(self.frame)
matplotlib_frame.grid(row=2, column=0, rowspan=2, columnspan=2)
canvas = FigureCanvasTkAgg(fig, matplotlib_frame)
canvas.draw()
canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)
save_button = Button(self.inner_frame, text="Save (high resolution)", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_all_samples': self.save_high_resolution_figure(fig, title, initialfile))
save_button.grid(row=1, column=0)
def shannon_diversity_groups(self, working_samples, sample_names, tax_level, samples1, samples2, shannon1, samples1_label, samples2_label):
""" """
self.create_window()
self.top.title('Shannon diversity')
if self.abundance_df.groupAbsoluteSamples() is not None:
absolut_working_samples = self.abundance_df.groupAbsoluteSamples()
absolut_working_samples = absolut_working_samples[sample_names].astype('int')
shannon0 = absolut_working_samples.loc[[tax+'_' for tax in list(working_samples[tax_level])]].apply(shannon)
else:
shannon0 = []
for sample in sample_names:
                shannon0.append(shannon_index(working_samples[sample].values))
shannon0 = pd.Series(shannon0, index=sample_names)
fig = Figure(figsize=(5,6), dpi=120)
ax = fig.add_subplot(111)
data = [shannon0[samples1].values, shannon0[samples2].values]
bp = ax.boxplot(data)
ax.set_xticklabels([samples1_label,samples2_label], rotation=45, fontsize=12)
ax.set_ylabel('Shannon diversity', fontsize=12)
#add median text
medians = [med.get_ydata()[0] for med in bp['medians']]
median_labels = [str(np.round(med, 2)) for med in medians]
from scipy.stats import ttest_ind
ttest_result = ttest_ind(shannon0[samples1].values, shannon0[samples2].values, equal_var=False)
ttest_res = ['T_stat: '+str(round(ttest_result[0],2)), 'p_val: '+str('{0:.0e}'.format(ttest_result[1]))]
#fig.subplots_adjust(left=0.1, right=0.98, bottom=0.2, top=0.97, hspace=0.2, wspace=0.2)
fig.set_tight_layout(True)
matplotlib_frame = Frame(self.frame)
matplotlib_frame.grid(row=2, column=0)
canvas = FigureCanvasTkAgg(fig, matplotlib_frame)
canvas.draw()
canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)
save_button = Button(self.frame, text="Save (high resolution)", command=lambda fig=fig, title='Shannon diversity', initialfile='shannon_groups': self.save_high_resolution_figure(fig, title, initialfile))
save_button.grid(row=1, column=0)
return (median_labels, ttest_res)
def beta_diversity_heatmap(self, working_samples, samples_list, tax_level):
""" """
from skbio.diversity import beta_diversity
import seaborn as sns
if self.abundance_df.groupAbsoluteSamples() is not None:
data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int')
ids = list(data0.columns)
data = data0.transpose().values.tolist()
bc_dm = beta_diversity("braycurtis", data, ids)
g = sns.clustermap(pd.DataFrame(bc_dm.data, index=ids, columns=ids), metric='braycurtis', annot_kws={"size": 8})
self.save_high_resolution_figure(g, 'Select file to save the beta diversity heatmap', 'beta_diversity_heatmap', defaultextension='.png')
import matplotlib.pyplot as plt
plt.close("all")
def cluster_heatmap(self, working_samples, samples_list, tax_level):
""" saves a cluster heatmap based on Aitchison distance and the y-axis labels"""
from skbio.stats.composition import clr
from skbio.stats.composition import multiplicative_replacement
import seaborn as sns
if self.abundance_df.groupAbsoluteSamples() is not None:
data0 = self.abundance_df.groupAbsoluteSamples()[samples_list].astype('int')
ids = list(data0.columns)
index0 = list(data0.index)
data1 = clr(data0.transpose().values.tolist())
mr_df = multiplicative_replacement(data0.T)
mr_clr = clr(mr_df)
mr_clr_df = pd.DataFrame(mr_clr.T, index=index0, columns=ids)
#g = sns.clustermap(mr_clr_df, metric="correlation", cmap="mako", robust=True, annot_kws={"size": 6})
g = sns.clustermap(mr_clr_df, metric="euclidean", cmap="mako", robust=True, annot_kws={"size": 6}, yticklabels=False)
filename = self.save_high_resolution_figure(g, 'Select file to save the cluster heatmap', 'cluster_heatmap', defaultextension='.png')
filename = ('.').join(filename.split('.')[:-1])
#save y-axis labels
y_labels = list(data0.iloc[g.dendrogram_row.reordered_ind].index)
with open(filename+'_yaxis_labels.txt', 'w') as f:
f.write('\n'.join([x.strip('_') for x in y_labels]))
import matplotlib.pyplot as plt
plt.close("all")
def pcoa(self, pco1_group2, pco1_group1, pco2_group2, pco2_group1, samples1_label, samples2_label, pc_nums, pca=False):
self.create_window()
if pca:
self.top.title('PCA - Principal Component Analysis')
method = 'PCA'
else:
self.top.title('PCoA - Principal Coordinate Analysis')
method = 'PCoA'
fig = Figure(figsize=(6,6), dpi=120)
ax = fig.add_subplot(111)
ax.scatter(x=pco1_group1, y=pco2_group1, c='darkgreen', label=samples1_label)
ax.scatter(x=pco1_group2, y=pco2_group2, c='cornflowerblue', label=samples2_label)
#if pca:
# ax.set_title('PCA')
#else:
# ax.set_title('PCoA')
ax.set_xlabel('PC'+str(pc_nums[0]+1), fontsize=12)
ax.set_ylabel('PC'+str(pc_nums[1]+1), fontsize=12)
ax.legend(loc='best', shadow=False, scatterpoints=1)
fig.subplots_adjust(left=0.14, right=0.98, bottom=0.1, top=0.95, hspace=0.4, wspace=0.3)
matplotlib_frame = Frame(self.frame)
matplotlib_frame.grid(row=2, column=0)
canvas = FigureCanvasTkAgg(fig, matplotlib_frame)
canvas.draw()
canvas.get_tk_widget().pack(side=BOTTOM, fill=BOTH, expand=True)
save_button = Button(self.frame, text="Save (high resolution)", command=lambda fig=fig, title=method, initialfile=method: self.save_high_resolution_figure(fig, title, initialfile))
save_button.grid(row=1, column=0)
def create_window(self):
""" creates a popup window """
self.top = Toplevel(self.root)
self.top.protocol("WM_DELETE_WINDOW", self.cancel)
self.top.attributes("-topmost", 1)
self.top.attributes("-topmost", 0)
self.top.columnconfigure(0, weight=1)
self.top.rowconfigure(0, weight=1)
self.frame = Frame(self.top)
self.frame.grid(row=0, column=0, sticky=N+S+W+E)
self.frame.grid_columnconfigure(0, weight=1)
self.frame.grid_rowconfigure(0, weight=1)
#self.top.title(self.name)
#self.top.minsize(width=666, height=666)
#self.top.maxsize(width=666, height=666)
self.top.focus_set()
def cancel(self, event=None):
""" destroys/closes pop up windows """
self.top.destroy()
| 2.15625 | 2 |
scanpy/datasets/__init__.py | mkmkryu/scanpy2 | 1,171 | 12791330 | """Builtin Datasets.
"""
from ._datasets import (
blobs,
burczynski06,
krumsiek11,
moignard15,
paul15,
toggleswitch,
pbmc68k_reduced,
pbmc3k,
pbmc3k_processed,
visium_sge,
)
from ._ebi_expression_atlas import ebi_expression_atlas
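# Illustrative usage sketch (not part of this module): the loaders re-exported above
# are normally reached through the top-level package, e.g.
#   import scanpy as sc
#   adata = sc.datasets.pbmc68k_reduced()  # returns an AnnData object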
| 0.992188 | 1 |
tests/test_edgeql_enums.py | sfermigier/edgedb | 7,302 | 12791331 | <gh_stars>1000+
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2019-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os.path
import edgedb
from edb.testbase import server as tb
class TestEdgeQLEnums(tb.QueryTestCase):
SCHEMA = os.path.join(os.path.dirname(__file__), 'schemas',
'enums.esdl')
async def test_edgeql_enums_cast_01(self):
await self.assert_query_result(
r'''
SELECT <color_enum_t>{'RED', 'GREEN', 'BLUE'};
''',
{'RED', 'GREEN', 'BLUE'},
)
async def test_edgeql_enums_cast_02(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'invalid input value for enum .+color_enum_t.+YELLOW'):
await self.con.execute(r'''
SELECT <color_enum_t>'YELLOW';
''')
async def test_edgeql_enums_cast_03(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'invalid input value for enum .+color_enum_t.+red'):
await self.con.execute(r'''
SELECT <color_enum_t>'red';
''')
async def test_edgeql_enums_cast_04(self):
with self.assertRaisesRegex(
edgedb.QueryError,
r"operator '\+\+' cannot be applied to operands of type "
r"'std::str' and 'default::color_enum_t'"):
await self.con.execute(r'''
INSERT Foo {
color := 'BLUE'
};
SELECT 'The test color is: ' ++ Foo.color;
''')
async def test_edgeql_enums_cast_05(self):
await self.con.execute(
r'''
INSERT Foo {
color := 'BLUE'
};
''')
await self.assert_query_result(
r'''
SELECT 'The test color is: ' ++ <str>Foo.color;
''',
['The test color is: BLUE'],
)
async def test_edgeql_enums_pathsyntax_01(self):
with self.assertRaisesRegex(
edgedb.QueryError,
"enum path expression lacks an enum member name"):
async with self._run_and_rollback():
await self.con.execute('SELECT color_enum_t')
with self.assertRaisesRegex(
edgedb.QueryError,
"enum path expression lacks an enum member name"):
async with self._run_and_rollback():
await self.con.execute(
'WITH e := color_enum_t SELECT e.RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"unexpected reference to link property 'RED'"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t@RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"enum types do not support backlink"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t.<RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"an enum member name must follow enum type name in the path"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t[IS color_enum_t].RED'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"invalid property reference on a primitive type expression"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t.RED.GREEN'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"invalid property reference on a primitive type expression"):
async with self._run_and_rollback():
await self.con.execute(
'WITH x := color_enum_t.RED SELECT x.GREEN'
)
with self.assertRaisesRegex(
edgedb.QueryError,
"enum has no member called 'RAD'",
_hint="did you mean 'RED'?"):
async with self._run_and_rollback():
await self.con.execute(
'SELECT color_enum_t.RAD'
)
async def test_edgeql_enums_pathsyntax_02(self):
await self.assert_query_result(
r'''
SELECT color_enum_t.GREEN;
''',
{'GREEN'},
)
await self.assert_query_result(
r'''
SELECT default::color_enum_t.BLUE;
''',
{'BLUE'},
)
await self.assert_query_result(
r'''
WITH x := default::color_enum_t.RED SELECT x;
''',
{'RED'},
)
async def test_edgeql_enums_assignment_01(self):
# testing the INSERT assignment cast
await self.con.execute(
r'''
INSERT Foo {
color := 'RED'
};
''')
await self.assert_query_result(
r'''
SELECT Foo {
color
};
''',
[{
'color': 'RED',
}],
)
async def test_edgeql_enums_assignment_02(self):
await self.con.execute(
r'''
INSERT Foo {
color := 'RED'
};
''')
# testing the UPDATE assignment cast
await self.con.execute(
r'''
UPDATE Foo
SET {
color := 'GREEN'
};
''')
await self.assert_query_result(
r'''
SELECT Foo {
color
};
''',
[{
'color': 'GREEN',
}],
)
async def test_edgeql_enums_assignment_03(self):
# testing the INSERT assignment cast
await self.con.execute(
r'''
INSERT Bar;
''')
await self.assert_query_result(
r'''
SELECT Bar {
color
};
''',
[{
'color': 'RED',
}],
)
async def test_edgeql_enums_assignment_04(self):
await self.con.execute(
r'''
INSERT Bar;
''')
# testing the UPDATE assignment cast
await self.con.execute(
r'''
UPDATE Bar
SET {
color := 'GREEN'
};
''')
await self.assert_query_result(
r'''
SELECT Bar {
color
};
''',
[{
'color': 'GREEN',
}],
)
async def test_edgeql_enums_json_cast_01(self):
self.assertEqual(
await self.con.query(
"SELECT <json><color_enum_t>'RED'"
),
['"RED"'])
await self.assert_query_result(
"SELECT <color_enum_t><json>'RED'",
['RED'])
await self.assert_query_result(
"SELECT <color_enum_t>'RED'",
['RED'])
async def test_edgeql_enums_json_cast_02(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'invalid input value for enum .+color_enum_t.+: "BANANA"'):
await self.con.execute("SELECT <color_enum_t><json>'BANANA'")
async def test_edgeql_enums_json_cast_03(self):
with self.assertRaisesRegex(
edgedb.InvalidValueError,
r'expected json string or null; got json number'):
await self.con.execute("SELECT <color_enum_t><json>12")
| 2.125 | 2 |
GUI/set_memristor_parameters.py | DuttaAbhigyan/Memristor-Simulation-Using-Python | 6 | 12791332 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 18 18:54:20 2019
@author: abhigyan
"""
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
"""Class to take in various paramters of the Memristors to be simulated"""
class set_memristor_parameters(QMainWindow):
#Create and launch the main window
def __init__(self, numberOfMemristors):
super(set_memristor_parameters, self).__init__()
self.setMemristorParametersOKButtonClicked = False
self.numberOfMemristors = numberOfMemristors
self.windowLength = 110 * self.numberOfMemristors + 280
self.windowBreadth = 550
self.setGeometry(300, 300, self.windowLength, self.windowBreadth)
self.setWindowTitle('Memristor Parameters')
self.setWindowIcon(QIcon('memristor_icon.ico'))
#Sets backgorund Image
backgroundImage = QImage('memristor1.jpg')
backgroundScaledImage = backgroundImage.scaled(QSize(self.windowLength,
self.windowBreadth))
palette = QPalette()
palette.setBrush(10, QBrush(backgroundScaledImage))
self.setPalette(palette)
#Sets Fonts
self.labelFont = QFont("Arial", 13, QFont.Bold)
self.buttonFont = QFont('Times', 13)
self.home()
self.show()
#Create the homescreen
def home(self):
#Window title
self.titleLabel = QLabel(self)
self.titleLabel.setText('Memristor Parameters')
self.titleFont = QFont("Times", 18, QFont.Bold)
self.titleLabel.setStyleSheet('QLabel{color:purple}')
self.titleFont.setUnderline(True)
self.titleLabel.setFont(self.titleFont)
self.titleLabel.setGeometry(QRect(self.windowLength/2 - 120, 10, 500, 50))
#Device numbers title
self.DeviceLabel = QLabel(self)
self.DeviceLabel.setText('Device:')
self.DeviceLabelFont = QFont("Calibri", 14, QFont.Bold)
self.DeviceLabel.setStyleSheet('QLabel{color:blue}')
self.DeviceLabel.setFont(self.DeviceLabelFont)
self.DeviceLabel.setGeometry(QRect(35, 60, 100, 50))
#Parameter labels
self.DLabel = QLabel(self)
self.DLabel.setText('D (nm):')
self.DLabel.setFont(self.labelFont)
self.DLabel.setGeometry(QRect(55, 100, 70, 50))
self.RoNLabel = QLabel(self)
self.RoNLabel.setText('R_on (\u03A9):')
self.RoNLabel.setFont(self.labelFont)
self.RoNLabel.setGeometry(QRect(37, 140, 90, 50))
self.RoFFLabel = QLabel(self)
self.RoFFLabel.setText('R_off (\u03A9):')
self.RoFFLabel.setFont(self.labelFont)
self.RoFFLabel.setGeometry(QRect(36, 180, 90, 50))
self.WLabel = QLabel(self)
self.WLabel.setText('W_0 (nm):')
self.WLabel.setFont(self.labelFont)
self.WLabel.setGeometry(QRect(33, 220, 90, 50))
self.mobLabel = QLabel(self)
self.mobLabel.setText('Mobility (\u03BC):')
self.mobLabel.setFont(self.labelFont)
self.mobLabel.setGeometry(QRect(19, 260, 100, 50))
self.polLabel = QLabel(self)
self.polLabel.setText('Polarity (\u03B7):')
self.polLabel.setFont(self.labelFont)
self.polLabel.setGeometry(QRect(22, 300, 100, 50))
self.typeLabel = QLabel(self)
self.typeLabel.setText('Type:')
self.typeLabel.setFont(self.labelFont)
self.typeLabel.setGeometry(QRect(73, 340, 100, 50))
#Stores widgets to take in parameters
self.DValueFields = []
self.R_onValueFields = []
self.R_offValueFields = []
self.W_0ValueFields = []
self.mobilityValueFields = []
self.polarityValueFields = []
self.memristorTypeValueFields = []
        #Creates the various widgets to take in the memristor parameters
for i in range(0, self.numberOfMemristors):
numberLabel = QLabel(self)
numberLabel.setText(str(i+1))
numberLabelFont = QFont("Calibri", 14, QFont.Bold)
numberLabel.setStyleSheet('QLabel{color:blue}')
numberLabel.setFont(self.DeviceLabelFont)
numberLabel.setGeometry(QRect(75 + (1+i)*120, 62, 50, 50))
DVFBox = QLineEdit(self)
DVFBox.move(55 + (1+i)*120, 112)
DVFBox.resize(60,25)
self.DValueFields.append(DVFBox)
R_oNBox = QLineEdit(self)
R_oNBox.move(55 + (1+i)*120, 152)
R_oNBox.resize(60, 25)
self.R_onValueFields.append(R_oNBox)
R_offBox = QLineEdit(self)
R_offBox.move(55 + (1+i)*120, 192)
R_offBox.resize(60,25)
self.R_offValueFields.append(R_offBox)
W_0Box = QLineEdit(self)
W_0Box.move(55 + (1+i)*120, 232)
W_0Box.resize(60,25)
self.W_0ValueFields.append(W_0Box)
mobilityBox = QLineEdit(self)
mobilityBox.move(55 + (1+i)*120, 272)
mobilityBox.resize(60,25)
self.mobilityValueFields.append(mobilityBox)
polarityBox = QLineEdit(self)
polarityBox.move(55 + (1+i)*120, 312)
polarityBox.resize(60,25)
self.polarityValueFields.append(polarityBox)
comboBox = QComboBox(self)
comboBox.addItem('Ideal')
#comboBox3.addItem('Strukov')
#comboBox.addItem('Prodromakis')
#comboBox.addItem('Biolek')
comboBox.move(55 + (1+i)*120, 353)
comboBox.resize(80,25)
self.memristorTypeValueFields.append(comboBox)
#Creates OK and Cancel button
self.OKButton = QPushButton('OK', self)
self.OKButton.resize(100, 40)
self.OKButton.move(self.windowLength/2 -150, 473)
self.OKButton.setStyleSheet('QPushButton {color: darkgreen;}')
self.OKButtonFont = QFont('Times', 13)
self.OKButton.setFont(self.OKButtonFont)
self.OKButton.clicked.connect(self.readParameters)
self.cancelButton = QPushButton('Cancel', self)
self.cancelButton.resize(100, 40)
self.cancelButton.move(self.windowLength/2 , 473)
self.cancelButton.setStyleSheet('QPushButton {color: darkgreen;}')
self.cancelButtonFont = QFont('Times', 13)
self.cancelButton.setFont(self.cancelButtonFont)
self.cancelButton.clicked.connect(self.close)
#Reads the parameters input by user
def readParameters(self):
self.setMemristorParametersOKButtonClicked = True
self.D = []
self.R_on = []
self.R_off = []
self.W_0 = []
self.mobility = []
self.polarity = []
self.type = []
self.pValues= []
for i in range(0, self.numberOfMemristors):
if(self.DValueFields[i].text() != ''):
self.D.append(float(self.DValueFields[i].text()) * 10**-9)
else:
self.D.append(None)
if(self.R_onValueFields[i].text() != ''):
self.R_on.append(float(self.R_onValueFields[i].text()))
else:
self.R_on.append(None)
if(self.R_offValueFields[i].text() != ''):
self.R_off.append(float(self.R_offValueFields[i].text()))
else:
self.R_off.append(None)
if(self.W_0ValueFields[i].text() != ''):
self.W_0.append(float(self.W_0ValueFields[i].text()))
else:
self.W_0.append(None)
if(self.mobilityValueFields[i].text() != ''):
                self.mobility.append(float(self.mobilityValueFields[i].text()) * 10**-12)
else:
self.mobility.append(None)
if(self.polarityValueFields[i].text() != ''):
self.polarity.append(float(self.polarityValueFields[i].text()))
else:
self.polarity.append(None)
self.type.append(self.memristorTypeValueFields[i].currentText())
self.close()
#Getter functions
def getMemristorParamters(self):
parameterDictionary = {}
parameterDictionary['D'] = self.D[:]
parameterDictionary['R_on'] = self.R_on[:]
parameterDictionary['R_off'] = self.R_off[:]
parameterDictionary['W_0'] = self.W_0[:]
parameterDictionary['mobility'] = self.mobility[:]
parameterDictionary['polarity'] = self.polarity[:]
parameterDictionary['type'] = self.type[:]
return parameterDictionary
def getOKButton(self):
return self.setMemristorParametersOKButtonClicked
| 2.78125 | 3 |
web/WebView/admin.py | shinoyasan/intelli-switch | 12 | 12791333 | from django.contrib import admin
from .models import ServerInfo,SampleData,DeviceControl,UserApp
# Register your models here.
admin.site.register(ServerInfo)
admin.site.register(SampleData)
admin.site.register(DeviceControl)
admin.site.register(UserApp) | 1.375 | 1 |
tests/ut/python/pipeline/parse/test_sequence_assign.py | httpsgithu/mindspore | 1 | 12791334 | <reponame>httpsgithu/mindspore
# Copyright 2020-2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
""" test enumerate"""
import numpy as np
import pytest
import mindspore.nn as nn
from mindspore.nn import Cell
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from mindspore import Tensor, ms_function
from mindspore import context
def test_list_index_1d():
"""
Feature: List index assign
    Description: Test 1D list index assignment in PyNative and Graph mode
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
class Net(nn.Cell):
def construct(self):
list_ = [[1], [2, 2], [3, 3, 3]]
list_[0] = [100]
return list_
net = Net()
out = net()
assert list(out[0]) == [100]
assert list(out[1]) == [2, 2]
assert list(out[2]) == [3, 3, 3]
context.set_context(mode=context.GRAPH_MODE)
net = Net()
out = net()
assert list(out[0]) == [100]
assert list(out[1]) == [2, 2]
assert list(out[2]) == [3, 3, 3]
def test_list_neg_index_1d():
"""
Feature: List index assign
    Description: Test 1D list assignment with a negative index in PyNative and Graph mode
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
class Net(nn.Cell):
def construct(self):
list_ = [[1], [2, 2], [3, 3, 3]]
list_[-3] = [100]
return list_
net = Net()
out = net()
assert list(out[0]) == [100]
assert list(out[1]) == [2, 2]
assert list(out[2]) == [3, 3, 3]
context.set_context(mode=context.GRAPH_MODE)
out = net()
assert list(out[0]) == [100]
assert list(out[1]) == [2, 2]
assert list(out[2]) == [3, 3, 3]
def test_list_index_2d():
"""
Feature: List index assign
    Description: Test 2D list index assignment in PyNative and Graph mode
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
class Net(nn.Cell):
def construct(self):
list_ = [[1], [2, 2], [3, 3, 3]]
list_[1][0] = 200
list_[1][1] = 201
return list_
net = Net()
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [200, 201]
assert list(out[2]) == [3, 3, 3]
context.set_context(mode=context.GRAPH_MODE)
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [200, 201]
assert list(out[2]) == [3, 3, 3]
def test_list_neg_index_2d():
"""
Feature: List index assign
    Description: Test 2D list assignment with negative indices in PyNative and Graph mode
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
class Net(nn.Cell):
def construct(self):
list_ = [[1], [2, 2], [3, 3, 3]]
list_[1][-2] = 20
list_[1][-1] = 21
return list_
net = Net()
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [20, 21]
assert list(out[2]) == [3, 3, 3]
context.set_context(mode=context.GRAPH_MODE)
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [20, 21]
assert list(out[2]) == [3, 3, 3]
def test_list_index_3d():
"""
Feature: List index assign
    Description: Test 3D list index assignment in PyNative and Graph mode
Expectation: No exception.
"""
class Net(nn.Cell):
def construct(self):
list_ = [[1], [2, 2], [[3, 3, 3]]]
list_[2][0][0] = 300
list_[2][0][1] = 301
list_[2][0][2] = 302
return list_
context.set_context(mode=context.PYNATIVE_MODE)
net = Net()
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [2, 2]
assert list(out[2][0]) == [300, 301, 302]
context.set_context(mode=context.GRAPH_MODE)
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [2, 2]
assert list(out[2][0]) == [300, 301, 302]
def test_list_neg_index_3d():
"""
Feature: List index assign
    Description: Test 3D list assignment with negative indices in PyNative and Graph mode
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
class Net(nn.Cell):
def construct(self):
list_ = [[1], [2, 2], [[3, 3, 3]]]
list_[2][0][-3] = 30
list_[2][0][-2] = 31
list_[2][0][-1] = 32
return list_
net = Net()
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [2, 2]
assert list(out[2][0]) == [30, 31, 32]
context.set_context(mode=context.GRAPH_MODE)
out = net()
assert list(out[0]) == [1]
assert list(out[1]) == [2, 2]
assert list(out[2][0]) == [30, 31, 32]
def test_list_index_1D_parameter():
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def construct(self, x):
list_ = [x]
list_[0] = 100
return list_
net = Net()
net(Tensor(0))
def test_list_index_2D_parameter():
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def construct(self, x):
list_ = [[x, x]]
list_[0][0] = 100
return list_
net = Net()
net(Tensor(0))
def test_list_index_3D_parameter():
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def construct(self, x):
list_ = [[[x, x]]]
list_[0][0][0] = 100
return list_
net = Net()
net(Tensor(0))
def test_const_list_index_3D_bprop():
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = [[1], [2, 2], [[3, 3], [3, 3]]]
self.relu = P.ReLU()
def construct(self, input_x):
list_x = self.value
list_x[2][0][1] = input_x
return self.relu(list_x[2][0][1])
class GradNet(nn.Cell):
def __init__(self, net):
super(GradNet, self).__init__()
self.net = net
self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)
def construct(self, x, sens):
return self.grad_all_with_sens(self.net)(x, sens)
net = Net()
grad_net = GradNet(net)
x = Tensor(np.arange(2 * 3).reshape(2, 3))
sens = Tensor(np.arange(2 * 3).reshape(2, 3))
grad_net(x, sens)
def test_parameter_list_index_3D_bprop():
context.set_context(mode=context.GRAPH_MODE)
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.value = [[1], [2, 2], [[3, 3], [3, 3]]]
self.relu = P.ReLU()
def construct(self, x, value):
list_value = [[x], [x, x], [[x, x], [x, x]]]
list_value[2][0][1] = value
return self.relu(list_value[2][0][1])
class GradNet(nn.Cell):
def __init__(self, net):
super(GradNet, self).__init__()
self.net = net
self.grad_all_with_sens = C.GradOperation(get_all=True, sens_param=True)
def construct(self, x, value, sens):
return self.grad_all_with_sens(self.net)(x, value, sens)
net = Net()
grad_net = GradNet(net)
x = Tensor(np.arange(2 * 3).reshape(2, 3))
value = Tensor(np.ones((2, 3), np.int64))
sens = Tensor(np.arange(2 * 3).reshape(2, 3))
grad_net(x, value, sens)
class Net1(Cell):
def construct(self, a, b, start=None, stop=None, step=None):
a[start:stop:step] = b[start:stop:step]
return tuple(a)
def compare_func1(a, b, start=None, stop=None, step=None):
a[start:stop:step] = b[start:stop:step]
return tuple(a)
def test_list_slice_length_equal():
"""
Feature: List assign
Description: Test list assign the size is equal
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4]
b = [5, 6, 7, 8]
python_out = compare_func1(a, b, 0, None, 2)
a = [1, 2, 3, 4]
b = [5, 6, 7, 8]
net = Net1()
pynative_mode_out = net(a, b, 0, None, 2)
assert pynative_mode_out == python_out
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 0, None, 2)
assert graph_out == python_out
def test_list_slice_length_error():
"""
Feature: List assign
Description: Test list assign the size is not equal
Expectation: ValueError.
"""
context.set_context(mode=context.GRAPH_MODE)
a = [1, 2, 3, 4, 5]
b = [5, 6, 7, 8]
net = Net1()
with pytest.raises(ValueError) as err:
net(a, b, 0, None, 2)
assert "attempt to assign sequence of size 2 to extended slice of size 3" in str(err.value)
context.set_context(mode=context.PYNATIVE_MODE)
with pytest.raises(ValueError) as err:
net(a, b, 0, None, 2)
assert "attempt to assign sequence of size 2 to extended slice of size 3" in str(err.value)
def compare_func2(a, b, start=None, stop=None, step=None):
a[start:stop:step] = b
return tuple(a)
class Net2(Cell):
def construct(self, a, b, start=None, stop=None, step=None):
a[start:stop:step] = b
return tuple(a)
def test_list_slice_shrink():
"""
Feature: List assign
Description: Test list slice shrink assign
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33]
python_out = compare_func2(a, b, 0, 5)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33]
net = Net2()
pynative_out = net(a, b, 0, 5)
assert pynative_out == python_out
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33]
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 0, 5)
assert graph_out == python_out
def test_list_slice_insert():
"""
Feature: List assign
Description: Test list slice insert assign
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
python_out = compare_func2(a, b, 0, 1)
net = Net2()
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
pynative_out = net(a, b, 0, 1)
assert pynative_out == python_out
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 0, 1)
assert graph_out == python_out
def test_list_slice_assign():
"""
Feature: List assign
Description: Test list slice start and stop is larger than size
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
python_out = compare_func2(a, b, -12, 456)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
net = Net2()
pynative_out = net(a, b, -12, 456)
assert pynative_out == python_out
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, -12, 456)
assert graph_out == python_out
def test_list_slice_extend():
"""
Feature: List assign
Description: Test list slice extend
Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
net = Net2()
python_out = compare_func2(a, b, 1234, 0)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
pynative_out = net(a, b, 1234, 0)
assert pynative_out == python_out
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 1234, 0)
assert graph_out == python_out
def test_list_slice_extend_front():
"""
Feature: List assign
Description: Test list slice extend
Expectation: No exception.
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
python_out = compare_func2(a, b, 0, 0)
context.set_context(mode=context.PYNATIVE_MODE)
net = Net2()
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
pynative_out = net(a, b, 0, 0)
assert pynative_out == python_out
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 0, 0)
assert graph_out == python_out
def test_list_slice_extend_inner():
"""
Feature: List assign
Description: Test list slice extend
Expectation: No exception.
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
python_out = compare_func2(a, b, 5, 5)
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
net = Net2()
pynative_out = net(a, b, 5, 5)
assert pynative_out == python_out
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33, 44, 55]
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 5, 5)
assert graph_out == python_out
def test_list_slice_erase():
"""
Feature: List assign
Description: Test list slice erase
Expectation: No exception.
"""
a = [1, 2, 3, 4, 5, 6, 7]
python_out = compare_func2(a, [], 1, 3)
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7]
net = Net2()
pynative_out = net(a, [], 1, 3)
assert pynative_out == python_out
context.set_context(mode=context.GRAPH_MODE)
a = [1, 2, 3, 4, 5, 6, 7]
graph_out = net(a, [], 1, 3)
assert graph_out == python_out
def test_list_slice_tuple_without_step():
"""
Feature: List assign
Description: Test list slice assign with tuple
Expectation: No exception.
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = (11, 22, 33)
python_out = compare_func2(a, b, 0, 4, None)
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = (11, 22, 33)
net = Net2()
pynative_out = net(a, b, 0, 4, None)
assert pynative_out == python_out
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = (11, 22, 33)
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 0, 4, None)
assert graph_out == python_out
def test_list_slice_tuple_with_step():
"""
Feature: List assign
Description: Test list slice assign with tuple
Expectation: No exception.
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = (11, 22, 33)
python_out = compare_func2(a, b, 1, None, 3)
context.set_context(mode=context.PYNATIVE_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = (11, 22, 33)
net = Net2()
pynative_out = net(a, b, 1, None, 3)
assert pynative_out == python_out
context.set_context(mode=context.GRAPH_MODE)
graph_out = net(a, b, 1, None, 3)
assert graph_out == python_out
def test_list_double_slice():
"""
Feature: List assign
Description: Test list double slice assign
    Expectation: No exception.
"""
context.set_context(mode=context.PYNATIVE_MODE)
@ms_function
def foo(a, b, start1, stop1, step1, start2, stop2, step2):
a[start1:stop1:step1][start2: stop2: step2] = b
return a
class NetInner(Cell):
def construct(self, a, b, start1, stop1, step1, start2, stop2, step2):
a[start1:stop1:step1][start2: stop2: step2] = b
return tuple(a)
net = NetInner()
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [11, 22, 33]
assert foo(a, b, 0, None, 1, 0, None, 3) == net(a, b, 0, None, 1, 0, None, 3)
def convert_tuple(a):
result = tuple()
for i in a:
if isinstance(i, list):
result += (tuple(i),)
continue
result += (i,)
return result
def test_list_in_list_slice():
"""
Feature: List assign
Description: Test high dimension list slice assign
Expectation: No exception.
"""
class TestNet(Cell):
def construct(self, a, b, index, start=None, stop=None, step=None):
a[index][start:stop:step] = b
return tuple(a)
def com_func3(a, b, index, start=None, stop=None, step=None):
a[index][start:stop:step] = b
return convert_tuple(a)
a = [1, 2, [1, 2, 3, 4, 5, 6, 7], 8, 9]
b = [1111, 2222]
python_out = com_func3(a, b, 2, 1, None, 3)
context.set_context(mode=context.PYNATIVE_MODE)
net = TestNet()
a = [1, 2, [1, 2, 3, 4, 5, 6, 7], 8, 9]
b = [1111, 2222]
pynative_out = convert_tuple(net(a, b, 2, 1, None, 3))
assert pynative_out == python_out
context.set_context(mode=context.GRAPH_MODE)
graph_out = convert_tuple(net(a, b, 2, 1, None, 3))
assert graph_out == python_out
def test_list_slice_negative_step():
"""
Feature: List assign
Description: Test negative step list slice assign
Expectation: No exception.
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [33, 44, 55]
python_out = compare_func2(a, b, -1, -9, -3)
context.set_context(mode=context.PYNATIVE_MODE)
net = Net2()
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [33, 44, 55]
pynative_out = net(a, b, -1, -9, -3)
assert pynative_out == python_out
context.set_context(mode=context.GRAPH_MODE)
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [33, 44, 55]
graph_out = net(a, b, -1, -9, -3)
assert graph_out == python_out
def test_graph_list_slice_assign_extended_number():
"""
Feature: List assign
    Description: Test assigning a non-iterable number to an extended list slice
    Expectation: TypeError
"""
a = [1, 2, 3, 4, 5, 6]
b = 1
net = Net2()
context.set_context(mode=context.PYNATIVE_MODE)
with pytest.raises(TypeError) as err:
net(a, b, 0, None, 2)
assert "must assign iterable to extended slice" in str(err.value)
context.set_context(mode=context.GRAPH_MODE)
with pytest.raises(TypeError) as err:
net(a, b, 0, None, 2)
assert "must assign iterable to extended slice" in str(err.value)
def test_graph_list_slice_assign_number():
"""
Feature: List assign
    Description: Test assigning a non-iterable number to a regular list slice
    Expectation: TypeError
"""
a = [1, 2, 3, 4, 5, 6]
b = 1
net = Net2()
context.set_context(mode=context.PYNATIVE_MODE)
with pytest.raises(TypeError) as err:
net(a, b, 0, None, 1)
assert "can only assign an iterable" in str(err.value)
context.set_context(mode=context.GRAPH_MODE)
with pytest.raises(TypeError) as err:
net(a, b, 0, None, 1)
assert "can only assign an iterable" in str(err.value)
def test_list_slice_negetive_error():
"""
Feature: List assign
Description: Test negative step list slice assign
Expectation: ValueError
"""
a = [1, 2, 3, 4, 5, 6, 7, 8, 9]
b = [33, 44, 55]
net = Net2()
context.set_context(mode=context.PYNATIVE_MODE)
with pytest.raises(ValueError) as err:
net(a, b, -1, -3, -3)
assert "attempt to assign sequence of size 3 to extended slice of size 1" in str(err.value)
context.set_context(mode=context.GRAPH_MODE)
with pytest.raises(ValueError) as err:
net(a, b, -1, -3, -3)
assert "attempt to assign sequence of size 3 to extended slice of size 1" in str(err.value)
| 2.234375 | 2 |
guitarfan/controlers/site/index.py | timgates42/GuitarFan | 48 | 12791335 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import render_template, request, redirect, url_for, flash, Blueprint, jsonify, current_app
from sqlalchemy import func
from guitarfan.models import *
from guitarfan.extensions.flasksqlalchemy import db
from guitarfan.extensions.flaskcache import cache
bp_site_index = Blueprint('bp_site_index', __name__, template_folder="../../templates/site")
@bp_site_index.route('/')
@bp_site_index.route('/index')
def index():
hot_tabs = Tab.query.order_by(Tab.hits.desc()).limit(12)
new_tabs = Tab.query.order_by(Tab.update_time.desc()).limit(12)
return render_template('index.html', hot_tabs=hot_tabs, new_tabs=new_tabs)
@bp_site_index.route('/tagcloud.json')
@cache.cached(3600, key_prefix='tag_cloud_json')
def tag_cloud_json():
tags = []
for tag_id, tag_name, tab_count in db.session.query(Tag.id, Tag.name, func.count(Tab.id)).join(Tab, Tag.tabs).group_by(Tag.id):
tags.append({'tagId': tag_id, 'tagName': tag_name, 'count': tab_count})
return jsonify(tags=tags)
@bp_site_index.route('/stylecloud.json')
@cache.cached(3600, key_prefix='style_cloud_json')
def style_cloud_json():
styles = []
for style_id, tab_count in db.session.query(Tab.style_id, func.count(Tab.id)).group_by(Tab.style_id):
styles.append({'styleId': style_id, 'styleName': MusicStyle.get_item_text(style_id), 'count': tab_count})
return jsonify(styles=styles)
@bp_site_index.route('/robots.txt')
def robots_txt():
return """<html>
<head></head>
<body>
<pre>User-agent: *
Crawl-delay: 10
Disallow: /admin
</pre>
</body>
</html>""" | 2.03125 | 2 |
src/utils.py | GreenRiverRUS/thatmusic-api | 3 | 12791336 | import hashlib
import re
from typing import Union, Optional, Dict
from urllib.parse import urljoin
import binascii
import logging
import eyed3
from eyed3.id3 import ID3_V1
from unidecode import unidecode
from tornado import web
from settings import LOG_LEVEL
class BasicHandler(web.RequestHandler):
logger = None
def prepare(self):
self.logger.debug('{} request from {}: {}'.format(
self.request.method.capitalize(),
self.request.remote_ip,
self.request.uri)
)
self.logger.debug('Request body: {}'.format(self.request.body.decode()))
def on_finish(self):
self.log_request()
def write_result(self, result):
self.finish({'success': 1, 'data': result})
def write_error(self, status_code, **kwargs):
result = {'success': 0, 'error': self._reason, 'error_code': status_code}
if 'exc_info' in kwargs:
exception = kwargs['exc_info'][1]
if isinstance(exception, web.HTTPError):
result.update(exception.args) # TODO
self.finish(result)
def log_request(self):
self.logger.info(
'{remote_ip} {method} {request_uri} => HTTP: {status_code} ({time:.0f} ms)'.format(
remote_ip=self.request.remote_ip,
method=self.request.method.upper(),
request_uri=self.request.uri,
status_code=self.get_status(),
time=1000.0 * self.request.request_time()
)
)
def data_received(self, chunk):
pass
def reverse_full_url(self, name, *args):
host_url = "{protocol}://{host}".format(**vars(self.request))
return urljoin(host_url, self.reverse_url(name, *args))
def setup_logger(name, lvl=logging.DEBUG):
logger = logging.getLogger(name)
logger.setLevel(lvl)
basic_stream_handler = logging.StreamHandler()
basic_stream_handler.setFormatter(
logging.Formatter('%(levelname)-8s %(asctime)s %(message)s')
)
basic_stream_handler.setLevel(LOG_LEVEL)
logger.addHandler(basic_stream_handler)
logger.propagate = False
return logger
def vk_url(path: str):
return urljoin('https://api.vk.com/', path)
def crc32(string: Union[str, bytes]):
if isinstance(string, str):
string = string.encode()
return '{:08x}'.format(binascii.crc32(string) & 0xFFFFFFFF)
def md5(string: Union[str, bytes]):
if isinstance(string, str):
string = string.encode()
return hashlib.md5(string).hexdigest()
def uni_hash(hash_func: str, string):
if hash_func == 'crc32':
return crc32(string)
elif hash_func == 'md5':
return md5(string)
raise ValueError('Unknown hash function: {}'.format(hash_func))
def sanitize(string, to_lower: bool = True, alpha_numeric_only: bool = False, truncate: Optional[int] = None):
if alpha_numeric_only:
        string = re.sub(r'\W+', '', string)
else:
bad_chars = ['~', '`', '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '_', '=', '+',
'[', '{', ']', '}', '\\', '|', ';', ':', '"', "'", '—', '–', ',', '<', '>', '/', '?',
'‘', '’', '“', '”']
string = re.sub(r'|'.join(map(re.escape, bad_chars)), '', string)
string = unidecode(string) # transliteration and other staff: converts to ascii
string = string.strip()
string = re.sub(r'\s+', ' ', string)
if to_lower:
string = string.lower()
if truncate is not None:
string = string[:truncate]
return string
def set_id3_tag(path: str, audio_info: Dict):
audio = eyed3.load(path)
audio.initTag(version=ID3_V1)
audio.tag.title = unidecode(audio_info['title']).strip()
audio.tag.artist = unidecode(audio_info['artist']).strip()
audio.tag.save(version=ID3_V1)
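# Illustrative usage sketch (not part of the original module), exercising only the
# pure helpers; the handler and set_id3_tag need a running app and a real MP3 file.
if __name__ == "__main__":
    print(sanitize("  Привет,   Мир! ", truncate=10))    # transliterated, lowercased, squeezed
    print(uni_hash("crc32", "hello"), uni_hash("md5", "hello"))
    print(vk_url("method/audio.search"))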
| 2.046875 | 2 |
examples/plotting/performance_plotting.py | ndangtt/LeadingOnesDAC | 11 | 12791337 | <gh_stars>10-100
from pathlib import Path
from seaborn import plotting_context
from dacbench.logger import load_logs, log2dataframe
from dacbench.plotting import plot_performance_per_instance, plot_performance
import matplotlib.pyplot as plt
def per_instance_example():
"""
Plot CMA performance for each training instance
"""
file = Path("./data/chainererrl_cma/PerformanceTrackingWrapper.jsonl")
logs = load_logs(file)
data = log2dataframe(logs, wide=True, drop_columns=["time"])
grid = plot_performance_per_instance(
data, title="CMA Mean Performance per Instance"
)
grid.savefig("output/cma_performance_per_instance.pdf")
plt.show()
def performance_example():
"""
Plot Sigmoid performance over time, divided by seed and with each seed in its own plot
"""
file = Path("./data/sigmoid_example/PerformanceTrackingWrapper.jsonl")
logs = load_logs(file)
data = log2dataframe(logs, wide=True, drop_columns=["time"])
Path("output").mkdir(exist_ok=True)
# overall
grid = plot_performance(data, title="Overall Performance")
grid.savefig("output/sigmoid_overall_performance.pdf")
plt.show()
# per instance seed (hue)
grid = plot_performance(data, title="Overall Performance", hue="seed")
grid.savefig("output/sigmoid_overall_performance_per_seed_hue.pdf")
plt.show()
# per instance seed (col)
with plotting_context("poster"):
grid = plot_performance(
data, title="Overall Performance", col="seed", col_wrap=3
)
grid.fig.subplots_adjust(top=0.92)
grid.savefig("output/sigmoid_overall_performance_per_seed.pdf")
plt.show()
if __name__ == "__main__":
per_instance_example()
performance_example()
| 2.359375 | 2 |
training/Leetcode/109.py | voleking/ICPC | 68 | 12791338 | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def sortedListToBST(self, head):
"""
:type head: ListNode
:rtype: TreeNode
"""
def convert(head, n):
if n == 0:
return None
mid = head
for i in range(n // 2):
mid = mid.next
root = TreeNode(mid.val)
root.left = convert(head, n // 2)
root.right = convert(mid.next, n - n // 2 - 1)
return root
n, ptr = 0, head
while ptr:
n += 1
ptr = ptr.next
return convert(head, n)
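# Illustrative driver sketch (not part of the original solution): minimal ListNode and
# TreeNode definitions matching the commented stubs above, used to check the conversion.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None
    class TreeNode:
        def __init__(self, x):
            self.val = x
            self.left = None
            self.right = None
    head = ListNode(-10)
    head.next = ListNode(-3)
    head.next.next = ListNode(0)
    head.next.next.next = ListNode(5)
    head.next.next.next.next = ListNode(9)
    root = Solution().sortedListToBST(head)
    print(root.val)  # 0 -- the middle element of the five-node list becomes the root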
| 3.8125 | 4 |
app/main/routes.py | mrtoronto/find-a-lab | 0 | 12791339 | <reponame>mrtoronto/find-a-lab<filename>app/main/routes.py
from app import db
from app.main.forms import LoginForm, RegistrationForm, EditProfileForm, \
ResetPasswordRequestForm, ResetPasswordForm, authorIndexQueryForm
from app.models import User, Result
from app.email import send_password_reset_email
from app.main import bp
from config import Config
from app.main_api_functions import *
from rq.job import Job
from datetime import datetime, timezone
from flask import render_template, flash, redirect, url_for, request, jsonify,current_app
from flask_login import login_user, logout_user, current_user, login_required
from config import Config
from werkzeug.urls import url_parse
import itertools
import re
import ast
import datetime
import pandas as pd
from collections import Counter
from geotext import GeoText
import time
@bp.before_request
def before_request():
if current_user.is_authenticated:
current_user.last_seen = datetime.datetime.now(timezone.utc)
db.session.commit()
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
def index():
return render_template('index.html')
@bp.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=user)
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm(current_user.username)
if form.validate_on_submit():
current_user.username = form.username.data
current_user.about_me = form.about_me.data
db.session.commit()
flash('Your changes have been saved.')
return redirect(url_for('main.edit_profile'))
elif request.method == 'GET':
form.username.data = current_user.username
form.about_me.data = current_user.about_me
return render_template('edit_profile.html', title='Edit Profile',
form=form)
def run_query(query_type, query_text, \
from_year, locations, affils, api_key, \
querying_user):
"""
Query data is returned in a nested dictionary and assigned to `obj_dicts` which is stored in the db.
"""
### Import create_app because this function is run by the worker
from app import create_app
from app.models import Result
app = create_app()
app.app_context().push()
if query_type == 'author_papers':
obj_dicts = query_author_papers(query = query_text,
from_year = from_year,
locations = locations,
n_authors = 25,
affils = affils,
api_key = api_key,
api_out = False)
elif query_type == 'affil_papers':
obj_dicts = query_affil_papers(query = query_text,
from_year = from_year,
locations = locations,
n_authors = 25,
affils = affils,
api_key = api_key,
api_out = False)
result = Result(
query_type = query_type,
query_text = query_text,
query_from = from_year,
query_affiliations = affils,
query_locations= locations,
user_querying = querying_user,
length_of_results = len(obj_dicts.keys()),
result_all=obj_dicts
)
db.session.add(result)
db.session.commit()
return result.id
@bp.route('/query/<query_type>', methods=['GET', 'POST'])
@login_required
def make_a_query(query_type):
"""
"""
if query_type == 'author_papers':
form = authorIndexQueryForm()
elif query_type == 'affil_papers':
form = authorIndexQueryForm()
if form.validate_on_submit():
if current_app.config['ASYNC_FUNC']:
from app.main.routes import run_query
### If async == True, queue a task with the args from the form
job = current_app.task_queue.enqueue_call(
func=run_query,
args=(query_type,
form.query_text.data, form.query_from.data,
form.locations.data, form.affiliations.data,
form.api_key.data, current_user.username),
result_ttl=current_app.config['RESULT_TTL'],
timeout=current_app.config['WORKER_TIMEOUT'])
flash(f'Your query is running! Your ID is : {job.get_id()}')
return get_results(job.get_id())
elif not current_app.config['ASYNC_FUNC']:
### Run the query without task queue if async == False
if query_type == 'affil_papers':
affil_dicts = query_affil_papers(query = form.query_text.data,
from_year = form.query_from.data,
locations = form.locations.data,
n_authors = 25,
affils = form.affiliations.data,
api_key = form.api_key.data,
api_out = False)
n_results = sum([affil_dict['total_count'] for affil_dict in \
affil_dicts.values()])
length_of_results = len(affil_dicts.keys())
return render_template('query_results/affil_papers.html', \
data = affil_dicts, n_results = n_results, unique_results = length_of_results), 200
elif query_type == 'author_papers':
author_dicts = query_author_papers(query = form.query_text.data,
from_year = form.query_from.data,
locations = form.locations.data,
n_authors = 25,
affils = form.affiliations.data,
api_key = form.api_key.data,
api_out = False)
n_results = sum([author_dict.get('total_count', 0) for author_dict in \
author_dicts.values()])
length_of_results = len(author_dicts.keys())
return render_template('query_results/author_papers.html', \
data = author_dicts, n_results = n_results, unique_results = length_of_results), 200
return render_template('make_a_query.html', form=form)
@bp.route("/results/<job_key>", methods=['GET'])
def get_results(job_key):
"""
    Results page for <job_key>. If the job is still running, this renders a
    processing page with a link to refresh; once the job has finished, the same
    URL renders the result tables.
"""
job = Job.fetch(job_key, connection=current_app.redis)
### Return results
if job.is_finished and job.result:
result = Result.query.filter_by(id=job.result).first()
if result.result_all.get('error'):
return render_template('errors/data_error.html', data = result.result_all.get('error'),
query_text = result.query_text, query_from = result.query_from ,
query_location = result.query_locations, query_affiliations = result.query_affiliations)
n_results = sum([author_dict.get('total_count', 0) for author_dict in \
result.result_all.values()])
### Return different pages for different queries
if result.query_type == 'affil_papers':
return render_template('query_results/affil_papers.html', \
data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200
elif result.query_type == 'author_papers':
return render_template('query_results/author_papers.html', \
data = result.result_all, n_results = n_results, unique_results = result.length_of_results), 200
### Refresh if job is still processing
else:
return render_template('query_results/processing.html', job_key = job_key), 202
#######
@bp.route('/api/help/', methods = ['GET'])
def help():
return {'endpoints' : {'/api/query/author_affils/' : {'parameters' :
{'query' : '', 'from' : '', 'locations' : '', 'n' : ''}, 'info' : ''},
'/api/query/author_papers/' : {'parameters' :
{'query' : '', 'from' : '', 'locations' : '', 'n' : ''}, 'info' : ''}
},
'general_notes' : '<NAME>'}
@bp.route('/api/query/author_papers/', methods = ['GET'])
def query_author_papers(query = "", from_year = "",
locations = "", n_authors = "",
affils = "", api_key = "", api_out = True):
timeit_start = time.time()
"""if request.args.get('query'):
query = request.args.get('query')
if request.args.get('from'):
from_year = int(request.args.get('from', 2000))
if request.args.get('locations'):
locations = request.args.get('locations', [])
if request.args.get('n', 25):
n_authors = request.args.get('n', 25)
if request.args.get('affiliations', []):
affils = request.args.get('affiliations', [])
if request.args.get('api_key'):
api_key = request.args.get('api_key')
if request.args.get('api_out'):
api_out = request.args.get('api_out')"""
if locations:
locations = [location.strip().lower() for location in locations.split(',')]
if affils:
affils = [affil.strip().lower() for affil in affils.split(',')]
if not api_key:
no_key_dict = {'error' : 'Please supply an API key to run your query under!'}
if api_out == True:
return jsonify(no_key_dict)
else:
return no_key_dict
out_dict = query_author_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)
timeit_end = time.time()
print(f'`query_author_papers` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
if api_out == True:
return jsonify(out_dict)
else:
return out_dict
@bp.route('/api/query/affil_papers/', methods = ['GET'])
def query_affil_papers(query = "",
from_year = "",
locations = "",
n_authors = "",
affils = "",
api_key = "",
api_out = True):
timeit_start = time.time()
#if request.args.get('query'):
# query = request.args.get('query')
#if request.args.get('from'):
# from_year = int(request.args.get('from', 2000))
#if request.args.get('locations'):
# locations = request.args.get('locations', [])
##if request.args.get('n', 25):
# n_authors = request.args.get('n', 25)
#if request.args.get('affiliations', []):
# affils = request.args.get('affiliations', [])
#if request.args.get('api_key'):
# api_key = request.args.get('api_key')
if locations:
locations = [location.strip().lower() for location in locations.split(',')]
if affils:
affils = [affil.strip().lower() for affil in affils.split(',')]
if not api_key:
no_key_dict = {'error' : 'Please supply an API key to run your query under!'}
if api_out == True:
return jsonify(no_key_dict)
else:
return no_key_dict
out_dict = query_affil_papers_data(query, from_year, locations, affils, n_authors, timeit_start, api_key)
timeit_end = time.time()
#print(f'`author_papers_w_location` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
print(f'`query_affil_papers` for "{query}" from {from_year} onward ran in {round(timeit_end - timeit_start,4)} seconds. Returning results.')
if api_out == True:
return jsonify(out_dict)
else:
return out_dict | 2.21875 | 2 |
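
The asynchronous branch above follows the standard RQ pattern: enqueue a callable, hand the job id back to the client, then poll the job until the worker has stored its result. A stripped-down sketch of that flow outside of Flask — the Redis URL, query arguments and TTLs are placeholders, and enqueue_call assumes the same (older) RQ API the view uses:

from redis import Redis
from rq import Queue
from rq.job import Job
from app.main.routes import run_query

# Placeholder connection; the app itself builds its queue on current_app.task_queue.
redis_conn = Redis.from_url("redis://localhost:6379/0")
queue = Queue(connection=redis_conn)

# Mirrors the enqueue_call(...) in make_a_query (arguments are illustrative).
job = queue.enqueue_call(
    func=run_query,
    args=("author_papers", "example query", 2018, "boston", "", "API_KEY", "demo_user"),
    result_ttl=3600,
    timeout=600,
)

# Mirrors get_results(job_key): fetch the job and check whether it finished.
fetched = Job.fetch(job.get_id(), connection=redis_conn)
if fetched.is_finished:
    print("Result row id:", fetched.result)  # run_query returns the Result primary key
else:
    print("Still processing; refresh later")
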
auto_drive/rule_drive/ReplayRace.py | YingshuLu/self-driving-formula-racing | 0 | 12791340 | <reponame>YingshuLu/self-driving-formula-racing<filename>auto_drive/rule_drive/ReplayRace.py
import cv2
import sys
import os
from time import time, sleep
imagesFolder = sys.argv[1]
frameIntervalSec = 1.0/10
def show_image(img, name = "image", scale = 1.0, newsize = None):
if scale and scale != 1.0:
img = cv2.resize(img, newsize, interpolation=cv2.INTER_CUBIC)
cv2.namedWindow(name, cv2.WINDOW_AUTOSIZE)
cv2.imshow(name, img)
cv2.waitKey(1)
images_in_folder = [x for x in os.listdir(imagesFolder) if x.endswith('.jpg')]
for image in images_in_folder:
time_begin = time()
img = cv2.imread(os.path.join(imagesFolder,image))
show_image(img)
secs_to_sleep = frameIntervalSec - (time()-time_begin)
if secs_to_sleep>0:
sleep(secs_to_sleep) | 2.703125 | 3 |
pink/tasks/posture_task.py | tasts-robots/pink | 0 | 12791341 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Posture task specification.
"""
from typing import Optional, Tuple
import numpy as np
from ..configuration import Configuration
from .exceptions import TargetNotSet
from .task import Task
class PostureTask(Task):
"""
Regulate joint angles to a desired posture, *i.e.* a vector of actuated
joint angles. Floating base coordinates are not affected by this task.
Attributes:
cost: joint angular error cost in
:math:`[\\mathrm{cost}] / [\\mathrm{rad}]`.
target_q: Target vector in the configuration space.
A posture task is typically used for regularization as it has a steady
rank. For instance, when Upkie's legs are stretched and the Jacobian of its
contact frames become singular, the posture task will drive the knees
toward a preferred orientation.
"""
cost: float
target_q: Optional[np.ndarray]
def __init__(self, cost: float) -> None:
"""
Create task.
Args:
cost: joint angular error cost in
:math:`[\\mathrm{cost}] / [\\mathrm{rad}]`.
Note:
We assume that the first seven coordinates of the configuration are
for the floating base.
"""
self.cost = cost
self.target_q = None
def set_target(self, target_q: np.ndarray) -> None:
"""
Set task target pose in the world frame.
Args:
target_q: Target vector in the configuration space.
"""
self.target_q = target_q.copy()
def compute_task_dynamics(
self, configuration: Configuration
) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the matrix :math:`J(q)` and vector :math:`\\alpha e(q)` such
that the task dynamics are:
.. math::
J(q) \\Delta q = \\alpha e(q)
        The Jacobian matrix is :math:`J(q) \\in \\mathbb{R}^{(n - 6) \\times n}`,
        with :math:`n` the dimension of the robot's tangent space (the first six
        floating-base coordinates are excluded from the rows), and the error
        vector is :math:`e(q) \\in \\mathbb{R}^{n - 6}`. Both depend on the
        configuration :math:`q` of the robot.
See :func:`Task.compute_task_dynamics` for more context.
Args:
configuration: Robot configuration to read kinematics from.
Returns:
Pair :math:`(J, \\alpha e)` of Jacobian matrix and error vector,
both expressed in the body frame.
"""
if self.target_q is None:
raise TargetNotSet("no posture target")
# TODO(scaron): handle models without floating base joint
jacobian = configuration.tangent.eye[6:, :]
error = (self.target_q - configuration.q)[7:]
return (jacobian, self.gain * error)
def compute_qp_objective(
self, configuration: Configuration
) -> Tuple[np.ndarray, np.ndarray]:
"""
Compute the Hessian matrix :math:`H` and linear vector :math:`c` such
that the contribution of the task to the QP objective is:
.. math::
\\| J \\Delta q - \\alpha e \\|_{W}^2
            = \\frac{1}{2} \\Delta q^T H \\Delta q + c^T \\Delta q
The weight matrix :math:`W \\in \\mathbb{R}^{n \\times n}` weighs and
normalizes task coordinates to the same unit. The unit of the overall
contribution is :math:`[\\mathrm{cost}]^2`. The configuration
displacement :math:`\\Delta q` is the output of inverse kinematics (we
divide it by :math:`\\Delta t` to get a commanded velocity).
Args:
            configuration: Robot configuration to read kinematics from.
Returns:
Pair :math:`(H, c)` of Hessian matrix and linear vector of the QP
objective.
"""
jacobian, error = self.compute_task_dynamics(configuration)
weighted_jacobian = self.cost * jacobian # [cost]
weighted_error = self.cost * error # [cost]
H = weighted_jacobian.T @ weighted_jacobian
c = -weighted_error.T @ weighted_jacobian
return (H, c)
def __repr__(self):
"""
Human-readable representation of the task.
"""
return f"PostureTask(cost={self.cost}, gain={self.gain})"
| 2.484375 | 2 |
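
The compute_qp_objective docstring above describes folding the weighted residual ||J dq - alpha*e||_W^2 into a quadratic objective 0.5 dq^T H dq + c^T dq. A small numerical sketch of that construction with plain numpy and toy values — it stands in for a pink Configuration, which would need a full robot model:

import numpy as np

cost, gain = 10.0, 0.5
jacobian = np.eye(3)                        # stand-in for configuration.tangent.eye[6:, :]
error = gain * np.array([0.1, -0.2, 0.3])   # stand-in for gain * (target_q - q)[7:]

weighted_jacobian = cost * jacobian
weighted_error = cost * error
H = weighted_jacobian.T @ weighted_jacobian
c = -weighted_error.T @ weighted_jacobian

# The unconstrained minimizer of 0.5 dq^T H dq + c^T dq solves H dq = -c,
# which recovers the task dynamics J dq = gain * error:
dq = np.linalg.solve(H, -c)
assert np.allclose(jacobian @ dq, error)
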
software/gen_pixels.py | kbeckmann/Glasgow | 3 | 12791342 | import colorsys
import struct
import math
PIXELS = 94
# interleaved = 1
# interleaved = 2
# interleaved = 4
interleaved = 8
f = open("test_{}.bin".format(interleaved), "wb")
for n in range(1000):
for x in range(PIXELS):
# This way we get a half "rainbow", easy to find breaks/seams
hue = float(x + n/10.) / PIXELS / 2
        # colorsys returns floats; cast to int so struct.pack("B", ...) accepts
        # them under Python 3.
        r, g, b = [int(c) for c in colorsys.hsv_to_rgb(
            hue,
            1,
            16 + 16 * math.sin(2. * math.pi * (5. * -x + n / 3.) / 100.)
        )]
if interleaved == 2:
data1 = struct.pack("BBB", r, g, b)
data2 = struct.pack("BBB", r, 0, 0) # intentionally wrong
for i in range(len(data1)):
cur = 0
                byte1 = data1[i]  # indexing bytes gives an int in Python 3
                byte2 = data2[i]
for j in range(8):
cur |= (byte1 & (2**j)) << (j + 0)
cur |= (byte2 & (2**j)) << (j + 1)
f.write(struct.pack(">H", cur))
elif interleaved == 4:
data1 = struct.pack("BBB", r, g, b)
data2 = struct.pack("BBB", r, 0, 0) # intentionally wrong
data3 = struct.pack("BBB", 0, g, 0) # intentionally wrong
data4 = struct.pack("BBB", 0, 0, b) # intentionally wrong
for i in range(len(data1)):
cur = 0
                byte1 = data1[i]  # indexing bytes gives an int in Python 3
                byte2 = data2[i]
                byte3 = data3[i]
                byte4 = data4[i]
for j in range(8):
cur |= (byte1 & (2**j)) << (j * 3 + 0)
cur |= (byte2 & (2**j)) << (j * 3 + 1)
cur |= (byte3 & (2**j)) << (j * 3 + 2)
cur |= (byte4 & (2**j)) << (j * 3 + 3)
f.write(struct.pack(">L", cur))
elif interleaved == 8:
data1 = struct.pack("BBB", r, g, b)
data2 = struct.pack("BBB", r, 0, 0) # intentionally wrong
data3 = struct.pack("BBB", 0, g, 0) # intentionally wrong
data4 = struct.pack("BBB", 0, 0, b) # intentionally wrong
data5 = struct.pack("BBB", 0, g, b) # intentionally wrong
data6 = struct.pack("BBB", r, g, 0) # intentionally wrong
data7 = struct.pack("BBB", r, 0, b) # intentionally wrong
data8 = struct.pack("BBB", 0, g, b) # intentionally wrong
for i in range(len(data1)):
cur = 0
                byte1 = data1[i]  # indexing bytes gives an int in Python 3
                byte2 = data2[i]
                byte3 = data3[i]
                byte4 = data4[i]
                byte5 = data5[i]
                byte6 = data6[i]
                byte7 = data7[i]
                byte8 = data8[i]
for j in range(8):
cur |= (byte1 & (2**j)) << (j * 7 + 0)
cur |= (byte2 & (2**j)) << (j * 7 + 1)
cur |= (byte3 & (2**j)) << (j * 7 + 2)
cur |= (byte4 & (2**j)) << (j * 7 + 3)
cur |= (byte5 & (2**j)) << (j * 7 + 4)
cur |= (byte6 & (2**j)) << (j * 7 + 5)
cur |= (byte7 & (2**j)) << (j * 7 + 6)
cur |= (byte8 & (2**j)) << (j * 7 + 7)
f.write(struct.pack(">Q", cur))
else:
# No interleaving
f.write(struct.pack("BBB", r, g, b))
f.close()
| 2.359375 | 2 |
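
For the interleaved modes above, bit j of channel k lands at bit position j * interleaved + k of each output word. Below is a short sketch of the inverse operation for the two-channel case, added only to illustrate the packing rule; it is not part of the generator script.

def deinterleave2(word):
    # Split one big-endian 16-bit word back into the two interleaved bytes.
    byte1 = byte2 = 0
    for j in range(8):
        byte1 |= ((word >> (2 * j)) & 1) << j        # even bit positions -> channel 1
        byte2 |= ((word >> (2 * j + 1)) & 1) << j    # odd bit positions  -> channel 2
    return byte1, byte2

# Round-trip check against the packing rule used in the script.
b1, b2 = 0xA5, 0x3C
packed = 0
for j in range(8):
    packed |= (b1 & (1 << j)) << j          # bit j of channel 1 -> position 2j
    packed |= (b2 & (1 << j)) << (j + 1)    # bit j of channel 2 -> position 2j + 1
assert deinterleave2(packed) == (b1, b2)
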
backend/contrib/newsletter_subscribe/views/unsubscribe.py | szkkteam/agrosys | 0 | 12791343 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Common Python library imports
from http import HTTPStatus
# Pip package imports
from flask import render_template, request, jsonify
# Internal package imports
from backend.utils import decode_token
from ..models import NewsletterSubscribe
from ..utils import generate_resubscribe_link
from .blueprint import newsletter_subscribe
@newsletter_subscribe.route('/unsubscribe/<token>', methods=['GET'])
def unsubscribe(token):
email_str = decode_token(token)
if email_str is None:
if not request.is_json:
# Return redirect view
#return redirect(get_url())
return
return jsonify({'errors': 'Invalid token given.'}), HTTPStatus.NOT_FOUND
else:
email = NewsletterSubscribe.get_by(email=email_str)
# Commit only if the user is still active
if email.is_active:
email.is_active = False
email.save(commit=True)
if not request.is_json:
return render_template('newsletter_subscribe/email/confirm_unsubscribe.html',
resubscribe_link=generate_resubscribe_link(email.email))
return jsonify({
'email': email,
'status': 'You are successfully unsubscribed from our mailing list.',
})
| 2.25 | 2 |
tests/run_all_tests.py | GRV96/jazal | 0 | 12791344 | <reponame>GRV96/jazal
from os import system
system("pytest path_util_tests.py")
system("pytest path_checker_tests.py")
system("pytest reactive_path_checker_tests.py")
system("pytest missing_path_arg_warner_tests.py")
| 1.59375 | 2 |
python/cuXfilter/charts/cudatashader/__init__.py | AjayThorve/cuxfilter | 2 | 12791345 | from .cudatashader import scatter_geo, scatter, line, heatmap | 0.96875 | 1 |
src/preprocessing/minmax.py | kjhall01/xcast | 11 | 12791346 | <reponame>kjhall01/xcast
from ..core.utilities import *
import datetime as dt
import xarray as xr
class MinMax:
def __init__(self, min=-1, max=1):
self.range_min, self.range_max = min, max
self.range = max - min
self.min, self.max, self.x_range = None, None, None
def fit(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None):
x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim
X1 = X.isel()
self.min = X1.min(x_sample_dim)
self.max = X1.max(x_sample_dim)
self.x_range = self.max - self.min
self.x_range = self.x_range.where(self.x_range != 0, other=1)
def transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None):
x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
#X1 = X.rename({x_lat_dim: self.lat_dim, x_lon_dim: self.lon_dim, x_sample_dim: self.sample_dim})
self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})
self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})
		self.x_range = self.x_range.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})
self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim
assert self.min is not None and self.max is not None, '{} Must Fit MinMaxScaler before transform'.format(dt.datetime.now())
r = ((X - self.min) / self.x_range) * self.range + self.range_min
r.attrs['generated_by'] = r.attrs['generated_by'] + '\n XCAST MinMax Transform' if 'generated_by' in r.attrs.keys() else '\n XCAST MinMax Transform'
return r
def inverse_transform(self, X, x_lat_dim=None, x_lon_dim=None, x_sample_dim=None, x_feature_dim=None):
x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim = guess_coords(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
check_all(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim)
assert self.min is not None and self.max is not None, '{} Must Fit MinMaxScaler before inverse transform'.format(dt.datetime.now())
self.min = self.min.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})
self.max = self.max.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})
		self.x_range = self.x_range.rename({ self.lat_dim:x_lat_dim, self.lon_dim:x_lon_dim, self.feature_dim:x_feature_dim})
self.sample_dim, self.lat_dim, self.lon_dim, self.feature_dim = x_sample_dim, x_lat_dim, x_lon_dim, x_feature_dim
ret = []
for i in range(X.shape[list(X.dims).index(self.feature_dim)]):
sd = {x_feature_dim: i}
self.max.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]]
self.min.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]]
self.x_range.coords[self.feature_dim] = [X.coords[self.feature_dim].values[i]]
ret.append(((X.isel(**sd) - self.range_min) / self.range) * self.x_range + self.min)
r = xr.concat(ret, self.feature_dim)
r.attrs['generated_by'] = r.attrs['generated_by'] + '\n XCAST MinMax Inverse Transform' if 'generated_by' in r.attrs.keys() else '\n XCAST MinMax Inverse Transform'
return r
| 2.546875 | 3 |
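
Stripped of the xarray dimension bookkeeping, the scaler maps each feature from its observed [min, max] onto [range_min, range_max] and back. A plain-numpy illustration of the same formulas with arbitrary example values:

import numpy as np

range_min, range_max = -1.0, 1.0
x = np.array([2.0, 4.0, 6.0, 10.0])   # samples of one feature at one gridpoint

x_min, x_max = x.min(), x.max()
x_range = (x_max - x_min) or 1.0      # guard against constant features, as in fit()

scaled = (x - x_min) / x_range * (range_max - range_min) + range_min
restored = (scaled - range_min) / (range_max - range_min) * x_range + x_min

assert np.allclose(restored, x)
print(scaled)                         # [-1.  -0.5  0.   1. ]
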
tests/integration/test_configinventory.py | vincentbernat/lldpd | 312 | 12791347 | <filename>tests/integration/test_configinventory.py
import os
import pytest
import platform
import time
import shlex
@pytest.mark.skipif("'LLDP-MED' not in config.lldpd.features",
reason="LLDP-MED not supported")
class TestConfigInventory(object):
def test_configinventory(self, lldpd1, lldpd, lldpcli, namespaces,
replace_file):
with namespaces(2):
if os.path.isdir("/sys/class/dmi/id"):
# /sys/class/dmi/id/*
for what, value in dict(product_version="1.14",
bios_version="1.10",
product_serial="45872512",
sys_vendor="Spectacular",
product_name="Workstation",
chassis_asset_tag="487122").items():
replace_file("/sys/class/dmi/id/{}".format(what),
value)
lldpd("-M", "1")
def test_default_inventory(namespaces, lldpcli):
with namespaces(1):
if os.path.isdir("/sys/class/dmi/id"):
out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
assert out['lldp.eth0.chassis.name'] == 'ns-2.example.com'
assert out['lldp.eth0.lldp-med.inventory.hardware'] == '1.14'
assert out['lldp.eth0.lldp-med.inventory.firmware'] == '1.10'
assert out['lldp.eth0.lldp-med.inventory.serial'] == '45872512'
assert out['lldp.eth0.lldp-med.inventory.manufacturer'] == \
'Spectacular'
assert out['lldp.eth0.lldp-med.inventory.model'] == 'Workstation'
assert out['lldp.eth0.lldp-med.inventory.asset'] == '487122'
assert out['lldp.eth0.lldp-med.inventory.software'] == \
platform.release()
else:
assert 'lldp.eth0.lldp-med.inventory.hardware' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.firmware' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.serial' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.manufacturer' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.model' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.asset' not in out.items()
assert 'lldp.eth0.lldp-med.inventory.software' not in out.items()
test_default_inventory(namespaces, lldpcli)
custom_values = [
('hardware-revision', 'hardware', 'SQRT2_1.41421356237309504880'),
('software-revision', 'software', 'E_2.7182818284590452354'),
('firmware-revision', 'firmware', 'PI_3.14159265358979323846'),
('serial', 'serial', 'FIBO_112358'),
('manufacturer', 'manufacturer', 'Cybertron'),
('model', 'model', 'OptimusPrime'),
('asset', 'asset', 'SQRT3_1.732050807568877')
]
with namespaces(2):
for what, pfx, value in custom_values:
result = lldpcli(
*shlex.split("configure inventory {} {}".format(what, value)))
assert result.returncode == 0
result = lldpcli("resume")
assert result.returncode == 0
result = lldpcli("update")
assert result.returncode == 0
time.sleep(3)
with namespaces(1):
out = lldpcli("-f", "keyvalue", "show", "neighbors", "details")
for what, pfx, value in custom_values:
key_to_find = "lldp.eth0.lldp-med.inventory.{}".format(pfx)
assert out[key_to_find] == value
with namespaces(2):
for what, pfx, value in custom_values:
result = lldpcli(
*shlex.split("unconfigure inventory {}".format(what)))
assert result.returncode == 0
result = lldpcli("resume")
assert result.returncode == 0
result = lldpcli("update")
assert result.returncode == 0
test_default_inventory(namespaces, lldpcli)
| 2.09375 | 2 |
tests/testapp/models.py | pawnhearts/django-reactive | 21 | 12791348 | <gh_stars>10-100
from django.db import models
from django_reactive.fields import ReactJSONSchemaField
def modify_max_length(schema, ui_schema):
import random
max_length = random.randint(20, 30)
schema["properties"]["test_field"]["maxLength"] = max_length
ui_schema["test_field"]["ui:help"] = f"Max {max_length}"
def modify_help_text(schema, ui_schema, instance=None):
if instance:
if instance.is_some_condition:
ui_schema["test_field"]["ui:help"] = "Condition is set"
else:
ui_schema["test_field"]["ui:help"] = "Condition is unset"
class RenderMethodWithObjectSchemaModel(models.Model):
is_some_condition = models.BooleanField(default=True)
json_field = ReactJSONSchemaField(
schema={
"title": "TestSchema",
"type": "object",
"required": ["test_field"],
"properties": {
"test_field": {
"type": "string",
"maxLength": 10,
"minLength": 5,
},
"another_test_field": {
"type": "string",
},
},
"additionalProperties": False,
},
ui_schema={
"test_field": {"ui:help": "Max 10"},
},
on_render=modify_help_text,
)
class RenderMethodSchemaModel(models.Model):
json_field = ReactJSONSchemaField(
schema={
"title": "TestSchema",
"type": "object",
"required": ["test_field"],
"properties": {
"test_field": {
"type": "string",
"maxLength": 10,
"minLength": 5,
},
"another_test_field": {
"type": "string",
},
},
"additionalProperties": False,
},
ui_schema={
"test_field": {"ui:help": "Max 10"},
},
on_render=modify_max_length,
)
class SchemaModel(models.Model):
json_field = ReactJSONSchemaField(
schema={
"title": "TestSchema",
"type": "object",
"required": ["test_field"],
"properties": {
"test_field": {
"type": "string",
"maxLength": 10,
"minLength": 5,
},
"another_test_field": {
"type": "string",
},
},
"additionalProperties": False,
}
)
class OptionalSchemaModel(models.Model):
json_field = ReactJSONSchemaField(
schema={
"type": "object",
"required": ["test_field"],
"properties": {"test_field": {"type": "string"}},
},
blank=True,
)
class ExtraMediaSchemaModel(models.Model):
json_field = ReactJSONSchemaField(
schema={
"type": "object",
"required": ["test_field"],
"properties": {"test_field": {"type": "string"}},
},
blank=True,
extra_css=["path/to/my/css/file.css"],
extra_js=["path/to/my/js/file.js"],
)
| 2.0625 | 2 |
backend/application.py | RMDev97/tensorboard-extensions | 0 | 12791349 | <reponame>RMDev97/tensorboard-extensions
import os
from tensorboard.plugins import base_plugin
from tensorboard.backend.event_processing import plugin_event_accumulator
from tensorboard.backend import application
from tensorboard.backend.event_processing import plugin_event_multiplexer
from .logging import _logger
import io_helpers
def gr_tensorboard_wsgi(flags, plugin_loaders, assets_zip_provider):
size_guidance = {plugin_event_accumulator.TENSORS: 50}
run_path_map = _getRunPathMapFromLogdir(flags.logdir, flags.enable_first_N_runs)
_logger.log_message_info("loading EventMultiplexer with the %d most recent runs enabled by default" % flags.enable_first_N_runs)
gr_multiplexer = plugin_event_multiplexer.EventMultiplexer(run_path_map=run_path_map,
size_guidance=size_guidance,
tensor_size_guidance=None,
purge_orphaned_data=True,
max_reload_threads=flags.max_reload_threads)
_logger.log_message_info("Done loading EventMultiplexer")
_logger.log_message_info("Loading all plugins.")
plugin_name_to_instance = {}
context = base_plugin.TBContext(
flags=flags,
logdir=flags.logdir,
multiplexer=gr_multiplexer,
assets_zip_provider=assets_zip_provider,
plugin_name_to_instance=plugin_name_to_instance,
window_title=flags.window_title)
plugins = []
for loader in plugin_loaders:
plugin = loader.load(context)
if plugin is None:
continue
plugins.append(plugin)
plugin_name_to_instance[plugin.plugin_name] = plugin
_logger.log_message_info("Done loading all plugins, now launching the tensorboard application")
return application.TensorBoardWSGI(plugins, flags.path_prefix)
def _getRunPathMapFromLogdir(logdir, most_recent_num):
if most_recent_num == 0:
return {}
elif most_recent_num > 0:
dir_list = sorted(io_helpers.get_run_paths(logdir), key=os.path.getmtime)
num_files = min(most_recent_num, len(dir_list))
return {os.path.relpath(path, logdir): path for path in dir_list[-num_files:]}
else:
return {os.path.relpath(path, logdir): path for path in io_helpers.get_run_paths(logdir)}
| 1.835938 | 2 |
pymanip/legacy_session/octmi_dat.py | ctoupoin/pymanip | 0 | 12791350 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import numpy as np
def load_octmi_dat(acquisitionName, basePath="."):
    # Check that the data file exists
datFilePath = os.path.join(os.path.normpath(basePath), acquisitionName + "_MI.dat")
if not os.path.exists(datFilePath):
print("Could not stat file", datFilePath)
raise NameError("File does not exist")
    # Count the number of records and capture the header (variable list)
nval = 0
variableList = ""
with open(datFilePath, "r") as f:
for line in f:
if line[0] == "T":
if line != variableList:
variableList = line
# print variableList
else:
nval = nval + 1
variableList = variableList.split(" ")
dictionnaire = dict()
dictionnaire["nval"] = nval
if nval > 1:
for i in range(len(variableList)):
dictionnaire[variableList[i].strip()] = np.zeros(nval)
linenum = 0
with open(datFilePath, "r") as f:
for line in f:
contentList = line.split(" ")
if contentList[0] != "Time":
if nval == 1:
for i in range(len(variableList)):
dictionnaire[variableList[i].strip()] = eval(
contentList[i].strip()
)
else:
for i in range(len(variableList)):
if i < len(contentList):
dataStr = contentList[i].strip()
if dataStr.lower() == "nan":
dictionnaire[variableList[i].strip()][linenum] = np.nan
else:
dictionnaire[variableList[i].strip()][linenum] = eval(
contentList[i].strip()
)
else:
dictionnaire[variableList[i].strip()][linenum] = np.nan
linenum = linenum + 1
return dictionnaire
| 2.375 | 2 |
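
A hypothetical usage sketch for load_octmi_dat — the acquisition name and base path below are placeholders; only "nval" and the header variables (such as "Time") are keys the loader actually creates:

data = load_octmi_dat("Run_example", basePath="/data/manip")

print(data["nval"], "records")
if data["nval"] > 1:
    # Each header variable becomes a NumPy array of length nval.
    t = data["Time"]
    print("first/last timestamps:", t[0], t[-1])
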