code | apis | extract_api
---|---|---|
# coding: gbk
import kNN
import numpy as np
################################################################################
#                                                                              #
#                                Dating website                               #
#                                                                              #
################################################################################
#group, labels = kNN.createDataSet()
#tlabel = kNN.classify0([0, 0], group, labels, 3)
#print(tlabel)
# Load the dating data set
datingDataMat, datingLabels = kNN.file2matrix('./data/datingTestSet2.txt')
# Create a scatter plot with Matplotlib
import matplotlib
import matplotlib.pyplot as plt
#fig = plt.figure()
#ax = fig.add_subplot(111)
#ax.scatter(datingDataMat[:,1],
# datingDataMat[:,2],
# 15.0 * np.array(datingLabels),
# 15.0 * np.array(datingLabels)
# )
#plt.xlabel('玩游戏视频所耗时间百分比', fontproperties='SimHei')
#plt.ylabel('每周消费的冰淇淋公升数', fontproperties='SimHei')
#plt.show()
# Get the distinct class labels
all_cls = np.unique(datingLabels)
fig = plt.figure()
ax = fig.add_subplot(111)
# Colors for plotting
color = ['Blue', 'Yellow', 'Red']
# Plot each class separately
sca = [None] * len(all_cls)
for i, cls in enumerate(all_cls):
    sca[i] = ax.scatter(datingDataMat[datingLabels == cls, 0],
                        datingDataMat[datingLabels == cls, 1],
                        15.0 * np.array(datingLabels[datingLabels == cls]),
                        color[i % 3])
plt.xlabel('每年获取的飞行常客里程数')
plt.ylabel('玩游戏视频所耗时间百分比')
plt.legend(tuple(sca), ('不喜欢', '魅力一般', '极具魅力'))
plt.savefig('image/fig1.png')
# Normalization
#normMat, ranges, minVals = autoNorm(datingDataMat)
# Classifier test
#kNN.datingClassTest()
# Dating-site prediction
#kNN.classifyPerson()
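# For reference, a minimal sketch of what a classify0-style k-NN vote could look like.
# The actual kNN module imported above is not shown here, so treat this as an
# assumption about its behaviour (Euclidean distance, majority vote over the k nearest
# points); knn_vote_sketch is a hypothetical name, not part of the kNN module.
def knn_vote_sketch(inX, dataSet, labels, k):
    # Euclidean distances from inX to every row of dataSet
    dists = ((dataSet - inX) ** 2).sum(axis=1) ** 0.5
    nearest = dists.argsort()[:k]          # indices of the k closest samples
    votes = {}
    for idx in nearest:
        label = labels[idx]
        votes[label] = votes.get(label, 0) + 1
    return max(votes, key=votes.get)       # label with the most votes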
| [
"matplotlib.pyplot.savefig",
"numpy.unique",
"matplotlib.pyplot.ylabel",
"kNN.file2matrix",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.figure"
] | [((594, 638), 'kNN.file2matrix', 'kNN.file2matrix', (['"""./data/datingTestSet2.txt"""'], {}), "('./data/datingTestSet2.txt')\n", (609, 638), False, 'import kNN\n'), ((1059, 1082), 'numpy.unique', 'np.unique', (['datingLabels'], {}), '(datingLabels)\n', (1068, 1082), True, 'import numpy as np\n'), ((1090, 1102), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1100, 1102), True, 'import matplotlib.pyplot as plt\n'), ((1487, 1513), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""每年获取的飞行常客里程数"""'], {}), "('每年获取的飞行常客里程数')\n", (1497, 1513), True, 'import matplotlib.pyplot as plt\n'), ((1514, 1540), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""玩游戏视频所耗时间百分比"""'], {}), "('玩游戏视频所耗时间百分比')\n", (1524, 1540), True, 'import matplotlib.pyplot as plt\n'), ((1589, 1618), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""image/fig1.png"""'], {}), "('image/fig1.png')\n", (1600, 1618), True, 'import matplotlib.pyplot as plt\n'), ((1402, 1445), 'numpy.array', 'np.array', (['datingLabels[datingLabels == cls]'], {}), '(datingLabels[datingLabels == cls])\n', (1410, 1445), True, 'import numpy as np\n')] |
import yoda
import matplotlib as mpl
import numpy as np
import math
#from termcolor import colored
# Best fit values:
# cpB = -0.22 cpW = 0.04 cpWB = 0.12 cpd = 0.85 cpD = -0.26 c3W = 0.21
# Note the sign of cpd needs to be inverted to match our conventions (however it's not included at the moment)
#
#### Top of bands ####
# cpB = 0.29 cpW = 0.68 cpWB = 0.69 cpd = 3.69 cpD = 0.97 c3W = 1.46
#
#### Bottom of bands ####
# cpB = -0.74 cpW = -0.59 cpWB = -0.46 cpd = -2.0 cpD = -1.5 c3W = -1.05
#
#
# Despoina
dirs = ['WZ/despoina/Inclusive/SM/', 'WZ/despoina/Inclusive/Inclusive_yodas_BSM1/', 'WZ/despoina/Inclusive/Inclusive_yodas_BSM2/']
analysis = ['/TESTDET/', '/TESTDET_BSM1/', '/TESTDET_BSM2/']
operators = [ 'sm', 'cW' , 'cHW', 'cHB', 'cHDD', 'cHWB'] #, 'cHWtil', 'cHBtil', 'cHWBtil','cWtil']
best_fit_vals = [1 , 0.21, 0.04 , -0.22 , -0.26 , 0.12 ] #, 1, 1, 1, 1 ]
top_bands = [ 1 , 1.46, 0.68 , 0.29 , 0.97, 0.69 ]
bottom_bands = [ 1, -1.05, -0.59, -0.74 , -1.5 , -0.46 ]
#
files = ['d12-x01-y01', 'd08-x01-y01','d10-x01-y01','d14-x01-y01', 'd16-x01-y01' ]
distribs = ['MT_WZ', 'PT_Z', 'PT_W','Delta Phi WZ' , 'PT_nu']
# the distribs array is hard-coded, obtained by comparing the HEPData entries with the
# labeling in the yoda files, or directly with the rivet analysis.cc file (See )
# missing distribs: 'd05-x01-y01' -> 'fid_XS_ratio'
# define sensitivity, to circumvent many 0/0 results
def sensit(x, y):  # x and y are arrays
    sensit_array = np.empty(len(x))
    for i in range(len(x)):
        # if (x[i] == 0 and y[i] != 0) or (x[i] != 0 and y[i] == 0):
        #     print("weird: EFT=", x[i], ' SM=', y[i])
        if x[i] == 0 or y[i] == 0:
            sensit_array[i] = 0.0  # avoid the 0/0 case (math.pi was used here as a test value)
        else:
            sensit_array[i] = (x[i]/y[i]).round(decimals=3)
    return sensit_array
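# An equivalent vectorised form of sensit (a sketch, not used below): np.divide with
# out/where handles the 0/0 cases in one call instead of an explicit Python loop.
# sensit_vec is a hypothetical alternative name.
def sensit_vec(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    # entries where x or y is zero keep the initial 0.0 from `out`
    ratio = np.divide(x, y, out=np.zeros_like(x), where=(x != 0) & (y != 0))
    return ratio.round(decimals=3)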
# First print total XSEC, then loop over the other distributions
print('########### Total XSEC: ##########################' )
print('\n ### we first print the SM as a check ### ')
# now loop over operators:
for op, bf, tb, bb in zip(operators, best_fit_vals, top_bands, bottom_bands):
    print('Operator: ' + op + ", best fit val: " + str(bf))
    for dir, an in zip(dirs, analysis):
        hist_sm = yoda.read(dir+'sm.yoda')[an + files[0]]  # any histo is fine, we take 0 for example
        vals_sm = hist_sm.areas()
        filename = yoda.read(dir + op + '.yoda')
        hist = filename[an + files[0]]  # any histo is fine, we take 0 for example
        vals_lo = (hist.areas())
        print(an + " Total XS (in fb)", np.sum(vals_lo).round(decimals=3),
              " SM XS (in fb)", np.sum(vals_sm).round(decimals=3),
              " ratio to SM (in %, for c=10): ", str(int((np.sum(vals_lo)/np.sum(vals_sm) - 1)*100)) + "%")
        print("\t \t Best fit ratio: (for c=" + str(bf) + ")", str(int(((np.sum(vals_lo)/np.sum(vals_sm) - 1)*(bf/10))*100)) + "%",
              "Top bands: (For c=" + str(tb) + ")", str(int(((np.sum(vals_lo)/np.sum(vals_sm) - 1)*(tb/10))*100)) + "%",
              "Bottom bands: (For c=" + str(bb) + ")", str(int(((np.sum(vals_lo)/np.sum(vals_sm) - 1)*(bb/10))*100)) + "%",
              )
    print('\n \n')
#loop over distributions
for i in range(len(files)):
    print('########### Distribution: ' + distribs[i] + ' ##########################')
    print('\n ### we first print the SM as a check ### \n ')
    for op, bf, tb, bb in zip(operators, best_fit_vals, top_bands, bottom_bands):
        print('Operator: ' + op, "best fit val: ", bf)
        for dir, an in zip(dirs, analysis):
            hist_sm = yoda.read(dir+'sm.yoda')[an + files[i]]
            vals_sm = hist_sm.areas()
            filename = yoda.read(dir + op + '.yoda')
            hist = filename[an + files[i]]
            vals_lo = hist.areas()
            print(an + " vals linear EFT", vals_lo.round(decimals=3))
            print(an + " Sensitivities (in %, for c=10): ", (sensit(vals_lo, vals_sm)-1)*100)
            print(an + " Best fit Sensitivities (in %, for c=" + str(bf) + ")", ((sensit(vals_lo, vals_sm)-1)*(bf/10)*100).round(decimals=0))
            print(an + " Error bands (up) (in % around the best fit, for c=" + str(tb) + ")", ((sensit(vals_lo, vals_sm)-1)*100*(tb/10)).round(decimals=0))
            print(an + " Error bands (down) (in % around the best fit, for c=" + str(bb) + ")", ((sensit(vals_lo, vals_sm)-1)*100*(bb/10)).round(decimals=0))
        print("\n")
    print('\n \n')
quit()
| [
"numpy.sum",
"yoda.read"
] | [((2428, 2457), 'yoda.read', 'yoda.read', (["(dir + op + '.yoda')"], {}), "(dir + op + '.yoda')\n", (2437, 2457), False, 'import yoda\n'), ((2293, 2319), 'yoda.read', 'yoda.read', (["(dir + 'sm.yoda')"], {}), "(dir + 'sm.yoda')\n", (2302, 2319), False, 'import yoda\n'), ((3791, 3820), 'yoda.read', 'yoda.read', (["(dir + op + '.yoda')"], {}), "(dir + op + '.yoda')\n", (3800, 3820), False, 'import yoda\n'), ((3688, 3714), 'yoda.read', 'yoda.read', (["(dir + 'sm.yoda')"], {}), "(dir + 'sm.yoda')\n", (3697, 3714), False, 'import yoda\n'), ((2616, 2631), 'numpy.sum', 'np.sum', (['vals_lo'], {}), '(vals_lo)\n', (2622, 2631), True, 'import numpy as np\n'), ((2684, 2699), 'numpy.sum', 'np.sum', (['vals_sm'], {}), '(vals_sm)\n', (2690, 2699), True, 'import numpy as np\n'), ((2778, 2793), 'numpy.sum', 'np.sum', (['vals_lo'], {}), '(vals_lo)\n', (2784, 2793), True, 'import numpy as np\n'), ((2794, 2809), 'numpy.sum', 'np.sum', (['vals_sm'], {}), '(vals_sm)\n', (2800, 2809), True, 'import numpy as np\n'), ((2902, 2917), 'numpy.sum', 'np.sum', (['vals_lo'], {}), '(vals_lo)\n', (2908, 2917), True, 'import numpy as np\n'), ((2918, 2933), 'numpy.sum', 'np.sum', (['vals_sm'], {}), '(vals_sm)\n', (2924, 2933), True, 'import numpy as np\n'), ((3026, 3041), 'numpy.sum', 'np.sum', (['vals_lo'], {}), '(vals_lo)\n', (3032, 3041), True, 'import numpy as np\n'), ((3042, 3057), 'numpy.sum', 'np.sum', (['vals_sm'], {}), '(vals_sm)\n', (3048, 3057), True, 'import numpy as np\n'), ((3164, 3179), 'numpy.sum', 'np.sum', (['vals_lo'], {}), '(vals_lo)\n', (3170, 3179), True, 'import numpy as np\n'), ((3180, 3195), 'numpy.sum', 'np.sum', (['vals_sm'], {}), '(vals_sm)\n', (3186, 3195), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Main module."""
from aintq.db import db
from aintq.task import Task
from aintq.utils import pickle_data, generate_task_name
from aintq.worker import AintQConsumer
class Aintq(object):
    bind = None
    registry = {}

    def __init__(self, bind=None):
        self.bind = bind

    async def init(self):
        if self.bind:
            await db.set_bind(self.bind)

    def create_consumer(self, **options):
        return AintQConsumer(self, **options)

    def task(self, **kwargs):
        def decorator(func):
            return TaskWrapper(
                self,
                func.func if isinstance(func, TaskWrapper) else func,
                **kwargs)
        return decorator

    def register(self, func):
        self.registry[generate_task_name(func)] = func

    async def execute(self, func, *args, **kwargs):
        async with db.transaction():
            await Task.create(
                name=generate_task_name(func),
                params=pickle_data(*args, **kwargs)
            )


class TaskWrapper(object):
    def __init__(self, aintq, func, **settings):
        self.aintq = aintq
        self.func = func
        self.settings = settings
        self.aintq.register(func)

    async def __call__(self, *args, **kwargs):
        await self.aintq.execute(self.func, *args, **kwargs)
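# A minimal usage sketch (an assumption, not shipped with this module): bind Aintq to a
# database, register a coroutine as a task, and enqueue one call. The DSN below is a
# placeholder, and Task.create() needs a reachable database to actually succeed.
if __name__ == '__main__':
    import asyncio

    aintq = Aintq(bind='postgresql://localhost/aintq')  # hypothetical DSN

    @aintq.task()
    async def add(a, b):
        return a + b

    async def main():
        await aintq.init()   # binds the db object when a DSN was given
        await add(1, 2)      # stores the call as a Task row via Aintq.execute

    asyncio.run(main())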
| [
"aintq.db.db.set_bind",
"aintq.worker.AintQConsumer",
"aintq.utils.pickle_data",
"aintq.utils.generate_task_name",
"aintq.db.db.transaction"
] | [((457, 487), 'aintq.worker.AintQConsumer', 'AintQConsumer', (['self'], {}), '(self, **options)\n', (470, 487), False, 'from aintq.worker import AintQConsumer\n'), ((776, 800), 'aintq.utils.generate_task_name', 'generate_task_name', (['func'], {}), '(func)\n', (794, 800), False, 'from aintq.utils import pickle_data, generate_task_name\n'), ((881, 897), 'aintq.db.db.transaction', 'db.transaction', ([], {}), '()\n', (895, 897), False, 'from aintq.db import db\n'), ((376, 398), 'aintq.db.db.set_bind', 'db.set_bind', (['self.bind'], {}), '(self.bind)\n', (387, 398), False, 'from aintq.db import db\n'), ((951, 975), 'aintq.utils.generate_task_name', 'generate_task_name', (['func'], {}), '(func)\n', (969, 975), False, 'from aintq.utils import pickle_data, generate_task_name\n'), ((1000, 1028), 'aintq.utils.pickle_data', 'pickle_data', (['*args'], {}), '(*args, **kwargs)\n', (1011, 1028), False, 'from aintq.utils import pickle_data, generate_task_name\n')] |
import sys, re
import numpy as np
import pandas as pd
import lammps_extract
import matplotlib.pyplot as plt
import math
def round_up(n):
    # round n up to the next multiple of 10 (used to pad the y-axis limit)
    multiplier = 10 ** -1
    return math.ceil(n * multiplier)/multiplier
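# A more general variant (hypothetical, not used below) that rounds up at an arbitrary
# power of ten; round_up above corresponds to decimals=-1, i.e. "up to the next 10".
# Example: round_up_to(123.4, -1) -> 130.0, round_up_to(0.123, 2) -> 0.13.
def round_up_to(n, decimals=0):
    multiplier = 10 ** decimals
    return math.ceil(n * multiplier) / multiplier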
def cpu_v_gpu(data):
    # check that the correct data is given as input
cpu_gpu_df = pd.DataFrame()
group = data.groupby(data['Configuration'])
cpu = group.get_group("CPU")
gpu = group.get_group("GPU")
gpu = gpu.reset_index()
kkgpu = group.get_group("Kokkos/GPU")
kkgpu = kkgpu.reset_index()
#speed_up = pd.DataFrame
cpu_gpu_df['# nodes'] = cpu['# nodes']
cpu_gpu_df['# CPU Performance'] = cpu['Performance']
cpu_gpu_df['# GPU Performance'] = gpu['Performance']
cpu_gpu_df['# Kokkos/GPU Performance'] = kkgpu['Performance']
speed_up_gpu = gpu['Performance']/cpu['Performance']
speed_up_kkgpu = kkgpu['Performance']/cpu['Performance']
#print(speed_up_gpu, speed_up_kkgpu)
# cpu_gpu_df = pd.DataFrame()
#print(math.ceil(max(data['Performance'])))
fig, ax1 = plt.subplots(figsize=(8,6))
ax1.plot(cpu['# nodes'], cpu['Performance'], 'bo-', label="CPU", linewidth=1.5, markersize=5)
ax1.plot(gpu['# nodes'], gpu['Performance'], 'gv-', label="GPU Package", linewidth=1.5, markersize=7.5)
ax1.plot(kkgpu['# nodes'], kkgpu['Performance'], 'r*-', label="Kokkos/GPU Package", linewidth=1.5, markersize=7.5)
ax1.set_ylim(0, (round_up(max(data['Performance'])+1)))
ax1.grid(color='k', linestyle='--', linewidth=1, alpha=0.2)
ax1.set_xlim(1, (max(cpu['# nodes'])+0.01))
ax1.set_xlabel("Number of nodes", fontsize=12, fontname="Arial")
ax1.set_ylabel("Performance (timesteps/second)", fontsize=12, fontname="Arial")
ax2 = ax1.twinx()
#print(len(gpu['Performance']), len(cpu['# nodes']))
ax2.plot(cpu['# nodes'], speed_up_gpu, 'gv', linestyle="dashed", label="Speed-up for GPU Package", linewidth=1.5, markersize=7.5)
ax2.plot(cpu['# nodes'], speed_up_kkgpu, 'r*', linestyle="dashed", label="Speed-up for Kokkos/GPU Package", linewidth=1.5, markersize=7.5)
ax2.set_ylabel("Speed up factor", fontsize=12, fontname="Arial")
ax2.set_ylim(0,10)
h1, l1 = ax1.get_legend_handles_labels()
h2, l2 = ax2.get_legend_handles_labels()
ax1.legend(h1+h2, l1+l2, loc=2, ncol=1)
    fig.suptitle("11 million atom Lennard-Jones system, Intel Xeon E5-2680 v3 Haswell CPU 2x12 cores \n w. 4 K80 GPUs/node and Mellanox EDR InfiniBand network.\nLAMMPS 3Mar20, Intel Compiler and CUDA", fontsize=10)
fig.savefig("CPU_v_GPU.png")
def gpu_perf(data):
# new pd.DataFrame created to avoid SettingWithCopyWarning
gpu_df = pd.DataFrame()
data = data.sort_values(['# atoms', '# MPI tasks'])
data = data.reset_index()
group = data.groupby(data['# atoms'])
_4K = group.get_group(4000)
del _4K['index']
_256K = group.get_group(256000)
_256K = _256K.reset_index()
del _256K['index']
_11M = group.get_group(10976000)
_11M = _11M.reset_index()
del _11M['index']
lengths = [len(_4K), len(_256K), len(_11M)]
# checks that there are equal number of input gpu files per # atoms
if all(i == lengths[0] for i in lengths) == False:
raise Exception("Number of input GPU files do not match for normalisation plot.")
gpu_df['4K MPI tasks'] = _4K['# MPI tasks']
gpu_df['4K Performance'] = _4K['Performance']
gpu_df['4K Normalised'] = gpu_df['4K Performance']/gpu_df['4K Performance'].max()
gpu_df['256K MPI tasks'] = _256K['# MPI tasks']
gpu_df['256K Performance'] = _256K['Performance']
gpu_df['256K Normalised'] = gpu_df['256K Performance']/gpu_df['256K Performance'].max()
gpu_df['11M MPI tasks'] = _11M['# MPI tasks']
gpu_df['11M Performance'] = _11M['Performance']
gpu_df['11M Normalised'] = gpu_df['11M Performance']/gpu_df['11M Performance'].max()
fig, ax = plt.subplots()
x_ticks = []
num_gpu = 4
for i in _4K['# MPI tasks']:
string = str(num_gpu) + "gpu" + str(i) + "proc"
x_ticks.append(string)
ax.plot(gpu_df['4K MPI tasks'], gpu_df['4K Normalised'], 'bo-', label="4K atoms", linewidth=0.75, markersize=5)
ax.plot(gpu_df['256K MPI tasks'], gpu_df['256K Normalised'], 'gv-', label="256K atoms", linewidth=0.75, markersize=5)
ax.plot(gpu_df['11M MPI tasks'], gpu_df['11M Normalised'], 'r*-', label="11M atoms", linewidth=0.75, markersize=5)
ax.set_xticks(gpu_df['4K MPI tasks'])
ax.set_xlabel("Number of cores", fontsize=12, fontname="Arial")
ax.set_ylabel("Normalised speed-up factor per node", fontsize=12, fontname="Arial")
ax.legend()
ax.set_xticklabels(x_ticks)
ax.grid(color='k', linestyle='--', linewidth=1, alpha=0.2)
    fig.suptitle("Lennard-Jones system on Intel Xeon E5-2680 v3 Haswell CPU 2x12 Cores \n w. x4 NVIDIA K80 GPUs/node, Mellanox EDR InfiniBand network. \n LAMMPS 3Mar20, Intel compiler and CUDA", fontsize=10)
fig.savefig("GPU_performance.png")
def scaling_rhodopsin(data):
if data['Configuration'].str.contains("CPU").any():
raise Exception("This contains LJ input files. Provide Rhodopsin input files.")
if data['Configuration'].str.contains("GPU").any():
raise Exception("This contains LJ input files. Provide Rhodopsin input files.")
if data['Configuration'].str.contains("Kokkos/GPU").any():
raise Exception("This contains LJ input files. Provide Rhodopsin input files.")
scaling = pd.DataFrame()
scaling_str = []
for col in data.columns:
if col == "Performance":
break
scaling_str.append(col)
for s in scaling_str:
scaling['Sp_'+s] = data[s][0]/data[s]
scaling['MPI tasks'] = data['# MPI tasks']
fig, ax = plt.subplots()
ax.plot(scaling['MPI tasks'], scaling['Sp_Pair'], color='tab:blue', linestyle='-', marker='v', label='Pair', linewidth=1.25, markersize=2)
ax.plot(scaling['MPI tasks'], scaling['Sp_Bond '], color='tab:green', linestyle='-', marker='v', label='Bond', linewidth=1.25, markersize=2)
ax.plot(scaling['MPI tasks'], scaling['Sp_Kspace '], color='tab:orange', linestyle='-', marker='v', label='Kspace', linewidth=1.25, markersize=2)
ax.plot(scaling['MPI tasks'], scaling['Sp_Neigh '], color='tab:red', linestyle='-', marker='v', label='Neigh', linewidth=1.25, markersize=2)
ax.plot(scaling['MPI tasks'], scaling['Sp_Comm'], color='tab:cyan', linestyle='-', marker='v', label='Comm', linewidth=1.25, markersize=2)
#ax.plot(scaling['MPI tasks'], scaling['Sp_Output'], 'yo:', label='Output', linewidth=0.75, markersize=5)
ax.plot(scaling['MPI tasks'], scaling['Sp_Modify'], color='tab:olive', linestyle='-', marker='v', label='Modify', linewidth=1.25, markersize=2)
ax.plot(scaling['MPI tasks'], scaling['Sp_Other'], color='darkorchid', linestyle='-', marker='v', label='Other', linewidth=1.25, markersize=2)
ax.plot(scaling['MPI tasks'], scaling['Sp_Wall'], 'ko', linestyle='dashed', label='Walltime', linewidth=1, markersize=4)
ax.set_xlabel("Number of cores", fontsize=12, fontname="Arial")
ax.set_ylabel("Speed-up factor", fontsize=12, fontname="Arial")
ax.set_xlim(0,400)
ax.set_ylim(0,400)
plt.legend()
ax.grid(color='k', linestyle='--', linewidth=1, alpha=0.2)
fig.suptitle("Speed-up factor for Rhodopsin system of 32K atoms", fontsize=12, y=0.92)
fig.savefig("Rhodopsin_scaling.png")
#print(scaling)
def omp_pe_rhodopsin(data, serial_run=7019):
#print(data, serial_run)
rhodo_pe_df = pd.DataFrame()
fig, ax = plt.subplots()
#data.to_csv('file.csv')
group = data.groupby(data['Configuration'])
#if "MPI" in data['Configuration'].values:
mpi_group = group.get_group("MPI")
mpi_group = mpi_group[(mpi_group['# MPI tasks'] % 40 == 0)]
mpi_group = mpi_group.reset_index()
rhodo_pe_df['Nodes'] = mpi_group['# nodes']
rhodo_pe_df['MPI_only_pe'] = (1.0/mpi_group['# nodes']/40)*(serial_run/mpi_group['Wall']*100)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['MPI_only_pe'], 'ko-', label='MPI-only', linewidth=3, markersize=4)
#print(rhodo_pe_df)
#if "OMP" in data['Configuration'].values:
omp_group = group.get_group("OMP")
omp_group = omp_group.sort_values(['# OMP threads', '# nodes'])
omp_group = omp_group.reset_index()
nums = [1, 2, 4, 5, 8, 10, 20, 40]
rev_nums = nums[::-1]
omp = omp_group.groupby("# OMP threads")
for i in range(0, len(nums)):
globals()['omp%s' % nums[i]] = omp.get_group(nums[i])
globals()['omp%s' % nums[i]] = globals()['omp%s' % nums[i]].reset_index()
del globals()['omp%s' % nums[i]]['index']
del globals()['omp%s' % nums[i]]['level_0']
rhodo_pe_df[str('%s_MPI_' % rev_nums[i])+str(nums[i])+'_OMP_pe'] = (1.0/globals()['omp%s' % nums[i]]['# nodes']/40)*(serial_run/globals()['omp%s' % nums[i]]['Wall']*100)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['1_MPI_40_OMP_pe'], color='tab:blue', linestyle='-', marker='o', label='1 MPI x 40 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['2_MPI_20_OMP_pe'], color='tab:green', linestyle='-', marker='v', label='2 MPI x 20 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['4_MPI_10_OMP_pe'], color='tab:orange', linestyle='-', marker='^', label='4 MPI x 10 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['5_MPI_8_OMP_pe'], color='tab:red', linestyle='-', marker='>', label='5 MPI x 8 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['8_MPI_5_OMP_pe'], color='tab:cyan', linestyle='-', marker='<', label='8 MPI x 5 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['10_MPI_4_OMP_pe'], color='tab:olive', linestyle='-', marker='*', label='10 MPI x 4 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['20_MPI_2_OMP_pe'], color='darkorchid', linestyle='-', marker='X', label='20 MPI x 2 OpenMP', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['40_MPI_1_OMP_pe'], color='peru', linestyle='-', marker='D', label='40 MPI x 1 OpenMP', linewidth=1.25, markersize=4)
ax.grid(color='k', linestyle='--', linewidth=1, alpha=0.2)
ax.set_ylim(0,100)
ax.set_xlim(0.5,(rhodo_pe_df['Nodes'].max()+0.5))
ax.legend(ncol=2, loc=1, fontsize=8)
ax.set_xlabel("Number of nodes", fontsize=12, fontname="Arial")
ax.set_ylabel("Parallel efficiency (%)", fontsize=12, fontname="Arial")
fig.suptitle("Intel Xeon Gold (Skylake) processors with 2x20-core 2.4 GHz,\n192 GB RAM Rhodopsin system (32K atoms), lj/charmm/coul/long\n+ PPPM with USER-OMP (Intel compiler 2019u5, GCC 8.2.0)", fontsize=11, y=0.99)
fig.savefig("Rhodopsin_omp_pe.png")
def kokkos_omp_pe_rhodopsin(data, serial_run=7019):
#if "Kokkos/OMP" in data['Configuration'].values:
rhodo_pe_df = pd.DataFrame()
fig, ax = plt.subplots()
#data.to_csv('file.csv')
group = data.groupby(data['Configuration'])
#if "MPI" in data['Configuration'].values:
mpi_group = group.get_group("MPI")
mpi_group = mpi_group[(mpi_group['# MPI tasks'] % 40 == 0)]
mpi_group = mpi_group.reset_index()
rhodo_pe_df['Nodes'] = mpi_group['# nodes']
rhodo_pe_df['MPI_only_pe'] = (1.0/mpi_group['# nodes']/40)*(serial_run/mpi_group['Wall']*100)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['MPI_only_pe'], 'ko-', label='MPI-only', linewidth=3, markersize=4)
#print(kkomp_group)
kkomp_group = group.get_group("Kokkos/OMP")
kkomp_group = kkomp_group.sort_values(['# OMP threads', '# nodes'])
kkomp_group = kkomp_group.reset_index()
kkomp = kkomp_group.groupby("# OMP threads")
nums = [1, 2, 4, 5, 8, 10, 20, 40]
rev_nums = nums[::-1]
for i in range(0, len(nums)):
globals()['omp%s' % nums[i]] = kkomp.get_group(nums[i])
globals()['omp%s' % nums[i]] = globals()['omp%s' % nums[i]].reset_index()
del globals()['omp%s' % nums[i]]['index']
del globals()['omp%s' % nums[i]]['level_0']
rhodo_pe_df[str('%s_MPI_' % rev_nums[i])+str(nums[i])+'_OMP_Kokkos_pe'] = (1.0/globals()['omp%s' % nums[i]]['# nodes']/40)*(serial_run/globals()['omp%s' % nums[i]]['Wall']*100)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['1_MPI_40_OMP_Kokkos_pe'], color='tab:blue', linestyle='-', marker='o', label='1 MPI x 40 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['2_MPI_20_OMP_Kokkos_pe'], color='tab:green', linestyle='-', marker='v', label='2 MPI x 20 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['4_MPI_10_OMP_Kokkos_pe'], color='tab:orange', linestyle='-', marker='^', label='4 MPI x 10 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['5_MPI_8_OMP_Kokkos_pe'], color='tab:red', linestyle='-', marker='>', label='5 MPI x 8 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['8_MPI_5_OMP_Kokkos_pe'], color='tab:cyan', linestyle='-', marker='<', label='8 MPI x 5 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['10_MPI_4_OMP_Kokkos_pe'], color='tab:olive', linestyle='-', marker='*', label='10 MPI x 4 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['20_MPI_2_OMP_Kokkos_pe'], color='darkorchid', linestyle='-', marker='X', label='20 MPI x 2 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.plot(rhodo_pe_df['Nodes'], rhodo_pe_df['40_MPI_1_OMP_Kokkos_pe'], color='peru', linestyle='-', marker='D', label='40 MPI x 1 OpenMP w. Kokkos', linewidth=1.25, markersize=4)
ax.grid(color='k', linestyle='--', linewidth=1, alpha=0.2)
ax.set_ylim(0,100)
ax.set_xlim(0.5,(rhodo_pe_df['Nodes'].max()+0.5))
ax.legend(ncol=2, loc=1, fontsize=8)
ax.set_xlabel("Number of nodes", fontsize=12, fontname="Arial")
ax.set_ylabel("Parallel efficiency (%)", fontsize=12, fontname="Arial")
fig.suptitle("Intel Xeon Gold (Skylake) processors with 2x20-core 2.4 GHz,\n192 GB RAM Rhodopsin system (32K atoms), lj/charmm/coul/long\n+ PPPM with USER-OMP and Kokkos (Intel compiler 2019u5, GCC 8.2.0)", fontsize=11, y=0.99)
fig.savefig("Rhodopsin_kokkos_omp.png")
if __name__ == "__main__":
    #extract_data(sys.argv[1:])
    lammps_data = lammps_extract.extract_data(sys.argv[1:])
    #print(lammps_data)
    #cpu_v_gpu(lammps_data)
    #gpu_perf(lammps_data)
    #scaling_rhodopsin(lammps_data)
    omp_pe_rhodopsin(lammps_data)
    kokkos_omp_pe_rhodopsin(lammps_data)
    #print(log_lammps)
| [
"lammps_extract.extract_data",
"math.ceil",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.legend"
] | [((292, 306), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (304, 306), True, 'import pandas as pd\n'), ((1033, 1061), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)'}), '(figsize=(8, 6))\n', (1045, 1061), True, 'import matplotlib.pyplot as plt\n'), ((2643, 2657), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (2655, 2657), True, 'import pandas as pd\n'), ((3880, 3894), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3892, 3894), True, 'import matplotlib.pyplot as plt\n'), ((5472, 5486), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (5484, 5486), True, 'import pandas as pd\n'), ((5760, 5774), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5772, 5774), True, 'import matplotlib.pyplot as plt\n'), ((7222, 7234), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (7232, 7234), True, 'import matplotlib.pyplot as plt\n'), ((7546, 7560), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (7558, 7560), True, 'import pandas as pd\n'), ((7576, 7590), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (7588, 7590), True, 'import matplotlib.pyplot as plt\n'), ((10967, 10981), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10979, 10981), True, 'import pandas as pd\n'), ((10997, 11011), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (11009, 11011), True, 'import matplotlib.pyplot as plt\n'), ((14479, 14520), 'lammps_extract.extract_data', 'lammps_extract.extract_data', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (14506, 14520), False, 'import lammps_extract\n'), ((175, 200), 'math.ceil', 'math.ceil', (['(n * multiplier)'], {}), '(n * multiplier)\n', (184, 200), False, 'import math\n')] |
from __future__ import annotations
import logging
from datetime import datetime
import numpy
from psycopg2 import sql
from psycopg2.extensions import AsIs, register_adapter
from psycopg2.extras import Json, RealDictCursor
from psycopg2.pool import ThreadedConnectionPool
def adapt_numpy_float64(numpy_float64):
    return AsIs(numpy_float64)


def adapt_numpy_int64(numpy_int64):
    return AsIs(numpy_int64)
register_adapter(numpy.float64, adapt_numpy_float64)
register_adapter(numpy.int64, adapt_numpy_int64)
class MissingDataError(Exception):
    def __init__(self, message):
        # Call the base class constructor with the parameters it needs
        super().__init__(message)
def exception_decorator(wrapped_function):
    def _wrapper(*args, **kwargs):
        try:
            result = wrapped_function(*args, **kwargs)
        except Exception as error:
            logging.getLogger('database_connector').exception(
                'Exception occurred in %s.', wrapped_function.__name__
            )
            raise type(error)(
                f'Exception occurred in {wrapped_function.__name__}: {str(error)}'
            )
        return result
    return _wrapper
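# Design note: _wrapper is returned without functools.wraps, so decorated methods lose
# their __name__ and docstring. A sketch of a metadata-preserving variant (hypothetical
# name, not what the class below uses):
import functools


def exception_decorator_preserving(wrapped_function):
    @functools.wraps(wrapped_function)
    def _wrapper(*args, **kwargs):
        try:
            return wrapped_function(*args, **kwargs)
        except Exception as error:
            logging.getLogger('database_connector').exception(
                'Exception occurred in %s.', wrapped_function.__name__
            )
            raise type(error)(
                f'Exception occurred in {wrapped_function.__name__}: {str(error)}'
            )
    return _wrapper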
class DatabaseConnector:
db_instance = None
@classmethod
def get_db_instance(cls) -> DatabaseConnector:
if cls.db_instance is None:
cls.db_instance = cls()
return cls.db_instance
def __init__(self):
self.logger = logging.getLogger('database_connector')
try:
self.pool = ThreadedConnectionPool(
1, 10,
user='postgres',
password='<PASSWORD>',
host='127.0.0.1',
port='5432',
database='postgres'
)
self.schema = 'linkprediction'
except Exception:
logging.getLogger('database_connector').exception(
'Exception occurred while connecting to the database'
)
def _get_dict_cursor(self, conn):
return conn.cursor(
cursor_factory=RealDictCursor
)
def _get_connection(self):
return self.pool.getconn()
def _put_connection(self, conn):
self.pool.putconn(conn)
# ####################################
# BERUFENET #
# ####################################
@exception_decorator
def get_occupations_by_column(self, column_type, value):
"""
column_type: possible values = ['record_id', 'job_id', 'job_title']
value: value of the column_type
"""
if column_type not in ['record_id', 'job_id', 'job_title']:
raise ValueError('Parameter column_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.berufenet WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, column_type)),
*map(sql.Literal, (value,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def get_occupation_by_hierarchy(self, field_of_activity, subject_area, column_type, value):
"""
column_type: possible values = ['job_id', 'job_title']
value: value of the column_type
"""
if column_type not in ['job_id', 'job_title']:
raise ValueError('Parameter column_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.berufenet \
WHERE field_of_activity = {} \
and subject_area = {} \
and {} = {};"
).format(
*map(sql.Identifier, (self.schema,)),
*map(sql.Literal, (field_of_activity, subject_area)),
*map(sql.Identifier, (column_type,)),
*map(sql.Literal, (value,))
)
cur.execute(query)
record = cur.fetchone()
self._put_connection(conn)
if record is None:
raise MissingDataError('Select statement returned None.')
return record
# ####################################
# PROJECTS #
# ####################################
@exception_decorator
def add_project(self, designation, description):
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.projects (designation, description) ' \
'VALUES (%s, %s) ' \
'RETURNING project_id;'
params = (designation, description)
cur.execute(query, params)
record = cur.fetchone()
conn.commit()
self._put_connection(conn)
return next(iter(record), None)
@exception_decorator
def get_project_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['project_id']
reference_id: value of the id
"""
if id_type not in ['project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.projects WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
record = cur.fetchone()
self._put_connection(conn)
if record is None:
raise MissingDataError('Select statement returned None.')
return record
@exception_decorator
def get_projects(self):
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = f'SELECT * FROM {self.schema}.projects;'
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_project(self, project_id):
conn = self._get_connection()
cur = conn.cursor()
query = f'DELETE FROM {self.schema}.projects WHERE project_id = %s'
params = (project_id,)
cur.execute(query, params)
conn.commit()
self._put_connection(conn)
return cur.statusmessage
@exception_decorator
def set_original_network_of_project(self, project_id, original_network_id):
conn = self._get_connection()
cur = conn.cursor()
query = f'UPDATE {self.schema}.projects ' \
'SET original_network_id = %s ' \
'WHERE project_id = %s'
params = (original_network_id, project_id)
cur.execute(query, params)
conn.commit()
self._put_connection(conn)
@exception_decorator
def set_predicted_network_of_project(self, project_id, predicted_network_id):
conn = self._get_connection()
cur = conn.cursor()
query = f'UPDATE {self.schema}.projects ' \
'SET predicted_network_id = %s ' \
'WHERE project_id = %s'
params = (predicted_network_id, project_id)
cur.execute(query, params)
conn.commit()
self._put_connection(conn)
# ####################################
# ORIGINAL_NETWORK #
# ####################################
@exception_decorator
def add_original_network_to_project(self, designation, directed, multigraph, project_id):
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.original_network' \
'(designation, directed, multigraph, project_id) ' \
'VALUES (%s, %s, %s, %s) ' \
'RETURNING original_network_id;'
params = (designation, directed, multigraph, project_id)
cur.execute(query, params)
original_network_id = next(iter(cur.fetchone()), None)
conn.commit()
self._put_connection(conn)
if original_network_id is not None:
self.set_original_network_of_project(
project_id, original_network_id)
return original_network_id
@exception_decorator
def get_original_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['original_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['original_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.original_network WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
record = cur.fetchone()
self._put_connection(conn)
if record is None:
raise MissingDataError('Select statement returned None.')
return record
@exception_decorator
def delete_original_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['original_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['original_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.original_network WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# ORIGINAL_EDGES #
# ####################################
@exception_decorator
def add_edges_to_original_network(self, edge_list, original_network_id):
"""
edge_list: [(source_node: uuid, target_node: uuid)]
original_network_id: uuid
"""
conn = self._get_connection()
cur = conn.cursor()
args_str = ','.join(
cur.mogrify(
"(%s, %s, %s)",
(source_node, target_node, original_network_id)
).decode("utf-8")
for source_node, target_node in edge_list)
cur.execute(
f'INSERT INTO {self.schema}.original_edges '
'(source_node, target_node, original_network_id) '
f'VALUES {args_str} '
'RETURNING original_edge_id;'
)
records = [next(iter(record))
for record in cur.fetchall() if len(record) > 0]
conn.commit()
self._put_connection(conn)
return records
@exception_decorator
def get_edges_of_original_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['original_edge_id', 'original_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['original_edge_id', 'original_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
if id_type == 'project_id':
original_network = self.get_original_network_by_id('project_id', reference_id)
id_type = 'original_network_id'
reference_id = original_network['original_network_id']
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.original_edges WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_edges_of_original_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['original_edge_id', 'original_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['original_edge_id', 'original_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
        if id_type == 'project_id':
            original_network = self.get_original_network_by_id('project_id', reference_id)
            reference_id = original_network['original_network_id']
            id_type = 'original_network_id'
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.original_edges WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# PREDICTED_NETWORK #
# ####################################
@exception_decorator
def add_predicted_network_to_project(self, designation, project_id):
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.predicted_network' \
'(designation, project_id) ' \
'VALUES (%s, %s) ' \
'RETURNING predicted_network_id;'
params = (designation, project_id)
cur.execute(query, params)
predicted_network_id = next(iter(cur.fetchone()), None)
conn.commit()
self._put_connection(conn)
if predicted_network_id is not None:
self.set_predicted_network_of_project(
project_id, predicted_network_id)
return predicted_network_id
@exception_decorator
def get_predicted_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['predicted_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['predicted_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.predicted_network WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
record = cur.fetchone()
self._put_connection(conn)
if record is None:
raise MissingDataError('Select statement returned None.')
return record
@exception_decorator
def delete_predicted_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['predicted_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['predicted_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.predicted_network WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# NETWORK_FEATURES #
# ####################################
@exception_decorator
def add_selected_network_feature_to_project(
self,
designation: str,
feature_type: str,
parameters: dict,
predicted_network_id: str
):
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.selected_network_features '\
'(designation, feature_type, parameters, predicted_network_id) ' \
'VALUES (%s, %s, %s, %s);'
params = (designation, feature_type, Json(parameters), predicted_network_id)
cur.execute(query, params)
conn.commit()
self._put_connection(conn)
@exception_decorator
def get_selected_network_features_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['selected_feature_id', 'predicted_network_id']
reference_id: value of the id
"""
if id_type not in ['selected_feature_id', 'predicted_network_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.selected_network_features WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_selected_network_features_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['selected_feature_id', 'predicted_network_id']
reference_id: value of the id
"""
if id_type not in ['selected_feature_id', 'predicted_network_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.selected_network_features WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
@exception_decorator
def get_standard_network_features(self):
"""
Return value is static.
"""
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = f'SELECT * FROM {self.schema}.standard_network_features;'
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
# ####################################
# PREDICTED_EDGES #
# ####################################
@exception_decorator
def add_edge_to_predicted_network(self, edge, edge_color, predicted_network_id):
"""
edge: tuple (source_node: uuid, target_node: uuid)
edge_color: str with color code
predicted_network_id: uuid
"""
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.predicted_edges ' \
'(source_node, target_node, edge_color, predicted_network_id) ' \
'VALUES (%s, %s, %s, %s) ' \
'RETURNING predicted_edge_id;'
params = (*edge, edge_color, predicted_network_id)
cur.execute(query, params)
record = cur.fetchone()
conn.commit()
self._put_connection(conn)
return next(iter(record))
@exception_decorator
def get_edges_of_predicted_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['predicted_edge_id', 'predicted_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['predicted_edge_id', 'predicted_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
        if id_type == 'project_id':
            predicted_network = self.get_predicted_network_by_id('project_id', reference_id)
            reference_id = predicted_network['predicted_network_id']
            id_type = 'predicted_network_id'
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.predicted_edges WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_edges_of_predicted_network_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['predicted_edge_id', 'predicted_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['predicted_edge_id', 'predicted_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
        if id_type == 'project_id':
            predicted_network = self.get_predicted_network_by_id('project_id', reference_id)
            reference_id = predicted_network['predicted_network_id']
            id_type = 'predicted_network_id'
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.predicted_edges WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# PREDICTED_EDGE_COMPONENTS #
# ####################################
@exception_decorator
def add_component_to_predicted_edge(self, edge, predicted, prediction_score, predicted_edge_id):
"""
edge: tuple (source_node: uuid, target_node: uuid)
predicted: bool
predicted_edge_id: uuid
"""
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.predicted_edge_components ' \
'(source, target, predicted, prediction_score, predicted_edge_id) ' \
'VALUES (%s, %s, %s, %s, %s) ' \
'RETURNING edge_component_id;'
params = (*edge, predicted, prediction_score, predicted_edge_id)
cur.execute(query, params)
record = cur.fetchone()
conn.commit()
self._put_connection(conn)
return next(iter(record))
@exception_decorator
def get_components_of_predicted_edge_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['edge_component_id', 'predicted_edge_id']
reference_id: value of the id
"""
if id_type not in ['edge_component_id', 'predicted_edge_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.predicted_edge_components WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_component_of_predicted_edge_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['edge_component_id', 'predicted_edge_id']
reference_id: value of the id
"""
if id_type not in ['edge_component_id', 'predicted_edge_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.predicted_edge_components WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# APPLIED_METHODS #
# ####################################
@exception_decorator
def add_applied_methods_to_predicted_edge_component(self, method_list, edge_component_id):
"""
method_list: [(method_designation: str, method_components: dict)]
edge_component_id: uuid
"""
conn = self._get_connection()
cur = conn.cursor()
args_str = ','.join(
cur.mogrify(
"(%s, %s, %s)",
(method_designation, method_components, edge_component_id)
).decode("utf-8")
for method_designation, method_components in method_list)
cur.execute(
f'INSERT INTO {self.schema}.applied_methods '
'(method_designation, method_components, edge_component_id) '
f'VALUES {args_str} '
'RETURNING applied_method_id;'
)
records = [next(iter(record))
for record in cur.fetchall() if len(record) > 0]
conn.commit()
self._put_connection(conn)
return records
@exception_decorator
def get_applied_methods_of_predicted_edge_component_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['applied_method_id', 'edge_component_id']
reference_id: value of the id
"""
if id_type not in ['applied_method_id', 'edge_component_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.applied_methods WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_applied_methods_of_predicted_edge_component_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['applied_method_id', 'edge_component_id']
reference_id: value of the id
"""
if id_type not in ['applied_method_id', 'edge_component_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.applied_methods WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# LINKPREDICTION_STATUS #
# ####################################
@exception_decorator
def add_linkprediction_status(
self,
thread_id: int,
current_step: int,
max_steps: int,
process_step: str,
status_value: str,
id_type: str,
reference_id: str
):
"""
thread_id: Id of the thread that is calling this method
current_step: Current process step as number
max_steps: Count of all steps
process_step: Description of the current process step
status_value: Status value of the current process step
id_type: Possible values = ['project_id', 'predicted_network_id']
reference_id: Value of the Id
"""
if id_type not in ['project_id', 'predicted_network_id']:
raise ValueError('Parameter id_type is not valid!')
if id_type == 'project_id':
predicted_network = self.get_predicted_network_by_id(id_type, reference_id)
reference_id = predicted_network['predicted_network_id']
id_type = 'predicted_network_id'
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.prediction_status ' \
'(thread_id, log_timestamp, current_step, max_steps, process_step, ' \
'status_value, predicted_network_id) ' \
'VALUES (%s, %s, %s, %s, %s, %s, %s) ' \
'RETURNING status_id;'
params = (thread_id, datetime.now(), current_step, max_steps,
process_step, status_value, reference_id)
cur.execute(query, params)
record = cur.fetchone()
conn.commit()
self._put_connection(conn)
return next(iter(record), None)
@exception_decorator
def get_last_linkprediction_status_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['status_id', 'project_id', 'predicted_network_id']
reference_id: value of the id
"""
if id_type not in ['status_id', 'project_id', 'predicted_network_id']:
raise ValueError('Parameter id_type is not valid!')
if id_type == 'project_id':
predicted_network = self.get_predicted_network_by_id(id_type, reference_id)
reference_id = predicted_network['predicted_network_id']
id_type = 'predicted_network_id'
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
'SELECT * FROM {}.prediction_status WHERE {} = {} ' \
'ORDER BY log_timestamp DESC LIMIT 1;'
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchone()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_linkprediction_status_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['status_id', 'project_id', 'predicted_network_id']
reference_id: value of the id
"""
if id_type not in ['status_id', 'project_id', 'predicted_network_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.prediction_status WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# EVALUATION_RESULTS #
# ####################################
@exception_decorator
def add_or_update_evaluation_result(self, project_id, result_data):
"""
project_id: project id as uuid (str)
result_data: result data either as JSON string or as python dict
"""
conn = self._get_connection()
cur = conn.cursor()
if isinstance(result_data, dict):
result_data = Json(result_data)
if self._check_if_row_exists('evaluation_results', 'project_id', project_id) is True:
query = f'UPDATE {self.schema}.evaluation_results ' \
'SET result_data = %s ' \
'WHERE project_id = %s ' \
'RETURNING result_id;'
params = (result_data, project_id)
else:
query = f'INSERT INTO {self.schema}.evaluation_results '\
'(project_id, result_data) ' \
'VALUES (%s, %s) ' \
'RETURNING result_id;'
params = (project_id, result_data)
cur.execute(query, params)
record = cur.fetchone()
conn.commit()
self._put_connection(conn)
return next(iter(record), None)
@exception_decorator
def get_evaluation_result_by_id(self, id_type, reference_id: str):
"""
id_type: possible values = ['result_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['result_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.evaluation_results WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
record = cur.fetchone()
self._put_connection(conn)
if record is None:
raise MissingDataError('Select statement returned None.')
return record
@exception_decorator
def delete_evaluation_result_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['result_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['result_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.evaluation_results WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# NODES #
# ####################################
@exception_decorator
def add_node(self, node_network_id, designation, original_network_id):
conn = self._get_connection()
cur = conn.cursor()
query = f'INSERT INTO {self.schema}.nodes '\
'(node_network_id, designation, original_network_id) ' \
'VALUES (%s, %s, %s) ' \
'RETURNING node_id;'
params = (node_network_id, designation, original_network_id)
cur.execute(query, params)
record = cur.fetchone()
conn.commit()
self._put_connection(conn)
return next(iter(record), None)
@exception_decorator
def add_nodes(self, node_list: list, original_network_id: str, predicted_network_id: str):
"""
node_list: [(node_network_id: int, designation: str)]
original_network_id: uuid
predicted_network_id: uuid
"""
conn = self._get_connection()
cur = conn.cursor()
args_str = ','.join(
cur.mogrify(
"(%s, %s, %s, %s)",
(node_network_id, designation, original_network_id, predicted_network_id)
).decode("utf-8")
for node_network_id, designation in node_list)
cur.execute(
f'INSERT INTO {self.schema}.nodes '
'(node_network_id, designation, original_network_id, predicted_network_id) '
f'VALUES {args_str} '
'RETURNING node_network_id, node_id;'
)
records = cur.fetchall()
conn.commit()
self._put_connection(conn)
return records
@exception_decorator
def get_nodes_by_id(self, id_type, reference_id: str):
"""
id_type: possible values = ['node_id', 'original_network_id', 'predicted_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['node_id', 'original_network_id', 'predicted_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
if id_type == 'project_id':
original_network = self.get_original_network_by_id(id_type, reference_id)
id_type = 'original_network_id'
reference_id = original_network['original_network_id']
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.nodes WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def delete_nodes_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['node_id', 'original_network_id']
reference_id: value of the id
"""
if id_type not in ['node_id', 'original_network_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.nodes WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
@exception_decorator
def add_node_attributes(self, attribute_list, node_id):
"""
attribute_list: [(attribute_name: str, attribute_value: str)]
node_id: uuid
"""
conn = self._get_connection()
cur = conn.cursor()
args_str = ','.join(
cur.mogrify(
"(%s, %s, %s)",
(attribute_name, attribute_value, node_id)
).decode("utf-8")
for attribute_name, attribute_value in attribute_list)
cur.execute(
f'INSERT INTO {self.schema}.node_attributes '
'(attribute_name, attribute_value, node_id) '
f'VALUES {args_str} '
'RETURNING node_attribute_id;'
)
records = [next(iter(record)) for record in cur.fetchall() if len(record) > 0]
conn.commit()
self._put_connection(conn)
return records
@exception_decorator
def get_node_attributes_by_id(self, id_type: str, reference_id: str):
"""
id_type: possible values = ['node_id', 'node_attribute_id']
reference_id: value of the id
"""
if id_type not in ['node_id', 'node_attribute_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = self._get_dict_cursor(conn)
query = sql.SQL(
"SELECT * FROM {}.node_attributes WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return records
@exception_decorator
def get_distinct_node_attributes_by_id(self, id_type: str, reference_id: str):
"""
id_type: possible values = ['original_network_id', 'predicted_network_id', 'project_id']
reference_id: value of the id
"""
if id_type not in ['original_network_id', 'predicted_network_id', 'project_id']:
raise ValueError('Parameter id_type is not valid!')
if id_type == 'project_id':
original_network = self.get_original_network_by_id(id_type, reference_id)
reference_id = original_network['original_network_id']
id_type = 'original_network_id'
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
f'SELECT attribute_name FROM {self.schema}.node_attributes attr '
f'INNER JOIN {self.schema}.nodes nodes '
'ON nodes.node_id=attr.node_id '
'WHERE nodes.{} = {} '
'GROUP BY attribute_name;'
).format(
*map(sql.Identifier, (id_type,)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
records = cur.fetchall()
self._put_connection(conn)
if records is None:
raise MissingDataError('Select statement returned None.')
return [next(iter(record)) for record in records]
@exception_decorator
def delete_node_attributes_by_id(self, id_type, reference_id):
"""
id_type: possible values = ['node_id', 'node_attribute_id']
reference_id: value of the id
"""
if id_type not in ['node_id', 'node_attribute_id']:
raise ValueError('Parameter id_type is not valid!')
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"DELETE FROM {}.node_attributes WHERE {} = {};"
).format(
*map(sql.Identifier, (self.schema, id_type)),
*map(sql.Literal, (reference_id,))
)
cur.execute(query)
conn.commit()
self._put_connection(conn)
# ####################################
# GENERIC_METHODS #
# ####################################
@exception_decorator
def _check_if_row_exists(self, table: str, column: str, value: str) -> bool:
"""
table: name of the database table in the stored schema
column: name of the column to look for
value: value of the column
"""
conn = self._get_connection()
cur = conn.cursor()
query = sql.SQL(
"SELECT EXISTS(SELECT 1 FROM {}.{} WHERE {} = {});"
).format(
*map(sql.Identifier, (self.schema, table, column)),
*map(sql.Literal, (value,))
)
cur.execute(query)
record = cur.fetchone()
self._put_connection(conn)
return next(iter(record))
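    # Illustrative usage sketch (names are hypothetical): given a connector
    # instance `db` of this class, the generic helper above can guard writes,
    # e.g.:
    #
    #   if not db._check_if_row_exists("nodes", "node_id", some_node_id):
    #       ...  # safe to insert the node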
| [
"logging.getLogger",
"psycopg2.sql.SQL",
"psycopg2.extensions.AsIs",
"psycopg2.pool.ThreadedConnectionPool",
"datetime.datetime.now",
"psycopg2.extras.Json",
"psycopg2.extensions.register_adapter"
] | [((415, 467), 'psycopg2.extensions.register_adapter', 'register_adapter', (['numpy.float64', 'adapt_numpy_float64'], {}), '(numpy.float64, adapt_numpy_float64)\n', (431, 467), False, 'from psycopg2.extensions import AsIs, register_adapter\n'), ((468, 516), 'psycopg2.extensions.register_adapter', 'register_adapter', (['numpy.int64', 'adapt_numpy_int64'], {}), '(numpy.int64, adapt_numpy_int64)\n', (484, 516), False, 'from psycopg2.extensions import AsIs, register_adapter\n'), ((326, 345), 'psycopg2.extensions.AsIs', 'AsIs', (['numpy_float64'], {}), '(numpy_float64)\n', (330, 345), False, 'from psycopg2.extensions import AsIs, register_adapter\n'), ((395, 412), 'psycopg2.extensions.AsIs', 'AsIs', (['numpy_int64'], {}), '(numpy_int64)\n', (399, 412), False, 'from psycopg2.extensions import AsIs, register_adapter\n'), ((1463, 1502), 'logging.getLogger', 'logging.getLogger', (['"""database_connector"""'], {}), "('database_connector')\n", (1480, 1502), False, 'import logging\n'), ((1540, 1666), 'psycopg2.pool.ThreadedConnectionPool', 'ThreadedConnectionPool', (['(1)', '(10)'], {'user': '"""postgres"""', 'password': '"""<PASSWORD>"""', 'host': '"""127.0.0.1"""', 'port': '"""5432"""', 'database': '"""postgres"""'}), "(1, 10, user='postgres', password='<PASSWORD>', host=\n '127.0.0.1', port='5432', database='postgres')\n", (1562, 1666), False, 'from psycopg2.pool import ThreadedConnectionPool\n'), ((16289, 16305), 'psycopg2.extras.Json', 'Json', (['parameters'], {}), '(parameters)\n', (16293, 16305), False, 'from psycopg2.extras import Json, RealDictCursor\n'), ((28541, 28555), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (28553, 28555), False, 'from datetime import datetime\n'), ((31272, 31289), 'psycopg2.extras.Json', 'Json', (['result_data'], {}), '(result_data)\n', (31276, 31289), False, 'from psycopg2.extras import Json, RealDictCursor\n'), ((2832, 2884), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.berufenet WHERE {} = {};"""'], {}), "('SELECT * FROM {}.berufenet WHERE {} = {};')\n", (2839, 2884), False, 'from psycopg2 import sql\n'), ((3711, 3870), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.berufenet WHERE field_of_activity = {} and subject_area = {} and {} = {};"""'], {}), "(\n 'SELECT * FROM {}.berufenet WHERE field_of_activity = {} and subject_area = {} and {} = {};'\n )\n", (3718, 3870), False, 'from psycopg2 import sql\n'), ((5365, 5416), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.projects WHERE {} = {};"""'], {}), "('SELECT * FROM {}.projects WHERE {} = {};')\n", (5372, 5416), False, 'from psycopg2 import sql\n'), ((8857, 8916), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.original_network WHERE {} = {};"""'], {}), "('SELECT * FROM {}.original_network WHERE {} = {};')\n", (8864, 8916), False, 'from psycopg2 import sql\n'), ((9715, 9772), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.original_network WHERE {} = {};"""'], {}), "('DELETE FROM {}.original_network WHERE {} = {};')\n", (9722, 9772), False, 'from psycopg2 import sql\n'), ((11803, 11860), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.original_edges WHERE {} = {};"""'], {}), "('SELECT * FROM {}.original_edges WHERE {} = {};')\n", (11810, 11860), False, 'from psycopg2 import sql\n'), ((12878, 12933), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.original_edges WHERE {} = {};"""'], {}), "('DELETE FROM {}.original_edges WHERE {} = {};')\n", (12885, 12933), False, 'from psycopg2 import sql\n'), ((14491, 14551), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * 
FROM {}.predicted_network WHERE {} = {};"""'], {}), "('SELECT * FROM {}.predicted_network WHERE {} = {};')\n", (14498, 14551), False, 'from psycopg2 import sql\n'), ((15353, 15411), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.predicted_network WHERE {} = {};"""'], {}), "('DELETE FROM {}.predicted_network WHERE {} = {};')\n", (15360, 15411), False, 'from psycopg2 import sql\n'), ((16901, 16969), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.selected_network_features WHERE {} = {};"""'], {}), "('SELECT * FROM {}.selected_network_features WHERE {} = {};')\n", (16908, 16969), False, 'from psycopg2 import sql\n'), ((17800, 17866), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.selected_network_features WHERE {} = {};"""'], {}), "('DELETE FROM {}.selected_network_features WHERE {} = {};')\n", (17807, 17866), False, 'from psycopg2 import sql\n'), ((20186, 20244), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.predicted_edges WHERE {} = {};"""'], {}), "('SELECT * FROM {}.predicted_edges WHERE {} = {};')\n", (20193, 20244), False, 'from psycopg2 import sql\n'), ((21269, 21325), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.predicted_edges WHERE {} = {};"""'], {}), "('DELETE FROM {}.predicted_edges WHERE {} = {};')\n", (21276, 21325), False, 'from psycopg2 import sql\n'), ((22977, 23045), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.predicted_edge_components WHERE {} = {};"""'], {}), "('SELECT * FROM {}.predicted_edge_components WHERE {} = {};')\n", (22984, 23045), False, 'from psycopg2 import sql\n'), ((23868, 23934), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.predicted_edge_components WHERE {} = {};"""'], {}), "('DELETE FROM {}.predicted_edge_components WHERE {} = {};')\n", (23875, 23934), False, 'from psycopg2 import sql\n'), ((25785, 25843), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.applied_methods WHERE {} = {};"""'], {}), "('SELECT * FROM {}.applied_methods WHERE {} = {};')\n", (25792, 25843), False, 'from psycopg2 import sql\n'), ((26682, 26738), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.applied_methods WHERE {} = {};"""'], {}), "('DELETE FROM {}.applied_methods WHERE {} = {};')\n", (26689, 26738), False, 'from psycopg2 import sql\n'), ((29533, 29639), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.prediction_status WHERE {} = {} ORDER BY log_timestamp DESC LIMIT 1;"""'], {}), "(\n 'SELECT * FROM {}.prediction_status WHERE {} = {} ORDER BY log_timestamp DESC LIMIT 1;'\n )\n", (29540, 29639), False, 'from psycopg2 import sql\n'), ((30481, 30539), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.prediction_status WHERE {} = {};"""'], {}), "('DELETE FROM {}.prediction_status WHERE {} = {};')\n", (30488, 30539), False, 'from psycopg2 import sql\n'), ((32500, 32561), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.evaluation_results WHERE {} = {};"""'], {}), "('SELECT * FROM {}.evaluation_results WHERE {} = {};')\n", (32507, 32561), False, 'from psycopg2 import sql\n'), ((33341, 33400), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.evaluation_results WHERE {} = {};"""'], {}), "('DELETE FROM {}.evaluation_results WHERE {} = {};')\n", (33348, 33400), False, 'from psycopg2 import sql\n'), ((36087, 36135), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.nodes WHERE {} = {};"""'], {}), "('SELECT * FROM {}.nodes WHERE {} = {};')\n", (36094, 36135), False, 'from psycopg2 import sql\n'), ((36920, 36966), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.nodes WHERE {} = {};"""'], {}), "('DELETE FROM 
{}.nodes WHERE {} = {};')\n", (36927, 36966), False, 'from psycopg2 import sql\n'), ((38547, 38605), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT * FROM {}.node_attributes WHERE {} = {};"""'], {}), "('SELECT * FROM {}.node_attributes WHERE {} = {};')\n", (38554, 38605), False, 'from psycopg2 import sql\n'), ((39703, 39902), 'psycopg2.sql.SQL', 'sql.SQL', (['f"""SELECT attribute_name FROM {self.schema}.node_attributes attr INNER JOIN {self.schema}.nodes nodes ON nodes.node_id=attr.node_id WHERE nodes.{{}} = {{}} GROUP BY attribute_name;"""'], {}), "(\n f'SELECT attribute_name FROM {self.schema}.node_attributes attr INNER JOIN {self.schema}.nodes nodes ON nodes.node_id=attr.node_id WHERE nodes.{{}} = {{}} GROUP BY attribute_name;'\n )\n", (39710, 39902), False, 'from psycopg2 import sql\n'), ((40763, 40819), 'psycopg2.sql.SQL', 'sql.SQL', (['"""DELETE FROM {}.node_attributes WHERE {} = {};"""'], {}), "('DELETE FROM {}.node_attributes WHERE {} = {};')\n", (40770, 40819), False, 'from psycopg2 import sql\n'), ((41536, 41596), 'psycopg2.sql.SQL', 'sql.SQL', (['"""SELECT EXISTS(SELECT 1 FROM {}.{} WHERE {} = {});"""'], {}), "('SELECT EXISTS(SELECT 1 FROM {}.{} WHERE {} = {});')\n", (41543, 41596), False, 'from psycopg2 import sql\n'), ((887, 926), 'logging.getLogger', 'logging.getLogger', (['"""database_connector"""'], {}), "('database_connector')\n", (904, 926), False, 'import logging\n'), ((1853, 1892), 'logging.getLogger', 'logging.getLogger', (['"""database_connector"""'], {}), "('database_connector')\n", (1870, 1892), False, 'import logging\n')] |
import aioredis
import pytest
from ddtrace import Pin
from ddtrace import tracer
from ddtrace.contrib.aioredis.patch import aioredis_version
from ddtrace.contrib.aioredis.patch import patch
from ddtrace.contrib.aioredis.patch import unpatch
from ddtrace.vendor.wrapt import ObjectProxy
from tests.utils import override_config
from ..config import REDIS_CONFIG
@pytest.mark.asyncio
@pytest.fixture
async def redis_client():
r = await get_redis_instance()
yield r
def get_redis_instance():
if aioredis_version >= (2, 0):
return aioredis.from_url("redis://127.0.0.1:%s" % REDIS_CONFIG["port"])
return aioredis.create_redis(("localhost", REDIS_CONFIG["port"]))
@pytest.mark.asyncio
@pytest.fixture(autouse=True)
async def traced_aioredis(redis_client):
await redis_client.flushall()
patch()
try:
yield
finally:
unpatch()
await redis_client.flushall()
def test_patching():
"""
When patching aioredis library
We wrap the correct methods
When unpatching aioredis library
We unwrap the correct methods
"""
if aioredis_version >= (2, 0):
assert isinstance(aioredis.client.Redis.execute_command, ObjectProxy)
assert isinstance(aioredis.client.Redis.pipeline, ObjectProxy)
assert isinstance(aioredis.client.Pipeline.pipeline, ObjectProxy)
unpatch()
assert not isinstance(aioredis.client.Redis.execute_command, ObjectProxy)
assert not isinstance(aioredis.client.Redis.pipeline, ObjectProxy)
assert not isinstance(aioredis.client.Pipeline.pipeline, ObjectProxy)
else:
assert isinstance(aioredis.Redis.execute, ObjectProxy)
unpatch()
assert not isinstance(aioredis.Redis.execute, ObjectProxy)
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_basic_request(redis_client):
val = await redis_client.get("cheese")
assert val is None
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_long_command(redis_client):
length = 1000
val_list = await redis_client.mget(*range(length))
assert len(val_list) == length
for val in val_list:
assert val is None
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_override_service_name(redis_client):
with override_config("aioredis", dict(service_name="myaioredis")):
val = await redis_client.get("cheese")
assert val is None
await redis_client.set("cheese", "my-cheese")
val = await redis_client.get("cheese")
if isinstance(val, bytes):
val = val.decode()
assert val == "my-cheese"
@pytest.mark.asyncio
@pytest.mark.snapshot
async def test_pin(redis_client):
Pin.override(redis_client, service="my-aioredis")
val = await redis_client.get("cheese")
assert val is None
@pytest.mark.asyncio
@pytest.mark.snapshot(variants={"": aioredis_version >= (2, 0), "13": aioredis_version < (2, 0)})
async def test_pipeline_traced(redis_client):
if aioredis_version >= (2, 0):
p = await redis_client.pipeline(transaction=False)
await p.set("blah", "boo")
await p.set("foo", "bar")
await p.get("blah")
await p.get("foo")
else:
p = redis_client.pipeline()
p.set("blah", "boo")
p.set("foo", "bar")
p.get("blah")
p.get("foo")
response_list = await p.execute()
    assert response_list[0] is True  # redis.set returns True when the key is stored successfully
    assert response_list[1] is True
    assert (
        response_list[2].decode() == "boo"
    )  # get("blah") returns the value that was set earlier in the pipeline
    assert response_list[3].decode() == "bar"
@pytest.mark.asyncio
@pytest.mark.snapshot(variants={"": aioredis_version >= (2, 0), "13": aioredis_version < (2, 0)})
async def test_two_traced_pipelines(redis_client):
with tracer.trace("web-request", service="test"):
if aioredis_version >= (2, 0):
p1 = await redis_client.pipeline(transaction=False)
p2 = await redis_client.pipeline(transaction=False)
await p1.set("blah", "boo")
await p2.set("foo", "bar")
await p1.get("blah")
await p2.get("foo")
else:
p1 = redis_client.pipeline()
p2 = redis_client.pipeline()
p1.set("blah", "boo")
p2.set("foo", "bar")
p1.get("blah")
p2.get("foo")
response_list1 = await p1.execute()
response_list2 = await p2.execute()
    assert response_list1[0] is True  # redis.set returns True when the key is stored successfully
    assert response_list2[0] is True
    assert (
        response_list1[1].decode() == "boo"
    )  # get("blah") returns the value that was set earlier in the pipeline
    assert response_list2[1].decode() == "bar"
| [
"ddtrace.contrib.aioredis.patch.unpatch",
"pytest.mark.snapshot",
"ddtrace.Pin.override",
"ddtrace.tracer.trace",
"ddtrace.contrib.aioredis.patch.patch",
"aioredis.from_url",
"pytest.fixture",
"aioredis.create_redis"
] | [((711, 739), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (725, 739), False, 'import pytest\n'), ((2839, 2940), 'pytest.mark.snapshot', 'pytest.mark.snapshot', ([], {'variants': "{'': aioredis_version >= (2, 0), '13': aioredis_version < (2, 0)}"}), "(variants={'': aioredis_version >= (2, 0), '13': \n aioredis_version < (2, 0)})\n", (2859, 2940), False, 'import pytest\n'), ((3725, 3826), 'pytest.mark.snapshot', 'pytest.mark.snapshot', ([], {'variants': "{'': aioredis_version >= (2, 0), '13': aioredis_version < (2, 0)}"}), "(variants={'': aioredis_version >= (2, 0), '13': \n aioredis_version < (2, 0)})\n", (3745, 3826), False, 'import pytest\n'), ((628, 686), 'aioredis.create_redis', 'aioredis.create_redis', (["('localhost', REDIS_CONFIG['port'])"], {}), "(('localhost', REDIS_CONFIG['port']))\n", (649, 686), False, 'import aioredis\n'), ((820, 827), 'ddtrace.contrib.aioredis.patch.patch', 'patch', ([], {}), '()\n', (825, 827), False, 'from ddtrace.contrib.aioredis.patch import patch\n'), ((2699, 2748), 'ddtrace.Pin.override', 'Pin.override', (['redis_client'], {'service': '"""my-aioredis"""'}), "(redis_client, service='my-aioredis')\n", (2711, 2748), False, 'from ddtrace import Pin\n'), ((552, 616), 'aioredis.from_url', 'aioredis.from_url', (["('redis://127.0.0.1:%s' % REDIS_CONFIG['port'])"], {}), "('redis://127.0.0.1:%s' % REDIS_CONFIG['port'])\n", (569, 616), False, 'import aioredis\n'), ((872, 881), 'ddtrace.contrib.aioredis.patch.unpatch', 'unpatch', ([], {}), '()\n', (879, 881), False, 'from ddtrace.contrib.aioredis.patch import unpatch\n'), ((1367, 1376), 'ddtrace.contrib.aioredis.patch.unpatch', 'unpatch', ([], {}), '()\n', (1374, 1376), False, 'from ddtrace.contrib.aioredis.patch import unpatch\n'), ((1693, 1702), 'ddtrace.contrib.aioredis.patch.unpatch', 'unpatch', ([], {}), '()\n', (1700, 1702), False, 'from ddtrace.contrib.aioredis.patch import unpatch\n'), ((3883, 3926), 'ddtrace.tracer.trace', 'tracer.trace', (['"""web-request"""'], {'service': '"""test"""'}), "('web-request', service='test')\n", (3895, 3926), False, 'from ddtrace import tracer\n')] |
from django.contrib import admin
from .models import Comment, Review
admin.site.register(Comment)
admin.site.register(Review) | [
"django.contrib.admin.site.register"
] | [((71, 99), 'django.contrib.admin.site.register', 'admin.site.register', (['Comment'], {}), '(Comment)\n', (90, 99), False, 'from django.contrib import admin\n'), ((100, 127), 'django.contrib.admin.site.register', 'admin.site.register', (['Review'], {}), '(Review)\n', (119, 127), False, 'from django.contrib import admin\n')] |
#!/usr/bin/python
import broadlink_ac_mqtt.classes.broadlink.ac_db as broadlink
import os
import time
import sys
import logging
import argparse
import yaml
import paho.mqtt.client as mqtt
import tempfile
import json
import traceback
sys.path.insert(
1, os.path.join(os.path.dirname(os.path.realpath(__file__)), "classes", "broadlink")
)
logger = logging.getLogger(__name__)
config = {}
device_objects = {}
class AcToMqtt:
previous_status = {}
last_update = {}
def __init__(self, config):
self.config = config
""
def test(self, config):
for device in config["devices"]:
device_bla = broadlink.gendevice(
devtype=0xFFFFFFF,
host=(device["ip"], device["port"]),
mac=bytearray.fromhex(device["mac"]),
name=device["name"],
)
status = device_bla.set_temperature(32)
# print status
def discover(self):
# Go discovery
discovered_devices = broadlink.discover(
timeout=5, bind_to_ip=self.config["bind_to_ip"]
)
devices = {}
if discovered_devices == None:
error_msg = "No Devices Found, make sure you on the same network segment"
logger.debug(error_msg)
# print "nothing found"
sys.exit()
# Make sure correct device id
for device in discovered_devices:
logging.debug(f"device: {device} device.devtype: {hex(device.devtype)}")
if device.devtype == 0x4E2A:
devices[device.status["macaddress"]] = device
logging.debug(f"Returning devices: {devices}")
return devices
def make_device_objects(self, device_list=None):
device_objects = {}
if device_list == [] or device_list == None:
error_msg = " Cannot make device objects, empty list given"
logger.error(error_msg)
sys.exit()
for device in device_list:
device_objects[device["mac"]] = broadlink.gendevice(
devtype=0x4E2A,
host=(device["ip"], device["port"]),
mac=bytearray.fromhex(device["mac"]),
name=device["name"],
update_interval=self.config["update_interval"],
)
return device_objects
def stop(self):
try:
self._mqtt.disconnect()
except:
""""""
def start(self, config, devices=None):
self.device_objects = devices
self.config = config
        # If there are no devices, throw an error
if devices == [] or devices == None:
print("No devices defined")
logger.error(
"No Devices defined, either enable discovery or add them to config"
)
return
else:
logger.debug("Following devices configured %s" % repr(devices))
# we are alive ##Update PID file
try:
for key in devices:
device = devices[key]
# Just check status on every update interval
if key in self.last_update:
logger.debug("Checking %s for timeout" % key)
if (
self.last_update[key] + self.config["update_interval"]
) > time.time():
logger.debug(
"Timeout %s not done, so lets wait a abit : %s : %s"
% (
self.config["update_interval"],
self.last_update[key] + self.config["update_interval"],
time.time(),
)
)
time.sleep(0.5)
continue
else:
""""""
# print "timeout done"
                # Get the status; the global update interval is also used here to reduce requests to the aircons, as they are slow
status = device.get_ac_status()
# print status
if status:
# Update last time checked
self.last_update[key] = time.time()
self.publish_mqtt_info(status)
else:
logger.debug("No status")
except Exception as e:
logger.critical(e)
logger.debug(traceback.format_exc())
# Something went wrong.....
return 1
def dump_homeassistant_config_from_devices(self, devices):
if devices == {}:
print("No devices defined")
sys.exit()
devices_array = self.make_devices_array_from_devices(devices)
if devices_array == {}:
print("something went wrong, no devices found")
sys.exit()
print("**************** Start copy below ****************")
a = []
for key in devices_array:
# Echo
device = devices_array[key]
device["platform"] = "mqtt"
a.append(device)
print(yaml.dump({"climate": a}))
print("**************** Stop copy above ****************")
def make_devices_array_from_devices(self, devices):
devices_array = {}
for device in devices.values():
# topic = self.config["mqtt_auto_discovery_topic"]+"/climate/"+device.status["macaddress"]+"/config"
logging.debug(f"device: {device}")
name = ""
if not device.name:
name = device.status["macaddress"]
else:
name = device.name.encode("ascii", "ignore")
device_array = {
# ,"power_command_topic" : self.config["mqtt_topic_prefix"]+ device.status["macaddress"]+"/power/set"
"name": str(name.decode("utf-8")),
"mode_command_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/mode_homeassistant/set",
"temperature_command_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/temp/set",
"fan_mode_command_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/fanspeed_homeassistant/set",
"action_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/homeassistant/set", # Read values
"current_temperature_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/ambient_temp/value",
"mode_state_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/mode_homeassistant/value",
"temperature_state_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/temp/value",
"fan_mode_state_topic": self.config["mqtt_topic_prefix"]
+ device.status["macaddress"]
+ "/fanspeed_homeassistant/value",
"fan_modes": [
"Auto",
"Low",
"Medium",
"High",
"Turbo",
"Mute",
], # @Anonym-tsk
"modes": ["off", "cool", "heat", "fan_only", "dry"],
"max_temp": 32.0,
"min_temp": 16.0,
"precision": 0.5,
"temp_step": 0.5,
"unique_id": device.status["macaddress"],
"device": {
"ids": device.status["macaddress"],
"name": str(name.decode("utf-8")),
"model": "Aircon",
"mf": "Broadlink",
"sw": broadlink.version,
},
"pl_avail": "online",
"pl_not_avail": "offline",
"availability_topic": self.config["mqtt_topic_prefix"] + "LWT",
}
devices_array[device.status["macaddress"]] = device_array
return devices_array
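    # Illustrative sketch (MAC value is hypothetical): for a device with MAC
    # "34ea34xxxxxx" and mqtt_topic_prefix "aircon/", the entry built above is
    # later published by publish_mqtt_auto_discovery() to
    #   <mqtt_auto_discovery_topic>/climate/34ea34xxxxxx/config
    # as JSON, with command/state topics such as "aircon/34ea34xxxxxx/temp/set"
    # and "aircon/34ea34xxxxxx/temp/value", which Home Assistant uses to create
    # a climate entity.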
def publish_mqtt_auto_discovery(self, devices):
if devices == [] or devices == None:
print("No devices defined")
logger.error(
"No Devices defined, either enable discovery or add them to config"
)
sys.exit()
# Make an array
devices_array = self.make_devices_array_from_devices(devices)
if devices_array == {}:
print("something went wrong, no devices found")
sys.exit()
# If retain is set for MQTT, then retain it
if self.config["mqtt_auto_discovery_topic_retain"]:
retain = self.config["mqtt_auto_discovery_topic_retain"]
else:
retain = False
logger.debug("HA config Retain set to: " + str(retain))
# Loop da loop all devices and publish discovery settings
for key in devices_array:
device = devices_array[key]
topic = (
self.config["mqtt_auto_discovery_topic"] + "/climate/" + key + "/config"
)
# Publish
self._publish(topic, json.dumps(device), retain=retain)
def publish_mqtt_info(self, status, force_update=False):
# If auto discovery is used, then always update
if not force_update:
force_update = (
True
if "mqtt_auto_discovery_topic" in self.config
and self.config["mqtt_auto_discovery_topic"]
else False
)
logger.debug("Force update is: " + str(force_update))
# Publish all values in status
for key in status:
            # Make sure it's a string
value = status[key]
# check if device already in previous_status
if not force_update and status["macaddress"] in self.previous_status:
# Check if key in state
if key in self.previous_status[status["macaddress"]]:
                    # If the values are the same, skip it to make MQTT less chatty (#17)
if self.previous_status[status["macaddress"]][key] == value:
# print ("value same key:%s, value:%s vs : %s" % (key,value,self.previous_status[status['macaddress']][key]))
continue
else:
""""""
# print ("value NOT Same key:%s, value:%s vs : %s" % (key,value,self.previous_status[status['macaddress']][key]))
pubResult = self._publish(
self.config["mqtt_topic_prefix"]
+ status["macaddress"]
+ "/"
+ key
+ "/value",
value,
)
if pubResult != None:
logger.warning('Publishing Result: "%s"' % mqtt.error_string(pubResult))
if pubResult == mqtt.MQTT_ERR_NO_CONN:
self.connect_mqtt()
break
# Set previous to current
self.previous_status[status["macaddress"]] = status
return
# self._publish(binascii.hexlify(status['macaddress'])+'/'+ 'temp/value',status['temp']);
def _publish(self, topic, value, retain=False, qos=0):
payload = value
logger.debug('publishing on topic "%s", data "%s"' % (topic, payload))
pubResult = self._mqtt.publish(topic, payload=payload, qos=qos, retain=retain)
# If there error, then debug log and return not None
if pubResult[0] != 0:
logger.debug('Publishing Result: "%s"' % mqtt.error_string(pubResult[0]))
return pubResult[0]
def connect_mqtt(self):
# Setup client
self._mqtt = mqtt.Client(
client_id=self.config["mqtt_client_id"], clean_session=True, userdata=None
)
# Set last will and testament
self._mqtt.will_set(self.config["mqtt_topic_prefix"] + "LWT", "offline", True)
# Auth
if self.config["mqtt_user"] and self.config["mqtt_password"]:
self._mqtt.username_pw_set(
self.config["mqtt_user"], self.config["mqtt_password"]
)
# Setup callbacks
self._mqtt.on_connect = self._on_mqtt_connect
self._mqtt.on_message = self._on_mqtt_message
self._mqtt.on_log = self._on_mqtt_log
self._mqtt.on_subscribed = self._mqtt_on_subscribe
# Connect
logger.debug(
"Coneccting to MQTT: %s with client ID = %s"
% (self.config["mqtt_host"], self.config["mqtt_client_id"])
)
self._mqtt.connect(
self.config["mqtt_host"],
port=self.config["mqtt_port"],
keepalive=60,
bind_address="",
)
# Start
# creates new thread and runs Mqtt.loop_forever() in it.
self._mqtt.loop_start()
def _on_mqtt_log(self, client, userdata, level, buf):
if level == mqtt.MQTT_LOG_ERR:
logger.debug("Mqtt log: " + buf)
def _mqtt_on_subscribe(self, client, userdata, mid, granted_qos):
logger.debug("Mqtt Subscribed")
def _on_mqtt_message(self, client, userdata, msg):
try:
logger.debug(
"Mqtt Message Received! Userdata: %s, Message %s"
% (userdata, msg.topic + " " + str(msg.payload))
)
# Function is second last .. decode to str #43
function = str(msg.topic.split("/")[-2])
address = msg.topic.split("/")[-3]
            # Make sure it's a proper str (python3, #43)
address = address.encode("ascii", "ignore").decode("utf-8")
# 43 decode to force to str
value = str(msg.payload.decode("ascii"))
logger.debug(
"Mqtt decoded --> Function: %s, Address: %s, value: %s"
% (function, address, value)
)
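            # Example with hypothetical values: a message on topic
            # "aircon/34ea34xxxxxx/temp/set" with payload "24.5" parses to
            # function="temp", address="34ea34xxxxxx", value="24.5".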
except Exception as e:
logger.critical(e)
return
        # Process the received command. We should probably exit here as well if the command was not sent, but the status update above should already trigger an exit.
if function == "temp":
try:
if self.device_objects.get(address):
status = self.device_objects[address].set_temperature(float(value))
if status:
self.publish_mqtt_info(status)
else:
logger.debug(
"Device not on list of devices %s, type:%s"
% (address, type(address))
)
return
except Exception as e:
logger.critical(e)
return
elif function == "power":
if value.lower() == "on":
status = self.device_objects[address].switch_on()
if status:
self.publish_mqtt_info(status)
elif value.lower() == "off":
status = self.device_objects[address].switch_off()
if status:
self.publish_mqtt_info(status)
else:
logger.debug(
"Switch has invalid value, values is on/off received %s", value
)
return
elif function == "mode":
status = self.device_objects[address].set_mode(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug("Mode has invalid value %s", value)
return
elif function == "fanspeed":
if value.lower() == "turbo":
status = self.device_objects[address].set_turbo("ON")
# status = self.device_objects[address].set_mute("OFF")
elif value.lower() == "mute":
status = self.device_objects[address].set_mute("ON")
else:
# status = self.device_objects[address].set_mute("ON")
# status = self.device_objects[address].set_turbo("OFF")
status = self.device_objects[address].set_fanspeed(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug("Fanspeed has invalid value %s", value)
return
elif function == "fanspeed_homeassistant":
if value.lower() == "turbo":
status = self.device_objects[address].set_turbo("ON")
# status = self.device_objects[address].set_mute("OFF")
elif value.lower() == "mute":
status = self.device_objects[address].set_mute("ON")
else:
# status = self.device_objects[address].set_mute("ON")
# status = self.device_objects[address].set_turbo("OFF")
status = self.device_objects[address].set_fanspeed(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug("Fanspeed_homeassistant has invalid value %s", value)
return
elif function == "mode_homekit":
status = self.device_objects[address].set_homekit_mode(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug("Mode_homekit has invalid value %s", value)
return
elif function == "mode_homeassistant":
status = self.device_objects[address].set_homeassistant_mode(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug("Mode_homeassistant has invalid value %s", value)
return
elif function == "state":
if value == "refresh":
logger.debug("Refreshing states")
status = self.device_objects[address].get_ac_status()
else:
logger.debug("Command not valid: " + value)
return
if status:
self.publish_mqtt_info(status, force_update=True)
else:
logger.debug("Unable to refresh")
return
return
elif function == "fixation_v":
try:
if self.device_objects.get(address):
status = self.device_objects[address].set_fixation_v(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug(
"Device not on list of devices %s, type:%s"
% (address, type(address))
)
return
except Exception as e:
logger.critical(e)
return
elif function == "fixation_h":
try:
if self.device_objects.get(address):
status = self.device_objects[address].set_fixation_h(value)
if status:
self.publish_mqtt_info(status)
else:
logger.debug(
"Device not on list of devices %s, type:%s"
% (address, type(address))
)
return
except Exception as e:
logger.critical(e)
return
else:
logger.debug("No function match")
return
def _on_mqtt_connect(self, client, userdata, flags, rc):
"""
RC definition:
0: Connection successful
1: Connection refused - incorrect protocol version
2: Connection refused - invalid client identifier
3: Connection refused - server unavailable
4: Connection refused - bad username or password
5: Connection refused - not authorised
6-255: Currently unused.
"""
logger.debug(
"Mqtt connected! client=%s, userdata=%s, flags=%s, rc=%s"
% (client, userdata, flags, rc)
)
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
sub_topic = self.config["mqtt_topic_prefix"] + "+/+/set"
client.subscribe(sub_topic)
logger.debug("Listing on %s for messages" % (sub_topic))
# LWT
self._publish(self.config["mqtt_topic_prefix"] + "LWT", "online", retain=True)
| [
"logging.getLogger",
"traceback.format_exc",
"logging.debug",
"yaml.dump",
"broadlink_ac_mqtt.classes.broadlink.ac_db.discover",
"paho.mqtt.client.Client",
"json.dumps",
"time.sleep",
"os.path.realpath",
"paho.mqtt.client.error_string",
"sys.exit",
"time.time"
] | [((353, 380), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (370, 380), False, 'import logging\n'), ((1019, 1086), 'broadlink_ac_mqtt.classes.broadlink.ac_db.discover', 'broadlink.discover', ([], {'timeout': '(5)', 'bind_to_ip': "self.config['bind_to_ip']"}), "(timeout=5, bind_to_ip=self.config['bind_to_ip'])\n", (1037, 1086), True, 'import broadlink_ac_mqtt.classes.broadlink.ac_db as broadlink\n'), ((1630, 1676), 'logging.debug', 'logging.debug', (['f"""Returning devices: {devices}"""'], {}), "(f'Returning devices: {devices}')\n", (1643, 1676), False, 'import logging\n'), ((12004, 12095), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {'client_id': "self.config['mqtt_client_id']", 'clean_session': '(True)', 'userdata': 'None'}), "(client_id=self.config['mqtt_client_id'], clean_session=True,\n userdata=None)\n", (12015, 12095), True, 'import paho.mqtt.client as mqtt\n'), ((287, 313), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (303, 313), False, 'import os\n'), ((1341, 1351), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1349, 1351), False, 'import sys\n'), ((1957, 1967), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1965, 1967), False, 'import sys\n'), ((4719, 4729), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4727, 4729), False, 'import sys\n'), ((4905, 4915), 'sys.exit', 'sys.exit', ([], {}), '()\n', (4913, 4915), False, 'import sys\n'), ((5176, 5201), 'yaml.dump', 'yaml.dump', (["{'climate': a}"], {}), "({'climate': a})\n", (5185, 5201), False, 'import yaml\n'), ((5522, 5556), 'logging.debug', 'logging.debug', (['f"""device: {device}"""'], {}), "(f'device: {device}')\n", (5535, 5556), False, 'import logging\n'), ((8576, 8586), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8584, 8586), False, 'import sys\n'), ((8786, 8796), 'sys.exit', 'sys.exit', ([], {}), '()\n', (8794, 8796), False, 'import sys\n'), ((9407, 9425), 'json.dumps', 'json.dumps', (['device'], {}), '(device)\n', (9417, 9425), False, 'import json\n'), ((4274, 4285), 'time.time', 'time.time', ([], {}), '()\n', (4283, 4285), False, 'import time\n'), ((4494, 4516), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4514, 4516), False, 'import traceback\n'), ((11865, 11896), 'paho.mqtt.client.error_string', 'mqtt.error_string', (['pubResult[0]'], {}), '(pubResult[0])\n', (11882, 11896), True, 'import paho.mqtt.client as mqtt\n'), ((3361, 3372), 'time.time', 'time.time', ([], {}), '()\n', (3370, 3372), False, 'import time\n'), ((3802, 3817), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (3812, 3817), False, 'import time\n'), ((11112, 11140), 'paho.mqtt.client.error_string', 'mqtt.error_string', (['pubResult'], {}), '(pubResult)\n', (11129, 11140), True, 'import paho.mqtt.client as mqtt\n'), ((3709, 3720), 'time.time', 'time.time', ([], {}), '()\n', (3718, 3720), False, 'import time\n')] |
from django.urls import path
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import LogoutView
from . import views
app_name = 'accounts'
urlpatterns = [
path("register/", views.UserCreateView.as_view(), name="register"),
path("setup/", login_required(views.StudentSetupView.as_view()), name="student-setup"),
path("logout/", login_required(LogoutView.as_view()), name="logout"),
path("login/", views.LoginView.as_view(), name="login"),
path("detail/<int:pk>/", login_required(views.UserDetailView.as_view()), name="detail"),
path("update/<int:pk>/", login_required(views.UserUpdateView.as_view()), name="update"),
] | [
"django.contrib.auth.views.LogoutView.as_view"
] | [((427, 447), 'django.contrib.auth.views.LogoutView.as_view', 'LogoutView.as_view', ([], {}), '()\n', (445, 447), False, 'from django.contrib.auth.views import LogoutView\n')] |
# -*- coding: utf-8 -*-
# @Time : 2020/12/22 4:49 PM
# @Author : Kevin
'''
Receive the user's question and decide whether it should be handled by the question-answering model.
'''
import fasttext
import config
from utils import sentence_process
import os
def train():
    # 1. Pass the training file path directly  2. The text must be pre-tokenized  3. The label prefix in the file defaults to __label__
model=fasttext.train_supervised(config.think_train_data_path,epoch=20,lr=0.001,wordNgrams=2,label="__label__")
print(config.think_intention_recognition_model_path)
model.save_model(config.think_intention_recognition_model_path)
return model
class IntentionRecognition():
def __init__(self):
        # Load the model; if it cannot be loaded, fall back to training a new model and saving it
if os.path.exists(config.think_intention_recognition_model_path):
self.model = fasttext.load_model(config.think_intention_recognition_model_path)
else:
self.model = train()
def if_ask_question(self,sentence):
word_list=sentence_process.cut_sentence_by_character(sentence)
label,scores=self.model.predict(" ".join(word_list))
return label[0],scores[0]
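# Illustrative usage sketch (assumes the paths in `config` and the training data exist):
#
#   recognizer = IntentionRecognition()
#   label, score = recognizer.if_ask_question("How do I reset my password?")
#   print(label, score)  # a "__label__..." tag and its confidence score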
| [
"os.path.exists",
"fasttext.train_supervised",
"fasttext.load_model",
"utils.sentence_process.cut_sentence_by_character"
] | [((259, 371), 'fasttext.train_supervised', 'fasttext.train_supervised', (['config.think_train_data_path'], {'epoch': '(20)', 'lr': '(0.001)', 'wordNgrams': '(2)', 'label': '"""__label__"""'}), "(config.think_train_data_path, epoch=20, lr=0.001,\n wordNgrams=2, label='__label__')\n", (284, 371), False, 'import fasttext\n'), ((616, 677), 'os.path.exists', 'os.path.exists', (['config.think_intention_recognition_model_path'], {}), '(config.think_intention_recognition_model_path)\n', (630, 677), False, 'import os\n'), ((878, 930), 'utils.sentence_process.cut_sentence_by_character', 'sentence_process.cut_sentence_by_character', (['sentence'], {}), '(sentence)\n', (920, 930), False, 'from utils import sentence_process\n'), ((704, 770), 'fasttext.load_model', 'fasttext.load_model', (['config.think_intention_recognition_model_path'], {}), '(config.think_intention_recognition_model_path)\n', (723, 770), False, 'import fasttext\n')] |
import os
from shutil import copyfile
import sys
import json
import re
os.system('pip install Minio --user')
from minio import Minio
# Load Credential file
copyfile('../tmp/creds.json', './creds.json')
with open('creds.json') as f:
creds = json.load(f)
f.close()
# Remove possible http scheme for Minio
url = re.compile(r"https?://")
cos_endpoint = url.sub('', creds['cos_endpoint'])
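# The Minio client expects a bare "host[:port]" endpoint, so a configured value
# such as "https://s3.example.com" (hypothetical) becomes "s3.example.com";
# secure=True below selects HTTPS again.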
# Download the data and model file from the object storage.
cos = Minio(cos_endpoint,
access_key=creds['cos_access_key'],
secret_key=creds['cos_secret_key'],
secure=True)
cos.fget_object(creds['bucket_name'], creds['data_filename'], creds['data_filename'])
cos.fget_object(creds['bucket_name'], creds['model_filename'], creds['model_filename'])
os.system('chmod 755 %s' % creds['model_filename'])
os.system(creds['spark_entrypoint'])
os.system('zip -r model.zip model')
os.system('zip -r train_data.zip train_data')
cos.fput_object(creds['bucket_name'], 'model.zip', 'model.zip')
cos.fput_object(creds['bucket_name'], 'train_data.zip', 'train_data.zip')
cos.fput_object(creds['bucket_name'], 'evaluation.json', 'evaluation.json')
print('Trained model and train_data are uploaded.')
| [
"re.compile",
"minio.Minio",
"shutil.copyfile",
"json.load",
"os.system"
] | [((73, 110), 'os.system', 'os.system', (['"""pip install Minio --user"""'], {}), "('pip install Minio --user')\n", (82, 110), False, 'import os\n'), ((161, 206), 'shutil.copyfile', 'copyfile', (['"""../tmp/creds.json"""', '"""./creds.json"""'], {}), "('../tmp/creds.json', './creds.json')\n", (169, 206), False, 'from shutil import copyfile\n'), ((319, 342), 're.compile', 're.compile', (['"""https?://"""'], {}), "('https?://')\n", (329, 342), False, 'import re\n'), ((461, 570), 'minio.Minio', 'Minio', (['cos_endpoint'], {'access_key': "creds['cos_access_key']", 'secret_key': "creds['cos_secret_key']", 'secure': '(True)'}), "(cos_endpoint, access_key=creds['cos_access_key'], secret_key=creds[\n 'cos_secret_key'], secure=True)\n", (466, 570), False, 'from minio import Minio\n'), ((778, 829), 'os.system', 'os.system', (["('chmod 755 %s' % creds['model_filename'])"], {}), "('chmod 755 %s' % creds['model_filename'])\n", (787, 829), False, 'import os\n'), ((830, 866), 'os.system', 'os.system', (["creds['spark_entrypoint']"], {}), "(creds['spark_entrypoint'])\n", (839, 866), False, 'import os\n'), ((867, 902), 'os.system', 'os.system', (['"""zip -r model.zip model"""'], {}), "('zip -r model.zip model')\n", (876, 902), False, 'import os\n'), ((903, 948), 'os.system', 'os.system', (['"""zip -r train_data.zip train_data"""'], {}), "('zip -r train_data.zip train_data')\n", (912, 948), False, 'import os\n'), ((249, 261), 'json.load', 'json.load', (['f'], {}), '(f)\n', (258, 261), False, 'import json\n')] |
import torch.nn as nn
from mmdet.core import bbox2result
from .. import builder
from ..registry import DETECTORS
from .base import BaseDetector
from mmdet.core import aggmulti_apply
from collections import OrderedDict
import torch
import numpy as np
@DETECTORS.register_module
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
agg=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
index=False):
super(SingleStageDetector, self).__init__()
self.backbone = builder.build_backbone(backbone)
if neck is not None:
self.neck = builder.build_neck(neck)
self.bbox_head = builder.build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
self.init_weights(pretrained=pretrained)
self.agg_check=agg
if agg is not None:
self.agg=builder.build_agg(agg)
self.index=index
def init_weights(self, pretrained=None):
super(SingleStageDetector, self).init_weights(pretrained)
self.backbone.init_weights(pretrained=pretrained)
if self.with_neck:
if isinstance(self.neck, nn.Sequential):
for m in self.neck:
m.init_weights()
else:
self.neck.init_weights()
self.bbox_head.init_weights()
def extract_feat(self, img):
"""Directly extract features from the backbone+neck
"""
# torch.Size([2, 3, 384, 1248])
x = self.backbone(img)
# torch.Size([2, 256, 96, 312])
# torch.Size([2, 512, 48, 156])
# torch.Size([2, 1024, 24, 78])
# torch.Size([2, 2048, 12, 39])
if self.with_neck:
x = self.neck(x)
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
return x
def forward_dummy(self, img):
"""Used for computing network flops.
        See `mmdetection/tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
x = self.extract_feat(img)
if self.agg_check:
# x,trans_loss=self.agg(x)
x=self.agg(x)
if isinstance(x, tuple):
outs = self.bbox_head(x)
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
# return losses,trans_loss
return losses
else:
losses_all=[]
# print('list')
#[tuple(agg_output),tuple(refer_out),tuple(support1_out),tuple(support1_out)]
for i in range(len(x)):
outs = self.bbox_head(x[i])
loss_inputs = outs + (gt_bboxes, gt_labels, img_metas, self.train_cfg)
losses = self.bbox_head.loss(
*loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
losses_all.append(losses)
# continue
return losses_all
def simple_test(self, img, img_meta, rescale=False):
print(img.shape)
print('single test')
if img.shape[1]>3:
n=img.shape[1]//3
img=img.view(n,3,img.shape[2],img.shape[3])
# print(((img[0]==img[1]).sum().float()/3)/(img.shape[-1]*img.shape[-2]))
#0.1864
# print(img.shape)
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
x = self.extract_feat(img)
if self.agg_check:
x=self.agg.forward_test(x)
# agg_load=np.load('/home/ld/RepPoints/offset/agg_st_support/2/agg_f.npy')
# agg=torch.from_numpy(agg_load).to(img.device)
# print('agg check in single stage',(x[0]==agg).all())
# load=[]
# for i in range(len(x)):
# # print(x[i].shape)
# if i==0:
# load.append(agg)
# else:
# load.append(x[i])
# x=tuple(load)
outs = self.bbox_head(x)
index=self.index
index=True
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs,index=index)
if index:
box_loc=bbox_list[0][2]
bbox_list=[bbox_list[0][:2]]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
if index:
return bbox_results[0],box_loc
else:
return bbox_results[0]
def aug_test(self, imgs, img_metas, rescale=False):
raise NotImplementedError
def simple_trackor(self, img, img_meta, rescale=False):
print(img.shape)
print('single eval')
if img.shape[1]>3:
n=img.shape[1]//3
img=img.view(n,3,img.shape[2],img.shape[3])
# print(((img[0]==img[1]).sum().float()/3)/(img.shape[-1]*img.shape[-2]))
#0.1864
# print(img.shape)
# torch.Size([2, 256, 48, 156])
# torch.Size([2, 256, 24, 78])
# torch.Size([2, 256, 12, 39])
# torch.Size([2, 256, 6, 20])
# torch.Size([2, 256, 3, 10])
x = self.extract_feat(img)
if self.agg_check:
x=self.agg.forward_eval(x)
if isinstance(x, tuple):
outs = self.bbox_head(x)
index=self.index
index=True
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs,index=index)
if index:
box_loc=bbox_list[0][2]
bbox_list=[bbox_list[0][:2]]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
if index:
return bbox_results[0],box_loc
else:
return bbox_results[0]
else:
out=[]
# length 12: out=[tuple(refer_out),tuple(agg_out)]+support_out
for i in range(len(x)):
outs = self.bbox_head(x[i])
index=self.index
index=True
bbox_inputs = outs + (img_meta, self.test_cfg, rescale)
bbox_list = self.bbox_head.get_bboxes(*bbox_inputs,index=index)
if index:
box_loc=bbox_list[0][2]
bbox_list=[bbox_list[0][:2]]
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in bbox_list
]
if index:
out.append([bbox_results[0],box_loc])
else:
out.append(bbox_results[0])
print(len(out))
return out | [
"mmdet.core.bbox2result"
] | [((5133, 5196), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes', 'det_labels', 'self.bbox_head.num_classes'], {}), '(det_bboxes, det_labels, self.bbox_head.num_classes)\n', (5144, 5196), False, 'from mmdet.core import bbox2result\n'), ((6556, 6619), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes', 'det_labels', 'self.bbox_head.num_classes'], {}), '(det_bboxes, det_labels, self.bbox_head.num_classes)\n', (6567, 6619), False, 'from mmdet.core import bbox2result\n'), ((7388, 7451), 'mmdet.core.bbox2result', 'bbox2result', (['det_bboxes', 'det_labels', 'self.bbox_head.num_classes'], {}), '(det_bboxes, det_labels, self.bbox_head.num_classes)\n', (7399, 7451), False, 'from mmdet.core import bbox2result\n')] |
# -*- coding: utf-8 -*-
"""
Color conversion functions and utilities.
CMF creation function
---------------------
* :func:`.load_cmf` : loads specter cmf data from file or pre-defined table
* :func:`.load_tcmf` : loads transmission cmf data from file or pre-defined table
* :func:`.cmf2tcmf` : converts cmf to transmission cmf.
* :func:`.srf2cmf` : converts spectral response data to cmf
* :func:`.load_specter` : load specter from file or from data.
* :func:`.normalize_specter` : for specter normalization.
Color conversion
----------------
* :func:`.specter2color` : converts specter data to color RGB or gray image.
* :func:`.apply_gamma` : applies gamma curve to linear data
* :func:`.apply_srgb_gamma` : applies sRGB gamma curve to linear data
* :func:`.xyz2rgb` : Converts XYZ data to RGB
* :func:`.xyz2gray` : Converts XYZ data to YYY (gray)
* :func:`.spec2xyz` : Converts specter to XYZ
"""
from __future__ import absolute_import, print_function, division
from dtmm.conf import FDTYPE, NUMBA_TARGET, NFDTYPE, NUMBA_CACHE, DATAPATH, CMF
import numpy as np
import numba
import os
#DATAPATH = os.path.join(os.path.dirname(__file__), "data")
# D65 standard light 5nm specter
D65PATH = os.path.join(DATAPATH, "D65.dat" )
#: color matrix for sRGB color space in D65 reference white
XYZ2RGBD65 = np.array([[ 3.2404542, -1.5371385, -0.4985314],
[-0.9692660, 1.8760108, 0.0415560],
[ 0.0556434, -0.2040259, 1.0572252]])
RGB2XYZ = np.linalg.inv(XYZ2RGBD65)
# sRGB transfer function constants
SRGBIGAMMA = 1/2.4
SRGBSLOPE = 12.92
SRGBLINPOINT = 0.0031308
SRGBA = 0.055
@numba.vectorize([NFDTYPE(NFDTYPE, NFDTYPE)], nopython = True, target = NUMBA_TARGET, cache = NUMBA_CACHE)
def apply_gamma(value, gamma):
"""apply_gamma(value, gamma)
Applies standard gamma function (transfer function) to the given (linear) data.
Parameters
----------
value : float
Input value
gamma : float
Gamma factor"""
if value > 1.:
return 1.
if value < 0:
return 0.
else:
return value**(1./gamma)
@numba.vectorize([NFDTYPE(NFDTYPE)], nopython = True, target = NUMBA_TARGET, cache = NUMBA_CACHE)
def apply_srgb_gamma(value):
"""apply_srgb_gamma(value)
Applies sRGB gamma function (transfer function) to the given (linear) data."""
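    # Piecewise sRGB encoding: SRGBSLOPE * v for v <= SRGBLINPOINT, otherwise
    # (1 + SRGBA) * v**(1/2.4) - SRGBA, with the result clipped to [0, 1].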
if value < SRGBLINPOINT:
if value < 0.:
return 0.
else:
return SRGBSLOPE * value
else:
if value > 1.:
return 1.
else:
return (1+SRGBA)*value**SRGBIGAMMA-SRGBA
@numba.guvectorize([(NFDTYPE[:], NFDTYPE[:])], '(n)->(n)', target = NUMBA_TARGET, cache = NUMBA_CACHE)
def xyz2srgb(xyz, rgb):
"""xyz2srgb(xyz)
Converts XYZ value to RGB value based on the sRGB working space with
D65 reference white.
"""
assert len(xyz) >= 3
xyz0 = xyz[0]
xyz1 = xyz[1]
xyz2 = xyz[2]
for k in range(3):
rgb[k] = XYZ2RGBD65[k,0] * xyz0 + XYZ2RGBD65[k,1]* xyz1 + XYZ2RGBD65[k,2]* xyz2
@numba.guvectorize([(NFDTYPE[:], NFDTYPE[:])], '(n)->(n)', target = NUMBA_TARGET, cache = NUMBA_CACHE)
def xyz2gray(xyz, gray):
"""xyz2gray(xyz)
Converts XYZ value to Gray color"""
assert len(xyz) >= 3
y = xyz[1]
for k in range(3):
gray[k] = y
@numba.guvectorize([(NFDTYPE[:],NFDTYPE[:,:],NFDTYPE[:])], '(n),(n,m)->(m)', target = NUMBA_TARGET, cache = NUMBA_CACHE)
def spec2xyz(spec,cmf,xyz):
"""spec2xyz(spec,cmf)
Converts specter array to xyz value.
Parameters
----------
spec : array_like
Input specter data
cmf : array_like
Color matching function
Returns
-------
xyz : ndarray
Computed xyz value."""
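    # Computes xyz[j] = sum_i cmf[i, j] * spec[i] for each channel j (X, Y, Z).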
for j in range(cmf.shape[1]):
xyz[j] = 0.
for i in range(cmf.shape[0]):
xyz[j] = xyz[j] + cmf[i,j]*spec[i]
def specter2color(spec, cmf, norm = False, gamma = True, gray = False, out = None):
"""Converts specter data to RGB data (color or gray).
Specter shape must be [...,k], where wavelengths are in the last axis. cmf
    must be a valid color matching function array of size [k,3].
Parameters
----------
spec : array
Specter data of shape [..., n] where each data element in the array has
n wavelength values
cmf : array
A color matching function (array of shape [n,3]) that converts the specter data
to a XYZ color.
norm : bool or float, optional
If set to False, no data normalization is performed (default). If True,
internally, xyz data is normalized in the range [0,1.], so that no clipping occurs.
If it is a float, data is normalized to this value.
gamma : bool or float, optional
        If gamma is True, the sRGB gamma function is applied (default). If a float is
provided, standard gamma factor is applied with a given gamma value. If False,
no gamma correction is performed.
gray : bool, optional
Whether gray output is calculated (color by default)
out : array, optional
Output array of shape (...,3)
Returns
-------
rgb : ndarray
A computed RGB value.
Notes
-----
Numpy broadcasting rules apply to spec and cmf.
Example
-------
>>> cmf = load_tcmf()
>>> specter2color([1]*81, cmf)#should be close to 1,1,1
... # doctest: +NORMALIZE_WHITESPACE
array([0.99994901, 1. , 0.99998533])
"""
#if isinstance(spec, list):
# spec = np.add.reduce(spec)
cmf = np.asarray(cmf)
if cmf.shape[-1] != 3:
raise ValueError("Grayscale cmf! Cannot convert to color.")
out = spec2xyz(spec,cmf, out)
if norm is True:
#normalize to max in any of the XYZ channels.. so that no clipping occurs.
out = np.divide(out,out.max(),out)
elif norm != 0:
out = np.divide(out,norm,out)
if gray == True:
out = xyz2gray(out,out)
else:
out = xyz2srgb(out,out)
if gamma is True:
apply_srgb_gamma(out,out)
elif gamma is not False:
apply_gamma(out,gamma,out)
return out
def srf2cmf(srf, out = None):
"""Converts spectral response function (Y) to color matching function (XYZ).
Parameters
----------
    srf : array_like
Spectral response function of shape (...,n)
out : ndarray, optional
Output array.
Returns
-------
cmf: ndarray
A color matching function array of shape (...,n,3)
"""
if out is None:
out = np.empty(shape = srf.shape + (3,), dtype = srf.dtype)
out[...,0] = RGB2XYZ[0,0] * srf + RGB2XYZ[0,1] * srf + RGB2XYZ[0,2] * srf
out[...,1] = srf
out[...,2] = RGB2XYZ[2,0] * srf + RGB2XYZ[2,1] * srf + RGB2XYZ[2,2] * srf
return out
def normalize_specter(spec, cmf, out = None):
"""Normalizes specter based on the color matching function. (cmf array) so that
calculated Y value is 1.
Parameters
----------
spec : array_like
Input illuminant specter data of shape (...,n).
cmf : array_like
Color matching function of shape (...,n,3).
out : ndarray, optional
Output array.
Returns
-------
normalized_spec : ndarray
A normalized version of the input specter
Notes
-----
Numpy broadcasting rules apply to spec and cmf.
"""
cmf = np.asarray(cmf)
if cmf.shape[-1] == 3:
#cmf is color matching function
xyz = spec2xyz(spec,cmf)
        norm = xyz[...,1] # Y value is lightness; normalize to it
else:
raise ValueError("Incompatible cmf shape")
return np.divide(spec,norm,out)
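# Illustrative usage sketch (mirrors what load_tcmf does internally): normalize
# the D65 illuminant against the cmf so that unity transmission maps to Y = 1.
#
#   x, cmf = load_cmf(retx = True)
#   spec = load_specter(wavelengths = x, illuminant = "D65")
#   spec = normalize_specter(spec, cmf)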
def load_tcmf(wavelengths = None, illuminant = "D65", cmf = CMF, norm = True, retx = False,
single_wavelength = False):
"""Loads transmission color matching function.
This functions loads a CIE XYZ color matching function and transforms it
to a transmission color matching function for a given illuminant. Resulting
CMF matrix will transform unity into white color.
Parameters
----------
wavelengths : array_like, optional
Wavelengths at which data is computed. If not specified (default), original
data from the 5nm tabulated data is returned.
illuminant : str, optional
Name of the standard illuminant or path to illuminant data.
cmf : str, optional
Name or path to the cmf function. Can be 'CIE1931' for CIE 1931 2-deg
    5nm tabulated data, 'CIE1964' for CIE1964 10-deg 5nm tabulated data, or
'CIE2006-2' or 'CIE2006-10' for a proposed CIE 2006 2- or 10-deg 5nm
tabulated data.
norm : int, optional
By default cmf is normalized so that unity transmission value over the
full spectral range of the illuminant is converted to XYZ color with Y=1.
You can disable this by setting norm = 0. If you set norm = 2, then the
cmf is normalized for the interpolated spectra at given wavelengths,
and not to the full bandwidth of the spectra (norm = 1).
retx : bool, optional
Should the selected wavelengths be returned as well.
single_wavelength : bool, optional
        If specified, the color matching function for a single-wavelength specter is
        calculated by interpolation. By default, the specter is assumed to be a
        piece-wise linear function, continuous between the specified
        wavelengths, and the data is integrated instead.
Returns
-------
cmf : array
Color matching function array of shape [n,3] or a tuple of (x,cmf)
if retx is specified.
Example
-------
>>> cmf = load_tcmf()
>>> specter2color([1]*81, cmf) #should be close to 1,1,1
... # doctest: +NORMALIZE_WHITESPACE
array([0.99994901, 1. , 0.99998533])
"""
if wavelengths is not None and len(wavelengths) == 1:
single_wavelength = True
if single_wavelength == True:
x, cmf = load_cmf(wavelengths, single_wavelength = True,retx = True, cmf = cmf)
spec = load_specter(wavelengths = x, illuminant = illuminant)
cmf = cmf2tcmf(cmf, spec, norm = bool(norm))
else:
x,cmf = load_cmf(retx = True, cmf = cmf)
spec = load_specter(wavelengths = x, illuminant = illuminant)
cmf = cmf2tcmf(cmf, spec, norm = bool(norm))
if wavelengths is not None:
cmf = integrate_data(wavelengths, x,cmf)
x = wavelengths
if norm == 2:
cmf = cmf2tcmf(cmf, [1.]*len(wavelengths), norm = norm)
if retx == True:
return x, cmf
else:
return cmf
def cmf2tcmf(cmf, spec, norm = True, out = None):
"""Converts CMF table to specter-normalized transmission CMF table
Parameters
----------
cmf : array_like
        Color matching function array
spec : array_like
Illuminant specter array
norm : bool, optional
Whether to normalize illuminant specter before constructing the CMF.
out : ndarray, optional
Output array
Returns
-------
out : ndarray
A transmission color matching function array.
Notes
-----
Numpy broadcasting rules apply to spec and cmf.
"""
cmf = np.asarray(cmf,FDTYPE)
spec = np.asarray(spec,FDTYPE)
if norm == True:
spec = normalize_specter(spec, cmf)
return np.multiply(spec[:,np.newaxis],cmf, out = out)
def load_specter(wavelengths = None, illuminant = "D65", retx = False):
"""Loads illuminant specter data from file.
Parameters
----------
wavelengths : array_like, optional
Wavelengths at which data is interpolated
illuminant : str, or array, optional
Name of the standard illuminant or filename. If specified as array, it must
        be an array of shape (n,2). The first column describes wavelength and the
second is the intensity.
retx : bool, optional
Should the selected wavelengths be returned as well.
Returns
-------
specter : array
Specter array of shape [num] or a tuple of (x,specter)
if retx is specified
Example
-------
#D65 evaluated at three wavelengths
>>> spec = load_specter((450,500,550), "D65")
#illuminant with triangular specter evaluated at three wavelengths
>>> spec = load_specter([450,475,500,], illuminant = [[400,0],[500,1],[600,0]])
"""
if isinstance(illuminant, str):
try:
# predefined data in a file
data = np.loadtxt(os.path.join(DATAPATH, illuminant + ".dat"))
except:
data = np.loadtxt(illuminant)
else:
data = np.asarray(illuminant)
if data.ndim != 2 and data.shape[-1] != 2:
raise ValueError("Not a valid illuminant data")
if wavelengths is not None:
data = interpolate_data(wavelengths, data[:,0], data[:,1:])
data = np.ascontiguousarray(data[:,0], dtype = FDTYPE)
else:
wavelengths = np.ascontiguousarray(data[:,0], dtype = FDTYPE)
data = np.ascontiguousarray(data[:,1], dtype = FDTYPE)
if retx == True:
return wavelengths, data
else:
return data
def load_cmf(wavelengths = None, cmf = CMF, retx = False, single_wavelength = False):
"""Load XYZ Color Matching function.
This function loads tabulated data and re-calculates xyz array on a
given range of wavelength values.
See also load_tcmf.
Parameters
----------
wavelengths : array_like, optional
A 1D array of wavelengths at which data is computed. If not specified
(default), original data from the 5nm tabulated data is returned.
cmf : str, optional
Name or path to the cmf function. Can be 'CIE1931' for CIE 1931 2-deg
5nm tabulated data, 'CIE1964' for CIE1964 10-deg 5nm tabulated data, or
'CIE2006-2' or 'CIE2006-10' for a proposed CIE 2006 2- or 10-deg 5nm
tabulated data. For grayscale cameras, there is a 'CMOS' spectral
response data. You can also provide 'FLAT' for flat (unity) response function.
retx : bool, optional
Should the selected wavelengths be returned as well.
single_wavelength : bool, optional
If specified, color matching function for single wavelengths specter is
calculated by interpolation. By default, specter is assumed to be a
piece-wise linear function and continuous between the specified
        wavelengths, and data is integrated instead.
Returns
-------
cmf : array
Color matching function array of shape [n,3] or a tuple of (x,cmf)
if retx is specified.
"""
try:
if cmf == "FLAT":
if wavelengths is None:
wavelengths = np.arange(380,781,5)
data = np.zeros((len(wavelengths),3))
data[:,1] = 100. #100% QE
if retx == True:
return wavelengths, data
else:
return data
if cmf.startswith("CIE"):
data = np.loadtxt(os.path.join(DATAPATH, cmf + "XYZ.dat"))
else:
data = np.loadtxt(os.path.join(DATAPATH, cmf + "Y.dat"))
except:
data = np.loadtxt(cmf)
if data.shape[-1] == 4:
x, data = np.ascontiguousarray(data[:,0], dtype = FDTYPE), np.ascontiguousarray(data[:,1:], dtype = FDTYPE)
elif data.shape[-1] == 2:
x, data = np.ascontiguousarray(data[:,0], dtype = FDTYPE), np.ascontiguousarray(data[:,1], dtype = FDTYPE)
else:
raise ValueError("Not a valid cmf data!")
if wavelengths is not None:
wavelengths = np.asarray(wavelengths, dtype = FDTYPE)
if wavelengths.ndim != 1:
raise ValueError("Wavelengths has to be 1D array")
if len(wavelengths) == 1:
single_wavelength = True
if single_wavelength == True and wavelengths is not None:
data = interpolate_data(wavelengths, x, data)
x = wavelengths
elif wavelengths is not None:
data = integrate_data(wavelengths, x,data)
x = wavelengths
if data.ndim == 1:
#convert spectral response to cmf
data = srf2cmf(data)
if retx == True:
return x, data
else:
return data
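# A minimal usage sketch: load_cmf() can resample the tabulated data onto an
# arbitrary wavelength grid, and a single wavelength switches it to plain
# interpolation (the tabulated data covers 380-780 nm in 5 nm steps by default):
def _example_load_cmf():
    full = load_cmf()                               # original 5 nm tabulated data
    coarse = load_cmf(np.linspace(380, 780, 41))    # integrated onto a 10 nm grid
    single = load_cmf([550.])                       # interpolated at one wavelength
    return full, coarse, single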
def interpolate_data(x, x0, data):
"""Interpolates data
Parameters
----------
x : array_like
The x-coordinates at which to evaluate the interpolated values.
x0 : array_like
The x-coordinates of the data points, must be increasing.
data : ndarray
A 1D or 2D array of datapoints to interpolate.
Returns
-------
y : ndarray
The interpolated values.
"""
data = np.asarray(data, dtype = FDTYPE)
x0 = np.asarray(x0)
x = np.asarray(x)
if data.ndim in (1,2) and x0.ndim == 1 and x.ndim == 1:
if data.ndim == 2:
out = np.zeros(shape = x.shape + data.shape[1:], dtype = data.dtype)
rows, cols = data.shape
for i in range(cols):
#f = interpolate.interp1d(x0, data[:,i],fill_value = 0, kind="linear")
#out[...,i] = f(x)
out[...,i] = np.interp(x, x0, data[:,i],left = 0., right = 0.)
return out
else:
return np.interp(x, x0, data, left = 0., right = 0.)
else:
raise ValueError("Invalid dimensions of input data.")
def integrate_data(x,x0,cmf):
"""Integrates data.
This function takes the original data and computes new data at specified x
coordinates by a weighted integration of the original data. For each new
x value, it multiplies the data with a triangular kernel and integrates.
The width of the kernel is computed from the spacings in x.
Parameters
----------
x : array_like
The x-coordinates at which to compute the integrated data.
x0 : array_like
The x-coordinates of the data points, must be increasing.
    cmf : ndarray
        A 1D or 2D array of datapoints to integrate.
Returns
-------
y : ndarray
The integrated values.
"""
cmf = np.asarray(cmf)
x0 = np.asarray(x0)
xout = np.asarray(x)
ndim = cmf.ndim
if ndim in (1,2) and x0.ndim == 1 and xout.ndim == 1:
dxs = x0[1:]-x0[0:-1]
dx = dxs[0]
if not np.all(dxs == dx):
raise ValueError("x0 must have equal spacings")
out = np.zeros(shape = (len(x),)+cmf.shape[1:], dtype = cmf.dtype)
n = len(x)
for i in range(n):
if i == 0:
x,y = _rxn(xout,i,dx,ndim)
data = (interpolate_data(x,x0,cmf)*y).sum(0)
elif i == n-1:
x,y = _lxn(xout,i,dx,ndim)
data = (interpolate_data(x,x0,cmf)*y).sum(0)
else:
x,y = _rxn(xout,i,dx,ndim)
tmp = (interpolate_data(x,x0,cmf)*y)
tmp[0] = tmp[0]/2 #first element is counted two times...
data = tmp.sum(0)
x,y = _lxn(xout,i,dx,ndim)
tmp = (interpolate_data(x,x0,cmf)*y)
tmp[0] = tmp[0]/2 #first element is counted two times...
data += tmp.sum(0)
out[i,...] = data
return out
else:
raise ValueError("Invalid dimensions of input data.")
def _rxn(x,i,dx,ndim):
xlow, xhigh = x[i], x[i+1]
dx = (xhigh - xlow)/dx
if dx < 1:
import warnings
warnings.warn("The resolution of the integrand is too low.", stacklevel = 2)
#n = int(round(dx))+1
n = int(dx-1)+1
dx = dx/n
xout = np.linspace(xlow,xhigh,n+1)
yout = np.linspace(1.,0.,n+1)*dx
if ndim == 2:
return xout,yout[:,None]
else:
return xout,yout
def _lxn(x,i,dx, ndim):
xlow, xhigh = x[i-1], x[i]
dx = (xhigh - xlow)/dx
if dx < 1:
import warnings
warnings.warn("The resolution of the integrand is too low.", stacklevel = 2)
#n = int(round(dx))+1
n = int(dx-1)+1
dx = dx/n
xout = np.linspace(xhigh,xlow,n+1)
yout = np.linspace(1.,0.,n+1)*dx
if ndim == 2:
return xout,yout[:,None]
else:
return xout,yout
if __name__ == "__main__":
import doctest
doctest.testmod()
import matplotlib.pyplot as plt
num = 81
for num in (81,41,21,11,5):
wavelengths = np.linspace(380,780,num)
x,xyz = load_tcmf(wavelengths, norm = True,retx = True, single_wavelength = True)
plt.plot(x,xyz[:,0],label = num) #X color
x,xyz = load_tcmf(wavelengths, norm = True,retx = True, single_wavelength = False)
plt.plot(x,xyz[:,0],"o") #X color
plt.legend()
plt.show()
| [
"numpy.ascontiguousarray",
"numpy.array",
"numpy.divide",
"numpy.arange",
"numpy.multiply",
"matplotlib.pyplot.plot",
"numpy.asarray",
"numpy.linspace",
"doctest.testmod",
"numpy.empty",
"warnings.warn",
"numpy.all",
"numba.guvectorize",
"numpy.interp",
"dtmm.conf.NFDTYPE",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show",
"os.path.join",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.loadtxt"
] | [((1200, 1233), 'os.path.join', 'os.path.join', (['DATAPATH', '"""D65.dat"""'], {}), "(DATAPATH, 'D65.dat')\n", (1212, 1233), False, 'import os\n'), ((1308, 1430), 'numpy.array', 'np.array', (['[[3.2404542, -1.5371385, -0.4985314], [-0.969266, 1.8760108, 0.041556], [\n 0.0556434, -0.2040259, 1.0572252]]'], {}), '([[3.2404542, -1.5371385, -0.4985314], [-0.969266, 1.8760108, \n 0.041556], [0.0556434, -0.2040259, 1.0572252]])\n', (1316, 1430), True, 'import numpy as np\n'), ((1489, 1514), 'numpy.linalg.inv', 'np.linalg.inv', (['XYZ2RGBD65'], {}), '(XYZ2RGBD65)\n', (1502, 1514), True, 'import numpy as np\n'), ((2578, 2680), 'numba.guvectorize', 'numba.guvectorize', (['[(NFDTYPE[:], NFDTYPE[:])]', '"""(n)->(n)"""'], {'target': 'NUMBA_TARGET', 'cache': 'NUMBA_CACHE'}), "([(NFDTYPE[:], NFDTYPE[:])], '(n)->(n)', target=\n NUMBA_TARGET, cache=NUMBA_CACHE)\n", (2595, 2680), False, 'import numba\n'), ((3027, 3129), 'numba.guvectorize', 'numba.guvectorize', (['[(NFDTYPE[:], NFDTYPE[:])]', '"""(n)->(n)"""'], {'target': 'NUMBA_TARGET', 'cache': 'NUMBA_CACHE'}), "([(NFDTYPE[:], NFDTYPE[:])], '(n)->(n)', target=\n NUMBA_TARGET, cache=NUMBA_CACHE)\n", (3044, 3129), False, 'import numba\n'), ((3317, 3439), 'numba.guvectorize', 'numba.guvectorize', (['[(NFDTYPE[:], NFDTYPE[:, :], NFDTYPE[:])]', '"""(n),(n,m)->(m)"""'], {'target': 'NUMBA_TARGET', 'cache': 'NUMBA_CACHE'}), "([(NFDTYPE[:], NFDTYPE[:, :], NFDTYPE[:])],\n '(n),(n,m)->(m)', target=NUMBA_TARGET, cache=NUMBA_CACHE)\n", (3334, 3439), False, 'import numba\n'), ((5580, 5595), 'numpy.asarray', 'np.asarray', (['cmf'], {}), '(cmf)\n', (5590, 5595), True, 'import numpy as np\n'), ((7482, 7497), 'numpy.asarray', 'np.asarray', (['cmf'], {}), '(cmf)\n', (7492, 7497), True, 'import numpy as np\n'), ((7740, 7766), 'numpy.divide', 'np.divide', (['spec', 'norm', 'out'], {}), '(spec, norm, out)\n', (7749, 7766), True, 'import numpy as np\n'), ((11386, 11409), 'numpy.asarray', 'np.asarray', (['cmf', 'FDTYPE'], {}), '(cmf, FDTYPE)\n', (11396, 11409), True, 'import numpy as np\n'), ((11420, 11444), 'numpy.asarray', 'np.asarray', (['spec', 'FDTYPE'], {}), '(spec, FDTYPE)\n', (11430, 11444), True, 'import numpy as np\n'), ((11520, 11566), 'numpy.multiply', 'np.multiply', (['spec[:, np.newaxis]', 'cmf'], {'out': 'out'}), '(spec[:, np.newaxis], cmf, out=out)\n', (11531, 11566), True, 'import numpy as np\n'), ((16952, 16982), 'numpy.asarray', 'np.asarray', (['data'], {'dtype': 'FDTYPE'}), '(data, dtype=FDTYPE)\n', (16962, 16982), True, 'import numpy as np\n'), ((16994, 17008), 'numpy.asarray', 'np.asarray', (['x0'], {}), '(x0)\n', (17004, 17008), True, 'import numpy as np\n'), ((17017, 17030), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (17027, 17030), True, 'import numpy as np\n'), ((18412, 18427), 'numpy.asarray', 'np.asarray', (['cmf'], {}), '(cmf)\n', (18422, 18427), True, 'import numpy as np\n'), ((18437, 18451), 'numpy.asarray', 'np.asarray', (['x0'], {}), '(x0)\n', (18447, 18451), True, 'import numpy as np\n'), ((18463, 18476), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (18473, 18476), True, 'import numpy as np\n'), ((19930, 19961), 'numpy.linspace', 'np.linspace', (['xlow', 'xhigh', '(n + 1)'], {}), '(xlow, xhigh, n + 1)\n', (19941, 19961), True, 'import numpy as np\n'), ((20359, 20390), 'numpy.linspace', 'np.linspace', (['xhigh', 'xlow', '(n + 1)'], {}), '(xhigh, xlow, n + 1)\n', (20370, 20390), True, 'import numpy as np\n'), ((20565, 20582), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (20580, 20582), False, 'import doctest\n'), 
((21006, 21018), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (21016, 21018), True, 'import matplotlib.pyplot as plt\n'), ((21023, 21033), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21031, 21033), True, 'import matplotlib.pyplot as plt\n'), ((1643, 1668), 'dtmm.conf.NFDTYPE', 'NFDTYPE', (['NFDTYPE', 'NFDTYPE'], {}), '(NFDTYPE, NFDTYPE)\n', (1650, 1668), False, 'from dtmm.conf import FDTYPE, NUMBA_TARGET, NFDTYPE, NUMBA_CACHE, DATAPATH, CMF\n'), ((2108, 2124), 'dtmm.conf.NFDTYPE', 'NFDTYPE', (['NFDTYPE'], {}), '(NFDTYPE)\n', (2115, 2124), False, 'from dtmm.conf import FDTYPE, NUMBA_TARGET, NFDTYPE, NUMBA_CACHE, DATAPATH, CMF\n'), ((6615, 6664), 'numpy.empty', 'np.empty', ([], {'shape': '(srf.shape + (3,))', 'dtype': 'srf.dtype'}), '(shape=srf.shape + (3,), dtype=srf.dtype)\n', (6623, 6664), True, 'import numpy as np\n'), ((12846, 12868), 'numpy.asarray', 'np.asarray', (['illuminant'], {}), '(illuminant)\n', (12856, 12868), True, 'import numpy as np\n'), ((13104, 13150), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[:, 0]'], {'dtype': 'FDTYPE'}), '(data[:, 0], dtype=FDTYPE)\n', (13124, 13150), True, 'import numpy as np\n'), ((13184, 13230), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[:, 0]'], {'dtype': 'FDTYPE'}), '(data[:, 0], dtype=FDTYPE)\n', (13204, 13230), True, 'import numpy as np\n'), ((13247, 13293), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[:, 1]'], {'dtype': 'FDTYPE'}), '(data[:, 1], dtype=FDTYPE)\n', (13267, 13293), True, 'import numpy as np\n'), ((15873, 15910), 'numpy.asarray', 'np.asarray', (['wavelengths'], {'dtype': 'FDTYPE'}), '(wavelengths, dtype=FDTYPE)\n', (15883, 15910), True, 'import numpy as np\n'), ((19782, 19856), 'warnings.warn', 'warnings.warn', (['"""The resolution of the integrand is too low."""'], {'stacklevel': '(2)'}), "('The resolution of the integrand is too low.', stacklevel=2)\n", (19795, 19856), False, 'import warnings\n'), ((19969, 19997), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0.0)', '(n + 1)'], {}), '(1.0, 0.0, n + 1)\n', (19980, 19997), True, 'import numpy as np\n'), ((20211, 20285), 'warnings.warn', 'warnings.warn', (['"""The resolution of the integrand is too low."""'], {'stacklevel': '(2)'}), "('The resolution of the integrand is too low.', stacklevel=2)\n", (20224, 20285), False, 'import warnings\n'), ((20398, 20426), 'numpy.linspace', 'np.linspace', (['(1.0)', '(0.0)', '(n + 1)'], {}), '(1.0, 0.0, n + 1)\n', (20409, 20426), True, 'import numpy as np\n'), ((20686, 20712), 'numpy.linspace', 'np.linspace', (['(380)', '(780)', 'num'], {}), '(380, 780, num)\n', (20697, 20712), True, 'import numpy as np\n'), ((20818, 20851), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'xyz[:, 0]'], {'label': 'num'}), '(x, xyz[:, 0], label=num)\n', (20826, 20851), True, 'import matplotlib.pyplot as plt\n'), ((20968, 20995), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'xyz[:, 0]', '"""o"""'], {}), "(x, xyz[:, 0], 'o')\n", (20976, 20995), True, 'import matplotlib.pyplot as plt\n'), ((5931, 5956), 'numpy.divide', 'np.divide', (['out', 'norm', 'out'], {}), '(out, norm, out)\n', (5940, 5956), True, 'import numpy as np\n'), ((15438, 15453), 'numpy.loadtxt', 'np.loadtxt', (['cmf'], {}), '(cmf)\n', (15448, 15453), True, 'import numpy as np\n'), ((15505, 15551), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[:, 0]'], {'dtype': 'FDTYPE'}), '(data[:, 0], dtype=FDTYPE)\n', (15525, 15551), True, 'import numpy as np\n'), ((15555, 15602), 'numpy.ascontiguousarray', 'np.ascontiguousarray', 
(['data[:, 1:]'], {'dtype': 'FDTYPE'}), '(data[:, 1:], dtype=FDTYPE)\n', (15575, 15602), True, 'import numpy as np\n'), ((17137, 17195), 'numpy.zeros', 'np.zeros', ([], {'shape': '(x.shape + data.shape[1:])', 'dtype': 'data.dtype'}), '(shape=x.shape + data.shape[1:], dtype=data.dtype)\n', (17145, 17195), True, 'import numpy as np\n'), ((17532, 17575), 'numpy.interp', 'np.interp', (['x', 'x0', 'data'], {'left': '(0.0)', 'right': '(0.0)'}), '(x, x0, data, left=0.0, right=0.0)\n', (17541, 17575), True, 'import numpy as np\n'), ((18624, 18641), 'numpy.all', 'np.all', (['(dxs == dx)'], {}), '(dxs == dx)\n', (18630, 18641), True, 'import numpy as np\n'), ((12718, 12761), 'os.path.join', 'os.path.join', (['DATAPATH', "(illuminant + '.dat')"], {}), "(DATAPATH, illuminant + '.dat')\n", (12730, 12761), False, 'import os\n'), ((12798, 12820), 'numpy.loadtxt', 'np.loadtxt', (['illuminant'], {}), '(illuminant)\n', (12808, 12820), True, 'import numpy as np\n'), ((14982, 15004), 'numpy.arange', 'np.arange', (['(380)', '(781)', '(5)'], {}), '(380, 781, 5)\n', (14991, 15004), True, 'import numpy as np\n'), ((15287, 15326), 'os.path.join', 'os.path.join', (['DATAPATH', "(cmf + 'XYZ.dat')"], {}), "(DATAPATH, cmf + 'XYZ.dat')\n", (15299, 15326), False, 'import os\n'), ((15372, 15409), 'os.path.join', 'os.path.join', (['DATAPATH', "(cmf + 'Y.dat')"], {}), "(DATAPATH, cmf + 'Y.dat')\n", (15384, 15409), False, 'import os\n'), ((15652, 15698), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[:, 0]'], {'dtype': 'FDTYPE'}), '(data[:, 0], dtype=FDTYPE)\n', (15672, 15698), True, 'import numpy as np\n'), ((15702, 15748), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['data[:, 1]'], {'dtype': 'FDTYPE'}), '(data[:, 1], dtype=FDTYPE)\n', (15722, 15748), True, 'import numpy as np\n'), ((17425, 17474), 'numpy.interp', 'np.interp', (['x', 'x0', 'data[:, i]'], {'left': '(0.0)', 'right': '(0.0)'}), '(x, x0, data[:, i], left=0.0, right=0.0)\n', (17434, 17474), True, 'import numpy as np\n')] |
import json
class HttpMessage:
def __init__(self, is_success, data):
self.isSuccess = is_success
self.data = data
def getJson(self):
info = {
"is_success": self.isSuccess,
"data": self.data
}
return json.dumps(info, ensure_ascii=False)
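# A minimal usage sketch (the payload is a placeholder):
def _example_http_message():
    msg = HttpMessage(True, {"user": "alice"})
    return msg.getJson()  # '{"is_success": true, "data": {"user": "alice"}}'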
| [
"json.dumps"
] | [((274, 310), 'json.dumps', 'json.dumps', (['info'], {'ensure_ascii': '(False)'}), '(info, ensure_ascii=False)\n', (284, 310), False, 'import json\n')] |
import utils
from .stock import Stock
class Portfolio(object):
def __init__(self, stock_list=None):
self.stock_list = stock_list or []
def add_stock(self, s):
if isinstance(s, Stock):
self.stock_list.append(s)
def calc_sma(self, from_date, to_date, strategy, window):
if not isinstance(window, list):
window = [window]
        # extract reference prices over the period from each stock in stock_list;
        # the result is a list of dicts
result = [stock.sample(from_date, to_date, strategy) for stock in self.stock_list]
tmp = utils.aggregate_add(result)
xs = tmp.keys()
ys_ = tmp.values()
ys = []
for w in window:
sma = utils.SMA(w)
ys.append(map(sma, ys_))
return xs, ys
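# A minimal usage sketch; the Stock objects and the sampling strategy are
# assumed here, since their interfaces are defined in other modules:
def _example_calc_sma(stocks, strategy):
    portfolio = Portfolio(stock_list=list(stocks))
    # 20- and 50-period simple moving averages over the same date range
    return portfolio.calc_sma('2020-01-01', '2020-12-31', strategy, [20, 50])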
| [
"utils.aggregate_add",
"utils.SMA"
] | [((585, 612), 'utils.aggregate_add', 'utils.aggregate_add', (['result'], {}), '(result)\n', (604, 612), False, 'import utils\n'), ((726, 738), 'utils.SMA', 'utils.SMA', (['w'], {}), '(w)\n', (735, 738), False, 'import utils\n')] |
import urwid
import locale
class Track(object):
width = 0
height = 0
name = ''
palette = []
def __init__(self, width=0, height=0, name=''):
self.width = width
self.height = height
self.name = name
def render(self, region):
raise NotImplementedError()
def setSize(self, width, height):
self.width = width
self.height = height
def getSize(self):
        return self.width, self.height
def getPalette(self):
return self.palette
def centerBase(self, region):
(_, start, end) = region.explode()
        midPoint = (start + end) // 2  # integer division keeps positions and widths integral
return midPoint
def formatPosition(self, position):
return locale.format("%d", position, grouping=True)
class TrackLabel(Track):
label = None
labelFormat = '%s | '
def __init__(self, label, width=0, height=0, name=''):
super(TrackLabel, self).__init__(width, height, name)
self.label = label
def render(self, region):
labelTxt = self.labelFormat % self.label
txt = urwid.Text(('bold', labelTxt), 'right', 'clip')
return txt
class VCFTrack(Track):
variants = []
palette = [('variant', 'dark cyan', 'dark cyan')]
def __init__(self, variants, width=0, height=0, name=''):
super(VCFTrack, self).__init__(width, height, name)
self.variants = variants
def filterVariants(self, region):
(chromosome, start, end) = region.explode()
chromosome = chromosome.replace('chr', '')
fvars = filter(lambda v: v.CHROM == chromosome and
(v.POS >= start and v.POS <= end),
self.variants)
mfvars = {v.POS:v for v in fvars}
return mfvars
def render(self, region):
visVariants = self.filterVariants(region)
(_, start, end) = region.explode()
variantBars = []
singlePosEl = urwid.Text(' ')
for pos in range(start, end):
variant = visVariants.get(pos)
if not variant:
variantBars.append(singlePosEl)
else:
variantBars.append(urwid.AttrMap(singlePosEl, 'variant'))
cols = urwid.Columns(variantBars)
return cols
class DivTrack(Track):
divider = '-'
def __init__(self, width=0, height=0, divider='-', name=''):
super(DivTrack, self).__init__(width, height, name)
self.divider = divider
def render(self, region):
div = urwid.Divider(self.divider)
return div
class ScaleTrack(Track):
def render(self, region):
_, start, end = region.explode()
centerBase = self.centerBase(region)
scaleTxt = " %s bps " % self.formatPosition(end - start)
txtLen = len(scaleTxt)
scaleEl = urwid.Text(scaleTxt)
leftArrow = urwid.Text("<")
filler = urwid.Divider("-")
rightArrow = urwid.Text(">")
        return urwid.Columns([(1, leftArrow),
                              (centerBase - start - (txtLen // 2) - 1, filler),
                              (txtLen, scaleEl),
                              (end - centerBase - (txtLen // 2) - 1, filler),
                              (1, rightArrow)])
class LocationTrack(Track):
def render(self, region):
_, start, end = region.explode()
centerBase = self.centerBase(region)
locTxt = "|- %s bp" % self.formatPosition(centerBase)
locEl = urwid.Text(locTxt)
filler = urwid.Divider(" ")
return urwid.Columns([(centerBase - start, filler), locEl, filler])
class ReferenceTrack(Track):
twoBitFile = None
palette = [('base-a', 'white', 'dark green'),
('base-t', 'white', 'dark red'),
('base-g', 'white', 'light magenta'),
('base-c', 'white', 'light blue')]
def __init__(self, twoBitFile, width=0, height=0, name=''):
super(ReferenceTrack, self).__init__(width, height, name)
self.twoBitFile = twoBitFile
def render(self, region):
chromosome, start, end = region.explode()
regionSequence = self.twoBitFile[chromosome][start-1:end]
visibleSequence = regionSequence[0:self.width]
baseCols = []
for base in visibleSequence:
txt = urwid.Text(base.upper())
base = urwid.AttrMap(txt, 'base-%s' % base.lower())
baseCols.append(base)
cols = urwid.Columns(baseCols)
return cols
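# A minimal usage sketch: each track renders a urwid widget for a region object
# exposing explode() -> (chromosome, start, end); the region, twoBit file and
# variant list are assumed to come from the caller:
def _example_render_tracks(region, two_bit_file, variants):
    tracks = [LocationTrack(), ScaleTrack(),
              ReferenceTrack(two_bit_file), VCFTrack(variants), DivTrack()]
    return urwid.Pile([track.render(region) for track in tracks])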
| [
"urwid.Columns",
"locale.format",
"urwid.Divider",
"urwid.AttrMap",
"urwid.Text"
] | [((708, 752), 'locale.format', 'locale.format', (['"""%d"""', 'position'], {'grouping': '(True)'}), "('%d', position, grouping=True)\n", (721, 752), False, 'import locale\n'), ((1065, 1112), 'urwid.Text', 'urwid.Text', (["('bold', labelTxt)", '"""right"""', '"""clip"""'], {}), "(('bold', labelTxt), 'right', 'clip')\n", (1075, 1112), False, 'import urwid\n'), ((1927, 1942), 'urwid.Text', 'urwid.Text', (['""" """'], {}), "(' ')\n", (1937, 1942), False, 'import urwid\n'), ((2209, 2235), 'urwid.Columns', 'urwid.Columns', (['variantBars'], {}), '(variantBars)\n', (2222, 2235), False, 'import urwid\n'), ((2500, 2527), 'urwid.Divider', 'urwid.Divider', (['self.divider'], {}), '(self.divider)\n', (2513, 2527), False, 'import urwid\n'), ((2804, 2824), 'urwid.Text', 'urwid.Text', (['scaleTxt'], {}), '(scaleTxt)\n', (2814, 2824), False, 'import urwid\n'), ((2845, 2860), 'urwid.Text', 'urwid.Text', (['"""<"""'], {}), "('<')\n", (2855, 2860), False, 'import urwid\n'), ((2878, 2896), 'urwid.Divider', 'urwid.Divider', (['"""-"""'], {}), "('-')\n", (2891, 2896), False, 'import urwid\n'), ((2918, 2933), 'urwid.Text', 'urwid.Text', (['""">"""'], {}), "('>')\n", (2928, 2933), False, 'import urwid\n'), ((2950, 3118), 'urwid.Columns', 'urwid.Columns', (['[(1, leftArrow), (centerBase - start - txtLen / 2 - 1, filler), (txtLen,\n scaleEl), (end - centerBase - txtLen / 2 - 1, filler), (1, rightArrow)]'], {}), '([(1, leftArrow), (centerBase - start - txtLen / 2 - 1, filler\n ), (txtLen, scaleEl), (end - centerBase - txtLen / 2 - 1, filler), (1,\n rightArrow)])\n', (2963, 3118), False, 'import urwid\n'), ((3457, 3475), 'urwid.Text', 'urwid.Text', (['locTxt'], {}), '(locTxt)\n', (3467, 3475), False, 'import urwid\n'), ((3493, 3511), 'urwid.Divider', 'urwid.Divider', (['""" """'], {}), "(' ')\n", (3506, 3511), False, 'import urwid\n'), ((3527, 3587), 'urwid.Columns', 'urwid.Columns', (['[(centerBase - start, filler), locEl, filler]'], {}), '([(centerBase - start, filler), locEl, filler])\n', (3540, 3587), False, 'import urwid\n'), ((4426, 4449), 'urwid.Columns', 'urwid.Columns', (['baseCols'], {}), '(baseCols)\n', (4439, 4449), False, 'import urwid\n'), ((2154, 2191), 'urwid.AttrMap', 'urwid.AttrMap', (['singlePosEl', '"""variant"""'], {}), "(singlePosEl, 'variant')\n", (2167, 2191), False, 'import urwid\n')] |
import requests
import logging
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
class DummyAdaptor(HTTPAdapter):
def send(self, request, stream=False, timeout=None, verify=True,
cert=None, proxies=None):
logging.debug(request.url)
return super(DummyAdaptor, self).send(request, stream=stream, timeout=timeout, verify=verify,
cert=cert, proxies=proxies)
# wrap the base URL and credentials into a monkey-patched Session object
class SessionWithUrlBase(requests.Session):
def __init__(self, url_base, login, pwd, *args, **kwargs):
super(SessionWithUrlBase, self).__init__(*args, **kwargs)
self.url_base = url_base
self.auth = (login, pwd)
def request(self, method, url, **kwargs):
modified_url = self.url_base + url
return super(SessionWithUrlBase, self).request(method, modified_url, **kwargs)
    def get_adapter(self, url):
        # Note: this override is what Session.send() consults, so adapters mounted
        # on the session (e.g. the retry-configured HTTPAdapter in create_session)
        # are bypassed in favour of a fresh DummyAdaptor for every request.
        return DummyAdaptor()
def create_session(settings_api_url, settings_login, settings_pwd):
requests.Session = SessionWithUrlBase
session = requests.Session(settings_api_url, settings_login, settings_pwd)
# https://www.peterbe.com/plog/best-practice-with-retries-with-requests
retry = Retry(
total=100,
read=100,
connect=100,
backoff_factor=0.3,
status_forcelist=(500, 502, 504),
)
adapter = HTTPAdapter(max_retries=retry)
session.mount('http://', adapter)
session.mount('https://', adapter)
return session
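# A minimal usage sketch (URL and credentials are placeholders):
def _example_create_session():
    session = create_session("https://api.example.com", "user", "secret")
    # Only the relative endpoint is passed; SessionWithUrlBase.request()
    # prepends the base URL before sending.
    return session.get("/v1/status", timeout=10)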
| [
"requests.packages.urllib3.util.retry.Retry",
"logging.debug",
"requests.adapters.HTTPAdapter",
"requests.Session"
] | [((1155, 1219), 'requests.Session', 'requests.Session', (['settings_api_url', 'settings_login', 'settings_pwd'], {}), '(settings_api_url, settings_login, settings_pwd)\n', (1171, 1219), False, 'import requests\n'), ((1308, 1405), 'requests.packages.urllib3.util.retry.Retry', 'Retry', ([], {'total': '(100)', 'read': '(100)', 'connect': '(100)', 'backoff_factor': '(0.3)', 'status_forcelist': '(500, 502, 504)'}), '(total=100, read=100, connect=100, backoff_factor=0.3,\n status_forcelist=(500, 502, 504))\n', (1313, 1405), False, 'from requests.packages.urllib3.util.retry import Retry\n'), ((1463, 1493), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'retry'}), '(max_retries=retry)\n', (1474, 1493), False, 'from requests.adapters import HTTPAdapter\n'), ((279, 305), 'logging.debug', 'logging.debug', (['request.url'], {}), '(request.url)\n', (292, 305), False, 'import logging\n')] |
from xml.dom import minidom
from xml.etree.ElementTree import tostring
import os
from flask import Flask, render_template, request, jsonify, make_response, \
send_from_directory
from xml_generator import generate_sitemap
app = Flask(__name__)
@app.route('/favicon.ico')
def favicon():
return send_from_directory(
os.path.join(app.root_path, 'static'),
'favicon.ico',
mimetype='image/vnd.microsoft.icon'
)
@app.route('/')
def landing_page():
return render_template('index.html')
def adapter(value):
    # Accept either a list or a newline-separated string for list-like fields.
    if isinstance(value, str):
value = value.split('\n')
return value
@app.route('/api', methods=['POST'])
def api():
try:
parameters = request.json
url = parameters['url']
end_points_list = parameters['endPoints']
prettify = parameters.get('prettify', False)
languages = parameters.get('languages', [])
end_points_list = adapter(end_points_list)
languages = adapter(languages)
root = generate_sitemap(url, end_points_list, languages)
xml = tostring(root, encoding='unicode')
if prettify:
xml = minidom.parseString(xml).toprettyxml(
indent=" "
)
res = make_response(jsonify({'xml': xml}), 200)
return res
except Exception as e:
print(e)
return make_response('Bad Request', 400)
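# A minimal client-side sketch of calling the endpoint above; "endPoints" and
# "languages" may also be sent as newline-separated strings (see adapter()),
# and the URL below is a placeholder:
def _example_client_call():
    import requests  # assumed to be available on the client side
    payload = {
        "url": "https://example.com",
        "endPoints": ["/", "/about", "/contact"],
        "languages": ["en", "de"],
        "prettify": True,
    }
    response = requests.post("http://127.0.0.1:5000/api", json=payload)
    return response.json()["xml"]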
if __name__ == '__main__':
app.run(debug=True)
| [
"flask.render_template",
"flask.Flask",
"xml.etree.ElementTree.tostring",
"os.path.join",
"xml.dom.minidom.parseString",
"flask.make_response",
"xml_generator.generate_sitemap",
"flask.jsonify"
] | [((234, 249), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (239, 249), False, 'from flask import Flask, render_template, request, jsonify, make_response, send_from_directory\n'), ((495, 524), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (510, 524), False, 'from flask import Flask, render_template, request, jsonify, make_response, send_from_directory\n'), ((334, 371), 'os.path.join', 'os.path.join', (['app.root_path', '"""static"""'], {}), "(app.root_path, 'static')\n", (346, 371), False, 'import os\n'), ((1016, 1065), 'xml_generator.generate_sitemap', 'generate_sitemap', (['url', 'end_points_list', 'languages'], {}), '(url, end_points_list, languages)\n', (1032, 1065), False, 'from xml_generator import generate_sitemap\n'), ((1080, 1114), 'xml.etree.ElementTree.tostring', 'tostring', (['root'], {'encoding': '"""unicode"""'}), "(root, encoding='unicode')\n", (1088, 1114), False, 'from xml.etree.ElementTree import tostring\n'), ((1263, 1284), 'flask.jsonify', 'jsonify', (["{'xml': xml}"], {}), "({'xml': xml})\n", (1270, 1284), False, 'from flask import Flask, render_template, request, jsonify, make_response, send_from_directory\n'), ((1369, 1402), 'flask.make_response', 'make_response', (['"""Bad Request"""', '(400)'], {}), "('Bad Request', 400)\n", (1382, 1402), False, 'from flask import Flask, render_template, request, jsonify, make_response, send_from_directory\n'), ((1154, 1178), 'xml.dom.minidom.parseString', 'minidom.parseString', (['xml'], {}), '(xml)\n', (1173, 1178), False, 'from xml.dom import minidom\n')] |
import argparse
import numpy as np
import cv2
import os
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.utils.data import DataLoader
from model import FFDNet
import utils
def read_image(image_path, is_gray):
"""
:return: Normalized Image (C * W * H)
"""
if is_gray:
image = cv2.imread(image_path, cv2.IMREAD_GRAYSCALE)
image = np.expand_dims(image.T, 0) # 1 * W * H
else:
image = cv2.imread(image_path)
image = (cv2.cvtColor(image, cv2.COLOR_BGR2RGB)).transpose(2, 1, 0) # 3 * W * H
return utils.normalize(image)
def load_images(is_train, is_gray, base_path):
"""
:param base_path: ./train_data/
:return: List[Patches] (C * W * H)
"""
if is_gray:
train_dir = 'gray/train/'
val_dir = 'gray/val/'
else:
train_dir = 'rgb/train/'
val_dir = 'rgb/val/'
image_dir = base_path.replace('\'', '').replace('"', '') + (train_dir if is_train else val_dir)
print('> Loading images in ' + image_dir)
images = []
for fn in next(os.walk(image_dir))[2]:
image = read_image(image_dir + fn, is_gray)
images.append(image)
return images
def images_to_patches(images, patch_size):
"""
:param images: List[Image (C * W * H)]
:param patch_size: int
:return: (n * C * W * H)
"""
patches_list = []
for image in images:
patches = utils.image_to_patches(image, patch_size=patch_size)
if len(patches) != 0:
patches_list.append(patches)
del images
return np.vstack(patches_list)
def train(args):
print('> Loading dataset...')
# Images
train_dataset = load_images(is_train=True, is_gray=args.is_gray, base_path=args.train_path)
val_dataset = load_images(is_train=False, is_gray=args.is_gray, base_path=args.train_path)
print(f'\tTrain image datasets: {len(train_dataset)}')
print(f'\tVal image datasets: {len(val_dataset)}')
# Patches
train_dataset = images_to_patches(train_dataset, patch_size=args.patch_size)
val_dataset = images_to_patches(val_dataset, patch_size=args.patch_size)
print(f'\tTrain patch datasets: {train_dataset.shape}')
print(f'\tVal patch datasets: {val_dataset.shape}')
# DataLoader
train_dataloader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, num_workers=6)
val_dataloader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False, num_workers=6)
print(f'\tTrain batch number: {len(train_dataloader)}')
print(f'\tVal batch number: {len(val_dataloader)}')
# Noise list
train_noises = args.train_noise_interval # [0, 75, 15]
val_noises = args.val_noise_interval # [0, 60, 30]
train_noises = list(range(train_noises[0], train_noises[1], train_noises[2]))
val_noises = list(range(val_noises[0], val_noises[1], val_noises[2]))
    print(f'\tTrain noise interval: {train_noises}')
    print(f'\tVal noise interval: {val_noises}')
print('\n')
# Model & Optim
model = FFDNet(is_gray=args.is_gray)
model.apply(utils.weights_init_kaiming)
if args.cuda:
model = model.cuda()
loss_fn = nn.MSELoss(reduction='sum')
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
print('> Start training...')
for epoch_idx in range(args.epoches):
# Train
loss_idx = 0
train_losses = 0
model.train()
start_time = time.time()
for batch_idx, batch_data in enumerate(train_dataloader):
            # Add noise for each sigma in the training noise interval
for int_noise_sigma in train_noises:
noise_sigma = int_noise_sigma / 255
new_images = utils.add_batch_noise(batch_data, noise_sigma)
noise_sigma = torch.FloatTensor(np.array([noise_sigma for idx in range(new_images.shape[0])]))
new_images = Variable(new_images)
noise_sigma = Variable(noise_sigma)
if args.cuda:
new_images = new_images.cuda()
noise_sigma = noise_sigma.cuda()
# Predict
images_pred = model(new_images, noise_sigma)
train_loss = loss_fn(images_pred, batch_data.to(images_pred.device))
train_losses += train_loss
loss_idx += 1
optimizer.zero_grad()
train_loss.backward()
optimizer.step()
# Log Progress
stop_time = time.time()
all_num = len(train_dataloader) * len(train_noises)
done_num = batch_idx * len(train_noises) + train_noises.index(int_noise_sigma) + 1
rest_time = int((stop_time - start_time) / done_num * (all_num - done_num))
percent = int(done_num / all_num * 100)
print(f'\rEpoch: {epoch_idx + 1} / {args.epoches}, ' +
f'Batch: {batch_idx + 1} / {len(train_dataloader)}, ' +
f'Noise_Sigma: {int_noise_sigma} / {train_noises[-1]}, ' +
f'Train_Loss: {train_loss}, ' +
f'=> {rest_time}s, {percent}%', end='')
train_losses /= loss_idx
print(f', Avg_Train_Loss: {train_losses}, All: {int(stop_time - start_time)}s')
# Evaluate
loss_idx = 0
val_losses = 0
if (epoch_idx + 1) % args.val_epoch != 0:
continue
model.eval()
start_time = time.time()
for batch_idx, batch_data in enumerate(val_dataloader):
            # Add noise for each sigma in the validation noise interval
for int_noise_sigma in val_noises:
noise_sigma = int_noise_sigma / 255
new_images = utils.add_batch_noise(batch_data, noise_sigma)
noise_sigma = torch.FloatTensor(np.array([noise_sigma for idx in range(new_images.shape[0])]))
new_images = Variable(new_images)
noise_sigma = Variable(noise_sigma)
if args.cuda:
new_images = new_images.cuda()
noise_sigma = noise_sigma.cuda()
# Predict
images_pred = model(new_images, noise_sigma)
val_loss = loss_fn(images_pred, batch_data.to(images_pred.device))
val_losses += val_loss
loss_idx += 1
# Log Progress
stop_time = time.time()
all_num = len(val_dataloader) * len(val_noises)
done_num = batch_idx * len(val_noises) + val_noises.index(int_noise_sigma) + 1
rest_time = int((stop_time - start_time) / done_num * (all_num - done_num))
percent = int(done_num / all_num * 100)
print(f'\rEpoch: {epoch_idx + 1} / {args.epoches}, ' +
f'Batch: {batch_idx + 1} / {len(val_dataloader)}, ' +
f'Noise_Sigma: {int_noise_sigma} / {val_noises[-1]}, ' +
f'Val_Loss: {val_loss}, ' +
f'=> {rest_time}s, {percent}%', end='')
val_losses /= loss_idx
print(f', Avg_Val_Loss: {val_losses}, All: {int(stop_time - start_time)}s')
# Save Checkpoint
if (epoch_idx + 1) % args.save_checkpoints == 0:
model_path = args.model_path + ('net_gray_checkpoint.pth' if args.is_gray else 'net_rgb_checkpoint.pth')
torch.save(model.state_dict(), model_path)
print(f'| Saved Checkpoint at Epoch {epoch_idx + 1} to {model_path}')
# Final Save Model Dict
model.eval()
model_path = args.model_path + ('net_gray.pth' if args.is_gray else 'net_rgb.pth')
torch.save(model.state_dict(), model_path)
print(f'Saved State Dict in {model_path}')
print('\n')
def test(args):
# Image
image = cv2.imread(args.test_path)
if image is None:
raise Exception(f'File {args.test_path} not found or error')
is_gray = utils.is_image_gray(image)
image = read_image(args.test_path, is_gray)
print("{} image shape: {}".format("Gray" if is_gray else "RGB", image.shape))
# Expand odd shape to even
expend_W = False
expend_H = False
if image.shape[1] % 2 != 0:
expend_W = True
image = np.concatenate((image, image[:, -1, :][:, np.newaxis, :]), axis=1)
if image.shape[2] % 2 != 0:
expend_H = True
image = np.concatenate((image, image[:, :, -1][:, :, np.newaxis]), axis=2)
# Noise
image = torch.FloatTensor([image]) # 1 * C(1 / 3) * W * H
if args.add_noise:
image = utils.add_batch_noise(image, args.noise_sigma)
noise_sigma = torch.FloatTensor([args.noise_sigma])
# Model & GPU
model = FFDNet(is_gray=is_gray)
if args.cuda:
image = image.cuda()
noise_sigma = noise_sigma.cuda()
model = model.cuda()
# Dict
model_path = args.model_path + ('net_gray.pth' if is_gray else 'net_rgb.pth')
print(f"> Loading model param in {model_path}...")
state_dict = torch.load(model_path)
model.load_state_dict(state_dict)
model.eval()
print('\n')
# Test
with torch.no_grad():
start_time = time.time()
image_pred = model(image, noise_sigma)
stop_time = time.time()
print("Test time: {0:.4f}s".format(stop_time - start_time))
# PSNR
psnr = utils.batch_psnr(img=image_pred, imclean=image, data_range=1)
print("PSNR denoised {0:.2f}dB".format(psnr))
# UnExpand odd
if expend_W:
image_pred = image_pred[:, :, :-1, :]
if expend_H:
image_pred = image_pred[:, :, :, :-1]
# Save
cv2.imwrite("ffdnet.png", utils.variable_to_cv2_image(image_pred))
if args.add_noise:
cv2.imwrite("noisy.png", utils.variable_to_cv2_image(image))
def main():
parser = argparse.ArgumentParser()
# Train
parser.add_argument("--train_path", type=str, default='./train_data/', help='Train dataset dir.')
parser.add_argument("--is_gray", action='store_true', help='Train gray/rgb model.')
parser.add_argument("--patch_size", type=int, default=32, help='Uniform size of training images patches.')
parser.add_argument("--train_noise_interval", nargs=3, type=int, default=[0, 75, 15], help='Train dataset noise sigma set interval.')
parser.add_argument("--val_noise_interval", nargs=3, type=int, default=[0, 60, 30], help='Validation dataset noise sigma set interval.')
parser.add_argument("--batch_size", type=int, default=256, help='Batch size for training.')
parser.add_argument("--epoches", type=int, default=80, help='Total number of training epoches.')
parser.add_argument("--val_epoch", type=int, default=5, help='Total number of validation epoches.')
parser.add_argument("--learning_rate", type=float, default=1e-3, help='The initial learning rate for Adam.')
parser.add_argument("--save_checkpoints", type=int, default=5, help='Save checkpoint every epoch.')
# Test
parser.add_argument("--test_path", type=str, default='./test_data/color.png', help='Test image path.')
parser.add_argument("--noise_sigma", type=float, default=25, help='Input uniform noise sigma for test.')
parser.add_argument('--add_noise', action='store_true', help='Add noise_sigma to input or not.')
# Global
parser.add_argument("--model_path", type=str, default='./models/', help='Model loading and saving path.')
parser.add_argument("--use_gpu", action='store_true', help='Train and test using GPU.')
parser.add_argument("--is_train", action='store_true', help='Do train.')
parser.add_argument("--is_test", action='store_true', help='Do test.')
args = parser.parse_args()
    assert (args.is_train or args.is_test), 'At least one of is_train or is_test must be True'
args.cuda = args.use_gpu and torch.cuda.is_available()
print("> Parameters: ")
for k, v in zip(args.__dict__.keys(), args.__dict__.values()):
print(f'\t{k}: {v}')
print('\n')
# Normalize noise level
args.noise_sigma /= 255
args.train_noise_interval[1] += 1
args.val_noise_interval[1] += 1
if args.is_train:
train(args)
if args.is_test:
test(args)
if __name__ == "__main__":
main()
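# Typical invocations as a sketch (the script file name below is assumed):
#   python ffdnet.py --is_train --train_path ./train_data/ --batch_size 256 --use_gpu
#   python ffdnet.py --is_test --test_path ./test_data/color.png --noise_sigma 25 --add_noise --use_gpu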
| [
"torch.nn.MSELoss",
"torch.cuda.is_available",
"os.walk",
"utils.variable_to_cv2_image",
"argparse.ArgumentParser",
"numpy.vstack",
"numpy.concatenate",
"model.FFDNet",
"torch.autograd.Variable",
"cv2.cvtColor",
"utils.add_batch_noise",
"time.time",
"cv2.imread",
"utils.normalize",
"utils.image_to_patches",
"torch.load",
"utils.is_image_gray",
"torch.utils.data.DataLoader",
"numpy.expand_dims",
"torch.no_grad",
"torch.FloatTensor",
"utils.batch_psnr"
] | [((660, 682), 'utils.normalize', 'utils.normalize', (['image'], {}), '(image)\n', (675, 682), False, 'import utils\n'), ((1657, 1680), 'numpy.vstack', 'np.vstack', (['patches_list'], {}), '(patches_list)\n', (1666, 1680), True, 'import numpy as np\n'), ((2381, 2467), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(True)', 'num_workers': '(6)'}), '(train_dataset, batch_size=args.batch_size, shuffle=True,\n num_workers=6)\n', (2391, 2467), False, 'from torch.utils.data import DataLoader\n'), ((2485, 2570), 'torch.utils.data.DataLoader', 'DataLoader', (['val_dataset'], {'batch_size': 'args.batch_size', 'shuffle': '(False)', 'num_workers': '(6)'}), '(val_dataset, batch_size=args.batch_size, shuffle=False,\n num_workers=6)\n', (2495, 2570), False, 'from torch.utils.data import DataLoader\n'), ((3122, 3150), 'model.FFDNet', 'FFDNet', ([], {'is_gray': 'args.is_gray'}), '(is_gray=args.is_gray)\n', (3128, 3150), False, 'from model import FFDNet\n'), ((3256, 3283), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (3266, 3283), True, 'import torch.nn as nn\n'), ((7979, 8005), 'cv2.imread', 'cv2.imread', (['args.test_path'], {}), '(args.test_path)\n', (7989, 8005), False, 'import cv2\n'), ((8111, 8137), 'utils.is_image_gray', 'utils.is_image_gray', (['image'], {}), '(image)\n', (8130, 8137), False, 'import utils\n'), ((8649, 8675), 'torch.FloatTensor', 'torch.FloatTensor', (['[image]'], {}), '([image])\n', (8666, 8675), False, 'import torch\n'), ((8803, 8840), 'torch.FloatTensor', 'torch.FloatTensor', (['[args.noise_sigma]'], {}), '([args.noise_sigma])\n', (8820, 8840), False, 'import torch\n'), ((8872, 8895), 'model.FFDNet', 'FFDNet', ([], {'is_gray': 'is_gray'}), '(is_gray=is_gray)\n', (8878, 8895), False, 'from model import FFDNet\n'), ((9179, 9201), 'torch.load', 'torch.load', (['model_path'], {}), '(model_path)\n', (9189, 9201), False, 'import torch\n'), ((9518, 9579), 'utils.batch_psnr', 'utils.batch_psnr', ([], {'img': 'image_pred', 'imclean': 'image', 'data_range': '(1)'}), '(img=image_pred, imclean=image, data_range=1)\n', (9534, 9579), False, 'import utils\n'), ((9977, 10002), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10000, 10002), False, 'import argparse\n'), ((407, 451), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_GRAYSCALE'], {}), '(image_path, cv2.IMREAD_GRAYSCALE)\n', (417, 451), False, 'import cv2\n'), ((468, 494), 'numpy.expand_dims', 'np.expand_dims', (['image.T', '(0)'], {}), '(image.T, 0)\n', (482, 494), True, 'import numpy as np\n'), ((533, 555), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (543, 555), False, 'import cv2\n'), ((1507, 1559), 'utils.image_to_patches', 'utils.image_to_patches', (['image'], {'patch_size': 'patch_size'}), '(image, patch_size=patch_size)\n', (1529, 1559), False, 'import utils\n'), ((3536, 3547), 'time.time', 'time.time', ([], {}), '()\n', (3545, 3547), False, 'import time\n'), ((5590, 5601), 'time.time', 'time.time', ([], {}), '()\n', (5599, 5601), False, 'import time\n'), ((8414, 8480), 'numpy.concatenate', 'np.concatenate', (['(image, image[:, -1, :][:, np.newaxis, :])'], {'axis': '(1)'}), '((image, image[:, -1, :][:, np.newaxis, :]), axis=1)\n', (8428, 8480), True, 'import numpy as np\n'), ((8553, 8619), 'numpy.concatenate', 'np.concatenate', (['(image, image[:, :, -1][:, :, np.newaxis])'], {'axis': '(2)'}), '((image, image[:, :, -1][:, :, np.newaxis]), axis=2)\n', (8567, 
8619), True, 'import numpy as np\n'), ((8738, 8784), 'utils.add_batch_noise', 'utils.add_batch_noise', (['image', 'args.noise_sigma'], {}), '(image, args.noise_sigma)\n', (8759, 8784), False, 'import utils\n'), ((9298, 9313), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (9311, 9313), False, 'import torch\n'), ((9336, 9347), 'time.time', 'time.time', ([], {}), '()\n', (9345, 9347), False, 'import time\n'), ((9415, 9426), 'time.time', 'time.time', ([], {}), '()\n', (9424, 9426), False, 'import time\n'), ((9818, 9857), 'utils.variable_to_cv2_image', 'utils.variable_to_cv2_image', (['image_pred'], {}), '(image_pred)\n', (9845, 9857), False, 'import utils\n'), ((12374, 12399), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12397, 12399), False, 'import torch\n'), ((1160, 1178), 'os.walk', 'os.walk', (['image_dir'], {}), '(image_dir)\n', (1167, 1178), False, 'import os\n'), ((9915, 9949), 'utils.variable_to_cv2_image', 'utils.variable_to_cv2_image', (['image'], {}), '(image)\n', (9942, 9949), False, 'import utils\n'), ((573, 611), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2RGB'], {}), '(image, cv2.COLOR_BGR2RGB)\n', (585, 611), False, 'import cv2\n'), ((3791, 3837), 'utils.add_batch_noise', 'utils.add_batch_noise', (['batch_data', 'noise_sigma'], {}), '(batch_data, noise_sigma)\n', (3812, 3837), False, 'import utils\n'), ((3978, 3998), 'torch.autograd.Variable', 'Variable', (['new_images'], {}), '(new_images)\n', (3986, 3998), False, 'from torch.autograd import Variable\n'), ((4029, 4050), 'torch.autograd.Variable', 'Variable', (['noise_sigma'], {}), '(noise_sigma)\n', (4037, 4050), False, 'from torch.autograd import Variable\n'), ((4601, 4612), 'time.time', 'time.time', ([], {}), '()\n', (4610, 4612), False, 'import time\n'), ((5841, 5887), 'utils.add_batch_noise', 'utils.add_batch_noise', (['batch_data', 'noise_sigma'], {}), '(batch_data, noise_sigma)\n', (5862, 5887), False, 'import utils\n'), ((6028, 6048), 'torch.autograd.Variable', 'Variable', (['new_images'], {}), '(new_images)\n', (6036, 6048), False, 'from torch.autograd import Variable\n'), ((6079, 6100), 'torch.autograd.Variable', 'Variable', (['noise_sigma'], {}), '(noise_sigma)\n', (6087, 6100), False, 'from torch.autograd import Variable\n'), ((6567, 6578), 'time.time', 'time.time', ([], {}), '()\n', (6576, 6578), False, 'import time\n')] |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
sphinxcontrib.cmtinc
~~~~~~~~~~~~~~~~~~~~~~~
Extract comments from source files.
See the README file for details.
:author: <NAME>. <<EMAIL>>
:license: MIT, see LICENSE for details
"""
import sys
import os.path
import re
import time
from docutils import io, nodes, statemachine, utils
from docutils.utils.error_reporting import SafeString, ErrorString
from docutils.utils.error_reporting import locale_encoding
from docutils.parsers.rst import Directive, convert_directive_function
from docutils.parsers.rst import directives, roles, states
from docutils.parsers.rst.directives.body import CodeBlock, NumberLines
# BaseInclude is needed for the docutils "standard" include fallback in IncludeComments.run()
from docutils.parsers.rst.directives.misc import Include as BaseInclude
from docutils.parsers.rst.roles import set_classes
from docutils.transforms import misc
from docutils.statemachine import ViewList
#from sphinx.util import logging
#logger = logging.getLogger(__name__)
COMMENT_STYLES = {
    'C-style': {
        'multiline': re.compile(r"^\s*\/\*\*.*$"),
        'multiline_end': re.compile(r"(.*)\*\/\ *$"),
        'whitespace_content': re.compile(r"^\s*(?:\*|#|(?:\/\/))?(\s*.*)$"),
    },
    'hash': {
        'multiline': re.compile(r"^\s*(#:).*$"),
        'multiline_end': re.compile(r"^\s*(#\.).*$"),
        'whitespace_content': re.compile(r"^\s*(?:# ?)?(\s*.*)$"),
    },
}
class IncludeComments(Directive):
"""
Include content read from a separate source file.
Content may be parsed by the parser, or included as a literal
block. The encoding of the included file can be specified. Only
a part of the given file argument may be included by specifying
start and end line or text to match before and/or after the text
to be used.
based on the Include Directives at http://svn.code.sf.net/p/docutils/code/trunk/docutils/docutils/parsers/rst/directives/misc.py
and https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/other.py
"""
required_arguments = 1
optional_arguments = 0
final_argument_whitespace = True
option_spec = {'style': str,
'literal': directives.flag,
'code': directives.unchanged,
'encoding': directives.encoding,
'tab-width': int,
'start-line': int,
'end-line': int,
'start-after': directives.unchanged_required,
'end-before': directives.unchanged_required,
# ignored except for 'literal' or 'code':
'number-lines': directives.unchanged, # integer or None
'class': directives.class_option,
'name': directives.unchanged}
standard_include_path = os.path.join(os.path.dirname(states.__file__),
'include')
def filterText(self, rawtext):
includeLine = 0
filterdText = ViewList("",'comment')
identationstack = []
keepwhitespaces = False
codeindentfactor = []
codeparamarker = False # Marker for the \codepara tag
for line in rawtext.split('\n'):
ignoreLine = False;
m = self.comment_options['multiline'].match(line)
if(m):
includeLine +=1
ignoreLine = True;
if (includeLine > 0):
if("\\toggle_keepwhitespaces" in line):
ignoreLine = True
keepwhitespaces = not keepwhitespaces
match_code_tag = re.search(r'(?P<whitespace>\s*)\\(?P<tag>(code|codepara|multicomment)\b)', line)
if match_code_tag:
includeLine +=1
ignoreLine = True;
leading_whitespace = len(match_code_tag.group('whitespace'))
identationstack.append(leading_whitespace)
# When we match \codepara, that includes lines until the next blank, with no
# matching marker needed.
if match_code_tag.group('tag') == 'codepara':
codeparamarker = True
if (any(tag in line for tag in
["\endcode", "\end_multicomment"])):
includeLine -=1
identationstack.pop()
ignoreLine = True;
m = self.comment_options['multiline_end'].match(line)
if(m and includeLine > 0):
filterdText.append('\n','comment')
includeLine -=1
ignoreLine = True;
if (not ignoreLine and includeLine > 0):
indent = sum(identationstack)
if (indent <= 0 and not keepwhitespaces):
linecontent = self.comment_options['whitespace_content'].match(line).group(1)
filterdText.append('%s\n' % (linecontent),'comment')
else:
filterdText.append('%s%s\n' % ((' ' * indent), line),'comment')
# If codepara mode is on, then we turn it off when we hit blank line content.
if codeparamarker and len(line.strip()) == 0:
codeparamarker = False
identationstack.pop()
includeLine -= 1
#else:
#filterdText.append( 'D%d %s%s\n' % (includeLine, identation, line),'comment')
if (includeLine != 0):
raise self.severe("Comments may not be closed correctly! Open comments: %d" % (includeLine))
return ''.join(filterdText)
def run(self):
"""Include a file as part of the content of this reST file."""
# from sphynx Include Directive in https://github.com/sphinx-doc/sphinx/blob/master/sphinx/directives/other.py
# type: () -> List[nodes.Node]
env = self.state.document.settings.env
if self.arguments[0].startswith('<') and \
self.arguments[0].endswith('>'):
# docutils "standard" includes, do not do path processing
return BaseInclude.run(self)
rel_filename, filename = env.relfn2path(self.arguments[0])
self.arguments[0] = filename
env.note_included(filename)
#end
if not self.state.document.settings.file_insertion_enabled:
raise self.warning('"%s" directive disabled.' % self.name)
source = self.state_machine.input_lines.source(
self.lineno - self.state_machine.input_offset - 1)
source_dir = os.path.dirname(os.path.abspath(source))
path = directives.path(self.arguments[0])
if path.startswith('<') and path.endswith('>'):
path = os.path.join(self.standard_include_path, path[1:-1])
path = os.path.normpath(os.path.join(source_dir, path))
path = utils.relative_path(None, path)
path = nodes.reprunicode(path)
encoding = self.options.get(
'encoding', self.state.document.settings.input_encoding)
e_handler=self.state.document.settings.input_encoding_error_handler
tab_width = self.options.get(
'tab-width', self.state.document.settings.tab_width)
try:
self.state.document.settings.record_dependencies.add(path)
include_file = io.FileInput(source_path=path,
encoding=encoding,
error_handler=e_handler)
except UnicodeEncodeError as error:
raise self.severe(u'Problems with "%s" directive path:\n'
'Cannot encode input file path "%s" '
'(wrong locale?).' %
(self.name, SafeString(path)))
except IOError as error:
raise self.severe(u'Problems with "%s" directive path:\n%s.' %
(self.name, ErrorString(error)))
startline = self.options.get('start-line', None)
endline = self.options.get('end-line', None)
try:
if startline or (endline is not None):
lines = include_file.readlines()
rawtext = ''.join(lines[startline:endline])
else:
rawtext = include_file.read()
except UnicodeError as error:
raise self.severe(u'Problem with "%s" directive:\n%s' %
(self.name, ErrorString(error)))
# start-after/end-before: no restrictions on newlines in match-text,
# and no restrictions on matching inside lines vs. line boundaries
after_text = self.options.get('start-after', None)
if after_text:
# skip content in rawtext before *and incl.* a matching text
after_index = rawtext.find(after_text)
if after_index < 0:
raise self.severe('Problem with "start-after" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[after_index + len(after_text):]
before_text = self.options.get('end-before', None)
if before_text:
# skip content in rawtext after *and incl.* a matching text
before_index = rawtext.find(before_text)
if before_index < 0:
raise self.severe('Problem with "end-before" option of "%s" '
'directive:\nText not found.' % self.name)
rawtext = rawtext[:before_index]
# Handle alternate comment styles
style = self.options.get('style', 'C-style')
if style not in COMMENT_STYLES:
raise self.severe('Cannot find comment style "%s", not in %s'
% (style, COMMENT_STYLES.keys()))
self.comment_options = COMMENT_STYLES[style]
rawtext = self.filterText(rawtext)
#if (path == "../examples/neuropil_hydra.c"):
#raise self.severe('filterd text from %s:\n%s' % (path, rawtext))
include_lines = statemachine.string2lines(rawtext, tab_width,
convert_whitespace=True)
if 'literal' in self.options:
# Convert tabs to spaces, if `tab_width` is positive.
if tab_width >= 0:
text = rawtext.expandtabs(tab_width)
else:
text = rawtext
literal_block = nodes.literal_block(rawtext, source=path,
classes=self.options.get('class', []))
literal_block.line = 1
self.add_name(literal_block)
if 'number-lines' in self.options:
try:
startline = int(self.options['number-lines'] or 1)
except ValueError:
raise self.error(':number-lines: with non-integer '
'start value')
endline = startline + len(include_lines)
if text.endswith('\n'):
text = text[:-1]
tokens = NumberLines([([], text)], startline, endline)
for classes, value in tokens:
if classes:
literal_block += nodes.inline(value, value,
classes=classes)
else:
literal_block += nodes.Text(value, value)
else:
literal_block += nodes.Text(text, text)
return [literal_block]
if 'code' in self.options:
self.options['source'] = path
codeblock = CodeBlock(self.name,
[self.options.pop('code')], # arguments
self.options,
include_lines, # content
self.lineno,
self.content_offset,
self.block_text,
self.state,
self.state_machine)
return codeblock.run()
self.state_machine.insert_input(include_lines, path)
return []
def setup(app):
app.add_directive('include-comment', IncludeComments)
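# A minimal usage sketch (the source path is a placeholder): with the extension
# enabled in conf.py, the directive is used from reST roughly like this:
#
#   .. include-comment:: ../src/example.c
#      :style: C-style
#      :encoding: utf-8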
| [
"re.search",
"docutils.parsers.rst.directives.path",
"docutils.nodes.inline",
"re.compile",
"docutils.utils.error_reporting.ErrorString",
"docutils.utils.relative_path",
"docutils.statemachine.ViewList",
"docutils.nodes.Text",
"docutils.parsers.rst.directives.body.NumberLines",
"docutils.io.FileInput",
"docutils.statemachine.string2lines",
"docutils.utils.error_reporting.SafeString",
"docutils.nodes.reprunicode"
] | [((956, 987), 're.compile', 're.compile', (['"""^\\\\s*\\\\/\\\\*\\\\*.*$"""'], {}), "('^\\\\s*\\\\/\\\\*\\\\*.*$')\n", (966, 987), False, 'import re\n'), ((1014, 1043), 're.compile', 're.compile', (['"""(.*)\\\\*\\\\/\\\\ *$"""'], {}), "('(.*)\\\\*\\\\/\\\\ *$')\n", (1024, 1043), False, 'import re\n'), ((1076, 1125), 're.compile', 're.compile', (['"""^\\\\s*(?:\\\\*|#|(?:\\\\/\\\\/))?(\\\\s*.*)$"""'], {}), "('^\\\\s*(?:\\\\*|#|(?:\\\\/\\\\/))?(\\\\s*.*)$')\n", (1086, 1125), False, 'import re\n'), ((1180, 1206), 're.compile', 're.compile', (['"""^\\\\s*(#:).*$"""'], {}), "('^\\\\s*(#:).*$')\n", (1190, 1206), False, 'import re\n'), ((1236, 1264), 're.compile', 're.compile', (['"""^\\\\s*(#\\\\.).*$"""'], {}), "('^\\\\s*(#\\\\.).*$')\n", (1246, 1264), False, 'import re\n'), ((1298, 1334), 're.compile', 're.compile', (['"""^\\\\s*(?:# ?)?(\\\\s*.*)$"""'], {}), "('^\\\\s*(?:# ?)?(\\\\s*.*)$')\n", (1308, 1334), False, 'import re\n'), ((2936, 2959), 'docutils.statemachine.ViewList', 'ViewList', (['""""""', '"""comment"""'], {}), "('', 'comment')\n", (2944, 2959), False, 'from docutils.statemachine import ViewList\n'), ((6580, 6614), 'docutils.parsers.rst.directives.path', 'directives.path', (['self.arguments[0]'], {}), '(self.arguments[0])\n', (6595, 6614), False, 'from docutils.parsers.rst import directives, roles, states\n'), ((6822, 6853), 'docutils.utils.relative_path', 'utils.relative_path', (['None', 'path'], {}), '(None, path)\n', (6841, 6853), False, 'from docutils import io, nodes, statemachine, utils\n'), ((6869, 6892), 'docutils.nodes.reprunicode', 'nodes.reprunicode', (['path'], {}), '(path)\n', (6886, 6892), False, 'from docutils import io, nodes, statemachine, utils\n'), ((9994, 10064), 'docutils.statemachine.string2lines', 'statemachine.string2lines', (['rawtext', 'tab_width'], {'convert_whitespace': '(True)'}), '(rawtext, tab_width, convert_whitespace=True)\n', (10019, 10064), False, 'from docutils import io, nodes, statemachine, utils\n'), ((7289, 7363), 'docutils.io.FileInput', 'io.FileInput', ([], {'source_path': 'path', 'encoding': 'encoding', 'error_handler': 'e_handler'}), '(source_path=path, encoding=encoding, error_handler=e_handler)\n', (7301, 7363), False, 'from docutils import io, nodes, statemachine, utils\n'), ((3555, 3642), 're.search', 're.search', (['"""(?P<whitespace>\\\\s*)\\\\\\\\(?P<tag>(code|codepara|multicomment)\\\\b)"""', 'line'], {}), "('(?P<whitespace>\\\\s*)\\\\\\\\(?P<tag>(code|codepara|multicomment)\\\\b)',\n line)\n", (3564, 3642), False, 'import re\n'), ((11030, 11075), 'docutils.parsers.rst.directives.body.NumberLines', 'NumberLines', (['[([], text)]', 'startline', 'endline'], {}), '([([], text)], startline, endline)\n', (11041, 11075), False, 'from docutils.parsers.rst.directives.body import CodeBlock, NumberLines\n'), ((11436, 11458), 'docutils.nodes.Text', 'nodes.Text', (['text', 'text'], {}), '(text, text)\n', (11446, 11458), False, 'from docutils import io, nodes, statemachine, utils\n'), ((11195, 11238), 'docutils.nodes.inline', 'nodes.inline', (['value', 'value'], {'classes': 'classes'}), '(value, value, classes=classes)\n', (11207, 11238), False, 'from docutils import io, nodes, statemachine, utils\n'), ((11360, 11384), 'docutils.nodes.Text', 'nodes.Text', (['value', 'value'], {}), '(value, value)\n', (11370, 11384), False, 'from docutils import io, nodes, statemachine, utils\n'), ((7719, 7735), 'docutils.utils.error_reporting.SafeString', 'SafeString', (['path'], {}), '(path)\n', (7729, 7735), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((7880, 7898), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (7891, 7898), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n'), ((8396, 8414), 'docutils.utils.error_reporting.ErrorString', 'ErrorString', (['error'], {}), '(error)\n', (8407, 8414), False, 'from docutils.utils.error_reporting import SafeString, ErrorString\n')]
"""
This game is contributed by <NAME> as part of team 'hava'
for Python Week's 48 hour hackathon
"""
# Importing necessary modules
import pyglet
import random
def start_asteroid_deflector():
# Adding images to path
pyglet.resource.path = ["resources"]
pyglet.resource.reindex()
class AsteroidsWindow(pyglet.window.Window):
        # Initializing game window
def __init__(self):
super(AsteroidsWindow, self).__init__()
self.keys = pyglet.window.key.KeyStateHandler()
self.push_handlers(self.keys)
# Setting game name caption
self.set_caption("Asteroid Deflector")
self.ship_image = pyglet.resource.image("alienblaster.png")
self.asteroid_image = pyglet.resource.image("asteroid.png")
self.center_image(self.ship_image)
self.center_image(self.asteroid_image)
self.ship = pyglet.sprite.Sprite(img=self.ship_image, x=30, y=30)
self.ship.scale = 0.3
self.ship.rotation = 180
self.score_label = pyglet.text.Label(
text="Score:0 Highscore:0", x=10, y=10)
self.score = 0
self.highscore = 0
self.asteroids = []
self.stars = []
pyglet.clock.schedule_interval(self.game_tick, 0.005)
# Method to update all game elements
def game_tick(self, dt):
self.update_stars()
self.update_asteroids()
self.update_ship()
self.update_score()
self.draw_elements()
# Method to draw elements
def draw_elements(self):
self.clear()
for star in self.stars:
star.draw()
for asteroid in self.asteroids:
asteroid.draw()
self.ship.draw()
self.score_label.draw()
# Method to update stars
def update_stars(self):
if self.score % 8 == 0:
self.stars.append(pyglet.text.Label(
text="*", x=random.randint(0, 800), y=600))
for star in self.stars:
star.y -= 20
if star.y < 0:
self.stars.remove(star)
# Method to update asteroids
def update_asteroids(self):
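            # Each tick there is roughly a 1-in-46 chance (random.randint(0, 45) == 3)
            # of spawning a new asteroid at a random x position along the top edge.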
if random.randint(0, 45) == 3:
ast = pyglet.sprite.Sprite(
img=self.asteroid_image, x=random.randint(0, 800), y=600)
ast.scale = 0.3
self.asteroids.append(ast)
for asteroid in self.asteroids:
asteroid.y -= 7
if asteroid.y < 0:
self.asteroids.remove(asteroid)
for asteroid in self.asteroids:
if self.sprites_collide(asteroid, self.ship):
self.asteroids.remove(asteroid)
self.score = 0
# Method to update ship
def update_ship(self):
if self.keys[pyglet.window.key.LEFT] and not self.ship.x < 0:
self.ship.x -= 4
elif self.keys[pyglet.window.key.RIGHT] and not self.ship.x > 625:
self.ship.x += 4
# Method to update score
def update_score(self):
self.score += 1
if self.score > self.highscore:
self.highscore = self.score
self.score_label.text = "Score: {} Highscore: {}".format(
self.score, self.highscore)
def center_image(self, image):
image.anchor_x = image.width/2
image.anchor_y = image.height/2
# Method to check collisions
def sprites_collide(self, spr1, spr2):
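            # Circle-overlap test: each sprite is treated as a circle of radius width/2;
            # they collide when the squared distance between centres is smaller than the
            # squared sum of the radii (compared in squared form to avoid a sqrt call).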
return (spr1.x-spr2.x)**2 + (spr1.y-spr2.y)**2 < (spr1.width/2 + spr2.width/2)**2
# Starting game application
game_window = AsteroidsWindow()
pyglet.app.run()
| [
"pyglet.resource.image",
"pyglet.window.key.KeyStateHandler",
"pyglet.app.run",
"pyglet.resource.reindex",
"pyglet.clock.schedule_interval",
"pyglet.sprite.Sprite",
"pyglet.text.Label",
"random.randint"
] | [((268, 293), 'pyglet.resource.reindex', 'pyglet.resource.reindex', ([], {}), '()\n', (291, 293), False, 'import pyglet\n'), ((3868, 3884), 'pyglet.app.run', 'pyglet.app.run', ([], {}), '()\n', (3882, 3884), False, 'import pyglet\n'), ((483, 518), 'pyglet.window.key.KeyStateHandler', 'pyglet.window.key.KeyStateHandler', ([], {}), '()\n', (516, 518), False, 'import pyglet\n'), ((684, 725), 'pyglet.resource.image', 'pyglet.resource.image', (['"""alienblaster.png"""'], {}), "('alienblaster.png')\n", (705, 725), False, 'import pyglet\n'), ((760, 797), 'pyglet.resource.image', 'pyglet.resource.image', (['"""asteroid.png"""'], {}), "('asteroid.png')\n", (781, 797), False, 'import pyglet\n'), ((922, 975), 'pyglet.sprite.Sprite', 'pyglet.sprite.Sprite', ([], {'img': 'self.ship_image', 'x': '(30)', 'y': '(30)'}), '(img=self.ship_image, x=30, y=30)\n', (942, 975), False, 'import pyglet\n'), ((1079, 1136), 'pyglet.text.Label', 'pyglet.text.Label', ([], {'text': '"""Score:0 Highscore:0"""', 'x': '(10)', 'y': '(10)'}), "(text='Score:0 Highscore:0', x=10, y=10)\n", (1096, 1136), False, 'import pyglet\n'), ((1286, 1339), 'pyglet.clock.schedule_interval', 'pyglet.clock.schedule_interval', (['self.game_tick', '(0.005)'], {}), '(self.game_tick, 0.005)\n', (1316, 1339), False, 'import pyglet\n'), ((2329, 2350), 'random.randint', 'random.randint', (['(0)', '(45)'], {}), '(0, 45)\n', (2343, 2350), False, 'import random\n'), ((2448, 2470), 'random.randint', 'random.randint', (['(0)', '(800)'], {}), '(0, 800)\n', (2462, 2470), False, 'import random\n'), ((2068, 2090), 'random.randint', 'random.randint', (['(0)', '(800)'], {}), '(0, 800)\n', (2082, 2090), False, 'import random\n')] |
import matplotlib.pyplot as plt
import pandas as pd
import pytask
import seaborn as sns
from sid import get_colors
from src.config import BLD
from src.config import PLOT_END_DATE
from src.config import PLOT_SIZE
from src.config import PLOT_START_DATE
from src.config import SRC
from src.plotting.plotting import BLUE
from src.plotting.plotting import style_plot
from src.testing.shared import convert_weekly_to_daily
from src.testing.shared import get_date_from_year_and_week
plt.rcParams.update(
{
"axes.spines.right": False,
"axes.spines.top": False,
"legend.frameon": False,
"figure.figsize": (12, 3.5),
}
)
@pytask.mark.depends_on(
{
"data": BLD / "data" / "raw_time_series" / "test_distribution.xlsx",
"testing_shared.py": SRC / "testing" / "shared.py",
}
)
@pytask.mark.produces(
{
"data": BLD / "data" / "testing" / "characteristics_of_the_tested.csv",
"share_of_tests_for_symptomatics_series": BLD
/ "data"
/ "testing"
/ "share_of_tests_for_symptomatics_series.pkl",
"mean_age": BLD / "data" / "testing" / "mean_age_of_tested.pdf",
"share_with_symptom_status": BLD
/ "data"
/ "testing"
/ "share_of_tested_with_symptom_status.pdf",
"symptom_shares": BLD
/ "figures"
/ "data"
/ "testing"
/ "share_of_pcr_tests_going_to_symptomatics.pdf",
"used_share_pcr_going_to_symptomatic": BLD
/ "figures"
/ "data"
/ "testing"
/ "used_share_of_pcr_tests_going_to_symptomatics.pdf",
}
)
def task_prepare_characteristics_of_the_tested(depends_on, produces):
df = pd.read_excel(depends_on["data"], sheet_name="Klinische_Aspekte", header=1)
df = _clean_data(df)
df = convert_weekly_to_daily(df.reset_index(), divide_by_7_cols=[])
plot_data = df[df["date"].between(PLOT_START_DATE, PLOT_END_DATE)]
fig, ax = _plot_df_column(plot_data, "mean_age")
fig, ax = style_plot(fig, ax)
fig.tight_layout()
fig.savefig(produces["mean_age"])
plt.close()
fig, ax = _plot_df_column(plot_data, "share_with_symptom_status")
fig, ax = style_plot(fig, ax)
fig.tight_layout()
fig.savefig(produces["share_with_symptom_status"])
plt.close()
symptom_shares = [
"share_symptomatic_lower_bound",
"share_symptomatic_among_known",
"share_symptomatic_upper_bound",
]
df = df.set_index("date")
to_concat = [df]
for share in symptom_shares:
extrapolated = _extrapolate_series_after_february(df[share])
to_concat.append(extrapolated)
df = pd.concat(to_concat, axis=1)
colors = get_colors("categorical", len(symptom_shares))
fig, ax = plt.subplots(figsize=PLOT_SIZE)
for share, color in zip(symptom_shares, colors):
extrapolated = f"{share}_extrapolated"
sns.lineplot(x=df.index, y=df[share], ax=ax, color=color, label=share)
sns.lineplot(x=df.index, y=df[extrapolated], ax=ax, color=color)
fig.tight_layout()
fig, ax = style_plot(fig, ax)
fig.savefig(produces["symptom_shares"])
plt.close()
share_of_tests_for_symptomatics_series = df[
[
"share_symptomatic_lower_bound_extrapolated",
"share_symptomatic_among_known_extrapolated",
]
].mean(axis=1)
share_of_tests_for_symptomatics_series.to_pickle(
produces["share_of_tests_for_symptomatics_series"]
)
df = df.reset_index().rename(columns={"index": "date"})
df.to_csv(produces["data"])
fig, ax = plt.subplots(figsize=PLOT_SIZE)
sns.lineplot(
x=share_of_tests_for_symptomatics_series.index,
y=share_of_tests_for_symptomatics_series,
color=BLUE,
linewidth=3.0,
alpha=0.6,
)
fig, ax = style_plot(fig, ax)
fig.tight_layout()
fig.savefig(produces["used_share_pcr_going_to_symptomatic"])
def _clean_data(df):
share_sym_de = "Anteil keine, bzw. keine für COVID-19 bedeutsamen Symptome"
column_translation = {
"Meldejahr": "year",
"MW": "week",
"Fälle gesamt": "n_total_cases",
"Mittelwert Alter (Jahre)": "mean_age",
"Anzahl mit Angaben zu Symptomen": "n_with_symptom_status",
share_sym_de: "share_asymptomatic_among_known",
}
df = df.rename(columns=column_translation)
df = df[column_translation.values()]
df["date"] = df.apply(get_date_from_year_and_week, axis=1)
df = df.set_index("date")
df["share_with_symptom_status"] = df["n_with_symptom_status"] / df["n_total_cases"]
df["share_symptomatic_among_known"] = 1 - df["share_asymptomatic_among_known"]
keep = [
"mean_age",
"share_with_symptom_status",
"share_asymptomatic_among_known",
"share_symptomatic_among_known",
]
df = df[keep]
df["share_without_symptom_status"] = 1 - df["share_with_symptom_status"]
# The lower bound on the share of symptomatics is assuming everyone without
# symptom status was asymptomatic
df["share_symptomatic_lower_bound"] = (
df["share_symptomatic_among_known"] * df["share_with_symptom_status"]
)
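    # The upper bound conversely assumes everyone without symptom status was symptomatic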
df["share_symptomatic_upper_bound"] = (
df["share_symptomatic_lower_bound"] + df["share_without_symptom_status"]
)
return df
def _extrapolate_series_after_february(sr, end_date="2021-08-30"):
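    # Keeps the series up to its last empirical date (capped at 2021-02-28) and then
    # extends it to end_date by holding the mean of the final 30 observed days constant.
    # E.g. a series whose last 30 observed days average 0.6 is continued at 0.6.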
end_date = pd.Timestamp(end_date)
last_empirical_date = min(pd.Timestamp("2021-02-28"), sr.index.max())
empirical_part = sr[:last_empirical_date]
extension_index = pd.date_range(
last_empirical_date + pd.Timedelta(days=1), end_date
)
extension_value = sr[
last_empirical_date - pd.Timedelta(days=30) : last_empirical_date
].mean()
extension = pd.Series(extension_value, index=extension_index)
out = pd.concat([empirical_part, extension])
out.name = f"{sr.name}_extrapolated"
return out
def _plot_df_column(df, cols, title=None):
if isinstance(cols, str):
cols = [cols]
fig, ax = plt.subplots(figsize=PLOT_SIZE)
for col in cols:
label = col.replace("_", " ").title()
sns.lineplot(x=df["date"], y=df[col], ax=ax, label=label)
if title is not None:
ax.set_title(title)
elif len(cols) == 1:
ax.set_title(label)
style_plot(fig, ax)
return fig, ax
| [
"pandas.Series",
"pytask.mark.depends_on",
"pandas.Timedelta",
"src.plotting.plotting.style_plot",
"matplotlib.pyplot.close",
"matplotlib.pyplot.rcParams.update",
"seaborn.lineplot",
"pandas.read_excel",
"pandas.Timestamp",
"pandas.concat",
"matplotlib.pyplot.subplots",
"pytask.mark.produces"
] | [((478, 611), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'axes.spines.right': False, 'axes.spines.top': False, 'legend.frameon': \n False, 'figure.figsize': (12, 3.5)}"], {}), "({'axes.spines.right': False, 'axes.spines.top': False,\n 'legend.frameon': False, 'figure.figsize': (12, 3.5)})\n", (497, 611), True, 'import matplotlib.pyplot as plt\n'), ((656, 809), 'pytask.mark.depends_on', 'pytask.mark.depends_on', (["{'data': BLD / 'data' / 'raw_time_series' / 'test_distribution.xlsx',\n 'testing_shared.py': SRC / 'testing' / 'shared.py'}"], {}), "({'data': BLD / 'data' / 'raw_time_series' /\n 'test_distribution.xlsx', 'testing_shared.py': SRC / 'testing' /\n 'shared.py'})\n", (678, 809), False, 'import pytask\n'), ((832, 1477), 'pytask.mark.produces', 'pytask.mark.produces', (["{'data': BLD / 'data' / 'testing' / 'characteristics_of_the_tested.csv',\n 'share_of_tests_for_symptomatics_series': BLD / 'data' / 'testing' /\n 'share_of_tests_for_symptomatics_series.pkl', 'mean_age': BLD / 'data' /\n 'testing' / 'mean_age_of_tested.pdf', 'share_with_symptom_status': BLD /\n 'data' / 'testing' / 'share_of_tested_with_symptom_status.pdf',\n 'symptom_shares': BLD / 'figures' / 'data' / 'testing' /\n 'share_of_pcr_tests_going_to_symptomatics.pdf',\n 'used_share_pcr_going_to_symptomatic': BLD / 'figures' / 'data' /\n 'testing' / 'used_share_of_pcr_tests_going_to_symptomatics.pdf'}"], {}), "({'data': BLD / 'data' / 'testing' /\n 'characteristics_of_the_tested.csv',\n 'share_of_tests_for_symptomatics_series': BLD / 'data' / 'testing' /\n 'share_of_tests_for_symptomatics_series.pkl', 'mean_age': BLD / 'data' /\n 'testing' / 'mean_age_of_tested.pdf', 'share_with_symptom_status': BLD /\n 'data' / 'testing' / 'share_of_tested_with_symptom_status.pdf',\n 'symptom_shares': BLD / 'figures' / 'data' / 'testing' /\n 'share_of_pcr_tests_going_to_symptomatics.pdf',\n 'used_share_pcr_going_to_symptomatic': BLD / 'figures' / 'data' /\n 'testing' / 'used_share_of_pcr_tests_going_to_symptomatics.pdf'})\n", (852, 1477), False, 'import pytask\n'), ((1694, 1769), 'pandas.read_excel', 'pd.read_excel', (["depends_on['data']"], {'sheet_name': '"""Klinische_Aspekte"""', 'header': '(1)'}), "(depends_on['data'], sheet_name='Klinische_Aspekte', header=1)\n", (1707, 1769), True, 'import pandas as pd\n'), ((2008, 2027), 'src.plotting.plotting.style_plot', 'style_plot', (['fig', 'ax'], {}), '(fig, ax)\n', (2018, 2027), False, 'from src.plotting.plotting import style_plot\n'), ((2093, 2104), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2102, 2104), True, 'import matplotlib.pyplot as plt\n'), ((2190, 2209), 'src.plotting.plotting.style_plot', 'style_plot', (['fig', 'ax'], {}), '(fig, ax)\n', (2200, 2209), False, 'from src.plotting.plotting import style_plot\n'), ((2292, 2303), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2301, 2303), True, 'import matplotlib.pyplot as plt\n'), ((2659, 2687), 'pandas.concat', 'pd.concat', (['to_concat'], {'axis': '(1)'}), '(to_concat, axis=1)\n', (2668, 2687), True, 'import pandas as pd\n'), ((2763, 2794), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'PLOT_SIZE'}), '(figsize=PLOT_SIZE)\n', (2775, 2794), True, 'import matplotlib.pyplot as plt\n'), ((3085, 3104), 'src.plotting.plotting.style_plot', 'style_plot', (['fig', 'ax'], {}), '(fig, ax)\n', (3095, 3104), False, 'from src.plotting.plotting import style_plot\n'), ((3153, 3164), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (3162, 3164), True, 'import 
matplotlib.pyplot as plt\n'), ((3597, 3628), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'PLOT_SIZE'}), '(figsize=PLOT_SIZE)\n', (3609, 3628), True, 'import matplotlib.pyplot as plt\n'), ((3633, 3782), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'share_of_tests_for_symptomatics_series.index', 'y': 'share_of_tests_for_symptomatics_series', 'color': 'BLUE', 'linewidth': '(3.0)', 'alpha': '(0.6)'}), '(x=share_of_tests_for_symptomatics_series.index, y=\n share_of_tests_for_symptomatics_series, color=BLUE, linewidth=3.0,\n alpha=0.6)\n', (3645, 3782), True, 'import seaborn as sns\n'), ((3835, 3854), 'src.plotting.plotting.style_plot', 'style_plot', (['fig', 'ax'], {}), '(fig, ax)\n', (3845, 3854), False, 'from src.plotting.plotting import style_plot\n'), ((5426, 5448), 'pandas.Timestamp', 'pd.Timestamp', (['end_date'], {}), '(end_date)\n', (5438, 5448), True, 'import pandas as pd\n'), ((5802, 5851), 'pandas.Series', 'pd.Series', (['extension_value'], {'index': 'extension_index'}), '(extension_value, index=extension_index)\n', (5811, 5851), True, 'import pandas as pd\n'), ((5862, 5900), 'pandas.concat', 'pd.concat', (['[empirical_part, extension]'], {}), '([empirical_part, extension])\n', (5871, 5900), True, 'import pandas as pd\n'), ((6068, 6099), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'PLOT_SIZE'}), '(figsize=PLOT_SIZE)\n', (6080, 6099), True, 'import matplotlib.pyplot as plt\n'), ((6344, 6363), 'src.plotting.plotting.style_plot', 'style_plot', (['fig', 'ax'], {}), '(fig, ax)\n', (6354, 6363), False, 'from src.plotting.plotting import style_plot\n'), ((2904, 2974), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'df.index', 'y': 'df[share]', 'ax': 'ax', 'color': 'color', 'label': 'share'}), '(x=df.index, y=df[share], ax=ax, color=color, label=share)\n', (2916, 2974), True, 'import seaborn as sns\n'), ((2983, 3047), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': 'df.index', 'y': 'df[extrapolated]', 'ax': 'ax', 'color': 'color'}), '(x=df.index, y=df[extrapolated], ax=ax, color=color)\n', (2995, 3047), True, 'import seaborn as sns\n'), ((5479, 5505), 'pandas.Timestamp', 'pd.Timestamp', (['"""2021-02-28"""'], {}), "('2021-02-28')\n", (5491, 5505), True, 'import pandas as pd\n'), ((6175, 6232), 'seaborn.lineplot', 'sns.lineplot', ([], {'x': "df['date']", 'y': 'df[col]', 'ax': 'ax', 'label': 'label'}), "(x=df['date'], y=df[col], ax=ax, label=label)\n", (6187, 6232), True, 'import seaborn as sns\n'), ((5636, 5656), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5648, 5656), True, 'import pandas as pd\n'), ((5729, 5750), 'pandas.Timedelta', 'pd.Timedelta', ([], {'days': '(30)'}), '(days=30)\n', (5741, 5750), True, 'import pandas as pd\n')] |
#!/usr/bin/python3
from sys import argv as args
import numpy as np
import math
import sys
import csv
if len(args) == 1:
print("Sintax: %s <DIMS> <NOISE> <SAMPLES> <OUTPUT_CSV_FILEPATH>" % args[0])
sys.exit(0)
dims = int(args[1]) if len(args) >= 2 else 10
noise = float(args[2]) if len(args) >= 3 else 0.0
samples = int(args[3]) if len(args) >= 4 else 1000
output = args[4] if len(args) >= 5 else "./regression_dataset.csv"
with open(output, "w") as fout:
writer = csv.writer(fout, delimiter=";")
header = ["Y"] + ["X%d" % i for i in range(dims)]
writer.writerow(header)
offsets = np.random.rand(dims) * 2 * math.pi
frequencies = np.random.rand(dims) * 1.0
y_data = np.zeros( (samples, dims + 1) )
y_data[:,1:] = np.random.normal(0, 1, (samples, dims)) * 2 * math.pi
noise_data = np.random.normal(0, noise, samples)
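    # Each target y is a sum of per-feature sinusoids, sin(offset_i + x_i * frequency_i),
    # plus zero-mean Gaussian noise whose standard deviation is the <NOISE> argument.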
for s in range(samples):
y_data[s,0] = np.sum(np.sin(offsets + y_data[s,1:] * frequencies)) + noise_data[s]
writer.writerow([str(x) for x in y_data[s,:]])
| [
"numpy.random.normal",
"numpy.random.rand",
"csv.writer",
"numpy.zeros",
"sys.exit",
"numpy.sin"
] | [((209, 220), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (217, 220), False, 'import sys\n'), ((489, 520), 'csv.writer', 'csv.writer', (['fout'], {'delimiter': '""";"""'}), "(fout, delimiter=';')\n", (499, 520), False, 'import csv\n'), ((720, 749), 'numpy.zeros', 'np.zeros', (['(samples, dims + 1)'], {}), '((samples, dims + 1))\n', (728, 749), True, 'import numpy as np\n'), ((842, 877), 'numpy.random.normal', 'np.random.normal', (['(0)', 'noise', 'samples'], {}), '(0, noise, samples)\n', (858, 877), True, 'import numpy as np\n'), ((675, 695), 'numpy.random.rand', 'np.random.rand', (['dims'], {}), '(dims)\n', (689, 695), True, 'import numpy as np\n'), ((622, 642), 'numpy.random.rand', 'np.random.rand', (['dims'], {}), '(dims)\n', (636, 642), True, 'import numpy as np\n'), ((771, 810), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(samples, dims)'], {}), '(0, 1, (samples, dims))\n', (787, 810), True, 'import numpy as np\n'), ((937, 982), 'numpy.sin', 'np.sin', (['(offsets + y_data[s, 1:] * frequencies)'], {}), '(offsets + y_data[s, 1:] * frequencies)\n', (943, 982), True, 'import numpy as np\n')] |
import sqlite3
data = sqlite3.connect("database.db")
datacur = data.cursor()
datacur.execute("SELECT * FROM match")
pdata = datacur.fetchall()
def calculate_points(pdata):# Calculates points
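    # Column layout, inferred from how the row is used below: pdata[1] runs scored,
    # pdata[2] balls faced, pdata[3] fours, pdata[4] sixes, pdata[5] balls bowled,
    # pdata[7] runs conceded, pdata[8] wickets, pdata[9:12] fielding contributions.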
points = 0.0
score = pdata[1]
try:
strike_rate = float(pdata[1]) / float(pdata[2])
except:
strike_rate = 0
fours, sixes = float(pdata[3]), float(pdata[4])
twos = int(((score - (4 * fours) - (6 * sixes))) / 2)
wickets = 10 * float(pdata[8])
try:
economy = float(pdata[7]) / (float(pdata[5]) / 6)
except:
economy = 0
Fielding = float(pdata[9]) + float(pdata[10]) + float(pdata[11])
points += (fours + (2 * sixes) + (10 * Fielding) + twos + wickets)
if score > 100:
points += 10
elif score >= 50:
points += 5
if strike_rate > 1:
points += 4
elif strike_rate >= 0.8:
points += 2
if wickets >= 5:
points += 10
elif wickets > 3:
points += 5
if economy >= 3.5 and economy <= 4.5:
points += 4
elif economy >= 2 and economy < 3.5:
points += 7
elif economy < 2:
points += 10
return points
player_points = {}
for p in pdata: # calculates points and stores in dictionary
player_points[p[0]] = calculate_points(p)
print(player_points) | [
"sqlite3.connect"
] | [((25, 55), 'sqlite3.connect', 'sqlite3.connect', (['"""database.db"""'], {}), "('database.db')\n", (40, 55), False, 'import sqlite3\n')] |
import unittest
import time
from collections import OrderedDict
import random
import sys
import normalization
import typ
from app_tree import UnfinishedLeaf
from cache import Cache, CacheNop
from domain_fparity_apptree import d_general_even_parity
from normalization import Normalizator, NormalizatorNop
from generator import Generator
from generator_static import ts, get_num
from parsers import parse_ctx, parse_typ
REALLY_SHORT_TIME = 0.01
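# Time budget (in seconds) for the second pass over get_num in test_d: repeated
# queries are expected to be answered from the generator's cache almost instantly.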
def d1():
return (parse_typ((('P', 'A', ('P', 'A', 'A')), '->', ('P', 'A', ('P', 'A', 'A')))),
parse_ctx(OrderedDict([
("s", (("a", "->", ("b", "->", "c")), '->',
(("a", "->", "b"), "->", ("a", "->", "c")))),
("k", ("a", "->", ("b", "->", "a"))),
("seri", (("Dag", 'a', 'b'), '->', (("Dag", 'b', 'c'), '->', ("Dag", 'a', 'c')))),
("para", (("Dag", 'a', 'b'), '->', (("Dag", 'c', 'd'), '->', ("Dag", ('P', 'a', 'c'), ('P', 'b', 'd'))))),
("mkDag", (("a", "->", "b"), '->', ("Dag", "a", "b"))),
("deDag", (("Dag", "a", "b"), '->', ("a", "->", "b"),)),
("mkP", ("a", "->", ("b", "->", ('P', "a", 'b')))),
("fst", (('P', "a", 'b'), '->', 'a')),
("snd", (('P', "a", 'b'), '->', 'b')),
])),
4)
def d_general_even_parity_sk():
return (parse_typ('Bool'),
parse_ctx(OrderedDict([
('xs', ('List', 'Bool')),
("s", (("a", "->", ("b", "->", "c")), '->', (("a", "->", "b"), "->", ("a", "->", "c")))),
("k", ("a", "->", ("b", "->", "a"))),
("and", ('Bool', '->', ('Bool', '->', 'Bool'))),
("or", ('Bool', '->', ('Bool', '->', 'Bool'))),
("nand", ('Bool', '->', ('Bool', '->', 'Bool'))),
("nor", ('Bool', '->', ('Bool', '->', 'Bool'))),
('foldr', (('a', '->', ('b', '->', 'b')), '->', ('b', '->', (('List', 'a'), '->', 'b')))),
('true', 'Bool'),
('false', 'Bool')
# ("head", (('List', 'Bool'), '->', ('Maybe', 'Bool'))),
# ("tail", (('List', 'Bool'), '->', ('Maybe', ('List', 'Bool')))),
])),
5)
def d2():
return (parse_typ('B'),
parse_ctx(OrderedDict([
("f", ("A", "->", 'B')),
("x", "A"),
("y", "B"),
])),
5)
def d3():
return (parse_typ(('a', '->', 'b')),
parse_ctx(OrderedDict([
("s", (("a", "->", ("b", "->", "c")), '->',
(("a", "->", "b"), "->", ("a", "->", "c")))),
("k", ("a", "->", ("b", "->", "a"))),
])),
5)
class TestGen(unittest.TestCase):
def test_d2(self):
return
for goal, gamma, max_k in [d_general_even_parity()]:#d1(), d2(), d3()]:
g = Generator(gamma, normalizator=normalization.Normalizator)
for k in range(1, max_k + 1):
g_num = g.get_num(k, goal)
print(g_num)
def test_d(self):
for goal, gamma, max_k in [d_general_even_parity(), d1(), d2(), d3()]:
g = Generator(gamma, normalizator=normalization.NormalizatorNop)
gnf = Generator(gamma, normalizator=normalization.Normalizator)
gNC = Generator(gamma, normalizator=normalization.NormalizatorNop, cache=CacheNop)
gnfNC = Generator(gamma, normalizator=normalization.Normalizator, cache=CacheNop)
res = []
for k in range(1, max_k + 1):
# check static generator
s_num = get_num(gamma, k, goal)
s_trees = set(tr.tree for tr in ts(gamma, k, goal, 0))
self.assertEqual(s_num, len(s_trees))
for t in s_trees:
self.assertTrue(t.is_well_typed(gamma))
# check generator
g_num = g.get_num(k, goal)
self.assertEqual(s_num, g_num)
res.append(g_num)
#print(g_num)
# check generator in nf
self.assertEqual(s_num, gnf.get_num(k, goal))
for i in range(10):
t = gnf.gen_one(k, goal)
if s_num == 0:
self.assertIsNone(t)
else:
self.assertTrue(t.is_well_typed(gamma))
# check generator without cache
self.assertEqual(s_num, gNC.get_num(k, goal))
# check generator in nf without cache
self.assertEqual(s_num, gnfNC.get_num(k, goal))
# second run should have the same results
# but it should be much faster
start = time.time()
for k in range(1, max_k + 1):
g_num = g.get_num(k, goal)
self.assertEqual(res[k - 1], g_num)
end = time.time()
self.assertLess(end - start, REALLY_SHORT_TIME)
def test_skeletons(self):
check_skeletons(self)
IS_LOG_PRINTING = False
def set_log_printing(new_val=True):
global IS_LOG_PRINTING
IS_LOG_PRINTING = new_val
def log(*args):
if IS_LOG_PRINTING:
print(*args)
else:
pass
def check_skeletons(tester):
for goal, gamma, max_k in [d1(), d2(), d3()]:
log('goal:', goal)
        # gamma.add_internal_pair() # todo: remove completely once this works
g = Generator(gamma)
for k in range(1, max_k+1):
log(' k:', k)
check_successors(tester, g, k, goal)
def check_successors(tester, generator, k, goal_typ):
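    # Enumerates every well-typed tree of size k for goal_typ and then checks, via
    # check_successors_acc, that the plain and 'smart' skeleton expansions agree and
    # that each enumerated tree is covered by exactly one successor skeleton.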
sk = UnfinishedLeaf()
sk_smart = UnfinishedLeaf(goal_typ)
all_trees = set(tr.tree for tr in ts(generator.gamma, k, goal_typ, 0))
if all_trees:
check_successors_acc(tester, generator, k, goal_typ, sk, sk_smart, all_trees)
def log_expansion(parent_skeleton, next_skeletons, start_time):
delta_time = time.time() - start_time
ss_str = ' ... ' + ', '.join((str(s) for s in next_skeletons)) if next_skeletons else ''
num = str(len(next_skeletons))
log(' dt=', '%.2f' % delta_time, parent_skeleton, (' --> num=' + num), ss_str)
def check_successors_acc(tester, generator, k, goal_typ, parent_skeleton, parent_skeleton_smart, all_trees):
t = time.time()
skeletons = parent_skeleton.successors(generator, k, goal_typ)
log_expansion(parent_skeleton, skeletons, t)
t = time.time()
skeletons_smart = parent_skeleton_smart.successors_smart(generator, k)
log_expansion(parent_skeleton_smart, skeletons_smart, t)
tester.assertEqual(len(skeletons), len(skeletons_smart))
tester.assertEqual([str(s) for s in skeletons], [str(s) for s in skeletons_smart])
log()
if len(skeletons_smart) > 0:
tree_smart = generator.gen_one_uf_smart(parent_skeleton_smart, k)
log(' eg:', str(tree_smart))
tester.assertTrue(tree_smart.is_well_typed(generator.gamma))
else:
tester.assertEqual(len(all_trees), 1)
return
skeleton2trees = {}
sk2sk_smart = {}
for (sk, sk_smart) in zip(skeletons, skeletons_smart):
# log(' ', sk)
# log(' ', sk_smart)
sk2sk_smart[sk] = sk_smart
for tree in all_trees:
has_skeleton = False
for sk in skeletons:
if sk.is_skeleton_of(tree):
tester.assertFalse(has_skeleton)
has_skeleton = True
skeleton2trees.setdefault(sk, []).append(tree)
tester.assertTrue(has_skeleton)
if len(skeletons) != len(skeleton2trees):
tester.assertEqual(len(skeletons), len(skeleton2trees))
for sk, all_trees_new in skeleton2trees.items():
check_successors_acc(tester, generator, k, goal_typ, sk, sk2sk_smart[sk], all_trees_new)
def check_generators_have_same_outputs(generators, goal, max_k):
def check_eq(xs):
return all(x == xs[0] for x in xs)
def check_eq_info(xs):
if len(xs) == 0:
return True
head = xs[0]
for x in xs:
if x != head:
print('!!!\n', str(x), '\n', str(head))
return False
return True
for k in range(1, max_k + 1):
print('-- k =', k, '-' * 30)
sub_results_s = []
for gen_name, gen in generators.items():
print(' ', gen_name, '...', end='')
sub_results = gen.subs(k, goal, 0)
print('done')
sub_results_s.append(sub_results)
print(check_eq_info(sub_results_s))
def separate_error_404():
# seed = random.randint(0, sys.maxsize)
seed = 7669612278400467845
random.seed(seed)
print(seed)
goal, gamma, max_k = d3()
gene = Generator(gamma)
hax_k = 3
hax_typ = parse_typ(('_P_', 4, (5, '->', (6, '->', 7))))
hax_tree = gene.gen_one(hax_k, hax_typ)
print(hax_tree.typ)
def separate_error_404_sub():
goal, gamma, max_k = d3()
gene = Generator(gamma)
k = 1
n = 4
typ = parse_typ((1, '->', (2, '->', 3)))
tree = gene.subs(k, typ, n)
print(tree.typ)
def separate_error_ip_new():
goal, gamma, max_k = d3()
gene = Generator(gamma)
k = 2
skel = UnfinishedLeaf(goal)
set_log_printing(True)
t = time.time()
next_skels = skel.successors_smart(gene, k)
log_expansion(skel, next_skels, t)
# print(next_skels)
def separate_error_bad_smart_expansion_2017_02_28():
print('Separating error: bad_expansion_2017_02_28')
problem_goal, problem_gamma, _ = d3()
gene = Generator(problem_gamma)
problem_k = 5
skel_0 = UnfinishedLeaf(problem_goal)
set_log_printing(True)
def succ(sk, path=None, is_smart=True, goal_typ=None):
t = time.time()
if is_smart:
next_sks = sk.successors_smart(gene, problem_k)
else:
next_sks = sk.successors(gene, problem_k, goal_typ)
log_expansion(sk, next_sks, t)
if not path:
return next_sks
else:
i = path[0]
path = path[1:]
next_one = next_sks[i]
print(' i=', i, 'selected:', next_one)
return succ(next_one, path, is_smart, goal_typ) if path else next_one
bug_path_1 = [0, 0, 0, 2, 0, 0] # (((k (? ?)) ?) ?)
bug_path_2 = [0, 0, 0, 2, 0, 0]
skel = succ(skel_0, bug_path_1, False, problem_goal)
print(skel)
print()
seed = 42
random.seed(seed)
print('seed:', seed)
tree = gene.gen_one_uf(skel, problem_k, problem_goal)
log(str(tree))
log('is_well_typed:', tree.is_well_typed(gene.gamma))
print()
skel = succ(skel_0, bug_path_2)
print(skel)
print()
if __name__ == "__main__":
if True:
unittest.main()
# separate_error_ip_new()
# separate_error_404()
# separate_error_404_sub()
# separate_error_bad_smart_expansion_2017_02_28()
else:
# seed = random.randint(0, sys.maxsize)
seed = 1482646273836000672
# seed = 2659613674626116145
# seed = 249273683574813401
random.seed(seed)
print(seed)
# print('randomState:', random.getstate())
IS_LOG_PRINTING = True
check_skeletons(TestGen())
if not True:
goal, gamma, max_k = d2()
# print(gamma, '\n')
        # gamma.add_internal_pair() # todo: remove completely once this works
print(gamma, '\n')
gen = Generator(gamma)
k = 2
skeleton = UnfinishedLeaf()
skeleton_smart = UnfinishedLeaf(goal)
succs = skeleton.successors(gen, k, goal)
print('[', ','.join(str(s) for s in succs), ']')
succs_smart = skeleton_smart.successors_smart(gen, k)
print('[', ','.join(str(s) for s in succs_smart), ']')
skeleton = succs[0]
skeleton_smart = succs_smart[0]
succs = skeleton.successors(gen, k, goal)
print('[', ','.join(str(s) for s in succs), ']')
succs_smart = skeleton_smart.successors_smart(gen, k)
print('[', ','.join(str(s) for s in succs_smart), ']')
if not True:
goal, gamma, max_k = d3() # d1()
# max_k = 2
gens = {
'gen_full': Generator(gamma, cache=Cache, normalizator=Normalizator),
'gen_cache_only': Generator(gamma, cache=Cache, normalizator=NormalizatorNop),
'gen_norm_only': Generator(gamma, cache=CacheNop, normalizator=Normalizator),
'gen_lame': Generator(gamma, cache=CacheNop, normalizator=NormalizatorNop)
}
check_generators_have_same_outputs(gens, goal, max_k)
if not True:
import time
goal, gamma, max_k = d3() # d1()
max_k = 2
gen = Generator(gamma, cache=Cache, normalizator=Normalizator)
if True:
print(gamma)
print('=' * 30)
print(goal)
print('=' * 30, '\n')
def generate_stuff():
a = time.time()
for k in range(1, max_k + 1):
print('-- k =', k, '-' * 30)
num = gen.get_num(k, goal)
sub_results = gen.subs(k, goal, 0)
print('NUM =', num, '\n')
for sub_res in sub_results:
print(sub_res)
print("\ntime: %.2f s\n" % (time.time() - a))
generate_stuff()
if False:
print('=' * 40, '\n')
generate_stuff()
| [
"generator.Generator",
"collections.OrderedDict",
"generator_static.get_num",
"domain_fparity_apptree.d_general_even_parity",
"unittest.main",
"random.seed",
"generator_static.ts",
"time.time",
"app_tree.UnfinishedLeaf",
"parsers.parse_typ"
] | [((5731, 5747), 'app_tree.UnfinishedLeaf', 'UnfinishedLeaf', ([], {}), '()\n', (5745, 5747), False, 'from app_tree import UnfinishedLeaf\n'), ((5763, 5787), 'app_tree.UnfinishedLeaf', 'UnfinishedLeaf', (['goal_typ'], {}), '(goal_typ)\n', (5777, 5787), False, 'from app_tree import UnfinishedLeaf\n'), ((6406, 6417), 'time.time', 'time.time', ([], {}), '()\n', (6415, 6417), False, 'import time\n'), ((6543, 6554), 'time.time', 'time.time', ([], {}), '()\n', (6552, 6554), False, 'import time\n'), ((8765, 8782), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (8776, 8782), False, 'import random\n'), ((8841, 8857), 'generator.Generator', 'Generator', (['gamma'], {}), '(gamma)\n', (8850, 8857), False, 'from generator import Generator\n'), ((8886, 8932), 'parsers.parse_typ', 'parse_typ', (["('_P_', 4, (5, '->', (6, '->', 7)))"], {}), "(('_P_', 4, (5, '->', (6, '->', 7))))\n", (8895, 8932), False, 'from parsers import parse_ctx, parse_typ\n'), ((9075, 9091), 'generator.Generator', 'Generator', (['gamma'], {}), '(gamma)\n', (9084, 9091), False, 'from generator import Generator\n'), ((9122, 9156), 'parsers.parse_typ', 'parse_typ', (["(1, '->', (2, '->', 3))"], {}), "((1, '->', (2, '->', 3)))\n", (9131, 9156), False, 'from parsers import parse_ctx, parse_typ\n'), ((9281, 9297), 'generator.Generator', 'Generator', (['gamma'], {}), '(gamma)\n', (9290, 9297), False, 'from generator import Generator\n'), ((9319, 9339), 'app_tree.UnfinishedLeaf', 'UnfinishedLeaf', (['goal'], {}), '(goal)\n', (9333, 9339), False, 'from app_tree import UnfinishedLeaf\n'), ((9377, 9388), 'time.time', 'time.time', ([], {}), '()\n', (9386, 9388), False, 'import time\n'), ((9665, 9689), 'generator.Generator', 'Generator', (['problem_gamma'], {}), '(problem_gamma)\n', (9674, 9689), False, 'from generator import Generator\n'), ((9721, 9749), 'app_tree.UnfinishedLeaf', 'UnfinishedLeaf', (['problem_goal'], {}), '(problem_goal)\n', (9735, 9749), False, 'from app_tree import UnfinishedLeaf\n'), ((10543, 10560), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (10554, 10560), False, 'import random\n'), ((470, 545), 'parsers.parse_typ', 'parse_typ', (["(('P', 'A', ('P', 'A', 'A')), '->', ('P', 'A', ('P', 'A', 'A')))"], {}), "((('P', 'A', ('P', 'A', 'A')), '->', ('P', 'A', ('P', 'A', 'A'))))\n", (479, 545), False, 'from parsers import parse_ctx, parse_typ\n'), ((1389, 1406), 'parsers.parse_typ', 'parse_typ', (['"""Bool"""'], {}), "('Bool')\n", (1398, 1406), False, 'from parsers import parse_ctx, parse_typ\n'), ((2303, 2317), 'parsers.parse_typ', 'parse_typ', (['"""B"""'], {}), "('B')\n", (2312, 2317), False, 'from parsers import parse_ctx, parse_typ\n'), ((2508, 2535), 'parsers.parse_typ', 'parse_typ', (["('a', '->', 'b')"], {}), "(('a', '->', 'b'))\n", (2517, 2535), False, 'from parsers import parse_ctx, parse_typ\n'), ((5538, 5554), 'generator.Generator', 'Generator', (['gamma'], {}), '(gamma)\n', (5547, 5554), False, 'from generator import Generator\n'), ((6050, 6061), 'time.time', 'time.time', ([], {}), '()\n', (6059, 6061), False, 'import time\n'), ((9850, 9861), 'time.time', 'time.time', ([], {}), '()\n', (9859, 9861), False, 'import time\n'), ((10849, 10864), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10862, 10864), False, 'import unittest\n'), ((11199, 11216), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (11210, 11216), False, 'import random\n'), ((11554, 11570), 'generator.Generator', 'Generator', (['gamma'], {}), '(gamma)\n', (11563, 11570), False, 'from generator import 
Generator\n'), ((11605, 11621), 'app_tree.UnfinishedLeaf', 'UnfinishedLeaf', ([], {}), '()\n', (11619, 11621), False, 'from app_tree import UnfinishedLeaf\n'), ((11647, 11667), 'app_tree.UnfinishedLeaf', 'UnfinishedLeaf', (['goal'], {}), '(goal)\n', (11661, 11667), False, 'from app_tree import UnfinishedLeaf\n'), ((12840, 12896), 'generator.Generator', 'Generator', (['gamma'], {'cache': 'Cache', 'normalizator': 'Normalizator'}), '(gamma, cache=Cache, normalizator=Normalizator)\n', (12849, 12896), False, 'from generator import Generator\n'), ((569, 1178), 'collections.OrderedDict', 'OrderedDict', (["[('s', (('a', '->', ('b', '->', 'c')), '->', (('a', '->', 'b'), '->', ('a',\n '->', 'c')))), ('k', ('a', '->', ('b', '->', 'a'))), ('seri', (('Dag',\n 'a', 'b'), '->', (('Dag', 'b', 'c'), '->', ('Dag', 'a', 'c')))), (\n 'para', (('Dag', 'a', 'b'), '->', (('Dag', 'c', 'd'), '->', ('Dag', (\n 'P', 'a', 'c'), ('P', 'b', 'd'))))), ('mkDag', (('a', '->', 'b'), '->',\n ('Dag', 'a', 'b'))), ('deDag', (('Dag', 'a', 'b'), '->', ('a', '->',\n 'b'))), ('mkP', ('a', '->', ('b', '->', ('P', 'a', 'b')))), ('fst', ((\n 'P', 'a', 'b'), '->', 'a')), ('snd', (('P', 'a', 'b'), '->', 'b'))]"], {}), "([('s', (('a', '->', ('b', '->', 'c')), '->', (('a', '->', 'b'),\n '->', ('a', '->', 'c')))), ('k', ('a', '->', ('b', '->', 'a'))), (\n 'seri', (('Dag', 'a', 'b'), '->', (('Dag', 'b', 'c'), '->', ('Dag', 'a',\n 'c')))), ('para', (('Dag', 'a', 'b'), '->', (('Dag', 'c', 'd'), '->', (\n 'Dag', ('P', 'a', 'c'), ('P', 'b', 'd'))))), ('mkDag', (('a', '->', 'b'\n ), '->', ('Dag', 'a', 'b'))), ('deDag', (('Dag', 'a', 'b'), '->', ('a',\n '->', 'b'))), ('mkP', ('a', '->', ('b', '->', ('P', 'a', 'b')))), (\n 'fst', (('P', 'a', 'b'), '->', 'a')), ('snd', (('P', 'a', 'b'), '->',\n 'b'))])\n", (580, 1178), False, 'from collections import OrderedDict\n'), ((1430, 1949), 'collections.OrderedDict', 'OrderedDict', (["[('xs', ('List', 'Bool')), ('s', (('a', '->', ('b', '->', 'c')), '->', ((\n 'a', '->', 'b'), '->', ('a', '->', 'c')))), ('k', ('a', '->', ('b',\n '->', 'a'))), ('and', ('Bool', '->', ('Bool', '->', 'Bool'))), ('or', (\n 'Bool', '->', ('Bool', '->', 'Bool'))), ('nand', ('Bool', '->', ('Bool',\n '->', 'Bool'))), ('nor', ('Bool', '->', ('Bool', '->', 'Bool'))), (\n 'foldr', (('a', '->', ('b', '->', 'b')), '->', ('b', '->', (('List',\n 'a'), '->', 'b')))), ('true', 'Bool'), ('false', 'Bool')]"], {}), "([('xs', ('List', 'Bool')), ('s', (('a', '->', ('b', '->', 'c')),\n '->', (('a', '->', 'b'), '->', ('a', '->', 'c')))), ('k', ('a', '->', (\n 'b', '->', 'a'))), ('and', ('Bool', '->', ('Bool', '->', 'Bool'))), (\n 'or', ('Bool', '->', ('Bool', '->', 'Bool'))), ('nand', ('Bool', '->',\n ('Bool', '->', 'Bool'))), ('nor', ('Bool', '->', ('Bool', '->', 'Bool')\n )), ('foldr', (('a', '->', ('b', '->', 'b')), '->', ('b', '->', ((\n 'List', 'a'), '->', 'b')))), ('true', 'Bool'), ('false', 'Bool')])\n", (1441, 1949), False, 'from collections import OrderedDict\n'), ((2341, 2403), 'collections.OrderedDict', 'OrderedDict', (["[('f', ('A', '->', 'B')), ('x', 'A'), ('y', 'B')]"], {}), "([('f', ('A', '->', 'B')), ('x', 'A'), ('y', 'B')])\n", (2352, 2403), False, 'from collections import OrderedDict\n'), ((2559, 2704), 'collections.OrderedDict', 'OrderedDict', (["[('s', (('a', '->', ('b', '->', 'c')), '->', (('a', '->', 'b'), '->', ('a',\n '->', 'c')))), ('k', ('a', '->', ('b', '->', 'a')))]"], {}), "([('s', (('a', '->', ('b', '->', 'c')), '->', (('a', '->', 'b'),\n '->', ('a', '->', 'c')))), ('k', ('a', '->', ('b', '->', 'a')))])\n", (2570, 
2704), False, 'from collections import OrderedDict\n'), ((2897, 2920), 'domain_fparity_apptree.d_general_even_parity', 'd_general_even_parity', ([], {}), '()\n', (2918, 2920), False, 'from domain_fparity_apptree import d_general_even_parity\n'), ((2958, 3015), 'generator.Generator', 'Generator', (['gamma'], {'normalizator': 'normalization.Normalizator'}), '(gamma, normalizator=normalization.Normalizator)\n', (2967, 3015), False, 'from generator import Generator\n'), ((3189, 3212), 'domain_fparity_apptree.d_general_even_parity', 'd_general_even_parity', ([], {}), '()\n', (3210, 3212), False, 'from domain_fparity_apptree import d_general_even_parity\n'), ((3249, 3309), 'generator.Generator', 'Generator', (['gamma'], {'normalizator': 'normalization.NormalizatorNop'}), '(gamma, normalizator=normalization.NormalizatorNop)\n', (3258, 3309), False, 'from generator import Generator\n'), ((3328, 3385), 'generator.Generator', 'Generator', (['gamma'], {'normalizator': 'normalization.Normalizator'}), '(gamma, normalizator=normalization.Normalizator)\n', (3337, 3385), False, 'from generator import Generator\n'), ((3404, 3480), 'generator.Generator', 'Generator', (['gamma'], {'normalizator': 'normalization.NormalizatorNop', 'cache': 'CacheNop'}), '(gamma, normalizator=normalization.NormalizatorNop, cache=CacheNop)\n', (3413, 3480), False, 'from generator import Generator\n'), ((3501, 3574), 'generator.Generator', 'Generator', (['gamma'], {'normalizator': 'normalization.Normalizator', 'cache': 'CacheNop'}), '(gamma, normalizator=normalization.Normalizator, cache=CacheNop)\n', (3510, 3574), False, 'from generator import Generator\n'), ((4838, 4849), 'time.time', 'time.time', ([], {}), '()\n', (4847, 4849), False, 'import time\n'), ((5005, 5016), 'time.time', 'time.time', ([], {}), '()\n', (5014, 5016), False, 'import time\n'), ((12327, 12383), 'generator.Generator', 'Generator', (['gamma'], {'cache': 'Cache', 'normalizator': 'Normalizator'}), '(gamma, cache=Cache, normalizator=Normalizator)\n', (12336, 12383), False, 'from generator import Generator\n'), ((12415, 12474), 'generator.Generator', 'Generator', (['gamma'], {'cache': 'Cache', 'normalizator': 'NormalizatorNop'}), '(gamma, cache=Cache, normalizator=NormalizatorNop)\n', (12424, 12474), False, 'from generator import Generator\n'), ((12505, 12564), 'generator.Generator', 'Generator', (['gamma'], {'cache': 'CacheNop', 'normalizator': 'Normalizator'}), '(gamma, cache=CacheNop, normalizator=Normalizator)\n', (12514, 12564), False, 'from generator import Generator\n'), ((12590, 12652), 'generator.Generator', 'Generator', (['gamma'], {'cache': 'CacheNop', 'normalizator': 'NormalizatorNop'}), '(gamma, cache=CacheNop, normalizator=NormalizatorNop)\n', (12599, 12652), False, 'from generator import Generator\n'), ((13074, 13085), 'time.time', 'time.time', ([], {}), '()\n', (13083, 13085), False, 'import time\n'), ((3704, 3727), 'generator_static.get_num', 'get_num', (['gamma', 'k', 'goal'], {}), '(gamma, k, goal)\n', (3711, 3727), False, 'from generator_static import ts, get_num\n'), ((5826, 5861), 'generator_static.ts', 'ts', (['generator.gamma', 'k', 'goal_typ', '(0)'], {}), '(generator.gamma, k, goal_typ, 0)\n', (5828, 5861), False, 'from generator_static import ts, get_num\n'), ((13433, 13444), 'time.time', 'time.time', ([], {}), '()\n', (13442, 13444), False, 'import time\n'), ((3776, 3797), 'generator_static.ts', 'ts', (['gamma', 'k', 'goal', '(0)'], {}), '(gamma, k, goal, 0)\n', (3778, 3797), False, 'from generator_static import ts, get_num\n')] |
"""The setup for the sphinx extension."""
from typing import Any
from sphinx.application import Sphinx
def setup_sphinx(app: Sphinx, load_parser=False):
"""Initialize all settings and transforms in Sphinx."""
# we do this separately to setup,
# so that it can be called by external packages like myst_nb
from myst_parser.config.main import MdParserConfig
from myst_parser.parsers.sphinx_ import MystParser
from myst_parser.sphinx_ext.directives import (
FigureMarkdown,
SubstitutionReferenceRole,
)
from myst_parser.sphinx_ext.mathjax import override_mathjax
from myst_parser.sphinx_ext.myst_refs import MystReferenceResolver
if load_parser:
app.add_source_suffix(".md", "markdown")
app.add_source_parser(MystParser)
app.add_role("sub-ref", SubstitutionReferenceRole())
app.add_directive("figure-md", FigureMarkdown)
app.add_post_transform(MystReferenceResolver)
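    # Mirror every MdParserConfig field as a `myst_<name>` Sphinx config value,
    # skipping fields that are marked as docutils-only.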
for name, default, field in MdParserConfig().as_triple():
if not field.metadata.get("docutils_only", False):
# TODO add types?
app.add_config_value(f"myst_{name}", default, "env", types=Any)
app.connect("builder-inited", create_myst_config)
app.connect("builder-inited", override_mathjax)
def create_myst_config(app):
from sphinx.util import logging
# Ignore type checkers because the attribute is dynamically assigned
from sphinx.util.console import bold # type: ignore[attr-defined]
from myst_parser import __version__
from myst_parser.config.main import MdParserConfig
logger = logging.getLogger(__name__)
values = {
name: app.config[f"myst_{name}"]
for name, _, field in MdParserConfig().as_triple()
if not field.metadata.get("docutils_only", False)
}
try:
app.env.myst_config = MdParserConfig(**values)
logger.info(bold("myst v%s:") + " %s", __version__, app.env.myst_config)
except (TypeError, ValueError) as error:
logger.error("myst configuration invalid: %s", error.args[0])
app.env.myst_config = MdParserConfig()
| [
"myst_parser.sphinx_ext.directives.SubstitutionReferenceRole",
"sphinx.util.console.bold",
"sphinx.util.logging.getLogger",
"myst_parser.config.main.MdParserConfig"
] | [((1610, 1637), 'sphinx.util.logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1627, 1637), False, 'from sphinx.util import logging\n'), ((822, 849), 'myst_parser.sphinx_ext.directives.SubstitutionReferenceRole', 'SubstitutionReferenceRole', ([], {}), '()\n', (847, 849), False, 'from myst_parser.sphinx_ext.directives import FigureMarkdown, SubstitutionReferenceRole\n'), ((1858, 1882), 'myst_parser.config.main.MdParserConfig', 'MdParserConfig', ([], {}), '(**values)\n', (1872, 1882), False, 'from myst_parser.config.main import MdParserConfig\n'), ((986, 1002), 'myst_parser.config.main.MdParserConfig', 'MdParserConfig', ([], {}), '()\n', (1000, 1002), False, 'from myst_parser.config.main import MdParserConfig\n'), ((2109, 2125), 'myst_parser.config.main.MdParserConfig', 'MdParserConfig', ([], {}), '()\n', (2123, 2125), False, 'from myst_parser.config.main import MdParserConfig\n'), ((1903, 1920), 'sphinx.util.console.bold', 'bold', (['"""myst v%s:"""'], {}), "('myst v%s:')\n", (1907, 1920), False, 'from sphinx.util.console import bold\n'), ((1725, 1741), 'myst_parser.config.main.MdParserConfig', 'MdParserConfig', ([], {}), '()\n', (1739, 1741), False, 'from myst_parser.config.main import MdParserConfig\n')] |
'''
Controller for sitemap
'''
import logging
from ckan.lib.base import BaseController
from ckan.model import Session, Package
from ckan.lib.helpers import url_for
from lxml import etree
from pylons import config, response
from pylons.decorators.cache import beaker_cache
import math
SITEMAP_NS = "http://www.sitemaps.org/schemas/sitemap/0.9"
log = logging.getLogger(__file__)
class SitemapController(BaseController):
@beaker_cache(expire=3600*24, type="dbm", invalidate_on_startup=True)
def _render_sitemap(self, page):
"""
Build the XML
"""
root = etree.Element("urlset", nsmap={None: SITEMAP_NS})
#pkgs = Session.query(Package).all()
pkgs = Session.query(Package).filter(Package.private == False).offset((int(page)-1)*25).limit(25)
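        # Only public datasets, paged 25 at a time: page N covers rows (N-1)*25 .. N*25-1.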
for pkg in pkgs:
url = etree.SubElement(root, 'url')
loc = etree.SubElement(url, 'loc')
pkg_url = url_for(controller='package', action="read", id = pkg.name)
loc.text = config.get('ckan.site_url') + pkg_url
lastmod = etree.SubElement(url, 'lastmod')
lastmod.text = pkg.latest_related_revision.timestamp.strftime('%Y-%m-%d')
for res in pkg.resources:
url = etree.SubElement(root, 'url')
loc = etree.SubElement(url, 'loc')
loc.text = config.get('ckan.site_url') + url_for(controller="package", action="resource_read", id = pkg.name, resource_id = res.id)
lastmod = etree.SubElement(url, 'lastmod')
lastmod.text = res.created.strftime('%Y-%m-%d')
response.headers['Content-type'] = 'text/xml'
return etree.tostring(root, pretty_print=True)
def view(self):
"""
        Build the sitemap index, with one sitemap entry per page of 25 datasets
"""
#Sitemap Index
root = etree.Element("sitemapindex", nsmap={None: SITEMAP_NS})
pkgs = Session.query(Package).filter(Package.private == False).count()
count = int(math.ceil(pkgs/25.5))+1
for i in range(1,count):
sitemap = etree.SubElement(root, 'sitemap')
loc = etree.SubElement(sitemap, 'loc')
loc.text = config.get('ckan.site_url') + url_for(controller="ckanext.sitemap.controller:SitemapController", action="index", page=i)
response.headers['Content-type'] = 'text/xml'
return etree.tostring(root, pretty_print=True)
#.limit() and .offset()
#return self._render_sitemap()
def index(self, page):
"""
        Render a single page of the sitemap (25 datasets per page)
"""
return self._render_sitemap(page) | [
"logging.getLogger",
"lxml.etree.Element",
"math.ceil",
"lxml.etree.SubElement",
"pylons.decorators.cache.beaker_cache",
"ckan.lib.helpers.url_for",
"pylons.config.get",
"ckan.model.Session.query",
"lxml.etree.tostring"
] | [((352, 379), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (369, 379), False, 'import logging\n'), ((428, 498), 'pylons.decorators.cache.beaker_cache', 'beaker_cache', ([], {'expire': '(3600 * 24)', 'type': '"""dbm"""', 'invalidate_on_startup': '(True)'}), "(expire=3600 * 24, type='dbm', invalidate_on_startup=True)\n", (440, 498), False, 'from pylons.decorators.cache import beaker_cache\n'), ((595, 644), 'lxml.etree.Element', 'etree.Element', (['"""urlset"""'], {'nsmap': '{None: SITEMAP_NS}'}), "('urlset', nsmap={None: SITEMAP_NS})\n", (608, 644), False, 'from lxml import etree\n'), ((1681, 1720), 'lxml.etree.tostring', 'etree.tostring', (['root'], {'pretty_print': '(True)'}), '(root, pretty_print=True)\n', (1695, 1720), False, 'from lxml import etree\n'), ((1839, 1894), 'lxml.etree.Element', 'etree.Element', (['"""sitemapindex"""'], {'nsmap': '{None: SITEMAP_NS}'}), "('sitemapindex', nsmap={None: SITEMAP_NS})\n", (1852, 1894), False, 'from lxml import etree\n'), ((2371, 2410), 'lxml.etree.tostring', 'etree.tostring', (['root'], {'pretty_print': '(True)'}), '(root, pretty_print=True)\n', (2385, 2410), False, 'from lxml import etree\n'), ((839, 868), 'lxml.etree.SubElement', 'etree.SubElement', (['root', '"""url"""'], {}), "(root, 'url')\n", (855, 868), False, 'from lxml import etree\n'), ((887, 915), 'lxml.etree.SubElement', 'etree.SubElement', (['url', '"""loc"""'], {}), "(url, 'loc')\n", (903, 915), False, 'from lxml import etree\n'), ((938, 995), 'ckan.lib.helpers.url_for', 'url_for', ([], {'controller': '"""package"""', 'action': '"""read"""', 'id': 'pkg.name'}), "(controller='package', action='read', id=pkg.name)\n", (945, 995), False, 'from ckan.lib.helpers import url_for\n'), ((1081, 1113), 'lxml.etree.SubElement', 'etree.SubElement', (['url', '"""lastmod"""'], {}), "(url, 'lastmod')\n", (1097, 1113), False, 'from lxml import etree\n'), ((2073, 2106), 'lxml.etree.SubElement', 'etree.SubElement', (['root', '"""sitemap"""'], {}), "(root, 'sitemap')\n", (2089, 2106), False, 'from lxml import etree\n'), ((2125, 2157), 'lxml.etree.SubElement', 'etree.SubElement', (['sitemap', '"""loc"""'], {}), "(sitemap, 'loc')\n", (2141, 2157), False, 'from lxml import etree\n'), ((1021, 1048), 'pylons.config.get', 'config.get', (['"""ckan.site_url"""'], {}), "('ckan.site_url')\n", (1031, 1048), False, 'from pylons import config, response\n'), ((1260, 1289), 'lxml.etree.SubElement', 'etree.SubElement', (['root', '"""url"""'], {}), "(root, 'url')\n", (1276, 1289), False, 'from lxml import etree\n'), ((1312, 1340), 'lxml.etree.SubElement', 'etree.SubElement', (['url', '"""loc"""'], {}), "(url, 'loc')\n", (1328, 1340), False, 'from lxml import etree\n'), ((1515, 1547), 'lxml.etree.SubElement', 'etree.SubElement', (['url', '"""lastmod"""'], {}), "(url, 'lastmod')\n", (1531, 1547), False, 'from lxml import etree\n'), ((1994, 2016), 'math.ceil', 'math.ceil', (['(pkgs / 25.5)'], {}), '(pkgs / 25.5)\n', (2003, 2016), False, 'import math\n'), ((2181, 2208), 'pylons.config.get', 'config.get', (['"""ckan.site_url"""'], {}), "('ckan.site_url')\n", (2191, 2208), False, 'from pylons import config, response\n'), ((2211, 2306), 'ckan.lib.helpers.url_for', 'url_for', ([], {'controller': '"""ckanext.sitemap.controller:SitemapController"""', 'action': '"""index"""', 'page': 'i'}), "(controller='ckanext.sitemap.controller:SitemapController', action=\n 'index', page=i)\n", (2218, 2306), False, 'from ckan.lib.helpers import url_for\n'), ((1368, 1395), 'pylons.config.get', 'config.get', 
(['"""ckan.site_url"""'], {}), "('ckan.site_url')\n", (1378, 1395), False, 'from pylons import config, response\n'), ((1398, 1488), 'ckan.lib.helpers.url_for', 'url_for', ([], {'controller': '"""package"""', 'action': '"""resource_read"""', 'id': 'pkg.name', 'resource_id': 'res.id'}), "(controller='package', action='resource_read', id=pkg.name,\n resource_id=res.id)\n", (1405, 1488), False, 'from ckan.lib.helpers import url_for\n'), ((1910, 1932), 'ckan.model.Session.query', 'Session.query', (['Package'], {}), '(Package)\n', (1923, 1932), False, 'from ckan.model import Session, Package\n'), ((705, 727), 'ckan.model.Session.query', 'Session.query', (['Package'], {}), '(Package)\n', (718, 727), False, 'from ckan.model import Session, Package\n')] |
import pandas as pd
import tempfile
class Utils:
@staticmethod
def load_data(path, index_col=0):
df = pd.read_csv(path, index_col=0)
return df
@staticmethod
def get_training_data(df):
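        # Training window is the 2014-01-01..2018-01-01 slice of the datetime index;
        # 'power' is the target column, everything else is used as features.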
training_data = pd.DataFrame(df["2014-01-01":"2018-01-01"])
X = training_data.drop(columns="power")
y = training_data["power"]
return X, y
@staticmethod
def get_validation_data(df):
validation_data = pd.DataFrame(df["2018-01-01":"2019-01-01"])
X = validation_data.drop(columns="power")
y = validation_data["power"]
return X, y
@staticmethod
def get_temporary_directory_path(prefix, suffix):
"""
Get a temporary directory and files for artifacts
:param prefix: name of the file
:param suffix: .csv, .txt, .png etc
:return: object to tempfile.
"""
temp = tempfile.NamedTemporaryFile(prefix=prefix, suffix=suffix)
return temp
@staticmethod
def print_pandas_dataset(d, n=5):
"""
        Given a Pandas DataFrame, print its dimensions and the first n rows
        :param d: Pandas DataFrame
:return: None
"""
print("rows = %d; columns=%d" % (d.shape[0], d.shape[1]))
print(d.head(n))
| [
"pandas.DataFrame",
"pandas.read_csv",
"tempfile.NamedTemporaryFile"
] | [((115, 145), 'pandas.read_csv', 'pd.read_csv', (['path'], {'index_col': '(0)'}), '(path, index_col=0)\n', (126, 145), True, 'import pandas as pd\n'), ((232, 275), 'pandas.DataFrame', 'pd.DataFrame', (["df['2014-01-01':'2018-01-01']"], {}), "(df['2014-01-01':'2018-01-01'])\n", (244, 275), True, 'import pandas as pd\n'), ((447, 490), 'pandas.DataFrame', 'pd.DataFrame', (["df['2018-01-01':'2019-01-01']"], {}), "(df['2018-01-01':'2019-01-01'])\n", (459, 490), True, 'import pandas as pd\n'), ((868, 925), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'prefix': 'prefix', 'suffix': 'suffix'}), '(prefix=prefix, suffix=suffix)\n', (895, 925), False, 'import tempfile\n')] |
import itertools
import logging
import time
import numpy as np
import pytest
from mpmath import mp
from qecsim import paulitools as pt
from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel
from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder
def _is_close(a, b, rtol=1e-05, atol=1e-08):
# np.isclose for mp.mpf, i.e. absolute(a - b) <= (atol + rtol * absolute(b))
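    # Falls back to the scalar comparison when the inputs are not iterable
    # (itertools.zip_longest raises TypeError for non-iterables).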
try:
return [mp.almosteq(le, ri, rel_eps=rtol, abs_eps=atol) for le, ri in itertools.zip_longest(a, b)]
except TypeError:
return mp.almosteq(a, b, rel_eps=rtol, abs_eps=atol)
# @pytest.mark.perf
# @pytest.mark.parametrize('error_pauli, chi', [
# (PlanarCode(29, 29).new_pauli().site('X', (1, 3), (4, 2)).site('Z', (6, 4), (1, 1)), 8),
# ])
# def test_planar_rmps_perf(error_pauli, chi):
# with CliRunner().isolated_filesystem():
# error = error_pauli.to_bsf()
# code = error_pauli.code
# decoder = PlanarRMPSDecoder(chi=chi)
# syndrome = pt.bsp(error, code.stabilizers.T)
# for i in range(5):
# print('# decode ', i)
# recovery = decoder.decode(code, syndrome)
# assert np.array_equal(pt.bsp(recovery, code.stabilizers.T), syndrome), (
# 'recovery {} does not give the same syndrome as the error {}'.format(recovery, error))
# assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
# 'recovery ^ error ({} ^ {}) does not commute with stabilizers.'.format(recovery, error))
def test_planar_rmps_decoder_properties():
decoder = PlanarRMPSDecoder(chi=8, mode='r', stp=0.5, tol=1e-14)
assert isinstance(decoder.label, str)
assert isinstance(repr(decoder), str)
assert isinstance(str(decoder), str)
@pytest.mark.parametrize('chi, mode, stp, tol', [
(None, 'c', None, None),
(6, 'c', None, None),
(None, 'r', None, None),
(None, 'a', None, None),
(None, 'c', 0.5, None),
(None, 'c', None, 0.1),
(None, 'c', None, 1),
])
def test_planar_rmps_decoder_new_valid_parameters(chi, mode, stp, tol):
PlanarRMPSDecoder(chi=chi, mode=mode, stp=stp, tol=tol) # no error raised
@pytest.mark.parametrize('chi, mode, stp, tol', [
(-1, 'c', None, None), # invalid chi
(0.1, 'c', None, None), # invalid chi
('asdf', 'c', None, None), # invalid chi
(None, None, None, None), # invalid mode
(None, 't', None, None), # invalid mode
(None, 2, None, None), # invalid mode
(None, 'c', -0.1, None), # invalid stp
(None, 'c', 1.1, None), # invalid stp
(None, 'c', 'asdf', None), # invalid stp
(None, 'c', None, -1), # invalid tol
(None, 'c', None, 'asdf'), # invalid tol
])
def test_planar_rmps_decoder_new_invalid_parameters(chi, mode, stp, tol):
with pytest.raises((ValueError, TypeError), match=r"^PlanarRMPSDecoder") as exc_info:
PlanarRMPSDecoder(chi=chi, mode=mode, stp=stp, tol=tol)
print(exc_info)
@pytest.mark.parametrize('error_pauli', [
PlanarCode(3, 3).new_pauli().site('X', (2, 0)).site('Y', (3, 3)),
PlanarCode(5, 5).new_pauli().site('X', (3, 1)).site('Y', (2, 2)).site('Z', (6, 4)),
PlanarCode(7, 7).new_pauli().site('X', (4, 2)).site('Y', (3, 3)).site('Z', (8, 4), (7, 3)),
])
def test_planar_rmps_decoder_sample_recovery(error_pauli):
error = error_pauli.to_bsf()
code = error_pauli.code
syndrome = pt.bsp(error, code.stabilizers.T)
recovery_pauli = PlanarRMPSDecoder.sample_recovery(code, syndrome)
recovery = recovery_pauli.to_bsf()
assert np.array_equal(pt.bsp(recovery, code.stabilizers.T), syndrome), (
'recovery {} does not give the same syndrome as the error {}'.format(recovery, error))
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with stabilizers.'.format(recovery, error))
@pytest.mark.parametrize('mode, rtol', [
('c', 1e-4), # contract by column, tolerance is O(0.0001). Tolerance is better than with Bravyi results O(1).
('r', 1e-4), # contract by row.
('a', 1e-4), # averaged. Tolerance unchanged because symmetry is same for row and column.
])
def test_planar_rmps_decoder_cosets_probability_inequality(mode, rtol):
code = PlanarCode(25, 25)
decoder = PlanarRMPSDecoder(chi=5, mode=mode)
# probabilities
prob_dist = DepolarizingErrorModel().probability_distribution(0.1)
# coset probabilities for null Pauli
coset_ps, _ = decoder._coset_probabilities(prob_dist, code.new_pauli())
coset_i_p, coset_x_p, coset_y_p, coset_z_p = coset_ps
# expect Pr(IG) > Pr(XG) ~= Pr(ZG) > Pr(YG)
print('{} > {} ~= {} > {}. rtol={}'.format(
coset_i_p, coset_x_p, coset_z_p, coset_y_p, abs(coset_x_p - coset_z_p) / abs(coset_z_p)))
print('types: Pr(IG):{}, Pr(XG):{}, Pr(ZG):{}, Pr(YG):{}'.format(
type(coset_i_p), type(coset_x_p), type(coset_z_p), type(coset_y_p)))
    assert coset_i_p > coset_x_p, 'Coset probabilities do not satisfy Pr(IG) > Pr(XG)'
    assert coset_i_p > coset_z_p, 'Coset probabilities do not satisfy Pr(IG) > Pr(ZG)'
    assert _is_close(coset_x_p, coset_z_p, rtol=rtol, atol=0), 'Coset probabilities do not satisfy Pr(XG) ~= Pr(ZG)'
    assert coset_x_p > coset_y_p, 'Coset probabilities do not satisfy Pr(XG) > Pr(YG)'
    assert coset_z_p > coset_y_p, 'Coset probabilities do not satisfy Pr(ZG) > Pr(YG)'
@pytest.mark.parametrize('shape, mode', [
((4, 4), 'c'),
((3, 4), 'c'),
((4, 3), 'c'),
((4, 4), 'r'),
((3, 4), 'r'),
((4, 3), 'r'),
])
def test_planar_rmps_decoder_cosets_probability_pair_optimisation(shape, mode):
code = PlanarCode(*shape)
decoder = PlanarRMPSDecoder(mode=mode)
# probabilities
prob_dist = BiasedDepolarizingErrorModel(bias=10).probability_distribution(0.1)
# coset probabilities for null Pauli
coset_i_ps, _ = decoder._coset_probabilities(prob_dist, code.new_pauli())
# X
coset_x_ps, _ = decoder._coset_probabilities(prob_dist, code.new_pauli().logical_x())
# expect Pr(iIG) ~= Pr(xXG)
assert _is_close(coset_i_ps[0], coset_x_ps[1], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iIG) ~= Pr(xXG)')
# expect Pr(iXG) ~= Pr(xIG)
assert _is_close(coset_i_ps[1], coset_x_ps[0], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iXG) ~= Pr(xIG)')
# expect Pr(iYG) ~= Pr(xZG)
assert _is_close(coset_i_ps[2], coset_x_ps[3], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iYG) ~= Pr(xZG)')
# expect Pr(iZG) ~= Pr(xYG)
assert _is_close(coset_i_ps[3], coset_x_ps[2], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iZG) ~= Pr(xYG)')
# Y
coset_y_ps, _ = decoder._coset_probabilities(prob_dist, code.new_pauli().logical_x().logical_z())
# expect Pr(iIG) ~= Pr(yYG)
assert _is_close(coset_i_ps[0], coset_y_ps[2], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iIG) ~= Pr(yYG)')
# expect Pr(iXG) ~= Pr(yZG)
assert _is_close(coset_i_ps[1], coset_y_ps[3], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iXG) ~= Pr(yZG)')
# expect Pr(iYG) ~= Pr(yIG)
assert _is_close(coset_i_ps[2], coset_y_ps[0], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iYG) ~= Pr(yIG)')
# expect Pr(iZG) ~= Pr(yXG)
assert _is_close(coset_i_ps[3], coset_y_ps[1], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iZG) ~= Pr(yXG)')
# Z
coset_z_ps, _ = decoder._coset_probabilities(prob_dist, code.new_pauli().logical_z())
# expect Pr(iIG) ~= Pr(zZG)
assert _is_close(coset_i_ps[0], coset_z_ps[3], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iIG) ~= Pr(zZG)')
# expect Pr(iXG) ~= Pr(zYG)
assert _is_close(coset_i_ps[1], coset_z_ps[2], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iXG) ~= Pr(zYG)')
# expect Pr(iYG) ~= Pr(zXG)
assert _is_close(coset_i_ps[2], coset_z_ps[1], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iYG) ~= Pr(zXG)')
# expect Pr(iZG) ~= Pr(zIG)
assert _is_close(coset_i_ps[3], coset_z_ps[0], rtol=1e-15, atol=0), (
        'Coset probabilities do not satisfy Pr(iZG) ~= Pr(zIG)')
@pytest.mark.parametrize('sample_pauli_f, sample_pauli_g', [
(PlanarCode(5, 5).new_pauli(), PlanarCode(5, 5).new_pauli()),
(PlanarCode(5, 5).new_pauli(), PlanarCode(5, 5).new_pauli().plaquette((1, 4)).plaquette((4, 5))),
(PlanarCode(5, 5).new_pauli().logical_x(),
PlanarCode(5, 5).new_pauli().logical_x().plaquette((0, 5)).plaquette((2, 5)).plaquette((4, 5))),
(PlanarCode(5, 5).new_pauli().logical_z(),
PlanarCode(5, 5).new_pauli().logical_z().plaquette((3, 0)).plaquette((3, 2)).plaquette((3, 4))),
])
def test_planar_rmps_decoder_cosets_probability_equivalence(sample_pauli_f, sample_pauli_g):
decoder = PlanarRMPSDecoder(chi=8)
# probabilities
prob_dist = DepolarizingErrorModel().probability_distribution(0.1)
# coset probabilities
coset_f_ps, _ = decoder._coset_probabilities(prob_dist, sample_pauli_f)
coset_g_ps, _ = decoder._coset_probabilities(prob_dist, sample_pauli_g)
print('#Pr(fG)=', coset_f_ps)
print('#Pr(gG)=', coset_g_ps)
assert all(_is_close(coset_f_ps, coset_g_ps, rtol=1e-9, atol=0)), (
        'Coset probabilities do not satisfy Pr(fG) ~= Pr(gG)')
@pytest.mark.parametrize('error_pauli, chi', [
(PlanarCode(2, 2).new_pauli().site('X', (0, 0)), None),
(PlanarCode(4, 4).new_pauli().site('X', (2, 2), (4, 2)), None),
(PlanarCode(5, 5).new_pauli().site('X', (2, 2), (4, 2)), 4),
(PlanarCode(5, 5).new_pauli().site('X', (2, 2), (4, 2)).site('Z', (6, 4), (2, 0)), 6),
(PlanarCode(5, 5).new_pauli().site('X', (1, 3), (4, 2)).site('Z', (6, 4), (1, 1)), 8),
(PlanarCode(3, 5).new_pauli().site('X', (1, 3), (4, 2)).site('Z', (2, 4), (1, 7)), 6),
(PlanarCode(5, 3).new_pauli().site('X', (1, 3), (4, 2)).site('Z', (8, 4), (3, 1)), 6),
(PlanarCode(5, 3).new_pauli().site('Y', (1, 3), (4, 2)).site('Z', (8, 4), (6, 4), (4, 4)), 6),
(PlanarCode(5, 3).new_pauli()
.site('Y', (1, 3), (3, 3), (5, 3))
.site('Z', (8, 4), (6, 4), (4, 4)), 6),
(PlanarCode(5, 3).new_pauli().site('X', (1, 3), (3, 3), (5, 3), (8, 4), (6, 4), (4, 4)), 6),
(PlanarCode(5, 3).new_pauli().site('Y', (1, 3), (3, 3), (5, 3), (8, 4), (6, 4), (4, 4)), 6),
(PlanarCode(5, 3).new_pauli().site('Z', (1, 3), (3, 3), (5, 3), (8, 4), (6, 4), (4, 4)), 6),
])
def test_planar_rmps_decoder_decode(error_pauli, chi, caplog):
with caplog.at_level(logging.WARN):
error = error_pauli.to_bsf()
code = error_pauli.code
syndrome = pt.bsp(error, code.stabilizers.T)
decoder = PlanarRMPSDecoder(chi=chi)
recovery = decoder.decode(code, syndrome)
assert np.array_equal(pt.bsp(recovery, code.stabilizers.T), syndrome), (
'recovery {} does not give the same syndrome as the error {}'.format(recovery, error))
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with stabilizers.'.format(recovery, error))
assert len(caplog.records) == 0, 'Unexpected log messages: {}'.format(caplog.text)
def test_planar_rmps_decoder_small_codes_exact_approx():
code = PlanarCode(4, 4)
exact_decoder = PlanarRMPSDecoder()
approx_decoder = PlanarRMPSDecoder(chi=8)
identity = code.new_pauli()
# probabilities
prob_dist = BiasedDepolarizingErrorModel(bias=10).probability_distribution(probability=0.1)
# coset probabilities
exact_coset_ps, _ = exact_decoder._coset_probabilities(prob_dist, identity)
approx_coset_ps, _ = approx_decoder._coset_probabilities(prob_dist, identity)
print('#exact Pr(G)=', exact_coset_ps)
print('#approx Pr(G)=', approx_coset_ps)
assert all(_is_close(exact_coset_ps, approx_coset_ps, rtol=1e-11, atol=0)), (
        'Coset probabilities do not satisfy exact Pr(G) ~= approx Pr(G)')
def test_planar_rmps_decoder_correlated_errors():
# check MPS decoder successfully decodes for error
# I--+--I--+--I
# I I
# Y--+--I--+--Y
# I I
# I--+--I--+--I
# and MWPM decoder fails as expected
code = PlanarCode(3, 3)
error = code.new_pauli().site('Y', (2, 0), (2, 4)).to_bsf()
syndrome = pt.bsp(error, code.stabilizers.T)
# MPS decoder
decoder = PlanarRMPSDecoder()
recovery = decoder.decode(code, syndrome)
# check recovery ^ error commutes with stabilizers (by construction)
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with stabilizers for MPS decoder.'.format(recovery, error))
# check recovery ^ error commutes with logicals (we expect this to succeed for MPS)
assert np.all(pt.bsp(recovery ^ error, code.logicals.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with logicals for MPS decoder.'.format(recovery, error))
# MWPM decoder
decoder = PlanarMWPMDecoder()
recovery = decoder.decode(code, syndrome)
# check recovery ^ error commutes with stabilizers (by construction)
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with stabilizers for MWPM decoder.'.format(recovery, error))
# check recovery ^ error commutes with logicals (we expect this to fail for MWPM)
assert not np.all(pt.bsp(recovery ^ error, code.logicals.T) == 0), (
'recovery ^ error ({} ^ {}) does commute with logicals for MWPM decoder.'.format(recovery, error))
def test_planar_rmps_decoder_cosets_probability_stp():
# parameters
sample = PlanarCode(3, 4).new_pauli().site('Y', (2, 0), (2, 4))
prob_dist = DepolarizingErrorModel().probability_distribution(0.1)
# coset probabilities exact
exact_coset_ps, _ = PlanarRMPSDecoder(mode='a')._coset_probabilities(prob_dist, sample)
print('#exact_coset_ps=', exact_coset_ps)
# coset probabilities approx (chi=6)
approx_coset_ps, _ = PlanarRMPSDecoder(chi=6, mode='a')._coset_probabilities(prob_dist, sample)
print('#approx_coset_ps=', approx_coset_ps)
assert all(_is_close(exact_coset_ps, approx_coset_ps, rtol=1e-14, atol=0)), (
'approx_coset_ps not close to exact_coset_ps')
# coset probabilities approx (chi=6, stp=0)
coset_ps, _ = PlanarRMPSDecoder(chi=6, mode='a', stp=0)._coset_probabilities(prob_dist, sample)
print('#coset_ps (chi=6, stp=0)=', coset_ps)
assert all(_is_close(approx_coset_ps, coset_ps, rtol=0, atol=0)), (
'coset_ps (chi=6, stp=0) not equal to approx_coset_ps')
# coset probabilities approx (chi=6, stp=1)
coset_ps, _ = PlanarRMPSDecoder(chi=6, mode='a', stp=1)._coset_probabilities(prob_dist, sample)
print('#coset_ps (chi=6, stp=1)=', coset_ps)
assert all(_is_close(exact_coset_ps, coset_ps, rtol=0, atol=0)), (
'coset_ps (chi=6, stp=1) not equal to exact_coset_ps')
# coset probabilities approx (chi=6, stp=0.5)
coset_ps, _ = PlanarRMPSDecoder(chi=6, mode='a', stp=0.5)._coset_probabilities(prob_dist, sample)
print('#coset_ps (chi=6, stp=0.5)=', coset_ps)
assert all(_is_close(exact_coset_ps, coset_ps, rtol=1e-10, atol=0)), (
'coset_ps (chi=6, stp=0.5) not close to exact_coset_ps')
assert all(_is_close(approx_coset_ps, coset_ps, rtol=1e-10, atol=0)), (
'coset_ps (chi=6, stp=0.5) not close to approx_coset_ps')
@pytest.mark.parametrize('error_pauli', [
PlanarCode(3, 3).new_pauli().site('X', (2, 0)).site('Y', (3, 3)),
PlanarCode(5, 5).new_pauli().site('X', (3, 1)).site('Y', (2, 2)).site('Z', (6, 4)),
PlanarCode(7, 7).new_pauli().site('X', (4, 2)).site('Y', (3, 3)).site('Z', (8, 4), (7, 3)),
])
def test_planar_rmps_mps_accuracy(error_pauli):
error = error_pauli.to_bsf()
code = error_pauli.code
syndrome = pt.bsp(error, code.stabilizers.T)
recovery_pauli = PlanarRMPSDecoder.sample_recovery(code, syndrome)
prob_dist = DepolarizingErrorModel().probability_distribution(0.1)
rmps_coset_ps, _ = PlanarRMPSDecoder(chi=8)._coset_probabilities(prob_dist, recovery_pauli)
print('#rmps_coset_ps (chi=8)=', rmps_coset_ps)
mps_coset_ps, _ = PlanarMPSDecoder(chi=8)._coset_probabilities(prob_dist, recovery_pauli)
print('#mps_coset_ps (chi=8)=', mps_coset_ps)
assert all(_is_close(rmps_coset_ps, mps_coset_ps, rtol=1e-1, atol=0)), (
'rmps_coset_ps (chi=8) not close to mps_coset_ps (chi=8)')
@pytest.mark.perf
def test_planar_rmps_mwpm_performance():
n_run = 5
code = PlanarCode(25, 25)
error_model = DepolarizingErrorModel()
error_probability = 0.4
def _timed_runs(decoder):
start_time = time.time()
for _ in range(n_run):
error = error_model.generate(code, error_probability)
syndrome = pt.bsp(error, code.stabilizers.T)
recovery = decoder.decode(code, syndrome)
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with stabilizers.'.format(recovery, error))
return time.time() - start_time
rmps_time = _timed_runs(PlanarRMPSDecoder(chi=8))
mwpm_time = _timed_runs(PlanarMWPMDecoder())
# expect mps_time < mwpm_time
print('rmps_time = {} < {} = mwpm_time'.format(rmps_time, mwpm_time))
assert rmps_time < mwpm_time, 'RMPS decoder slower than MWPM decoder'
@pytest.mark.perf
def test_planar_rmps_mps_performance():
n_run = 5
code = PlanarCode(21, 21)
error_model = DepolarizingErrorModel()
error_probability = 0.2
def _timed_runs(decoder):
start_time = time.time()
for _ in range(n_run):
error = error_model.generate(code, error_probability)
syndrome = pt.bsp(error, code.stabilizers.T)
recovery = decoder.decode(code, syndrome)
assert np.all(pt.bsp(recovery ^ error, code.stabilizers.T) == 0), (
'recovery ^ error ({} ^ {}) does not commute with stabilizers.'.format(recovery, error))
return time.time() - start_time
rmps_time = _timed_runs(PlanarRMPSDecoder(chi=8))
mps_time = _timed_runs(PlanarMPSDecoder(chi=8))
# expect rmps_time < mps_time
print('rmps_time = {} < {} = mps_time'.format(rmps_time, mps_time))
assert rmps_time < mps_time, 'RMPS decoder slower than MPS decoder'
| [
"qecsim.models.planar.PlanarMPSDecoder",
"qecsim.models.planar.PlanarRMPSDecoder",
"mpmath.mp.almosteq",
"qecsim.models.generic.DepolarizingErrorModel",
"itertools.zip_longest",
"qecsim.models.planar.PlanarMWPMDecoder",
"pytest.mark.parametrize",
"qecsim.paulitools.bsp",
"qecsim.models.planar.PlanarCode",
"pytest.raises",
"qecsim.models.planar.PlanarRMPSDecoder.sample_recovery",
"time.time",
"qecsim.models.generic.BiasedDepolarizingErrorModel"
] | [((1834, 2058), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chi, mode, stp, tol"""', "[(None, 'c', None, None), (6, 'c', None, None), (None, 'r', None, None), (\n None, 'a', None, None), (None, 'c', 0.5, None), (None, 'c', None, 0.1),\n (None, 'c', None, 1)]"], {}), "('chi, mode, stp, tol', [(None, 'c', None, None), (6,\n 'c', None, None), (None, 'r', None, None), (None, 'a', None, None), (\n None, 'c', 0.5, None), (None, 'c', None, 0.1), (None, 'c', None, 1)])\n", (1857, 2058), False, 'import pytest\n'), ((2235, 2574), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""chi, mode, stp, tol"""', "[(-1, 'c', None, None), (0.1, 'c', None, None), ('asdf', 'c', None, None),\n (None, None, None, None), (None, 't', None, None), (None, 2, None, None\n ), (None, 'c', -0.1, None), (None, 'c', 1.1, None), (None, 'c', 'asdf',\n None), (None, 'c', None, -1), (None, 'c', None, 'asdf')]"], {}), "('chi, mode, stp, tol', [(-1, 'c', None, None), (0.1,\n 'c', None, None), ('asdf', 'c', None, None), (None, None, None, None),\n (None, 't', None, None), (None, 2, None, None), (None, 'c', -0.1, None),\n (None, 'c', 1.1, None), (None, 'c', 'asdf', None), (None, 'c', None, -1\n ), (None, 'c', None, 'asdf')])\n", (2258, 2574), False, 'import pytest\n'), ((3945, 4034), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""mode, rtol"""', "[('c', 0.0001), ('r', 0.0001), ('a', 0.0001)]"], {}), "('mode, rtol', [('c', 0.0001), ('r', 0.0001), ('a', \n 0.0001)])\n", (3968, 4034), False, 'import pytest\n'), ((5457, 5592), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""shape, mode"""', "[((4, 4), 'c'), ((3, 4), 'c'), ((4, 3), 'c'), ((4, 4), 'r'), ((3, 4), 'r'),\n ((4, 3), 'r')]"], {}), "('shape, mode', [((4, 4), 'c'), ((3, 4), 'c'), ((4, \n 3), 'c'), ((4, 4), 'r'), ((3, 4), 'r'), ((4, 3), 'r')])\n", (5480, 5592), False, 'import pytest\n'), ((1651, 1705), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(8)', 'mode': '"""r"""', 'stp': '(0.5)', 'tol': '(1e-14)'}), "(chi=8, mode='r', stp=0.5, tol=1e-14)\n", (1668, 1705), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((2157, 2212), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': 'chi', 'mode': 'mode', 'stp': 'stp', 'tol': 'tol'}), '(chi=chi, mode=mode, stp=stp, tol=tol)\n', (2174, 2212), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((3457, 3490), 'qecsim.paulitools.bsp', 'pt.bsp', (['error', 'code.stabilizers.T'], {}), '(error, code.stabilizers.T)\n', (3463, 3490), True, 'from qecsim import paulitools as pt\n'), ((3512, 3561), 'qecsim.models.planar.PlanarRMPSDecoder.sample_recovery', 'PlanarRMPSDecoder.sample_recovery', (['code', 'syndrome'], {}), '(code, syndrome)\n', (3545, 3561), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((4318, 4336), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(25)', '(25)'], {}), '(25, 25)\n', (4328, 4336), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((4351, 4386), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(5)', 'mode': 'mode'}), '(chi=5, mode=mode)\n', (4368, 4386), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((5706, 5724), 'qecsim.models.planar.PlanarCode', 'PlanarCode', 
(['*shape'], {}), '(*shape)\n', (5716, 5724), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((5739, 5767), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'mode': 'mode'}), '(mode=mode)\n', (5756, 5767), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8976, 9000), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (8993, 9000), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((11428, 11444), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(4)', '(4)'], {}), '(4, 4)\n', (11438, 11444), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((11465, 11484), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {}), '()\n', (11482, 11484), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((11506, 11530), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (11523, 11530), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((12363, 12379), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(3)', '(3)'], {}), '(3, 3)\n', (12373, 12379), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((12459, 12492), 'qecsim.paulitools.bsp', 'pt.bsp', (['error', 'code.stabilizers.T'], {}), '(error, code.stabilizers.T)\n', (12465, 12492), True, 'from qecsim import paulitools as pt\n'), ((12525, 12544), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {}), '()\n', (12542, 12544), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((13149, 13168), 'qecsim.models.planar.PlanarMWPMDecoder', 'PlanarMWPMDecoder', ([], {}), '()\n', (13166, 13168), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((16028, 16061), 'qecsim.paulitools.bsp', 'pt.bsp', (['error', 'code.stabilizers.T'], {}), '(error, code.stabilizers.T)\n', (16034, 16061), True, 'from qecsim import paulitools as pt\n'), ((16083, 16132), 'qecsim.models.planar.PlanarRMPSDecoder.sample_recovery', 'PlanarRMPSDecoder.sample_recovery', (['code', 'syndrome'], {}), '(code, syndrome)\n', (16116, 16132), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((16726, 16744), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(25)', '(25)'], {}), '(25, 25)\n', (16736, 16744), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((16763, 16787), 'qecsim.models.generic.DepolarizingErrorModel', 'DepolarizingErrorModel', ([], {}), '()\n', (16785, 16787), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((17684, 17702), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(21)', '(21)'], {}), '(21, 21)\n', (17694, 17702), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((17721, 17745), 'qecsim.models.generic.DepolarizingErrorModel', 'DepolarizingErrorModel', ([], {}), '()\n', (17743, 17745), False, 'from qecsim.models.generic import 
DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((2856, 2922), 'pytest.raises', 'pytest.raises', (['(ValueError, TypeError)'], {'match': '"""^PlanarRMPSDecoder"""'}), "((ValueError, TypeError), match='^PlanarRMPSDecoder')\n", (2869, 2922), False, 'import pytest\n'), ((2945, 3000), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': 'chi', 'mode': 'mode', 'stp': 'stp', 'tol': 'tol'}), '(chi=chi, mode=mode, stp=stp, tol=tol)\n', (2962, 3000), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((3627, 3663), 'qecsim.paulitools.bsp', 'pt.bsp', (['recovery', 'code.stabilizers.T'], {}), '(recovery, code.stabilizers.T)\n', (3633, 3663), True, 'from qecsim import paulitools as pt\n'), ((10781, 10814), 'qecsim.paulitools.bsp', 'pt.bsp', (['error', 'code.stabilizers.T'], {}), '(error, code.stabilizers.T)\n', (10787, 10814), True, 'from qecsim import paulitools as pt\n'), ((10833, 10859), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': 'chi'}), '(chi=chi)\n', (10850, 10859), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((16868, 16879), 'time.time', 'time.time', ([], {}), '()\n', (16877, 16879), False, 'import time\n'), ((17342, 17366), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (17359, 17366), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((17396, 17415), 'qecsim.models.planar.PlanarMWPMDecoder', 'PlanarMWPMDecoder', ([], {}), '()\n', (17413, 17415), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((17826, 17837), 'time.time', 'time.time', ([], {}), '()\n', (17835, 17837), False, 'import time\n'), ((18300, 18324), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (18317, 18324), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((18353, 18376), 'qecsim.models.planar.PlanarMPSDecoder', 'PlanarMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (18369, 18376), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((477, 524), 'mpmath.mp.almosteq', 'mp.almosteq', (['le', 'ri'], {'rel_eps': 'rtol', 'abs_eps': 'atol'}), '(le, ri, rel_eps=rtol, abs_eps=atol)\n', (488, 524), False, 'from mpmath import mp\n'), ((605, 650), 'mpmath.mp.almosteq', 'mp.almosteq', (['a', 'b'], {'rel_eps': 'rtol', 'abs_eps': 'atol'}), '(a, b, rel_eps=rtol, abs_eps=atol)\n', (616, 650), False, 'from mpmath import mp\n'), ((3791, 3835), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.stabilizers.T'], {}), '(recovery ^ error, code.stabilizers.T)\n', (3797, 3835), True, 'from qecsim import paulitools as pt\n'), ((4423, 4447), 'qecsim.models.generic.DepolarizingErrorModel', 'DepolarizingErrorModel', ([], {}), '()\n', (4445, 4447), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((5804, 5841), 'qecsim.models.generic.BiasedDepolarizingErrorModel', 'BiasedDepolarizingErrorModel', ([], {'bias': '(10)'}), '(bias=10)\n', (5832, 5841), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((9037, 9061), 'qecsim.models.generic.DepolarizingErrorModel', 'DepolarizingErrorModel', ([], {}), '()\n', 
(9059, 9061), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((10940, 10976), 'qecsim.paulitools.bsp', 'pt.bsp', (['recovery', 'code.stabilizers.T'], {}), '(recovery, code.stabilizers.T)\n', (10946, 10976), True, 'from qecsim import paulitools as pt\n'), ((11599, 11636), 'qecsim.models.generic.BiasedDepolarizingErrorModel', 'BiasedDepolarizingErrorModel', ([], {'bias': '(10)'}), '(bias=10)\n', (11627, 11636), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((12682, 12726), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.stabilizers.T'], {}), '(recovery ^ error, code.stabilizers.T)\n', (12688, 12726), True, 'from qecsim import paulitools as pt\n'), ((12955, 12996), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.logicals.T'], {}), '(recovery ^ error, code.logicals.T)\n', (12961, 12996), True, 'from qecsim import paulitools as pt\n'), ((13306, 13350), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.stabilizers.T'], {}), '(recovery ^ error, code.stabilizers.T)\n', (13312, 13350), True, 'from qecsim import paulitools as pt\n'), ((13898, 13922), 'qecsim.models.generic.DepolarizingErrorModel', 'DepolarizingErrorModel', ([], {}), '()\n', (13920, 13922), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((14010, 14037), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'mode': '"""a"""'}), "(mode='a')\n", (14027, 14037), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((14191, 14225), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(6)', 'mode': '"""a"""'}), "(chi=6, mode='a')\n", (14208, 14225), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((14518, 14559), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(6)', 'mode': '"""a"""', 'stp': '(0)'}), "(chi=6, mode='a', stp=0)\n", (14535, 14559), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((14852, 14893), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(6)', 'mode': '"""a"""', 'stp': '(1)'}), "(chi=6, mode='a', stp=1)\n", (14869, 14893), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((15186, 15229), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(6)', 'mode': '"""a"""', 'stp': '(0.5)'}), "(chi=6, mode='a', stp=0.5)\n", (15203, 15229), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((16149, 16173), 'qecsim.models.generic.DepolarizingErrorModel', 'DepolarizingErrorModel', ([], {}), '()\n', (16171, 16173), False, 'from qecsim.models.generic import DepolarizingErrorModel, BiasedDepolarizingErrorModel\n'), ((16227, 16251), 'qecsim.models.planar.PlanarRMPSDecoder', 'PlanarRMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (16244, 16251), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((16374, 16397), 'qecsim.models.planar.PlanarMPSDecoder', 'PlanarMPSDecoder', ([], {'chi': '(8)'}), '(chi=8)\n', (16390, 16397), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((17000, 
17033), 'qecsim.paulitools.bsp', 'pt.bsp', (['error', 'code.stabilizers.T'], {}), '(error, code.stabilizers.T)\n', (17006, 17033), True, 'from qecsim import paulitools as pt\n'), ((17288, 17299), 'time.time', 'time.time', ([], {}), '()\n', (17297, 17299), False, 'import time\n'), ((17958, 17991), 'qecsim.paulitools.bsp', 'pt.bsp', (['error', 'code.stabilizers.T'], {}), '(error, code.stabilizers.T)\n', (17964, 17991), True, 'from qecsim import paulitools as pt\n'), ((18246, 18257), 'time.time', 'time.time', ([], {}), '()\n', (18255, 18257), False, 'import time\n'), ((539, 566), 'itertools.zip_longest', 'itertools.zip_longest', (['a', 'b'], {}), '(a, b)\n', (560, 566), False, 'import itertools\n'), ((11112, 11156), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.stabilizers.T'], {}), '(recovery ^ error, code.stabilizers.T)\n', (11118, 11156), True, 'from qecsim import paulitools as pt\n'), ((13582, 13623), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.logicals.T'], {}), '(recovery ^ error, code.logicals.T)\n', (13588, 13623), True, 'from qecsim import paulitools as pt\n'), ((8405, 8421), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8415, 8421), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8435, 8451), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8445, 8451), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8471, 8487), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8481, 8487), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((13827, 13843), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(3)', '(4)'], {}), '(3, 4)\n', (13837, 13843), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((17114, 17158), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.stabilizers.T'], {}), '(recovery ^ error, code.stabilizers.T)\n', (17120, 17158), True, 'from qecsim import paulitools as pt\n'), ((18072, 18116), 'qecsim.paulitools.bsp', 'pt.bsp', (['(recovery ^ error)', 'code.stabilizers.T'], {}), '(recovery ^ error, code.stabilizers.T)\n', (18078, 18116), True, 'from qecsim import paulitools as pt\n'), ((8573, 8589), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8583, 8589), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8722, 8738), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8732, 8738), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9526, 9542), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(2)', '(2)'], {}), '(2, 2)\n', (9536, 9542), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9586, 9602), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(4)', '(4)'], {}), '(4, 4)\n', (9596, 9602), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9654, 9670), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (9664, 9670), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, 
PlanarMPSDecoder\n'), ((10301, 10317), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(3)'], {}), '(5, 3)\n', (10311, 10317), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((10398, 10414), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(3)'], {}), '(5, 3)\n', (10408, 10414), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((10495, 10511), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(3)'], {}), '(5, 3)\n', (10505, 10511), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((3069, 3085), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(3)', '(3)'], {}), '(3, 3)\n', (3079, 3085), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((15651, 15667), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(3)', '(3)'], {}), '(3, 3)\n', (15661, 15667), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8501, 8517), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8511, 8517), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9719, 9735), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (9729, 9735), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9810, 9826), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (9820, 9826), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9901, 9917), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(3)', '(5)'], {}), '(3, 5)\n', (9911, 9917), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((9992, 10008), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(3)'], {}), '(5, 3)\n', (10002, 10008), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((10083, 10099), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(3)'], {}), '(5, 3)\n', (10093, 10099), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((10182, 10198), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(3)'], {}), '(5, 3)\n', (10192, 10198), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((3139, 3155), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (3149, 3155), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((3227, 3243), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(7)', '(7)'], {}), '(7, 7)\n', (3237, 3243), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((15721, 15737), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (15731, 15737), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((15809, 15825), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(7)', '(7)'], {}), '(7, 7)\n', (15819, 15825), False, 
'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8620, 8636), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8630, 8636), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n'), ((8769, 8785), 'qecsim.models.planar.PlanarCode', 'PlanarCode', (['(5)', '(5)'], {}), '(5, 5)\n', (8779, 8785), False, 'from qecsim.models.planar import PlanarCode, PlanarRMPSDecoder, PlanarMWPMDecoder, PlanarMPSDecoder\n')] |
# SmoothGrad
from .guided_backprop import get_guided_backprop
from .grad_cam import get_grad_cam
from .guided_grad_cam import get_guided_grad_cam
from .guided_ig import get_guided_integrated_grads, compute_grads
from .integrated_grads import get_integrated_grads
from .vanilla_grad import get_vanilla_grad
from ..utils.process import get_last_layer
import numpy as np
def get_smoothgrad(model, io_imgs, class_id, LAYER_NAME=None, MODALITY="FLAIR", XAI_MODE="classification",
XAI="GBP", DIMENSION="2d", STDEV_SPREAD=.15, N_SAMPLES=5, MAGNITUDE=True):
#XAI="GBP", DIMENSION="2d", STDEV_SPREAD=.15, N_SAMPLES=25, MAGNITUDE=True):
new_shape = io_imgs.shape[1:len(io_imgs.shape)]
if XAI_MODE == "segmentation" and XAI=="GBP":
new_shape = io_imgs.shape[1:len(io_imgs.shape)] +(3,)
total_gradients = np.zeros(new_shape, dtype=np.float32)
#print("Shape of total_gradients:", total_gradients.shape)
stdev = STDEV_SPREAD * (np.max(io_imgs) - np.min(io_imgs))
for _ in range(N_SAMPLES):
noise = np.random.normal(0, stdev, io_imgs.shape)
x_plus_noise = io_imgs + noise
if XAI=="VANILLA":
grads = get_vanilla_grad(model, x_plus_noise, class_id, LAYER_NAME, MODALITY, XAI_MODE)
elif XAI=="GBP":
grads = get_guided_backprop(model, x_plus_noise, class_id, LAYER_NAME, MODALITY, XAI_MODE)
elif XAI=="IG":
grads = get_integrated_grads(model, x_plus_noise, class_id, LAYER_NAME, MODALITY, XAI_MODE)
elif XAI=="GIG":
grads = get_guided_integrated_grads(model, x_plus_noise, class_id, LAYER_NAME, MODALITY, XAI_MODE)
elif XAI=="GCAM":
grads = get_grad_cam(model, x_plus_noise, class_id, LAYER_NAME, MODALITY, XAI_MODE, DIMENSION)
elif XAI=="GGCAM":
grads = get_guided_grad_cam(model, x_plus_noise, class_id, LAYER_NAME, MODALITY, XAI_MODE, DIMENSION)
if MAGNITUDE:
total_gradients += (grads * grads)
else:
total_gradients += grads
return total_gradients / N_SAMPLES
| [
"numpy.random.normal",
"numpy.zeros",
"numpy.min",
"numpy.max"
] | [((864, 901), 'numpy.zeros', 'np.zeros', (['new_shape'], {'dtype': 'np.float32'}), '(new_shape, dtype=np.float32)\n', (872, 901), True, 'import numpy as np\n'), ((1075, 1116), 'numpy.random.normal', 'np.random.normal', (['(0)', 'stdev', 'io_imgs.shape'], {}), '(0, stdev, io_imgs.shape)\n', (1091, 1116), True, 'import numpy as np\n'), ((993, 1008), 'numpy.max', 'np.max', (['io_imgs'], {}), '(io_imgs)\n', (999, 1008), True, 'import numpy as np\n'), ((1011, 1026), 'numpy.min', 'np.min', (['io_imgs'], {}), '(io_imgs)\n', (1017, 1026), True, 'import numpy as np\n')] |
from concurrent import futures
import grpc
from proto.qoin.proto import hello_pb2_grpc, face_mesh_pb2_grpc, hand_tracking_pb2_grpc
from server.face_mesh import FaceMeshService
from server.hand_tracking import HandTrackingService
from server.hello import HelloService
class Server:
def __init__(self, max_workers=3):
self.server = grpc.server(futures.ThreadPoolExecutor(max_workers=max_workers))
def run(self, port=50051):
hello_service = HelloService()
hand_tracking_service = HandTrackingService()
face_mesh_service = FaceMeshService()
hello_pb2_grpc.add_GreeterServicer_to_server(hello_service, self.server)
hand_tracking_pb2_grpc.add_HandTrackingServicer_to_server(hand_tracking_service, self.server)
face_mesh_pb2_grpc.add_FaceMeshServicer_to_server(face_mesh_service, self.server)
self.server.add_insecure_port(f'0.0.0.0:{port}')
self.server.start()
print(f"Server running http://0.0.0.0:{port}")
self.server.wait_for_termination()
if __name__ == "__main__":
server = Server()
server.run()
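# Minimal client sketch. Illustrative only: the request/response message names
# below are assumptions; check the generated hello_pb2 module for the real ones.
#
#   import grpc
#   from proto.qoin.proto import hello_pb2, hello_pb2_grpc
#
#   with grpc.insecure_channel("localhost:50051") as channel:
#       stub = hello_pb2_grpc.GreeterStub(channel)
#       reply = stub.SayHello(hello_pb2.HelloRequest(name="world"))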
| [
"concurrent.futures.ThreadPoolExecutor",
"proto.qoin.proto.hand_tracking_pb2_grpc.add_HandTrackingServicer_to_server",
"server.face_mesh.FaceMeshService",
"server.hello.HelloService",
"proto.qoin.proto.face_mesh_pb2_grpc.add_FaceMeshServicer_to_server",
"proto.qoin.proto.hello_pb2_grpc.add_GreeterServicer_to_server",
"server.hand_tracking.HandTrackingService"
] | [((467, 481), 'server.hello.HelloService', 'HelloService', ([], {}), '()\n', (479, 481), False, 'from server.hello import HelloService\n'), ((514, 535), 'server.hand_tracking.HandTrackingService', 'HandTrackingService', ([], {}), '()\n', (533, 535), False, 'from server.hand_tracking import HandTrackingService\n'), ((564, 581), 'server.face_mesh.FaceMeshService', 'FaceMeshService', ([], {}), '()\n', (579, 581), False, 'from server.face_mesh import FaceMeshService\n'), ((591, 663), 'proto.qoin.proto.hello_pb2_grpc.add_GreeterServicer_to_server', 'hello_pb2_grpc.add_GreeterServicer_to_server', (['hello_service', 'self.server'], {}), '(hello_service, self.server)\n', (635, 663), False, 'from proto.qoin.proto import hello_pb2_grpc, face_mesh_pb2_grpc, hand_tracking_pb2_grpc\n'), ((672, 770), 'proto.qoin.proto.hand_tracking_pb2_grpc.add_HandTrackingServicer_to_server', 'hand_tracking_pb2_grpc.add_HandTrackingServicer_to_server', (['hand_tracking_service', 'self.server'], {}), '(hand_tracking_service\n , self.server)\n', (729, 770), False, 'from proto.qoin.proto import hello_pb2_grpc, face_mesh_pb2_grpc, hand_tracking_pb2_grpc\n'), ((774, 860), 'proto.qoin.proto.face_mesh_pb2_grpc.add_FaceMeshServicer_to_server', 'face_mesh_pb2_grpc.add_FaceMeshServicer_to_server', (['face_mesh_service', 'self.server'], {}), '(face_mesh_service, self.\n server)\n', (823, 860), False, 'from proto.qoin.proto import hello_pb2_grpc, face_mesh_pb2_grpc, hand_tracking_pb2_grpc\n'), ((358, 409), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'max_workers'}), '(max_workers=max_workers)\n', (384, 409), False, 'from concurrent import futures\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `pyfesom2` package."""
import pytest
import os
import numpy as np
import xarray as xr
import matplotlib.pylab as plt
# import matplotlib
from matplotlib.testing.compare import compare_images
from matplotlib.testing.decorators import _image_directories
from pyfesom2 import pyfesom2
from pyfesom2 import load_mesh
from pyfesom2 import get_data
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
my_data_folder = os.path.join(THIS_DIR, 'data')
def test_readmesh():
mesh_path = os.path.join(my_data_folder, 'pi-grid')
mesh = load_mesh(mesh_path, usepickle=False, usejoblib=False)
assert mesh.n2d == 3140
assert mesh.e2d == 5839
mesh = load_mesh(mesh_path, usepickle=True, usejoblib=False)
assert os.path.exists(os.path.join(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2'))
os.remove(os.path.join(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2'))
mesh = load_mesh(mesh_path, usepickle=False, usejoblib=True)
assert os.path.exists(os.path.join(my_data_folder, 'pi-grid', 'joblib_mesh_py3_fesom2'))
os.remove(os.path.join(my_data_folder, 'pi-grid', 'joblib_mesh_py3_fesom2'))
mesh = load_mesh(mesh_path)
assert os.path.exists(os.path.join(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2'))
os.remove(os.path.join(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2'))
print(mesh)
def test_get_data():
mesh_path = os.path.join(my_data_folder, 'pi-grid')
data_path = os.path.join(my_data_folder, 'pi-results')
mesh = load_mesh(mesh_path, usepickle=False, usejoblib=False)
# variable on vertices
temp = get_data(data_path, 'temp', 1948, mesh, depth=0)
assert type(temp) == np.ndarray
assert temp.min() == pytest.approx(-1.8680784)
assert temp.max() == pytest.approx(29.083563)
# variable on elements
u = get_data(data_path, 'u', 1948, mesh, depth=0)
assert type(u) == np.ndarray
assert u.min() == pytest.approx(-0.5859177)
assert u.max() == pytest.approx(0.30641124)
# 2d variable on vertices
ice = get_data(data_path, 'a_ice', 1948, mesh, depth=0)
assert type(u) == np.ndarray
assert ice.mean() == pytest.approx(0.2859408)
# get multiple years
temp = get_data(data_path, 'temp', [1948, 1949], mesh, depth=0)
assert temp.mean() == pytest.approx(8.664016)
# get one record from multiple files
temp = get_data(data_path, 'temp', [1948, 1949], mesh, records=slice(0, 1), depth=0)
assert temp.mean() == pytest.approx(8.670743)
# get different depth
temp = get_data(data_path, 'temp', [1948, 1949], mesh, depth=200)
assert temp.mean() == pytest.approx(6.2157564)
# get different depth and different aggregation
temp = get_data(data_path, 'temp', [1948, 1949], mesh, depth=200, how='max')
assert temp.mean() == pytest.approx(6.3983703)
# directly open ncfile (in data 1948, but we directly request 1949)
temp = get_data(data_path, 'temp', [1948], mesh, depth=200, how='max',
ncfile='{}/{}'.format(data_path, "temp.fesom.1949.nc"))
assert temp.mean() == pytest.approx(6.2478514)
# return dask array
temp = get_data(data_path, 'temp', [1948, 1949], mesh, depth = 200, how='max',
compute=False)
assert isinstance(temp, xr.DataArray)
# use range as argument
temp = get_data(data_path, 'temp', range(1948, 1950), mesh, depth = 200, how='max')
mmean = temp.mean()
assert mmean == pytest.approx(6.3983703)
| [
"pytest.approx",
"pyfesom2.load_mesh",
"os.path.join",
"os.path.abspath",
"pyfesom2.get_data"
] | [((479, 509), 'os.path.join', 'os.path.join', (['THIS_DIR', '"""data"""'], {}), "(THIS_DIR, 'data')\n", (491, 509), False, 'import os\n'), ((435, 460), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (450, 460), False, 'import os\n'), ((549, 588), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""'], {}), "(my_data_folder, 'pi-grid')\n", (561, 588), False, 'import os\n'), ((600, 654), 'pyfesom2.load_mesh', 'load_mesh', (['mesh_path'], {'usepickle': '(False)', 'usejoblib': '(False)'}), '(mesh_path, usepickle=False, usejoblib=False)\n', (609, 654), False, 'from pyfesom2 import load_mesh\n'), ((722, 775), 'pyfesom2.load_mesh', 'load_mesh', (['mesh_path'], {'usepickle': '(True)', 'usejoblib': '(False)'}), '(mesh_path, usepickle=True, usejoblib=False)\n', (731, 775), False, 'from pyfesom2 import load_mesh\n'), ((961, 1014), 'pyfesom2.load_mesh', 'load_mesh', (['mesh_path'], {'usepickle': '(False)', 'usejoblib': '(True)'}), '(mesh_path, usepickle=False, usejoblib=True)\n', (970, 1014), False, 'from pyfesom2 import load_mesh\n'), ((1200, 1220), 'pyfesom2.load_mesh', 'load_mesh', (['mesh_path'], {}), '(mesh_path)\n', (1209, 1220), False, 'from pyfesom2 import load_mesh\n'), ((1450, 1489), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""'], {}), "(my_data_folder, 'pi-grid')\n", (1462, 1489), False, 'import os\n'), ((1506, 1548), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-results"""'], {}), "(my_data_folder, 'pi-results')\n", (1518, 1548), False, 'import os\n'), ((1560, 1614), 'pyfesom2.load_mesh', 'load_mesh', (['mesh_path'], {'usepickle': '(False)', 'usejoblib': '(False)'}), '(mesh_path, usepickle=False, usejoblib=False)\n', (1569, 1614), False, 'from pyfesom2 import load_mesh\n'), ((1653, 1701), 'pyfesom2.get_data', 'get_data', (['data_path', '"""temp"""', '(1948)', 'mesh'], {'depth': '(0)'}), "(data_path, 'temp', 1948, mesh, depth=0)\n", (1661, 1701), False, 'from pyfesom2 import get_data\n'), ((1876, 1921), 'pyfesom2.get_data', 'get_data', (['data_path', '"""u"""', '(1948)', 'mesh'], {'depth': '(0)'}), "(data_path, 'u', 1948, mesh, depth=0)\n", (1884, 1921), False, 'from pyfesom2 import get_data\n'), ((2093, 2142), 'pyfesom2.get_data', 'get_data', (['data_path', '"""a_ice"""', '(1948)', 'mesh'], {'depth': '(0)'}), "(data_path, 'a_ice', 1948, mesh, depth=0)\n", (2101, 2142), False, 'from pyfesom2 import get_data\n'), ((2264, 2320), 'pyfesom2.get_data', 'get_data', (['data_path', '"""temp"""', '[1948, 1949]', 'mesh'], {'depth': '(0)'}), "(data_path, 'temp', [1948, 1949], mesh, depth=0)\n", (2272, 2320), False, 'from pyfesom2 import get_data\n'), ((2590, 2648), 'pyfesom2.get_data', 'get_data', (['data_path', '"""temp"""', '[1948, 1949]', 'mesh'], {'depth': '(200)'}), "(data_path, 'temp', [1948, 1949], mesh, depth=200)\n", (2598, 2648), False, 'from pyfesom2 import get_data\n'), ((2764, 2833), 'pyfesom2.get_data', 'get_data', (['data_path', '"""temp"""', '[1948, 1949]', 'mesh'], {'depth': '(200)', 'how': '"""max"""'}), "(data_path, 'temp', [1948, 1949], mesh, depth=200, how='max')\n", (2772, 2833), False, 'from pyfesom2 import get_data\n'), ((3196, 3284), 'pyfesom2.get_data', 'get_data', (['data_path', '"""temp"""', '[1948, 1949]', 'mesh'], {'depth': '(200)', 'how': '"""max"""', 'compute': '(False)'}), "(data_path, 'temp', [1948, 1949], mesh, depth=200, how='max',\n compute=False)\n", (3204, 3284), False, 'from pyfesom2 import get_data\n'), ((802, 867), 'os.path.join', 'os.path.join', (['my_data_folder', 
'"""pi-grid"""', '"""pickle_mesh_py3_fesom2"""'], {}), "(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2')\n", (814, 867), False, 'import os\n'), ((883, 948), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""', '"""pickle_mesh_py3_fesom2"""'], {}), "(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2')\n", (895, 948), False, 'import os\n'), ((1041, 1106), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""', '"""joblib_mesh_py3_fesom2"""'], {}), "(my_data_folder, 'pi-grid', 'joblib_mesh_py3_fesom2')\n", (1053, 1106), False, 'import os\n'), ((1122, 1187), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""', '"""joblib_mesh_py3_fesom2"""'], {}), "(my_data_folder, 'pi-grid', 'joblib_mesh_py3_fesom2')\n", (1134, 1187), False, 'import os\n'), ((1247, 1312), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""', '"""pickle_mesh_py3_fesom2"""'], {}), "(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2')\n", (1259, 1312), False, 'import os\n'), ((1328, 1393), 'os.path.join', 'os.path.join', (['my_data_folder', '"""pi-grid"""', '"""pickle_mesh_py3_fesom2"""'], {}), "(my_data_folder, 'pi-grid', 'pickle_mesh_py3_fesom2')\n", (1340, 1393), False, 'import os\n'), ((1764, 1789), 'pytest.approx', 'pytest.approx', (['(-1.8680784)'], {}), '(-1.8680784)\n', (1777, 1789), False, 'import pytest\n'), ((1815, 1839), 'pytest.approx', 'pytest.approx', (['(29.083563)'], {}), '(29.083563)\n', (1828, 1839), False, 'import pytest\n'), ((1978, 2003), 'pytest.approx', 'pytest.approx', (['(-0.5859177)'], {}), '(-0.5859177)\n', (1991, 2003), False, 'import pytest\n'), ((2026, 2051), 'pytest.approx', 'pytest.approx', (['(0.30641124)'], {}), '(0.30641124)\n', (2039, 2051), False, 'import pytest\n'), ((2202, 2226), 'pytest.approx', 'pytest.approx', (['(0.2859408)'], {}), '(0.2859408)\n', (2215, 2226), False, 'import pytest\n'), ((2347, 2370), 'pytest.approx', 'pytest.approx', (['(8.664016)'], {}), '(8.664016)\n', (2360, 2370), False, 'import pytest\n'), ((2528, 2551), 'pytest.approx', 'pytest.approx', (['(8.670743)'], {}), '(8.670743)\n', (2541, 2551), False, 'import pytest\n'), ((2675, 2699), 'pytest.approx', 'pytest.approx', (['(6.2157564)'], {}), '(6.2157564)\n', (2688, 2699), False, 'import pytest\n'), ((2860, 2884), 'pytest.approx', 'pytest.approx', (['(6.3983703)'], {}), '(6.3983703)\n', (2873, 2884), False, 'import pytest\n'), ((3135, 3159), 'pytest.approx', 'pytest.approx', (['(6.2478514)'], {}), '(6.2478514)\n', (3148, 3159), False, 'import pytest\n'), ((3507, 3531), 'pytest.approx', 'pytest.approx', (['(6.3983703)'], {}), '(6.3983703)\n', (3520, 3531), False, 'import pytest\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-01 17:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ac_seguridad', '0014_actividad'),
]
operations = [
migrations.AddField(
model_name='estacionamiento',
name='monto_tarifa',
field=models.IntegerField(default=1000),
),
migrations.AddField(
model_name='estacionamiento',
name='tarifa_plana',
field=models.BooleanField(default=True),
),
]
| [
"django.db.models.BooleanField",
"django.db.models.IntegerField"
] | [((410, 443), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'default': '(1000)'}), '(default=1000)\n', (429, 443), False, 'from django.db import migrations, models\n'), ((578, 611), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)'}), '(default=True)\n', (597, 611), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
import pathlib
from rtfparse.parser import Rtf_Parser
from rtfparse.renderers import de_encapsulate_html
source_path = pathlib.Path(r"path/to/your/rtf/document.rtf")
target_path = pathlib.Path(r"path/to/your/html/de_encapsulated.html")
parser = Rtf_Parser(rtf_path=source_path)
parsed = parser.parse_file()
renderer = de_encapsulate_html.De_encapsulate_HTML()
with open(target_path, mode="w", encoding="utf-8") as html_file:
renderer.render(parsed, html_file)
| [
"rtfparse.parser.Rtf_Parser",
"rtfparse.renderers.de_encapsulate_html.De_encapsulate_HTML",
"pathlib.Path"
] | [((145, 190), 'pathlib.Path', 'pathlib.Path', (['"""path/to/your/rtf/document.rtf"""'], {}), "('path/to/your/rtf/document.rtf')\n", (157, 190), False, 'import pathlib\n'), ((206, 260), 'pathlib.Path', 'pathlib.Path', (['"""path/to/your/html/de_encapsulated.html"""'], {}), "('path/to/your/html/de_encapsulated.html')\n", (218, 260), False, 'import pathlib\n'), ((273, 305), 'rtfparse.parser.Rtf_Parser', 'Rtf_Parser', ([], {'rtf_path': 'source_path'}), '(rtf_path=source_path)\n', (283, 305), False, 'from rtfparse.parser import Rtf_Parser\n'), ((347, 388), 'rtfparse.renderers.de_encapsulate_html.De_encapsulate_HTML', 'de_encapsulate_html.De_encapsulate_HTML', ([], {}), '()\n', (386, 388), False, 'from rtfparse.renderers import de_encapsulate_html\n')] |
# -*- coding: utf-8 -*-
## This file contains the data extraction module that extracts thermograms from Variotherm and Optris sensor cameras
### AUTHOR : <NAME>
### MATRICULATION NUMBER : 65074
### STUDENT PROJECT TUBF: Projekt LaDECO (Machine learning on thermographic videos)
import numpy as np
import matplotlib.pyplot as plt
import struct
import os
import cv2 as cv
from thermograms.Utilities import Utilities
class VarioTherm():
"""Extraction of thermograms throught thermal sensor processing
"""
def __init__(self) -> None:
""" Intitial parameters to perform thermal sensor processing
"""
self.thermo_video_data = None
self.file_extension = None
self.file_type = None
self.file_type_extension = None
self.sequence_step_data = {}
self.max_length = None
self.m_index = 0
self.sequence_offset = 0
self.sequence_count = 0
self.sequence_count_max = 0
self.no_time_seq = 0
self.image_data = None
self.image_length = 0
self.image_index = 0
self.image_width = 0
self.image_height = 0
pass
def read_byte(self, index=0, length=4, type='int', byteorder='little'):
"""
        Read a byte sequence and convert it into the required format
        Args:
            index (int, optional): start index of the byte. Defaults to 0.
length (int, optional): length of the byte. Defaults to 4.
type (str, optional): type to which the byte needs to be converted. Defaults to 'int'.
byteorder (str, optional): type of byte order. Defaults to 'little'.
Returns:
_type_: value after conversion
"""
        # checking if the index of the byte exceeds the length of the file
if index + length > self.max_length:
# extracting the byte data from the main file
temp = self.thermo_video_data[index:self.max_length - length]
# updating the global index to max length
self.m_index = self.max_length
else:
# extracting the byte data from the main file
temp = self.thermo_video_data[index:index + length]
# updating the global index
self.m_index = index + length
# converting the extracted byte information to required data format
if type == 'int':
output = int.from_bytes(temp, byteorder, signed=True)
return output
elif type == 'float':
output = struct.unpack('f', temp)
return output[0]
else:
return temp
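    # Example of the decoding performed above (illustrative, not called anywhere):
    #   int.from_bytes(b'\x10\x00\x00\x00', 'little', signed=True)   -> 16
    #   struct.unpack('<f', b'\x00\x00\x80?')[0]                     -> 1.0
    # i.e. four little-endian bytes are interpreted either as a signed integer or
    # as an IEEE-754 single-precision float, depending on the requested type.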
def image_read_byte(self, index=0, length=2, type='int', byteorder='little'):
"""
        Reads the image byte and converts it into the required data format
Args:
index (int, optional): _description_. Defaults to 0.
length (int, optional): _description_. Defaults to 2.
type (str, optional): _description_. Defaults to 'int'.
byteorder (str, optional): _description_. Defaults to 'little'.
Returns:
            _type_: converted data
"""
        # checking if the index of the byte exceeds the length of the file
if index + length > self.image_length:
# extracting the byte data from the main file
temp = self.image_data[index:self.image_length - length]
# updating the image index to max length
self.image_index = self.image_length
else:
# extracting the byte data from the main file
temp = self.image_data[index:index + length]
# updating the image index
self.image_index = index + length
# converting the extracted byte information to required data format
if type == 'int':
output = int.from_bytes(temp, byteorder, signed=True)
return output
elif type == 'float':
output = struct.unpack('f', temp)
return output[0]
elif type == 'double':
output = struct.unpack('d', temp)
return output[0]
else:
return temp
def set_index(self, index):
"""updates the global index
Args:
index (int): index position which needs to be updated to global index
"""
# checking if the index is in between the range of binary file
if self.max_length > index > 0:
self.m_index = index
        # if the index is greater than max_length
elif index >= self.max_length:
self.m_index = self.max_length
else:
self.m_index = 0
def sequence_block_data(self, disp=False):
"""
        Extraction of the sequence information of the thermograms, which is required
        for identifying the length and position of each thermogram in the binary file
        Args:
            disp (bool, optional): parameter to print data. Defaults to False.
"""
# looping over the max sequence counter
for i in range(self.sequence_count_max):
# extraction of respective data
data = {'step_type': self.read_byte(self.m_index), 'description1': self.read_byte(self.m_index),
'frame_index': self.read_byte(self.m_index), 'step_offset': self.read_byte(self.m_index),
'step_size': self.read_byte(self.m_index), 'header_size': 0x6C0}
if data['header_size'] > data['step_size']:
data['header_size'] = data['step_size']
data['header_offset'] = 0
data['image_offset'] = data['header_size']
data['image_size'] = data['step_size'] - data['image_offset']
data['description2'] = self.read_byte(self.m_index)
data['description3'] = self.read_byte(self.m_index)
data['description4'] = self.read_byte(self.m_index)
if data['step_type'] == 1:
# creating a numpy array to store the respective thermogram information
self.sequence_step_data[self.no_time_seq + 1] = data
self.no_time_seq += 1
if data['frame_index'] % 50 == 0 and disp:
print(self.sequence_step_data[self.no_time_seq])
# To avoid the last two thermograms
self.no_time_seq = self.no_time_seq - 2
pass
def video_info_extraction(self, info_index=1084):
"""
        Extract the thermal sensor parameters present in the binary file
        Args:
            info_index (int, optional): start index for the video information. Defaults to 1084.
        Returns:
            _type_: dictionary containing the thermal sensor information
"""
# Dictionary to store and update sensor information
video_info = {}
self.image_index = info_index + 92
device_min_range = self.image_read_byte(self.image_index, length=4, type='float')
device_max_range = self.image_read_byte(self.image_index, length=4, type='float')
video_info['device_min_range'] = str(device_min_range)
video_info['device_max_range'] = str(device_max_range)
self.image_index += 42
device = self.image_read_byte(self.image_index, length=10, type='str')
video_info['device'] = str(device)
self.image_index += 34
device_series_number = self.image_read_byte(self.image_index, length=6, type='str')
video_info['device_series_number'] = str(device_series_number)
self.image_index += 10
sensor = self.image_read_byte(self.image_index, length=12, type='str')
video_info['sensor'] = str(sensor)
self.image_index += 18
sensor_calibration = self.image_read_byte(self.image_index, length=32, type='str')
video_info['sensor_calibration'] = str(sensor_calibration)
self.image_index += 276
video_timestamp = self.image_read_byte(self.image_index, length=8, type='double')
video_timestamp_extension = self.image_read_byte(self.image_index, length=4, type='int')
self.image_index += 2
sensor_name = self.image_read_byte(self.image_index, length=10, type='str')
video_info['camera'] = str(sensor_name)
self.image_index += 45
video_format = self.image_read_byte(self.image_index, length=16, type='str')
video_info['video_format'] = str(video_format)
return video_info
def data_file_reading(self, root_dir, experiment, read_mode='rb'):
"""
        Reads the .irb file and converts it into a raw binary stream,
        after which the data is extracted
        Args:
            root_dir (str): path of the file
            experiment (str): name of the file
            read_mode (str, optional): file open mode. Defaults to 'rb' (read binary).
"""
# file path of the video
video_file_path = os.path.join(
root_dir, experiment, experiment + '.irb')
# reading the video and converting it into binary file format
with open(video_file_path, read_mode) as file:
self.thermo_video_data = file.read()
# print('The length of the sequence',len(self.thermo_video_data))
# extracting initial video parameter
self.max_length = len(self.thermo_video_data)
self.file_extension = self.read_byte(self.m_index, length=5, type='str')
if self.file_extension != b"\xFFIRB\x00":
print('File extension is not irb')
self.file_type = self.read_byte(self.m_index, length=8, type='str')
self.file_type_extension = self.read_byte(self.m_index, length=8, type='str')
self.initial_flag = self.read_byte(self.m_index)
self.sequence_offset = self.read_byte(self.m_index)
self.sequence_count = self.read_byte(self.m_index)
self.sequence_count_max = self.sequence_count + self.initial_flag
self.set_index(self.sequence_offset)
        # extracting the thermogram sequence data based on the above initial information
self.sequence_block_data()
print('Number of time steps:', self.no_time_seq)
pass
def image_extraction(self, data_dic, root_dir, experiment, disp=False):
"""
Extraction of thermogram data based on the data obtained in sequence_block_data
Args:
data_dic (numpy array): image sequence data obtained in sequence_block_data
root_dir (str): path to save the image information
experiment (str): name of the experiment
disp (bool, optional): parameter to print the image information . Defaults to False.
Returns:
_type_: thermogram
"""
# extracting image sequence information like start index of the thermogram and length
index = data_dic['step_offset']
size = data_dic['step_size']
frame_index = data_dic['frame_index']
# print(type(index),size)
# creating a dictionary to store thermogram information like width ,height etc
image_info = {}
self.image_length = size
self.image_data = self.read_byte(index, size, 'str')
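        # note: read_byte with type='str' falls through to the raw-bytes branch,
        # so image_data holds the unparsed byte slice of this frame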
# print(self.image_data)
image_info['image_size'] = self.image_length
self.image_index = 0
bytes_per_pixel = self.image_read_byte(self.image_index, length=1, byteorder='big')
compressed = self.image_read_byte(self.image_index, length=2, byteorder='big')
image_info['bytes_per_pixel'] = str(bytes_per_pixel)
image_info['compressed'] = str(compressed)
self.image_index += 2
self.image_width = self.image_read_byte(
self.image_index, length=2, type='int', byteorder='big')
self.image_height = self.image_read_byte(
self.image_index, length=2, type='int', byteorder='big')
self.image_index += 4
image_info['image_width'] = str(self.image_width)
image_info['image_height'] = str(self.image_height)
image_info['time_steps'] = str(self.no_time_seq)
width_check = self.image_read_byte(
self.image_index, length=2, type='int', byteorder='big')
# if width_check == image_width-1:
# raise Exception('width donot match')
self.image_index += 2
height_check = self.image_read_byte(
self.image_index, length=2, type='int', byteorder='big')
# if height_check == image_height-1:
# raise Exception('height donot match')
self.image_index += 5
emissivity = self.image_read_byte(self.image_index, length=4,
type='float', byteorder='big')
image_info['emissivity'] = str(emissivity)
distance = self.image_read_byte(self.image_index, length=4,
type='float', byteorder='big')
image_info['distance'] = str(distance)
environment_temp = self.image_read_byte(self.image_index, length=4,
type='float', byteorder='big')
self.image_index += 4
path_temperature = self.image_read_byte(self.image_index, length=4,
type='float', byteorder='big')
image_info['path_temperature'] = str(path_temperature)
self.image_index += 4
center_wavelength = self.image_read_byte(self.image_index, length=4,
type='float', byteorder='big')
image_info['center_wavelength'] = str(center_wavelength)
self.image_index = 60
interpolation_temp = []
        # the temperatures in the thermogram are stored in a sequential format
        ## where two adjacent values have to be interpolated to obtain the true temperature
for i in range(256):
# converting the byte data to float and appending to a list
interpolation_temp.append(
self.image_read_byte(self.image_index, length=4, type='float', byteorder='little'))
# extraction of thermal sensor data
video_info = self.video_info_extraction()
# interpolating the temperature data to get true temperature.
temperature_data = self.temperature_interpolation(data_dic['image_offset'], interpolation_temp)
if frame_index == 1:
# exporting the video information into a config file
csv_name = 'evaluation-config.conf'
# Utilities().check_n_create_directory(file_path)
txt_file_path = os.path.join(
root_dir, experiment)
csv_path = os.path.join(txt_file_path, csv_name)
with open(csv_path, 'w') as f:
f.write("# -*- coding: utf-8 -*- \n")
f.write('Versuchsbezeichnung = \n')
f.write('Beschreibung = \n')
f.write('# Allgemein \n')
for key in video_info.keys():
f.write(str(key) + "=" + str(video_info[key]) + "\n")
for key in image_info.keys():
f.write(str(key) + "=" + str(image_info[key]) + "\n")
f.write('changeTemperatureToCelsius = False \n')
f.write('frequency=50 \n')
f.write('plot3DElevation = 65 \n')
f.write('plot3DAzimuth = None \n')
f.write('plot3DXLabel = Width [Pixel] \n')
f.write('plot3DYLabel = Height [Pixel] \n')
f.write('plot3DZLabelIntegral = \n')
f.write('plot3DZLabelRise = m [K/s] \n')
f.write('plot3DWidth = 16.0 \n')
f.write('plot3DHeight = 12.0 \n')
f.write('plot3DDPI = 300 \n')
f.write('plot3DFileFormat = png \n')
f.write('plot2DWidth = 16.0 \n')
f.write('plot2DHeight = 12.0 \n')
f.write('plot2DDPI = 300 \n')
f.write('plot2DFileFormat = png \n')
f.write('evaluationArea = [] \n')
f.write('temperatureTimeCurves =[] \n')
f.write('IgnoreTimeAtStart = 0 \n')
f.write('temperaturDelta =1 \n')
# video_info_df = pd.DataFrame.from_dict(video_info,orient='index')
# image_info_df = pd.DataFrame.from_dict(image_info, orient='index')
# evaluation_configuration = pd.concat([video_info_df, image_info_df])
# evaluation_configuration.to_csv(csv_path)
if disp and (frame_index % 10 == 0):
# plots the heat map of thermogram
plt.imshow(temperature_data.reshape(
(self.image_width, self.image_height)).astype(np.float64), cmap='RdYlBu_r')
plt.title('Temperature profile ' + str(frame_index) + ' time step')
plt.xlabel('Height (pixcels)')
plt.ylabel('width (pixcels)')
plt.colorbar()
plt.show(block=False)
plt.pause(0.75)
plt.close("all")
return temperature_data
def temperature_interpolation(self, index, interpolation_temp):
"""
        Interpolation function to map the raw thermogram values to the true temperatures
        Args:
            index (_type_): start index for the interpolation values
            interpolation_temp (_type_): list of temperatures which have to be interpolated
Returns:
_type_: True thermograms
"""
no_pixcels = self.image_height * self.image_width
temperature_data = []
f = 0
self.image_index = index
        # runs over all pixels
        for i in range(no_pixcels):
            # read the two bytes encoding this pixel's value
pixcel_1 = self.image_read_byte(self.image_index, length=1, type='int', byteorder='big')
pixcel_2 = self.image_read_byte(self.image_index, length=1, type='int', byteorder='big')
# interpolation function obtained from general data processing of thermal sensor data
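            # pixcel_2 (the second byte) indexes the 256-entry calibration table,
            # while pixcel_1 (the first byte) gives the fractional weight f used to
            # interpolate linearly between interpolation_temp[pixcel_2] and [pixcel_2 + 1]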
f = pixcel_1 * (1.0 / 256.0)
pixcel_temperature = interpolation_temp[pixcel_2 +
1] * f + interpolation_temp[pixcel_2] * (1.0 - f)
            # if the true temperature is less than 0, then the min range of the sensor is assigned
if pixcel_temperature < 0:
pixcel_temperature = 255.0
temperature_data.append(pixcel_temperature)
return np.array(temperature_data)
def image_sequence_extractor(self, root_dir, experiment, disp=False):
"""
        Combines all the above methods to extract the thermograms and store them in a numpy array
Args:
root_dir (_type_): path of the .irb file
experiment (_type_): _description_
disp (bool, optional): _description_. Defaults to False.
Returns:
_type_: _description_
"""
        # reads the data, converts it into binary format and extracts image sequence information such as the start and length indices of each thermogram
self.data_file_reading(root_dir, experiment)
# creating a numpy array to store the thermograms
image_sequence = np.zeros(shape=(256, 256, len(self.sequence_step_data)-2))
print('\nExtracting temperature profile sequence')
print('Progress: [', end='', flush=True)
# running for all sequences
for i in range(1, len(self.sequence_step_data)-2):
# extracting image information for each block
data_dic = self.sequence_step_data[i]
# extracting thermogram of each sequence
step_imag_temp = self.image_extraction(data_dic, root_dir, experiment, disp)
# reshaping the extracted thermogram based on the extracted image width and height.
image_sequence[:, :, i-1] = step_imag_temp.reshape((self.image_width, self.image_height))
if i % 10 == 0:
print('■', end='', flush=True)
print('] loaded ' + str(self.no_time_seq) + ' time steps', end='', flush=True)
return image_sequence
class Optris():
"""
Processing of thermal sensor data
"""
def __init__(self, root_directory, video_file_name) -> None:
"""
        Initial parameters
Args:
root_directory (str): path of the .ravi file
video_file_name (str): name of the .ravi file
"""
self.Root_directory = root_directory
self.Ravi_file_name = video_file_name
pass
def video_simulation(self, fps=30, vs_disp=False):
"""
Simulation of optris thermal video file
Args:
fps (int, optional): Frames per second. Defaults to 30.
vs_disp (bool, optional): Parameter to perform simulation. Defaults to False.
Raises:
Exception: file is not in .ravi file format
"""
# path of the ravi file
video_file_path = os.path.join(self.Root_directory, self.Ravi_file_name)
# using open CV .avi module to open data
ravi_video_data = cv.VideoCapture(video_file_path)
# changing the format of the file to .avi for video processing
ravi_video_data.set(cv.CAP_PROP_FORMAT, -1)
# changing the frame per seconds of the video
ravi_video_data.set(cv.CAP_PROP_FPS, fps)
# checking for the file format and raising error
if not ravi_video_data.isOpened():
raise Exception('Error while loading the {} video file'.format(self.Ravi_file_name))
# extracting the height and width of the video
width = int(ravi_video_data.get(cv.CAP_PROP_FRAME_WIDTH))
height = int(ravi_video_data.get(cv.CAP_PROP_FRAME_HEIGHT))
f = 0
print("Simulation Started \n")
print('Progress: [', end='', flush=True)
# opening the video for playing
while ravi_video_data.isOpened() is True:
# reading and fetching data for each frame
fetch_status, frame = ravi_video_data.read()
if fetch_status is False:
print('] simulated ' + str(f) + ' time steps', end='', flush=True)
print(' playing video is complete')
break
# resizing the frame for display
re_frame = frame.view(dtype=np.int16).reshape(height, width)
actual_frame = re_frame[1:, :]
# To compensate the camera movement, the intensity peaks are identified and normalization is
# performed for better visualization
displace_frame = cv.normalize(actual_frame, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)
# Applying colormap for better visualization
disp_color = cv.applyColorMap(displace_frame, cv.COLORMAP_JET)
# Plotting each frame
cv.imshow('Optris RAVI file output', disp_color)
#if f==950:
#print(f)
# plt.imshow(displace_frame,cmap='RdYlBu_r', interpolation='None')
# plt.axis('off')
# plt.savefig(r"D:\STUDY_MATERIAL\document\optris_python"+str(f)+".png",dpi=600,bbox_inches='tight',transparent=True)
cv.waitKey(10)
#print(f)
f += 1
if f % 60 == 0:
print('■', end='', flush=True)
ravi_video_data.release()
cv.destroyAllWindows()
pass
def ravi_to_yuv(self):
""" Convert .ravi to yuv (binary file format)
"""
ravi_file_path = os.path.join(self.Root_directory, self.Ravi_file_name)
yuv_file_name = self.Ravi_file_name[:-4] + "yuv"
yuv_file_path = os.path.join(self.Root_directory, yuv_file_name)
command = "ffmpeg -y -f avi -i '" + ravi_file_path + "' -vcodec rawvideo '" + yuv_file_path + "'"
print(command)
os.system(command)
pass
if __name__ == '__main__':
root_directory = r'utilites\datasets'
experiment = r"2021-05-11 - Variantenvergleich - VarioTherm IR-Strahler - Winkel 45°"
Vario = VarioTherm()
    file_name = root_directory  # assumed output prefix (file_name was otherwise undefined)
    temperature_data = Vario.image_sequence_extractor(root_directory, experiment, True)
    np.save(file_name + r'Documents/temp/temp1.npy', temperature_data)
root_directory = r'utilites\datasets'
video_file_name=r'experiment_1.ravi'
a= Optris(root_directory,video_file_name)
a.video_simulation() | [
"cv2.applyColorMap",
"cv2.normalize",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.colorbar",
"os.path.join",
"cv2.imshow",
"matplotlib.pyplot.close",
"numpy.array",
"struct.unpack",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"matplotlib.pyplot.pause",
"os.system",
"cv2.waitKey",
"numpy.save",
"matplotlib.pyplot.show"
] | [((24149, 24214), 'numpy.save', 'np.save', (["(file_name + 'Documents/temp/temp1.npy')", 'temperature_data'], {}), "(file_name + 'Documents/temp/temp1.npy', temperature_data)\n", (24156, 24214), True, 'import numpy as np\n'), ((8902, 8957), 'os.path.join', 'os.path.join', (['root_dir', 'experiment', "(experiment + '.irb')"], {}), "(root_dir, experiment, experiment + '.irb')\n", (8914, 8957), False, 'import os\n'), ((18486, 18512), 'numpy.array', 'np.array', (['temperature_data'], {}), '(temperature_data)\n', (18494, 18512), True, 'import numpy as np\n'), ((20970, 21024), 'os.path.join', 'os.path.join', (['self.Root_directory', 'self.Ravi_file_name'], {}), '(self.Root_directory, self.Ravi_file_name)\n', (20982, 21024), False, 'import os\n'), ((21100, 21132), 'cv2.VideoCapture', 'cv.VideoCapture', (['video_file_path'], {}), '(video_file_path)\n', (21115, 21132), True, 'import cv2 as cv\n'), ((23367, 23389), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (23387, 23389), True, 'import cv2 as cv\n'), ((23523, 23577), 'os.path.join', 'os.path.join', (['self.Root_directory', 'self.Ravi_file_name'], {}), '(self.Root_directory, self.Ravi_file_name)\n', (23535, 23577), False, 'import os\n'), ((23659, 23707), 'os.path.join', 'os.path.join', (['self.Root_directory', 'yuv_file_name'], {}), '(self.Root_directory, yuv_file_name)\n', (23671, 23707), False, 'import os\n'), ((23845, 23863), 'os.system', 'os.system', (['command'], {}), '(command)\n', (23854, 23863), False, 'import os\n'), ((14523, 14557), 'os.path.join', 'os.path.join', (['root_dir', 'experiment'], {}), '(root_dir, experiment)\n', (14535, 14557), False, 'import os\n'), ((14598, 14635), 'os.path.join', 'os.path.join', (['txt_file_path', 'csv_name'], {}), '(txt_file_path, csv_name)\n', (14610, 14635), False, 'import os\n'), ((16848, 16878), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Height (pixcels)"""'], {}), "('Height (pixcels)')\n", (16858, 16878), True, 'import matplotlib.pyplot as plt\n'), ((16891, 16920), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""width (pixcels)"""'], {}), "('width (pixcels)')\n", (16901, 16920), True, 'import matplotlib.pyplot as plt\n'), ((16933, 16947), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (16945, 16947), True, 'import matplotlib.pyplot as plt\n'), ((16960, 16981), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (16968, 16981), True, 'import matplotlib.pyplot as plt\n'), ((16994, 17009), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.75)'], {}), '(0.75)\n', (17003, 17009), True, 'import matplotlib.pyplot as plt\n'), ((17022, 17038), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (17031, 17038), True, 'import matplotlib.pyplot as plt\n'), ((22591, 22657), 'cv2.normalize', 'cv.normalize', (['actual_frame', 'None', '(0)', '(255)', 'cv.NORM_MINMAX', 'cv.CV_8U'], {}), '(actual_frame, None, 0, 255, cv.NORM_MINMAX, cv.CV_8U)\n', (22603, 22657), True, 'import cv2 as cv\n'), ((22740, 22789), 'cv2.applyColorMap', 'cv.applyColorMap', (['displace_frame', 'cv.COLORMAP_JET'], {}), '(displace_frame, cv.COLORMAP_JET)\n', (22756, 22789), True, 'import cv2 as cv\n'), ((22837, 22885), 'cv2.imshow', 'cv.imshow', (['"""Optris RAVI file output"""', 'disp_color'], {}), "('Optris RAVI file output', disp_color)\n", (22846, 22885), True, 'import cv2 as cv\n'), ((23192, 23206), 'cv2.waitKey', 'cv.waitKey', (['(10)'], {}), '(10)\n', (23202, 23206), True, 'import cv2 as cv\n'), ((2530, 2554), 'struct.unpack', 
'struct.unpack', (['"""f"""', 'temp'], {}), "('f', temp)\n", (2543, 2554), False, 'import struct\n'), ((3954, 3978), 'struct.unpack', 'struct.unpack', (['"""f"""', 'temp'], {}), "('f', temp)\n", (3967, 3978), False, 'import struct\n'), ((4060, 4084), 'struct.unpack', 'struct.unpack', (['"""d"""', 'temp'], {}), "('d', temp)\n", (4073, 4084), False, 'import struct\n')] |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 14 08:23:15 2018
@author: philt
"""
# Data Preprocessing Template
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Salary_Data.csv')
X = dataset.iloc[:, :-1].values # Select indep variable (All but last column)
y = dataset.iloc[:, 1].values # Select Depedent variables (Last Column)
# Taking care of missing data
# Encoding categorical data
# Dummy encoding. Want to decode the countries
# EDA - Visualizing the Raw Data
#plt.scatter(X, y, color= 'red')
#plt.title('Salary Vs Experiance (Raw Data Set)')
#plt.xlabel('Years of Experiance')
#plt.ylabel("Salary")
#plt.show()
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
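# note: in newer scikit-learn releases this import lives in sklearn.model_selection
# (sklearn.cross_validation was removed in version 0.20)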
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=1/3, random_state=0)
# Feature Scaling
'''from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
# sc_y = StandardScaler
# y_train = sc_y.fit_transform(y_train)'''
#Fitting Simple Linear Regression to the Training set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predicting the Test set results
y_pred = regressor.predict(X_test)
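# Optional goodness-of-fit check (an aside, not required by the template):
# from sklearn.metrics import r2_score
# print(r2_score(y_test, y_pred))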
#Visualizing the Training set results
fig = plt.figure()
title = fig.suptitle('Salary Vs Experiance (Training Set)')
fig.subplots_adjust(top=0.85, wspace=0.3)
ax1 = fig.add_subplot(121)
ax1.set_xlabel('Years (Training Set)')
ax1.set_ylabel('Salary')
ax1.scatter(X_train, y_train, color= 'red')
ax1.plot(X_train, regressor.predict(X_train), color= 'blue')
#Visualizing the Test set results
ax2 = fig.add_subplot(122)
ax2.set_xlabel('Years (Test Set)')
ax2.set_ylabel('Salary')
ax2.scatter(X_test, y_test, color= 'red')
ax2.plot(X_train, regressor.predict(X_train), color= 'blue')
plt.show() | [
"pandas.read_csv",
"matplotlib.pyplot.figure",
"sklearn.cross_validation.train_test_split",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.show"
] | [((248, 278), 'pandas.read_csv', 'pd.read_csv', (['"""Salary_Data.csv"""'], {}), "('Salary_Data.csv')\n", (259, 278), True, 'import pandas as pd\n'), ((874, 929), 'sklearn.cross_validation.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(1 / 3)', 'random_state': '(0)'}), '(X, y, test_size=1 / 3, random_state=0)\n', (890, 929), False, 'from sklearn.cross_validation import train_test_split\n'), ((1277, 1295), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (1293, 1295), False, 'from sklearn.linear_model import LinearRegression\n'), ((1443, 1455), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1453, 1455), True, 'import matplotlib.pyplot as plt\n'), ((1985, 1995), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1993, 1995), True, 'import matplotlib.pyplot as plt\n')] |
#!/usr/bin/env python3
from typing import (
Any,
Dict,
List,
Optional,
Set,
Tuple,
)
from bs4 import BeautifulSoup
import requests
import logging
import hashlib
import pathlib
#
# helpers
#
def cleanup_name(s: str, remove_quote=False) -> str:
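    # e.g. cleanup_name('Leather Cap') -> 'LeatherCap'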
if remove_quote:
return s.replace("'", '').title().replace(' ', '')
else:
return s.replace("'", "\\'").title().replace(' ', '')
def get_wiki(url: str) -> str:
digest = hashlib.md5(bytes(url, 'utf-8')).hexdigest()
cache = pathlib.Path('wiki_cache') / digest
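    # the wiki_cache/ directory is expected to exist already; it is not created here,
    # so the cache write below fails if it is missing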
if cache.exists():
return cache.read_text()
print(f'fetching: {url}')
html = requests.get(url).text
with cache.open('w') as f:
f.write(html)
return html
def start_python_file(filename: str):
logging.info(f'writing file {filename}')
f = open(filename, 'w')
f.write('#!/usr/bin/env python3\n')
f.write('# this file is auto-generated by gen_from_wiki.py\n')
f.write('from __future__ import annotations\n')
return f
#
# PlayerClass
#
_player_classes: Optional[List[str]] = None
_player_class_abilities: Optional[Dict[str, Dict[str, int]]] = None
_player_class_equipment: Optional[Dict[str, Dict[str, int]]] = None
def gen_player_classes() -> List[str]:
global _player_classes
global _player_class_abilities
global _player_class_equipment
if _player_classes is not None:
return _player_classes
logging.info('fetching classes from wiki')
html = get_wiki('https://wiki.retro-mmo.com/wiki/Category:Classes')
soup = BeautifulSoup(html, 'html.parser')
content = soup.select('#mw-pages')[0]
lis = content.findAll('li')
classes = []
for li in lis:
children = li.select('a')
assert len(children) == 1
classes.append(children[0].string)
class_abilities: Dict[str, Dict[str, int]] = {}
class_equipment: Dict[str, Dict[str, int]] = {}
for classname in classes:
html = get_wiki(f'https://wiki.retro-mmo.com/wiki/{classname}')
soup = BeautifulSoup(html, 'html.parser')
ability_table, equipment_table, *_ = soup.select('.wikitable')
# abilities
class_abilities[classname] = {}
trs = ability_table.select('tbody')[0].select('tr')
for tr in trs:
tds = tr.select('td')
if len(tds) != 3: continue
_, ability, level = tds
ability = ability.select('a')[0].string
level = int(level.string)
class_abilities[classname][ability] = level
class_equipment[classname] = {}
trs = equipment_table.select('tbody')[0].select('tr')
for tr in trs:
tds = tr.select('td')
if len(tds) != 3: continue
_, equipment , level = tds
equipment = equipment.select('a')[0].string
level = int(level.string)
class_equipment[classname][equipment] = level
_player_class_abilities = class_abilities
_player_class_equipment = class_equipment
_player_classes = classes
return classes
def gen_player_class_abilities() -> Dict[str, Dict[str, int]]:
gen_player_classes()
assert _player_class_abilities is not None
return _player_class_abilities
def gen_player_class_equipment() -> Dict[str, Dict[str, int]]:
gen_player_classes()
assert _player_class_equipment is not None
return _player_class_equipment
def write_player_classes() -> None:
classes = gen_player_classes()
f = start_python_file('player_class.py')
f.write('from typing import (\n')
f.write(' Dict,\n')
f.write(' Tuple,\n')
f.write(')\n')
f.write('import enum\n')
f.write('import functools\n')
f.write('\n')
f.write('from ..item import EquipmentItem\n')
f.write('from .ability import Ability\n')
f.write('\n\n')
f.write('class PlayerClass(enum.Enum):\n')
f.write('\n')
for pc in classes:
f.write(f" {pc} = '{pc.lower()}'\n")
f.write('\n')
f.write(f' @staticmethod\n')
f.write(f' @functools.cache\n')
f.write(f' def get_abilities(cls: PlayerClass, level=10) -> Tuple[Ability, ...]:\n')
f.write(f' from .class_info import CLASS_ABILITIES\n')
f.write(f' abilities = CLASS_ABILITIES[cls]\n')
f.write(f' return tuple(\n')
f.write(f' a for a, lv in abilities.items()\n')
f.write(f' if lv <= level\n')
f.write(f' )\n')
f.write('\n')
f.write(f' @staticmethod\n')
f.write(f' @functools.cache\n')
f.write(f' def get_equipment(cls: PlayerClass, level=10) -> Tuple[\'EquipmentItem\', ...]:\n')
f.write(f' from .class_info import CLASS_EQUIPMENT\n')
f.write(f' equipment = CLASS_EQUIPMENT[cls]\n')
f.write(f' return tuple(\n')
f.write(f' e for e, lv in equipment.items()\n')
f.write(f' if lv <= level\n')
f.write(f' )\n')
f.write('\n')
f.close()
def write_class_info() -> None:
classes = gen_player_classes()
f = start_python_file('class_info.py')
f.write('from typing import (\n')
f.write(' Dict,\n')
f.write(' Tuple,\n')
f.write(')\n')
f.write('from ..item import EquipmentItem\n')
f.write('from .ability import Ability\n')
f.write('from .equipment import find_equipment\n')
f.write('from .player_class import PlayerClass\n')
f.write('\n\n')
abilities = gen_player_class_abilities()
f.write('CLASS_ABILITIES: Dict[PlayerClass, Dict[Ability, int]] = {\n')
for pc in classes:
f.write(f' PlayerClass.{pc}: {{\n')
for ability, level in abilities[pc].items():
name = cleanup_name(ability)
f.write(f' Ability.{name}: {level},\n')
f.write(' },\n')
f.write('}\n\n\n')
equipments = gen_player_class_equipment()
f.write('CLASS_EQUIPMENT: Dict[PlayerClass, Dict[\'EquipmentItem\', int]] = {\n')
for pc in classes:
f.write(f' PlayerClass.{pc}: {{\n')
for equipment, level in equipments[pc].items():
name = cleanup_name(equipment)
f.write(f' find_equipment(\'{name}\'): {level},\n')
f.write(' },\n')
f.write('}\n')
f.write('\n')
f.close()
#
# PlayerStats
#
_player_stats = None
def gen_player_stats(player_class: str) -> List[List[int]]:
global _player_stats
if _player_stats is not None:
return _player_stats
logging.info('fetching player stats from wiki')
html = get_wiki(f'https://wiki.retro-mmo.com/wiki/{player_class}')
soup = BeautifulSoup(html, 'html.parser')
contents = soup.select('.mw-parser-output')
assert len(contents) == 1
content = contents[0]
table = None
found_stats = False
for child in content.children:
if child.name == 'h2':
span = child.select('span')
if len(span) == 1:
attrs = span[0].get_attribute_list('id')
if attrs == ['Stats']:
found_stats = True
elif found_stats and child.name == 'table':
table = child
break
if table is None:
raise ValueError('could not find Stats table')
tbody = table.select('tbody')[0]
current_level = 1
stats_list = [
[0, 0, 0, 0, 0, 0, 0, 0],
]
for tr in tbody.select('tr'):
tds = tr.select('td')
if len(tds) == 0:
continue
level, *stats = map(int, [td.string for td in tds])
assert level == len(stats_list)
assert len(stats) == 8
stats_list.append(stats)
_player_stats = stats_list
return stats_list
def write_player_stats() -> None:
f = start_python_file('player_stats.py')
f.write('from .player_class import PlayerClass\n')
f.write('from ..stats import Stats\n')
f.write('\n\n')
f.write('STATS_BY_PLAYER_CLASS = {\n')
for pc in gen_player_classes():
f.write(f' PlayerClass.{pc}: [\n')
stats = gen_player_stats(pc)
for row in stats:
f.write(f' Stats(*{tuple(row)}),\n')
f.write(' ],\n')
f.write('}\n')
f.close()
#
# Abilities
#
_abilities = None
def gen_abilities() -> List[str]:
global _abilities
if _abilities is not None:
return _abilities
logging.info('fetching abilities from wiki')
html = get_wiki(f'https://wiki.retro-mmo.com/wiki/Category:Abilities')
soup = BeautifulSoup(html, 'html.parser')
content = soup.select('.mw-category')[0]
lis = content.findAll('li')
abilities = []
for li in lis:
children = li.select('a')
assert len(children) == 1
abilities.append(children[0].string)
return abilities
def write_abilities() -> None:
f = start_python_file('ability.py')
f.write('import enum\n')
f.write('\n\n')
f.write('class Ability(enum.Enum):\n')
f.write('\n')
for ability in gen_abilities():
key = cleanup_name(ability)
val = ability.lower()
f.write(f" {key} = '{val}'\n")
f.write('\n')
f.close()
#
# Equipment
#
_equipment_names = None
_equipment_slots = None
_equipment = None
def gen_equipment_names() -> List[str]:
global _equipment_names
if _equipment_names is not None:
return _equipment_names
logging.info('fetching equipment names from wiki')
html = get_wiki(f'https://wiki.retro-mmo.com/wiki/Category:Equipment_items')
soup = BeautifulSoup(html, 'html.parser')
content = soup.select('.mw-category')[0]
lis = content.findAll('li')
equipment_names = []
for li in lis:
children = li.select('a')
assert len(children) == 1
equipment_names.append(children[0].string)
_equipment_names = equipment_names
return equipment_names
def gen_equipment() -> Dict[str, Any]:
global _equipment
global _equipment_slots
global _equipment_names
if _equipment is not None:
return _equipment
logging.info('fetching equipment from wiki')
equipment = {}
slots = set()
for name in gen_equipment_names():
html = get_wiki(f'https://wiki.retro-mmo.com/wiki/{name}')
soup = BeautifulSoup(html, 'html.parser')
content = soup.select('.retrommo-infobox')[0]
level = None
classes = None
slot = None
stats = [0] * 8
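        # index layout filled in below: 2=strength, 3=defense, 4=agility,
        # 5=intelligence, 6=wisdom, 7=luck (indices 0 and 1 are left untouched here)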
tradable = None
sell = None
for tr in content.findAll('tr'):
tds = tr.findAll('td')
if len(tds) != 2:
continue
key, val = tds
key = key.select('a')[0].string.strip()
if key == 'Class':
classes = [
a.string.strip()
for a in val.select('a')
]
if len(classes) == 0 and val.string.strip() == 'All':
classes = [c for c in gen_player_classes()]
continue
val = val.string.strip()
if key == 'Level':
level = int(val)
elif key == 'Slot':
slots.add(val)
slot = val
elif key == 'Agility':
stats[4] = int(val)
elif key == 'Defense':
stats[3] = int(val)
elif key == 'Intelligence':
stats[5] = int(val)
elif key == 'Luck':
stats[7] = int(val)
elif key == 'Strength':
stats[2] = int(val)
elif key == 'Wisdom':
stats[6] = int(val)
elif key == 'Tradable':
tradable = val == 'Yes'
elif key == 'Sell':
sell = int(val)
attributes = (level, classes, slot, tradable, sell)
assert None not in attributes, f'{name} - {attributes}'
equipment[cleanup_name(name)] = {
'name': name,
'classes': classes,
'stats': stats,
'slot': slot,
'tradable': tradable,
'sell': sell,
}
_equipment_slots = slots
_equipment = equipment
return equipment
def gen_equipment_slots() -> Set[str]:
gen_equipment()
assert _equipment_slots is not None
return _equipment_slots
def write_equipment_slots() -> None:
f = start_python_file('equipment_slot.py')
f.write('import enum\n')
f.write('\n\n')
f.write('class EquipmentSlot(enum.Enum):\n')
for slot in gen_equipment_slots():
slot_name = cleanup_name(slot)
slot = slot.lower()
f.write(f" {slot_name} = '{slot}'\n")
f.write('\n')
def write_equipment() -> None:
by_slot: Dict[str, List[Dict[str, Any]]] = {}
for name, item in gen_equipment().items():
slot = item['slot']
if slot not in by_slot:
by_slot[slot] = []
by_slot[slot].append(item)
f = start_python_file('equipment.py')
f.write('from typing import Optional, Tuple\n')
f.write('import enum\n')
f.write('import functools\n')
f.write('\n')
f.write('from ..item import EquipmentItem\n')
f.write('from ..stats import Stats\n')
f.write('from .equipment_slot import EquipmentSlot\n')
f.write('from .player_class import PlayerClass\n')
f.write('\n\n')
f.write('def find_equipment(name: str) -> EquipmentItem:\n')
f.write(" name = name.replace(\"'\", \"\\'\").title().replace(' ', '')\n")
f.write(' try: return OffHandEquipment[name]\n')
f.write(' except KeyError: pass\n')
f.write(' try: return MainHandEquipment[name]\n')
f.write(' except KeyError: pass\n')
f.write(' try: return HeadEquipment[name]\n')
f.write(' except KeyError: pass\n')
f.write(' try: return BodyEquipment[name]\n')
f.write(' except KeyError: pass\n')
f.write(" raise ValueError(f\'invalid equipment: {name}\')\n")
f.write('\n\n')
for slot_name in by_slot:
classname = cleanup_name(slot_name) + 'Equipment'
f.write(f'class {classname}(EquipmentItem, enum.Enum):\n\n')
f.write(f' @staticmethod\n')
f.write(f' @functools.cache\n')
f.write(f' def by_class(cls: PlayerClass) -> Tuple[{classname}, ...]:\n')
f.write(f' return tuple(\n')
f.write(f' c for c in {classname}\n')
f.write(f' if cls in c.value.classes\n')
f.write(f' )\n')
f.write('\n')
for item in by_slot[slot_name]:
name = item['name'].replace("'", "\\'")
item_name = cleanup_name(item['name'], True)
tradable = item['tradable']
value = item['sell']
classes = item['classes']
stats = item['stats']
slot = cleanup_name(item['slot'])
classes_str = ', '.join(f'PlayerClass.{c}' for c in classes)
f.write(f' {item_name} = (\n')
f.write(f" '{name}',\n")
f.write(f' {tradable}, # tradable\n')
f.write(f' {value}, # sell value\n')
f.write(f' ({classes_str}),\n')
f.write(f' Stats.from_sequence({stats}),\n')
f.write(f' EquipmentSlot.{slot},\n')
f.write(f' )\n')
f.write('\n\n')
f.write('GearType = Tuple[\n')
f.write(' Optional[HeadEquipment],\n')
f.write(' Optional[BodyEquipment],\n')
f.write(' Optional[MainHandEquipment],\n')
f.write(' Optional[OffHandEquipment],\n')
f.write(']\n')
f.write('\n')
f.close()
# TODO: cosmetic items
# TODO: consumable items
#
# All
#
def write_all():
write_player_classes()
write_player_stats()
write_abilities()
write_equipment_slots()
write_equipment()
write_class_info()
if __name__ == '__main__':
write_all()
| [
"bs4.BeautifulSoup",
"logging.info",
"requests.get",
"pathlib.Path"
] | [((797, 837), 'logging.info', 'logging.info', (['f"""writing file {filename}"""'], {}), "(f'writing file {filename}')\n", (809, 837), False, 'import logging\n'), ((1449, 1491), 'logging.info', 'logging.info', (['"""fetching classes from wiki"""'], {}), "('fetching classes from wiki')\n", (1461, 1491), False, 'import logging\n'), ((1576, 1610), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (1589, 1610), False, 'from bs4 import BeautifulSoup\n'), ((6509, 6556), 'logging.info', 'logging.info', (['"""fetching player stats from wiki"""'], {}), "('fetching player stats from wiki')\n", (6521, 6556), False, 'import logging\n'), ((6640, 6674), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (6653, 6674), False, 'from bs4 import BeautifulSoup\n'), ((8379, 8423), 'logging.info', 'logging.info', (['"""fetching abilities from wiki"""'], {}), "('fetching abilities from wiki')\n", (8391, 8423), False, 'import logging\n'), ((8511, 8545), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (8524, 8545), False, 'from bs4 import BeautifulSoup\n'), ((9386, 9436), 'logging.info', 'logging.info', (['"""fetching equipment names from wiki"""'], {}), "('fetching equipment names from wiki')\n", (9398, 9436), False, 'import logging\n'), ((9530, 9564), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (9543, 9564), False, 'from bs4 import BeautifulSoup\n'), ((10053, 10097), 'logging.info', 'logging.info', (['"""fetching equipment from wiki"""'], {}), "('fetching equipment from wiki')\n", (10065, 10097), False, 'import logging\n'), ((526, 552), 'pathlib.Path', 'pathlib.Path', (['"""wiki_cache"""'], {}), "('wiki_cache')\n", (538, 552), False, 'import pathlib\n'), ((661, 678), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (673, 678), False, 'import requests\n'), ((2055, 2089), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (2068, 2089), False, 'from bs4 import BeautifulSoup\n'), ((10257, 10291), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (10270, 10291), False, 'from bs4 import BeautifulSoup\n')] |
import datetime
from floodsystem.plot import plot_water_level_with_fit
from floodsystem.flood import stations_highest_rel_level
from floodsystem.stationdata import build_station_list, update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
stations = build_station_list()
update_water_levels(stations)
six_stations_with_highest_relative_levels = stations_highest_rel_level(stations,6)
five_stations_with_highest_relative_levels = six_stations_with_highest_relative_levels[1:]
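# the [1:] slice drops the single highest station and keeps the next five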
# This section of code plots the graphs with the fitted polynomials for the five stations. Time period = 3 days
for i in five_stations_with_highest_relative_levels:
dt = 3
station, relative_level = i
dates, levels = fetch_measure_levels(station.measure_id, dt=datetime.timedelta(days=dt))
plot_water_level_with_fit(station.name, dates, levels, 4)
| [
"floodsystem.stationdata.build_station_list",
"floodsystem.flood.stations_highest_rel_level",
"datetime.timedelta",
"floodsystem.plot.plot_water_level_with_fit",
"floodsystem.stationdata.update_water_levels"
] | [((273, 293), 'floodsystem.stationdata.build_station_list', 'build_station_list', ([], {}), '()\n', (291, 293), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((294, 323), 'floodsystem.stationdata.update_water_levels', 'update_water_levels', (['stations'], {}), '(stations)\n', (313, 323), False, 'from floodsystem.stationdata import build_station_list, update_water_levels\n'), ((369, 408), 'floodsystem.flood.stations_highest_rel_level', 'stations_highest_rel_level', (['stations', '(6)'], {}), '(stations, 6)\n', (395, 408), False, 'from floodsystem.flood import stations_highest_rel_level\n'), ((794, 851), 'floodsystem.plot.plot_water_level_with_fit', 'plot_water_level_with_fit', (['station.name', 'dates', 'levels', '(4)'], {}), '(station.name, dates, levels, 4)\n', (819, 851), False, 'from floodsystem.plot import plot_water_level_with_fit\n'), ((761, 788), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'dt'}), '(days=dt)\n', (779, 788), False, 'import datetime\n')] |
from example8 import dataBase, Pet, Owner, Toy
pet1 = Pet('7asán')
pet2 = Pet('Malak')
pet3 = Pet('Fiera')
dataBase.session.add_all([pet1, pet2, pet3])
dataBase.session.commit()
pets= Pet.query.all()
print('Show all pets')
print(pets)
pet1 = Pet.query.filter_by(name='7asán').first()
pet2 = Pet.query.filter_by(name='Malak').first()
pet3 = Pet.query.filter_by(name='Fiera').first()
owner1 = Owner('Mónica', pet1.id)
owner2 = Owner('Naim', pet2.id)
owner3 = Owner('Emir', pet3.id)
dataBase.session.add_all([owner1, owner2, owner3])
dataBase.session.commit()
owners = Owner.query.all()
print('Show all owners')
print(owners)
toy1 = Toy('Ball', pet1.id)
toy2 = Toy('Little stick', pet1.id)
toy3 = Toy('Ball', pet2.id)
toy4 = Toy('Little stick', pet2.id)
toy5 = Toy('Ball', pet3.id)
toy6 = Toy('Little stick', pet3.id)
dataBase.session.add_all([toy1, toy2, toy3, toy4, toy5, toy6])
dataBase.session.commit()
toys = Toy.query.all()
print('Show all Toys')
print(toys)
Pet.show_toy()
| [
"example8.Pet",
"example8.dataBase.session.commit",
"example8.Pet.show_toy",
"example8.Owner.query.all",
"example8.Toy",
"example8.Pet.query.filter_by",
"example8.Pet.query.all",
"example8.Toy.query.all",
"example8.dataBase.session.add_all",
"example8.Owner"
] | [((59, 71), 'example8.Pet', 'Pet', (['"""7asán"""'], {}), "('7asán')\n", (62, 71), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((80, 92), 'example8.Pet', 'Pet', (['"""Malak"""'], {}), "('Malak')\n", (83, 92), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((101, 113), 'example8.Pet', 'Pet', (['"""Fiera"""'], {}), "('Fiera')\n", (104, 113), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((117, 161), 'example8.dataBase.session.add_all', 'dataBase.session.add_all', (['[pet1, pet2, pet3]'], {}), '([pet1, pet2, pet3])\n', (141, 161), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((163, 188), 'example8.dataBase.session.commit', 'dataBase.session.commit', ([], {}), '()\n', (186, 188), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((198, 213), 'example8.Pet.query.all', 'Pet.query.all', ([], {}), '()\n', (211, 213), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((415, 439), 'example8.Owner', 'Owner', (['"""Mónica"""', 'pet1.id'], {}), "('Mónica', pet1.id)\n", (420, 439), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((450, 472), 'example8.Owner', 'Owner', (['"""Naim"""', 'pet2.id'], {}), "('Naim', pet2.id)\n", (455, 472), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((483, 505), 'example8.Owner', 'Owner', (['"""Emir"""', 'pet3.id'], {}), "('Emir', pet3.id)\n", (488, 505), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((507, 557), 'example8.dataBase.session.add_all', 'dataBase.session.add_all', (['[owner1, owner2, owner3]'], {}), '([owner1, owner2, owner3])\n', (531, 557), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((559, 584), 'example8.dataBase.session.commit', 'dataBase.session.commit', ([], {}), '()\n', (582, 584), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((597, 614), 'example8.Owner.query.all', 'Owner.query.all', ([], {}), '()\n', (612, 614), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((666, 686), 'example8.Toy', 'Toy', (['"""Ball"""', 'pet1.id'], {}), "('Ball', pet1.id)\n", (669, 686), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((695, 723), 'example8.Toy', 'Toy', (['"""Little stick"""', 'pet1.id'], {}), "('Little stick', pet1.id)\n", (698, 723), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((732, 752), 'example8.Toy', 'Toy', (['"""Ball"""', 'pet2.id'], {}), "('Ball', pet2.id)\n", (735, 752), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((761, 789), 'example8.Toy', 'Toy', (['"""Little stick"""', 'pet2.id'], {}), "('Little stick', pet2.id)\n", (764, 789), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((798, 818), 'example8.Toy', 'Toy', (['"""Ball"""', 'pet3.id'], {}), "('Ball', pet3.id)\n", (801, 818), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((827, 855), 'example8.Toy', 'Toy', (['"""Little stick"""', 'pet3.id'], {}), "('Little stick', pet3.id)\n", (830, 855), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((859, 921), 'example8.dataBase.session.add_all', 'dataBase.session.add_all', (['[toy1, toy2, toy3, toy4, toy5, toy6]'], {}), '([toy1, toy2, toy3, toy4, toy5, toy6])\n', (883, 921), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((923, 948), 'example8.dataBase.session.commit', 'dataBase.session.commit', ([], {}), '()\n', (946, 948), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((959, 974), 'example8.Toy.query.all', 'Toy.query.all', ([], {}), '()\n', (972, 974), False, 'from example8 import 
dataBase, Pet, Owner, Toy\n'), ((1015, 1029), 'example8.Pet.show_toy', 'Pet.show_toy', ([], {}), '()\n', (1027, 1029), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((261, 294), 'example8.Pet.query.filter_by', 'Pet.query.filter_by', ([], {'name': '"""7asán"""'}), "(name='7asán')\n", (280, 294), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((311, 344), 'example8.Pet.query.filter_by', 'Pet.query.filter_by', ([], {'name': '"""Malak"""'}), "(name='Malak')\n", (330, 344), False, 'from example8 import dataBase, Pet, Owner, Toy\n'), ((361, 394), 'example8.Pet.query.filter_by', 'Pet.query.filter_by', ([], {'name': '"""Fiera"""'}), "(name='Fiera')\n", (380, 394), False, 'from example8 import dataBase, Pet, Owner, Toy\n')] |
import sys
import pandas as pd
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
Reads two csv files, containing data about disaster related
messages and the categories of the messages, and combines
them into a single dataframe.
Parameters
----------
messages_filepath : string -> The path to the csv file
containing data about disaster related messages.
categories_filepath : string -> The path to the csv file
containing data about the categories of the messages.
Returns
-------
df -> The DataFrame combining the two datasets.
"""
messages = pd.read_csv(messages_filepath)
categories = pd.read_csv(categories_filepath)
df = pd.merge(messages, categories)
return df
def clean_data(df):
"""
Cleans the data in the disaster response
DataFrame by:
- extracting the 'categories' column from the DataFrame and
expanding it into a separate 'categories' DataFrame;
- getting the list of categories and setting them as column
names in the 'categories' DataFrame;
- setting each value in the 'categories' DataFrame columns
to either '1' (if the message belongs to this category) or
'0' (if the message does not belong to this category);
- converting the values in the 'categories' DataFrame from
string to int and replacing erroneous values of '2' to '0';
- dropping the 'categories' column from the original DataFrame;
- merging the original DataFrame with the new 'categories'
DataFrame;
- dropping duplicates in the merged DataFrame.
Parameters
----------
df: DataFrame -> The Dataframe containing data about the disaster
related messages and their categories.
Returns
-------
df_clean -> The cleaned and processed DataFrame.
"""
categories = df.categories.str.split(';', expand=True)
row = categories.loc[0]
category_colnames = row.str.rstrip('-10')
categories.columns = category_colnames
for column in categories:
# set each value to be the last character of the string
categories[column] = categories[column].apply(lambda x: x.split('-')[1])
# convert column from string to numeric
categories[column] = categories[column].astype('int')
categories = categories.replace({2: 0})
df.drop(columns='categories', inplace=True)
df_clean = pd.concat([df, categories], axis=1)
df_clean.drop_duplicates(inplace=True)
return df_clean
def save_data(df, database_filename):
"""
Saves the cleaned DataFrame as a SQLite database.
Parameters
----------
df: DataFrame -> The cleaned DataFrame.
database_filename: string -> The name of the SQLite database.
Returns
-------
None.
"""
engine = create_engine('sqlite:///' + database_filename)
df.to_sql('messages', engine, if_exists='replace', index=False)
def main():
"""
Loads, cleans, and saves the data related to disaster
response messages in a format ready for applying machine
learning tasks.
Parameters
----------
None.
Returns
-------
None.
"""
if len(sys.argv) == 4:
messages_filepath, categories_filepath, database_filepath = sys.argv[1:]
print('Loading data...\n MESSAGES: {}\n CATEGORIES: {}'
.format(messages_filepath, categories_filepath))
df = load_data(messages_filepath, categories_filepath)
print('Cleaning data...')
df = clean_data(df)
print('Saving data...\n DATABASE: {}'.format(database_filepath))
save_data(df, database_filepath)
print('Cleaned data saved to database!')
else:
print('Please provide the filepaths of the messages and categories ' \
'datasets as the first and second argument respectively, as ' \
'well as the filepath of the database to save the cleaned data ' \
'to as the third argument. \n\nExample: python process_data.py ' \
'disaster_messages.csv disaster_categories.csv ' \
'DisasterResponse.db')
if __name__ == '__main__':
main() | [
"pandas.merge",
"pandas.concat",
"pandas.read_csv",
"sqlalchemy.create_engine"
] | [((665, 695), 'pandas.read_csv', 'pd.read_csv', (['messages_filepath'], {}), '(messages_filepath)\n', (676, 695), True, 'import pandas as pd\n'), ((713, 745), 'pandas.read_csv', 'pd.read_csv', (['categories_filepath'], {}), '(categories_filepath)\n', (724, 745), True, 'import pandas as pd\n'), ((755, 785), 'pandas.merge', 'pd.merge', (['messages', 'categories'], {}), '(messages, categories)\n', (763, 785), True, 'import pandas as pd\n'), ((2448, 2483), 'pandas.concat', 'pd.concat', (['[df, categories]'], {'axis': '(1)'}), '([df, categories], axis=1)\n', (2457, 2483), True, 'import pandas as pd\n'), ((2849, 2896), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + database_filename)"], {}), "('sqlite:///' + database_filename)\n", (2862, 2896), False, 'from sqlalchemy import create_engine\n')] |
from __future__ import division
import numpy as np
import pysynphot as S
__all__ = ["loadBandPass", "averageFnu"]
filter_path = "/Users/shangguan/Softwares/my_module/sgPhot/filters/"
def loadBandPass(filter_name, band_name=None, wave_unit="micron", band_unit="angstrom"):
"""
Load the bandpass. The filter position and names are obtained and
specified by myself.
Parameters
----------
filter_name : string
        The name of the filter.
band_name (optional) : string
The name of the band.
wave_unit : string, default: "micron"
The unit of the wavelength in the filter file.
band_unit : string, default: "angstrom"
The unit of the wavelength used in the bandpass data.
Returns
-------
bp : pysynphot bandpass
The bandpass generated from pysynphot.
Notes
-----
None.
"""
bp_array = np.genfromtxt(filter_path+"{0}.dat".format(filter_name))
bp = S.ArrayBandpass(bp_array[:, 0], bp_array[:, 1], waveunits=wave_unit,
name=band_name)
bp.convert(band_unit)
return bp
def averageFnu(wavelength, flux, bandpass, wave_units="micron", tol=1e-3, QuietMode=False):
"""
Calculate the average flux density (fnu) based on the input spectrum
(wavelength and flux) and bandpass. The input bandpass should be a photon
response function:
fnu = integrate (fnu * bp dnu / nu) / integrate (bp dnu / nu)
Parameters
----------
wavelength : array like
The array of the wavelength of the spectrum.
flux : array like
The array of the flux density of the spectrum.
bandpass : pysynphot bandpass
The filter response curve, which should be a photon response function.
wave_units : string
The units of the wavelength that should be matched for both spectrum and
bandpass.
tol : float; default: 0.001
The tolerance of the maximum response outside the overlapping wavelength.
QuietMode : bool; default: False
Do not raise warning or print anything, if True.
Returns
-------
fnu : float
The band-average flux density.
Notes
-----
None.
"""
bandpass.convert(wave_units)
    #-> Find the overlapping wavelength regime.
wave_bp = bandpass.wave
wmin = np.max([np.nanmin(wavelength), np.nanmin(wave_bp)])
wmax = np.min([np.nanmax(wavelength), np.nanmax(wave_bp)])
#-> Check the throughput
thrp = bandpass.throughput
thrp_max = np.max(thrp)
fltr_left = wave_bp <= wmin
fltr_rght = wave_bp >= wmax
if np.sum(fltr_left) > 0:
thrp_left = np.max(thrp[fltr_left])
else:
thrp_left = 0
if np.sum(fltr_rght) > 0:
thrp_rght = np.max(thrp[fltr_rght])
else:
thrp_rght = 0
thrp_out = np.max([thrp_left, thrp_rght])
if ((thrp_out/thrp_max) > tol) & (not QuietMode):
raise Warning("Warning [averageFnu]: There may be significant emission missed due to the wavelength mismatch!")
#-> Calculate the average flux density
fltr = (wavelength >= wmin) & (wavelength <= wmax)
wave = wavelength[fltr]
flux = flux[fltr]
thrp = bandpass.sample(wave)
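    # the integrals below are evaluated in wavelength; since dnu/nu = -dlambda/lambda,
    # weighting the integrand by thrp/wave reproduces the bp*dnu/nu average from the docstring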
signal = np.trapz(thrp/wave*flux, x=wave)
norm = np.trapz(thrp/wave, x=wave)
fnu = signal / norm
return fnu
| [
"numpy.trapz",
"numpy.max",
"numpy.sum",
"numpy.nanmax",
"pysynphot.ArrayBandpass",
"numpy.nanmin"
] | [((952, 1041), 'pysynphot.ArrayBandpass', 'S.ArrayBandpass', (['bp_array[:, 0]', 'bp_array[:, 1]'], {'waveunits': 'wave_unit', 'name': 'band_name'}), '(bp_array[:, 0], bp_array[:, 1], waveunits=wave_unit, name=\n band_name)\n', (967, 1041), True, 'import pysynphot as S\n'), ((2509, 2521), 'numpy.max', 'np.max', (['thrp'], {}), '(thrp)\n', (2515, 2521), True, 'import numpy as np\n'), ((2813, 2843), 'numpy.max', 'np.max', (['[thrp_left, thrp_rght]'], {}), '([thrp_left, thrp_rght])\n', (2819, 2843), True, 'import numpy as np\n'), ((3212, 3248), 'numpy.trapz', 'np.trapz', (['(thrp / wave * flux)'], {'x': 'wave'}), '(thrp / wave * flux, x=wave)\n', (3220, 3248), True, 'import numpy as np\n'), ((3258, 3287), 'numpy.trapz', 'np.trapz', (['(thrp / wave)'], {'x': 'wave'}), '(thrp / wave, x=wave)\n', (3266, 3287), True, 'import numpy as np\n'), ((2593, 2610), 'numpy.sum', 'np.sum', (['fltr_left'], {}), '(fltr_left)\n', (2599, 2610), True, 'import numpy as np\n'), ((2636, 2659), 'numpy.max', 'np.max', (['thrp[fltr_left]'], {}), '(thrp[fltr_left])\n', (2642, 2659), True, 'import numpy as np\n'), ((2699, 2716), 'numpy.sum', 'np.sum', (['fltr_rght'], {}), '(fltr_rght)\n', (2705, 2716), True, 'import numpy as np\n'), ((2742, 2765), 'numpy.max', 'np.max', (['thrp[fltr_rght]'], {}), '(thrp[fltr_rght])\n', (2748, 2765), True, 'import numpy as np\n'), ((2327, 2348), 'numpy.nanmin', 'np.nanmin', (['wavelength'], {}), '(wavelength)\n', (2336, 2348), True, 'import numpy as np\n'), ((2350, 2368), 'numpy.nanmin', 'np.nanmin', (['wave_bp'], {}), '(wave_bp)\n', (2359, 2368), True, 'import numpy as np\n'), ((2390, 2411), 'numpy.nanmax', 'np.nanmax', (['wavelength'], {}), '(wavelength)\n', (2399, 2411), True, 'import numpy as np\n'), ((2413, 2431), 'numpy.nanmax', 'np.nanmax', (['wave_bp'], {}), '(wave_bp)\n', (2422, 2431), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'system.ui'
#
# Created by: PyQt5 UI code generator 5.12
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(800, 622)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.widget = QtWidgets.QWidget(self.centralwidget)
self.widget.setGeometry(QtCore.QRect(10, 10, 120, 471))
self.widget.setObjectName("widget")
self.layoutWidget = QtWidgets.QWidget(self.widget)
self.layoutWidget.setGeometry(QtCore.QRect(10, 20, 100, 120))
self.layoutWidget.setObjectName("layoutWidget")
self.gridLayout = QtWidgets.QGridLayout(self.layoutWidget)
self.gridLayout.setContentsMargins(0, 0, 0, 0)
self.gridLayout.setObjectName("gridLayout")
self.commodity_btn = QtWidgets.QPushButton(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.commodity_btn.setFont(font)
self.commodity_btn.setObjectName("commodity_btn")
self.gridLayout.addWidget(self.commodity_btn, 0, 0, 1, 1)
self.employee_btn = QtWidgets.QPushButton(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.employee_btn.setFont(font)
self.employee_btn.setObjectName("employee_btn")
self.gridLayout.addWidget(self.employee_btn, 1, 0, 1, 1)
self.receipt_btn = QtWidgets.QPushButton(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.receipt_btn.setFont(font)
self.receipt_btn.setObjectName("receipt_btn")
self.gridLayout.addWidget(self.receipt_btn, 2, 0, 1, 1)
self.detail_btn = QtWidgets.QPushButton(self.layoutWidget)
font = QtGui.QFont()
font.setPointSize(13)
font.setBold(True)
font.setWeight(75)
self.detail_btn.setFont(font)
self.detail_btn.setObjectName("detail_btn")
self.gridLayout.addWidget(self.detail_btn, 3, 0, 1, 1)
self.widget_2 = QtWidgets.QWidget(self.centralwidget)
self.widget_2.setGeometry(QtCore.QRect(150, 60, 631, 491))
self.widget_2.setObjectName("widget_2")
self.tableView = QtWidgets.QTableView(self.widget_2)
self.tableView.setGeometry(QtCore.QRect(10, 10, 601, 431))
self.tableView.setObjectName("tableView")
self.next_btn = QtWidgets.QPushButton(self.widget_2)
self.next_btn.setGeometry(QtCore.QRect(530, 450, 75, 23))
self.next_btn.setObjectName("next_btn")
self.pre_btn = QtWidgets.QPushButton(self.widget_2)
self.pre_btn.setGeometry(QtCore.QRect(10, 450, 75, 23))
self.pre_btn.setObjectName("pre_btn")
self.keyword = QtWidgets.QLineEdit(self.centralwidget)
self.keyword.setGeometry(QtCore.QRect(161, 22, 133, 20))
self.keyword.setObjectName("keyword")
self.feature = QtWidgets.QComboBox(self.centralwidget)
self.feature.setGeometry(QtCore.QRect(297, 22, 121, 20))
self.feature.setObjectName("feature")
self.search_btn = QtWidgets.QPushButton(self.centralwidget)
self.search_btn.setGeometry(QtCore.QRect(424, 21, 75, 23))
self.search_btn.setObjectName("search_btn")
self.update_btn = QtWidgets.QPushButton(self.centralwidget)
self.update_btn.setGeometry(QtCore.QRect(505, 21, 75, 23))
self.update_btn.setObjectName("update_btn")
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 23))
self.menubar.setObjectName("menubar")
self.commodity = QtWidgets.QMenu(self.menubar)
self.commodity.setObjectName("commodity")
self.employee = QtWidgets.QMenu(self.menubar)
self.employee.setObjectName("employee")
self.receipt = QtWidgets.QMenu(self.menubar)
self.receipt.setObjectName("receipt")
self.detail = QtWidgets.QMenu(self.menubar)
self.detail.setObjectName("detail")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.actionnew = QtWidgets.QAction(MainWindow)
self.actionnew.setObjectName("actionnew")
self.actiondelete = QtWidgets.QAction(MainWindow)
self.actiondelete.setObjectName("actiondelete")
self.actionnew_2 = QtWidgets.QAction(MainWindow)
self.actionnew_2.setObjectName("actionnew_2")
self.actiondelete_2 = QtWidgets.QAction(MainWindow)
self.actiondelete_2.setObjectName("actiondelete_2")
self.actionnew_3 = QtWidgets.QAction(MainWindow)
self.actionnew_3.setObjectName("actionnew_3")
self.actiondelete_3 = QtWidgets.QAction(MainWindow)
self.actiondelete_3.setObjectName("actiondelete_3")
self.actionnew_4 = QtWidgets.QAction(MainWindow)
self.actionnew_4.setObjectName("actionnew_4")
self.actiondelete_4 = QtWidgets.QAction(MainWindow)
self.actiondelete_4.setObjectName("actiondelete_4")
self.commodity.addAction(self.actionnew)
self.commodity.addAction(self.actiondelete)
self.employee.addAction(self.actionnew_2)
self.employee.addAction(self.actiondelete_2)
self.receipt.addAction(self.actionnew_3)
self.receipt.addAction(self.actiondelete_3)
self.detail.addAction(self.actionnew_4)
self.detail.addAction(self.actiondelete_4)
self.menubar.addAction(self.commodity.menuAction())
self.menubar.addAction(self.employee.menuAction())
self.menubar.addAction(self.receipt.menuAction())
self.menubar.addAction(self.detail.menuAction())
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "超市管理主界面"))
self.commodity_btn.setText(_translate("MainWindow", "商品表"))
self.employee_btn.setText(_translate("MainWindow", "职员表"))
self.receipt_btn.setText(_translate("MainWindow", "收据单表"))
self.detail_btn.setText(_translate("MainWindow", "收据明细表"))
self.next_btn.setText(_translate("MainWindow", "下一页"))
self.pre_btn.setText(_translate("MainWindow", "上一页"))
self.search_btn.setText(_translate("MainWindow", "搜索"))
self.update_btn.setText(_translate("MainWindow", "刷新"))
self.commodity.setTitle(_translate("MainWindow", "商品"))
self.employee.setTitle(_translate("MainWindow", "职员"))
self.receipt.setTitle(_translate("MainWindow", "收据"))
self.detail.setTitle(_translate("MainWindow", "收据细节"))
self.actionnew.setText(_translate("MainWindow", "new"))
self.actiondelete.setText(_translate("MainWindow", "delete"))
self.actionnew_2.setText(_translate("MainWindow", "new"))
self.actiondelete_2.setText(_translate("MainWindow", "delete"))
self.actionnew_3.setText(_translate("MainWindow", "new"))
self.actiondelete_3.setText(_translate("MainWindow", "delete"))
self.actionnew_4.setText(_translate("MainWindow", "new"))
self.actiondelete_4.setText(_translate("MainWindow", "delete"))
| [
"PyQt5.QtWidgets.QWidget",
"PyQt5.QtWidgets.QLineEdit",
"PyQt5.QtWidgets.QMenu",
"PyQt5.QtGui.QFont",
"PyQt5.QtWidgets.QComboBox",
"PyQt5.QtCore.QMetaObject.connectSlotsByName",
"PyQt5.QtWidgets.QTableView",
"PyQt5.QtWidgets.QAction",
"PyQt5.QtCore.QRect",
"PyQt5.QtWidgets.QGridLayout",
"PyQt5.QtWidgets.QStatusBar",
"PyQt5.QtWidgets.QPushButton",
"PyQt5.QtWidgets.QMenuBar"
] | [((414, 443), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['MainWindow'], {}), '(MainWindow)\n', (431, 443), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((524, 561), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (541, 561), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((698, 728), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.widget'], {}), '(self.widget)\n', (715, 728), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((881, 921), 'PyQt5.QtWidgets.QGridLayout', 'QtWidgets.QGridLayout', (['self.layoutWidget'], {}), '(self.layoutWidget)\n', (902, 921), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1058, 1098), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.layoutWidget'], {}), '(self.layoutWidget)\n', (1079, 1098), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1114, 1127), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1125, 1127), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1405, 1445), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.layoutWidget'], {}), '(self.layoutWidget)\n', (1426, 1445), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1461, 1474), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1472, 1474), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1747, 1787), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.layoutWidget'], {}), '(self.layoutWidget)\n', (1768, 1787), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((1803, 1816), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (1814, 1816), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2084, 2124), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.layoutWidget'], {}), '(self.layoutWidget)\n', (2105, 2124), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2140, 2153), 'PyQt5.QtGui.QFont', 'QtGui.QFont', ([], {}), '()\n', (2151, 2153), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2415, 2452), 'PyQt5.QtWidgets.QWidget', 'QtWidgets.QWidget', (['self.centralwidget'], {}), '(self.centralwidget)\n', (2432, 2452), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2593, 2628), 'PyQt5.QtWidgets.QTableView', 'QtWidgets.QTableView', (['self.widget_2'], {}), '(self.widget_2)\n', (2613, 2628), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2770, 2806), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget_2'], {}), '(self.widget_2)\n', (2791, 2806), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2944, 2980), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.widget_2'], {}), '(self.widget_2)\n', (2965, 2980), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3114, 3153), 'PyQt5.QtWidgets.QLineEdit', 'QtWidgets.QLineEdit', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3133, 3153), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3288, 3327), 'PyQt5.QtWidgets.QComboBox', 'QtWidgets.QComboBox', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3307, 3327), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3465, 3506), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3486, 3506), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3652, 3693), 'PyQt5.QtWidgets.QPushButton', 'QtWidgets.QPushButton', (['self.centralwidget'], {}), '(self.centralwidget)\n', (3673, 3693), False, 'from PyQt5 import 
QtCore, QtGui, QtWidgets\n'), ((3892, 3922), 'PyQt5.QtWidgets.QMenuBar', 'QtWidgets.QMenuBar', (['MainWindow'], {}), '(MainWindow)\n', (3910, 3922), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4056, 4085), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (4071, 4085), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4160, 4189), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (4175, 4189), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4261, 4290), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (4276, 4290), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4359, 4388), 'PyQt5.QtWidgets.QMenu', 'QtWidgets.QMenu', (['self.menubar'], {}), '(self.menubar)\n', (4374, 4388), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4502, 4534), 'PyQt5.QtWidgets.QStatusBar', 'QtWidgets.QStatusBar', (['MainWindow'], {}), '(MainWindow)\n', (4522, 4534), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4658, 4687), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (4675, 4687), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4766, 4795), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (4783, 4795), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4879, 4908), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (4896, 4908), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((4993, 5022), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (5010, 5022), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5110, 5139), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (5127, 5139), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5224, 5253), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (5241, 5253), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5341, 5370), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (5358, 5370), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((5455, 5484), 'PyQt5.QtWidgets.QAction', 'QtWidgets.QAction', (['MainWindow'], {}), '(MainWindow)\n', (5472, 5484), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((6231, 6280), 'PyQt5.QtCore.QMetaObject.connectSlotsByName', 'QtCore.QMetaObject.connectSlotsByName', (['MainWindow'], {}), '(MainWindow)\n', (6268, 6280), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((594, 624), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(120)', '(471)'], {}), '(10, 10, 120, 471)\n', (606, 624), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((767, 797), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(20)', '(100)', '(120)'], {}), '(10, 20, 100, 120)\n', (779, 797), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2487, 2518), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(150)', '(60)', '(631)', '(491)'], {}), '(150, 60, 631, 491)\n', (2499, 2518), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2664, 2694), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(10)', '(601)', '(431)'], {}), '(10, 10, 601, 431)\n', (2676, 2694), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((2841, 2871), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(530)', '(450)', '(75)', '(23)'], {}), '(530, 450, 75, 23)\n', (2853, 
2871), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3014, 3043), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(10)', '(450)', '(75)', '(23)'], {}), '(10, 450, 75, 23)\n', (3026, 3043), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3187, 3217), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(161)', '(22)', '(133)', '(20)'], {}), '(161, 22, 133, 20)\n', (3199, 3217), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3361, 3391), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(297)', '(22)', '(121)', '(20)'], {}), '(297, 22, 121, 20)\n', (3373, 3391), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3543, 3572), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(424)', '(21)', '(75)', '(23)'], {}), '(424, 21, 75, 23)\n', (3555, 3572), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3730, 3759), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(505)', '(21)', '(75)', '(23)'], {}), '(505, 21, 75, 23)\n', (3742, 3759), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n'), ((3956, 3983), 'PyQt5.QtCore.QRect', 'QtCore.QRect', (['(0)', '(0)', '(800)', '(23)'], {}), '(0, 0, 800, 23)\n', (3968, 3983), False, 'from PyQt5 import QtCore, QtGui, QtWidgets\n')] |
import torch
import numpy as np
from eet import EETGPT2Model
from transformers import GPT2Model
import time
using_half = True
prompt_seq_len = 512
batch = 5
max_seq_len = 1024
loop = 10
def main():
    # Construct the input data; in a real application the inputs would be tokens
    input = np.random.randint(1000, 9000, prompt_seq_len * batch, dtype="int64")
    inputs = np.random.randint(1000, 9000, 1 * batch, dtype="int64")
# prompt context
input_full_decoder = torch.from_numpy(input).long().reshape(batch, prompt_seq_len).cuda()
input_inc_decoder = torch.from_numpy(inputs).long().reshape(batch, 1).cuda()
data_type = torch.float32
if using_half:
data_type = torch.float16
# load model
    eet_model = EETGPT2Model.from_pretrained('gpt2', max_batch=batch, full_seq_len=prompt_seq_len, data_type=data_type)
torch_model = GPT2Model.from_pretrained('gpt2').cuda()
if using_half:
torch_model =torch_model.half()
attention_mask = None
# prediction
torch.cuda.synchronize()
t1 = time.perf_counter()
'''
    first_pass marks whether the generation task is at its first step, i.e. whether we are running inference on the prompt. True means prompt inference, False means incremental generation inference.
    Because EET does not return past_key_value (all state from the previous step is kept internally), past_key_value cannot be used to tell the two cases apart, hence this extra parameter.
'''
for j in range(loop):
input_ids = input_full_decoder
first_pass = True
for i in range(max_seq_len-prompt_seq_len):
            res_eet = eet_model(input_ids, first_pass=first_pass, attention_mask=attention_mask)
if first_pass:
first_pass = False
input_ids = input_inc_decoder
torch.cuda.synchronize()
t2 = time.perf_counter()
print('Time for EET : ', t2 - t1)
torch.cuda.synchronize()
t3 = time.perf_counter()
for j in range(loop):
input_ids = input_full_decoder
past_key_values = None
for i in range(max_seq_len-prompt_seq_len):
with torch.no_grad():
                res_torch = torch_model(input_ids, past_key_values=past_key_values, attention_mask=attention_mask)
past_key_values = res_torch.past_key_values
input_ids = input_inc_decoder
torch.cuda.synchronize()
t4 = time.perf_counter()
print('Time for torch : ', t4 - t3)
print('SpeedUp is : ', (t4 - t3)/(t2- t1))
if __name__ == '__main__':
main()
| [
"transformers.GPT2Model.from_pretrained",
"time.perf_counter",
"torch.from_numpy",
"eet.EETGPT2Model.from_pretrained",
"torch.cuda.synchronize",
"numpy.random.randint",
"torch.no_grad"
] | [((280, 348), 'numpy.random.randint', 'np.random.randint', (['(1000)', '(9000)', '(prompt_seq_len * batch)'], {'dtype': '"""int64"""'}), "(1000, 9000, prompt_seq_len * batch, dtype='int64')\n", (297, 348), True, 'import numpy as np\n'), ((360, 415), 'numpy.random.randint', 'np.random.randint', (['(1000)', '(9000)', '(1 * batch)'], {'dtype': '"""int64"""'}), "(1000, 9000, 1 * batch, dtype='int64')\n", (377, 415), True, 'import numpy as np\n'), ((739, 847), 'eet.EETGPT2Model.from_pretrained', 'EETGPT2Model.from_pretrained', (['"""gpt2"""'], {'max_batch': 'batch', 'full_seq_len': 'prompt_seq_len', 'data_type': 'data_type'}), "('gpt2', max_batch=batch, full_seq_len=\n prompt_seq_len, data_type=data_type)\n", (767, 847), False, 'from eet import EETGPT2Model\n'), ((1020, 1044), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1042, 1044), False, 'import torch\n'), ((1055, 1074), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1072, 1074), False, 'import time\n'), ((1606, 1630), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1628, 1630), False, 'import torch\n'), ((1641, 1660), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1658, 1660), False, 'import time\n'), ((1709, 1733), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (1731, 1733), False, 'import torch\n'), ((1744, 1763), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1761, 1763), False, 'import time\n'), ((2174, 2198), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (2196, 2198), False, 'import torch\n'), ((2209, 2228), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2226, 2228), False, 'import time\n'), ((866, 899), 'transformers.GPT2Model.from_pretrained', 'GPT2Model.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (891, 899), False, 'from transformers import GPT2Model\n'), ((1934, 1949), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1947, 1949), False, 'import torch\n'), ((461, 484), 'torch.from_numpy', 'torch.from_numpy', (['input'], {}), '(input)\n', (477, 484), False, 'import torch\n'), ((557, 581), 'torch.from_numpy', 'torch.from_numpy', (['inputs'], {}), '(inputs)\n', (573, 581), False, 'import torch\n')] |
# This code is to get the tephigrams from Indian Meteorological Department website
# For any issues please contact KVNG Vikram
# The address of the sonde image for the required location can be selected from the map http://satellite.imd.gov.in/map_skm2.html
# Use that address below (keep the http:// prefix)
address = 'http://satellite.imd.gov.in/img/Thiruvanantapurum.gif'
import requests
from PIL import Image
response = requests.get(address, stream=True).raw
im = Image.open(response)
im.show()
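# Illustrative addition (not part of the original script): keep a local copy of
# the downloaded tephigram as well. The output filename below is an arbitrary
# assumption chosen for this example.
im.save('tephigram.gif')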
| [
"PIL.Image.open",
"requests.get"
] | [((451, 471), 'PIL.Image.open', 'Image.open', (['response'], {}), '(response)\n', (461, 471), False, 'from PIL import Image\n'), ((407, 441), 'requests.get', 'requests.get', (['address'], {'stream': '(True)'}), '(address, stream=True)\n', (419, 441), False, 'import requests\n')] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name='estore',
version='0.0.3',
description='Meta package for estore packages',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/lostwire/estore',
author='Jnxy',
author_email='<EMAIL>',
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Framework :: AsyncIO",
"Environment :: Web Environment",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Topic :: Database" ],
extras_require = {
"client": [ "estore-client" ],
"server": [ "estore-server" ],
"all": [ "estore-client", "estore-server" ],
},
install_requires = [
'estore-base'
])
| [
"setuptools.setup"
] | [((88, 757), 'setuptools.setup', 'setuptools.setup', ([], {'name': '"""estore"""', 'version': '"""0.0.3"""', 'description': '"""Meta package for estore packages"""', 'long_description': 'long_description', 'long_description_content_type': '"""text/markdown"""', 'url': '"""https://github.com/lostwire/estore"""', 'author': '"""Jnxy"""', 'author_email': '"""<EMAIL>"""', 'classifiers': "['Development Status :: 2 - Pre-Alpha', 'Framework :: AsyncIO',\n 'Environment :: Web Environment',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3', 'Topic :: Database']", 'extras_require': "{'client': ['estore-client'], 'server': ['estore-server'], 'all': [\n 'estore-client', 'estore-server']}", 'install_requires': "['estore-base']"}), "(name='estore', version='0.0.3', description=\n 'Meta package for estore packages', long_description=long_description,\n long_description_content_type='text/markdown', url=\n 'https://github.com/lostwire/estore', author='Jnxy', author_email=\n '<EMAIL>', classifiers=['Development Status :: 2 - Pre-Alpha',\n 'Framework :: AsyncIO', 'Environment :: Web Environment',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3', 'Topic :: Database'],\n extras_require={'client': ['estore-client'], 'server': ['estore-server'\n ], 'all': ['estore-client', 'estore-server']}, install_requires=[\n 'estore-base'])\n", (104, 757), False, 'import setuptools\n')] |
from datetime import date
def dados():
ano_nascimento = int(input('Qual o seu ano de nascimento?\n>'))
idade = date.today().year - ano_nascimento
print(f'Como você tem {idade} anos,')
print(f'você está alocado na categoria: \033[1;31m{categoria_dados(idade)}\033[m.')
def categoria_dados(idade):
if idade <= 9:
categoria = 'MIRIM'
elif idade <= 14:
categoria = 'INFANTIL'
elif idade <= 19:
categoria = 'JUNIOR'
elif idade <= 20:
categoria = 'SÊNIOR'
else:
categoria = 'MASTER'
return categoria
dados()
| [
"datetime.date.today"
] | [((121, 133), 'datetime.date.today', 'date.today', ([], {}), '()\n', (131, 133), False, 'from datetime import date\n')] |
# Generated by Django 2.2.16 on 2020-09-16 14:41
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import simple_history.models
class Migration(migrations.Migration):
initial = True
dependencies = [
('identities', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.SeparateDatabaseAndState(
state_operations=[
migrations.CreateModel(
name='Trait',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True,
serialize=False, verbose_name='ID')),
('trait_key', models.CharField(max_length=200)),
('value_type', models.CharField(blank=True,
choices=[('int', 'Integer'),
('unicode', 'String'),
('bool', 'Boolean'),
('float', 'Float')],
default='unicode',
max_length=10, null=True)),
('boolean_value', models.NullBooleanField()),
('integer_value', models.IntegerField(blank=True, null=True)),
('string_value',
models.CharField(blank=True, max_length=2000, null=True)),
('float_value', models.FloatField(blank=True, null=True)),
('created_date', models.DateTimeField(auto_now_add=True,
verbose_name='DateCreated')),
('identity',
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE,
related_name='identity_traits',
to='identities.Identity')),
],
options={
'verbose_name_plural': 'User Traits',
'ordering': ['id'],
'unique_together': {('trait_key', 'identity')},
'db_table': 'environments_trait'
},
),
],
database_operations=[]
)
]
| [
"django.db.models.FloatField",
"django.db.models.IntegerField",
"django.db.models.NullBooleanField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.swappable_dependency",
"django.db.models.CharField"
] | [((317, 374), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (348, 374), False, 'from django.db import migrations, models\n'), ((612, 705), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (628, 705), False, 'from django.db import migrations, models\n'), ((790, 822), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (806, 822), False, 'from django.db import migrations, models\n'), ((864, 1039), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'choices': "[('int', 'Integer'), ('unicode', 'String'), ('bool', 'Boolean'), ('float',\n 'Float')]", 'default': '"""unicode"""', 'max_length': '(10)', 'null': '(True)'}), "(blank=True, choices=[('int', 'Integer'), ('unicode',\n 'String'), ('bool', 'Boolean'), ('float', 'Float')], default='unicode',\n max_length=10, null=True)\n", (880, 1039), False, 'from django.db import migrations, models\n'), ((1439, 1464), 'django.db.models.NullBooleanField', 'models.NullBooleanField', ([], {}), '()\n', (1462, 1464), False, 'from django.db import migrations, models\n'), ((1509, 1551), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1528, 1551), False, 'from django.db import migrations, models\n'), ((1620, 1676), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(2000)', 'null': '(True)'}), '(blank=True, max_length=2000, null=True)\n', (1636, 1676), False, 'from django.db import migrations, models\n'), ((1719, 1759), 'django.db.models.FloatField', 'models.FloatField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1736, 1759), False, 'from django.db import migrations, models\n'), ((1803, 1870), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""DateCreated"""'}), "(auto_now_add=True, verbose_name='DateCreated')\n", (1823, 1870), False, 'from django.db import migrations, models\n'), ((1997, 2122), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""identity_traits"""', 'to': '"""identities.Identity"""'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='identity_traits', to='identities.Identity')\n", (2014, 2122), False, 'from django.db import migrations, models\n')] |
from math import *
from OpenGL.GL import *
from OpenGL.GLU import *
from lldbvis.settings import constants
from lldbvis.settings import *
from lldbvis.tree.label import Label
from lldbvis.util.material import Material
from lldbvis.util.vectors import Vector3
class NodeGeometry:
def __init__(self, node, radius=constants.DEFAULT_OPENGL_NODE_RADIUS):
self.position = Vector3()
self.radius = radius
self.child_distance = 0
self.acceleration = Vector3()
self.velocity = Vector3()
self.material = constants.OPENGL_NODE_MATERIAL
self.collapsed = False
self.node = node
self.label = Label(self.node, Vector3(self.x, self.y, self.z))
@property
def color(self):
if self.node.isProcessNode():
return ColorScheme.PROCESS_NODE.value
elif self.node.isThreadNode():
return ColorScheme.THREAD_NODE.value
elif self.node.isFrameNode():
return ColorScheme.FRAME_NODE.value
elif self.node.isValueNode():
return ColorScheme.VALUE_NODE.value
return ColorScheme.DEFAULT.value
@property
def absolutePosition(self):
pos = self.position.__copy__()
if not self.node.isRoot():
pos += self.node.parent.geom.absolutePosition
return pos
@property
def x(self):
return self.position.x
@property
def y(self):
return self.position.y
@property
def z(self):
return self.position.z
@property
def absoluteX(self):
return self.absolutePosition.x
@property
def absoluteY(self):
return self.absolutePosition.y
@property
def absoluteZ(self):
return self.absolutePosition.z
def toggleCollapsed(self):
self.collapsed = not self.collapsed
def _preOutline(self):
glPushAttrib(GL_ALL_ATTRIB_BITS)
glEnable(GL_LIGHTING)
glClearStencil(0)
glClear(GL_STENCIL_BUFFER_BIT)
glEnable(GL_STENCIL_TEST)
glStencilFunc(GL_ALWAYS, 1, 0xffff)
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
def _postOutline(self, quadric):
glDisable(GL_LIGHTING)
glStencilFunc(GL_NOTEQUAL, 1, 0xffff)
glStencilOp(GL_KEEP, GL_KEEP, GL_REPLACE)
glLineWidth(3.0)
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
glColor3f(1, 1, 1)
gluSphere(quadric, self.radius, constants.OPENGL_NODE_SPHERE_SLICES, constants.OPENGL_NODE_SPHERE_STACKS)
glPopAttrib()
def _drawCylinder(self, pos, radius, subdivisions, quadric):
v1 = pos.__copy__()
n1 = v1.unit()
n2 = Vector3.unitZ()
angle = n1.angle(n2)
if abs(angle) < 0.001:
angle = 0
axis = -n1.cross(n2).unit()
angle = angle * 180 / pi
glRotatef(angle, axis.x, axis.y, axis.z)
gluQuadricOrientation(quadric, GLU_OUTSIDE)
gluCylinder(quadric, radius, radius, v1.length(), subdivisions, 1)
def draw(self, widget):
quadric = widget.quadric
highlight_id = widget.selectedId
glEnable(GL_DEPTH_TEST)
glPushMatrix()
glTranslatef(self.x, self.y, self.z)
if not self.collapsed:
for i in range(self.node.size()):
child = self.node[i]
glPushAttrib(GL_ALL_ATTRIB_BITS)
glPushMatrix()
glDisable(GL_COLOR_MATERIAL)
Material.chrome().setGL()
self._drawCylinder(child.geom.position, constants.OPENGL_EDGE_CYLINDER_RADIUS,
constants.OPENGL_EDGE_CYLINDER_SUBDIVISIONS, quadric)
glPopMatrix()
glPopAttrib()
child.draw(widget)
# draw outline
outlined = self.node.id == highlight_id
if outlined:
self._preOutline()
self.material.setGL()
glColor3f(self.color.r, self.color.g, self.color.b)
glLoadName(self.node.id)
gluSphere(quadric, self.radius, constants.OPENGL_NODE_SPHERE_SLICES, constants.OPENGL_NODE_SPHERE_STACKS)
if outlined:
self._postOutline(quadric)
# draw label
if widget.camera.distance(
self.absolutePosition - widget.selectedNode().absolutePosition) < \
constants.OPENGL_MINIMAL_LABEL_ZOOM_DISTANCE or highlight_id == self.node.id:
self.label.draw(widget)
glPopMatrix()
| [
"lldbvis.util.material.Material.chrome",
"lldbvis.util.vectors.Vector3.unitZ",
"lldbvis.util.vectors.Vector3"
] | [((382, 391), 'lldbvis.util.vectors.Vector3', 'Vector3', ([], {}), '()\n', (389, 391), False, 'from lldbvis.util.vectors import Vector3\n'), ((482, 491), 'lldbvis.util.vectors.Vector3', 'Vector3', ([], {}), '()\n', (489, 491), False, 'from lldbvis.util.vectors import Vector3\n'), ((516, 525), 'lldbvis.util.vectors.Vector3', 'Vector3', ([], {}), '()\n', (523, 525), False, 'from lldbvis.util.vectors import Vector3\n'), ((2712, 2727), 'lldbvis.util.vectors.Vector3.unitZ', 'Vector3.unitZ', ([], {}), '()\n', (2725, 2727), False, 'from lldbvis.util.vectors import Vector3\n'), ((677, 708), 'lldbvis.util.vectors.Vector3', 'Vector3', (['self.x', 'self.y', 'self.z'], {}), '(self.x, self.y, self.z)\n', (684, 708), False, 'from lldbvis.util.vectors import Vector3\n'), ((3525, 3542), 'lldbvis.util.material.Material.chrome', 'Material.chrome', ([], {}), '()\n', (3540, 3542), False, 'from lldbvis.util.material import Material\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Data fitting module.
.. <NAME> <<EMAIL>>
.. 2017-04-03 15:25:36 PM EDT
"""
import lmfit
import numpy as np
class FitModel(object):
"""Data fitting model:
    - *gaussian*: :math:`a exp(-(x-x_0)^2/2/x_{std}^2) + y_0`
- *polynomial*: :math:`\Sigma_{i=0}^n x^i a_i`
- *power*: :math:`a x ^ b`
- *sin*: :math:`a \sin(b x + c) + d`
Parameters
----------
model : str
Name of fitting model, 'gaussian' by default.
params :
Initial fitting parameters.
Keyword Arguments
-----------------
x : array
Data to fit, *x* array.
y : array
Data to fit, *y* array.
n : int
Highest order for polynomial fit model.
xmin : float
Lower limit of fitting data range.
xmax : float
Upper limit of fitting data range.
"""
def __init__(self, model='gaussian', params=None, **kws):
if params is None:
params = lmfit.Parameters()
self._model = model
self._params = params
try:
self._x, self._y = kws['x'], kws['y']
        except KeyError:
self._x, self._y = [], []
try:
self.n = kws['n']
        except KeyError:
self.n = 1 # when model is polynomial, highest order
self.n += 1 # range(n + 1): [0, n]
# data fitting window
self.x_fit_min, self.x_fit_max = kws.get('xmin'), kws.get('xmax')
# fitting method
self._method = 'leastsq'
self._set_params_func = {
'gaussian': self._set_params_gaussian,
'polynomial': self._set_params_polynomial,
}
self._fitfunc = {
'gaussian': self._fit_gaussian,
'polynomial': self._fit_polynomial,
}
self._gen_func_text = {
'gaussian': self._gen_func_text_gaussian,
'polynomial': self._gen_func_text_polynomial,
}
self._fit_result = None
@property
def model(self):
"""str: Fitting model name, *gaussian* by default.
Available options:
- *gaussian*
"""
return self._model
@model.setter
    def model(self, model):
self._model = model
@property
def method(self):
"""str: Fitting method name, *leastsq* by default.
Available options:
- *leastsq*: Levenberg-Marquardt
- *least_squares*: Least-Squares minimization, using Trust Region
Reflective method by default
- *differential_evolution*: differential evolution
- *brute*: brute force method
- *nelder*: Nelder-Mead
- *lbfgsb*: L-BFGS-B
- *powell*: Powell
- *cg*: Conjugate-Gradient
        - *newton*: Newton-Conjugate-Gradient
- *cobyla*: Cobyla
        - *tnc*: Truncated Newton
        - *trust-ncg*: Trust Newton-Conjugate-Gradient
- *dogleg*: Dogleg
- *slsqp*: Sequential Linear Squares Programming
"""
return self._method
@method.setter
def method(self, method):
self._method = method
def _fit_gaussian(self, p, x):
a = p['a'].value
x0 = p['x0'].value
y0 = p['y0'].value
xstd = p['xstd'].value
return a * np.exp(-(x - x0) ** 2.0 / 2.0 / xstd / xstd) + y0
def _fit_polynomial(self, p, x):
f = 0
for i in range(self.n):
f += p['a' + str(i)].value * x ** i
return f
def _errfunc(self, p, f, x, y):
return f(p, x) - y
def set_data(self, data=None, x=None, y=None):
"""Set raw data to fit, prefer *data* parameter.
Parameters
----------
data : Array
Holds x and y data, shape should be ``(n,2)``.
x : Array
X data array.
y : Array
Y data array.
"""
if data is not None:
self._x, self._y = data[:, 0], data[:, 1]
else:
if x is not None:
self._x = x
if y is not None:
self._y = y
def get_data(self):
"""Return raw data, tuple of array x and y.
"""
return self._x, self._y
# def _set_fitfunc(self, type=None):
# """Type: gaussian, linear, quadratic, polynomial, power, sin
# """
# if type is not None:
# self._model = type
def _gen_func_text_gaussian(self, p0):
a = p0['a'].value
x0 = p0['x0'].value
y0 = p0['y0'].value
xstd = p0['xstd'].value
retfun = '$f(x) = a e^{-\\frac{(x-x_0)^2}{2\sigma_x^2}}+y_0$'
retcoe = '$a = %.3f, x_0 = %.3f, \sigma_x = %.3f, y_0 = %.3f$' % (a, x0, xstd, y0)
return {'func': retfun, 'fcoef': retcoe}
def _gen_func_text_polynomial(self, p0):
        retfun = '$f(x) = \sum_{i=0}^{%s}\,a_i x^i$' % (self.n - 1)
retcoe = ','.join(['$a_{%d} = %.3f$' % (i, p0['a' + str(i)].value) for i in range(self.n)])
return {'func': retfun, 'fcoef': retcoe}
def set_params(self, **p0):
"""Set fitting parameters.
Parameters
----------
p0 : dict
Initial fitting parameters.
"""
self._set_params_func[self._model](p0)
def _set_params_gaussian(self, p0):
self._params.add('a', value=p0['a'])
self._params.add('x0', value=p0['x0'])
self._params.add('y0', value=p0['y0'])
self._params.add('xstd', value=p0['xstd'])
def _set_params_polynomial(self, p0):
for i in range(self.n):
pi_name = 'a' + str(i)
self._params.add(pi_name, value=p0[pi_name])
def get_fitfunc(self, p0=None):
"""Get fitting function.
Parameters
----------
p0 : dict
Fitting parameters.
Returns
-------
ret : tuple
Tuple of fitting function and text label for plotting.
"""
if p0 is None:
p0 = self._fit_result.params
f_func = self._fitfunc[self._model]
gen_func = self._gen_func_text[self._model]
f_text = gen_func(p0)
return f_func, f_text
def get_fit_result(self):
"""Return fitting results.
"""
return self._fit_result
def fit(self):
"""Do data fittig.
"""
p = self._params
f = self._fitfunc[self._model]
x, y = self._x, self._y
xmin = self.x_fit_min if self.x_fit_min is not None else x.min()
xmax = self.x_fit_max if self.x_fit_max is not None else x.max()
x_fit, idx = FitModel.get_range(x, xmin, xmax)
y_fit = y[idx]
m = self._method
res = lmfit.minimize(self._errfunc, p, method=m, args=(f, x_fit, y_fit))
self._fit_result = res
return res
def fit_report(self):
"""Generate fitting report.
"""
# gaussian model
if self._model == 'gaussian':
if self._fit_result is not None:
p = self._fit_result.params
retstr1 = "Fitting Function:" + "\n"
retstr2 = "a*exp(-(x-x0)^2/2/sx^2)+y0" + "\n"
retstr3 = "Fitting Output:" + "\n"
retstr4 = "{a0_k:<3s}: {a0_v:>10.4f}\n".format(a0_k='a', a0_v=p['a'].value)
retstr5 = "{x0_k:<3s}: {x0_v:>10.4f}\n".format(x0_k='x0', x0_v=p['x0'].value)
retstr6 = "{sx_k:<3s}: {sx_v:>10.4f}\n".format(sx_k='sx', sx_v=p['xstd'].value)
retstr7 = "{y0_k:<3s}: {y0_v:>10.4f}".format(y0_k='y0', y0_v=p['y0'].value)
return retstr1 + retstr2 + retstr3 + retstr4 + retstr5 + retstr6 + retstr7
else:
return "Nothing to report."
elif self._model == 'polynomial':
if self._fit_result is not None:
p = self._fit_result.params
retstr = "Fitting Function:" + "\n"
fstr = '+'.join(['a' + str(i) + '*x^' + str(i) for i in range(self.n)])
fstr = fstr.replace('*x^0', '')
fstr = fstr.replace('x^1', 'x')
retstr += fstr + '\n'
retstr += "Fitting Output:" + "\n"
for i in range(self.n):
ki = 'a' + str(i)
retstr += "{k:<3s}: {v:>10.4f}\n".format(k=ki, v=p[ki].value)
return retstr
else:
return "Nothing to report."
def calc_p0(self):
"""Return p0 from input x, y.
"""
if self._model == 'gaussian':
x, xdata = self._x, self._y
x0 = np.sum(x * xdata) / np.sum(xdata)
p0 = {'a': xdata.max(),
'x0': x0,
'xstd': (np.sum((x - x0) ** 2 * xdata) / np.sum(xdata)) ** 0.5,
'y0': 0,
}
elif self._model == 'polynomial':
p0 = {'a' + str(i): 1 for i in range(self.n)}
return p0
@staticmethod
def get_range(x, xmin, xmax):
"""Find array range.
Parameters
----------
x : array
            Original numpy 1D array.
xmin : float
Min of x range.
xmax : float
Max of x range.
Returns
-------
ret : tuple
Sub-array and indice in original array.
"""
if xmin >= xmax:
return x, np.arange(x.size)
idx1, idx2 = np.where(x > xmin), np.where(x < xmax)
idx = np.intersect1d(idx1, idx2)
return x[idx], idx
def gaussian_fit(x, xdata):
"""Return fit function and :math:`x_0`, :math:`\sigma_x` for gaussian fit.
Parameters
----------
x : array
Data to fit, x col.
xdata : array
Data to fit, y col.
Returns
-------
ret : tuple
Tuple of fitting function, x0 and xstd.
"""
fm = FitModel()
x0 = np.sum(x * xdata) / np.sum(xdata)
p0 = {'a': xdata.max(),
'x0': x0,
'xstd': (np.sum((x - x0) ** 2 * xdata) / np.sum(xdata)) ** 0.5,
'y0': 0
}
fm.set_data(x=x, y=xdata)
fm.set_params(**p0)
res = fm.fit()
x0, xstd = [res.params[k].value for k in ('x0', 'xstd')]
def fit_func(x):
return fm.get_fitfunc(res.params)[0](res.params, x)
return fit_func, x0, xstd
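# Illustrative usage (not part of the original module): fit a noisy synthetic
# Gaussian with gaussian_fit(). The sample size, centre, width and noise level
# below are arbitrary assumptions chosen only for demonstration.
if __name__ == '__main__':
    xs = np.linspace(-5.0, 5.0, 200)
    ys = 3.0 * np.exp(-(xs - 1.2) ** 2 / (2.0 * 0.8 ** 2))
    ys = ys + np.random.normal(scale=0.05, size=xs.size)
    fit_func, x0, xstd = gaussian_fit(xs, ys)
    print('fitted x0 = %.3f, xstd = %.3f' % (x0, xstd))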
| [
"numpy.intersect1d",
"numpy.arange",
"numpy.where",
"numpy.exp",
"numpy.sum",
"lmfit.Parameters",
"lmfit.minimize"
] | [((6662, 6728), 'lmfit.minimize', 'lmfit.minimize', (['self._errfunc', 'p'], {'method': 'm', 'args': '(f, x_fit, y_fit)'}), '(self._errfunc, p, method=m, args=(f, x_fit, y_fit))\n', (6676, 6728), False, 'import lmfit\n'), ((9450, 9476), 'numpy.intersect1d', 'np.intersect1d', (['idx1', 'idx2'], {}), '(idx1, idx2)\n', (9464, 9476), True, 'import numpy as np\n'), ((9858, 9875), 'numpy.sum', 'np.sum', (['(x * xdata)'], {}), '(x * xdata)\n', (9864, 9875), True, 'import numpy as np\n'), ((9878, 9891), 'numpy.sum', 'np.sum', (['xdata'], {}), '(xdata)\n', (9884, 9891), True, 'import numpy as np\n'), ((980, 998), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (996, 998), False, 'import lmfit\n'), ((9397, 9415), 'numpy.where', 'np.where', (['(x > xmin)'], {}), '(x > xmin)\n', (9405, 9415), True, 'import numpy as np\n'), ((9417, 9435), 'numpy.where', 'np.where', (['(x < xmax)'], {}), '(x < xmax)\n', (9425, 9435), True, 'import numpy as np\n'), ((3259, 3303), 'numpy.exp', 'np.exp', (['(-(x - x0) ** 2.0 / 2.0 / xstd / xstd)'], {}), '(-(x - x0) ** 2.0 / 2.0 / xstd / xstd)\n', (3265, 3303), True, 'import numpy as np\n'), ((8576, 8593), 'numpy.sum', 'np.sum', (['(x * xdata)'], {}), '(x * xdata)\n', (8582, 8593), True, 'import numpy as np\n'), ((8596, 8609), 'numpy.sum', 'np.sum', (['xdata'], {}), '(xdata)\n', (8602, 8609), True, 'import numpy as np\n'), ((9358, 9375), 'numpy.arange', 'np.arange', (['x.size'], {}), '(x.size)\n', (9367, 9375), True, 'import numpy as np\n'), ((9959, 9988), 'numpy.sum', 'np.sum', (['((x - x0) ** 2 * xdata)'], {}), '((x - x0) ** 2 * xdata)\n', (9965, 9988), True, 'import numpy as np\n'), ((9991, 10004), 'numpy.sum', 'np.sum', (['xdata'], {}), '(xdata)\n', (9997, 10004), True, 'import numpy as np\n'), ((8701, 8730), 'numpy.sum', 'np.sum', (['((x - x0) ** 2 * xdata)'], {}), '((x - x0) ** 2 * xdata)\n', (8707, 8730), True, 'import numpy as np\n'), ((8733, 8746), 'numpy.sum', 'np.sum', (['xdata'], {}), '(xdata)\n', (8739, 8746), True, 'import numpy as np\n')] |
from ctypes import c_float
from pyglet.gl import GL_TRIANGLES, GL_QUADS, glInterleavedArrays, glDrawArrays
class DrawBundle:
shape_by_n_points = {3: GL_TRIANGLES, 4: GL_QUADS}
def __init__(self, draw_shape, n_dimensions, vertices: list=None, normals: list=None, tex_coords: list=None,
colors: list=None):
self._n_dimensions = n_dimensions
self._draw_shape = draw_shape
        self._n_vertices = len(vertices) if vertices is not None else 0
self.draw_data = []
self._vertices = vertices
self._normals = normals
self._tex_coords = tex_coords
self._colors = colors
self._draw_data_encoding_mode = self._determine_data_encoding_mode()
self.data_length = len(self.draw_data)
self.c_arr = c_float * self.data_length
self.c_draw_data = self.c_arr(*self.draw_data)
def _determine_data_encoding_mode(self):
return 'v3f'
def draw(self):
glInterleavedArrays(self._draw_data_encoding_mode, 0, self.c_draw_data)
glDrawArrays(self._draw_shape, 0, self._n_vertices)
| [
"pyglet.gl.glDrawArrays",
"pyglet.gl.glInterleavedArrays"
] | [((940, 1011), 'pyglet.gl.glInterleavedArrays', 'glInterleavedArrays', (['self._draw_data_encoding_mode', '(0)', 'self.c_draw_data'], {}), '(self._draw_data_encoding_mode, 0, self.c_draw_data)\n', (959, 1011), False, 'from pyglet.gl import GL_TRIANGLES, GL_QUADS, glInterleavedArrays, glDrawArrays\n'), ((1020, 1071), 'pyglet.gl.glDrawArrays', 'glDrawArrays', (['self._draw_shape', '(0)', 'self._n_vertices'], {}), '(self._draw_shape, 0, self._n_vertices)\n', (1032, 1071), False, 'from pyglet.gl import GL_TRIANGLES, GL_QUADS, glInterleavedArrays, glDrawArrays\n')] |
from vyper import compiler
from ethereum.tools import tester
from ethereum import utils as ethereum_utils
# http://web3py.readthedocs.io/en/stable
import web3
from web3 import Web3, HTTPProvider, EthereumTesterProvider, IPCProvider
from sys import platform
from web3.contract import Contract
GETH_IPC_PATH = '/Users/Ls/code/blockchain/geth-node/chaindata/geth.ipc'
GENERIC_PASSWORD_TO_ENCRYPT = '<PASSWORD>'
provider_ipc = IPCProvider(GETH_IPC_PATH);
# provider_ethereum_test = EthereumTesterProvider()
# HTTP Provider Reference: http://web3py.readthedocs.io/en/stable/providers.html#httpprovider
# Run `truffle develop` with a configuration to start Ganache CLI.
# It does not appear to work when I just run ganache-cli with flags such as `--port 9545`
provider_http = Web3.HTTPProvider("http://127.0.0.1:9545")
# web3.py instance
web3 = Web3(provider_http)
print('OS Platform: {}'.format(platform))
print('Web3 provider: {}'.format(web3))
# print("Block Number: %s", web3.eth.blockNumber)
def get_encoded_contract_constructor_arguments(constructor_args=None):
if constructor_args:
return contract_translator.encode_constructor_arguments(constructor_args['args'])
else:
return b''
def get_logs(last_receipt, contract, event_name=None):
# Get all log ids from the contract events
contract_log_ids = contract.translator.event_data.keys()
# Filter and return all logs originating from the contract
# or only those matching the event_name (if specified)
logs = [log for log in last_receipt.logs
if log.topics[0] in contract_log_ids and
log.address == contract.address and
(not event_name or
contract.translator.event_data[log.topics[0]]['name'] == event_name)]
assert len(logs) > 0, "No logs in the last receipt of the contract"
# Return all events decoded from the last receipt of the contract
return [contract.translator.decode_event(log.topics, log.data) for log in logs]
def get_last_log_from_contract_receipts(tester, contract, event_name=None):
# Get only the receipts for the last block from the chain (aka tester.s)
last_receipt = tester.s.head_state.receipts[-1]
# Get last log event with correct name and return the decoded event
print(get_logs(last_receipt, contract, event_name=event_name))
return get_logs(last_receipt, contract, event_name=event_name)[-1]
# Set the Vyper compiler to run when the Vyper language is requested
tester.languages['vyper'] = compiler.Compiler()
# Set the new "chain" (aka tester.s)
tester.s = tester.Chain()
tester.s.head_state.gas_limit = 10**9
initial_chain_state = tester.s.snapshot()
# Load contract source code
source_code = open('contracts/auctions/simple_open_auction.v.py').read()
# Compile contract code interface (aka tester.c)
FIVE_DAYS = 432000
tester.c = tester.s.contract(source_code, language='vyper', args=[tester.accounts[0], FIVE_DAYS])
# Generate ABI from contract source code
abi = tester.languages['vyper'].mk_full_signature(source_code)
print("ABI: %s", abi)
# Generate Contract Translator from ABI
contract_translator = tester.ContractTranslator(abi)
# Generate Bytecode from contract source code
contract_constructor_args = []
byte_code = tester.languages['vyper'].compile(source_code) + \
get_encoded_contract_constructor_arguments(contract_constructor_args)
# print("Bytecode: %s", byte_code)
address = tester.s.tx(to=b'', data=byte_code)
print("Address: %s", address)
# Instantiate contract from its ABI and Bytecode
contract_instance = tester.ABIContract(tester.s, abi, address)
print("Contract Instance: %s", contract_instance)
# Execute method on the tester chain to check the beneficiary is correct
assert ethereum_utils.remove_0x_head(tester.c.beneficiary()) == tester.accounts[0].hex()
# Execute method on the tester chain to check bidding time is 5 days
assert tester.c.auction_end() == tester.s.head_state.timestamp + FIVE_DAYS
# Revert chain state on failed transaction
tester.s.revert(initial_chain_state)
# Instantiate and deploy contract
contract_instance_web3 = web3.eth.contract(abi=abi, bytecode=byte_code)
print("Contract Instance with Web3: %s", contract_instance)
# Note: If we're running a Geth Node then I can use
# `web3.personal.listAccounts` but when I am using Ganache CLI
# I have to use `web3.eth.accounts` instead
if web3.personal.listAccounts:
# Geth Node
print("Accounts: %s", web3.personal.listAccounts[0])
first_account = web3.personal.listAccounts[0]
else:
# Ganache CLI on port 9545 with `truffle develop`
print("Accounts: %s", web3.eth.accounts[0])
first_account = web3.eth.accounts[0]
# Set Default account since this is used by
# /Users/Me/.pyenv/versions/3.6.2/lib/python3.6/site-packages/web3/contract.py", line 742
web3.eth.defaultAccount = first_account
print("Default Account: %s", web3.eth.defaultAccount);
if web3.personal.listAccounts:
# Only need to unlock the account when using Geth Node
    # Not necessary when using Ganache CLI on port 9545 with `truffle develop`
print("Unlocked Default Account: %s", web3.personal.unlockAccount(web3.eth.defaultAccount, GENERIC_PASSWORD_TO_ENCRYPT))
# Alternative using Web3.py 4.1.0 and Geth
# https://github.com/ltfschoen/geth-node
# http://web3py.readthedocs.io/en/stable/contracts.html?highlight=deploy
# Get transaction hash from deployed contract
transaction_fields = {
'from': first_account,
'gasPrice': web3.eth.gasPrice
}
# Vyper contract Constructor Parameters expected
# i.e. See signature of Construction Function in simple_open_auction.v.py
# containing expected Constructor Parameters:
# def __init__(_beneficiary: address, _bidding_time: timedelta):
_bidding_time = 4000
contract_data = contract_instance_web3 \
.constructor(web3.eth.defaultAccount, _bidding_time) \
.buildTransaction(transaction_fields)
deploy_txn_hash = web3.eth.sendTransaction(contract_data)
print("Deployed Contract Tx Hash: %d", deploy_txn_hash)
# IMPORTANT: Ensure that you start mining in the Geth Node before
# deploying the contract, using the Geth JavaScript Console with `miner.start(1)`,
# so it returns the tx receipt after it has mined the block
mined_txn_receipt = web3.eth.waitForTransactionReceipt(deploy_txn_hash, timeout=1200)
print("Mined Transaction Receipt: %s", mined_txn_receipt)
txn_receipt = web3.eth.getTransactionReceipt(deploy_txn_hash)
print("Transaction Receipt: %s", txn_receipt)
# Note that the deployed Transaction Hash is shown in the Geth Logs.
# It may be used to obtain the tx receipt from the tx hash by running
# the following in the Geth JavaScript console:
# web3.eth.getTransactionReceipt('<INSERT_TRANSACTION_HASH>')
deployed_contract_address = mined_txn_receipt['contractAddress']
contract_instance = web3.eth.contract(address=deployed_contract_address, abi=abi)
print("Contract Instance: %s", contract_instance)
print("Called Getter method beneficiary() from Deployed Contract Instance: %s",
contract_instance.functions.beneficiary().call())
print("Called Getter method auction_end() set by Constructor Parameter \
_bidding_time from Deployed Contract Instance: %s",
contract_instance.functions.auction_end().call()) | [
"web3.eth.sendTransaction",
"web3.eth.getTransactionReceipt",
"ethereum.tools.tester.c.beneficiary",
"ethereum.tools.tester.s.contract",
"ethereum.tools.tester.s.tx",
"web3.Web3",
"ethereum.tools.tester.ContractTranslator",
"ethereum.tools.tester.ABIContract",
"ethereum.tools.tester.s.revert",
"web3.eth.contract",
"vyper.compiler.Compiler",
"ethereum.tools.tester.s.snapshot",
"ethereum.tools.tester.Chain",
"web3.personal.unlockAccount",
"web3.IPCProvider",
"ethereum.tools.tester.c.auction_end",
"web3.eth.waitForTransactionReceipt",
"web3.Web3.HTTPProvider"
] | [((426, 452), 'web3.IPCProvider', 'IPCProvider', (['GETH_IPC_PATH'], {}), '(GETH_IPC_PATH)\n', (437, 452), False, 'from web3 import Web3, HTTPProvider, EthereumTesterProvider, IPCProvider\n'), ((774, 816), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', (['"""http://127.0.0.1:9545"""'], {}), "('http://127.0.0.1:9545')\n", (791, 816), False, 'from web3 import Web3, HTTPProvider, EthereumTesterProvider, IPCProvider\n'), ((843, 862), 'web3.Web3', 'Web3', (['provider_http'], {}), '(provider_http)\n', (847, 862), False, 'from web3 import Web3, HTTPProvider, EthereumTesterProvider, IPCProvider\n'), ((2502, 2521), 'vyper.compiler.Compiler', 'compiler.Compiler', ([], {}), '()\n', (2519, 2521), False, 'from vyper import compiler\n'), ((2570, 2584), 'ethereum.tools.tester.Chain', 'tester.Chain', ([], {}), '()\n', (2582, 2584), False, 'from ethereum.tools import tester\n'), ((2645, 2664), 'ethereum.tools.tester.s.snapshot', 'tester.s.snapshot', ([], {}), '()\n', (2662, 2664), False, 'from ethereum.tools import tester\n'), ((2845, 2935), 'ethereum.tools.tester.s.contract', 'tester.s.contract', (['source_code'], {'language': '"""vyper"""', 'args': '[tester.accounts[0], FIVE_DAYS]'}), "(source_code, language='vyper', args=[tester.accounts[0],\n FIVE_DAYS])\n", (2862, 2935), False, 'from ethereum.tools import tester\n'), ((3120, 3150), 'ethereum.tools.tester.ContractTranslator', 'tester.ContractTranslator', (['abi'], {}), '(abi)\n', (3145, 3150), False, 'from ethereum.tools import tester\n'), ((3410, 3445), 'ethereum.tools.tester.s.tx', 'tester.s.tx', ([], {'to': "b''", 'data': 'byte_code'}), "(to=b'', data=byte_code)\n", (3421, 3445), False, 'from ethereum.tools import tester\n'), ((3545, 3587), 'ethereum.tools.tester.ABIContract', 'tester.ABIContract', (['tester.s', 'abi', 'address'], {}), '(tester.s, abi, address)\n', (3563, 3587), False, 'from ethereum.tools import tester\n'), ((3987, 4023), 'ethereum.tools.tester.s.revert', 'tester.s.revert', (['initial_chain_state'], {}), '(initial_chain_state)\n', (4002, 4023), False, 'from ethereum.tools import tester\n'), ((4084, 4130), 'web3.eth.contract', 'web3.eth.contract', ([], {'abi': 'abi', 'bytecode': 'byte_code'}), '(abi=abi, bytecode=byte_code)\n', (4101, 4130), False, 'import web3\n'), ((5931, 5970), 'web3.eth.sendTransaction', 'web3.eth.sendTransaction', (['contract_data'], {}), '(contract_data)\n', (5955, 5970), False, 'import web3\n'), ((6259, 6324), 'web3.eth.waitForTransactionReceipt', 'web3.eth.waitForTransactionReceipt', (['deploy_txn_hash'], {'timeout': '(1200)'}), '(deploy_txn_hash, timeout=1200)\n', (6293, 6324), False, 'import web3\n'), ((6398, 6445), 'web3.eth.getTransactionReceipt', 'web3.eth.getTransactionReceipt', (['deploy_txn_hash'], {}), '(deploy_txn_hash)\n', (6428, 6445), False, 'import web3\n'), ((6827, 6888), 'web3.eth.contract', 'web3.eth.contract', ([], {'address': 'deployed_contract_address', 'abi': 'abi'}), '(address=deployed_contract_address, abi=abi)\n', (6844, 6888), False, 'import web3\n'), ((3876, 3898), 'ethereum.tools.tester.c.auction_end', 'tester.c.auction_end', ([], {}), '()\n', (3896, 3898), False, 'from ethereum.tools import tester\n'), ((3748, 3770), 'ethereum.tools.tester.c.beneficiary', 'tester.c.beneficiary', ([], {}), '()\n', (3768, 3770), False, 'from ethereum.tools import tester\n'), ((5102, 5187), 'web3.personal.unlockAccount', 'web3.personal.unlockAccount', (['web3.eth.defaultAccount', 'GENERIC_PASSWORD_TO_ENCRYPT'], {}), '(web3.eth.defaultAccount,\n GENERIC_PASSWORD_TO_ENCRYPT)\n', (5129, 5187), 
False, 'import web3\n')] |
import argparse
import glob
import os
import pickle
from kitti_horizon_raw import KITTIHorizonRaw
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('--raw_path', default='/data/scene_understanding/KITTI/rawdata', type=str,
help='path to KITTI rawdata')
parser.add_argument('--target_path', default='/data/kluger/tmp/kitti_horizon_test', type=str,
help='path to save processed data')
parser.add_argument('--image_scale', default=0.5, type=float,
help='image scaling factor')
args = parser.parse_args()
dataset = KITTIHorizonRaw(dataset_path=args.raw_path, img_scale=args.image_scale)
dates = [
'2011_09_26',
'2011_09_28',
'2011_09_29',
'2011_09_30',
'2011_10_03'
]
for date in dates:
date_dir = args.raw_path + "/" + date
drive_dirs = glob.glob(date_dir + "/*sync")
drive_dirs.sort()
drives = []
for drive_dir in drive_dirs:
drive = drive_dir.split("_")[-2]
drives.append(drive)
for drive_id in drives:
target_dir = os.path.join(args.target_path, "%s/%s" % (date, drive_id))
if not os.path.exists(target_dir):
os.makedirs(target_dir)
drive = dataset.get_drive(date, drive_id)
num_images = len(drive)
for idx, image in enumerate(iter(drive.rgb)):
data = dataset.process_single_image(drive, image, idx)
pickle_file = target_dir + "/%06d.pkl" % idx
print(pickle_file)
with open(pickle_file, 'wb') as f:
pickle.dump(data, f, -1)
| [
"os.path.exists",
"pickle.dump",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"glob.glob",
"kitti_horizon_raw.KITTIHorizonRaw"
] | [((140, 179), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '""""""'}), "(description='')\n", (163, 179), False, 'import argparse\n'), ((656, 727), 'kitti_horizon_raw.KITTIHorizonRaw', 'KITTIHorizonRaw', ([], {'dataset_path': 'args.raw_path', 'img_scale': 'args.image_scale'}), '(dataset_path=args.raw_path, img_scale=args.image_scale)\n', (671, 727), False, 'from kitti_horizon_raw import KITTIHorizonRaw\n'), ((950, 980), 'glob.glob', 'glob.glob', (["(date_dir + '/*sync')"], {}), "(date_dir + '/*sync')\n", (959, 980), False, 'import glob\n'), ((1202, 1260), 'os.path.join', 'os.path.join', (['args.target_path', "('%s/%s' % (date, drive_id))"], {}), "(args.target_path, '%s/%s' % (date, drive_id))\n", (1214, 1260), False, 'import os\n'), ((1280, 1306), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (1294, 1306), False, 'import os\n'), ((1324, 1347), 'os.makedirs', 'os.makedirs', (['target_dir'], {}), '(target_dir)\n', (1335, 1347), False, 'import os\n'), ((1740, 1764), 'pickle.dump', 'pickle.dump', (['data', 'f', '(-1)'], {}), '(data, f, -1)\n', (1751, 1764), False, 'import pickle\n')] |
from keystone.manage2 import base
from keystone.manage2 import common
from keystone.manage2 import mixins
@common.arg('--where-id',
required=True,
help='identifies the credential to update by ID')
@common.arg('--user-id',
required=False,
help='change the user the credential applies to, by ID')
@common.arg('--tenant-id',
required=False,
help='change the tenant this credential applies to, by ID')
@common.arg('--type',
required=True,
help="change the credential type (e.g. 'EC2')")
@common.arg('--key',
required=True,
help="change the credential key")
@common.arg('--secret',
required=True,
help="change the credential secret")
class Command(base.BaseBackendCommand, mixins.DateTimeMixin):
"""Updates the specified credential."""
# pylint: disable=E1101,R0913
def update_credential(self, id, user_id=None, tenant_id=None,
cred_type=None, secret=None, key=None):
obj = self.get_credential(id)
self.get_user(user_id)
self.get_tenant(tenant_id)
if user_id is not None:
obj.user_id = user_id
if tenant_id is not None:
obj.tenant_id = tenant_id
if cred_type is not None:
obj.type = cred_type
if key is not None:
obj.key = key
if secret is not None:
obj.secret = secret
self.credential_manager.update(id, obj)
def run(self, args):
"""Process argparse args, and print results to stdout"""
self.update_credential(id=args.where_id, user_id=args.user_id,
tenant_id=args.tenant_id, cred_type=args.type,
key=args.key, secret=args.secret)
| [
"keystone.manage2.common.arg"
] | [((109, 203), 'keystone.manage2.common.arg', 'common.arg', (['"""--where-id"""'], {'required': '(True)', 'help': '"""identifies the credential to update by ID"""'}), "('--where-id', required=True, help=\n 'identifies the credential to update by ID')\n", (119, 203), False, 'from keystone.manage2 import common\n'), ((208, 309), 'keystone.manage2.common.arg', 'common.arg', (['"""--user-id"""'], {'required': '(False)', 'help': '"""change the user the credential applies to, by ID"""'}), "('--user-id', required=False, help=\n 'change the user the credential applies to, by ID')\n", (218, 309), False, 'from keystone.manage2 import common\n'), ((314, 420), 'keystone.manage2.common.arg', 'common.arg', (['"""--tenant-id"""'], {'required': '(False)', 'help': '"""change the tenant this credential applies to, by ID"""'}), "('--tenant-id', required=False, help=\n 'change the tenant this credential applies to, by ID')\n", (324, 420), False, 'from keystone.manage2 import common\n'), ((425, 513), 'keystone.manage2.common.arg', 'common.arg', (['"""--type"""'], {'required': '(True)', 'help': '"""change the credential type (e.g. \'EC2\')"""'}), '(\'--type\', required=True, help=\n "change the credential type (e.g. \'EC2\')")\n', (435, 513), False, 'from keystone.manage2 import common\n'), ((518, 586), 'keystone.manage2.common.arg', 'common.arg', (['"""--key"""'], {'required': '(True)', 'help': '"""change the credential key"""'}), "('--key', required=True, help='change the credential key')\n", (528, 586), False, 'from keystone.manage2 import common\n'), ((596, 670), 'keystone.manage2.common.arg', 'common.arg', (['"""--secret"""'], {'required': '(True)', 'help': '"""change the credential secret"""'}), "('--secret', required=True, help='change the credential secret')\n", (606, 670), False, 'from keystone.manage2 import common\n')] |
"""
"""
import click
from planemo.cli import pass_context
from planemo.io import info
from planemo import github_util
PLANEMO_TEST_VIEWER_URL_TEMPLATE = (
"http://galaxyproject.github.io/planemo/tool_test_viewer.html"
"?test_data_url=%s"
)
target_path = click.Path(
file_okay=True,
dir_okay=False,
resolve_path=True,
)
@click.command("share_test")
@click.argument(
'path',
metavar="FILE_PATH",
type=target_path,
default="tool_test_output.json",
)
@pass_context
def cli(ctx, path, **kwds):
"""Publish JSON test results to Github Gist and produce sharable URL.
Sharable URL can be used to share an HTML version of the report that
can be easily embedded in pull requests or commit messages. Requires
a ~/.planemo.yml with Github 'username' and 'password' defined in a
'github' section of that configuration file.
"""
file_url = github_util.publish_as_gist_file(ctx, path)
share_url = PLANEMO_TEST_VIEWER_URL_TEMPLATE % file_url
info("File published to Github Gist.")
info("Raw URL: %s" % file_url)
info("Share results with URL: %s" % share_url)
markdown = "[View Tool Test Results](%s)" % share_url
info("Embed results with markdown: %s" % markdown)
| [
"click.argument",
"planemo.io.info",
"planemo.github_util.publish_as_gist_file",
"click.Path",
"click.command"
] | [((265, 326), 'click.Path', 'click.Path', ([], {'file_okay': '(True)', 'dir_okay': '(False)', 'resolve_path': '(True)'}), '(file_okay=True, dir_okay=False, resolve_path=True)\n', (275, 326), False, 'import click\n'), ((345, 372), 'click.command', 'click.command', (['"""share_test"""'], {}), "('share_test')\n", (358, 372), False, 'import click\n'), ((374, 473), 'click.argument', 'click.argument', (['"""path"""'], {'metavar': '"""FILE_PATH"""', 'type': 'target_path', 'default': '"""tool_test_output.json"""'}), "('path', metavar='FILE_PATH', type=target_path, default=\n 'tool_test_output.json')\n", (388, 473), False, 'import click\n'), ((896, 939), 'planemo.github_util.publish_as_gist_file', 'github_util.publish_as_gist_file', (['ctx', 'path'], {}), '(ctx, path)\n', (928, 939), False, 'from planemo import github_util\n'), ((1004, 1042), 'planemo.io.info', 'info', (['"""File published to Github Gist."""'], {}), "('File published to Github Gist.')\n", (1008, 1042), False, 'from planemo.io import info\n'), ((1047, 1077), 'planemo.io.info', 'info', (["('Raw URL: %s' % file_url)"], {}), "('Raw URL: %s' % file_url)\n", (1051, 1077), False, 'from planemo.io import info\n'), ((1082, 1128), 'planemo.io.info', 'info', (["('Share results with URL: %s' % share_url)"], {}), "('Share results with URL: %s' % share_url)\n", (1086, 1128), False, 'from planemo.io import info\n'), ((1191, 1241), 'planemo.io.info', 'info', (["('Embed results with markdown: %s' % markdown)"], {}), "('Embed results with markdown: %s' % markdown)\n", (1195, 1241), False, 'from planemo.io import info\n')] |
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import os
from unittest import TestCase, mock
from traits.testing.unittest_tools import UnittestTools
from force_bdss.api import DataValue
from force_gromacs.tests.probe_classes.chemicals import ProbeMolecule
from force_gromacs.tests.probe_classes.simulation_builders import (
ProbeSimulationBuilder
)
from force_gromacs.tests.probe_classes.pipelines import (
ProbeGromacsPipeline
)
from force_gromacs.data_sources.simulation.simulation_factory import (
SimulationFactory
)
SIMULATION_DATASOURCE_PATH = ('force_gromacs.data_sources.simulation'
'.simulation_data_source.SimulationDataSource')
SIMULATION_BUILDER_PATH = (f"{SIMULATION_DATASOURCE_PATH}"
'.create_simulation_builder')
class TestSimulationDataSource(TestCase, UnittestTools):
def setUp(self):
self.factory = SimulationFactory({'id': '0',
'name': 'Simulation'})
self.data_source = self.factory.create_data_source()
#: Example input values
self.size = 4000
self.name = 'test_experiment'
self.martini_parameters = 'test_martini.itp'
self.md_min_parameters = 'test_min_parm.mdp'
self.md_prod_parameters = 'test_prod_parm.mdp'
self.model = self.factory.create_model()
self.model.n_molecule_types = 2
self.model.martini_parameters = self.martini_parameters
self.model.md_prod_parameters = self.md_prod_parameters
self.model.md_min_parameters = self.md_min_parameters
self.model.size = self.size
self.model.name = self.name
self.water = ProbeMolecule('Water')
self.salt = ProbeMolecule('Salt')
self.input_values = [[self.water, self.salt]]
def test_basic_function(self):
in_slots = self.data_source.slots(self.model)[0]
self.assertEqual(2, len(in_slots))
data_values = [
DataValue(type=slot.type, value=value)
for slot, value in zip(in_slots, self.input_values)
]
with mock.patch(SIMULATION_BUILDER_PATH) as mock_sim:
mock_sim.return_value = ProbeSimulationBuilder()
with mock.patch('os.path.exists') as mock_exists:
mock_exists.return_value = True
with self.assertTraitChanges(
self.model, 'event', count=0):
self.data_source.run(self.model, data_values)
self.model.ow_data = True
with self.assertTraitChanges(
self.model, 'event', count=1):
res = self.data_source.run(self.model, data_values)
self.assertEqual(1, len(res))
self.assertEqual('/path/to/trajectory.gro', res[0].value)
def test_default_traits(self):
self.assertEqual(
os.getcwd(),
self.model.output_directory
)
def test__check_perform_simulation(self):
with mock.patch('os.path.exists') as mock_exists:
# If data doesnt exist, always perform simulation
mock_exists.return_value = False
self.model.ow_data = False
self.assertTrue(
self.data_source._check_perform_simulation(
self.model, '/some/path'))
self.model.ow_data = True
self.assertTrue(
self.data_source._check_perform_simulation(
self.model, '/some/path'))
# If data exists, only perform simulation when required
# by model
mock_exists.return_value = True
self.assertTrue(
self.data_source._check_perform_simulation(
self.model, '/some/path'))
self.model.ow_data = False
self.assertFalse(
self.data_source._check_perform_simulation(
self.model, '/some/path'))
def test_slots(self):
self.model.n_molecule_types = 4
in_slots = self.data_source.slots(self.model)[0]
self.assertEqual(4, len(in_slots))
def test__n_molecule_types_check(self):
model = self.factory.create_model()
model.n_molecule_types = 0
errors = model.verify()
messages = [error.local_error for error in errors]
self.assertIn(
"Number of molecule types must be at least 1",
messages
)
def test_not_implemented_error(self):
with self.assertRaises(NotImplementedError):
self.data_source.create_simulation_builder(None, None)
def test_create_bash_script(self):
name = 'test_experiment'
pipeline = ProbeGromacsPipeline()
bash_script = self.data_source.create_bash_script(
pipeline, name
)
commands = bash_script.split('\n')
self.assertEqual(17, len(commands))
self.assertEqual(
'# test_experiment', commands[0]
)
def test_notify_bash_script(self):
bash_script = ('# experiment_5.0\n'
'mdrun -s test_topol.tpr\n')
with self.assertTraitChanges(
self.model, 'event', count=1):
self.model.notify_bash_script(
bash_script
)
def test_driver_event(self):
in_slots = self.data_source.slots(self.model)[0]
data_values = [
DataValue(type=slot.type, value=value)
for slot, value in zip(in_slots, self.input_values)
]
with mock.patch('force_gromacs.data_sources.simulation'
'.simulation_data_source.SimulationDataSource'
'.create_simulation_builder') as mocksim:
mocksim.return_value = ProbeSimulationBuilder()
with self.assertTraitChanges(
self.model, 'event', count=1):
self.data_source.run(self.model, data_values)
| [
"force_gromacs.data_sources.simulation.simulation_factory.SimulationFactory",
"os.getcwd",
"force_gromacs.tests.probe_classes.simulation_builders.ProbeSimulationBuilder",
"force_gromacs.tests.probe_classes.pipelines.ProbeGromacsPipeline",
"force_bdss.api.DataValue",
"unittest.mock.patch",
"force_gromacs.tests.probe_classes.chemicals.ProbeMolecule"
] | [((939, 991), 'force_gromacs.data_sources.simulation.simulation_factory.SimulationFactory', 'SimulationFactory', (["{'id': '0', 'name': 'Simulation'}"], {}), "({'id': '0', 'name': 'Simulation'})\n", (956, 991), False, 'from force_gromacs.data_sources.simulation.simulation_factory import SimulationFactory\n'), ((1726, 1748), 'force_gromacs.tests.probe_classes.chemicals.ProbeMolecule', 'ProbeMolecule', (['"""Water"""'], {}), "('Water')\n", (1739, 1748), False, 'from force_gromacs.tests.probe_classes.chemicals import ProbeMolecule\n'), ((1769, 1790), 'force_gromacs.tests.probe_classes.chemicals.ProbeMolecule', 'ProbeMolecule', (['"""Salt"""'], {}), "('Salt')\n", (1782, 1790), False, 'from force_gromacs.tests.probe_classes.chemicals import ProbeMolecule\n'), ((4757, 4779), 'force_gromacs.tests.probe_classes.pipelines.ProbeGromacsPipeline', 'ProbeGromacsPipeline', ([], {}), '()\n', (4777, 4779), False, 'from force_gromacs.tests.probe_classes.pipelines import ProbeGromacsPipeline\n'), ((2019, 2057), 'force_bdss.api.DataValue', 'DataValue', ([], {'type': 'slot.type', 'value': 'value'}), '(type=slot.type, value=value)\n', (2028, 2057), False, 'from force_bdss.api import DataValue\n'), ((2146, 2181), 'unittest.mock.patch', 'mock.patch', (['SIMULATION_BUILDER_PATH'], {}), '(SIMULATION_BUILDER_PATH)\n', (2156, 2181), False, 'from unittest import TestCase, mock\n'), ((2231, 2255), 'force_gromacs.tests.probe_classes.simulation_builders.ProbeSimulationBuilder', 'ProbeSimulationBuilder', ([], {}), '()\n', (2253, 2255), False, 'from force_gromacs.tests.probe_classes.simulation_builders import ProbeSimulationBuilder\n'), ((2927, 2938), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2936, 2938), False, 'import os\n'), ((3051, 3079), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (3061, 3079), False, 'from unittest import TestCase, mock\n'), ((5484, 5522), 'force_bdss.api.DataValue', 'DataValue', ([], {'type': 'slot.type', 'value': 'value'}), '(type=slot.type, value=value)\n', (5493, 5522), False, 'from force_bdss.api import DataValue\n'), ((5611, 5742), 'unittest.mock.patch', 'mock.patch', (['"""force_gromacs.data_sources.simulation.simulation_data_source.SimulationDataSource.create_simulation_builder"""'], {}), "(\n 'force_gromacs.data_sources.simulation.simulation_data_source.SimulationDataSource.create_simulation_builder'\n )\n", (5621, 5742), False, 'from unittest import TestCase, mock\n'), ((5834, 5858), 'force_gromacs.tests.probe_classes.simulation_builders.ProbeSimulationBuilder', 'ProbeSimulationBuilder', ([], {}), '()\n', (5856, 5858), False, 'from force_gromacs.tests.probe_classes.simulation_builders import ProbeSimulationBuilder\n'), ((2273, 2301), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {}), "('os.path.exists')\n", (2283, 2301), False, 'from unittest import TestCase, mock\n')] |
"""Linked List tests."""
def test_node_has_attributes():
"""Node object has data and next attributes."""
from linked_list import Node
n = Node(1)
assert hasattr(n, 'data')
assert hasattr(n, 'next')
def test_list_has_attributes():
"""Linked List has expected attributes on initialization."""
from linked_list import LinkedList
ll = LinkedList()
assert hasattr(ll, 'head')
assert hasattr(ll, '_size')
assert not ll.head
assert ll._size == 0
def test_pushed_node_is_head():
"""Pushing a new node to empty list makes it the head."""
from linked_list import LinkedList
ll = LinkedList()
ll.push(1)
assert ll.head.data == 1
def test_pushing_nodes_adds_to_size():
"""Push should increase the size attribute of the list."""
from linked_list import LinkedList
ll = LinkedList()
for i in range(10):
ll.push(i)
assert ll._size == 10
def test_pop_removes_head():
"""Pop on list with one node empties it."""
from linked_list import LinkedList
ll = LinkedList()
ll.push(1)
assert ll.pop() == 1
assert not ll.head
assert ll._size == 0
def test_valid_search_return():
"""Search with valid data should return the correct node."""
from linked_list import LinkedList
ll = LinkedList()
for i in range(10):
ll.push(i)
assert ll.search(5).data == 5
def test_valid_search_head_node():
"""Search with valid data returns correct node if it is the head."""
from linked_list import LinkedList
ll = LinkedList()
ll.push(3)
assert ll.search(3) == ll.head
def test_invalid_search():
"""Search with data not in the list returns None."""
from linked_list import LinkedList
ll = LinkedList()
ll.push(2)
ll.push(4)
ll.push(6)
assert ll.search(8) is None
def test_remove_valid_node():
"""Successfully remove the node with the specified data."""
from linked_list import LinkedList
ll = LinkedList()
for i in range(5):
ll.push(i)
assert ll.remove(2) == 2
assert len(ll) == 4
assert ll.head.next.data == 3
def test_remove_head():
"""Remove with data in head node pops it from the list."""
from linked_list import LinkedList
ll = LinkedList()
ll.push(1)
ll.push(2)
ll.push(3)
assert ll.remove(3) == 3
assert ll.head.data == 2
assert len(ll) == 2
def test_remove_head_only_node():
"""Remove the head in a list with only one node."""
from linked_list import LinkedList
ll = LinkedList()
ll.push(1)
assert ll.remove(1) == 1
assert not ll.head
assert len(ll) == 0
def test_display():
"""Display method returns string of data in the list in order."""
from linked_list import LinkedList
ll = LinkedList()
for i in range(1, 11):
ll.push(i)
assert ll.display() == '(10, 9, 8, 7, 6, 5, 4, 3, 2, 1)'
| [
"linked_list.LinkedList",
"linked_list.Node"
] | [((152, 159), 'linked_list.Node', 'Node', (['(1)'], {}), '(1)\n', (156, 159), False, 'from linked_list import Node\n'), ((367, 379), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (377, 379), False, 'from linked_list import LinkedList\n'), ((635, 647), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (645, 647), False, 'from linked_list import LinkedList\n'), ((844, 856), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (854, 856), False, 'from linked_list import LinkedList\n'), ((1053, 1065), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (1063, 1065), False, 'from linked_list import LinkedList\n'), ((1301, 1313), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (1311, 1313), False, 'from linked_list import LinkedList\n'), ((1549, 1561), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (1559, 1561), False, 'from linked_list import LinkedList\n'), ((1746, 1758), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (1756, 1758), False, 'from linked_list import LinkedList\n'), ((1980, 1992), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (1990, 1992), False, 'from linked_list import LinkedList\n'), ((2259, 2271), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (2269, 2271), False, 'from linked_list import LinkedList\n'), ((2539, 2551), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (2549, 2551), False, 'from linked_list import LinkedList\n'), ((2783, 2795), 'linked_list.LinkedList', 'LinkedList', ([], {}), '()\n', (2793, 2795), False, 'from linked_list import LinkedList\n')] |
import logging
import argparse
import sys
from openapi_spec_validator import (
openapi_v2_spec_validator, openapi_v3_spec_validator,
)
from openapi_spec_validator.exceptions import ValidationError
from openapi_spec_validator.readers import read_from_stdin, read_from_filename
logger = logging.getLogger(__name__)
logging.basicConfig(
format='%(asctime)s %(levelname)s %(name)s %(message)s',
level=logging.WARNING
)
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument('filename', help="Absolute or relative path to file")
parser.add_argument(
'--schema',
help="OpenAPI schema (default: 3.0.0)",
type=str,
choices=['2.0', '3.0.0'],
default='3.0.0'
)
args = parser.parse_args(args)
# choose source
reader = read_from_filename
if args.filename in ['-', '/-']:
reader = read_from_stdin
# read source
try:
spec, spec_url = reader(args.filename)
except Exception as exc:
print(exc)
sys.exit(1)
# choose the validator
validators = {
'2.0': openapi_v2_spec_validator,
'3.0.0': openapi_v3_spec_validator,
}
validator = validators[args.schema]
# validate
try:
validator.validate(spec, spec_url=spec_url)
except ValidationError as exc:
print(exc)
sys.exit(1)
except Exception as exc:
print(exc)
sys.exit(2)
else:
print('OK')
if __name__ == '__main__':
main()
| [
"logging.getLogger",
"argparse.ArgumentParser",
"logging.basicConfig",
"sys.exit"
] | [((291, 318), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (308, 318), False, 'import logging\n'), ((319, 422), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(name)s %(message)s"""', 'level': 'logging.WARNING'}), "(format='%(asctime)s %(levelname)s %(name)s %(message)s',\n level=logging.WARNING)\n", (338, 422), False, 'import logging\n'), ((465, 490), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (488, 490), False, 'import argparse\n'), ((1033, 1044), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1041, 1044), False, 'import sys\n'), ((1363, 1374), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1371, 1374), False, 'import sys\n'), ((1431, 1442), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (1439, 1442), False, 'import sys\n')] |
import pyrebase
url = "https://amongus-htn-default-rtdb.firebaseio.com/"
class FirebaseHandler:
def __init__(self):
config = {
"apiKey": None,
"authDomain": None,
"databaseURL": url,
"storageBucket": None,
}
firebase = pyrebase.initialize_app(config)
        self.db = firebase.database()
| [
"pyrebase.initialize_app"
] | [((298, 329), 'pyrebase.initialize_app', 'pyrebase.initialize_app', (['config'], {}), '(config)\n', (321, 329), False, 'import pyrebase\n')] |
import argparse
import re
import json
from importlib import import_module
from difflib import SequenceMatcher
from pathlib import Path, PurePath
from os import walk
#regex_sphinx_link = '\`(.+)\<(.+)\>\`\_'
regex_sphinx_link = '\`([^\`]+)\ (\<[^\`]+\>\`\_)'
regex_parameters = '(\w*)\ *(?:\()(\w*)(?:\)):?\ *(\(\w*\):)?\ *(.*?)(?:\.)\ *(?:File type:\ *)(\w+)\.\ *(\`(?:.+)\<(.*)\>\`\_\.)?\ *(?:Accepted formats:\ *)(.+)(?:\.)?'
regex_param_value = '(\w*)\ *(?:(?:\()(.*)(?:\)))?'
#regex_property_values = '(?:\*\ *\*\*)(.*)(?:\*\*)\ *(?:\(\*)(\w*)(?:\*\))\ *\-\ *(?:\()(.*?)(?:\))\ *(?:(?:\[)(\d+(?:\.\d+)?)\-(\d+(?:\.\d+)?)(?:\|)?(\d+(?:\.\d+)?)?(?:\]))?\ *(?:(?:\[)(.*)(?:\]))?\ *(.*)?(?:Values:\ *)(.+)(?:\.)?'
regex_property_values = '(?:\*\ *\*\*)(.*)(?:\*\*)\ *(?:\(\*)(\w*)(?:\*\))\ *\-\ ?(?:\()(.*)(?:\))\ *(?:(?:\[)([\-]?\d+(?:\.\d+)?)\~([\-]?\d+(?:\.\d+)?)(?:\|)?(\d+(?:\.\d+)?)?(?:\]))?\ *(?:(?:\[)(.*)(?:\]))?\ *(.*)\ ?(?:Values:\ *)(.+)(?:\.)?'
regex_property_non_values = '(?:\*\ *\*\*)(.*)(?:\*\*)\ *(?:\(\*)(\w*)(?:\*\))\ *\-\ *(?:\()(.*?)(?:\))\ *(?:(?:\[)([\-]?\d+(?:\.\d+)?)\~([\-]?\d+(?:\.\d+)?)(?:\|)?(\d+(?:\.\d+)?)?(?:\]))?\ *(?:(?:\[)(.*)(?:\]))?\ *(.*)?'
#################
#regex_property_values = '(?:\*\s*\*\*)(.*)(?:\*\*)\s*(?:\(\*)(\w*)(?:\*\))\s*\-\s*(?:\()(.*)(?:\))\s*(?:(?:\[)(\w*)\-(\w*)(?:\|)?(\w*)?(?:\]))?\s*(?:(?:\[)(.*)(?:\]))?\s([a-zA-Z0-9_\- ().&?,!;]+)(?:\s|\.)+(?:(?:Values:)(.+))?'
###################
regex_prop_value = '([a-zA-Z0-9_\-\+:\/\/\.\ \,\*\#]+)\ *(?:(?:\()(.*)?(?:\)))?'
regex_info = '\*\ *(.*)'
regex_info_item = '(.*?)\:(?:\ *)(.*)'
class JSONSchemaGenerator():
def __init__(self, input_package, output_path, **kwargs):
self.input_package = input_package
# check if output_path exists
if not Path(output_path).exists():
            raise SystemExit('Nonexistent output path')
# check if output_path has correct structure
if not input_package in output_path:
raise SystemExit('Incorrect output path. The structure must be: path/biobb_package/biobb_package')
self.output_path = PurePath(output_path).joinpath('json_schemas')
if not Path(self.output_path).exists():
raise SystemExit('Incorrect output path. The structure must be: path/biobb_package/biobb_package')
def similar_string(self, a, b):
""" check similarity between two strings """
return SequenceMatcher(None, a, b).ratio()
def getType(self, type):
""" return JSON friendly type """
if type == 'str': return 'string'
if type == 'int': return 'integer'
if type == 'float': return 'number'
if type == 'bool': return 'boolean'
if type == 'dict': return 'object'
if type == 'list': return 'array'
return type
def getDefaultProperty(self, type, default):
""" return default according to type """
if default == 'None' and type != 'dict': return None
elif default == 'None' and type == 'dict': default = {}
elif type != 'dict': default = re.sub('\"|\'', '', default)
elif type == 'dict': default = json.loads(default)
if type == 'str' or type == 'string': return default
if type == 'int': return int(default)
if type == 'float': return float(default)
if type == 'bool': return default.lower() in ("yes", "true", "t", "1")
if type == 'dict': return default
return default
def getMinMaxStep(self, prop_type, prop_min):
if prop_type == "float": return float(prop_min)
elif prop_type == "int": return int(prop_min)
def replaceLink(self, matchobj):
return matchobj.group(1).strip()
def getParamFormats(self, vals, description):
list_vals = re.split(', |,',vals)
formats = []
file_formats = []
for val in list_vals:
f = re.findall(regex_param_value, val)[0]
formats.append('.*\.{0}$'.format(f[0]))
ff = {
"extension": '.*\.{0}$'.format(f[0]),
"description": description.strip('.')
}
if f[1]:
ffs = re.split('\|',f[1])
for item in ffs:
parts = re.split('\:',item)
ff[parts[0]] = parts[1]
file_formats.append(ff)
return formats, file_formats
def getPropFormats(self, vals, type_):
if not vals: return None, None
formats = []
prop_formats = []
list_vals = re.split(', |,',vals)
for val in list_vals:
# trick for cases when there are parenthesis in the format name
val = re.sub(r'\\\(', '****', val)
val = re.sub(r'\\\)', '++++', val)
val = re.sub(regex_sphinx_link, self.replaceLink, val)
f = re.findall(regex_prop_value, val)[0]
frmt = f[0].strip(' ')
if type_ == 'integer': frmt = int(f[0])
if type_ == 'float': frmt = float(f[0])
desc = f[1] if f[1] else None
# trick for cases when there are parenthesis in the format name
if type_ != 'integer' and type_ != 'float':
if '****' in frmt:
frmt = re.sub(r'\*\*\*\*', '(', frmt)
frmt = re.sub(r'\+\+\+\+', ')', frmt)
formats.append(frmt)
ff = {
"name": frmt,
"description": desc
}
prop_formats.append(ff)
return formats, prop_formats
def getParameters(self, row, required):
# get list with all info in parameters:
# * property id
# * property type
# * property description
# * mandatory / optional
# * file type
# * sample file
# * formats
param = row.strip()
param = re.findall(regex_parameters, param)[0]
param_id = param[0]
param_type = param[1]
if not param[2]: required.append(param_id)
description = re.sub(regex_sphinx_link, self.replaceLink, param[3])
filetype = param[4]
sample = param[6] if param[6] else None
formats, file_formats = self.getParamFormats(param[7], description)
p = {
"type": self.getType(param_type),
"description": description,
"filetype": filetype,
"sample": sample,
"enum": formats,
"file_formats": file_formats
}
return param_id, p, required
def getProperties(self, row):
# get list with all info in properties:
# * property id
# * property type
# * property default
# * property min-max|step
# * WF property
# * property description
# * property possible values
prop = row.strip()
regex = regex_property_values if 'Values:' in row else regex_property_non_values
#regex = regex_property_values
prop = re.findall(regex, prop)[0]
prop_id = prop[0]
prop_type = prop[1]
default = prop[2]
prop_min = prop[3] if prop[3] else None
prop_max = prop[4] if prop[4] else None
prop_step = prop[5] if prop[5] else None
wf_prop = True if prop[6] else False
description = re.sub(regex_sphinx_link, self.replaceLink, prop[7])
if len(prop) == 9: formats, property_formats = self.getPropFormats(prop[8].rstrip('\.'), self.getType(prop_type))
#formats, property_formats = self.getPropFormats(prop[8])
p = {
"type": self.getType(prop_type),
"default": self.getDefaultProperty(prop_type, default),
"wf_prop": wf_prop,
"description": description
}
if prop_min:
p["min"] = self.getMinMaxStep(prop_type, prop_min)
if prop_max: p["max"] = self.getMinMaxStep(prop_type, prop_max)
if prop_step: p["step"] = self.getMinMaxStep(prop_type, prop_step)
if len(prop) == 9:
p["enum"] = formats
p["property_formats"] = property_formats
#if formats and property_formats:
# p["enum"] = formats
# p["property_formats"] = property_formats
return prop_id, p
def getInfoProp(self, info_prop):
info_prop = re.findall(regex_info_item, info_prop)[0]
return info_prop[0], info_prop[1]
def getGenericInfo(self, row):
output = row.strip()
if output.startswith('|'):
output = output.replace('|', '')
output = output.strip()
output = re.sub(regex_sphinx_link, self.replaceLink, output)
else:
output = None
return output
def parseDocs(self, doclines, module):
""" parse python docs to object / JSON format """
# clean empty spaces from doclines
doclines = list(filter(lambda name: name.strip(), doclines))
# get name, title and description
name = self.getGenericInfo(doclines[0])
title = self.getGenericInfo(doclines[1])
description = self.getGenericInfo(doclines[2])
# parse documentation
args = False
info = False
required = []
obj_info = {}
object_schema = {
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "http://bioexcel.eu/" + self.input_package + "/json_schemas/1.0/" + module,
"name": name,
"title": title,
"description": description,
"type": "object",
"info": [],
"required": [],
"properties": {}
}
properties = {}
for row in doclines:
leading = len(row) - len(row.lstrip())
# check if arguments
if 'Args:' in row:
args = True
info = False
if args:
# first level: I/O & parameters dictionary
if leading == 8:
if 'properties' not in row:
param_id, p, required = self.getParameters(row, required)
properties[param_id] = p
# second level: properties
if leading == 12 and not row.isspace():
if not "properties" in properties:
properties["properties"] = { "type": "object", "properties": {} }
prop_level1, p = self.getProperties(row)
properties["properties"]["properties"][prop_level1] = p
# third level: parameters
if(leading == 16):
if not "parameters" in properties["properties"]["properties"][prop_level1]:
properties["properties"]["properties"][prop_level1]["type"] = "object"
properties["properties"]["properties"][prop_level1]["parameters"] = {}
prop_level2, p = self.getProperties(row)
properties["properties"]["properties"][prop_level1]["parameters"][prop_level2] = p
# check if examples
r = row.strip()
if r.startswith('Examples'):
info = False
args = False
# check if info
if 'Info:' in row:
info = True
args = False
if info:
if leading == 8:
info_id = row.strip()
info_id = re.findall(regex_info, info_id)[0].strip(':')
obj_info[info_id] = {}
if leading == 12:
info_prop = row.strip()
info_prop = re.findall(regex_info, info_prop)[0].strip(':')
k, v = self.getInfoProp(info_prop)
obj_info[info_id][k] = v
object_schema["info"] = obj_info
object_schema["required"] = required
object_schema["properties"] = properties
object_schema["additionalProperties"] = False
return object_schema
def cleanOutputPath(self):
""" removes all JSON files from the output path (except the biobb_package.json file) and all the config files """
# get all files in json_schemas folder
files = []
for (dirpath, dirnames, filenames) in walk(self.output_path):
files.extend(filenames)
break
# remove biobb_package.json file from array of files
if(self.input_package + '.json' in files): files.remove(self.input_package + '.json')
# remove files from array of files
for f in files:
path = PurePath(self.output_path).joinpath(f)
Path(path).unlink()
def saveJSONFile(self, module, object_schema):
""" save JSON file for each module """
path = PurePath(self.output_path).joinpath(module + '.json')
with open(path, 'w') as file:
json.dump(object_schema, file, indent=4)
print(str(path) + " file saved")
def launch(self):
""" launch function for JSONSchemaGenerator """
# import package
packages = import_module(self.input_package)
# remove old JSON files
self.cleanOutputPath()
# get documentation of python files
for package in packages.__all__:
# for every package import all modules
modules = import_module(self.input_package + '.' + package)
for module in modules.__all__:
print("Parsing " + str(PurePath(self.output_path).joinpath(module + '.json')))
# json schemas
# import single module
mod = import_module(self.input_package + '.' + package + '.' + module)
# get class name through similarity with module name
sel_class = ''
similarity = 0;
for item in dir(mod):
if ( item[0].isupper() and
not item.startswith('Path') and
not item.startswith('Pure') and
not item.startswith('check_') ):
s = self.similar_string(item, module)
if s > similarity:
sel_class = item
similarity = s
# exceptions:
if sel_class == "KMeans" and module == "k_means":
sel_class = "KMeansClustering"
if sel_class == "KMeans" and module == "dbscan":
sel_class = "DBSCANClustering"
if sel_class == "AgglomerativeClustering":
sel_class = "AgglClustering"
if sel_class == "SpectralClustering":
sel_class = "SpecClustering"
# get class documentation
klass = getattr(mod, sel_class)
doclines = klass.__doc__.splitlines()
object_schema = self.parseDocs(doclines, module)
self.saveJSONFile(module, object_schema)
def main():
parser = argparse.ArgumentParser(description="Creates json_schemas for given BioBB package.",
formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999),
epilog='''Examples: \njson_generator.py -p biobb_package -o path/to/biobb_package/biobb_package\njson_generator.py --package biobb_package --output path/to/biobb_package/biobb_package''')
required_args = parser.add_argument_group('required arguments')
required_args.add_argument('--package', '-p', required=True, help='BioBB package to be parsed.')
required_args.add_argument('--output', '-o', required=True, help='Output path to the biobb_package/biobb_package folder.')
args = parser.parse_args()
JSONSchemaGenerator(input_package=args.package, output_path=args.output).launch()
if __name__ == '__main__':
main()
| [
"re.split",
"json.loads",
"importlib.import_module",
"pathlib.Path",
"json.dump",
"difflib.SequenceMatcher",
"pathlib.PurePath",
"argparse.RawTextHelpFormatter",
"re.sub",
"re.findall",
"os.walk"
] | [((3783, 3805), 're.split', 're.split', (['""", |,"""', 'vals'], {}), "(', |,', vals)\n", (3791, 3805), False, 'import re\n'), ((4577, 4599), 're.split', 're.split', (['""", |,"""', 'vals'], {}), "(', |,', vals)\n", (4585, 4599), False, 'import re\n'), ((6103, 6156), 're.sub', 're.sub', (['regex_sphinx_link', 'self.replaceLink', 'param[3]'], {}), '(regex_sphinx_link, self.replaceLink, param[3])\n', (6109, 6156), False, 'import re\n'), ((7379, 7431), 're.sub', 're.sub', (['regex_sphinx_link', 'self.replaceLink', 'prop[7]'], {}), '(regex_sphinx_link, self.replaceLink, prop[7])\n', (7385, 7431), False, 'import re\n'), ((12448, 12470), 'os.walk', 'walk', (['self.output_path'], {}), '(self.output_path)\n', (12452, 12470), False, 'from os import walk\n'), ((13267, 13300), 'importlib.import_module', 'import_module', (['self.input_package'], {}), '(self.input_package)\n', (13280, 13300), False, 'from importlib import import_module\n'), ((4725, 4755), 're.sub', 're.sub', (['"""\\\\\\\\\\\\("""', '"""****"""', 'val'], {}), "('\\\\\\\\\\\\(', '****', val)\n", (4731, 4755), False, 'import re\n'), ((4772, 4802), 're.sub', 're.sub', (['"""\\\\\\\\\\\\)"""', '"""++++"""', 'val'], {}), "('\\\\\\\\\\\\)', '++++', val)\n", (4778, 4802), False, 'import re\n'), ((4820, 4868), 're.sub', 're.sub', (['regex_sphinx_link', 'self.replaceLink', 'val'], {}), '(regex_sphinx_link, self.replaceLink, val)\n', (4826, 4868), False, 'import re\n'), ((5932, 5967), 're.findall', 're.findall', (['regex_parameters', 'param'], {}), '(regex_parameters, param)\n', (5942, 5967), False, 'import re\n'), ((7059, 7082), 're.findall', 're.findall', (['regex', 'prop'], {}), '(regex, prop)\n', (7069, 7082), False, 'import re\n'), ((8396, 8434), 're.findall', 're.findall', (['regex_info_item', 'info_prop'], {}), '(regex_info_item, info_prop)\n', (8406, 8434), False, 'import re\n'), ((8682, 8733), 're.sub', 're.sub', (['regex_sphinx_link', 'self.replaceLink', 'output'], {}), '(regex_sphinx_link, self.replaceLink, output)\n', (8688, 8733), False, 'import re\n'), ((13060, 13100), 'json.dump', 'json.dump', (['object_schema', 'file'], {'indent': '(4)'}), '(object_schema, file, indent=4)\n', (13069, 13100), False, 'import json\n'), ((13524, 13573), 'importlib.import_module', 'import_module', (["(self.input_package + '.' + package)"], {}), "(self.input_package + '.' + package)\n", (13537, 13573), False, 'from importlib import import_module\n'), ((2103, 2124), 'pathlib.PurePath', 'PurePath', (['output_path'], {}), '(output_path)\n', (2111, 2124), False, 'from pathlib import Path, PurePath\n'), ((2423, 2450), 'difflib.SequenceMatcher', 'SequenceMatcher', (['None', 'a', 'b'], {}), '(None, a, b)\n', (2438, 2450), False, 'from difflib import SequenceMatcher\n'), ((3899, 3933), 're.findall', 're.findall', (['regex_param_value', 'val'], {}), '(regex_param_value, val)\n', (3909, 3933), False, 'import re\n'), ((4187, 4208), 're.split', 're.split', (['"""\\\\|"""', 'f[1]'], {}), "('\\\\|', f[1])\n", (4195, 4208), False, 'import re\n'), ((4886, 4919), 're.findall', 're.findall', (['regex_prop_value', 'val'], {}), '(regex_prop_value, val)\n', (4896, 4919), False, 'import re\n'), ((12955, 12981), 'pathlib.PurePath', 'PurePath', (['self.output_path'], {}), '(self.output_path)\n', (12963, 12981), False, 'from pathlib import Path, PurePath\n'), ((13806, 13870), 'importlib.import_module', 'import_module', (["(self.input_package + '.' + package + '.' + module)"], {}), "(self.input_package + '.' + package + '.' 
+ module)\n", (13819, 13870), False, 'from importlib import import_module\n'), ((15368, 15416), 'argparse.RawTextHelpFormatter', 'argparse.RawTextHelpFormatter', (['prog'], {'width': '(99999)'}), '(prog, width=99999)\n', (15397, 15416), False, 'import argparse\n'), ((1782, 1799), 'pathlib.Path', 'Path', (['output_path'], {}), '(output_path)\n', (1786, 1799), False, 'from pathlib import Path, PurePath\n'), ((2166, 2188), 'pathlib.Path', 'Path', (['self.output_path'], {}), '(self.output_path)\n', (2170, 2188), False, 'from pathlib import Path, PurePath\n'), ((3074, 3101), 're.sub', 're.sub', (['""""|\'"""', '""""""', 'default'], {}), '(\'"|\\\'\', \'\', default)\n', (3080, 3101), False, 'import re\n'), ((4284, 4305), 're.split', 're.split', (['"""\\\\:"""', 'item'], {}), "('\\\\:', item)\n", (4292, 4305), False, 'import re\n'), ((5302, 5335), 're.sub', 're.sub', (['"""\\\\*\\\\*\\\\*\\\\*"""', '"""("""', 'frmt'], {}), "('\\\\*\\\\*\\\\*\\\\*', '(', frmt)\n", (5308, 5335), False, 'import re\n'), ((5360, 5393), 're.sub', 're.sub', (['"""\\\\+\\\\+\\\\+\\\\+"""', '""")"""', 'frmt'], {}), "('\\\\+\\\\+\\\\+\\\\+', ')', frmt)\n", (5366, 5393), False, 'import re\n'), ((12769, 12795), 'pathlib.PurePath', 'PurePath', (['self.output_path'], {}), '(self.output_path)\n', (12777, 12795), False, 'from pathlib import Path, PurePath\n'), ((12820, 12830), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (12824, 12830), False, 'from pathlib import Path, PurePath\n'), ((3142, 3161), 'json.loads', 'json.loads', (['default'], {}), '(default)\n', (3152, 3161), False, 'import json\n'), ((11604, 11635), 're.findall', 're.findall', (['regex_info', 'info_id'], {}), '(regex_info, info_id)\n', (11614, 11635), False, 'import re\n'), ((11804, 11837), 're.findall', 're.findall', (['regex_info', 'info_prop'], {}), '(regex_info, info_prop)\n', (11814, 11837), False, 'import re\n'), ((13657, 13683), 'pathlib.PurePath', 'PurePath', (['self.output_path'], {}), '(self.output_path)\n', (13665, 13683), False, 'from pathlib import Path, PurePath\n')] |
from pickle import FALSE
from django.shortcuts import redirect, render, resolve_url
from django.http import HttpResponse
from flask import jsonify
from posApp.models import Category, Products, Sales, salesItems
from django.db.models import Count, Sum
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
import json, sys
from datetime import date, datetime
# Login
def login_user(request):
logout(request)
resp = {"status": 'failed', 'msg': ''}
username = ''
password = ''
if request.POST:
username = request.POST['username']
password = request.POST['password']
user = authenticate(username=username, password=password)
if user is not None:
if user.is_active:
login(request, user)
resp['status'] = 'success'
else:
resp['msg'] = "Incorrect username or password"
else:
resp['msg'] = "Incorrect username or password"
return HttpResponse(json.dumps(resp), content_type='application/json')
# Logout
def logoutuser(request):
logout(request)
return redirect('/')
# Create your views here.
# @login_required(login_url=resolve_url('/pharmacy/login/'))
def home(request):
now = datetime.now()
current_year = now.strftime("%Y")
current_month = now.strftime("%m")
current_day = now.strftime("%d")
categories = len(Category.objects.all())
products = len(Products.objects.all())
transaction = len(Sales.objects.filter(
date_added__year=current_year,
date_added__month=current_month,
date_added__day=current_day
))
today_sales = Sales.objects.filter(
date_added__year=current_year,
date_added__month=current_month,
date_added__day=current_day
).all()
total_sales = sum(today_sales.values_list('grand_total', flat=True))
context = {
'page_title': 'Home',
'categories': categories,
'products': products,
'transaction': transaction,
'total_sales': total_sales,
}
return render(request, 'posApp/home.html', context)
def about(request):
context = {
'page_title': 'About',
}
return render(request, 'posApp/about.html', context)
# Categories
@login_required
def category(request):
category_list = Category.objects.all()
# category_list = {}
context = {
'page_title': 'Category List',
'category': category_list,
}
return render(request, 'posApp/category.html', context)
@login_required
def manage_category(request):
category = {}
if request.method == 'GET':
data = request.GET
id = ''
if 'id' in data:
id = data['id']
if id.isnumeric() and int(id) > 0:
category = Category.objects.filter(id=id).first()
context = {
'category': category
}
return render(request, 'posApp/manage_category.html', context)
@login_required
def save_category(request):
data = request.POST
resp = {'status': 'failed'}
try:
if (data['id']).isnumeric() and int(data['id']) > 0:
save_category = Category.objects.filter(id=data['id']).update(name=data['name'],
description=data['description'],
status=data['status'])
else:
save_category = Category(name=data['name'], description=data['description'], status=data['status'])
save_category.save()
resp['status'] = 'success'
messages.success(request, 'Category Successfully saved.')
except:
resp['status'] = 'failed'
return HttpResponse(json.dumps(resp), content_type="application/json")
@login_required
def delete_category(request):
data = request.POST
resp = {'status': ''}
try:
Category.objects.filter(id=data['id']).delete()
resp['status'] = 'success'
messages.success(request, 'Category Successfully deleted.')
except:
resp['status'] = 'failed'
return HttpResponse(json.dumps(resp), content_type="application/json")
# Products
@login_required
def products(request):
product_list = Products.objects.all()
context = {
'page_title': 'Product List',
'products': product_list,
}
return render(request, 'posApp/products.html', context)
@login_required
def manage_products(request):
product = {}
categories = Category.objects.filter(status=1).all()
if request.method == 'GET':
data = request.GET
id = ''
if 'id' in data:
id = data['id']
if id.isnumeric() and int(id) > 0:
product = Products.objects.filter(id=id).first()
context = {
'product': product,
'categories': categories
}
return render(request, 'posApp/manage_product.html', context)
def test(request):
categories = Category.objects.all()
context = {
'categories': categories
}
return render(request, 'posApp/test.html', context)
@login_required
def save_product(request):
data = request.POST
resp = {'status': 'failed'}
id = ''
if 'id' in data:
id = data['id']
if id.isnumeric() and int(id) > 0:
check = Products.objects.exclude(id=id).filter(code=data['code']).all()
else:
check = Products.objects.filter(code=data['code']).all()
if len(check) > 0:
resp['msg'] = "Product Code Already Exists in the database"
else:
category = Category.objects.filter(id=data['category_id']).first()
try:
if (data['id']).isnumeric() and int(data['id']) > 0:
save_product = Products.objects.filter(id=data['id']).update(code=data['code'], category_id=category,
name=data['name'],
description=data['description'],
price=float(data['price']),
status=data['status'])
else:
save_product = Products(code=data['code'], category_id=category, name=data['name'],
description=data['description'], price=float(data['price']),
status=data['status'])
save_product.save()
resp['status'] = 'success'
messages.success(request, 'Product Successfully saved.')
except:
resp['status'] = 'failed'
return HttpResponse(json.dumps(resp), content_type="application/json")
@login_required
def delete_product(request):
data = request.POST
resp = {'status': ''}
try:
Products.objects.filter(id=data['id']).delete()
resp['status'] = 'success'
messages.success(request, 'Product Successfully deleted.')
except:
resp['status'] = 'failed'
return HttpResponse(json.dumps(resp), content_type="application/json")
@login_required
def pos(request):
products = Products.objects.filter(status=1)
product_json = []
for product in products:
product_json.append({'id': product.id, 'name': product.name, 'price': float(product.price)})
context = {
'page_title': "Point of Sale",
'products': products,
'product_json': json.dumps(product_json)
}
# return HttpResponse('')
return render(request, 'posApp/pos.html', context)
@login_required
def checkout_modal(request):
grand_total = 0
if 'grand_total' in request.GET:
grand_total = request.GET['grand_total']
context = {
'grand_total': grand_total,
}
return render(request, 'posApp/checkout.html', context)
@login_required
def save_pos(request):
resp = {'status': 'failed', 'msg': ''}
data = request.POST
pref = datetime.now().year + datetime.now().year
i = 1
while True:
code = '{:0>5}'.format(i)
i += int(1)
check = Sales.objects.filter(code=str(pref) + str(code)).all()
if len(check) <= 0:
break
code = str(pref) + str(code)
try:
sales = Sales(code=code, sub_total=data['sub_total'], tax=data['tax'], tax_amount=data['tax_amount'],
grand_total=data['grand_total'], tendered_amount=data['tendered_amount'],
amount_change=data['amount_change']).save()
sale_id = Sales.objects.last().pk
i = 0
for prod in data.getlist('product_id[]'):
product_id = prod
sale = Sales.objects.filter(id=sale_id).first()
product = Products.objects.filter(id=product_id).first()
qty = data.getlist('qty[]')[i]
price = data.getlist('price[]')[i]
total = float(qty) * float(price)
print({'sale_id': sale, 'product_id': product, 'qty': qty, 'price': price, 'total': total})
salesItems(sale_id=sale, product_id=product, qty=qty, price=price, total=total).save()
i += int(1)
resp['status'] = 'success'
resp['sale_id'] = sale_id
messages.success(request, "Sale Record has been saved.")
except:
resp['msg'] = "An error occured"
print("Unexpected error:", sys.exc_info()[0])
return HttpResponse(json.dumps(resp), content_type="application/json")
@login_required
def salesList(request):
sales = Sales.objects.all()
sale_data = []
for sale in sales:
data = {}
for field in sale._meta.get_fields(include_parents=False):
if field.related_model is None:
data[field.name] = getattr(sale, field.name)
data['items'] = salesItems.objects.filter(sale_id=sale).all()
data['item_count'] = len(data['items'])
if 'tax_amount' in data:
data['tax_amount'] = format(float(data['tax_amount']), '.2f')
# print(data)
sale_data.append(data)
# print(sale_data)
context = {
'page_title': 'Sales Transactions',
'sale_data': sale_data,
}
# return HttpResponse('')
return render(request, 'posApp/sales.html', context)
@login_required
def receipt(request):
id = request.GET.get('id')
sales = Sales.objects.filter(id=id).first()
transaction = {}
for field in Sales._meta.get_fields():
if field.related_model is None:
transaction[field.name] = getattr(sales, field.name)
if 'tax_amount' in transaction:
transaction['tax_amount'] = format(float(transaction['tax_amount']))
ItemList = salesItems.objects.filter(sale_id=sales).all()
context = {
"transaction": transaction,
"salesItems": ItemList
}
return render(request, 'posApp/receipt.html', context)
# return HttpResponse('')
@login_required
def delete_sale(request):
resp = {'status': 'failed', 'msg': ''}
id = request.POST.get('id')
try:
delete = Sales.objects.filter(id=id).delete()
resp['status'] = 'success'
messages.success(request, 'Sale Record has been deleted.')
except:
resp['msg'] = "An error occured"
print("Unexpected error:", sys.exc_info()[0])
return HttpResponse(json.dumps(resp), content_type='application/json')
| [
"posApp.models.Sales",
"sys.exc_info",
"posApp.models.Sales.objects.last",
"django.shortcuts.render",
"posApp.models.Category",
"json.dumps",
"posApp.models.Products.objects.all",
"django.shortcuts.redirect",
"posApp.models.salesItems",
"posApp.models.Products.objects.exclude",
"django.contrib.auth.logout",
"posApp.models.Category.objects.all",
"django.contrib.auth.authenticate",
"posApp.models.Sales.objects.filter",
"posApp.models.Category.objects.filter",
"posApp.models.Products.objects.filter",
"posApp.models.salesItems.objects.filter",
"posApp.models.Sales._meta.get_fields",
"django.contrib.auth.login",
"datetime.datetime.now",
"django.contrib.messages.success",
"posApp.models.Sales.objects.all"
] | [((551, 566), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (557, 566), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1253, 1268), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (1259, 1268), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1281, 1294), 'django.shortcuts.redirect', 'redirect', (['"""/"""'], {}), "('/')\n", (1289, 1294), False, 'from django.shortcuts import redirect\n'), ((1419, 1433), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1431, 1433), False, 'from datetime import date, datetime\n'), ((2267, 2311), 'django.shortcuts.render', 'render', (['request', '"""posApp/home.html"""', 'context'], {}), "(request, 'posApp/home.html', context)\n", (2273, 2311), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((2405, 2450), 'django.shortcuts.render', 'render', (['request', '"""posApp/about.html"""', 'context'], {}), "(request, 'posApp/about.html', context)\n", (2411, 2450), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((2531, 2553), 'posApp.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (2551, 2553), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((2692, 2740), 'django.shortcuts.render', 'render', (['request', '"""posApp/category.html"""', 'context'], {}), "(request, 'posApp/category.html', context)\n", (2698, 2740), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((3120, 3175), 'django.shortcuts.render', 'render', (['request', '"""posApp/manage_category.html"""', 'context'], {}), "(request, 'posApp/manage_category.html', context)\n", (3126, 3175), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((4522, 4544), 'posApp.models.Products.objects.all', 'Products.objects.all', ([], {}), '()\n', (4542, 4544), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((4655, 4703), 'django.shortcuts.render', 'render', (['request', '"""posApp/products.html"""', 'context'], {}), "(request, 'posApp/products.html', context)\n", (4661, 4703), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((5172, 5226), 'django.shortcuts.render', 'render', (['request', '"""posApp/manage_product.html"""', 'context'], {}), "(request, 'posApp/manage_product.html', context)\n", (5178, 5226), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((5269, 5291), 'posApp.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (5289, 5291), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((5362, 5406), 'django.shortcuts.render', 'render', (['request', '"""posApp/test.html"""', 'context'], {}), "(request, 'posApp/test.html', context)\n", (5368, 5406), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((7585, 7618), 'posApp.models.Products.objects.filter', 'Products.objects.filter', ([], {'status': '(1)'}), '(status=1)\n', (7608, 7618), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((7962, 8005), 'django.shortcuts.render', 'render', (['request', '"""posApp/pos.html"""', 'context'], {}), "(request, 'posApp/pos.html', context)\n", (7968, 8005), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((8239, 8287), 'django.shortcuts.render', 'render', (['request', '"""posApp/checkout.html"""', 'context'], {}), "(request, 'posApp/checkout.html', context)\n", (8245, 8287), 
False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((10003, 10022), 'posApp.models.Sales.objects.all', 'Sales.objects.all', ([], {}), '()\n', (10020, 10022), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((10714, 10759), 'django.shortcuts.render', 'render', (['request', '"""posApp/sales.html"""', 'context'], {}), "(request, 'posApp/sales.html', context)\n", (10720, 10759), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((10925, 10949), 'posApp.models.Sales._meta.get_fields', 'Sales._meta.get_fields', ([], {}), '()\n', (10947, 10949), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((11343, 11390), 'django.shortcuts.render', 'render', (['request', '"""posApp/receipt.html"""', 'context'], {}), "(request, 'posApp/receipt.html', context)\n", (11349, 11390), False, 'from django.shortcuts import redirect, render, resolve_url\n'), ((779, 829), 'django.contrib.auth.authenticate', 'authenticate', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (791, 829), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1157, 1173), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (1167, 1173), False, 'import json, sys\n'), ((1573, 1595), 'posApp.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (1593, 1595), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((1617, 1639), 'posApp.models.Products.objects.all', 'Products.objects.all', ([], {}), '()\n', (1637, 1639), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((1664, 1782), 'posApp.models.Sales.objects.filter', 'Sales.objects.filter', ([], {'date_added__year': 'current_year', 'date_added__month': 'current_month', 'date_added__day': 'current_day'}), '(date_added__year=current_year, date_added__month=\n current_month, date_added__day=current_day)\n', (1684, 1782), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((3863, 3920), 'django.contrib.messages.success', 'messages.success', (['request', '"""Category Successfully saved."""'], {}), "(request, 'Category Successfully saved.')\n", (3879, 3920), False, 'from django.contrib import messages\n'), ((3994, 4010), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (4004, 4010), False, 'import json, sys\n'), ((4261, 4320), 'django.contrib.messages.success', 'messages.success', (['request', '"""Category Successfully deleted."""'], {}), "(request, 'Category Successfully deleted.')\n", (4277, 4320), False, 'from django.contrib import messages\n'), ((4394, 4410), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (4404, 4410), False, 'import json, sys\n'), ((7080, 7096), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (7090, 7096), False, 'import json, sys\n'), ((7346, 7404), 'django.contrib.messages.success', 'messages.success', (['request', '"""Product Successfully deleted."""'], {}), "(request, 'Product Successfully deleted.')\n", (7362, 7404), False, 'from django.contrib import messages\n'), ((7478, 7494), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (7488, 7494), False, 'import json, sys\n'), ((7887, 7911), 'json.dumps', 'json.dumps', (['product_json'], {}), '(product_json)\n', (7897, 7911), False, 'import json, sys\n'), ((9701, 9757), 'django.contrib.messages.success', 'messages.success', (['request', '"""Sale Record has been saved."""'], {}), "(request, 'Sale Record has been saved.')\n", (9717, 
9757), False, 'from django.contrib import messages\n'), ((9893, 9909), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (9903, 9909), False, 'import json, sys\n'), ((11657, 11715), 'django.contrib.messages.success', 'messages.success', (['request', '"""Sale Record has been deleted."""'], {}), "(request, 'Sale Record has been deleted.')\n", (11673, 11715), False, 'from django.contrib import messages\n'), ((11851, 11867), 'json.dumps', 'json.dumps', (['resp'], {}), '(resp)\n', (11861, 11867), False, 'import json, sys\n'), ((1832, 1950), 'posApp.models.Sales.objects.filter', 'Sales.objects.filter', ([], {'date_added__year': 'current_year', 'date_added__month': 'current_month', 'date_added__day': 'current_day'}), '(date_added__year=current_year, date_added__month=\n current_month, date_added__day=current_day)\n', (1852, 1950), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((3700, 3788), 'posApp.models.Category', 'Category', ([], {'name': "data['name']", 'description': "data['description']", 'status': "data['status']"}), "(name=data['name'], description=data['description'], status=data[\n 'status'])\n", (3708, 3788), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((4792, 4825), 'posApp.models.Category.objects.filter', 'Category.objects.filter', ([], {'status': '(1)'}), '(status=1)\n', (4815, 4825), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((6942, 6998), 'django.contrib.messages.success', 'messages.success', (['request', '"""Product Successfully saved."""'], {}), "(request, 'Product Successfully saved.')\n", (6958, 6998), False, 'from django.contrib import messages\n'), ((8414, 8428), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8426, 8428), False, 'from datetime import date, datetime\n'), ((8436, 8450), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (8448, 8450), False, 'from datetime import date, datetime\n'), ((9000, 9020), 'posApp.models.Sales.objects.last', 'Sales.objects.last', ([], {}), '()\n', (9018, 9020), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((10849, 10876), 'posApp.models.Sales.objects.filter', 'Sales.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (10869, 10876), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((11189, 11229), 'posApp.models.salesItems.objects.filter', 'salesItems.objects.filter', ([], {'sale_id': 'sales'}), '(sale_id=sales)\n', (11214, 11229), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((909, 929), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (914, 929), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((4168, 4206), 'posApp.models.Category.objects.filter', 'Category.objects.filter', ([], {'id': "data['id']"}), "(id=data['id'])\n", (4191, 4206), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((5723, 5765), 'posApp.models.Products.objects.filter', 'Products.objects.filter', ([], {'code': "data['code']"}), "(code=data['code'])\n", (5746, 5765), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((5896, 5943), 'posApp.models.Category.objects.filter', 'Category.objects.filter', ([], {'id': "data['category_id']"}), "(id=data['category_id'])\n", (5919, 5943), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((7253, 7291), 'posApp.models.Products.objects.filter', 'Products.objects.filter', ([], 
{'id': "data['id']"}), "(id=data['id'])\n", (7276, 7291), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((8723, 8937), 'posApp.models.Sales', 'Sales', ([], {'code': 'code', 'sub_total': "data['sub_total']", 'tax': "data['tax']", 'tax_amount': "data['tax_amount']", 'grand_total': "data['grand_total']", 'tendered_amount': "data['tendered_amount']", 'amount_change': "data['amount_change']"}), "(code=code, sub_total=data['sub_total'], tax=data['tax'], tax_amount=\n data['tax_amount'], grand_total=data['grand_total'], tendered_amount=\n data['tendered_amount'], amount_change=data['amount_change'])\n", (8728, 8937), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((10286, 10325), 'posApp.models.salesItems.objects.filter', 'salesItems.objects.filter', ([], {'sale_id': 'sale'}), '(sale_id=sale)\n', (10311, 10325), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((11575, 11602), 'posApp.models.Sales.objects.filter', 'Sales.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (11595, 11602), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((3013, 3043), 'posApp.models.Category.objects.filter', 'Category.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (3036, 3043), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((3385, 3423), 'posApp.models.Category.objects.filter', 'Category.objects.filter', ([], {'id': "data['id']"}), "(id=data['id'])\n", (3408, 3423), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((5032, 5062), 'posApp.models.Products.objects.filter', 'Products.objects.filter', ([], {'id': 'id'}), '(id=id)\n', (5055, 5062), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((9141, 9173), 'posApp.models.Sales.objects.filter', 'Sales.objects.filter', ([], {'id': 'sale_id'}), '(id=sale_id)\n', (9161, 9173), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((9205, 9243), 'posApp.models.Products.objects.filter', 'Products.objects.filter', ([], {'id': 'product_id'}), '(id=product_id)\n', (9228, 9243), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((9509, 9588), 'posApp.models.salesItems', 'salesItems', ([], {'sale_id': 'sale', 'product_id': 'product', 'qty': 'qty', 'price': 'price', 'total': 'total'}), '(sale_id=sale, product_id=product, qty=qty, price=price, total=total)\n', (9519, 9588), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((9849, 9863), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (9861, 9863), False, 'import json, sys\n'), ((11807, 11821), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (11819, 11821), False, 'import json, sys\n'), ((5631, 5662), 'posApp.models.Products.objects.exclude', 'Products.objects.exclude', ([], {'id': 'id'}), '(id=id)\n', (5655, 5662), False, 'from posApp.models import Category, Products, Sales, salesItems\n'), ((6064, 6102), 'posApp.models.Products.objects.filter', 'Products.objects.filter', ([], {'id': "data['id']"}), "(id=data['id'])\n", (6087, 6102), False, 'from posApp.models import Category, Products, Sales, salesItems\n')] |
import datetime
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
import requests
from data_refinery_common.models import DatasetAnnotation
class Command(BaseCommand):
help = "Post downloads summary to slack"
def add_arguments(self, parser):
parser.add_argument(
"--days",
type=int,
default=7, # default to a week
help=("Number of days in the past for which to build the stats"),
)
parser.add_argument(
"--channel",
type=str,
default="ccdl-general",
help=("Optional parameter to choose the channel where the message will be posted."),
)
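    # Example invocation of this management command; the command name comes from this
    # module's file name, so "post_downloads_summary" below is only a placeholder:
    #   python manage.py post_downloads_summary --days 30 --channel ccdl-general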
def handle(self, *args, **options):
days = options["days"]
        # Only consider dataset activity from the last `days` days.
        start_time = timezone.now() - datetime.timedelta(days=days)
annotation_queryset = DatasetAnnotation.objects.filter(
created_at__gt=start_time
).prefetch_related("dataset")
annotations = [
annotation
for annotation in annotation_queryset
if annotation.data["start"] and should_display_email(annotation.dataset.email_address)
]
unique_users = list(set(annotation.dataset.email_address for annotation in annotations))
unique_ips = list(set(annotation.data["ip"] for annotation in annotations))
if unique_users:
fallback_text = "In the last {0} days, {1} users downloaded datasets from {2} locations.".format(
days, len(unique_users), len(unique_ips)
)
else:
fallback_text = "There were no downloads in the last {0} days.".format(days)
new_users = ""
returning_users = ""
for email in unique_users:
user_annotations = annotation_queryset.filter(dataset__email_address=email)
total_downloads = user_annotations.count()
unique_locations = list(set(annotation.data["ip"] for annotation in user_annotations))
locations = ", ".join(get_ip_location(ip) for ip in unique_locations)
is_new_user = DatasetAnnotation.objects.filter(
created_at__lt=start_time, dataset__email_address=email
)
text = "{0} | {1} downloads from {2}\n".format(email, total_downloads, locations)
if is_new_user:
new_users += text
else:
returning_users += text
blocks = [
{
"type": "section",
"text": {"type": "plain_text", "emoji": True, "text": fallback_text},
}
]
if new_users:
blocks.append(
{
"type": "section",
"text": {"type": "mrkdwn", "text": "*New users* \n" + new_users,},
}
)
if returning_users:
blocks.append(
{
"type": "section",
"text": {"type": "mrkdwn", "text": "*Returning users* \n" + returning_users,},
}
)
# Post to slack
requests.post(
settings.ENGAGEMENTBOT_WEBHOOK,
json={
"username": "EngagementBot",
"icon_emoji": ":halal:",
"channel": "#" + options["channel"],
"text": fallback_text,
"blocks": blocks,
},
headers={"Content-Type": "application/json"},
timeout=10,
)
def should_display_email(email: str) -> bool:
""" Returns true if the given email is not associated with the CCDL suers """
if not email:
return False
return not (
email.startswith("cansav09")
or email.startswith("arielsvn")
or email.startswith("jaclyn.n.taroni")
or email.startswith("kurt.wheeler")
or email.startswith("greenescientist")
or "@alexslemonade.org" not in email
or email.startswith("miserlou")
or email.startswith("d.prasad")
or email is ("<EMAIL>")
or email is ("<EMAIL>")
)
def get_ip_location(remote_ip):
try:
city = requests.get("https://ipapi.co/" + remote_ip + "/json/", timeout=10).json()["city"]
except Exception:
city = remote_ip
return city
| [
"requests.post",
"requests.get",
"django.utils.timezone.now",
"data_refinery_common.models.DatasetAnnotation.objects.filter",
"datetime.timedelta"
] | [((3181, 3437), 'requests.post', 'requests.post', (['settings.ENGAGEMENTBOT_WEBHOOK'], {'json': "{'username': 'EngagementBot', 'icon_emoji': ':halal:', 'channel': '#' +\n options['channel'], 'text': fallback_text, 'blocks': blocks}", 'headers': "{'Content-Type': 'application/json'}", 'timeout': '(10)'}), "(settings.ENGAGEMENTBOT_WEBHOOK, json={'username':\n 'EngagementBot', 'icon_emoji': ':halal:', 'channel': '#' + options[\n 'channel'], 'text': fallback_text, 'blocks': blocks}, headers={\n 'Content-Type': 'application/json'}, timeout=10)\n", (3194, 3437), False, 'import requests\n'), ((842, 856), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (854, 856), False, 'from django.utils import timezone\n'), ((859, 889), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(-days)'}), '(days=-days)\n', (877, 889), False, 'import datetime\n'), ((2167, 2260), 'data_refinery_common.models.DatasetAnnotation.objects.filter', 'DatasetAnnotation.objects.filter', ([], {'created_at__lt': 'start_time', 'dataset__email_address': 'email'}), '(created_at__lt=start_time,\n dataset__email_address=email)\n', (2199, 2260), False, 'from data_refinery_common.models import DatasetAnnotation\n'), ((921, 980), 'data_refinery_common.models.DatasetAnnotation.objects.filter', 'DatasetAnnotation.objects.filter', ([], {'created_at__gt': 'start_time'}), '(created_at__gt=start_time)\n', (953, 980), False, 'from data_refinery_common.models import DatasetAnnotation\n'), ((4233, 4301), 'requests.get', 'requests.get', (["('https://ipapi.co/' + remote_ip + '/json/')"], {'timeout': '(10)'}), "('https://ipapi.co/' + remote_ip + '/json/', timeout=10)\n", (4245, 4301), False, 'import requests\n')] |
import pandas as pd
from pyscf import gto, scf, mcscf
from pyqmc.mc import initial_guess
from pyqmc.multiplywf import MultiplyWF
from pyqmc.accumulators import EnergyAccumulator
from pyqmc.slater import Slater
from pyqmc.wftools import generate_jastrow
import numpy as np
import time
def test_ecp_sj(C2_ccecp_rhf, nconf=10000):
"""test whether the cutoff saves us time without changing the energy too much.
Because it's a stochastic evaluation, random choices can make a big difference, so we only require 10% agreement between these two."""
mol, mf = C2_ccecp_rhf
THRESHOLDS = [1e15, 10]
np.random.seed(1234)
coords = initial_guess(mol, nconf)
wf = MultiplyWF(Slater(mol, mf), generate_jastrow(mol)[0])
wf.recompute(coords)
times = []
energies = []
for threshold in THRESHOLDS:
np.random.seed(1234)
eacc = EnergyAccumulator(mol, threshold)
start = time.time()
energy = eacc(coords, wf)
end = time.time()
times.append(end - start)
energies.append(np.mean(energy["total"]))
# print(times, energies)
assert times[1] < times[0]
assert (energies[1] - energies[0]) / energies[0] < 0.1
if __name__ == "__main__":
mol = gto.M(
atom="""C 0 0 0
C 1 0 0 """,
ecp="ccecp",
basis="ccecpccpvdz",
)
mf = scf.RHF(mol).run()
test_ecp_sj((mol, mf))
| [
"numpy.mean",
"pyscf.scf.RHF",
"pyscf.gto.M",
"pyqmc.wftools.generate_jastrow",
"numpy.random.seed",
"pyqmc.accumulators.EnergyAccumulator",
"pyqmc.mc.initial_guess",
"pyqmc.slater.Slater",
"time.time"
] | [((612, 632), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (626, 632), True, 'import numpy as np\n'), ((646, 671), 'pyqmc.mc.initial_guess', 'initial_guess', (['mol', 'nconf'], {}), '(mol, nconf)\n', (659, 671), False, 'from pyqmc.mc import initial_guess\n'), ((1234, 1325), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""C 0 0 0 \n C 1 0 0 """', 'ecp': '"""ccecp"""', 'basis': '"""ccecpccpvdz"""'}), '(atom="""C 0 0 0 \n C 1 0 0 """, ecp=\'ccecp\', basis=\n \'ccecpccpvdz\')\n', (1239, 1325), False, 'from pyscf import gto, scf, mcscf\n'), ((692, 707), 'pyqmc.slater.Slater', 'Slater', (['mol', 'mf'], {}), '(mol, mf)\n', (698, 707), False, 'from pyqmc.slater import Slater\n'), ((834, 854), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (848, 854), True, 'import numpy as np\n'), ((870, 903), 'pyqmc.accumulators.EnergyAccumulator', 'EnergyAccumulator', (['mol', 'threshold'], {}), '(mol, threshold)\n', (887, 903), False, 'from pyqmc.accumulators import EnergyAccumulator\n'), ((920, 931), 'time.time', 'time.time', ([], {}), '()\n', (929, 931), False, 'import time\n'), ((980, 991), 'time.time', 'time.time', ([], {}), '()\n', (989, 991), False, 'import time\n'), ((709, 730), 'pyqmc.wftools.generate_jastrow', 'generate_jastrow', (['mol'], {}), '(mol)\n', (725, 730), False, 'from pyqmc.wftools import generate_jastrow\n'), ((1050, 1074), 'numpy.mean', 'np.mean', (["energy['total']"], {}), "(energy['total'])\n", (1057, 1074), True, 'import numpy as np\n'), ((1361, 1373), 'pyscf.scf.RHF', 'scf.RHF', (['mol'], {}), '(mol)\n', (1368, 1373), False, 'from pyscf import gto, scf, mcscf\n')] |
# -*- coding: utf-8 -*-
"""HIVE-COTE v1 test code."""
import numpy as np
from numpy import testing
from sklearn.ensemble import RandomForestClassifier
from sktime.classification.hybrid import HIVECOTEV1
from sktime.datasets import load_unit_test
def test_hivecote_v1_on_unit_test_data():
"""Test of HIVECOTEV1 on unit test data."""
# load unit test data
X_train, y_train = load_unit_test(split="train")
X_test, y_test = load_unit_test(split="test")
indices = np.random.RandomState(0).choice(len(y_train), 10, replace=False)
# train HIVE-COTE v1
hc1 = HIVECOTEV1(
random_state=0,
stc_params={
"estimator": RandomForestClassifier(n_estimators=3),
"n_shapelet_samples": 500,
"max_shapelets": 20,
},
tsf_params={"n_estimators": 10},
rise_params={"n_estimators": 10},
cboss_params={"n_parameter_samples": 25, "max_ensemble_size": 5},
)
hc1.fit(X_train, y_train)
# assert probabilities are the same
probas = hc1.predict_proba(X_test.iloc[indices])
testing.assert_array_almost_equal(probas, hivecote_v1_unit_test_probas, decimal=2)
hivecote_v1_unit_test_probas = np.array(
[
[
0.08232436967368748,
0.9176756303263125,
],
[
0.5161621848368437,
0.48383781516315627,
],
[
0.0,
1.0,
],
[
0.925,
0.075,
],
[
0.8261138340619067,
0.17388616593809328,
],
[
0.9676756303263125,
0.03232436967368746,
],
[
0.7869430829690466,
0.2130569170309533,
],
[
0.0,
1.0,
],
[
0.7661621848368437,
0.23383781516315624,
],
[
0.95,
0.05000000000000001,
],
]
)
| [
"sktime.datasets.load_unit_test",
"numpy.testing.assert_array_almost_equal",
"sklearn.ensemble.RandomForestClassifier",
"numpy.array",
"numpy.random.RandomState"
] | [((1195, 1551), 'numpy.array', 'np.array', (['[[0.08232436967368748, 0.9176756303263125], [0.5161621848368437, \n 0.48383781516315627], [0.0, 1.0], [0.925, 0.075], [0.8261138340619067, \n 0.17388616593809328], [0.9676756303263125, 0.03232436967368746], [\n 0.7869430829690466, 0.2130569170309533], [0.0, 1.0], [\n 0.7661621848368437, 0.23383781516315624], [0.95, 0.05000000000000001]]'], {}), '([[0.08232436967368748, 0.9176756303263125], [0.5161621848368437, \n 0.48383781516315627], [0.0, 1.0], [0.925, 0.075], [0.8261138340619067, \n 0.17388616593809328], [0.9676756303263125, 0.03232436967368746], [\n 0.7869430829690466, 0.2130569170309533], [0.0, 1.0], [\n 0.7661621848368437, 0.23383781516315624], [0.95, 0.05000000000000001]])\n', (1203, 1551), True, 'import numpy as np\n'), ((388, 417), 'sktime.datasets.load_unit_test', 'load_unit_test', ([], {'split': '"""train"""'}), "(split='train')\n", (402, 417), False, 'from sktime.datasets import load_unit_test\n'), ((439, 467), 'sktime.datasets.load_unit_test', 'load_unit_test', ([], {'split': '"""test"""'}), "(split='test')\n", (453, 467), False, 'from sktime.datasets import load_unit_test\n'), ((1079, 1165), 'numpy.testing.assert_array_almost_equal', 'testing.assert_array_almost_equal', (['probas', 'hivecote_v1_unit_test_probas'], {'decimal': '(2)'}), '(probas, hivecote_v1_unit_test_probas,\n decimal=2)\n', (1112, 1165), False, 'from numpy import testing\n'), ((482, 506), 'numpy.random.RandomState', 'np.random.RandomState', (['(0)'], {}), '(0)\n', (503, 506), True, 'import numpy as np\n'), ((665, 703), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(3)'}), '(n_estimators=3)\n', (687, 703), False, 'from sklearn.ensemble import RandomForestClassifier\n')] |
#! /usr/bin/env python3
"""
Bishbot - https://github.com/ldgregory/bishbot
<NAME> <<EMAIL>>
bishbot.py v0.8
Tested to Python v3.7.3
Description:
Discord Bot
Changelog:
20200728 -  Cleanup and testing for committing to git
20200522 - Significant rewrite to use COGS
20200521 - Better error handling
20200520 - Case insensitive bot commands, tips, DM nickname change
20200519 - Total code refactor
Moved to external text files to clean up code
Got rid of aiohttp library for images
Beefed up $server information
20200518 - Added 8-Ball, Insults, PEP-8 compliance, Bot status message
20200517 - Adding various commands
20200516 - Initial code
Todo:
Move load, unload and reload commands to cogs/utility.py
Dependencies:
python3 -m pip install -U discord.py
python3 -m pip install -U python-dotenv
Copyright 2020 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import discord
from discord.ext import commands, tasks
from dotenv import load_dotenv
from itertools import cycle
class TextColor:
BLOGR = str('\033[7;32m')
BLUE = str('\033[1;34m')
GREEN = str('\033[1;32m')
PURPLE = str('\033[1;35m')
RED = str('\033[1;31m')
RESET = str('\033[0m')
YELLOW = str('\033[1;33m')
def responses(file):
"""
Loads an array of responses from a txt file.
This function does not strip \n intentionally.
Arguments:
file {text} -- possible responses, one per line
Returns:
array -- array of possible responses
"""
with open(file, 'r') as fh:
lines = []
for line in fh:
lines.append(line)
return lines
def read_file(file):
"""
Loads a text file to output informational text.
This function does not strip \n intentionally.
Arguments:
file {text} -- complete text of file
Returns:
str -- complete text of file
"""
with open(file, 'r') as fh:
text = fh.read()
return text
# Load responses
boss_ids = responses('txt/bb_boss_ids.txt')
change_nickname_text = read_file('txt/bb_change_nickname.txt')
change_bot_status_text = responses('txt/bb_bot_status.txt')
def main():
# Load settings from .env file
load_dotenv()
BOT_PREFIX = os.getenv('DISCORD_BOT_PREFIX')
GUILD = os.getenv('DISCORD_GUILD')
TOKEN = os.getenv('DISCORD_TOKEN')
ERROR_LOG = os.getenv('DISCORD_ERROR_LOG')
# Instantiate bot and set prefix to listen for
intents = discord.Intents.default()
intents.members = True
bot = commands.Bot(command_prefix=BOT_PREFIX, intents=intents, case_insensitive=True)
bot_status = cycle(change_bot_status_text)
# Cogs management
@bot.command(name='load',
description='Load a cog',
help='Load a cog',
ignore_extra=True,
hidden=True,
enabled=True)
async def load(ctx, extension):
bot.load_extension(f"cogs.{extension.lower()}")
await ctx.channel.send(f"Cog {extension.lower()} loaded.")
@bot.command(name='reload',
description='Reload a cog',
help='Reload a cog',
ignore_extra=True,
hidden=True,
enabled=True)
async def _reload(ctx, extension):
bot.unload_extension(f"cogs.{extension.lower()}")
bot.load_extension(f"cogs.{extension.lower()}")
await ctx.channel.send(f"Cog {extension.lower()} reloaded.")
@bot.command(name='unload',
description='Unload a cog',
help='Unload a cog',
ignore_extra=True,
hidden=True,
enabled=True)
async def unload(ctx, extension):
bot.unload_extension(f"cogs.{extension.lower()}")
await ctx.channel.send(f"Cog {extension.lower()} unloaded.")
# Do initial load of cogs on bot start in cogs folder
# Either rename unwanted cogs to different extension or move them out of
# the cogs folder.
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
bot.load_extension(f"cogs.{filename[:-3]}")
# Bot Events ------------------------------------------------------------------
# Send a DM to new members about changing their nickname
@bot.event
async def on_member_join(member):
await member.create_dm()
await member.dm_channel.send(change_nickname_text)
# Send a message to general that someone important showed up
@bot.event
async def on_member_update(before, after):
if str(after.status) == "online" and str(after.id) in boss_ids:
channel = discord.utils.get(after.guild.channels, name='general')
await channel.send(f"Quick!!! Look busy! One of the big bosses are online! ({after.name})")
# Error handling logged to ERROR_LOG file
@bot.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
await ctx.channel.send(f"Beep, boop! Does not compute. Maybe try {BOT_PREFIX}help.")
elif isinstance(error, commands.DisabledCommand):
await ctx.channel.send(f"Beep, boop! Sorry, that command is currently disabled.")
elif isinstance(error, commands.MissingRequiredArgument):
await ctx.channel.send(f"Beep, boop! Err, something's missing here. Try {BOT_PREFIX}help.")
elif isinstance(error, commands.TooManyArguments):
await ctx.channel.send(f"Beep, boop! Buffer overflow! Too many arguments.")
elif isinstance(error, commands.CommandOnCooldown):
await ctx.channel.send(f"Beep, boop! Command is smoking hot! Give it a few minutes to cool down.")
elif isinstance(error, commands.MissingPermissions):
await ctx.channel.send(f"Beep, boop! Denied! You don't have access!.")
with open(ERROR_LOG, 'a') as fh:
fh.write(f"Unhandled message: {error}\n")
# Changes the bot status to random statuses pulled from txt/bb_bot_status.txt
@tasks.loop(seconds=60)
async def change_bot_status():
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name=next(bot_status)))
# Information about our bot and its status when run
@bot.event
async def on_ready():
change_bot_status.start()
guild = discord.utils.get(bot.guilds, name=GUILD)
print(f"{TextColor.BLUE}{bot.user} (id: {bot.user.id}) is connected to {guild.name} (id: {guild.id}){TextColor.RESET}")
print(f"{TextColor.GREEN}{bot.user.name} is ready!{TextColor.RESET}")
bot.run(TOKEN)
if __name__ == '__main__':
main()
| [
"itertools.cycle",
"os.listdir",
"os.getenv",
"discord.ext.commands.Bot",
"discord.utils.get",
"dotenv.load_dotenv",
"discord.ext.tasks.loop",
"discord.Intents.default"
] | [((2708, 2721), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (2719, 2721), False, 'from dotenv import load_dotenv\n'), ((2739, 2770), 'os.getenv', 'os.getenv', (['"""DISCORD_BOT_PREFIX"""'], {}), "('DISCORD_BOT_PREFIX')\n", (2748, 2770), False, 'import os\n'), ((2783, 2809), 'os.getenv', 'os.getenv', (['"""DISCORD_GUILD"""'], {}), "('DISCORD_GUILD')\n", (2792, 2809), False, 'import os\n'), ((2822, 2848), 'os.getenv', 'os.getenv', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (2831, 2848), False, 'import os\n'), ((2865, 2895), 'os.getenv', 'os.getenv', (['"""DISCORD_ERROR_LOG"""'], {}), "('DISCORD_ERROR_LOG')\n", (2874, 2895), False, 'import os\n'), ((2963, 2988), 'discord.Intents.default', 'discord.Intents.default', ([], {}), '()\n', (2986, 2988), False, 'import discord\n'), ((3026, 3105), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': 'BOT_PREFIX', 'intents': 'intents', 'case_insensitive': '(True)'}), '(command_prefix=BOT_PREFIX, intents=intents, case_insensitive=True)\n', (3038, 3105), False, 'from discord.ext import commands, tasks\n'), ((3123, 3152), 'itertools.cycle', 'cycle', (['change_bot_status_text'], {}), '(change_bot_status_text)\n', (3128, 3152), False, 'from itertools import cycle\n'), ((4518, 4538), 'os.listdir', 'os.listdir', (['"""./cogs"""'], {}), "('./cogs')\n", (4528, 4538), False, 'import os\n'), ((6524, 6546), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(60)'}), '(seconds=60)\n', (6534, 6546), False, 'from discord.ext import commands, tasks\n'), ((6847, 6888), 'discord.utils.get', 'discord.utils.get', (['bot.guilds'], {'name': 'GUILD'}), '(bot.guilds, name=GUILD)\n', (6864, 6888), False, 'import discord\n'), ((5145, 5200), 'discord.utils.get', 'discord.utils.get', (['after.guild.channels'], {'name': '"""general"""'}), "(after.guild.channels, name='general')\n", (5162, 5200), False, 'import discord\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri May 22 15:09:22 2015
@author: kp14
"""
import datetime
import logging
import re
from lxml import etree
from lxml import objectify
logging.basicConfig(level=logging.WARN)
NS = re.compile("\{http\:\/\/uniprot\.org\/unirule-[0-9]\.[0-9]\}")
# NS = '{http://uniprot.org/unirule-1.0}'
NS_uniprot = "{http://uniprot.org/uniprot}"
class UniRule:
"""Class representing a rule as used by the UniRule system."""
def __init__(self):
self.meta = {}
self.main = BasicRule()
self.cases = []
self.sam_features = []
@property
def status(self):
"""Return status of a UniRule - apply, test, disused."""
return self.meta["status"]
@property
def id(self):
"""Return UniRule ID, e.g. UR000000001."""
return self.meta["id"]
@property
def creator(self):
"""Return creator of UniRule."""
return self.meta["creator"]
@property
def date_created(self):
"""Date when rule was created.
returns:
Datetime object
"""
return datetime.datetime.strptime(self.meta["created"][:10], "%Y-%m-%d")
@property
def date_last_modified(self):
"""Date when rule was last modified.
returns:
Datetime object
"""
return datetime.datetime.strptime(self.meta["modified"][:10], "%Y-%m-%d")
@property
def id_pipeline(self):
"""Returns the ID by pipeline.
Next to the UniRule ID, almost evey rule has a second identifier
based on the the pipeline it is coming from, e.g. RU362000 for
RuleBase.
"""
return self.meta["oldRuleNum"]
def created_after(self, date):
try:
dto = datetime.datetime.strptime(date, "%Y-%m-%d")
return self.date_created > dto
except ValueError:
print("Date has to be given in format: YYYY-MM-DD")
def created_before(self, date):
try:
dto = datetime.datetime.strptime(date, "%Y-%m-%d")
return self.date_created < dto
except ValueError:
print("Date has to be given in format: YYYY-MM-DD")
def has_cases(self):
"""Return whether rule has cases."""
return bool(self.cases)
def has_ec(self):
"""Return whether rule has EC numbers either in main or in cases."""
has_ec = False
for annot in self.iter_annotations():
has_ec = self._is_enzyme_number(annot)
if has_ec:
break
return has_ec
def is_rulebase(self):
"""Return whether rule comes from the Rulebase pipeline."""
return self._is_from_pipeline("rulebase")
def is_hamap(self):
"""Return whether rule comes from the HAMAP pipeline."""
return self._is_from_pipeline("hamap")
def is_pir(self):
"""Return whether rule comes from PIR`s pipeline."""
return self._is_from_pipeline("pir")
def get_ec(self):
"""Return EC numbers in rule.
Returns:
list of strings
"""
ec_list = []
for ann in self.iter_annotations():
if self._is_enzyme_number(ann):
ec_list.append(ann.value)
return ec_list
def get_taxonomic_space(self):
"""Return a set of taxonomic contstraints used in the main rule."""
tx = set()
for cond in self.iter_main_conditions():
if cond.type == "taxon":
tx.add(cond)
must_have = [c.value for c in tx if not c.negative]
if must_have:
must_have_txt = ", ".join(must_have)
else:
must_have_txt = "-"
must_not_have = [c.value for c in tx if c.negative]
if must_not_have:
must_not_have_txt = ", ".join(must_not_have)
else:
must_not_have_txt = "-"
return "Must be (OR): {0}\nMust not be: {1}".format(
must_have_txt, must_not_have_txt
)
def iter_conditions(self):
"""Iterate over all conditions in main and cases."""
yield from self.iter_main_conditions()
yield from self.iter_case_conditions()
def iter_annotations(self):
"""Iterate over all annotations in main and cases."""
yield from self.iter_main_annotations()
yield from self.iter_case_annotations()
def iter_main_conditions(self):
"""Iterate over conditions in main only."""
return self.main.iter_conditions()
def iter_main_annotations(self):
"""Iterate over annotations in main only."""
return self.main.iter_annotations()
def iter_case_conditions(self):
"""Iterate over conditions in cases only."""
for c in self.cases:
yield from c.iter_conditions()
def iter_case_annotations(self):
"""Iterate over annotations in cases only."""
for c in self.cases:
yield from c.annotations
def _is_enzyme_number(self, annot):
"""Helper method to determine whether an EC number is among the annotations."""
return annot.subtype == "ecNumber"
def _is_from_pipeline(self, ppln):
"""Helper method to determine pipeline of origin."""
mapper = {"hamap": "MF", "pir": "PIR", "rulebase": "RU"}
ppln_low = ppln.lower()
try:
return self.meta["oldRuleNum"].startswith(mapper[ppln_low])
except KeyError:
print("Invalid pipeline. Choose hamap, pir or rulebase.")
def __str__(self):
template = (
"Rule ID: {0}\n"
"Main:\n"
"Number of condition sets: {1}\n"
"Number of annotations {2}\n"
"Number of cases: {3}\n"
)
string = template.format(
self.meta["id"],
len(self.main.conditions),
len(self.main.annotations),
len(self.cases),
)
return string
class BasicRule:
"""Basic components of a rule: a set of condition(s) (sets) and annotations."""
def __init__(self):
self.conditions = []
self.annotations = []
def iter_annotations(self):
"""Iterate over annotations."""
yield from self.annotations
def iter_conditions(self):
"""Iterate over conditions."""
for condition_set in self.conditions:
yield from condition_set
def __str__(self):
template = "Number of condition sets: {0}\n" "Number of annotations: {1}\n"
string = template.format(len(self.conditions), len(self.annotations))
return string
class SamFeature(BasicRule):
"""Represents a SAM feature.
SAM features are predictors for transmembrane domains, signal peptides
and coiled-coil domains.
"""
def __init__(self):
super(SamFeature, self).__init__()
self.trigger = None
self.min_hits = None
self.max_hits = None
def __str__(self):
template = (
"Number of conditions: {0}\n"
"Number of annotations: {1}\n"
"Trigger: {2}\n"
"Range: {3}\n"
)
string = template.format(
len(self.conditions),
len(self.annotations),
self.trigger,
"-".join([self.min_hits, self.max_hits]),
)
return string
class Condition:
"""Represents conditions as used in UniRule.
Conditions are prerequisites for a rule to be applied to a given
UniProtKB entry. Currently, conditions used are InterPro signatures,
taxonomic nodes, proteome properties, sequence flags and length.
"""
def __init__(self):
self.type = None
self.negative = False
self.value = None
def __str__(self):
return "{0}, {1}, {2}".format(self.type, self.value, self.negative)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.type, self.value, self.negative))
class Annotation:
"""Annotations as used in UniRule.
Annotations are applied once conditions are met by a UniProtKB entry.
Many types of annotations are used. In accordance with the UniProtKB
flat file format, the annotations have a class equivalent to their
line type, an optional type like FUNCTION and subtype like FullName.
"""
def __init__(self):
self.class_ = None
self.type = None
self.subtype = None
self.value = None
def __str__(self):
return self.__dict__.__str__()
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash((self.class_, self.type, self.subtype, self.value))
def parse_rules(filename):
"""Extract rule information from UniRules in a file."""
with open(filename, "rb") as data:
logging.info("Starting work on file: {}".format(filename))
xml = data.read()
root = objectify.fromstring(xml)
objectify.deannotate(root, cleanup_namespaces=True)
for rule in root.unirule:
rule_id = rule.attrib["id"]
logging.info("Parsing rule: {}".format(rule_id))
uni = UniRule()
logging.info("Extracting meta from : {}".format(rule_id))
extract_meta(rule, uni)
logging.info("Extracting conditions from : {}".format(rule_id))
extract_main_conditions(rule, uni)
logging.info("Extracting annotations from : {}".format(rule_id))
extract_main_annotations(rule, uni)
try:
for case in rule.cases.case:
logging.info("Found a case.")
basic_rule = BasicRule()
uni.cases.append(basic_rule)
extract_case_conditions(case, uni)
extract_case_annotations(case, uni)
except AttributeError:
logging.info("Rule appears to have no cases: {}".format(rule_id))
try:
for sam_ft in rule.samFeatureSet:
sam = SamFeature()
for trig in rule.samFeatureSet.samTrigger.getchildren():
sam.trigger = NS.sub("", trig.tag)
# sam.trigger = trig.tag.replace(NS, '')
sam.min_hits = trig.expectedHits.attrib["start"]
sam.max_hits = trig.expectedHits.attrib["end"]
try:
for c_set in rule.samFeatureSet.conditionSet:
condition_list = _extract_conditions(c_set)
sam.conditions.extend(condition_list)
except AttributeError:
logging.info("SamFeature appears to have no extra conditions.")
try:
for a in rule.samFeatureSet.annotations.annotation:
anno_list = _extract_annotations(a)
sam.annotations.extend(anno_list)
except AttributeError:
logging.info("SamFeature appears to have no extra annotations.")
uni.sam_features.append(sam)
except AttributeError:
logging.info("Ruel appears to have no SAM features.")
yield uni
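# Typical use of the generator above (the file name is illustrative):
#   for rule in parse_rules("unirule-all.xml"):
#       if rule.is_hamap() and rule.has_ec():
#           print(rule.id, rule.get_ec())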
def extract_meta(rule, uni):
uni.meta.update(rule.attrib)
for info in rule.information.getchildren():
try:
key = NS.sub("", info.tag)
# key = info.tag.replace(NS, '')
val = info.text
uni.meta[key] = val
except:
print("Error in: {}".format(info))
def extract_main_conditions(rule, uni):
for c_set in rule.main.conditionSets.conditionSet:
condition_list = _extract_conditions(c_set)
uni.main.conditions.append(condition_list)
def extract_case_conditions(case, uni):
for c_set in case.conditionSets.conditionSet:
condition_list = _extract_conditions(c_set)
logging.info("Extracted condition list from case: {}".format(condition_list))
uni.cases[-1].conditions.append(condition_list)
def _extract_conditions(rule_element):
c_list = []
for child in rule_element.getchildren():
cond = Condition()
cond.type = child.attrib["type"]
try:
cond.negative = child.attrib["negative"] == "true"
except KeyError:
cond.negative = False
try:
cond.value = child.value.text
if child.value.attrib:
for key, val in child.value.attrib.items():
setattr(cond, key, val)
except AttributeError:
pass
try:
cond.range = child.range
start = child.range.attrib["start"]
end = child.range.attrib["end"]
cond.start = start
cond.end = end
string_ = ": start:{0} end:{1}".format(start, end)
if cond.value:
cond.value += string_
else:
cond.value = string_
except AttributeError:
pass
c_list.append(cond)
return c_list
def extract_main_annotations(rule, uni):
try:
for annot in rule.main.annotations.annotation:
uni.main.annotations.extend(_extract_annotations(annot))
logging.info(
"Extracting annotations from main in rule: {}".format(rule.attrib["id"])
)
except AttributeError:
        logging.warning(
"Rule appears to have no annotations in main: {}".format(rule.attrib["id"])
)
def extract_case_annotations(case, uni):
annotation_list = []
try:
for annot in case.annotations.annotation:
annotation_list.extend(_extract_annotations(annot))
except AttributeError:
logging.warning(
"Case appears to have no annotations: {}".format(rule.attrib["id"])
)
uni.cases[-1].annotations.extend(annotation_list)
def _extract_annotations(annotation_element):
annotation_list = []
class_element = annotation_element.getchildren()[0] # Only one toplevel element
class_ = NS.sub("", class_element.tag)
# class_ = class_element.tag.replace(NS, '')
logging.info("Parsing class: {}".format(class_))
if class_ == "comment":
attribs = class_element.attrib
if attribs["type"] not in ["subcellular location", "cofactor"]:
attribs["value"] = class_element.getchildren()[0].text
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
elif attribs["type"] == "subcellular location":
for location in class_element.getchildren():
for loc in location.getchildren():
attribs[loc.tag.replace(NS_uniprot, "")] = loc.text
sub_cell_components = []
for k in ["location", "topology", "orientation"]:
try:
sub_cell_components.append(attribs[k])
except KeyError:
pass
attribs["value"] = " / ".join(sub_cell_components)
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
elif attribs["type"] == "cofactor":
for cfac in class_element.getchildren():
if "cofactor" in cfac.tag:
for data in cfac.getchildren():
if "name" in data.tag:
attribs["value"] = data.text
elif "dbReference" in data.tag:
attribs["id"] = data.attrib["id"]
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
elif "text" in cfac.tag:
attribs.clear()
attribs["class_"] = class_
attribs["type"] = "cofactor"
attribs["note"] = cfac.text
annotation_list.append(_create_annotation(attribs))
elif class_ == "keyword":
attribs = class_element.attrib
attribs["value"] = class_element.text
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
elif class_ == "gene":
for gene_ele in class_element.getchildren():
attribs = gene_ele.attrib
attribs["value"] = gene_ele.text
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
elif class_ == "protein":
for typ in class_element.getchildren():
typ_ = NS.sub("", typ.tag)
# typ_ = typ.tag.replace(NS, '')
if typ_ == "alternativeName":
attribs = {}
attribs["type"] = typ_
attribs["subtype"] = "fullName"
attribs["class_"] = class_
attribs["value"] = typ.fullName.text
annotation_list.append(_create_annotation(attribs))
elif typ_ == "flag":
attribs = {}
attribs["type"] = typ_
attribs["value"] = typ.value.text
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
elif typ_ == "recommendedName":
for subtyp in typ.getchildren():
attribs = {}
attribs["type"] = typ_
attribs["subtype"] = NS.sub("", subtyp.tag)
# attribs['subtype'] = subtyp.tag.replace(NS, '')
attribs["value"] = subtyp.text
attribs["class_"] = class_
annotation_list.append(_create_annotation(attribs))
return annotation_list
def _create_annotation(adict):
annotation = Annotation()
annotation.__dict__.update(**adict)
return annotation
| [
"logging.basicConfig",
"lxml.objectify.fromstring",
"re.compile",
"datetime.datetime.strptime",
"lxml.objectify.deannotate",
"logging.info"
] | [((176, 215), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN'}), '(level=logging.WARN)\n', (195, 215), False, 'import logging\n'), ((222, 292), 're.compile', 're.compile', (['"""\\\\{http\\\\:\\\\/\\\\/uniprot\\\\.org\\\\/unirule-[0-9]\\\\.[0-9]\\\\}"""'], {}), "('\\\\{http\\\\:\\\\/\\\\/uniprot\\\\.org\\\\/unirule-[0-9]\\\\.[0-9]\\\\}')\n", (232, 292), False, 'import re\n'), ((1108, 1173), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["self.meta['created'][:10]", '"""%Y-%m-%d"""'], {}), "(self.meta['created'][:10], '%Y-%m-%d')\n", (1134, 1173), False, 'import datetime\n'), ((1337, 1403), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (["self.meta['modified'][:10]", '"""%Y-%m-%d"""'], {}), "(self.meta['modified'][:10], '%Y-%m-%d')\n", (1363, 1403), False, 'import datetime\n'), ((9195, 9220), 'lxml.objectify.fromstring', 'objectify.fromstring', (['xml'], {}), '(xml)\n', (9215, 9220), False, 'from lxml import objectify\n'), ((9229, 9280), 'lxml.objectify.deannotate', 'objectify.deannotate', (['root'], {'cleanup_namespaces': '(True)'}), '(root, cleanup_namespaces=True)\n', (9249, 9280), False, 'from lxml import objectify\n'), ((1766, 1810), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (1792, 1810), False, 'import datetime\n'), ((2013, 2057), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['date', '"""%Y-%m-%d"""'], {}), "(date, '%Y-%m-%d')\n", (2039, 2057), False, 'import datetime\n'), ((9880, 9909), 'logging.info', 'logging.info', (['"""Found a case."""'], {}), "('Found a case.')\n", (9892, 9909), False, 'import logging\n'), ((11529, 11582), 'logging.info', 'logging.info', (['"""Ruel appears to have no SAM features."""'], {}), "('Ruel appears to have no SAM features.')\n", (11541, 11582), False, 'import logging\n'), ((11006, 11069), 'logging.info', 'logging.info', (['"""SamFeature appears to have no extra conditions."""'], {}), "('SamFeature appears to have no extra conditions.')\n", (11018, 11069), False, 'import logging\n'), ((11364, 11428), 'logging.info', 'logging.info', (['"""SamFeature appears to have no extra annotations."""'], {}), "('SamFeature appears to have no extra annotations.')\n", (11376, 11428), False, 'import logging\n')] |
from fv3fit import DenseHyperparameters
import xarray as xr
import numpy as np
import pytest
from fv3fit.keras._models.shared.sequences import ThreadedSequencePreLoader
from fv3fit.keras._models.models import DenseModel
from fv3fit._shared import PackerConfig, SliceConfig
from fv3fit.keras._models.shared import ClipConfig
import fv3fit
import tensorflow.keras
def test__ThreadedSequencePreLoader():
""" Check correctness of the pre-loaded sequence"""
sequence = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
loader = ThreadedSequencePreLoader(sequence, num_workers=4)
result = [item for item in loader]
assert len(result) == len(sequence)
for item in result:
assert item in sequence
@pytest.mark.parametrize("base_state", ["manual", "default"])
def test_DenseModel_jacobian(base_state):
class IdentityModel(DenseModel):
def get_model(self, n, m):
x = tensorflow.keras.Input(shape=[n])
model = tensorflow.keras.Model(inputs=[x], outputs=[x])
model.compile()
return model
batch = xr.Dataset(
{
"a": (["x", "z"], np.arange(10, dtype=np.float).reshape(2, 5)),
"b": (["x", "z"], np.arange(10, dtype=np.float).reshape(2, 5)),
}
)
model = IdentityModel(["a"], ["b"], DenseHyperparameters(["a"], ["b"]))
model.fit([batch])
if base_state == "manual":
jacobian = model.jacobian(batch[["a"]].isel(x=0))
elif base_state == "default":
jacobian = model.jacobian()
assert jacobian[("a", "b")].dims == ("b", "a")
np.testing.assert_allclose(np.asarray(jacobian[("a", "b")]), np.eye(5))
def test_nonnegative_model_outputs():
hyperparameters = DenseHyperparameters(
["input"], ["output"], nonnegative_outputs=True
)
model = DenseModel(["input"], ["output"], hyperparameters,)
batch = xr.Dataset(
{
"input": (["x"], np.arange(100)),
# even with negative targets, trained model should be nonnegative
"output": (["x"], np.full((100,), -1e4)),
}
)
model.fit([batch])
prediction = model.predict(batch)
assert prediction.min() >= 0.0
def test_DenseModel_clipped_inputs():
hyperparameters = DenseHyperparameters(
["a", "b"], ["c"], clip_config=PackerConfig({"a": {"z": SliceConfig(None, 3)}}),
)
model = DenseModel(["a", "b"], ["c"], hyperparameters)
nz = 5
dims = ["x", "y", "z"]
shape = (2, 2, nz)
arr = np.arange(np.prod(shape)).reshape(shape).astype(float)
input_data = xr.Dataset({"a": (dims, arr), "b": (dims, arr), "c": (dims, arr + 1)})
slice_filled_input = xr.Dataset(
{"a": input_data["a"].where(input_data.z < 3).fillna(1.0), "b": input_data["b"]}
)
model.fit([input_data])
prediction_clipped = model.predict(input_data)
assert model.X_packer._n_features["a"] == 3
assert model.X_packer._n_features["b"] == 5
prediction_nan_filled = model.predict(slice_filled_input)
xr.testing.assert_allclose(prediction_nan_filled, prediction_clipped, rtol=1e-3)
def test_loaded_DenseModel_predicts_with_clipped_inputs(tmpdir):
hyperparameters = DenseHyperparameters(
["a", "b"], ["c"], clip_config=PackerConfig({"a": {"z": SliceConfig(None, 3)}}),
)
model = DenseModel(["a", "b"], ["c"], hyperparameters)
nz = 5
dims = ["x", "y", "z"]
shape = (2, 2, nz)
arr = np.arange(np.prod(shape)).reshape(shape).astype(float)
input_data = xr.Dataset({"a": (dims, arr), "b": (dims, arr), "c": (dims, arr + 1)})
model.fit([input_data])
prediction = model.predict(input_data)
output_path = str(tmpdir.join("trained_model"))
fv3fit.dump(model, output_path)
model_loaded = fv3fit.load(output_path)
loaded_prediction = model_loaded.predict(input_data)
xr.testing.assert_allclose(prediction, loaded_prediction)
def test_DenseModel_raises_not_implemented_error_with_clipped_output_data():
hyperparameters = DenseHyperparameters(
["a", "b"], ["c"], clip_config=ClipConfig({"c": {"z": SliceConfig(None, 3)}}),
)
with pytest.raises(NotImplementedError):
DenseModel(
["a", "b"], ["c"], hyperparameters,
)
| [
"numpy.prod",
"numpy.eye",
"fv3fit.dump",
"xarray.testing.assert_allclose",
"fv3fit.load",
"fv3fit._shared.SliceConfig",
"numpy.asarray",
"xarray.Dataset",
"pytest.mark.parametrize",
"fv3fit.DenseHyperparameters",
"pytest.raises",
"fv3fit.keras._models.models.DenseModel",
"numpy.full",
"fv3fit.keras._models.shared.sequences.ThreadedSequencePreLoader",
"numpy.arange"
] | [((710, 770), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""base_state"""', "['manual', 'default']"], {}), "('base_state', ['manual', 'default'])\n", (733, 770), False, 'import pytest\n'), ((521, 571), 'fv3fit.keras._models.shared.sequences.ThreadedSequencePreLoader', 'ThreadedSequencePreLoader', (['sequence'], {'num_workers': '(4)'}), '(sequence, num_workers=4)\n', (546, 571), False, 'from fv3fit.keras._models.shared.sequences import ThreadedSequencePreLoader\n'), ((1707, 1776), 'fv3fit.DenseHyperparameters', 'DenseHyperparameters', (["['input']", "['output']"], {'nonnegative_outputs': '(True)'}), "(['input'], ['output'], nonnegative_outputs=True)\n", (1727, 1776), False, 'from fv3fit import DenseHyperparameters\n'), ((1803, 1853), 'fv3fit.keras._models.models.DenseModel', 'DenseModel', (["['input']", "['output']", 'hyperparameters'], {}), "(['input'], ['output'], hyperparameters)\n", (1813, 1853), False, 'from fv3fit.keras._models.models import DenseModel\n'), ((2370, 2416), 'fv3fit.keras._models.models.DenseModel', 'DenseModel', (["['a', 'b']", "['c']", 'hyperparameters'], {}), "(['a', 'b'], ['c'], hyperparameters)\n", (2380, 2416), False, 'from fv3fit.keras._models.models import DenseModel\n'), ((2561, 2631), 'xarray.Dataset', 'xr.Dataset', (["{'a': (dims, arr), 'b': (dims, arr), 'c': (dims, arr + 1)}"], {}), "({'a': (dims, arr), 'b': (dims, arr), 'c': (dims, arr + 1)})\n", (2571, 2631), True, 'import xarray as xr\n'), ((3009, 3095), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['prediction_nan_filled', 'prediction_clipped'], {'rtol': '(0.001)'}), '(prediction_nan_filled, prediction_clipped, rtol=\n 0.001)\n', (3035, 3095), True, 'import xarray as xr\n'), ((3308, 3354), 'fv3fit.keras._models.models.DenseModel', 'DenseModel', (["['a', 'b']", "['c']", 'hyperparameters'], {}), "(['a', 'b'], ['c'], hyperparameters)\n", (3318, 3354), False, 'from fv3fit.keras._models.models import DenseModel\n'), ((3499, 3569), 'xarray.Dataset', 'xr.Dataset', (["{'a': (dims, arr), 'b': (dims, arr), 'c': (dims, arr + 1)}"], {}), "({'a': (dims, arr), 'b': (dims, arr), 'c': (dims, arr + 1)})\n", (3509, 3569), True, 'import xarray as xr\n'), ((3697, 3728), 'fv3fit.dump', 'fv3fit.dump', (['model', 'output_path'], {}), '(model, output_path)\n', (3708, 3728), False, 'import fv3fit\n'), ((3748, 3772), 'fv3fit.load', 'fv3fit.load', (['output_path'], {}), '(output_path)\n', (3759, 3772), False, 'import fv3fit\n'), ((3834, 3891), 'xarray.testing.assert_allclose', 'xr.testing.assert_allclose', (['prediction', 'loaded_prediction'], {}), '(prediction, loaded_prediction)\n', (3860, 3891), True, 'import xarray as xr\n'), ((1299, 1333), 'fv3fit.DenseHyperparameters', 'DenseHyperparameters', (["['a']", "['b']"], {}), "(['a'], ['b'])\n", (1319, 1333), False, 'from fv3fit import DenseHyperparameters\n'), ((1600, 1630), 'numpy.asarray', 'np.asarray', (["jacobian['a', 'b']"], {}), "(jacobian['a', 'b'])\n", (1610, 1630), True, 'import numpy as np\n'), ((1634, 1643), 'numpy.eye', 'np.eye', (['(5)'], {}), '(5)\n', (1640, 1643), True, 'import numpy as np\n'), ((4118, 4152), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (4131, 4152), False, 'import pytest\n'), ((4162, 4208), 'fv3fit.keras._models.models.DenseModel', 'DenseModel', (["['a', 'b']", "['c']", 'hyperparameters'], {}), "(['a', 'b'], ['c'], hyperparameters)\n", (4172, 4208), False, 'from fv3fit.keras._models.models import DenseModel\n'), ((1918, 1932), 'numpy.arange', 'np.arange', (['(100)'], 
{}), '(100)\n', (1927, 1932), True, 'import numpy as np\n'), ((2043, 2068), 'numpy.full', 'np.full', (['(100,)', '(-10000.0)'], {}), '((100,), -10000.0)\n', (2050, 2068), True, 'import numpy as np\n'), ((1121, 1150), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.float'}), '(10, dtype=np.float)\n', (1130, 1150), True, 'import numpy as np\n'), ((1197, 1226), 'numpy.arange', 'np.arange', (['(10)'], {'dtype': 'np.float'}), '(10, dtype=np.float)\n', (1206, 1226), True, 'import numpy as np\n'), ((2327, 2347), 'fv3fit._shared.SliceConfig', 'SliceConfig', (['None', '(3)'], {}), '(None, 3)\n', (2338, 2347), False, 'from fv3fit._shared import PackerConfig, SliceConfig\n'), ((2499, 2513), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (2506, 2513), True, 'import numpy as np\n'), ((3265, 3285), 'fv3fit._shared.SliceConfig', 'SliceConfig', (['None', '(3)'], {}), '(None, 3)\n', (3276, 3285), False, 'from fv3fit._shared import PackerConfig, SliceConfig\n'), ((3437, 3451), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3444, 3451), True, 'import numpy as np\n'), ((4077, 4097), 'fv3fit._shared.SliceConfig', 'SliceConfig', (['None', '(3)'], {}), '(None, 3)\n', (4088, 4097), False, 'from fv3fit._shared import PackerConfig, SliceConfig\n')] |
# This script converts text to audio using Amazon Polly, and plays out the audio
# using mpg123 system command.
#
# Prereq: mpg123 utility must be installed. 'sudo apt-get install mpg123'
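#
# Example invocation (the script name is a placeholder; the text is treated as SSML
# because TextType="ssml" is passed below):
#   python polly_speak.py "<speak>Hello from Polly</speak>"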
import os
import sys
import time
from contextlib import closing
from boto3 import Session
from botocore.exceptions import BotoCoreError, ClientError
CHUNK_SIZE = 1024
AUDIO_FILE = "/tmp/audio.mp3"
# Create a client using the credentials and region defined in the adminuser
# section of the AWS credentials and configuration files
session = Session(region_name="us-west-2")
polly = session.client("polly")
try:
# Request speech synthesis
if len(sys.argv) == 1:
exit() #nothing to synthesize
response = polly.synthesize_speech(Text=sys.argv[1],
VoiceId="Brian",
TextType="ssml",
OutputFormat="mp3",
SampleRate="22050")
audioStream = response.get("AudioStream")
if audioStream:
        mp3file = open(AUDIO_FILE, 'wb')  # binary mode: the Polly stream returns bytes
# Note: Closing the stream is important as the service throttles on
# the number of parallel connections. Here we are using
# contextlib.closing to ensure the close method of the stream object
# will be called automatically at the end of the with statement's
# scope.
with closing(audioStream) as managed_stream:
# Write the stream's content in chunks to a file
while True:
data = managed_stream.read(CHUNK_SIZE)
mp3file.write(data)
# If there's no more data to read, stop streaming
if not data:
break
# Ensure any buffered output has been transmitted and close the
# stream
mp3file.flush()
mp3file.close()
print("Streaming completed, starting player...")
command_to_run = 'mpg123 ' + AUDIO_FILE
os.system(command_to_run)
print("Player finished.")
else:
# The stream passed in is empty
print("Nothing to stream.")
except (BotoCoreError, ClientError) as err:
# The service returned an error
print("ERROR: %s" % err)
| [
"os.system",
"boto3.Session",
"contextlib.closing"
] | [((531, 563), 'boto3.Session', 'Session', ([], {'region_name': '"""us-west-2"""'}), "(region_name='us-west-2')\n", (538, 563), False, 'from boto3 import Session\n'), ((1988, 2013), 'os.system', 'os.system', (['command_to_run'], {}), '(command_to_run)\n', (1997, 2013), False, 'import os\n'), ((1382, 1402), 'contextlib.closing', 'closing', (['audioStream'], {}), '(audioStream)\n', (1389, 1402), False, 'from contextlib import closing\n')] |
from app import db
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
Building = db.Column(db.String(10), index=True, unique=False)
Room = db.Column(db.String(10), index=True, unique=False)
Location_x = db.Column(db.String(20), index=True, unique=False)
Location_y = db.Column(db.String(20), index=True, unique=False)
SSID = db.Column(db.String(20), index=True, unique=False)
BSSID = db.Column(db.String(20), index=True, unique=False)
Frequency = db.Column(db.String(10), index=True, unique=False)
Level = db.Column(db.String(10), index=True, unique=False)
Model = db.Column(db.String(10), index=True, unique=False)
Time = db.Column(db.String(10), index=True, unique=False)
def __repr__(self):
return '<User %r>' % (self.id)
'''class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)
posts = db.relationship('Post', backref='author', lazy='dynamic')
def __repr__(self):
return '<User %r>' % (self.nickname)
class Post(db.Model):
id = db.Column(db.Integer, primary_key = True)
body = db.Column(db.String(140))
timestamp = db.Column(db.DateTime)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
def __repr__(self):
return '<Post %r>' % (self.body)
nickname = db.Column(db.String(64), index=True, unique=True)
email = db.Column(db.String(120), index=True, unique=True)''' | [
"app.db.String",
"app.db.Column"
] | [((48, 87), 'app.db.Column', 'db.Column', (['db.Integer'], {'primary_key': '(True)'}), '(db.Integer, primary_key=True)\n', (57, 87), False, 'from app import db\n'), ((110, 123), 'app.db.String', 'db.String', (['(10)'], {}), '(10)\n', (119, 123), False, 'from app import db\n'), ((169, 182), 'app.db.String', 'db.String', (['(10)'], {}), '(10)\n', (178, 182), False, 'from app import db\n'), ((234, 247), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (243, 247), False, 'from app import db\n'), ((299, 312), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (308, 312), False, 'from app import db\n'), ((358, 371), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (367, 371), False, 'from app import db\n'), ((418, 431), 'app.db.String', 'db.String', (['(20)'], {}), '(20)\n', (427, 431), False, 'from app import db\n'), ((482, 495), 'app.db.String', 'db.String', (['(10)'], {}), '(10)\n', (491, 495), False, 'from app import db\n'), ((542, 555), 'app.db.String', 'db.String', (['(10)'], {}), '(10)\n', (551, 555), False, 'from app import db\n'), ((602, 615), 'app.db.String', 'db.String', (['(10)'], {}), '(10)\n', (611, 615), False, 'from app import db\n'), ((662, 675), 'app.db.String', 'db.String', (['(10)'], {}), '(10)\n', (671, 675), False, 'from app import db\n')] |
from django.forms import ModelForm, ModelChoiceField
from itembase.core.models import Address, AddressType, AddressUsage, Client, EngagementType # , Modules
class ClientForm(ModelForm):
# service_start = forms.DateField(widget=forms.SelectDateWidget)
engagement = ModelChoiceField(queryset=EngagementType.objects, empty_label="Select Engagement")
parent = ModelChoiceField(queryset=Client.objects.filter(client_status__lt=5), required=False)
class Meta:
model = Client
fields = [
'client_code',
'client_name',
'engagement',
'parent',
'service_start',
'service_end',
'client_status',
'imp_fee_status',
'production_support_number',
'upload_address',
'iq_support_address',
'approved',
]
class ClientAddressForm(ModelForm):
address_type = ModelChoiceField(queryset=AddressType.objects.order_by('id'))
# used_on = ModelChoiceField(queryset=AddressUsage.objects.order_by('id'))
client = ModelChoiceField(queryset=Client.objects.order_by('client_name'))
class Meta:
model = Address
fields = [
'client',
'address_type', 'used_on', 'address1', 'address2', 'city',
'state', 'postal_code', 'country', 'phone_number', 'email', 'primary', 'status',
]
| [
"itembase.core.models.AddressType.objects.order_by",
"django.forms.ModelChoiceField",
"itembase.core.models.Client.objects.order_by",
"itembase.core.models.Client.objects.filter"
] | [((278, 365), 'django.forms.ModelChoiceField', 'ModelChoiceField', ([], {'queryset': 'EngagementType.objects', 'empty_label': '"""Select Engagement"""'}), "(queryset=EngagementType.objects, empty_label=\n 'Select Engagement')\n", (294, 365), False, 'from django.forms import ModelForm, ModelChoiceField\n'), ((400, 442), 'itembase.core.models.Client.objects.filter', 'Client.objects.filter', ([], {'client_status__lt': '(5)'}), '(client_status__lt=5)\n', (421, 442), False, 'from itembase.core.models import Address, AddressType, AddressUsage, Client, EngagementType\n'), ((958, 992), 'itembase.core.models.AddressType.objects.order_by', 'AddressType.objects.order_by', (['"""id"""'], {}), "('id')\n", (986, 992), False, 'from itembase.core.models import Address, AddressType, AddressUsage, Client, EngagementType\n'), ((1112, 1150), 'itembase.core.models.Client.objects.order_by', 'Client.objects.order_by', (['"""client_name"""'], {}), "('client_name')\n", (1135, 1150), False, 'from itembase.core.models import Address, AddressType, AddressUsage, Client, EngagementType\n')] |
import argparse
import logging
from os.path import join, basename
from time import perf_counter
from autobias.argmin_modules.affine_nll import AffineNLL
from autobias.argmin_modules.argmin_function import NumpyOptimizer
from autobias.argmin_modules.argmin_transform import ArgminTransformFunction
from autobias.argmin_modules.l2_norm import L2NormPenalty
from autobias.datasets.mnist import *
from autobias.experiments.train_args import add_train_args
from autobias.model.mnist_model import ImageClfModel, NullMNISTPredictor
from autobias.model.mnist_model import PredictFromFlattened, FromBiasFeaturePredictor
from autobias.modules.classifier_ensembles import ClfArgminEnsemble, ClfHead, \
ClfBiAdversary
from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d
from autobias.training import train_utils
from autobias.training.data_batcher import SubsetSampler, StratifiedSampler
from autobias.training.data_iterator import TorchDataIterator
from autobias.training.evaluate import run_evaluation
from autobias.training.evaluator import ClfEnsembleEvaluator
from autobias.training.optimizer import SGD
from autobias.training.post_fit_rescaling_ensemble import FitRescaleParameters
from autobias.training.trainer import EvalDataset, Trainer, StoppingPoint
from autobias.utils import py_utils
"""
Run our MNIST experiments. These models are small enough that it seems best to train
them on a single CPU core. Multiple experiments can be run in parallel by using this
script with the `init_only` flag, then running `training_models_batch` on the
newly created output directory.
"""
def get_low_capacity_model(sz=28, n_classes=10):
return PredictFromFlattened(
NullMapper(),
seq(
FullyConnected(sz * sz * 3, 128, "relu"),
Dropout(0.5),
FullyConnected(128, n_classes, None)
)
)
def get_high_capacity_model(sz=28, n_classes=10):
return PredictFromFlattened(
Conv2d(3, 8, (7, 7)),
seq(
FullyConnected(8 * (sz - 6) * (sz - 6), 128, "relu"),
Dropout(0.5),
FullyConnected(128, n_classes, None)
)
)
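# Illustrative sanity check (not part of the original script): the Conv2d above
# uses a 7x7 kernel with no padding, so a 28x28 input shrinks to 22x22 and the
# first FullyConnected layer expects 8 * (sz - 6) * (sz - 6) = 3872 features.
# The low-capacity model instead flattens the raw image to sz * sz * 3 = 2352
# inputs.
def _expected_conv_features(sz=28, channels=8, kernel=7):
  """Flattened feature count after one unpadded kernel x kernel convolution."""
  out_side = sz - (kernel - 1)
  return channels * out_side * out_side  # 8 * 22 * 22 = 3872 for sz=28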
MODES = [
"none", "mce", "oracle", "nobp", "adv", "noci",
]
def main():
py_utils.add_stdout_logger()
parser = argparse.ArgumentParser()
parser.add_argument("--dataset", choices=["patch", "split", "background"], required=True,
help="Bias to train on")
add_train_args(parser, entropy_w=False, default_adv_penalty=None, default_batch_size=1024,
default_epochs=100, default_entropy_penalty=None, lc_weight_default=None)
parser.add_argument("--lr", type=float, default=0.01)
parser.add_argument("--nruns", type=int, default=1)
args = parser.parse_args()
dataset = args.dataset
if dataset == "patch":
ds = MNISTPatches
n_classes = 10
w = 30
elif dataset == "background":
ds = MNISTBackgroundColor
w = 28
n_classes = 10
elif dataset == "split":
ds = MNISTDependent
n_classes = 4
w = 30
else:
raise NotImplementedError(f"Unknown dataset {dataset}")
p = 0.9
n_per_class = 200
train = ds(p, True, (0, n_per_class))
opt = SGD(args.lr, momentum=0.9)
eval_sets = [
EvalDataset(ds(p, True, (1400, 2400)),
TorchDataIterator(SubsetSampler(None, args.batch_size)), "id"),
EvalDataset(ds(1./n_classes, True, (1400, 2400)),
TorchDataIterator(SubsetSampler(None, args.batch_size)), "od"),
]
train.cache = True
for ds in eval_sets:
ds.cache = True
def build_model():
hc = get_high_capacity_model(w, n_classes)
if args.mode == "none":
# An ensemble with a Null predictor
predictor = ClfArgminEnsemble(
[
ClfHead(predictor=NullMNISTPredictor(n_classes), head_name="bias"),
ClfHead(predictor=hc, head_name="debiased")
],
n_classes,
)
elif args.mode == "adv":
if args.adversary_loss is None:
if dataset == "patch":
adv_loss = 0.01
elif dataset == "background":
adv_loss = 0.08
elif dataset == "split":
adv_loss = 0.01
else:
raise RuntimeError()
else:
adv_loss = args.adversary_loss
if args.lc_weight is None:
# Default depends on the bias
if dataset == "patch":
lc_w = 0.7
elif dataset == "background":
lc_w = 0.05
elif dataset == "split":
lc_w = 0.02
else:
raise RuntimeError()
else:
lc_w = args.lc_weight
predictor = ClfBiAdversary(
hc, get_low_capacity_model(w, n_classes),
n_classes, adv_w=adv_loss, bias_loss=lc_w,
main_loss=0.0, joint_loss=1.0, use_y_values=True, joint_adv=False
)
elif args.mode == "oracle":
# An ensemble with a gold bias-predictor
bias = FromBiasFeaturePredictor(p, n_classes)
predictor = ClfArgminEnsemble(
[
ClfHead(predictor=bias, head_name="bias"),
ClfHead(predictor=hc, head_name="debiased")
],
n_classes,
)
else:
if args.mode.startswith("mce"):
rescaler = lambda: ArgminTransformFunction(AffineNLL(
n_classes, n_classes, NumpyOptimizer(),
residual=True, penalty=L2NormPenalty(0.002),
fix_last_bias_to_zero=True,
))
elif args.mode == "noci":
rescaler = lambda: None
elif args.mode == "nobp":
rescaler = lambda: ArgminTransformFunction(AffineNLL(
n_classes, n_classes, NumpyOptimizer(),
residual=True, penalty=L2NormPenalty(0.002),
fix_last_bias_to_zero=True,
), backprop_argmin=False)
else:
raise ValueError("Unknown mode: " + args.mode)
predictor = ClfArgminEnsemble(
[
ClfHead(
predictor=get_low_capacity_model(w, n_classes), head_name="bias",
rescaler=rescaler(),
nll_penalty=0.2 if args.lc_weight is None else args.lc_weight,
),
ClfHead(
predictor=hc, head_name="debiased",
rescaler=rescaler(),
)
],
n_classes
)
return ImageClfModel(predictor)
evaluator = ClfEnsembleEvaluator()
if args.mode in {"mce", "nobp"}:
hook = FitRescaleParameters(1024, None, sort=False)
else:
hook = None
trainer = Trainer(
opt,
train,
eval_sets,
train_eval_iterator=TorchDataIterator(SubsetSampler(None, args.batch_size)),
train_iterator=TorchDataIterator(
StratifiedSampler(args.batch_size, n_repeat=10)),
num_train_epochs=args.epochs,
evaluator=evaluator,
pre_eval_hook=hook,
tb_factor=args.batch_size/256,
save_each_epoch=False,
progress_bar=True,
eval_progress_bar=False,
epoch_progress_bar=False,
early_stopping_criteria=StoppingPoint("train", "nll/joint", 3e-4, 3),
log_to_tb=False,
)
for r in range(args.nruns):
if args.nruns > 1:
print("")
print("")
print("*" * 10 + f" STARTING RUN {r+1}/{args.nruns} " + "*" * 10)
# Build a model for each run to ensure it is fully reset
model = build_model()
if args.output_dir:
if r == 0:
train_utils.clear_if_nonempty(args.output_dir)
train_utils.init_model_dir(args.output_dir, trainer, model)
subdir = train_utils.select_subdir(args.output_dir)
else:
subdir = None
if args.init_only:
return
if subdir is not None:
logging.info(f"Start run for {subdir}")
if args.time:
t0 = perf_counter()
else:
t0 = None
try:
if subdir is not None:
with open(join(subdir, "console.out"), "w") as f:
trainer.training_run(model, subdir, no_cuda=True, print_out=f)
else:
trainer.training_run(model, subdir, no_cuda=True)
except Exception as e:
if args.nruns == 1 or isinstance(e, KeyboardInterrupt):
raise e
logging.warning("Error during training: " + str(e))
continue
if args.time:
logging.info(f"Training took {perf_counter() - t0:.3f} seconds")
if __name__ == '__main__':
main()
| [
"autobias.modules.layers.Dropout",
"autobias.training.train_utils.select_subdir",
"autobias.model.mnist_model.ImageClfModel",
"autobias.utils.py_utils.add_stdout_logger",
"autobias.experiments.train_args.add_train_args",
"autobias.modules.classifier_ensembles.ClfHead",
"logging.info",
"argparse.ArgumentParser",
"autobias.modules.layers.FullyConnected",
"autobias.modules.layers.NullMapper",
"time.perf_counter",
"autobias.training.train_utils.init_model_dir",
"autobias.modules.layers.Conv2d",
"autobias.training.data_batcher.SubsetSampler",
"autobias.training.data_batcher.StratifiedSampler",
"autobias.training.evaluator.ClfEnsembleEvaluator",
"autobias.training.post_fit_rescaling_ensemble.FitRescaleParameters",
"autobias.argmin_modules.argmin_function.NumpyOptimizer",
"autobias.model.mnist_model.NullMNISTPredictor",
"autobias.training.trainer.StoppingPoint",
"autobias.model.mnist_model.FromBiasFeaturePredictor",
"os.path.join",
"autobias.training.train_utils.clear_if_nonempty",
"autobias.training.optimizer.SGD",
"autobias.argmin_modules.l2_norm.L2NormPenalty"
] | [((2172, 2200), 'autobias.utils.py_utils.add_stdout_logger', 'py_utils.add_stdout_logger', ([], {}), '()\n', (2198, 2200), False, 'from autobias.utils import py_utils\n'), ((2212, 2237), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2235, 2237), False, 'import argparse\n'), ((2379, 2552), 'autobias.experiments.train_args.add_train_args', 'add_train_args', (['parser'], {'entropy_w': '(False)', 'default_adv_penalty': 'None', 'default_batch_size': '(1024)', 'default_epochs': '(100)', 'default_entropy_penalty': 'None', 'lc_weight_default': 'None'}), '(parser, entropy_w=False, default_adv_penalty=None,\n default_batch_size=1024, default_epochs=100, default_entropy_penalty=\n None, lc_weight_default=None)\n', (2393, 2552), False, 'from autobias.experiments.train_args import add_train_args\n'), ((3126, 3152), 'autobias.training.optimizer.SGD', 'SGD', (['args.lr'], {'momentum': '(0.9)'}), '(args.lr, momentum=0.9)\n', (3129, 3152), False, 'from autobias.training.optimizer import SGD\n'), ((6195, 6217), 'autobias.training.evaluator.ClfEnsembleEvaluator', 'ClfEnsembleEvaluator', ([], {}), '()\n', (6215, 6217), False, 'from autobias.training.evaluator import ClfEnsembleEvaluator\n'), ((1697, 1709), 'autobias.modules.layers.NullMapper', 'NullMapper', ([], {}), '()\n', (1707, 1709), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((1928, 1948), 'autobias.modules.layers.Conv2d', 'Conv2d', (['(3)', '(8)', '(7, 7)'], {}), '(3, 8, (7, 7))\n', (1934, 1948), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((6155, 6179), 'autobias.model.mnist_model.ImageClfModel', 'ImageClfModel', (['predictor'], {}), '(predictor)\n', (6168, 6179), False, 'from autobias.model.mnist_model import ImageClfModel, NullMNISTPredictor\n'), ((6265, 6309), 'autobias.training.post_fit_rescaling_ensemble.FitRescaleParameters', 'FitRescaleParameters', (['(1024)', 'None'], {'sort': '(False)'}), '(1024, None, sort=False)\n', (6285, 6309), False, 'from autobias.training.post_fit_rescaling_ensemble import FitRescaleParameters\n'), ((1726, 1766), 'autobias.modules.layers.FullyConnected', 'FullyConnected', (['(sz * sz * 3)', '(128)', '"""relu"""'], {}), "(sz * sz * 3, 128, 'relu')\n", (1740, 1766), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((1774, 1786), 'autobias.modules.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1781, 1786), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((1794, 1830), 'autobias.modules.layers.FullyConnected', 'FullyConnected', (['(128)', 'n_classes', 'None'], {}), '(128, n_classes, None)\n', (1808, 1830), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((1965, 2017), 'autobias.modules.layers.FullyConnected', 'FullyConnected', (['(8 * (sz - 6) * (sz - 6))', '(128)', '"""relu"""'], {}), "(8 * (sz - 6) * (sz - 6), 128, 'relu')\n", (1979, 2017), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((2025, 2037), 'autobias.modules.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (2032, 2037), False, 'from autobias.modules.layers import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((2045, 2081), 'autobias.modules.layers.FullyConnected', 'FullyConnected', (['(128)', 'n_classes', 'None'], {}), '(128, n_classes, None)\n', (2059, 2081), False, 'from autobias.modules.layers 
import seq, FullyConnected, Dropout, NullMapper, Conv2d\n'), ((6821, 6867), 'autobias.training.trainer.StoppingPoint', 'StoppingPoint', (['"""train"""', '"""nll/joint"""', '(0.0003)', '(3)'], {}), "('train', 'nll/joint', 0.0003, 3)\n", (6834, 6867), False, 'from autobias.training.trainer import EvalDataset, Trainer, StoppingPoint\n'), ((7319, 7361), 'autobias.training.train_utils.select_subdir', 'train_utils.select_subdir', (['args.output_dir'], {}), '(args.output_dir)\n', (7344, 7361), False, 'from autobias.training import train_utils\n'), ((7463, 7502), 'logging.info', 'logging.info', (['f"""Start run for {subdir}"""'], {}), "(f'Start run for {subdir}')\n", (7475, 7502), False, 'import logging\n'), ((7533, 7547), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (7545, 7547), False, 'from time import perf_counter\n'), ((3247, 3283), 'autobias.training.data_batcher.SubsetSampler', 'SubsetSampler', (['None', 'args.batch_size'], {}), '(None, args.batch_size)\n', (3260, 3283), False, 'from autobias.training.data_batcher import SubsetSampler, StratifiedSampler\n'), ((3381, 3417), 'autobias.training.data_batcher.SubsetSampler', 'SubsetSampler', (['None', 'args.batch_size'], {}), '(None, args.batch_size)\n', (3394, 3417), False, 'from autobias.training.data_batcher import SubsetSampler, StratifiedSampler\n'), ((6433, 6469), 'autobias.training.data_batcher.SubsetSampler', 'SubsetSampler', (['None', 'args.batch_size'], {}), '(None, args.batch_size)\n', (6446, 6469), False, 'from autobias.training.data_batcher import SubsetSampler, StratifiedSampler\n'), ((6516, 6563), 'autobias.training.data_batcher.StratifiedSampler', 'StratifiedSampler', (['args.batch_size'], {'n_repeat': '(10)'}), '(args.batch_size, n_repeat=10)\n', (6533, 6563), False, 'from autobias.training.data_batcher import SubsetSampler, StratifiedSampler\n'), ((7188, 7234), 'autobias.training.train_utils.clear_if_nonempty', 'train_utils.clear_if_nonempty', (['args.output_dir'], {}), '(args.output_dir)\n', (7217, 7234), False, 'from autobias.training import train_utils\n'), ((7243, 7302), 'autobias.training.train_utils.init_model_dir', 'train_utils.init_model_dir', (['args.output_dir', 'trainer', 'model'], {}), '(args.output_dir, trainer, model)\n', (7269, 7302), False, 'from autobias.training import train_utils\n'), ((3770, 3813), 'autobias.modules.classifier_ensembles.ClfHead', 'ClfHead', ([], {'predictor': 'hc', 'head_name': '"""debiased"""'}), "(predictor=hc, head_name='debiased')\n", (3777, 3813), False, 'from autobias.modules.classifier_ensembles import ClfArgminEnsemble, ClfHead, ClfBiAdversary\n'), ((4831, 4869), 'autobias.model.mnist_model.FromBiasFeaturePredictor', 'FromBiasFeaturePredictor', (['p', 'n_classes'], {}), '(p, n_classes)\n', (4855, 4869), False, 'from autobias.model.mnist_model import PredictFromFlattened, FromBiasFeaturePredictor\n'), ((7631, 7658), 'os.path.join', 'join', (['subdir', '"""console.out"""'], {}), "(subdir, 'console.out')\n", (7635, 7658), False, 'from os.path import join, basename\n'), ((3710, 3739), 'autobias.model.mnist_model.NullMNISTPredictor', 'NullMNISTPredictor', (['n_classes'], {}), '(n_classes)\n', (3728, 3739), False, 'from autobias.model.mnist_model import ImageClfModel, NullMNISTPredictor\n'), ((4927, 4968), 'autobias.modules.classifier_ensembles.ClfHead', 'ClfHead', ([], {'predictor': 'bias', 'head_name': '"""bias"""'}), "(predictor=bias, head_name='bias')\n", (4934, 4968), False, 'from autobias.modules.classifier_ensembles import ClfArgminEnsemble, ClfHead, ClfBiAdversary\n'), 
((4980, 5023), 'autobias.modules.classifier_ensembles.ClfHead', 'ClfHead', ([], {'predictor': 'hc', 'head_name': '"""debiased"""'}), "(predictor=hc, head_name='debiased')\n", (4987, 5023), False, 'from autobias.modules.classifier_ensembles import ClfArgminEnsemble, ClfHead, ClfBiAdversary\n'), ((8047, 8061), 'time.perf_counter', 'perf_counter', ([], {}), '()\n', (8059, 8061), False, 'from time import perf_counter\n'), ((5204, 5220), 'autobias.argmin_modules.argmin_function.NumpyOptimizer', 'NumpyOptimizer', ([], {}), '()\n', (5218, 5220), False, 'from autobias.argmin_modules.argmin_function import NumpyOptimizer\n'), ((5255, 5275), 'autobias.argmin_modules.l2_norm.L2NormPenalty', 'L2NormPenalty', (['(0.002)'], {}), '(0.002)\n', (5268, 5275), False, 'from autobias.argmin_modules.l2_norm import L2NormPenalty\n'), ((5516, 5532), 'autobias.argmin_modules.argmin_function.NumpyOptimizer', 'NumpyOptimizer', ([], {}), '()\n', (5530, 5532), False, 'from autobias.argmin_modules.argmin_function import NumpyOptimizer\n'), ((5567, 5587), 'autobias.argmin_modules.l2_norm.L2NormPenalty', 'L2NormPenalty', (['(0.002)'], {}), '(0.002)\n', (5580, 5587), False, 'from autobias.argmin_modules.l2_norm import L2NormPenalty\n')] |
"""Jedi mini server for deoplete-jedi
This script allows Jedi to run using the Python interpreter that is found in
the user's environment instead of the one Neovim is using.
Jedi seems to accumulate latency with each completion. To deal with this, the
server is restarted after 50 completions. This threshold is relatively high
considering that deoplete-jedi caches completion results. These combined
should make deoplete-jedi's completions pretty fast and responsive.
"""
from __future__ import unicode_literals
import argparse
import functools
import logging
import os
import re
import shlex
import struct
import subprocess
import sys
import threading
import time
from glob import glob
# This is possible because the path is inserted in deoplete_jedi.py as well
# as set in PYTHONPATH by the Client class.
from deoplete_jedi import utils
try:
import cPickle as pickle
except ImportError:
import pickle
libpath = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'vendored')
jedi_path = os.path.join(libpath, 'jedi')
parso_path = os.path.join(libpath, 'parso')
# Type mapping. Empty values will use the key value instead.
# Keep them 5 characters max to minimize required space to display.
_types = {
'import': 'imprt',
'class': '',
'function': 'def',
'globalstmt': 'var',
'instance': 'var',
'statement': 'var',
'keyword': 'keywd',
'module': 'mod',
'param': 'arg',
'property': 'prop',
'bool': '',
'bytes': 'byte',
'complex': 'cmplx',
'dict': '',
'list': '',
'float': '',
'int': '',
'object': 'obj',
'set': '',
'slice': '',
'str': '',
'tuple': '',
'mappingproxy': 'dict', # cls.__dict__
'member_descriptor': 'cattr',
'getset_descriptor': 'cprop',
'method_descriptor': 'cdef',
}
class StreamError(Exception):
"""Error in reading/writing streams."""
class StreamEmpty(StreamError):
"""Empty stream data."""
class ServerError(Exception):
"""Error from crashed server.
Will have server's stderr as args[1].
"""
def stream_read(pipe):
"""Read data from the pipe."""
buffer = getattr(pipe, 'buffer', pipe)
header = buffer.read(4)
if not len(header):
raise StreamEmpty
if len(header) < 4:
raise StreamError('Incorrect byte length')
length = struct.unpack('I', header)[0]
data = buffer.read(length)
if len(data) < length:
raise StreamError('Got less data than expected')
return pickle.loads(data)
def stream_write(pipe, obj):
"""Write data to the pipe."""
data = pickle.dumps(obj, 2)
header = struct.pack(b'I', len(data))
buffer = getattr(pipe, 'buffer', pipe)
buffer.write(header + data)
pipe.flush()
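# Illustrative helper (not part of the original module): stream_read() and
# stream_write() implement a length-prefixed pickle protocol, i.e. a 4-byte
# struct 'I' header carrying the payload size followed by the pickled object.
# This demo round-trips an object through an in-memory buffer instead of a
# real pipe and is never called by the server itself.
def _demo_stream_roundtrip():
    import io
    buf = io.BytesIO()
    stream_write(buf, ('hello', 42))
    buf.seek(0)
    assert stream_read(buf) == ('hello', 42)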
def strip_decor(source):
"""Remove decorators lines
If the decorator is a function call, this will leave them dangling. Jedi
should be fine with this since they'll look like tuples just hanging out
not doing anything important.
"""
return re.sub(r'^(\s*)@\w+', r'\1', source, flags=re.M)
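# Illustrative example (not part of the original module): strip_decor() blanks
# the "@name" token on each decorator line while keeping its indentation.
# Call-style decorators such as "@wraps(func)" leave "(func)" dangling, which
# Jedi tolerates, as the docstring above notes.
def _demo_strip_decor():
    src = "    @property\n    def x(self):\n        return 1\n"
    assert strip_decor(src) == "    \n    def x(self):\n        return 1\n"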
def retry_completion(func):
"""Decorator to retry a completion
A second attempt is made with decorators stripped from the source.
Older comment:
Decorators on incomplete functions cause an error to be raised by
Jedi. I assume this is because Jedi is attempting to evaluate
the return value of the wrapped, but broken, function.
Our solution is to simply strip decorators from the source since
we are a completion service, not the syntax police.
"""
@functools.wraps(func)
def wrapper(self, source, *args, **kwargs):
try:
return func(self, source, *args, **kwargs)
except Exception:
if '@' in source:
log.warn('Retrying completion %r', func.__name__, exc_info=True)
try:
return func(self, strip_decor(source), *args, **kwargs)
except Exception:
pass
log.warn('Failed completion %r', func.__name__, exc_info=True)
return wrapper
class Server(object):
"""Server class
This is created when this script is ran directly.
"""
def __init__(self, desc_len=0, short_types=False, show_docstring=False):
self.desc_len = desc_len
self.use_short_types = short_types
self.show_docstring = show_docstring
self.unresolved_imports = set()
from jedi import settings
settings.use_filesystem_cache = False
def _loop(self):
while True:
data = stream_read(sys.stdin)
if not isinstance(data, tuple):
continue
cache_key, source, line, col, filename, options = data
orig_path = sys.path[:]
add_path = self.find_extra_sys_path(filename)
if add_path and add_path not in sys.path:
# Add the found path to sys.path. I'm not 100% certain if this
# is actually helping anything, but it feels like the right
# thing to do.
sys.path.insert(0, add_path)
if filename:
sys.path.append(os.path.dirname(filename))
if isinstance(options, dict):
extra = options.get('extra_path')
if extra:
if not isinstance(extra, list):
extra = [extra]
sys.path.extend(extra)
# Add extra paths if working on a Python remote plugin.
sys.path.extend(utils.rplugin_runtime_paths(options))
out = self.script_completion(source, line, col, filename)
if not out and cache_key[-1] == 'vars':
log.debug('Fallback to scoped completions')
out = self.scoped_completions(source, filename, cache_key[-2])
if not out and isinstance(options, dict) and 'synthetic' in options:
synthetic = options.get('synthetic')
log.debug('Using synthetic completion: %r', synthetic)
out = self.script_completion(synthetic['src'],
synthetic['line'],
synthetic['col'], filename)
if not out and cache_key[-1] in ('package', 'local'):
# The backup plan
                # TODO(blueyed): remove this (far too few results for e.g.
# numpy), or at least do not cache it to disk.
log.debug('Fallback to module completions')
try:
out = self.module_completions(cache_key[0], sys.path)
except Exception:
pass
stream_write(sys.stdout, out)
sys.path[:] = orig_path
def run(self):
log.debug('Starting server. sys.path = %r', sys.path)
try:
stream_write(sys.stdout, tuple(sys.version_info))
self._loop()
except StreamEmpty:
log.debug('Input closed. Shutting down.')
except Exception:
log.exception('Server Exception. Shutting down.')
sys.exit(1)
def find_extra_sys_path(self, filename):
"""Find the file's "root"
This tries to determine the script's root package. The first step is
to scan upward until there are no longer __init__.py files. If that
fails, check immediate subdirectories to find __init__.py files which
could mean that the current script is not part of a package, but has
sub-modules.
"""
add_path = ''
dirname = os.path.dirname(filename)
scan_dir = dirname
while len(scan_dir) \
and os.path.isfile(os.path.join(scan_dir, '__init__.py')):
scan_dir = os.path.dirname(scan_dir)
if scan_dir != dirname:
add_path = scan_dir
elif glob('{}/*/__init__.py'.format(dirname)):
add_path = dirname
return add_path
def module_completions(self, module, paths):
"""Directly get completions from the module file
This is the fallback if all else fails for module completion.
"""
found = utils.module_search(module, paths)
if not found:
return None
log.debug('Found script for fallback completions: %r', found)
mod_parts = tuple(re.sub(r'\.+', '.', module).strip('.').split('.'))
path_parts = os.path.splitext(found)[0].split('/')
if path_parts[-1] == '__init__':
path_parts.pop()
path_parts = tuple(path_parts)
match_mod = mod_parts
ml = len(mod_parts)
for i in range(ml):
if path_parts[i - ml:] == mod_parts[:ml - i]:
match_mod = mod_parts[-i:]
break
log.debug('Remainder to match: %r', match_mod)
import jedi
completions = jedi.api.names(path=found, references=True)
completions = utils.jedi_walk(completions)
while len(match_mod):
for c in completions:
if c.name == match_mod[0]:
completions = c.defined_names()
break
else:
log.debug('No more matches at %r', match_mod[0])
return []
match_mod = match_mod[:-1]
out = []
tmp_filecache = {}
seen = set()
for c in completions:
parsed = self.parse_completion(c, tmp_filecache)
seen_key = (parsed['type'], parsed['name'])
if seen_key in seen:
continue
seen.add(seen_key)
out.append(parsed)
return out
@retry_completion
def script_completion(self, source, line, col, filename):
"""Standard Jedi completions"""
import jedi
log.debug('Line: %r, Col: %r, Filename: %r', line, col, filename)
completions = jedi.Script(source, line, col, filename).completions()
out = []
tmp_filecache = {}
for c in completions:
out.append(self.parse_completion(c, tmp_filecache))
return out
def get_parents(self, c):
"""Collect parent blocks
This is for matching a request's cache key when performing scoped
completions.
"""
parents = []
while True:
try:
c = c.parent()
parents.insert(0, c.name)
if c.type == 'module':
break
except AttributeError:
break
return tuple(parents)
def resolve_import(self, completion, depth=0, max_depth=10, seen=None):
"""Follow import until it no longer is an import type"""
if seen is None:
seen = []
seen.append(completion)
log.debug('Resolving: %r', completion)
defs = completion.goto_assignments()
if not defs:
return None
resolved = defs[0]
if resolved in seen:
return None
if resolved.type == 'import' and depth < max_depth:
return self.resolve_import(resolved, depth + 1, max_depth, seen)
log.debug('Resolved: %r', resolved)
return resolved
@retry_completion
def scoped_completions(self, source, filename, parent):
"""Scoped completion
This gets all definitions for a specific scope allowing them to be
cached without needing to consider the current position in the source.
This would be slow in Vim without threading.
"""
import jedi
completions = jedi.api.names(source, filename, all_scopes=True)
out = []
tmp_filecache = {}
seen = set()
for c in completions:
c_parents = self.get_parents(c)
if parent and (len(c_parents) > len(parent)
or c_parents != parent[:len(c_parents)]):
continue
if c.type == 'import' and c.full_name not in self.unresolved_imports:
resolved = self.resolve_import(c)
if resolved is None:
log.debug('Could not resolve import: %r', c.full_name)
self.unresolved_imports.add(c.full_name)
continue
else:
c = resolved
parsed = self.parse_completion(c, tmp_filecache)
seen_key = (parsed['name'], parsed['type'])
if seen_key in seen:
continue
seen.add(seen_key)
out.append(parsed)
return out
def completion_dict(self, name, type_, comp):
"""Final construction of the completion dict."""
doc = comp.docstring()
i = doc.find('\n\n')
if i != -1:
doc = doc[i:]
params = None
try:
if type_ in ('function', 'class'):
params = []
for i, p in enumerate(comp.params):
desc = p.description.strip()
if i == 0 and desc == 'self':
continue
if '\\n' in desc:
desc = desc.replace('\\n', '\\x0A')
# Note: Hack for jedi param bugs
if desc.startswith('param ') or desc == 'param':
desc = desc[5:].strip()
if desc:
params.append(desc)
except Exception:
params = None
return {
'module': comp.module_path,
'name': name,
'type': type_,
'short_type': _types.get(type_),
'doc': doc.strip(),
'params': params,
}
def parse_completion(self, comp, cache):
"""Return a tuple describing the completion.
Returns (name, type, description, abbreviated)
"""
name = comp.name
type_ = comp.type
desc = comp.description
if type_ == 'instance' and desc.startswith(('builtins.', 'posix.')):
# Simple description
builtin_type = desc.rsplit('.', 1)[-1]
if builtin_type in _types:
return self.completion_dict(name, builtin_type, comp)
if type_ == 'class' and desc.startswith('builtins.'):
return self.completion_dict(name, type_, comp)
if type_ == 'function':
if comp.module_path not in cache and comp.line and comp.line > 1 \
and os.path.exists(comp.module_path):
with open(comp.module_path, 'r') as fp:
cache[comp.module_path] = fp.readlines()
lines = cache.get(comp.module_path)
if isinstance(lines, list) and len(lines) > 1 \
and comp.line < len(lines) and comp.line > 1:
# Check the function's decorators to check if it's decorated
# with @property
i = comp.line - 2
while i >= 0:
line = lines[i].lstrip()
if not line.startswith('@'):
break
if line.startswith('@property'):
return self.completion_dict(name, 'property', comp)
i -= 1
return self.completion_dict(name, type_, comp)
return self.completion_dict(name, type_, comp)
class Client(object):
"""Client object
This will be used by deoplete-jedi to interact with the server.
"""
max_completion_count = 50
def __init__(self, python_path, desc_len=0, short_types=False, show_docstring=False,
debug=False):
self._server = None
self.restarting = threading.Lock()
self.version = (0, 0, 0, 'final', 0)
self.env = os.environ.copy()
self.env.update({'PYTHONPATH': self._make_pythonpath()})
self.cmd = [python_path, '-u', os.path.normpath(__file__),
'--desc-length', str(desc_len)]
if short_types:
self.cmd.append('--short-types')
if show_docstring:
self.cmd.append('--docstrings')
if debug:
self.cmd.extend(('--debug', debug[0], '--debug-level',
str(debug[1])))
# Handle any exceptions from the first server startup, which might
# include PermissionDenied for an invalid python_path.
try:
self.restart()
except Exception as exc:
from deoplete.exceptions import SourceInitError
raise SourceInitError('Failed to start server ({}): {}'.format(
self.cmd_string, exc))
@property
def cmd_string(self):
"""Get self.cmd as a string to be run from a shell."""
cmd = ['PYTHONPATH=%s' % self.env['PYTHONPATH']] + self.cmd
return ' '.join(shlex.quote(x) for x in cmd)
def shutdown(self):
"""Shut down the server."""
if self._server is not None and self._server.returncode is None:
# Closing the server's stdin will cause it to exit.
self._server.stdin.close()
self._server.kill()
def restart(self):
"""Start or restart the server
If a server is already running, shut it down.
"""
with self.restarting:
self.shutdown()
log.debug('Starting server process: %s' % (self.cmd_string,))
self._server = subprocess.Popen(self.cmd,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=self.env)
# Might result in "pyenv: version `foo' is not installed (set by
# /cwd/.python-version)" on stderr.
try:
self.version = stream_read(self._server.stdout)
except Exception:
import traceback
from deoplete.exceptions import SourceInitError
out, err = self._server.communicate()
raise SourceInitError(
'Server exited with {}. stderr=[{}], cmd={!r}.\n{}'.format(
self._server.returncode,
err.decode(),
' '.join(self.cmd),
traceback.format_exc()))
self._count = 0
def completions(self, *args):
"""Get completions from the server.
If the number of completions already performed reaches a threshold,
restart the server.
"""
if self._count > self.max_completion_count:
self.restart()
self._count += 1
try:
stream_write(self._server.stdin, args)
return stream_read(self._server.stdout)
except BrokenPipeError:
out, err = self._server.communicate()
raise ServerError(
'Server exited with %s.' % self._server.returncode,
err.decode())
except StreamError as exc:
if self.restarting.acquire(False):
self.restarting.release()
log.error('Caught %s during handling completions(%s), '
' restarting server', exc, args)
self.restart()
time.sleep(0.2)
@staticmethod
def _make_pythonpath():
"""Makes the PYTHONPATH environment variable to be passed to the server.
We append any paths that are prevalent during startup.
"""
pythonpath = os.pathsep.join((
parso_path,
jedi_path,
os.path.dirname(os.path.dirname(__file__))))
if 'PYTHONPATH' in os.environ.keys():
pythonpath = os.pathsep.join((pythonpath, os.environ.get('PYTHONPATH')))
return pythonpath
if __name__ == '__main__':
log = logging.getLogger('deoplete').getChild('jedi.server')
formatter = logging.Formatter('%(asctime)s %(levelname)-8s '
'[%(process)d] (%(name)s) %(message)s')
# Always log errors to stderr.
error_handler = logging.StreamHandler(sys.stderr)
error_handler.setFormatter(formatter)
error_handler.setLevel(logging.ERROR)
log.addHandler(error_handler)
parser = argparse.ArgumentParser()
parser.add_argument('--desc-length', type=int)
parser.add_argument('--short-types', action='store_true')
parser.add_argument('--docstrings', action='store_true')
parser.add_argument('--debug', default='')
parser.add_argument('--debug-level', type=int, default=logging.DEBUG)
args = parser.parse_args()
if args.debug:
handler = logging.FileHandler(args.debug)
handler.setFormatter(formatter)
handler.setLevel(args.debug_level)
log.addHandler(handler)
log.setLevel(logging.DEBUG)
server = Server(args.desc_length, args.short_types, args.docstrings)
server.run()
else:
log = logging.getLogger('deoplete').getChild('jedi.client')
if not log.handlers:
log.addHandler(logging.NullHandler())
| [
"logging.getLogger",
"logging.StreamHandler",
"sys.path.insert",
"pickle.dumps",
"jedi.Script",
"time.sleep",
"jedi.api.names",
"sys.exit",
"pickle.loads",
"deoplete_jedi.utils.rplugin_runtime_paths",
"os.path.exists",
"argparse.ArgumentParser",
"threading.Lock",
"subprocess.Popen",
"deoplete_jedi.utils.jedi_walk",
"functools.wraps",
"os.path.normpath",
"logging.FileHandler",
"sys.path.extend",
"shlex.quote",
"logging.NullHandler",
"os.environ.keys",
"deoplete_jedi.utils.module_search",
"os.path.splitext",
"os.path.dirname",
"struct.unpack",
"re.sub",
"traceback.format_exc",
"logging.Formatter",
"os.path.join",
"os.environ.get",
"os.environ.copy"
] | [((1037, 1066), 'os.path.join', 'os.path.join', (['libpath', '"""jedi"""'], {}), "(libpath, 'jedi')\n", (1049, 1066), False, 'import os\n'), ((1080, 1110), 'os.path.join', 'os.path.join', (['libpath', '"""parso"""'], {}), "(libpath, 'parso')\n", (1092, 1110), False, 'import os\n'), ((2518, 2536), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (2530, 2536), False, 'import pickle\n'), ((2613, 2633), 'pickle.dumps', 'pickle.dumps', (['obj', '(2)'], {}), '(obj, 2)\n', (2625, 2633), False, 'import pickle\n'), ((3035, 3084), 're.sub', 're.sub', (['"""^(\\\\s*)@\\\\w+"""', '"""\\\\1"""', 'source'], {'flags': 're.M'}), "('^(\\\\s*)@\\\\w+', '\\\\1', source, flags=re.M)\n", (3041, 3084), False, 'import re\n'), ((3579, 3600), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (3594, 3600), False, 'import functools\n'), ((20059, 20149), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(levelname)-8s [%(process)d] (%(name)s) %(message)s"""'], {}), "(\n '%(asctime)s %(levelname)-8s [%(process)d] (%(name)s) %(message)s')\n", (20076, 20149), False, 'import logging\n'), ((20238, 20271), 'logging.StreamHandler', 'logging.StreamHandler', (['sys.stderr'], {}), '(sys.stderr)\n', (20259, 20271), False, 'import logging\n'), ((20404, 20429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (20427, 20429), False, 'import argparse\n'), ((2362, 2388), 'struct.unpack', 'struct.unpack', (['"""I"""', 'header'], {}), "('I', header)\n", (2375, 2388), False, 'import struct\n'), ((7658, 7683), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (7673, 7683), False, 'import os\n'), ((8247, 8281), 'deoplete_jedi.utils.module_search', 'utils.module_search', (['module', 'paths'], {}), '(module, paths)\n', (8266, 8281), False, 'from deoplete_jedi import utils\n'), ((8952, 8995), 'jedi.api.names', 'jedi.api.names', ([], {'path': 'found', 'references': '(True)'}), '(path=found, references=True)\n', (8966, 8995), False, 'import jedi\n'), ((9018, 9046), 'deoplete_jedi.utils.jedi_walk', 'utils.jedi_walk', (['completions'], {}), '(completions)\n', (9033, 9046), False, 'from deoplete_jedi import utils\n'), ((11659, 11708), 'jedi.api.names', 'jedi.api.names', (['source', 'filename'], {'all_scopes': '(True)'}), '(source, filename, all_scopes=True)\n', (11673, 11708), False, 'import jedi\n'), ((15774, 15790), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (15788, 15790), False, 'import threading\n'), ((15855, 15872), 'os.environ.copy', 'os.environ.copy', ([], {}), '()\n', (15870, 15872), False, 'import os\n'), ((20794, 20825), 'logging.FileHandler', 'logging.FileHandler', (['args.debug'], {}), '(args.debug)\n', (20813, 20825), False, 'import logging\n'), ((984, 1009), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (999, 1009), False, 'import os\n'), ((7839, 7864), 'os.path.dirname', 'os.path.dirname', (['scan_dir'], {}), '(scan_dir)\n', (7854, 7864), False, 'import os\n'), ((15978, 16004), 'os.path.normpath', 'os.path.normpath', (['__file__'], {}), '(__file__)\n', (15994, 16004), False, 'import os\n'), ((17498, 17613), 'subprocess.Popen', 'subprocess.Popen', (['self.cmd'], {'stdin': 'subprocess.PIPE', 'stdout': 'subprocess.PIPE', 'stderr': 'subprocess.PIPE', 'env': 'self.env'}), '(self.cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, env=self.env)\n', (17514, 17613), False, 'import subprocess\n'), ((19820, 19837), 'os.environ.keys', 'os.environ.keys', ([], {}), '()\n', (19835, 19837), 
False, 'import os\n'), ((19989, 20018), 'logging.getLogger', 'logging.getLogger', (['"""deoplete"""'], {}), "('deoplete')\n", (20006, 20018), False, 'import logging\n'), ((21084, 21113), 'logging.getLogger', 'logging.getLogger', (['"""deoplete"""'], {}), "('deoplete')\n", (21101, 21113), False, 'import logging\n'), ((21186, 21207), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (21205, 21207), False, 'import logging\n'), ((5103, 5131), 'sys.path.insert', 'sys.path.insert', (['(0)', 'add_path'], {}), '(0, add_path)\n', (5118, 5131), False, 'import sys\n'), ((7182, 7193), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (7190, 7193), False, 'import sys\n'), ((7776, 7813), 'os.path.join', 'os.path.join', (['scan_dir', '"""__init__.py"""'], {}), "(scan_dir, '__init__.py')\n", (7788, 7813), False, 'import os\n'), ((9974, 10014), 'jedi.Script', 'jedi.Script', (['source', 'line', 'col', 'filename'], {}), '(source, line, col, filename)\n', (9985, 10014), False, 'import jedi\n'), ((14553, 14585), 'os.path.exists', 'os.path.exists', (['comp.module_path'], {}), '(comp.module_path)\n', (14567, 14585), False, 'import os\n'), ((16911, 16925), 'shlex.quote', 'shlex.quote', (['x'], {}), '(x)\n', (16922, 16925), False, 'import shlex\n'), ((5189, 5214), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (5204, 5214), False, 'import os\n'), ((5447, 5469), 'sys.path.extend', 'sys.path.extend', (['extra'], {}), '(extra)\n', (5462, 5469), False, 'import sys\n'), ((5575, 5611), 'deoplete_jedi.utils.rplugin_runtime_paths', 'utils.rplugin_runtime_paths', (['options'], {}), '(options)\n', (5602, 5611), False, 'from deoplete_jedi import utils\n'), ((8497, 8520), 'os.path.splitext', 'os.path.splitext', (['found'], {}), '(found)\n', (8513, 8520), False, 'import os\n'), ((19430, 19445), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (19440, 19445), False, 'import time\n'), ((19764, 19789), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (19779, 19789), False, 'import os\n'), ((19893, 19921), 'os.environ.get', 'os.environ.get', (['"""PYTHONPATH"""'], {}), "('PYTHONPATH')\n", (19907, 19921), False, 'import os\n'), ((8425, 8452), 're.sub', 're.sub', (['"""\\\\.+"""', '"""."""', 'module'], {}), "('\\\\.+', '.', module)\n", (8431, 8452), False, 'import re\n'), ((18447, 18469), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (18467, 18469), False, 'import traceback\n')] |
from . import serializers
from rest_framework.response import Response
from collections import OrderedDict
from django.utils.translation import ugettext as _
OK = {
"code": "OK",
"message": _("Success")
}
class EsPaging:
pass
def es_paging(es_query, request, page=None, item_per_page=None):
if not page:
page = int(request.GET.get("page", "1"))
if not item_per_page:
item_per_page = int(request.GET.get("perPage", 10))
start = (page - 1) * item_per_page
end = start + item_per_page
paginator = EsPaging()
data = es_query[start:end].execute().hits
paginator.data = data.hits
paginator.num_pages = data.total
paginator.current_page = page
return paginator
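# Illustrative note (not part of the original module): es_paging() slices the
# Elasticsearch query with start = (page - 1) * perPage and end = start + perPage,
# so e.g. page=3 with perPage=10 selects hits [20:30].
def _demo_page_bounds(page=3, per_page=10):
    start = (page - 1) * per_page
    return start, start + per_page  # (20, 30) for the defaults above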
def ok(data=None, paginator=None):
msg = OrderedDict()
msg['meta'] = OrderedDict()
msg['meta']["code"] = OK["code"]
msg['meta']["message"] = OK["message"]
if data:
msg['data'] = data
if paginator:
msg['pagination'] = {
"num_pages": paginator.num_pages,
"current_page": paginator.current_page,
}
return Response(msg)
def restrict_search(request, key, es_query):
paginator = es_paging(es_query, request)
serialization = serializers.es_serialize(paginator.data)
return ok({key: serialization}, paginator)
| [
"django.utils.translation.ugettext",
"collections.OrderedDict",
"rest_framework.response.Response"
] | [((199, 211), 'django.utils.translation.ugettext', '_', (['"""Success"""'], {}), "('Success')\n", (200, 211), True, 'from django.utils.translation import ugettext as _\n'), ((772, 785), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (783, 785), False, 'from collections import OrderedDict\n'), ((804, 817), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (815, 817), False, 'from collections import OrderedDict\n'), ((1105, 1118), 'rest_framework.response.Response', 'Response', (['msg'], {}), '(msg)\n', (1113, 1118), False, 'from rest_framework.response import Response\n')] |
#!/usr/bin/env python3
"""
Copyright 2017, <NAME>, HKUST.
Training preprocessing.
"""
import os
import time
from random import shuffle, seed
import tensorflow as tf
import progressbar
from tools.common import Notify
from tools.io import parse_corr_to_match_set, read_list
FLAGS = tf.app.flags.FLAGS
def get_match_set_list(imageset_list_path, q_diff_thld, rot_diff_thld):
"""Get the path list of match sets.
Args:
imageset_list_path: Path to imageset list.
        q_diff_thld: Threshold of image pair sampling regarding camera orientation difference.
        rot_diff_thld: Threshold of image pair sampling regarding rotation difference.
    Returns:
        match_set_list: List of match set paths.
"""
imageset_list = [os.path.join(FLAGS.gl3d, 'data', i)
for i in read_list(imageset_list_path)]
print(Notify.INFO, 'Use # imageset', len(imageset_list), Notify.ENDC)
match_set_list = []
    # discard image pairs whose image similarity is beyond the threshold.
for i in imageset_list:
match_set_folder = os.path.join(i, 'match_sets')
if os.path.exists(match_set_folder):
match_set_files = os.listdir(match_set_folder)
for val in match_set_files:
name, ext = os.path.splitext(val)
if ext == '.match_set':
splits = name.split('_')
q_diff = int(splits[2])
rot_diff = int(splits[3])
if q_diff >= q_diff_thld and rot_diff <= rot_diff_thld:
match_set_list.append(
os.path.join(match_set_folder, val))
# ensure the testing gives deterministic results.
if not FLAGS.is_training:
seed(0)
shuffle(match_set_list)
print(Notify.INFO, 'Get # match sets', len(match_set_list), Notify.ENDC)
return match_set_list
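# Illustrative example (not part of the original module): the filter above
# assumes match-set file names carry at least four "_"-separated fields, with
# the orientation difference in the third field and the rotation difference in
# the fourth.  A hypothetical name such as "0_1_5_30.match_set" therefore has
# q_diff=5 and rot_diff=30 and passes the default thresholds (q_diff >= 3,
# rot_diff <= 60).
def _demo_match_set_filter(name='0_1_5_30', q_diff_thld=3, rot_diff_thld=60):
    splits = name.split('_')
    return int(splits[2]) >= q_diff_thld and int(splits[3]) <= rot_diff_thld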
def prepare_match_sets(regenerate, is_training, q_diff_thld=3, rot_diff_thld=60, data_split='comb'):
"""Generate match sets from corr.bin files. Index match sets w.r.t global image index list.
Args:
regenerate: Flag to indicate whether to regenerate match sets.
is_training: Use training imageset or testing imageset.
        q_diff_thld: Threshold of image pair sampling regarding camera orientation difference.
        rot_diff_thld: Threshold of image pair sampling regarding rotation difference.
        data_split: Data split name.
    Returns:
        match_set_list: List of match set paths.
        global_img_list: List of global image paths.
        global_depth_list: List of global depth map paths.
        global_reg_feat_list: List of global regional feature paths.
"""
# get necessary lists.
gl3d_list_folder = os.path.join(FLAGS.gl3d, 'list', data_split)
global_info = read_list(os.path.join(
gl3d_list_folder, 'image_index_offset.txt'))
global_img_list = [os.path.join(FLAGS.gl3d, i) for i in read_list(
os.path.join(gl3d_list_folder, 'image_list.txt'))]
global_reg_feat_list = [os.path.join(FLAGS.gl3d, i) for i in read_list(
os.path.join(gl3d_list_folder, 'regional_feat_list.txt'))]
global_depth_list = [os.path.join(FLAGS.gl3d, i) for i in read_list(
os.path.join(gl3d_list_folder, 'depth_list.txt'))]
lock_path = os.path.join(os.path.dirname(
os.path.realpath(__file__)), '.complete')
# generate match set files.
if os.path.exists(lock_path) and not regenerate:
print(Notify.INFO, 'Lock file exists without regeneration request. Skip the preparation.', Notify.ENDC)
else:
if os.path.exists(lock_path) and not FLAGS.dry_run:
os.remove(lock_path)
print(Notify.WARNING, 'Prepare match sets upon request.', Notify.ENDC)
prog_bar = progressbar.ProgressBar()
prog_bar.max_value = len(global_info)
start_time = time.time()
offset = 0
for idx, val in enumerate(global_info):
record = val.split(' ')
out_match_set_path = os.path.join(
FLAGS.gl3d, 'data', record[0], 'match_sets')
in_corr_path = os.path.join(
FLAGS.gl3d, 'data', record[0], 'geolabel', 'corr.bin')
kpt_path = os.path.join(FLAGS.gl3d, 'data', record[0], 'img_kpts')
camera_path = os.path.join(
FLAGS.gl3d, 'data', record[0], 'geolabel', 'cameras.txt')
parse_corr_to_match_set(in_corr_path, kpt_path, camera_path, out_match_set_path,
FLAGS.num_corr, offset, dry_run=FLAGS.dry_run,
visualize=False, global_img_list=global_img_list)
offset = int(record[2])
prog_bar.update(idx)
assert offset == len(global_img_list), Notify.FAIL + \
' Assertion fails.' + Notify.ENDC
        # create a lock file in case of incomplete preparation.
open(lock_path, 'w')
format_str = ('Time cost preparing match sets %.3f sec')
print(Notify.INFO, format_str %
(time.time() - start_time), Notify.ENDC)
# get the match set list.
imageset_list_name = 'imageset_train.txt' if is_training else 'imageset_test.txt'
match_set_list = get_match_set_list(os.path.join(
gl3d_list_folder, imageset_list_name), q_diff_thld, rot_diff_thld)
return match_set_list, global_img_list, global_depth_list, global_reg_feat_list
| [
"os.path.exists",
"os.listdir",
"random.shuffle",
"tools.io.parse_corr_to_match_set",
"tools.io.read_list",
"os.path.join",
"os.path.splitext",
"random.seed",
"os.path.realpath",
"os.remove",
"time.time",
"progressbar.ProgressBar"
] | [((1667, 1690), 'random.shuffle', 'shuffle', (['match_set_list'], {}), '(match_set_list)\n', (1674, 1690), False, 'from random import shuffle, seed\n'), ((2550, 2594), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', '"""list"""', 'data_split'], {}), "(FLAGS.gl3d, 'list', data_split)\n", (2562, 2594), False, 'import os\n'), ((653, 688), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', '"""data"""', 'i'], {}), "(FLAGS.gl3d, 'data', i)\n", (665, 688), False, 'import os\n'), ((976, 1005), 'os.path.join', 'os.path.join', (['i', '"""match_sets"""'], {}), "(i, 'match_sets')\n", (988, 1005), False, 'import os\n'), ((1017, 1049), 'os.path.exists', 'os.path.exists', (['match_set_folder'], {}), '(match_set_folder)\n', (1031, 1049), False, 'import os\n'), ((1655, 1662), 'random.seed', 'seed', (['(0)'], {}), '(0)\n', (1659, 1662), False, 'from random import shuffle, seed\n'), ((2623, 2679), 'os.path.join', 'os.path.join', (['gl3d_list_folder', '"""image_index_offset.txt"""'], {}), "(gl3d_list_folder, 'image_index_offset.txt')\n", (2635, 2679), False, 'import os\n'), ((2713, 2740), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', 'i'], {}), '(FLAGS.gl3d, i)\n', (2725, 2740), False, 'import os\n'), ((2848, 2875), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', 'i'], {}), '(FLAGS.gl3d, i)\n', (2860, 2875), False, 'import os\n'), ((2988, 3015), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', 'i'], {}), '(FLAGS.gl3d, i)\n', (3000, 3015), False, 'import os\n'), ((3230, 3255), 'os.path.exists', 'os.path.exists', (['lock_path'], {}), '(lock_path)\n', (3244, 3255), False, 'import os\n'), ((3589, 3614), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {}), '()\n', (3612, 3614), False, 'import progressbar\n'), ((3682, 3693), 'time.time', 'time.time', ([], {}), '()\n', (3691, 3693), False, 'import time\n'), ((5059, 5109), 'os.path.join', 'os.path.join', (['gl3d_list_folder', 'imageset_list_name'], {}), '(gl3d_list_folder, imageset_list_name)\n', (5071, 5109), False, 'import os\n'), ((719, 748), 'tools.io.read_list', 'read_list', (['imageset_list_path'], {}), '(imageset_list_path)\n', (728, 748), False, 'from tools.io import parse_corr_to_match_set, read_list\n'), ((1081, 1109), 'os.listdir', 'os.listdir', (['match_set_folder'], {}), '(match_set_folder)\n', (1091, 1109), False, 'import os\n'), ((3149, 3175), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (3165, 3175), False, 'import os\n'), ((3409, 3434), 'os.path.exists', 'os.path.exists', (['lock_path'], {}), '(lock_path)\n', (3423, 3434), False, 'import os\n'), ((3470, 3490), 'os.remove', 'os.remove', (['lock_path'], {}), '(lock_path)\n', (3479, 3490), False, 'import os\n'), ((3830, 3887), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', '"""data"""', 'record[0]', '"""match_sets"""'], {}), "(FLAGS.gl3d, 'data', record[0], 'match_sets')\n", (3842, 3887), False, 'import os\n'), ((3932, 3999), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', '"""data"""', 'record[0]', '"""geolabel"""', '"""corr.bin"""'], {}), "(FLAGS.gl3d, 'data', record[0], 'geolabel', 'corr.bin')\n", (3944, 3999), False, 'import os\n'), ((4040, 4095), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', '"""data"""', 'record[0]', '"""img_kpts"""'], {}), "(FLAGS.gl3d, 'data', record[0], 'img_kpts')\n", (4052, 4095), False, 'import os\n'), ((4122, 4192), 'os.path.join', 'os.path.join', (['FLAGS.gl3d', '"""data"""', 'record[0]', '"""geolabel"""', '"""cameras.txt"""'], {}), "(FLAGS.gl3d, 'data', record[0], 'geolabel', 'cameras.txt')\n", (4134, 4192), False, 'import 
os\n'), ((4222, 4407), 'tools.io.parse_corr_to_match_set', 'parse_corr_to_match_set', (['in_corr_path', 'kpt_path', 'camera_path', 'out_match_set_path', 'FLAGS.num_corr', 'offset'], {'dry_run': 'FLAGS.dry_run', 'visualize': '(False)', 'global_img_list': 'global_img_list'}), '(in_corr_path, kpt_path, camera_path,\n out_match_set_path, FLAGS.num_corr, offset, dry_run=FLAGS.dry_run,\n visualize=False, global_img_list=global_img_list)\n', (4245, 4407), False, 'from tools.io import parse_corr_to_match_set, read_list\n'), ((1178, 1199), 'os.path.splitext', 'os.path.splitext', (['val'], {}), '(val)\n', (1194, 1199), False, 'import os\n'), ((2769, 2817), 'os.path.join', 'os.path.join', (['gl3d_list_folder', '"""image_list.txt"""'], {}), "(gl3d_list_folder, 'image_list.txt')\n", (2781, 2817), False, 'import os\n'), ((2904, 2960), 'os.path.join', 'os.path.join', (['gl3d_list_folder', '"""regional_feat_list.txt"""'], {}), "(gl3d_list_folder, 'regional_feat_list.txt')\n", (2916, 2960), False, 'import os\n'), ((3044, 3092), 'os.path.join', 'os.path.join', (['gl3d_list_folder', '"""depth_list.txt"""'], {}), "(gl3d_list_folder, 'depth_list.txt')\n", (3056, 3092), False, 'import os\n'), ((4863, 4874), 'time.time', 'time.time', ([], {}), '()\n', (4872, 4874), False, 'import time\n'), ((1526, 1561), 'os.path.join', 'os.path.join', (['match_set_folder', 'val'], {}), '(match_set_folder, val)\n', (1538, 1561), False, 'import os\n')] |
from appJar import gui
app = gui()
app.setPadding(10,10)
#app.setBg("red")
app.addLabel("0", "This is where it starts")
app.setLabelBg("0", "white")
app.startLabelFrame("Big One")
app.setLabelFrameBg("Big One", "green")
app.addLabel("1", "This is a label")
app.setLabelPadding("1", 4, 5)
app.addLabel("2", "This is a label")
app.addLabel("3", "This is a label")
app.stopLabelFrame()
app.addLabel("10", "This is where it starts")
app.startLabelFrame("Big Two")
#app.setContainerPadding(10,10)
app.setLabelFrameBg("Big Two", "orange")
app.addLabel("4", "This is a label")
app.addLabel("5", "This is a label")
app.addLabel("6", "This is a label")
app.stopLabelFrame()
app.addLabel("11", "This is where it starts")
app.go()
| [
"appJar.gui"
] | [((30, 35), 'appJar.gui', 'gui', ([], {}), '()\n', (33, 35), False, 'from appJar import gui\n')] |
import time
import struct
import socket
import logging
from contextlib import closing
import cPickle
import numpy as np
# TODO: verify that the log levels are correct here.
logger = logging.getLogger(__name__)
def get_udp_packets(ri, npkts, streamid, stream_reg='streamid', addr=('192.168.1.1', 12345)):
ri.r.write_int(stream_reg, 0)
with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
s.bind(addr)
s.settimeout(0)
nstale = 0
try:
while s.recv(2000):
nstale += 1
if nstale:
logger.info("Flushed {} packets.".format(nstale))
except socket.error:
pass
s.settimeout(1)
ri.r.write_int(stream_reg, streamid)
pkts = []
while len(pkts) < npkts:
pkt = s.recv(2000)
if pkt:
pkts.append(pkt)
else:
logger.warning("Did not receive UDP data.")
break
ri.r.write_int(stream_reg, 0)
return pkts
def get_udp_data(ri, npkts, streamid, chans, nfft, stream_reg='streamid', addr=('192.168.1.1', 12345)):
pkts = get_udp_packets(ri, npkts, streamid, stream_reg=stream_reg, addr=addr)
darray, seqnos = decode_packets(pkts, streamid, chans, nfft)
return darray, seqnos
ptype = np.dtype([('idle', '>i2'),
('idx', '>i2'),
('stream', '>i2'),
('chan', '>i2'),
('mcntr', '>i4')])
hdr_fmt = ">4HI"
hdr_size = struct.calcsize(hdr_fmt)
pkt_size = hdr_size + 1024
null_pkt = "\x00" * 1024
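# Illustrative check (not part of the original module): hdr_fmt ">4HI" packs
# four big-endian unsigned shorts (idle, idx, stream, chan) followed by one
# unsigned int (mcnt), so hdr_size is 12 bytes and a full packet is expected
# to be hdr_size + 1024 = 1036 bytes.  The demo below is never called by the
# module itself.
def _demo_header_roundtrip():
    header = struct.pack(hdr_fmt, 0, 1, 2, 3, 4096)
    assert len(header) == hdr_size == 12
    assert struct.unpack(hdr_fmt, header) == (0, 1, 2, 3, 4096)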
def decode_packets(plist, streamid, chans, nfft, pkts_per_chunk=16, capture_failures=False):
nchan = chans.shape[0]
mcnt_inc = nfft * 2 ** 12 / nchan
next_seqno = None
mcnt_top = 0
dset = []
mcntoff = None
last_mcnt_ovf = None
seqnos = []
nextseqnos = []
chan0 = None
for pnum, pkt in enumerate(plist):
if len(pkt) != pkt_size:
logger.warning("Packet size is {} but expected {}.".format(len(pkt), pkt_size))
continue
pidle, pidx, pstream, pchan, pmcnt = struct.unpack(hdr_fmt, pkt[:hdr_size])
if pstream != streamid:
logger.warning("Stream id is {} but expected {}".format(pstream, streamid))
continue
if next_seqno is None:
mcnt_top = 0
last_mcnt_ovf = pmcnt
else:
if pmcnt < mcnt_inc:
if last_mcnt_ovf != pmcnt:
message = "Detected mcnt overflow {} {} {} {} {} {} {}"
logger.info(message.format(last_mcnt_ovf, pmcnt, pidx, next_seqno, mcnt_top / 2 ** 32, pnum, mcntoff))
mcnt_top += 2 ** 32
last_mcnt_ovf = pmcnt
else:
# print "continuation of previous mcnt overflow",pidx
pass
else:
last_mcnt_ovf = None
chunkno, pmcntoff = divmod(pmcnt + mcnt_top, mcnt_inc)
# print chunkno,pmcnt,pmcntoff,pidx
seqno = (chunkno) * pkts_per_chunk + pidx
# print seqno
seqnos.append(seqno)
nextseqnos.append(next_seqno)
if next_seqno is None:
chan0 = pchan
# print "found first packet",seqno,pidx
next_seqno = seqno
mcntoff = pmcntoff
# print pchan
if mcntoff != pmcntoff:
logger.warning("mcnt offset jumped: was {} and is now {} ... dropping ...".format(mcntoff, pmcntoff))
continue
if pchan != chan0:
logger.warning("warning: channel id changed from {} to {}.".format(chan0, pchan))
if seqno - next_seqno < 0:
logger.warning("seqno diff: {} {} {}".format(seqno - next_seqno, seqno, next_seqno))
continue # trying to go backwards
if seqno == next_seqno:
dset.append(pkt[hdr_size:])
next_seqno += 1
else:
message = "sequence number skip: expected {} and got {}; inserting {} null packets; {} {}"
logger.warning(message.format(next_seqno, seqno, seqno - next_seqno, pnum, pidx))
if capture_failures: # seqno-next_seqno == 32768:
fname = time.strftime("udp_skip_%Y-%m-%d_%H%M%S.pkl")
logger.warning("caught special case, writing to disk: {}".format(fname))
fh = open(fname, 'w')
cPickle.dump(
dict(plist=plist, dset=dset, pnum=pnum, pkt=pkt, streamid=streamid, chans=chans, nfft=nfft),
fh, cPickle.HIGHEST_PROTOCOL)
fh.close()
for k in range(seqno - next_seqno + 1):
dset.append(null_pkt)
next_seqno += 1
dset = ''.join(dset)
ns = (len(dset) // (4 * nchan))
dset = dset[:ns * (4 * nchan)]
darray = np.fromstring(dset, dtype='>i2').astype('float32').view('complex64')
darray.shape = (ns, nchan)
shift = np.flatnonzero(chans == (chan0))[0] - (nchan - 1)
darray = np.roll(darray, shift, axis=1)
return darray, np.array(seqnos)
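# Illustrative note (not part of the original module): decode_packets() orders
# packets by a sequence number derived from the mcnt counter.  Ignoring mcnt
# overflows (mcnt_top == 0) and assuming a hypothetical mcnt_inc of 2**20, a
# packet with pmcnt = 3 * 2**20 + 7 and pidx = 5 falls into chunk 3 and gets
# seqno = 3 * pkts_per_chunk + 5 = 53.
def _demo_seqno(pmcnt=3 * 2 ** 20 + 7, pidx=5, mcnt_inc=2 ** 20, pkts_per_chunk=16):
    chunkno, _mcntoff = divmod(pmcnt, mcnt_inc)
    return chunkno * pkts_per_chunk + pidx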
| [
"logging.getLogger",
"struct.calcsize",
"numpy.roll",
"socket.socket",
"numpy.flatnonzero",
"time.strftime",
"numpy.array",
"struct.unpack",
"numpy.dtype",
"numpy.fromstring"
] | [((184, 211), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (201, 211), False, 'import logging\n'), ((1334, 1435), 'numpy.dtype', 'np.dtype', (["[('idle', '>i2'), ('idx', '>i2'), ('stream', '>i2'), ('chan', '>i2'), (\n 'mcntr', '>i4')]"], {}), "([('idle', '>i2'), ('idx', '>i2'), ('stream', '>i2'), ('chan',\n '>i2'), ('mcntr', '>i4')])\n", (1342, 1435), True, 'import numpy as np\n'), ((1533, 1557), 'struct.calcsize', 'struct.calcsize', (['hdr_fmt'], {}), '(hdr_fmt)\n', (1548, 1557), False, 'import struct\n'), ((5111, 5141), 'numpy.roll', 'np.roll', (['darray', 'shift'], {'axis': '(1)'}), '(darray, shift, axis=1)\n', (5118, 5141), True, 'import numpy as np\n'), ((2150, 2188), 'struct.unpack', 'struct.unpack', (['hdr_fmt', 'pkt[:hdr_size]'], {}), '(hdr_fmt, pkt[:hdr_size])\n', (2163, 2188), False, 'import struct\n'), ((5161, 5177), 'numpy.array', 'np.array', (['seqnos'], {}), '(seqnos)\n', (5169, 5177), True, 'import numpy as np\n'), ((359, 407), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (372, 407), False, 'import socket\n'), ((5048, 5078), 'numpy.flatnonzero', 'np.flatnonzero', (['(chans == chan0)'], {}), '(chans == chan0)\n', (5062, 5078), True, 'import numpy as np\n'), ((4312, 4357), 'time.strftime', 'time.strftime', (['"""udp_skip_%Y-%m-%d_%H%M%S.pkl"""'], {}), "('udp_skip_%Y-%m-%d_%H%M%S.pkl')\n", (4325, 4357), False, 'import time\n'), ((4936, 4968), 'numpy.fromstring', 'np.fromstring', (['dset'], {'dtype': '""">i2"""'}), "(dset, dtype='>i2')\n", (4949, 4968), True, 'import numpy as np\n')] |
#!/usr/bin/env python
import cv_bridge
import message_filters
import numpy as np
import rospy
import tf
# For debug
# import grasp_fusion_lib
from jsk_topic_tools import ConnectionBasedTransport
from jsk_topic_tools.log_utils import logerr_throttle
from sensor_msgs.msg import CameraInfo
from sensor_msgs.msg import Image
from grasp_fusion_lib.contrib import grasp_fusion
class GetHeightmap(ConnectionBasedTransport):
def __init__(self):
super(GetHeightmap, self).__init__()
self.heightmap_frame = rospy.get_param('~heightmap_frame')
# Size[m] of each height map pixel
self.voxel_size = rospy.get_param('~voxel_size')
self.listener = tf.TransformListener()
self.tft = tf.TransformerROS()
self.br = cv_bridge.CvBridge()
# ROS publishers
self.pub_rgb = self.advertise('~output/rgb', Image, queue_size=1)
self.pub_depth = self.advertise('~output/depth', Image, queue_size=1)
self.pub_label = self.advertise('~output/label', Image, queue_size=1)
self._bg_label = rospy.get_param('~bg_label', 0)
def subscribe(self):
self.sub_rgb = message_filters.Subscriber(
'~input/rgb', Image, queue_size=1, buff_size=2**24
)
self.sub_depth = message_filters.Subscriber(
'~input/depth', Image, queue_size=1, buff_size=2**24
)
self.sub_info = message_filters.Subscriber(
'~input/camera_info', CameraInfo
)
sync = message_filters.ApproximateTimeSynchronizer(
[self.sub_rgb, self.sub_depth, self.sub_info],
queue_size=100,
slop=0.1,
)
sync.registerCallback(self.callback, 'rgb')
self.sub_label = message_filters.Subscriber(
'~input/label', Image, queue_size=1, buff_size=2**24
)
sync = message_filters.ApproximateTimeSynchronizer(
[self.sub_label, self.sub_depth, self.sub_info],
queue_size=100,
slop=0.1,
)
sync.registerCallback(self.callback, 'label')
def unsubscribe(self):
self.sub_rgb.unregister()
self.sub_depth.unregister()
self.sub_info.unregister()
self.sub_label.unregister()
def callback(self, img_input, depth_input, cam_info, mode):
assert mode in ['rgb', 'label']
# From tf, generate camera pose w.r.t heightmap_frame
try:
trans, rot \
= self.listener.lookupTransform(self.heightmap_frame,
img_input.header.frame_id,
rospy.Time(0))
except Exception as e:
logerr_throttle(10, e)
return
cam_pose = self.tft.fromTranslationRotation(trans, rot)
# Generate other data
cam_intrinsics = np.array(cam_info.K).reshape(3, 3)
if mode == 'rgb':
color_img = self.br.imgmsg_to_cv2(
img_input, desired_encoding='rgb8'
)
color_img = color_img.astype(float) / 255 # Convert to range [0,1]
label_img = np.zeros(
(color_img.shape[0], color_img.shape[1]), dtype=np.int32
)
else:
label_img = self.br.imgmsg_to_cv2(
img_input, desired_encoding='passthrough'
)
# this should be filled by 1 for bg subtraction in get_heightmap
color_img = np.ones(
(label_img.shape[0], label_img.shape[1], 3), dtype=float
)
depth_img = self.br.imgmsg_to_cv2(
depth_input, desired_encoding='passthrough'
)
# Replace nan element to zero
depth_img = np.where(np.isnan(depth_img), 0, depth_img)
if depth_input.encoding == '16UC1':
depth_img = depth_img.astype(float) / 1000.0 # Convert mm to m
elif depth_input.encoding != '32FC1':
enc = depth_input.encoding
logerr_throttle(10, 'Unsupported depth encoding: %s' % enc)
return
# Generate heightmap w.r.t heightmap_frame
heightmap_color, heightmap, missing_heightmap, heightmap_label \
= grasp_fusion.utils.get_heightmap(
color_img=color_img,
depth_img=depth_img,
bg_color_img=np.zeros_like(color_img),
bg_depth_img=np.zeros_like(depth_img),
cam_intrinsics=cam_intrinsics,
cam_pose=cam_pose,
grid_origin=np.array([0, 0, 0]),
grid_rot=np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
suction_img=label_img,
voxel_size=self.voxel_size,
suction_cval=self._bg_label,
)
color_data, depth_data \
= grasp_fusion.utils.heightmap_postprocess(
heightmap_color,
heightmap,
missing_heightmap,
)
# it is scaled in postprocess
depth_data = (depth_data / 10000.).astype(np.float32)
heightmap_label = heightmap_label.reshape(
heightmap.shape[0], heightmap.shape[1],
)
# Consider pixels whose height is 0 as background
heightmap_label[heightmap == 0] = self._bg_label
label_data = np.full((224, 320), self._bg_label, dtype=label_img.dtype)
label_data[12:212, 10:310] = heightmap_label
# For debug
# depth = grasp_fusion_lib.image.colorize_depth(depth_data,
# min_value=0, max_value=1.5)
# viz = grasp_fusion_lib.image.tile([color_data, depth], (1, 2))
# grasp_fusion_lib.io.imshow(viz)
# grasp_fusion_lib.io.waitkey()
if mode == 'rgb':
rgb_output = self.br.cv2_to_imgmsg(color_data, encoding='rgb8')
rgb_output.header = img_input.header
self.pub_rgb.publish(rgb_output)
else:
assert mode == 'label'
label_output = self.br.cv2_to_imgmsg(label_data)
label_output.header = img_input.header
self.pub_label.publish(label_output)
depth_output = self.br.cv2_to_imgmsg(
depth_data, encoding='passthrough'
)
depth_output.header = img_input.header
self.pub_depth.publish(depth_output)
if __name__ == '__main__':
rospy.init_node('get_heightmap')
get_heightmap = GetHeightmap()
rospy.spin()
| [
"grasp_fusion_lib.contrib.grasp_fusion.utils.heightmap_postprocess",
"numpy.ones",
"rospy.init_node",
"rospy.get_param",
"numpy.zeros_like",
"tf.TransformerROS",
"cv_bridge.CvBridge",
"numpy.array",
"numpy.zeros",
"rospy.Time",
"numpy.isnan",
"tf.TransformListener",
"rospy.spin",
"message_filters.Subscriber",
"numpy.full",
"message_filters.ApproximateTimeSynchronizer",
"jsk_topic_tools.log_utils.logerr_throttle"
] | [((6395, 6427), 'rospy.init_node', 'rospy.init_node', (['"""get_heightmap"""'], {}), "('get_heightmap')\n", (6410, 6427), False, 'import rospy\n'), ((6467, 6479), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (6477, 6479), False, 'import rospy\n'), ((525, 560), 'rospy.get_param', 'rospy.get_param', (['"""~heightmap_frame"""'], {}), "('~heightmap_frame')\n", (540, 560), False, 'import rospy\n'), ((630, 660), 'rospy.get_param', 'rospy.get_param', (['"""~voxel_size"""'], {}), "('~voxel_size')\n", (645, 660), False, 'import rospy\n'), ((686, 708), 'tf.TransformListener', 'tf.TransformListener', ([], {}), '()\n', (706, 708), False, 'import tf\n'), ((728, 747), 'tf.TransformerROS', 'tf.TransformerROS', ([], {}), '()\n', (745, 747), False, 'import tf\n'), ((766, 786), 'cv_bridge.CvBridge', 'cv_bridge.CvBridge', ([], {}), '()\n', (784, 786), False, 'import cv_bridge\n'), ((1069, 1100), 'rospy.get_param', 'rospy.get_param', (['"""~bg_label"""', '(0)'], {}), "('~bg_label', 0)\n", (1084, 1100), False, 'import rospy\n'), ((1150, 1235), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/rgb"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/rgb', Image, queue_size=1, buff_size=2 ** 24\n )\n", (1176, 1235), False, 'import message_filters\n'), ((1276, 1362), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/depth"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/depth', Image, queue_size=1, buff_size=2 **\n 24)\n", (1302, 1362), False, 'import message_filters\n'), ((1403, 1463), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/camera_info"""', 'CameraInfo'], {}), "('~input/camera_info', CameraInfo)\n", (1429, 1463), False, 'import message_filters\n'), ((1501, 1621), 'message_filters.ApproximateTimeSynchronizer', 'message_filters.ApproximateTimeSynchronizer', (['[self.sub_rgb, self.sub_depth, self.sub_info]'], {'queue_size': '(100)', 'slop': '(0.1)'}), '([self.sub_rgb, self.sub_depth,\n self.sub_info], queue_size=100, slop=0.1)\n', (1544, 1621), False, 'import message_filters\n'), ((1743, 1829), 'message_filters.Subscriber', 'message_filters.Subscriber', (['"""~input/label"""', 'Image'], {'queue_size': '(1)', 'buff_size': '(2 ** 24)'}), "('~input/label', Image, queue_size=1, buff_size=2 **\n 24)\n", (1769, 1829), False, 'import message_filters\n'), ((1861, 1983), 'message_filters.ApproximateTimeSynchronizer', 'message_filters.ApproximateTimeSynchronizer', (['[self.sub_label, self.sub_depth, self.sub_info]'], {'queue_size': '(100)', 'slop': '(0.1)'}), '([self.sub_label, self.sub_depth,\n self.sub_info], queue_size=100, slop=0.1)\n', (1904, 1983), False, 'import message_filters\n'), ((4830, 4921), 'grasp_fusion_lib.contrib.grasp_fusion.utils.heightmap_postprocess', 'grasp_fusion.utils.heightmap_postprocess', (['heightmap_color', 'heightmap', 'missing_heightmap'], {}), '(heightmap_color, heightmap,\n missing_heightmap)\n', (4870, 4921), False, 'from grasp_fusion_lib.contrib import grasp_fusion\n'), ((5331, 5389), 'numpy.full', 'np.full', (['(224, 320)', 'self._bg_label'], {'dtype': 'label_img.dtype'}), '((224, 320), self._bg_label, dtype=label_img.dtype)\n', (5338, 5389), True, 'import numpy as np\n'), ((3147, 3213), 'numpy.zeros', 'np.zeros', (['(color_img.shape[0], color_img.shape[1])'], {'dtype': 'np.int32'}), '((color_img.shape[0], color_img.shape[1]), dtype=np.int32)\n', (3155, 3213), True, 'import numpy as np\n'), ((3478, 3543), 'numpy.ones', 'np.ones', (['(label_img.shape[0], 
label_img.shape[1], 3)'], {'dtype': 'float'}), '((label_img.shape[0], label_img.shape[1], 3), dtype=float)\n', (3485, 3543), True, 'import numpy as np\n'), ((3751, 3770), 'numpy.isnan', 'np.isnan', (['depth_img'], {}), '(depth_img)\n', (3759, 3770), True, 'import numpy as np\n'), ((2649, 2662), 'rospy.Time', 'rospy.Time', (['(0)'], {}), '(0)\n', (2659, 2662), False, 'import rospy\n'), ((2707, 2729), 'jsk_topic_tools.log_utils.logerr_throttle', 'logerr_throttle', (['(10)', 'e'], {}), '(10, e)\n', (2722, 2729), False, 'from jsk_topic_tools.log_utils import logerr_throttle\n'), ((2869, 2889), 'numpy.array', 'np.array', (['cam_info.K'], {}), '(cam_info.K)\n', (2877, 2889), True, 'import numpy as np\n'), ((4003, 4062), 'jsk_topic_tools.log_utils.logerr_throttle', 'logerr_throttle', (['(10)', "('Unsupported depth encoding: %s' % enc)"], {}), "(10, 'Unsupported depth encoding: %s' % enc)\n", (4018, 4062), False, 'from jsk_topic_tools.log_utils import logerr_throttle\n'), ((4358, 4382), 'numpy.zeros_like', 'np.zeros_like', (['color_img'], {}), '(color_img)\n', (4371, 4382), True, 'import numpy as np\n'), ((4413, 4437), 'numpy.zeros_like', 'np.zeros_like', (['depth_img'], {}), '(depth_img)\n', (4426, 4437), True, 'import numpy as np\n'), ((4549, 4568), 'numpy.array', 'np.array', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (4557, 4568), True, 'import numpy as np\n'), ((4595, 4638), 'numpy.array', 'np.array', (['[[1, 0, 0], [0, 1, 0], [0, 0, 1]]'], {}), '([[1, 0, 0], [0, 1, 0], [0, 0, 1]])\n', (4603, 4638), True, 'import numpy as np\n')] |
import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="httpserver", # Replace with your own username
version="1.0.0",
author="<NAME>",
author_email="<EMAIL>",
description="Custom HTTP Server",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/evinlort/http-server",
packages=setuptools.find_packages(include=["*", "httpserver", "httpserver.*"]),
classifiers=[
"Programming Language :: Python :: 3.7",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.7',
license='MIT',
zip_safe=False
)
| [
"setuptools.find_packages"
] | [((425, 494), 'setuptools.find_packages', 'setuptools.find_packages', ([], {'include': "['*', 'httpserver', 'httpserver.*']"}), "(include=['*', 'httpserver', 'httpserver.*'])\n", (449, 494), False, 'import setuptools\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Union
from iceberg.api.types import StringType, Type
from .transform import Transform
class UnknownTransform(Transform):
def __init__(self, source_type: Type, transform: str):
self.source_type = source_type
self.transform = transform
def apply(self, value):
raise AttributeError(f"Cannot apply unsupported transform: {self.transform}")
def can_transform(self, type_var) -> bool:
# assume the transform function can be applied for this type because unknown transform is only used when parsing
# a transform in an existing table. a different Iceberg version must have already validated it.
return self.source_type == type_var
def get_result_type(self, source_type):
# the actual result type is not known
return StringType.get()
def project(self, name, predicate):
return None
def project_strict(self, name, predicate):
return None
def __str__(self):
return self.transform
def __eq__(self, other: Union['UnknownTransform', Transform, object]):
if id(self) == id(other):
return True
elif not isinstance(other, UnknownTransform):
return False
return self.source_type == other.source_type and self.transform == other.transform
def __hash__(self):
return hash((self.source_type, self.transform))
| [
"iceberg.api.types.StringType.get"
] | [((1602, 1618), 'iceberg.api.types.StringType.get', 'StringType.get', ([], {}), '()\n', (1616, 1618), False, 'from iceberg.api.types import StringType, Type\n')] |
#!/usr/bin/env python
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(name='TailSpin',
version='0.1',
description='Efficient Tail Recursion',
author='<NAME>',
author_email='<EMAIL>',
url='http://xavecode.mit.edu/tailspin/',
package_dir = {'': 'src'},
packages=['tailspin'],
requires=['Cython'],
cmdclass = {'build_ext': build_ext},
ext_modules = [Extension("tailspin.fast_h",
["src/tailspin/fast_h.pyx"]),
Extension("tailspin.fastlazy_h",
["src/tailspin/fastlazy_h.pyx"])]
)
| [
"distutils.extension.Extension"
] | [((482, 539), 'distutils.extension.Extension', 'Extension', (['"""tailspin.fast_h"""', "['src/tailspin/fast_h.pyx']"], {}), "('tailspin.fast_h', ['src/tailspin/fast_h.pyx'])\n", (491, 539), False, 'from distutils.extension import Extension\n'), ((593, 658), 'distutils.extension.Extension', 'Extension', (['"""tailspin.fastlazy_h"""', "['src/tailspin/fastlazy_h.pyx']"], {}), "('tailspin.fastlazy_h', ['src/tailspin/fastlazy_h.pyx'])\n", (602, 658), False, 'from distutils.extension import Extension\n')] |
# -*- coding: utf-8 -*-
#-----------------------------------------------------------------------------
# Copyright (c) 2005-2015, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# Library imports
# ---------------
import glob
import locale
import os
import shutil
import sys
import subprocess
# Third-party imports
# -------------------
import pytest
# Local imports
# -------------
from PyInstaller.compat import architecture, is_darwin, is_win, is_py2
from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, \
skipif_notwin, skipif_notosx
def test_run_from_path_environ(pyi_builder):
pyi_builder.test_script('pyi_absolute_python_path.py', run_from_path=True)
@skipif_winorosx
def test_absolute_ld_library_path(pyi_builder):
pyi_builder.test_script('pyi_absolute_ld_library_path.py')
def test_absolute_python_path(pyi_builder):
pyi_builder.test_script('pyi_absolute_python_path.py')
def test_celementtree(pyi_builder):
pyi_builder.test_script('pyi_celementtree.py')
@importorskip('codecs')
def test_codecs(pyi_builder):
pyi_builder.test_script('pyi_codecs.py')
def test_decoders_ascii(pyi_builder):
pyi_builder.test_script('pyi_decoders_ascii.py')
def test_distutils_submod(pyi_builder):
pyi_builder.test_script('pyi_distutils_submod.py')
def test_dynamic_module(pyi_builder):
pyi_builder.test_script('pyi_dynamic_module.py')
def test_email(pyi_builder):
pyi_builder.test_script('pyi_email.py')
@importorskip('Crypto')
def test_feature_crypto(pyi_builder):
pyi_builder.test_script('pyi_feature_crypto.py', pyi_args=['--key=test_key'])
def test_feature_nocrypto(pyi_builder):
pyi_builder.test_script('pyi_feature_nocrypto.py')
def test_filename(pyi_builder):
pyi_builder.test_script('pyi_filename.py')
def test_getfilesystemencoding(pyi_builder):
pyi_builder.test_script('pyi_getfilesystemencoding.py')
def test_helloworld(pyi_builder):
pyi_builder.test_script('pyi_helloworld.py')
def test_module__file__attribute(pyi_builder):
pyi_builder.test_script('pyi_module__file__attribute.py')
def test_module_attributes(tmpdir, pyi_builder):
# Create file in tmpdir with path to python executable and if it is running
# in debug mode.
# Test script uses python interpreter to compare module attributes.
with open(os.path.join(tmpdir.strpath, 'python_exe.build'), 'w') as f:
f.write(sys.executable + "\n")
f.write('debug=%s' % __debug__ + '\n')
        # On Windows we need to preserve the system PATH for subprocesses in tests.
f.write(os.environ.get('PATH') + '\n')
pyi_builder.test_script('pyi_module_attributes.py')
def test_module_reload(pyi_builder):
pyi_builder.test_script('pyi_module_reload.py')
# TODO move 'multiprocessig' tests into 'test_multiprocess.py.
@importorskip('multiprocessing')
def test_multiprocess(pyi_builder):
pyi_builder.test_script('pyi_multiprocess.py')
@importorskip('multiprocessing')
def test_multiprocess_forking(pyi_builder):
pyi_builder.test_script('pyi_multiprocess_forking.py')
@importorskip('multiprocessing')
def test_multiprocess_pool(pyi_builder):
pyi_builder.test_script('pyi_multiprocess_pool.py')
# TODO skip this test if C compiler is not found.
# TODO test it on OS X.
def test_load_dll_using_ctypes(monkeypatch, pyi_builder, compiled_dylib):
# Note that including the data_dir fixture copies files needed by this test.
#
# TODO Make sure PyInstaller is able to find the library and bundle it with the app.
# # If the required dylib does not reside in the current directory, the Analysis
# # class machinery, based on ctypes.util.find_library, will not find it. This
# # was done on purpose for this test, to show how to give Analysis class
# # a clue.
# if is_win:
# os.environ['PATH'] = os.path.abspath(CTYPES_DIR) + ';' + os.environ['PATH']
# else:
# os.environ['LD_LIBRARY_PATH'] = CTYPES_DIR
# os.environ['DYLD_LIBRARY_PATH'] = CTYPES_DIR
# os.environ['LIBPATH'] = CTYPES_DIR
# Build and run the app.
pyi_builder.test_script('pyi_load_dll_using_ctypes.py')
def test_get_meipass_value(pyi_builder):
pyi_builder.test_script('pyi_get_meipass_value.py')
def test_chdir_meipass(pyi_builder):
pyi_builder.test_script('pyi_chdir_meipass.py')
def test_option_exclude_module(pyi_builder):
"""
Test to ensure that when using option --exclude-module=xml.sax
the module 'xml.sax' won't be bundled.
"""
pyi_builder.test_script('pyi_option_exclude_module.py',
pyi_args=['--exclude-module', 'xml.sax'])
@skipif_win
def test_python_makefile(pyi_builder):
pyi_builder.test_script('pyi_python_makefile.py')
def test_set_icon(pyi_builder, data_dir):
if is_win:
args = ['--icon', os.path.join(data_dir.strpath, 'pyi_icon.ico')]
elif is_darwin:
# On OS X icon is applied only for windowed mode.
args = ['--windowed', '--icon', os.path.join(data_dir.strpath, 'pyi_icon.icns')]
else:
pytest.skip('option --icon works only on Windows and Mac OS X')
# Just use helloworld script.
pyi_builder.test_script('pyi_helloworld.py', pyi_args=args)
def test_python_home(pyi_builder):
pyi_builder.test_script('pyi_python_home.py')
def test_stderr_encoding(tmpdir, pyi_builder):
# NOTE: '-s' option to pytest disables output capturing, changing this test's result:
# without -s: py.test process changes its own stdout encoding to 'UTF-8' to
# capture output. subprocess spawned by py.test has stdout encoding
# 'cp1252', which is an ANSI codepage. test fails as they do not match.
# with -s: py.test process has stdout encoding from windows terminal, which is an
# OEM codepage. spawned subprocess has the same encoding. test passes.
#
with open(os.path.join(tmpdir.strpath, 'stderr_encoding.build'), 'w') as f:
if is_py2:
if sys.stderr.isatty() and is_win:
enc = str(sys.stderr.encoding)
else:
# In Python 2 on Mac OS X and Linux 'sys.stderr.encoding' is set to None.
# On Windows when running in non-interactive terminal it is None.
enc = 'None'
elif sys.stderr.isatty():
enc = str(sys.stderr.encoding)
else:
# For non-interactive stderr use locale encoding - ANSI codepage.
# This fixes the test when running with py.test and capturing output.
enc = locale.getpreferredencoding(False)
f.write(enc)
pyi_builder.test_script('pyi_stderr_encoding.py')
def test_stdout_encoding(tmpdir, pyi_builder):
with open(os.path.join(tmpdir.strpath, 'stdout_encoding.build'), 'w') as f:
if is_py2:
if sys.stdout.isatty() and is_win:
enc = str(sys.stdout.encoding)
else:
# In Python 2 on Mac OS X and Linux 'sys.stdout.encoding' is set to None.
# On Windows when running in non-interactive terminal it is None.
enc = 'None'
elif sys.stdout.isatty():
enc = str(sys.stdout.encoding)
else:
            # For non-interactive stdout use locale encoding - ANSI codepage.
# This fixes the test when running with py.test and capturing output.
enc = locale.getpreferredencoding(False)
f.write(enc)
pyi_builder.test_script('pyi_stdout_encoding.py')
def test_site_module_disabled(pyi_builder):
pyi_builder.test_script('pyi_site_module_disabled.py')
def test_time_module(pyi_builder):
pyi_builder.test_script('pyi_time_module.py')
@skipif_win
def test_time_module_localized(pyi_builder, monkeypatch):
# This checks that functions 'time.ctime()' and 'time.strptime()'
# use the same locale. There was an issue with bootloader where
# every function was using different locale:
# time.ctime was using 'C'
# time.strptime was using 'xx_YY' from the environment.
lang = 'cs_CZ' if is_darwin else 'cs_CZ.UTF-8'
monkeypatch.setenv('LC_ALL', lang)
pyi_builder.test_script('pyi_time_module.py')
def test_xmldom_module(pyi_builder):
pyi_builder.test_script('pyi_xmldom_module.py')
def test_threading_module(pyi_builder):
pyi_builder.test_script('pyi_threading_module.py')
def test_argument(pyi_builder):
pyi_builder.test_script('pyi_argument.py', app_args=["--argument"])
@importorskip('win32com')
def test_pywin32_win32com(pyi_builder):
pyi_builder.test_script('pyi_pywin32_win32com.py')
@importorskip('win32ui')
def test_pywin32_win32ui(pyi_builder):
pyi_builder.test_script('pyi_pywin32_win32ui.py')
@skipif_notwin
def test_renamed_exe(pyi_builder):
_old_find_executables = pyi_builder._find_executables
def _find_executables(name):
oldexes = _old_find_executables(name)
newexes = []
for old in oldexes:
new = os.path.join(os.path.dirname(old), "renamed_" + os.path.basename(old))
os.rename(old, new)
newexes.append(new)
return newexes
pyi_builder._find_executables = _find_executables
pyi_builder.test_script('pyi_helloworld.py')
@skipif_notosx
def test_osx_override_info_plist(pyi_builder_spec):
pyi_builder_spec.test_spec('pyi_osx_override_info_plist.spec')
| [
"PyInstaller.utils.tests.importorskip",
"os.rename",
"locale.getpreferredencoding",
"os.path.join",
"os.environ.get",
"sys.stderr.isatty",
"os.path.dirname",
"sys.stdout.isatty",
"os.path.basename",
"pytest.skip"
] | [((1278, 1300), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""codecs"""'], {}), "('codecs')\n", (1290, 1300), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((1737, 1759), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""Crypto"""'], {}), "('Crypto')\n", (1749, 1759), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((3088, 3119), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""multiprocessing"""'], {}), "('multiprocessing')\n", (3100, 3119), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((3210, 3241), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""multiprocessing"""'], {}), "('multiprocessing')\n", (3222, 3241), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((3348, 3379), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""multiprocessing"""'], {}), "('multiprocessing')\n", (3360, 3379), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((8773, 8797), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""win32com"""'], {}), "('win32com')\n", (8785, 8797), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((8896, 8919), 'PyInstaller.utils.tests.importorskip', 'importorskip', (['"""win32ui"""'], {}), "('win32ui')\n", (8908, 8919), False, 'from PyInstaller.utils.tests import importorskip, skipif_win, skipif_winorosx, skipif_notwin, skipif_notosx\n'), ((2599, 2647), 'os.path.join', 'os.path.join', (['tmpdir.strpath', '"""python_exe.build"""'], {}), "(tmpdir.strpath, 'python_exe.build')\n", (2611, 2647), False, 'import os\n'), ((5108, 5154), 'os.path.join', 'os.path.join', (['data_dir.strpath', '"""pyi_icon.ico"""'], {}), "(data_dir.strpath, 'pyi_icon.ico')\n", (5120, 5154), False, 'import os\n'), ((5341, 5404), 'pytest.skip', 'pytest.skip', (['"""option --icon works only on Windows and Mac OS X"""'], {}), "('option --icon works only on Windows and Mac OS X')\n", (5352, 5404), False, 'import pytest\n'), ((6177, 6230), 'os.path.join', 'os.path.join', (['tmpdir.strpath', '"""stderr_encoding.build"""'], {}), "(tmpdir.strpath, 'stderr_encoding.build')\n", (6189, 6230), False, 'import os\n'), ((6588, 6607), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (6605, 6607), False, 'import sys\n'), ((7017, 7070), 'os.path.join', 'os.path.join', (['tmpdir.strpath', '"""stdout_encoding.build"""'], {}), "(tmpdir.strpath, 'stdout_encoding.build')\n", (7029, 7070), False, 'import os\n'), ((7428, 7447), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (7445, 7447), False, 'import sys\n'), ((9353, 9372), 'os.rename', 'os.rename', (['old', 'new'], {}), '(old, new)\n', (9362, 9372), False, 'import os\n'), ((2842, 2864), 'os.environ.get', 'os.environ.get', (['"""PATH"""'], {}), "('PATH')\n", (2856, 2864), False, 'import os\n'), ((5274, 5321), 'os.path.join', 'os.path.join', (['data_dir.strpath', '"""pyi_icon.icns"""'], {}), "(data_dir.strpath, 'pyi_icon.icns')\n", (5286, 5321), False, 'import os\n'), ((6277, 6296), 'sys.stderr.isatty', 'sys.stderr.isatty', ([], {}), '()\n', (6294, 6296), False, 'import sys\n'), ((6844, 6878), 'locale.getpreferredencoding', 
'locale.getpreferredencoding', (['(False)'], {}), '(False)\n', (6871, 6878), False, 'import locale\n'), ((7117, 7136), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (7134, 7136), False, 'import sys\n'), ((7684, 7718), 'locale.getpreferredencoding', 'locale.getpreferredencoding', (['(False)'], {}), '(False)\n', (7711, 7718), False, 'import locale\n'), ((9283, 9303), 'os.path.dirname', 'os.path.dirname', (['old'], {}), '(old)\n', (9298, 9303), False, 'import os\n'), ((9318, 9339), 'os.path.basename', 'os.path.basename', (['old'], {}), '(old)\n', (9334, 9339), False, 'import os\n')] |
from flask import flash, render_template, request, redirect, url_for
from flask_login import (
LoginManager,
login_required,
login_user,
logout_user,
)
from login import app, db
from login.models import User
login_manager = LoginManager(app)
login_manager.login_view = "login_page"
class HttpMethod:
GET: str = "GET"
POST: str = "POST"
@classmethod
def new_request(cls) -> tuple:
return cls.GET, cls.POST
def add_user(username: str, password: str) -> None:
db.session.add(User(username=username, password=password))
db.session.commit()
flash("User is created")
@app.route("/")
@app.route("/index.html")
def index() -> str:
return render_template("index.html")
@app.route("/dashboard")
@login_required
def dashboard() -> str:
return render_template("dashboard.html")
@app.route("/login", methods=HttpMethod.new_request())
def login_page() -> str:
if request.method == HttpMethod.POST and "username" in request.form:
user = User.query.filter_by(
username=request.form.get("username")
).first()
if user:
if user.password == request.form.get("password"):
login_user(user)
return redirect(url_for("dashboard"))
return "Invalid username or password"
return render_template("login.html")
@app.route("/logout")
@login_required
def logout_page() -> str:
logout_user()
return redirect(url_for("index"))
@app.route("/create_user", methods=HttpMethod.new_request())
def create_user() -> str:
if request.method == HttpMethod.POST and "username" in request.form:
username = request.form.get("username")
password = request.form.get("password")
add_user(username, password)
return render_template("create_user.html")
@login_manager.user_loader
def load_user(user_id: str):
return User.query.get(int(user_id))
| [
"flask.render_template",
"flask_login.LoginManager",
"login.models.User",
"flask.flash",
"flask_login.login_user",
"flask_login.logout_user",
"flask.url_for",
"flask.request.form.get",
"login.db.session.commit",
"login.app.route"
] | [((242, 259), 'flask_login.LoginManager', 'LoginManager', (['app'], {}), '(app)\n', (254, 259), False, 'from flask_login import LoginManager, login_required, login_user, logout_user\n'), ((623, 637), 'login.app.route', 'app.route', (['"""/"""'], {}), "('/')\n", (632, 637), False, 'from login import app, db\n'), ((639, 663), 'login.app.route', 'app.route', (['"""/index.html"""'], {}), "('/index.html')\n", (648, 663), False, 'from login import app, db\n'), ((728, 751), 'login.app.route', 'app.route', (['"""/dashboard"""'], {}), "('/dashboard')\n", (737, 751), False, 'from login import app, db\n'), ((1353, 1373), 'login.app.route', 'app.route', (['"""/logout"""'], {}), "('/logout')\n", (1362, 1373), False, 'from login import app, db\n'), ((571, 590), 'login.db.session.commit', 'db.session.commit', ([], {}), '()\n', (588, 590), False, 'from login import app, db\n'), ((595, 619), 'flask.flash', 'flash', (['"""User is created"""'], {}), "('User is created')\n", (600, 619), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((695, 724), 'flask.render_template', 'render_template', (['"""index.html"""'], {}), "('index.html')\n", (710, 724), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((803, 836), 'flask.render_template', 'render_template', (['"""dashboard.html"""'], {}), "('dashboard.html')\n", (818, 836), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1320, 1349), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html')\n", (1335, 1349), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1420, 1433), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (1431, 1433), False, 'from flask_login import LoginManager, login_required, login_user, logout_user\n'), ((1778, 1813), 'flask.render_template', 'render_template', (['"""create_user.html"""'], {}), "('create_user.html')\n", (1793, 1813), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((523, 565), 'login.models.User', 'User', ([], {'username': 'username', 'password': 'password'}), '(username=username, password=password)\n', (527, 565), False, 'from login.models import User\n'), ((1454, 1470), 'flask.url_for', 'url_for', (['"""index"""'], {}), "('index')\n", (1461, 1470), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1653, 1681), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (1669, 1681), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1701, 1729), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (1717, 1729), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1146, 1174), 'flask.request.form.get', 'request.form.get', (['"""password"""'], {}), "('password')\n", (1162, 1174), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1192, 1208), 'flask_login.login_user', 'login_user', (['user'], {}), '(user)\n', (1202, 1208), False, 'from flask_login import LoginManager, login_required, login_user, logout_user\n'), ((1241, 1261), 'flask.url_for', 'url_for', (['"""dashboard"""'], {}), "('dashboard')\n", (1248, 1261), False, 'from flask import flash, render_template, request, redirect, url_for\n'), ((1050, 1078), 'flask.request.form.get', 'request.form.get', (['"""username"""'], {}), "('username')\n", (1066, 1078), False, 'from 
flask import flash, render_template, request, redirect, url_for\n')] |
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import random
import time

import numpy as np
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from utils.logging import AverageMeter, ProgressMeter
from utils.net_utils import (
set_model_top_k,
freeze_model_weights,
save_checkpoint,
get_lr,
LabelSmoothing,
)
from utils.schedulers import get_policy
from utils.profiling import estimate_params_size
from args import args
import importlib
import data
import models
def main():
print(args)
if args.seed is not None:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
# Simply call main_worker function
main_worker(args)
def main_worker(args):
args.gpu = None
train, validate, test, modifier = get_trainer(args)
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
# create model and optimizer
model = get_model(args)
model = set_gpu(args, model)
if args.pretrained:
pretrained(args, model)
optimizer = get_optimizer(args, model)
data = get_dataset(args)
lr_policy = get_policy(args.lr_policy)(optimizer, args)
if args.label_smoothing is None:
criterion = nn.CrossEntropyLoss().cuda()
else:
criterion = LabelSmoothing(smoothing=args.label_smoothing)
# report estimates of #parameters and memory size
estimate_params_size(model, args)
# optionally resume from a checkpoint
best_acc1 = 0.0
best_acc5 = 0.0
best_train_acc1 = 0.0
best_train_acc5 = 0.0
test_acc1 = -1
test_acc5 = -1
best_epoch = -1
if args.resume:
best_acc1 = resume(args, model, optimizer)
# Data loading code
if args.evaluate:
acc1, acc5 = validate(
data.val_loader, model, criterion, args, writer=None, epoch=args.start_epoch
)
return
# Set up directories
run_base_dir, ckpt_base_dir, log_base_dir = get_directories(args)
args.ckpt_base_dir = ckpt_base_dir
writer = SummaryWriter(log_dir=log_base_dir)
epoch_time = AverageMeter("epoch_time", ":.4f", write_avg=False)
validation_time = AverageMeter("validation_time", ":.4f", write_avg=False)
train_time = AverageMeter("train_time", ":.4f", write_avg=False)
progress_overall = ProgressMeter(
1, [epoch_time, validation_time, train_time], prefix="Overall Timing"
)
end_epoch = time.time()
args.start_epoch = args.start_epoch or 0
acc1 = None
# Save the initial state
save_checkpoint(
{
"epoch": 0,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"best_acc5": best_acc5,
"best_train_acc1": best_train_acc1,
"best_train_acc5": best_train_acc5,
"optimizer": optimizer.state_dict(),
"curr_acc1": acc1 if acc1 else "Not evaluated",
},
False,
filename=ckpt_base_dir / f"initial.state",
save=False,
)
# Start training
for epoch in range(args.start_epoch, args.epochs):
modifier(args, epoch, model)
cur_lr = get_lr(optimizer)
# train for one epoch
start_train = time.time()
train_acc1, train_acc5 = train(
data.train_loader, model, criterion, optimizer, epoch, args, writer=writer
)
train_time.update((time.time() - start_train) / 60)
lr_policy(epoch, iteration=None)
# evaluate on validation set
start_validation = time.time()
acc1, acc5 = validate(data.val_loader, model, criterion, args, writer, epoch)
validation_time.update((time.time() - start_validation) / 60)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
best_acc5 = max(acc5, best_acc5)
best_train_acc1 = max(train_acc1, best_train_acc1)
best_train_acc5 = max(train_acc5, best_train_acc5)
save = ((epoch % args.save_every) == 0) and args.save_every > 0
if is_best or save or epoch == args.epochs - 1:
if is_best:
best_epoch = epoch
print(f"==> New best, saving at {ckpt_base_dir / 'model_best.pth'}")
save_checkpoint(
{
"epoch": epoch + 1,
"arch": args.arch,
"state_dict": model.state_dict(),
"best_acc1": best_acc1,
"best_acc5": best_acc5,
"best_train_acc1": best_train_acc1,
"best_train_acc5": best_train_acc5,
"optimizer": optimizer.state_dict(),
"curr_acc1": acc1,
"curr_acc5": acc5,
},
is_best,
filename=ckpt_base_dir / f"epoch_{epoch}.state",
save=save,
)
epoch_time.update((time.time() - end_epoch) / 60)
progress_overall.display(epoch)
progress_overall.write_to_tensorboard(
writer, prefix="diagnostics", global_step=epoch
)
writer.add_scalar("test/lr", cur_lr, epoch)
end_epoch = time.time()
    if args.test and best_epoch > -1:
print(f"\n\n TEST on best valid (epoch {best_epoch})")
args.pretrained = f"{ckpt_base_dir / 'model_best.pth'}"
pretrained(args, model)
test_acc1, test_acc5 = test(data.test_loader, model, criterion, args, writer, best_epoch)
if args.epochs > 0:
write_result_to_csv(
best_epoch=best_epoch,
test_acc1=test_acc1,
test_acc5=test_acc5,
best_acc1=best_acc1,
best_acc5=best_acc5,
best_train_acc1=best_train_acc1,
best_train_acc5=best_train_acc5,
top_k=args.top_k,
curr_acc1=acc1,
curr_acc5=acc5,
base_config=args.config,
name=args.name,
)
print(f"Best train acc.:\t{best_acc1:.2f}% valid, {best_train_acc1:.2f}% train @epoch {best_epoch}")
print(f"Test acc.:\t{test_acc1:.2f}%")
def get_trainer(args):
print(f"=> Using trainer from trainers.{args.trainer}")
trainer = importlib.import_module(f"trainers.{args.trainer}")
return trainer.train, trainer.validate, trainer.test, trainer.modifier
def set_gpu(args, model):
assert torch.cuda.is_available(), "CPU-only experiments currently unsupported"
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
elif args.multigpu is None:
device = torch.device("cpu")
else:
# DataParallel will divide and allocate batch_size to all available GPUs
print(f"=> Parallelizing on {args.multigpu} gpus")
torch.cuda.set_device(args.multigpu[0])
args.gpu = args.multigpu[0]
model = torch.nn.DataParallel(model, device_ids=args.multigpu).cuda(
args.multigpu[0]
)
    if args.seed is None:
cudnn.benchmark = True
else:
cudnn.benchmark = False
return model
def resume(args, model, optimizer):
if os.path.isfile(args.resume):
print(f"=> Loading checkpoint '{args.resume}'")
checkpoint = torch.load(args.resume, map_location=f"cuda:{args.multigpu[0]}")
if args.start_epoch is None:
print(f"=> Setting new start epoch at {checkpoint['epoch']}")
args.start_epoch = checkpoint["epoch"]
best_acc1 = checkpoint["best_acc1"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
print(f"=> Loaded checkpoint '{args.resume}' (epoch {checkpoint['epoch']})")
return best_acc1
else:
print(f"=> No checkpoint found at '{args.resume}'")
def pretrained(args, model):
if os.path.isfile(args.pretrained):
print("=> loading pretrained weights from '{}'".format(args.pretrained))
pretrained = torch.load(
args.pretrained,
map_location=torch.device("cuda:{}".format(args.multigpu[0])),
)["state_dict"]
model_state_dict = model.state_dict()
for k, v in pretrained.items():
if k not in model_state_dict or v.size() != model_state_dict[k].size():
print("IGNORE:", k)
pretrained = {
k: v
for k, v in pretrained.items()
if (k in model_state_dict and v.size() == model_state_dict[k].size())
}
model_state_dict.update(pretrained)
model.load_state_dict(model_state_dict)
else:
print("=> no pretrained weights found at '{}'".format(args.pretrained))
def get_dataset(args):
print(f"=> Getting {args.set} dataset")
dataset = getattr(data, args.set)(args)
return dataset
def get_model(args):
if args.first_layer_dense:
args.first_layer_type = "DenseConv"
print("=> Creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# applying sparsity to the network
if (args.conv_type != "DenseConv"):
if args.top_k < 0:
raise ValueError("Need to set a positive top k")
set_model_top_k(model, top_k=args.top_k)
# freezing the weights if we are only doing subnet training
if args.freeze_weights:
freeze_model_weights(model)
return model
def get_optimizer(args, model):
for n, v in model.named_parameters():
if v.requires_grad:
print("<DEBUG> gradient to", n)
if not v.requires_grad:
print("<DEBUG> no gradient to", n)
if args.optimizer == "sgd":
parameters = list(model.named_parameters())
bn_params = [v for n, v in parameters if ("bn" in n) and v.requires_grad]
rest_params = [v for n, v in parameters if ("bn" not in n) and v.requires_grad]
optimizer = torch.optim.SGD(
[
{
"params": bn_params,
"weight_decay": 0 if args.no_bn_decay else args.weight_decay,
},
{"params": rest_params, "weight_decay": args.weight_decay},
],
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=args.nesterov,
)
elif args.optimizer == "adam":
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr
)
return optimizer
def _run_dir_exists(run_base_dir):
log_base_dir = run_base_dir / "logs"
ckpt_base_dir = run_base_dir / "checkpoints"
return log_base_dir.exists() or ckpt_base_dir.exists()
def get_directories(args):
if args.config is None or args.name is None:
raise ValueError("Must have name and config")
config = pathlib.Path(args.config).stem
if args.log_dir is None:
run_base_dir = pathlib.Path(
f"runs/{config}/{args.name}/top_k={args.top_k}"
)
else:
run_base_dir = pathlib.Path(
f"{args.log_dir}/{config}/{args.name}/top_k={args.top_k}"
)
if args.width_mult != 1.0:
run_base_dir = run_base_dir / "width_mult={}".format(str(args.width_mult))
if _run_dir_exists(run_base_dir):
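        # Append an incrementing suffix so an existing run directory is never overwritten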
rep_count = 0
while _run_dir_exists(run_base_dir / str(rep_count)):
rep_count += 1
run_base_dir = run_base_dir / str(rep_count)
log_base_dir = run_base_dir / "logs"
ckpt_base_dir = run_base_dir / "checkpoints"
if not run_base_dir.exists():
os.makedirs(run_base_dir)
(run_base_dir / "settings.txt").write_text(str(args))
return run_base_dir, ckpt_base_dir, log_base_dir
def write_result_to_csv(**kwargs):
results = pathlib.Path("runs") / "results.csv"
if not results.exists():
results.write_text(
"Date Finished, "
"Base Config, "
"Name, "
"Top K, "
"Best epoch, "
"Test Top 1, "
"Test Top 5, "
"Current Val Top 1, "
"Current Val Top 5, "
"Best Val Top 1, "
"Best Val Top 5, "
"Best Train Top 1, "
"Best Train Top 5\n"
)
now = time.strftime("%m-%d-%y_%H:%M:%S")
with open(results, "a+") as f:
f.write(
(
"{now}, "
"{base_config}, "
"{name}, "
"{top_k}, "
"{best_epoch}, "
"{test_acc1}, "
"{test_acc5}, "
"{curr_acc1:.02f}, "
"{curr_acc5:.02f}, "
"{best_acc1:.02f}, "
"{best_acc5:.02f}, "
"{best_train_acc1:.02f}, "
"{best_train_acc5:.02f}\n"
).format(now=now, **kwargs)
)
if __name__ == "__main__":
main()
| [
"utils.schedulers.get_policy",
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.utils.tensorboard.SummaryWriter",
"utils.net_utils.set_model_top_k",
"utils.net_utils.freeze_model_weights",
"pathlib.Path",
"utils.logging.ProgressMeter",
"utils.logging.AverageMeter",
"torch.optim.SGD",
"utils.profiling.estimate_params_size",
"importlib.import_module",
"utils.net_utils.LabelSmoothing",
"os.path.isfile",
"torch.cuda.set_device",
"time.time",
"torch.device",
"torch.cuda.manual_seed_all",
"torch.manual_seed",
"os.makedirs",
"torch.load",
"time.strftime",
"torch.nn.DataParallel",
"random.seed",
"utils.net_utils.get_lr",
"torch.cuda.manual_seed"
] | [((2326, 2359), 'utils.profiling.estimate_params_size', 'estimate_params_size', (['model', 'args'], {}), '(model, args)\n', (2346, 2359), False, 'from utils.profiling import estimate_params_size\n'), ((2966, 3001), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', ([], {'log_dir': 'log_base_dir'}), '(log_dir=log_base_dir)\n', (2979, 3001), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((3019, 3070), 'utils.logging.AverageMeter', 'AverageMeter', (['"""epoch_time"""', '""":.4f"""'], {'write_avg': '(False)'}), "('epoch_time', ':.4f', write_avg=False)\n", (3031, 3070), False, 'from utils.logging import AverageMeter, ProgressMeter\n'), ((3093, 3149), 'utils.logging.AverageMeter', 'AverageMeter', (['"""validation_time"""', '""":.4f"""'], {'write_avg': '(False)'}), "('validation_time', ':.4f', write_avg=False)\n", (3105, 3149), False, 'from utils.logging import AverageMeter, ProgressMeter\n'), ((3167, 3218), 'utils.logging.AverageMeter', 'AverageMeter', (['"""train_time"""', '""":.4f"""'], {'write_avg': '(False)'}), "('train_time', ':.4f', write_avg=False)\n", (3179, 3218), False, 'from utils.logging import AverageMeter, ProgressMeter\n'), ((3242, 3331), 'utils.logging.ProgressMeter', 'ProgressMeter', (['(1)', '[epoch_time, validation_time, train_time]'], {'prefix': '"""Overall Timing"""'}), "(1, [epoch_time, validation_time, train_time], prefix=\n 'Overall Timing')\n", (3255, 3331), False, 'from utils.logging import AverageMeter, ProgressMeter\n'), ((3358, 3369), 'time.time', 'time.time', ([], {}), '()\n', (3367, 3369), False, 'import time\n'), ((7226, 7277), 'importlib.import_module', 'importlib.import_module', (['f"""trainers.{args.trainer}"""'], {}), "(f'trainers.{args.trainer}')\n", (7249, 7277), False, 'import importlib\n'), ((7393, 7418), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7416, 7418), False, 'import torch\n'), ((8155, 8182), 'os.path.isfile', 'os.path.isfile', (['args.resume'], {}), '(args.resume)\n', (8169, 8182), False, 'import os\n'), ((8871, 8902), 'os.path.isfile', 'os.path.isfile', (['args.pretrained'], {}), '(args.pretrained)\n', (8885, 8902), False, 'import os\n'), ((13281, 13315), 'time.strftime', 'time.strftime', (['"""%m-%d-%y_%H:%M:%S"""'], {}), "('%m-%d-%y_%H:%M:%S')\n", (13294, 13315), False, 'import time\n'), ((1382, 1404), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (1393, 1404), False, 'import random\n'), ((1447, 1475), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1464, 1475), False, 'import torch\n'), ((1484, 1517), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['args.seed'], {}), '(args.seed)\n', (1506, 1517), False, 'import torch\n'), ((1526, 1563), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['args.seed'], {}), '(args.seed)\n', (1552, 1563), False, 'import torch\n'), ((2058, 2084), 'utils.schedulers.get_policy', 'get_policy', (['args.lr_policy'], {}), '(args.lr_policy)\n', (2068, 2084), False, 'from utils.schedulers import get_policy\n'), ((2219, 2265), 'utils.net_utils.LabelSmoothing', 'LabelSmoothing', ([], {'smoothing': 'args.label_smoothing'}), '(smoothing=args.label_smoothing)\n', (2233, 2265), False, 'from utils.net_utils import set_model_top_k, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((4105, 4122), 'utils.net_utils.get_lr', 'get_lr', (['optimizer'], {}), '(optimizer)\n', (4111, 4122), False, 'from utils.net_utils import set_model_top_k, freeze_model_weights, save_checkpoint, get_lr, 
LabelSmoothing\n'), ((4176, 4187), 'time.time', 'time.time', ([], {}), '()\n', (4185, 4187), False, 'import time\n'), ((4500, 4511), 'time.time', 'time.time', ([], {}), '()\n', (4509, 4511), False, 'import time\n'), ((6194, 6205), 'time.time', 'time.time', ([], {}), '()\n', (6203, 6205), False, 'import time\n'), ((7503, 7534), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.gpu'], {}), '(args.gpu)\n', (7524, 7534), False, 'import torch\n'), ((8262, 8326), 'torch.load', 'torch.load', (['args.resume'], {'map_location': 'f"""cuda:{args.multigpu[0]}"""'}), "(args.resume, map_location=f'cuda:{args.multigpu[0]}')\n", (8272, 8326), False, 'import torch\n'), ((10215, 10255), 'utils.net_utils.set_model_top_k', 'set_model_top_k', (['model'], {'top_k': 'args.top_k'}), '(model, top_k=args.top_k)\n', (10230, 10255), False, 'from utils.net_utils import set_model_top_k, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((10357, 10384), 'utils.net_utils.freeze_model_weights', 'freeze_model_weights', (['model'], {}), '(model)\n', (10377, 10384), False, 'from utils.net_utils import set_model_top_k, freeze_model_weights, save_checkpoint, get_lr, LabelSmoothing\n'), ((10906, 11170), 'torch.optim.SGD', 'torch.optim.SGD', (["[{'params': bn_params, 'weight_decay': 0 if args.no_bn_decay else args.\n weight_decay}, {'params': rest_params, 'weight_decay': args.weight_decay}]", 'args.lr'], {'momentum': 'args.momentum', 'weight_decay': 'args.weight_decay', 'nesterov': 'args.nesterov'}), "([{'params': bn_params, 'weight_decay': 0 if args.\n no_bn_decay else args.weight_decay}, {'params': rest_params,\n 'weight_decay': args.weight_decay}], args.lr, momentum=args.momentum,\n weight_decay=args.weight_decay, nesterov=args.nesterov)\n", (10921, 11170), False, 'import torch\n'), ((11851, 11876), 'pathlib.Path', 'pathlib.Path', (['args.config'], {}), '(args.config)\n', (11863, 11876), False, 'import pathlib\n'), ((11934, 11995), 'pathlib.Path', 'pathlib.Path', (['f"""runs/{config}/{args.name}/top_k={args.top_k}"""'], {}), "(f'runs/{config}/{args.name}/top_k={args.top_k}')\n", (11946, 11995), False, 'import pathlib\n'), ((12051, 12122), 'pathlib.Path', 'pathlib.Path', (['f"""{args.log_dir}/{config}/{args.name}/top_k={args.top_k}"""'], {}), "(f'{args.log_dir}/{config}/{args.name}/top_k={args.top_k}')\n", (12063, 12122), False, 'import pathlib\n'), ((12597, 12622), 'os.makedirs', 'os.makedirs', (['run_base_dir'], {}), '(run_base_dir)\n', (12608, 12622), False, 'import os\n'), ((12787, 12807), 'pathlib.Path', 'pathlib.Path', (['"""runs"""'], {}), "('runs')\n", (12799, 12807), False, 'import pathlib\n'), ((7621, 7640), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (7633, 7640), False, 'import torch\n'), ((7799, 7838), 'torch.cuda.set_device', 'torch.cuda.set_device', (['args.multigpu[0]'], {}), '(args.multigpu[0])\n', (7820, 7838), False, 'import torch\n'), ((2160, 2181), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (2179, 2181), True, 'import torch.nn as nn\n'), ((4352, 4363), 'time.time', 'time.time', ([], {}), '()\n', (4361, 4363), False, 'import time\n'), ((4630, 4641), 'time.time', 'time.time', ([], {}), '()\n', (4639, 4641), False, 'import time\n'), ((5921, 5932), 'time.time', 'time.time', ([], {}), '()\n', (5930, 5932), False, 'import time\n'), ((7891, 7945), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {'device_ids': 'args.multigpu'}), '(model, device_ids=args.multigpu)\n', (7912, 7945), False, 'import torch\n')] |
import contextlib
import uuid
from pathlib import Path
_GENERATED_NAMESPACE = uuid.UUID('db509c23-800c-41d5-9d00-359fc120e87a')
PROLOGUE = r"""<?xml version="1.0" encoding="utf-8"?>
<Project DefaultTargets="Build" ToolsVersion="Current" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">"""
TARGETS = Path(__file__).parent / "targets"
def _guid(target_name):
return uuid.uuid3(_GENERATED_NAMESPACE, target_name)
class CV:
has_condition = True
def __init__(self, value, condition=None, if_empty=False):
self.value = str(value)
self.condition = condition
self.if_empty = if_empty
def __str__(self):
return self.value
class ProjectFileWriter:
def __init__(self, filename, target_name, *, vc_platforms=None, root_namespace=None):
self.filename = filename
self.target_name = target_name
self.root_namespace = root_namespace or target_name
self._file = None
self._vc_platforms = vc_platforms
self.indent = 2
self.current_group = None
def __enter__(self):
Path(self.filename).parent.mkdir(parents=True, exist_ok=True)
self._file = open(self.filename, "w", encoding="utf-8")
print(PROLOGUE, file=self._file)
if self._vc_platforms is True:
self.add_vc_platforms()
elif self._vc_platforms:
self.add_vc_platforms(*self._vc_platforms)
with self.group("PropertyGroup", Label="Globals"):
self.add_property("Configuration", CV("Release", "$(Configuration) == ''"))
self.add_property("Platform", CV("x64", "$(Platform) == ''"))
self.add_property("ProjectGuid", _guid(self.target_name))
self.add_property("RootNamespace", self.root_namespace)
self.add_property("TargetName", self.target_name)
self.add_property("PyMsbuildTargets", CV(TARGETS, if_empty=True))
return self
def __exit__(self, *exc_info):
print("</Project>", file=self._file)
self._file.flush()
self._file.close()
self._file = None
def write(self, *text):
print(" " * self.indent, *text, sep="", file=self._file)
@contextlib.contextmanager
def group(self, tag, **attributes):
if attributes:
self.write("<", tag, *(' {}="{}"'.format(*i) for i in attributes.items() if all(i)), ">")
else:
self.write("<", tag, ">")
self.indent += 2
old_group, self.current_group = self.current_group, tag
yield
self.current_group = old_group
self.indent -= 2
self.write("</", tag, ">")
def _write_value(self, name, value, symbol='$'):
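        # Emit a <name>value</name> element, honoring the optional CV hints
        # (condition, if_empty, append, prepend) attached to the value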
if isinstance(value, (tuple, list)):
for v in value:
self._write_value(name, v, symbol)
return
c = None
v = str(value)
if getattr(value, "has_condition", None):
c = value.condition
if getattr(value, "if_empty", False):
c = "{}({}) == ''".format(symbol, name)
if getattr(value, "append", False):
v = "{}({}){}".format(symbol, name, v)
if getattr(value, "prepend", False):
v = "{}{}({})".format(v, symbol, name)
if c:
self.write("<", name, ' Condition="', c, '">', v, "</", name, ">")
else:
self.write("<", name, ">", v, "</", name, ">")
def add_property(self, name, value):
self._write_value(name, value, "$")
def add_item(self, kind, name, **metadata):
n = str(name)
c = None
if getattr(name, "has_condition", False):
c = name.condition
if getattr(name, "if_empty", False):
c = "@({}) == ''".format(kind)
if getattr(name, "append", False) or getattr(name, "prepend", False):
raise ValueError("'append' and 'prepend' are not supported on '{}'".format(name))
if metadata:
with self.group(kind, Include=n, Condition=c):
for k, v in metadata.items():
if v is not None:
self._write_value(k, v, "%")
else:
if c:
self.write("<", kind, ' Include="', n, '" Condition="', c, '" />')
else:
self.write("<", kind, ' Include="', n, '" />')
def add_item_property(self, kind, name, value):
self._write_value(name, value, "%")
def add_import(self, project):
self.write('<Import Project="', project, '" />')
def add_vc_platforms(self, platforms=None, configurations=None):
if not platforms:
platforms = ["Win32", "x64", "ARM", "ARM64"]
if not configurations:
configurations = ["Debug", "Release"]
with self.group("ItemGroup", Label="ProjectConfigurations"):
for c in configurations:
for p in platforms:
with self.group("ProjectConfiguration", Include="{}|{}".format(c, p)):
self.add_property("Configuration", c)
self.add_property("Platform", p)
def add_text(self, text):
for line in text.splitlines():
self.write(line)
| [
"uuid.uuid3",
"uuid.UUID",
"pathlib.Path"
] | [((80, 129), 'uuid.UUID', 'uuid.UUID', (['"""db509c23-800c-41d5-9d00-359fc120e87a"""'], {}), "('db509c23-800c-41d5-9d00-359fc120e87a')\n", (89, 129), False, 'import uuid\n'), ((387, 432), 'uuid.uuid3', 'uuid.uuid3', (['_GENERATED_NAMESPACE', 'target_name'], {}), '(_GENERATED_NAMESPACE, target_name)\n', (397, 432), False, 'import uuid\n'), ((316, 330), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (320, 330), False, 'from pathlib import Path\n'), ((1093, 1112), 'pathlib.Path', 'Path', (['self.filename'], {}), '(self.filename)\n', (1097, 1112), False, 'from pathlib import Path\n')] |
#!/usr/bin/env python
#
# Copyright 2020-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import nbconvert
import os
import numpy as np
with open("assignment7.ipynb") as f:
exporter = nbconvert.PythonExporter()
python_file, _ = exporter.from_file(f)
with open("assignment7.py", "w") as f:
f.write(python_file)
from assignment7 import convert_to_true_stress_and_strain
class TestSolution(unittest.TestCase):
def test_convert_to_true_stress_and_strain(self):
strain, stress = convert_to_true_stress_and_strain('data.dat')
np.testing.assert_allclose(strain[:10], np.array([ 1.76974413e-06, 2.19162248e-05, -3.19850395e-05, -2.99607468e-05,
2.42023361e-05, -1.02986180e-05, 1.80243056e-05, 2.69191677e-05,
7.80963814e-05, 4.51086396e-05]), atol=1e-6)
np.testing.assert_allclose(strain[-10:], np.array([0.59983723, 0.59999834, 0.60013837, 0.60030186, 0.60047056,
0.6006305 , 0.60080112, 0.60096908, 0.60115796, 0.60148428]), atol=1e-6)
np.testing.assert_allclose(stress[:10], np.array([ 310.00135992, 570.65679508, 817.77043635, 945.39539323,
1192.34923999, 1423.21648246, 1605.57296261, 1851.96319545,
2099.05379863, 2286.42636236]), atol=1e-6)
np.testing.assert_allclose(stress[-10:], np.array([112492.77647224, 112254.75315531, 112024.73779468, 111711.26437979,
111496.03728211, 111091.35149831, 110849.85117293, 110550.18990996,
110154.87432769, 108773.98868365]), atol=1e-6)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"numpy.array",
"nbconvert.PythonExporter",
"assignment7.convert_to_true_stress_and_strain"
] | [((713, 739), 'nbconvert.PythonExporter', 'nbconvert.PythonExporter', ([], {}), '()\n', (737, 739), False, 'import nbconvert\n'), ((2089, 2104), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2102, 2104), False, 'import unittest\n'), ((1035, 1080), 'assignment7.convert_to_true_stress_and_strain', 'convert_to_true_stress_and_strain', (['"""data.dat"""'], {}), "('data.dat')\n", (1068, 1080), False, 'from assignment7 import convert_to_true_stress_and_strain\n'), ((1131, 1312), 'numpy.array', 'np.array', (['[1.76974413e-06, 2.19162248e-05, -3.19850395e-05, -2.99607468e-05, \n 2.42023361e-05, -1.0298618e-05, 1.80243056e-05, 2.69191677e-05, \n 7.80963814e-05, 4.51086396e-05]'], {}), '([1.76974413e-06, 2.19162248e-05, -3.19850395e-05, -2.99607468e-05,\n 2.42023361e-05, -1.0298618e-05, 1.80243056e-05, 2.69191677e-05, \n 7.80963814e-05, 4.51086396e-05])\n', (1139, 1312), True, 'import numpy as np\n'), ((1395, 1529), 'numpy.array', 'np.array', (['[0.59983723, 0.59999834, 0.60013837, 0.60030186, 0.60047056, 0.6006305, \n 0.60080112, 0.60096908, 0.60115796, 0.60148428]'], {}), '([0.59983723, 0.59999834, 0.60013837, 0.60030186, 0.60047056, \n 0.6006305, 0.60080112, 0.60096908, 0.60115796, 0.60148428])\n', (1403, 1529), True, 'import numpy as np\n'), ((1597, 1763), 'numpy.array', 'np.array', (['[310.00135992, 570.65679508, 817.77043635, 945.39539323, 1192.34923999, \n 1423.21648246, 1605.57296261, 1851.96319545, 2099.05379863, 2286.42636236]'], {}), '([310.00135992, 570.65679508, 817.77043635, 945.39539323, \n 1192.34923999, 1423.21648246, 1605.57296261, 1851.96319545, \n 2099.05379863, 2286.42636236])\n', (1605, 1763), True, 'import numpy as np\n'), ((1841, 2031), 'numpy.array', 'np.array', (['[112492.77647224, 112254.75315531, 112024.73779468, 111711.26437979, \n 111496.03728211, 111091.35149831, 110849.85117293, 110550.18990996, \n 110154.87432769, 108773.98868365]'], {}), '([112492.77647224, 112254.75315531, 112024.73779468, \n 111711.26437979, 111496.03728211, 111091.35149831, 110849.85117293, \n 110550.18990996, 110154.87432769, 108773.98868365])\n', (1849, 2031), True, 'import numpy as np\n')] |
from collections import defaultdict
from py.src.logger import log
from py.src.match.model.cfn import (
CFNMatchModel,
CFNRoundModel,
)
class keydefaultdict(defaultdict):
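    """defaultdict variant that passes the missing key to default_factory,
    e.g. keydefaultdict(str.upper)["abc"] computes, caches and returns "ABC"."""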
def __missing__(self, key):
if self.default_factory is None:
raise KeyError(key)
else:
ret = self[key] = self.default_factory(key)
return ret
class OpponentData():
def __init__(self, opponent_character_id=None, opp_dict=None):
self.character_id = (
opp_dict['id'] if opp_dict else opponent_character_id
)
self.game_count = opp_dict['gc'] if opp_dict else 0
self.game_wins = opp_dict['gw'] if opp_dict else 0
self.round_count = opp_dict['rc'] if opp_dict else 0
self.round_wins = opp_dict['rw'] if opp_dict else 0
round_type_dict = opp_dict['rt'] if opp_dict else None
round_type_lookup = {}
if round_type_dict:
for round_type_key, round_type_count in round_type_dict.items():
int_key = int(round_type_key)
round_type_lookup[int_key] = round_type_count
self.round_types = defaultdict(
int,
round_type_lookup,
)
def to_dict(self):
return {
'id': self.character_id,
'gc': self.game_count,
'gw': self.game_wins,
'rc': self.round_count,
'rw': self.round_wins,
'rt': dict(self.round_types),
}
@classmethod
def from_character_id(caller, opponent_character_id):
return OpponentData(opponent_character_id=opponent_character_id)
def combine(self, other_opponent_data):
if self.character_id != other_opponent_data.character_id:
raise Exception
new_opponent_data = OpponentData(
opp_dict=other_opponent_data.to_dict()
)
new_opponent_data.game_count += self.game_count
new_opponent_data.game_wins += self.game_wins
new_opponent_data.round_count += self.round_count
new_opponent_data.round_wins += self.round_wins
for round_type, type_count in self.round_types.items():
new_opponent_data.round_types[round_type] += type_count
return new_opponent_data
class CharacterData():
def __init__(self, character_id=None, char_dict=None):
self.character_id = char_dict['id'] if char_dict else character_id
opp_lookup = {}
if char_dict:
for opp_dict in char_dict['o'].values():
opp_data = OpponentData(opp_dict=opp_dict)
opp_lookup[opp_data.character_id] = opp_data
self.opponents = keydefaultdict(
OpponentData.from_character_id,
opp_lookup,
)
def to_dict(self):
opp_lookup = {}
for opp_key, opp_data in self.opponents.items():
opp_lookup[opp_key] = opp_data.to_dict()
return {
'id': self.character_id,
'o': opp_lookup,
}
@classmethod
def from_character_id(caller, character_id):
return CharacterData(character_id=character_id)
def calculate_stats(self):
self.game_count = 0
self.game_wins = 0
self.round_count = 0
self.round_wins = 0
self.round_types = defaultdict(int)
for opp_data in self.opponents.values():
self.game_count += opp_data.game_count
self.game_wins += opp_data.game_wins
self.round_count += opp_data.round_count
self.round_wins += opp_data.round_wins
for round_type, type_count in opp_data.round_types.items():
self.round_types[round_type] += type_count
def combine(self, other_character_data):
if self.character_id != other_character_data.character_id:
raise Exception
new_character_data = CharacterData(
char_dict=other_character_data.to_dict()
)
for opp_key, opp_data in self.opponents.items():
new_character_data.opponents[opp_key] = (
new_character_data.opponents[opp_key].combine(opp_data)
)
return new_character_data
class _BasePlayerData():
def __init__(self, player_dict=None):
char_lookup = {}
if player_dict:
for char_dict in player_dict['c'].values():
char_data = CharacterData(char_dict=char_dict)
char_lookup[char_data.character_id] = char_data
self.characters = keydefaultdict(
CharacterData.from_character_id,
char_lookup,
)
def to_dict(self):
char_lookup = {}
for char_key, char_data in self.characters.items():
char_lookup[char_key] = char_data.to_dict()
return {
'c': char_lookup,
}
def calculate_stats(self):
self.game_count = 0
self.game_wins = 0
self.round_count = 0
self.round_wins = 0
self.round_types = defaultdict(int)
for char_data in self.characters.values():
char_data.calculate_stats()
self.game_count += char_data.game_count
self.game_wins += char_data.game_wins
self.round_count += char_data.round_count
self.round_wins += char_data.round_wins
for round_type, type_count in char_data.round_types.items():
self.round_types[round_type] += type_count
self.most_used_character = max(
self.characters.keys(),
key=lambda cid: self.characters[cid].game_count,
) if self.characters else None
def combine(self, other_player_data):
new_player = _BasePlayerData(other_player_data.to_dict())
for char_key, char_data in self.characters.items():
new_player.characters[char_key] = (
new_player.characters[char_key].combine(char_data)
)
return new_player
def _process_matches(
self,
matches,
is_current_player_func,
is_new_match_func
):
latest_new_match = None
latest_any_match = None
for match_dict in matches:
match = CFNMatchModel(match_dict, is_current_player_func)
if not latest_any_match or match.ticks > latest_any_match.ticks:
latest_any_match = match
if not is_new_match_func(match):
continue
if not latest_new_match or match.ticks > latest_new_match.ticks:
latest_new_match = match
player = match.player
opponent = match.opponent
matchup = (
self
.characters[player.char_id]
.opponents[opponent.char_id]
)
matchup.game_count += 1
if match.winner_id == player.cfn_id:
matchup.game_wins += 1
for mr_dict in match.rounds:
matchup.round_count += 1
mr = CFNRoundModel(mr_dict)
if mr.winner_id == player.cfn_id:
matchup.round_wins += 1
matchup.round_types[mr.round_type] += 1
self.latest_new_match = latest_new_match
self.latest_any_match = latest_any_match
class IndividualPlayerData(_BasePlayerData):
def __init__(self, player_id=None, player_dict=None):
if player_id and player_dict and player_id != player_dict['id']:
raise Exception
self.player_id = player_dict['id'] if player_dict else player_id
self.is_updated = False
self.latest_match_ticks = player_dict['lm'] if player_dict else None
self.league_points = player_dict['lp'] if player_dict else None
super(IndividualPlayerData, self).__init__(player_dict=player_dict)
def to_dict(self):
res_dict = super(IndividualPlayerData, self).to_dict()
res_dict.update({
'id': self.player_id,
'lm': self.latest_match_ticks,
'lp': self.league_points,
})
return res_dict
def _is_current_player(self, cfn_player):
return cfn_player.cfn_id == self.player_id
def _is_new_match(self, match):
return (
self.latest_match_ticks is None or
match.ticks > self.latest_match_ticks
)
def _process_matches(self, matches):
super(IndividualPlayerData, self)._process_matches(
matches,
self._is_current_player,
self._is_new_match,
)
if self.latest_new_match:
self.latest_match_ticks = self.latest_new_match.ticks
if self.latest_new_match.player.league_points > 0:
# sometimes get bad LP data from Capcom, check before updating
self.league_points = self.latest_new_match.player.league_points
if self.latest_any_match:
self.region = self.latest_any_match.player.region
self.platform = self.latest_any_match.player.platform
class RankBinPlayerData(_BasePlayerData):
def __init__(
self,
player_bin=None,
opponent_bin=None,
player_dict=None,
):
if player_bin and player_dict and player_bin != player_dict['pb']:
raise Exception
if opponent_bin and player_dict and opponent_bin != player_dict['ob']:
raise Exception
self.player_bin = player_dict['pb'] if player_dict else player_bin
self.opponent_bin = player_dict['ob'] if player_dict else opponent_bin
super(RankBinPlayerData, self).__init__(player_dict=player_dict)
def to_dict(self):
res_dict = super(RankBinPlayerData, self).to_dict()
res_dict.update({
'pb': self.player_bin,
'ob': self.opponent_bin,
})
return res_dict
def _process_matches(
self,
matches,
is_current_player_func,
):
super(RankBinPlayerData, self)._process_matches(
matches,
is_current_player_func,
lambda cfn_match: True,
)
class _BaseRankBinMatchupData():
def get_player_data_for_bin(self, rank_bin):
for player_data in self.all_player_data:
if player_data.player_bin == rank_bin:
return player_data
raise Exception
class RankBinMatchupPair(_BaseRankBinMatchupData):
def __init__(
self,
lower_bin=None,
higher_bin=None,
matchup_dict=None,
):
self.lower_bin = matchup_dict['lb'] if matchup_dict else lower_bin
self.higher_bin = matchup_dict['hb'] if matchup_dict else higher_bin
lower_player_dict = matchup_dict['ld'] if matchup_dict else None
higher_player_dict = matchup_dict['hd'] if matchup_dict else None
self.lower_player_data = RankBinPlayerData(
self.lower_bin, self.higher_bin, lower_player_dict
)
self.higher_player_data = RankBinPlayerData(
self.higher_bin, self.lower_bin, higher_player_dict
)
self.all_player_data = [
self.lower_player_data,
self.higher_player_data,
]
def to_dict(self):
return {
'lb': self.lower_bin,
'ld': self.lower_player_data.to_dict(),
'hb': self.higher_bin,
'hd': self.higher_player_data.to_dict(),
}
def _process_matches(self, matches):
self.lower_player_data._process_matches(
matches,
lambda cfn_player: cfn_player.rank_bin == self.lower_bin,
)
self.higher_player_data._process_matches(
matches,
lambda cfn_player: cfn_player.rank_bin == self.higher_bin,
)
class RankBinMatchupMirror(_BaseRankBinMatchupData):
def __init__(
self,
mirror_bin=None,
matchup_dict=None,
):
self.mirror_bin = matchup_dict['mb'] if matchup_dict else mirror_bin
mirror_player_dict = matchup_dict['md'] if matchup_dict else None
self.mirror_player_data = RankBinPlayerData(
self.mirror_bin, self.mirror_bin, mirror_player_dict
)
self.all_player_data = [
self.mirror_player_data,
]
@property
def lower_bin(self):
return self.mirror_bin
@property
def higher_bin(self):
return self.mirror_bin
def to_dict(self):
return {
'mb': self.mirror_bin,
'md': self.mirror_player_data.to_dict(),
}
def _process_matches(self, matches):
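        # Both sides of a mirror match feed the same player data, so the matches
        # are processed twice with opposite player-selection predicates.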
self.mirror_player_data._process_matches(
matches,
lambda cfn_player: True,
)
self.mirror_player_data._process_matches(
matches,
lambda cfn_player: False,
)
def _rbm_decider(rbm_dict):
if 'mb' in rbm_dict:
return RankBinMatchupMirror(matchup_dict=rbm_dict)
elif 'lb' in rbm_dict:
return RankBinMatchupPair(matchup_dict=rbm_dict)
else:
raise Exception
def _rbm_creator_factory(lower_bin):
def rbm_creator(higher_bin):
if lower_bin == higher_bin:
return RankBinMatchupMirror(
mirror_bin=lower_bin
)
elif lower_bin < higher_bin:
return RankBinMatchupPair(
lower_bin=lower_bin,
higher_bin=higher_bin,
)
else:
raise Exception
return rbm_creator
class LowerRankBinData():
def __init__(
self,
lower_bin=None,
bin_dict=None
):
self.lower_bin = bin_dict['lb'] if bin_dict else lower_bin
rbm_lookup = {}
if bin_dict:
for rbm_dict in bin_dict['rbm'].values():
rbm_data = _rbm_decider(rbm_dict)
if self.lower_bin != rbm_data.lower_bin:
raise Exception
rbm_lookup[rbm_data.higher_bin] = rbm_data
self.rank_bin_matchups = keydefaultdict(
_rbm_creator_factory(self.lower_bin),
rbm_lookup,
)
def to_dict(self):
rbm_lookup = {}
for rbm_key, rbm_data in self.rank_bin_matchups.items():
rbm_lookup[rbm_key] = rbm_data.to_dict()
return {
'lb': self.lower_bin,
'rbm': rbm_lookup,
}
@classmethod
def from_lower_bin(caller, lower_bin):
return LowerRankBinData(lower_bin=lower_bin)
def _process_matches(
self,
matches_by_higher_bin,
):
for higher_bin_key, matches in matches_by_higher_bin.items():
self.rank_bin_matchups[higher_bin_key]._process_matches(
matches,
)
class GlobalRankedData():
def __init__(
self,
global_dict=None,
):
bin_lookup = {}
if global_dict:
for bin_dict in global_dict['b'].values():
bin_data = LowerRankBinData(bin_dict=bin_dict)
bin_lookup[bin_data.lower_bin] = bin_data
self.lower_rank_bins = keydefaultdict(
LowerRankBinData.from_lower_bin,
bin_lookup,
)
def to_dict(self):
bin_lookup = {}
for bin_key, bin_data in self.lower_rank_bins.items():
bin_lookup[bin_key] = bin_data.to_dict()
return {
'b': bin_lookup,
}
def _process_matches(
self,
matches,
is_new_match_func,
):
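        # Bucket the new matches by (lower rank bin, higher rank bin) and let the
        # corresponding LowerRankBinData process each bucket.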
match_bins = defaultdict(lambda: defaultdict(list))
for match_dict in matches:
match = CFNMatchModel(match_dict)
if not is_new_match_func(match):
continue
lower_bin = match_bins[match.lower_rank.rank_bin]
lower_bin[match.higher_rank.rank_bin].append(match_dict)
for lower_bin_key, higher_bin_matches in match_bins.items():
self.lower_rank_bins[lower_bin_key]._process_matches(
higher_bin_matches
)
def _combine_player_data(self, player_datas):
new_player = _BasePlayerData()
for player_data in player_datas:
new_player = new_player.combine(player_data)
return new_player
def get_player_data_by_bins(
self,
p1_bins,
p2_bins,
):
player_datas = []
for p1_bin in p1_bins:
for p2_bin in p2_bins:
lower_bin = min(p1_bin, p2_bin)
higher_bin = max(p1_bin, p2_bin)
player_data = (
self
.lower_rank_bins[lower_bin]
.rank_bin_matchups[higher_bin]
.get_player_data_for_bin(p1_bin)
)
player_datas.append(player_data)
combined = self._combine_player_data(player_datas)
return combined
| [
"py.src.match.model.cfn.CFNRoundModel",
"py.src.match.model.cfn.CFNMatchModel",
"collections.defaultdict"
] | [((1151, 1186), 'collections.defaultdict', 'defaultdict', (['int', 'round_type_lookup'], {}), '(int, round_type_lookup)\n', (1162, 1186), False, 'from collections import defaultdict\n'), ((3315, 3331), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (3326, 3331), False, 'from collections import defaultdict\n'), ((5012, 5028), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (5023, 5028), False, 'from collections import defaultdict\n'), ((6226, 6275), 'py.src.match.model.cfn.CFNMatchModel', 'CFNMatchModel', (['match_dict', 'is_current_player_func'], {}), '(match_dict, is_current_player_func)\n', (6239, 6275), False, 'from py.src.match.model.cfn import CFNMatchModel, CFNRoundModel\n'), ((15776, 15801), 'py.src.match.model.cfn.CFNMatchModel', 'CFNMatchModel', (['match_dict'], {}), '(match_dict)\n', (15789, 15801), False, 'from py.src.match.model.cfn import CFNMatchModel, CFNRoundModel\n'), ((7029, 7051), 'py.src.match.model.cfn.CFNRoundModel', 'CFNRoundModel', (['mr_dict'], {}), '(mr_dict)\n', (7042, 7051), False, 'from py.src.match.model.cfn import CFNMatchModel, CFNRoundModel\n'), ((15702, 15719), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (15713, 15719), False, 'from collections import defaultdict\n')] |
# Generated by Django 3.2.3 on 2021-06-03 16:03
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('beershareapp', '0004_beerorder_datetime'),
]
operations = [
migrations.AlterField(
model_name='address',
name='address',
field=models.CharField(max_length=255, verbose_name='Address'),
),
]
| [
"django.db.models.CharField"
] | [((343, 399), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(255)', 'verbose_name': '"""Address"""'}), "(max_length=255, verbose_name='Address')\n", (359, 399), False, 'from django.db import migrations, models\n')] |
from liesl.files.labrecorder.manager import *
import pytest
def test_validate(mock, markermock):
sids = validate([{"name": "Liesl-Mock-EEG"}, {"name": "Liesl-Mock-Marker"}])
def test_validate_raises(mock, markermock):
with pytest.raises(ConnectionError):
sids = validate([{"name": "Liesl-Mock-EEG"}, {"name": "not-available"}])
with pytest.raises(ConnectionError):
sids = validate([{"name": "Liesl-Mock-EEG"}, {"name": "Liesl-Mock-EEG"}])
def test_add_to_path():
pass
def test_follow_lnk():
pass
@pytest.mark.parametrize("fname", ["test.txt", "LabRecorderCLI.exe"])
def test_find_file(tmpdir, fname):
p = tmpdir.mkdir("sub").join(fname)
p.write("content")
find_file(path=str(tmpdir), file=fname)
| [
"pytest.mark.parametrize",
"pytest.raises"
] | [((543, 611), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fname"""', "['test.txt', 'LabRecorderCLI.exe']"], {}), "('fname', ['test.txt', 'LabRecorderCLI.exe'])\n", (566, 611), False, 'import pytest\n'), ((235, 265), 'pytest.raises', 'pytest.raises', (['ConnectionError'], {}), '(ConnectionError)\n', (248, 265), False, 'import pytest\n'), ((357, 387), 'pytest.raises', 'pytest.raises', (['ConnectionError'], {}), '(ConnectionError)\n', (370, 387), False, 'import pytest\n')] |
# Copyright(c) 2019-2020 Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# pylint: disable-all
import nxsdk.api.n2a as nx
import matplotlib.pyplot as plt
import numpy as np
import os
import time
import pickle
import nxsdk_modules_ncl.epl.src.computeResults as computeResults
from collections import namedtuple
def timer(input_func):
def timed(*args, **kwargs):
start_time = time.time()
result = input_func(*args, **kwargs)
end_time = time.time()
print("{0} took {1:0.5f} secs".format(input_func.__name__,
end_time - start_time))
return result
return timed
class MultiPatternInferenceEPL:
def __init__(self, numCores, numExcNeuronsPerCore, numInhNeuronsPerCore,
inputBiases=None, gcInputBias=None,
conn_prob=0.2, delayMCToGC=16, numMCToGCDelays=4,
doOnlyInference=True, debug=False, log=True):
self.net = nx.NxNet()
self.numCores = numCores
self.numExcNeuronsPerCore = numExcNeuronsPerCore
self.numInhNeuronsPerCore = numInhNeuronsPerCore
self.inputBiases = inputBiases
self.gcInputBias = gcInputBias
self.conn_prob = conn_prob
self.numMCToGCDelays = numMCToGCDelays
self.delayMCToGC = delayMCToGC
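        # Lookup table mapping an input stimulus level (0-19) to the bias value
        # applied to an MC-AD neuron in applyInputs().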
self.stim2bias = [0, 34, 36, 38, 41, 43, 46, 50, 54, 59, 65, 72, 81,
92, 107, 129, 161, 214, 321, 641]
self.cycleDuration = 40
self.doOnlyInference = doOnlyInference
self.debug = debug
self.log = log
self.numStepsRan = 0
if not self.debug:
self.setupNetwork()
@property
def numENeurons(self):
return self.numCores * self.numExcNeuronsPerCore
@property
def numENeuronsPerCore(self):
return self.numExcNeuronsPerCore
@property
def numINeurons(self):
return self.numCores * self.numInhNeuronsPerCore
@property
def numINeuronsPerCore(self):
return self.numInhNeuronsPerCore
def setupNetwork(self):
self.loadWeightsAndInputs()
self.createMCAndSTONetwork()
self.createMCToGCNetwork()
self.setupProbes()
@timer
def createMCAndSTONetwork(self):
self.createExcitatoryMCNeurons()
self.createSTONeurons()
self.connectSTONeuronsWithMCADNeurons()
@timer
def createMCToGCNetwork(self):
self.createInhibitoryGCNeurons()
self.connectInhibitoryGCToExcitatoryMCNeurons()
self.connectExcitatoryMCToInhibitoryGCNeurons()
@timer
def loadWeightsAndInputs(self):
dir_path = os.path.dirname(os.path.abspath(__file__))
data_dir = os.path.join(dir_path, "../../data/")
# print(data_dir)
self.inhGCToExcMCWeights = np.load(os.path.join(data_dir,
"i2eWgtMat.npy"))
self.inhGCToExcMCDelays = np.load(os.path.join(data_dir,
"i2eDlyMat.npy"))
self.excMCToInhGCWeights = np.load(os.path.join(data_dir,
"e2iWgtMat.npy"))
#print(os.path.join(data_dir, "windTunnelData.pi"))
if not self.debug:
windTunnelDataFile = "windTunnelData.pi"
rf = open(os.path.join(data_dir, windTunnelDataFile), 'rb')
self.trainingSet = pickle.load(rf)
self.testSet = pickle.load(rf)
rf.close()
# print(self.inhGCToExcMCWeights.shape)
# print(self.excMCToInhGCWeights.shape)
def createInhibitoryGCNeurons(self):
self.allGCNeuronsGroup = self.net.createCompartmentGroup()
self.gcNeuronGrpPerCoreList = []
if self.gcInputBias is None:
self.gcInputBias = 0
for coreIdx in range(self.numCores):
gcNeuronGrpPerCore = self.net.createCompartmentGroup()
gcNeuronProtoPerCore = nx.CompartmentPrototype(
logicalCoreId=coreIdx,
compartmentCurrentDecay=4095,
compartmentVoltageDecay=4095,
biasMant=0 if not self.debug else self.gcInputBias,
vThMant=5*200 if not self.debug else self.gcInputBias//64,
refractoryDelay=25,
vMinExp=0,
numDendriticAccumulators=64,
functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE,
thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET
)
for i in range(self.numINeuronsPerCore):
gcCx = self.net.createCompartment(
prototype=gcNeuronProtoPerCore)
gcNeuronGrpPerCore.addCompartments(gcCx)
self.allGCNeuronsGroup.addCompartments(gcCx)
self.gcNeuronGrpPerCoreList.append(gcNeuronGrpPerCore)
def connectInhibitoryGCToExcitatoryMCNeurons(self):
ConnGroup = namedtuple("ConnGroup", "positive negative")
self.inh2ExcConnGroups = list()
for coreIdx in range(self.numCores):
"""
wgts = np.zeros((self.numMCsPerCore, self.numGCsPerCore), int)
delays = np.ones(wgts.shape)
"""
if not self.debug:
excWgts = self.inhGCToExcMCWeights[0, coreIdx]
excDlys = self.inhGCToExcMCDelays[0, coreIdx]
inhWgts = self.inhGCToExcMCWeights[1, coreIdx]
inhDlys = self.inhGCToExcMCDelays[1, coreIdx]
else:
wgts = self.inhGCToExcMCWeights
dlys = self.inhGCToExcMCDelays
excWgts = np.ones_like(wgts[0, coreIdx])
excDlys = np.ones_like(dlys[0, coreIdx]) * 2
inhWgts = np.ones_like(wgts[1, coreIdx]) * -1
inhDlys = np.ones_like(dlys[1, coreIdx]) * 1
excConnProtoBox = nx.ConnectionPrototype(
numDelayBits=6,
enableDelay=1,
signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY,
postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX,
compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE
)
inhConnProtoBox = nx.ConnectionPrototype(
numDelayBits=6,
enableDelay=1,
signMode=nx.SYNAPSE_SIGN_MODE.INHIBITORY,
postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX,
compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE
)
posConnGrp = self.net.createConnectionGroup(
src=self.gcNeuronGrpPerCoreList[coreIdx],
dst=self.mcNeuronGrpPerCoreList[coreIdx],
prototype=excConnProtoBox,
connectionMask=(excWgts > 0),
weight=excWgts,
delay=excDlys
)
negConnGrp = self.net.createConnectionGroup(
src=self.gcNeuronGrpPerCoreList[coreIdx],
dst=self.mcNeuronGrpPerCoreList[coreIdx],
prototype=inhConnProtoBox,
connectionMask=(inhWgts < 0),
weight=inhWgts,
delay=inhDlys
)
self.inh2ExcConnGroups.append(ConnGroup(positive=posConnGrp,
negative=negConnGrp))
def connectExcitatoryMCToInhibitoryGCNeurons(self):
minDelay = self.delayMCToGC
numDelays = self.numMCToGCDelays
#percent = int(100 * self.conn_prob)
"""
eSTDPLearningRule= net.createLearningRule(
dw='2^-4*x1*y0',
x1Impulse=20,
x1TimeConstant=2,
tEpoch=trainEpoch
)
"""
self.exc2InhConnGroups = list()
for delay in range(minDelay, minDelay + numDelays):
"""
wgtMat = np.zeros((self.numGCs, self.numMCs), int)
rand = np.random.uniform(0, 100, size=wgtMat.shape)
wgtMat[rand <= percent] = 10
"""
wgtMat = self.excMCToInhGCWeights[delay-minDelay]
connProtoE2I = nx.ConnectionPrototype(
delay=delay if not self.debug else 0,
numDelayBits=6,
enableDelay=1,
signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY,
compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE,
#enableLearning=1 if enableSTDP else 0,
# learningRule=eSTDPLearningRule,
# learningEnableMode=nx.SYNAPSE_LEARNING_ENABLE_MODE.SHARED
)
connGrp = self.net.createConnectionGroup(
dst=self.allGCNeuronsGroup,
src=self.allMCSomaNeuronsGrp,
prototype=connProtoE2I,
connectionMask=(wgtMat > 0),
weight=wgtMat
)
self.exc2InhConnGroups.append(connGrp)
def createExcitatoryMCNeurons(self):
        # Create MC-AD neurons, which receive the input biases. The activity of
        # the MC-AD neurons is gated by the STO neurons.
if self.inputBiases is None:
self.inputBiases = [0] * self.numCores
self.mcADNeuronGroup = self.net.createCompartmentGroup()
for coreIdx in range(self.numCores):
mcADProto = nx.CompartmentPrototype(
logicalCoreId=coreIdx,
compartmentCurrentDecay=0,
vThMant=10, # i.e. 10 * 64 = 640
biasMant=self.inputBiases[coreIdx],
refractoryDelay=20,
vMinExp=0,
numDendriticAccumulators=64,
functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE,
thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET
)
mcADCx = self.net.createCompartment(prototype=mcADProto)
self.mcADNeuronGroup.addCompartments(mcADCx)
        # Create MC-Soma neurons which get input from MC-AD neurons. MC-Soma
# neurons connect to the Inhibitory GC neurons.
self.allMCSomaNeuronsGrp = self.net.createCompartmentGroup()
self.mcNeuronGrpPerCoreList = []
for coreIdx in range(self.numCores):
mcSomaNeuronProto = nx.CompartmentPrototype(
logicalCoreId=coreIdx,
compartmentCurrentDecay=0,
compartmentVoltageDecay=4095,
vThMant=2, # i.e. 2 * 64 = 128
refractoryDelay=19,
vMinExp=0,
numDendriticAccumulators=64,
functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE,
thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET
)
mcNeuronGrpPerCore = self.net.createCompartmentGroup()
for _ in range(self.numENeuronsPerCore):
mcSomaNeuronCx = self.net.createCompartment(
prototype=mcSomaNeuronProto)
self.allMCSomaNeuronsGrp.addCompartments(mcSomaNeuronCx)
mcNeuronGrpPerCore.addCompartments(mcSomaNeuronCx)
self.mcNeuronGrpPerCoreList.append(mcNeuronGrpPerCore)
# Connect each MC-AD neuron to its MC-Soma neuron
mcADToSomaConnProtoBox = nx.ConnectionPrototype(
weight=3,
delay=19,
numDelayBits=6,
enableDelay=1,
signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY,
postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX,
compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE
)
for coreIdx in range(self.numENeurons):
self.net._createConnection(src=self.mcADNeuronGroup[coreIdx],
dst=self.allMCSomaNeuronsGrp[coreIdx],
prototype=mcADToSomaConnProtoBox)
def createSTONeurons(self):
self.stoNeuronGroup = self.net.createCompartmentGroup()
for i in range(self.numENeurons):
stoNeuronProto = nx.CompartmentPrototype(
logicalCoreId=i,
compartmentCurrentDecay=4095,
vThMant=39,
biasMant=64,
numDendriticAccumulators=64,
vMinExp=0,
functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE,
thresholdBehavior=nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET
)
stoNeuronCx = self.net.createCompartment(prototype=stoNeuronProto)
self.stoNeuronGroup.addCompartments(stoNeuronCx)
def connectSTONeuronsWithMCADNeurons(self, wgt=20):
connProtoBox = nx.ConnectionPrototype(
weight=-wgt,
delay=20,
numDelayBits=6,
enableDelay=1,
signMode=nx.SYNAPSE_SIGN_MODE.INHIBITORY,
postSynResponseMode=nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX,
compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE
)
# stoNeuronGroup.connect(dst=eNeuronADGroup, prototype=connProtoBox)
for coreIdx in range(self.numENeurons):
self.net._createConnection(
src=self.stoNeuronGroup[coreIdx],
dst=self.mcADNeuronGroup[coreIdx],
prototype=connProtoBox)
for idx in range(self.numENeuronsPerCore):
self.net._createConnection(
src=self.stoNeuronGroup[coreIdx],
dst=self.mcNeuronGrpPerCoreList[coreIdx][idx],
prototype=connProtoBox)
def applyInputs(self, inputList, thethaReset=False):
if len(inputList) != self.numENeurons:
raise ValueError("Incorrect size of inputs list")
if self.board is None:
raise ValueError("There's no board as the network is not "
"compiled yet.")
#uniqueCores = set()
for mcIdx, inputVal in enumerate(inputList):
cx = self.mcADNeuronGroup[mcIdx]
_, chipId, coreId, cxId, _, _ = \
self.net.resourceMap.compartment(cx.nodeId)
n2Core = self.board.n2Chips[chipId].n2Cores[coreId]
n2Core.cxCfg[np.asscalar(cxId)].bias = self.stim2bias[inputVal]
n2Core.cxCfg.pushModified()
if thethaReset:
n2Core.cxState[np.asscalar(cxId)].v = 0
n2Core.cxState.pushModified()
def switchThetaState(self, state):
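        # Open (state=1) or close (state=0) the theta gate by lowering/raising the
        # firing threshold of every MC soma compartment.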
for mcIdx in range(self.numCores):
# MC soma
cx = self.allMCSomaNeuronsGrp[mcIdx]
_, chipId, coreId, cxId, _, vthProfileCfgId1 = \
map(lambda x: int(x), self.net.resourceMap.compartment(cx.nodeId))
n2Core = self.board.n2Chips[chipId].n2Cores[coreId]
vth = 2 if state == 1 else 100
n2Core.vthProfileCfg[vthProfileCfgId1].staticCfg.vth = vth
n2Core.vthProfileCfg.pushModified()
def sniff(self, inputList, numGammaCycles=5,
numThetaCycles=1):
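        # One sniff = one theta cycle: drive the MC-AD neurons with the input
        # pattern for numGammaCycles gamma cycles, then clear the inputs, close the
        # theta gate (raised MC soma threshold) and run for the same duration.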
self.applyInputs(inputList)
numSteps = numGammaCycles * self.cycleDuration
        self.board.run(numSteps)
self.applyInputs([0] * self.numCores, thethaReset=True)
self.switchThetaState(state=0)
# numSteps = numGammaCycles * self.cycleDuration
        self.board.run(numSteps)
self.switchThetaState(state=1)
self.numStepsRan += 2 * numSteps
def dumpSpikesOutputForPostProcessing(self, nGamma):
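        # Convert the MC soma spike trains into per-gamma-cycle latency ranks
        # (earlier spikes within a 40-step gamma cycle get a higher rank) and pickle
        # them for post-processing.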
_, spikeProbes, _ = self.mcSomaProbes
        offset = 20 + 1  # 1 accounts for the delay in the spike probe counter
gammaCode = []
for _ in range(nGamma):
gammaCode.append([0]*72)
for i, spkProbe in enumerate(spikeProbes):
data = spkProbe.data[offset:]
spikes1 = np.nonzero(data)[0]
for j in spikes1:
gammaCycle = j//40
rank = (gammaCycle*40 + 21) - (gammaCycle*40 + (j % 40))
gammaCode[gammaCycle][i] = rank
pickledfilename = "spikes.pi"
wf = open(pickledfilename, 'wb')
pickle.dump(gammaCode, wf)
wf.close()
@timer
def setupProbes(self):
self.setupMCAndSTOProbes()
def setupMCAndSTOProbes(self):
probeParams = [nx.ProbeParameter.COMPARTMENT_VOLTAGE,
nx.ProbeParameter.SPIKE,
nx.ProbeParameter.COMPARTMENT_CURRENT]
self.mcADProbes = self.mcADNeuronGroup.probe(probeParams)
self.mcSomaProbes = self.allMCSomaNeuronsGrp.probe(probeParams)
self.stoProbes = self.stoNeuronGroup.probe(probeParams)
def getProbesForNeuronIdx(self, probes, idx):
vProbes, spikeProbes, uProbes = probes
return vProbes[idx], spikeProbes[idx], uProbes[idx]
def plotSTOVsMCNeuronProbes(self, idx):
# plot the eNeuronProbes
vProbeE, spikeProbeE, uProbeE = self.getProbesForNeuronIdx(
self.mcSomaProbes, idx)
vProbeSTO, spikeProbeSTO, uProbeSTO = self.getProbesForNeuronIdx(
self.stoProbes, idx)
plt.figure()
ax1 = plt.subplot(321)
vProbeE.plot()
plt.title("E-NEURON(V_PROBE)")
plt.subplot(323, sharex=ax1)
spikeProbeE.plot()
plt.title("E-NEURON(SPIKE_PROBE)")
plt.subplot(325, sharex=ax1)
uProbeE.plot()
plt.title("E-NEURON(U_PROBE)")
# plots for STO neurons
plt.subplot(322, sharex=ax1)
vProbeSTO.plot()
plt.title("STO-NEURON(V_PROBE)")
plt.subplot(324, sharex=ax1)
spikeProbeSTO.plot()
plt.title("STO-NEURON(SPIKE_PROBE)")
plt.subplot(326, sharex=ax1)
uProbeSTO.plot()
plt.title("E-NEURON(U_PROBE)")
def plotSpikeRaster(self, probes, offset=60):
_, spikeProbes, _ = probes
plt.figure()
# probe[1] is spike probe
data = [np.nonzero(spkProbe.data[offset:])[0]
for spkProbe in spikeProbes]
size = self.numENeurons
plt.eventplot(positions=data, colors=[(1, 0, 0)],
lineoffsets=np.arange(size),
linelengths=np.ones(size) / 2.0)
plt.title("E-Neurons (Spike Raster Plot)")
plt.ylabel("# E-Neurons")
plt.xlabel("Time + {} timesteps".format(offset))
plt.tight_layout()
@timer
def compileAndGetBoard(self):
self.board = nx.N2Compiler().compile(self.net)
return self.board
if __name__ == '__main__':
numCores = 72
eplInference = MultiPatternInferenceEPL(numCores=numCores,
numExcNeuronsPerCore=1,
numInhNeuronsPerCore=46)
board = eplInference.compileAndGetBoard()
for i, trainSample in enumerate(eplInference.trainingSet):
for _ in range(2):
eplInference.sniff(inputList=trainSample)
for i, testSample in enumerate(eplInference.testSet):
eplInference.sniff(inputList=testSample)
print("Ran the network for {} time steps".format(eplInference.numStepsRan))
board.disconnect()
nGamma = 10*len(eplInference.trainingSet)*2 + 10*len(eplInference.testSet)
eplInference.dumpSpikesOutputForPostProcessing(nGamma)
computeResults.computeResults(nGammaPerTraining=10,
trainingSetSize=len(
eplInference.trainingSet),
testSetSize=len(eplInference.testSet),
plotIDs=[0])
| [
"matplotlib.pyplot.ylabel",
"numpy.arange",
"nxsdk.api.n2a.NxNet",
"collections.namedtuple",
"numpy.ones",
"pickle.load",
"nxsdk.api.n2a.ConnectionPrototype",
"numpy.nonzero",
"matplotlib.pyplot.title",
"time.time",
"numpy.ones_like",
"pickle.dump",
"os.path.join",
"numpy.asscalar",
"matplotlib.pyplot.figure",
"nxsdk.api.n2a.N2Compiler",
"matplotlib.pyplot.tight_layout",
"os.path.abspath",
"matplotlib.pyplot.subplot",
"nxsdk.api.n2a.CompartmentPrototype"
] | [((1877, 1888), 'time.time', 'time.time', ([], {}), '()\n', (1886, 1888), False, 'import time\n'), ((1953, 1964), 'time.time', 'time.time', ([], {}), '()\n', (1962, 1964), False, 'import time\n'), ((2455, 2465), 'nxsdk.api.n2a.NxNet', 'nx.NxNet', ([], {}), '()\n', (2463, 2465), True, 'import nxsdk.api.n2a as nx\n'), ((4208, 4245), 'os.path.join', 'os.path.join', (['dir_path', '"""../../data/"""'], {}), "(dir_path, '../../data/')\n", (4220, 4245), False, 'import os\n'), ((6473, 6517), 'collections.namedtuple', 'namedtuple', (['"""ConnGroup"""', '"""positive negative"""'], {}), "('ConnGroup', 'positive negative')\n", (6483, 6517), False, 'from collections import namedtuple\n'), ((12927, 13167), 'nxsdk.api.n2a.ConnectionPrototype', 'nx.ConnectionPrototype', ([], {'weight': '(3)', 'delay': '(19)', 'numDelayBits': '(6)', 'enableDelay': '(1)', 'signMode': 'nx.SYNAPSE_SIGN_MODE.EXCITATORY', 'postSynResponseMode': 'nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX', 'compressionMode': 'nx.SYNAPSE_COMPRESSION_MODE.SPARSE'}), '(weight=3, delay=19, numDelayBits=6, enableDelay=1,\n signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY, postSynResponseMode=nx.\n SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.\n SYNAPSE_COMPRESSION_MODE.SPARSE)\n', (12949, 13167), True, 'import nxsdk.api.n2a as nx\n'), ((14308, 14551), 'nxsdk.api.n2a.ConnectionPrototype', 'nx.ConnectionPrototype', ([], {'weight': '(-wgt)', 'delay': '(20)', 'numDelayBits': '(6)', 'enableDelay': '(1)', 'signMode': 'nx.SYNAPSE_SIGN_MODE.INHIBITORY', 'postSynResponseMode': 'nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX', 'compressionMode': 'nx.SYNAPSE_COMPRESSION_MODE.SPARSE'}), '(weight=-wgt, delay=20, numDelayBits=6, enableDelay=1,\n signMode=nx.SYNAPSE_SIGN_MODE.INHIBITORY, postSynResponseMode=nx.\n SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.\n SYNAPSE_COMPRESSION_MODE.SPARSE)\n', (14330, 14551), True, 'import nxsdk.api.n2a as nx\n'), ((17737, 17763), 'pickle.dump', 'pickle.dump', (['gammaCode', 'wf'], {}), '(gammaCode, wf)\n', (17748, 17763), False, 'import pickle\n'), ((18725, 18737), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (18735, 18737), True, 'import matplotlib.pyplot as plt\n'), ((18752, 18768), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(321)'], {}), '(321)\n', (18763, 18768), True, 'import matplotlib.pyplot as plt\n'), ((18800, 18830), 'matplotlib.pyplot.title', 'plt.title', (['"""E-NEURON(V_PROBE)"""'], {}), "('E-NEURON(V_PROBE)')\n", (18809, 18830), True, 'import matplotlib.pyplot as plt\n'), ((18840, 18868), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(323)'], {'sharex': 'ax1'}), '(323, sharex=ax1)\n', (18851, 18868), True, 'import matplotlib.pyplot as plt\n'), ((18904, 18938), 'matplotlib.pyplot.title', 'plt.title', (['"""E-NEURON(SPIKE_PROBE)"""'], {}), "('E-NEURON(SPIKE_PROBE)')\n", (18913, 18938), True, 'import matplotlib.pyplot as plt\n'), ((18948, 18976), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(325)'], {'sharex': 'ax1'}), '(325, sharex=ax1)\n', (18959, 18976), True, 'import matplotlib.pyplot as plt\n'), ((19008, 19038), 'matplotlib.pyplot.title', 'plt.title', (['"""E-NEURON(U_PROBE)"""'], {}), "('E-NEURON(U_PROBE)')\n", (19017, 19038), True, 'import matplotlib.pyplot as plt\n'), ((19080, 19108), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(322)'], {'sharex': 'ax1'}), '(322, sharex=ax1)\n', (19091, 19108), True, 'import matplotlib.pyplot as plt\n'), ((19142, 19174), 'matplotlib.pyplot.title', 'plt.title', (['"""STO-NEURON(V_PROBE)"""'], {}), "('STO-NEURON(V_PROBE)')\n", (19151, 19174), True, 
'import matplotlib.pyplot as plt\n'), ((19184, 19212), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(324)'], {'sharex': 'ax1'}), '(324, sharex=ax1)\n', (19195, 19212), True, 'import matplotlib.pyplot as plt\n'), ((19250, 19286), 'matplotlib.pyplot.title', 'plt.title', (['"""STO-NEURON(SPIKE_PROBE)"""'], {}), "('STO-NEURON(SPIKE_PROBE)')\n", (19259, 19286), True, 'import matplotlib.pyplot as plt\n'), ((19296, 19324), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(326)'], {'sharex': 'ax1'}), '(326, sharex=ax1)\n', (19307, 19324), True, 'import matplotlib.pyplot as plt\n'), ((19358, 19388), 'matplotlib.pyplot.title', 'plt.title', (['"""E-NEURON(U_PROBE)"""'], {}), "('E-NEURON(U_PROBE)')\n", (19367, 19388), True, 'import matplotlib.pyplot as plt\n'), ((19483, 19495), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (19493, 19495), True, 'import matplotlib.pyplot as plt\n'), ((19834, 19876), 'matplotlib.pyplot.title', 'plt.title', (['"""E-Neurons (Spike Raster Plot)"""'], {}), "('E-Neurons (Spike Raster Plot)')\n", (19843, 19876), True, 'import matplotlib.pyplot as plt\n'), ((19885, 19910), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""# E-Neurons"""'], {}), "('# E-Neurons')\n", (19895, 19910), True, 'import matplotlib.pyplot as plt\n'), ((19976, 19994), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19992, 19994), True, 'import matplotlib.pyplot as plt\n'), ((4162, 4187), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (4177, 4187), False, 'import os\n'), ((4315, 4354), 'os.path.join', 'os.path.join', (['data_dir', '"""i2eWgtMat.npy"""'], {}), "(data_dir, 'i2eWgtMat.npy')\n", (4327, 4354), False, 'import os\n'), ((4455, 4494), 'os.path.join', 'os.path.join', (['data_dir', '"""i2eDlyMat.npy"""'], {}), "(data_dir, 'i2eDlyMat.npy')\n", (4467, 4494), False, 'import os\n'), ((4595, 4634), 'os.path.join', 'os.path.join', (['data_dir', '"""e2iWgtMat.npy"""'], {}), "(data_dir, 'e2iWgtMat.npy')\n", (4607, 4634), False, 'import os\n'), ((4936, 4951), 'pickle.load', 'pickle.load', (['rf'], {}), '(rf)\n', (4947, 4951), False, 'import pickle\n'), ((4979, 4994), 'pickle.load', 'pickle.load', (['rf'], {}), '(rf)\n', (4990, 4994), False, 'import pickle\n'), ((5483, 5905), 'nxsdk.api.n2a.CompartmentPrototype', 'nx.CompartmentPrototype', ([], {'logicalCoreId': 'coreIdx', 'compartmentCurrentDecay': '(4095)', 'compartmentVoltageDecay': '(4095)', 'biasMant': '(0 if not self.debug else self.gcInputBias)', 'vThMant': '(5 * 200 if not self.debug else self.gcInputBias // 64)', 'refractoryDelay': '(25)', 'vMinExp': '(0)', 'numDendriticAccumulators': '(64)', 'functionalState': 'nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE', 'thresholdBehavior': 'nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET'}), '(logicalCoreId=coreIdx, compartmentCurrentDecay=4095,\n compartmentVoltageDecay=4095, biasMant=0 if not self.debug else self.\n gcInputBias, vThMant=5 * 200 if not self.debug else self.gcInputBias //\n 64, refractoryDelay=25, vMinExp=0, numDendriticAccumulators=64,\n functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior\n =nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)\n', (5506, 5905), True, 'import nxsdk.api.n2a as nx\n'), ((7418, 7639), 'nxsdk.api.n2a.ConnectionPrototype', 'nx.ConnectionPrototype', ([], {'numDelayBits': '(6)', 'enableDelay': '(1)', 'signMode': 'nx.SYNAPSE_SIGN_MODE.EXCITATORY', 'postSynResponseMode': 'nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX', 'compressionMode': 'nx.SYNAPSE_COMPRESSION_MODE.SPARSE'}), '(numDelayBits=6, enableDelay=1, 
signMode=nx.\n SYNAPSE_SIGN_MODE.EXCITATORY, postSynResponseMode=nx.\n SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.\n SYNAPSE_COMPRESSION_MODE.SPARSE)\n', (7440, 7639), True, 'import nxsdk.api.n2a as nx\n'), ((7750, 7971), 'nxsdk.api.n2a.ConnectionPrototype', 'nx.ConnectionPrototype', ([], {'numDelayBits': '(6)', 'enableDelay': '(1)', 'signMode': 'nx.SYNAPSE_SIGN_MODE.INHIBITORY', 'postSynResponseMode': 'nx.SYNAPSE_POST_SYN_RESPONSE_MODE.BOX', 'compressionMode': 'nx.SYNAPSE_COMPRESSION_MODE.SPARSE'}), '(numDelayBits=6, enableDelay=1, signMode=nx.\n SYNAPSE_SIGN_MODE.INHIBITORY, postSynResponseMode=nx.\n SYNAPSE_POST_SYN_RESPONSE_MODE.BOX, compressionMode=nx.\n SYNAPSE_COMPRESSION_MODE.SPARSE)\n', (7772, 7971), True, 'import nxsdk.api.n2a as nx\n'), ((9810, 10003), 'nxsdk.api.n2a.ConnectionPrototype', 'nx.ConnectionPrototype', ([], {'delay': '(delay if not self.debug else 0)', 'numDelayBits': '(6)', 'enableDelay': '(1)', 'signMode': 'nx.SYNAPSE_SIGN_MODE.EXCITATORY', 'compressionMode': 'nx.SYNAPSE_COMPRESSION_MODE.SPARSE'}), '(delay=delay if not self.debug else 0, numDelayBits=6,\n enableDelay=1, signMode=nx.SYNAPSE_SIGN_MODE.EXCITATORY,\n compressionMode=nx.SYNAPSE_COMPRESSION_MODE.SPARSE)\n', (9832, 10003), True, 'import nxsdk.api.n2a as nx\n'), ((10994, 11312), 'nxsdk.api.n2a.CompartmentPrototype', 'nx.CompartmentPrototype', ([], {'logicalCoreId': 'coreIdx', 'compartmentCurrentDecay': '(0)', 'vThMant': '(10)', 'biasMant': 'self.inputBiases[coreIdx]', 'refractoryDelay': '(20)', 'vMinExp': '(0)', 'numDendriticAccumulators': '(64)', 'functionalState': 'nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE', 'thresholdBehavior': 'nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET'}), '(logicalCoreId=coreIdx, compartmentCurrentDecay=0,\n vThMant=10, biasMant=self.inputBiases[coreIdx], refractoryDelay=20,\n vMinExp=0, numDendriticAccumulators=64, functionalState=nx.\n COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior=nx.\n COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)\n', (11017, 11312), True, 'import nxsdk.api.n2a as nx\n'), ((11925, 12236), 'nxsdk.api.n2a.CompartmentPrototype', 'nx.CompartmentPrototype', ([], {'logicalCoreId': 'coreIdx', 'compartmentCurrentDecay': '(0)', 'compartmentVoltageDecay': '(4095)', 'vThMant': '(2)', 'refractoryDelay': '(19)', 'vMinExp': '(0)', 'numDendriticAccumulators': '(64)', 'functionalState': 'nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE', 'thresholdBehavior': 'nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET'}), '(logicalCoreId=coreIdx, compartmentCurrentDecay=0,\n compartmentVoltageDecay=4095, vThMant=2, refractoryDelay=19, vMinExp=0,\n numDendriticAccumulators=64, functionalState=nx.\n COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior=nx.\n COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)\n', (11948, 12236), True, 'import nxsdk.api.n2a as nx\n'), ((13690, 13957), 'nxsdk.api.n2a.CompartmentPrototype', 'nx.CompartmentPrototype', ([], {'logicalCoreId': 'i', 'compartmentCurrentDecay': '(4095)', 'vThMant': '(39)', 'biasMant': '(64)', 'numDendriticAccumulators': '(64)', 'vMinExp': '(0)', 'functionalState': 'nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE', 'thresholdBehavior': 'nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET'}), '(logicalCoreId=i, compartmentCurrentDecay=4095,\n vThMant=39, biasMant=64, numDendriticAccumulators=64, vMinExp=0,\n functionalState=nx.COMPARTMENT_FUNCTIONAL_STATE.IDLE, thresholdBehavior\n =nx.COMPARTMENT_THRESHOLD_MODE.SPIKE_AND_RESET)\n', (13713, 13957), True, 'import nxsdk.api.n2a as nx\n'), ((4855, 4897), 'os.path.join', 'os.path.join', (['data_dir', 
'windTunnelDataFile'], {}), '(data_dir, windTunnelDataFile)\n', (4867, 4897), False, 'import os\n'), ((7172, 7202), 'numpy.ones_like', 'np.ones_like', (['wgts[0, coreIdx]'], {}), '(wgts[0, coreIdx])\n', (7184, 7202), True, 'import numpy as np\n'), ((17443, 17459), 'numpy.nonzero', 'np.nonzero', (['data'], {}), '(data)\n', (17453, 17459), True, 'import numpy as np\n'), ((19546, 19580), 'numpy.nonzero', 'np.nonzero', (['spkProbe.data[offset:]'], {}), '(spkProbe.data[offset:])\n', (19556, 19580), True, 'import numpy as np\n'), ((19754, 19769), 'numpy.arange', 'np.arange', (['size'], {}), '(size)\n', (19763, 19769), True, 'import numpy as np\n'), ((20062, 20077), 'nxsdk.api.n2a.N2Compiler', 'nx.N2Compiler', ([], {}), '()\n', (20075, 20077), True, 'import nxsdk.api.n2a as nx\n'), ((7229, 7259), 'numpy.ones_like', 'np.ones_like', (['dlys[0, coreIdx]'], {}), '(dlys[0, coreIdx])\n', (7241, 7259), True, 'import numpy as np\n'), ((7290, 7320), 'numpy.ones_like', 'np.ones_like', (['wgts[1, coreIdx]'], {}), '(wgts[1, coreIdx])\n', (7302, 7320), True, 'import numpy as np\n'), ((7352, 7382), 'numpy.ones_like', 'np.ones_like', (['dlys[1, coreIdx]'], {}), '(dlys[1, coreIdx])\n', (7364, 7382), True, 'import numpy as np\n'), ((15843, 15860), 'numpy.asscalar', 'np.asscalar', (['cxId'], {}), '(cxId)\n', (15854, 15860), True, 'import numpy as np\n'), ((19805, 19818), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (19812, 19818), True, 'import numpy as np\n'), ((15993, 16010), 'numpy.asscalar', 'np.asscalar', (['cxId'], {}), '(cxId)\n', (16004, 16010), True, 'import numpy as np\n')] |
from Queue import LifoQueue
from collections import defaultdict
class Actions(object):
def __init__(self, activate, deactivate):
self.activate = activate
self.deactivate = deactivate
class MenuAction(object):
def __init__(self):
self.undo_commands = LifoQueue()
self.commands = defaultdict(Actions)
def set_command(self, item, activate, deactivate):
self.commands[item] = Actions(activate, deactivate)
def activate(self, item):
action = self.commands[item].activate
action.execute()
self.undo_commands.put(action)
def deactivate(self, item):
action = self.commands[item].deactivate
action.execute()
self.undo_commands.put(action)
def undo(self):
if not self.undo_commands.empty():
self.undo_commands.get().undo()
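
# Illustrative usage sketch (not part of the original module): MenuAction only
# assumes command objects exposing execute() and undo(), so a minimal stand-in
# command class is defined here purely for demonstration.
class _PrintCommand(object):
    def __init__(self, label):
        self.label = label

    def execute(self):
        print("execute: %s" % self.label)

    def undo(self):
        print("undo: %s" % self.label)


if __name__ == "__main__":
    menu = MenuAction()
    menu.set_command("light", _PrintCommand("light on"), _PrintCommand("light off"))
    menu.activate("light")    # runs and records the 'light on' command
    menu.deactivate("light")  # runs and records the 'light off' command
    menu.undo()               # replays undo() on the most recently executed command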
| [
"collections.defaultdict",
"Queue.LifoQueue"
] | [((294, 305), 'Queue.LifoQueue', 'LifoQueue', ([], {}), '()\n', (303, 305), False, 'from Queue import LifoQueue\n'), ((331, 351), 'collections.defaultdict', 'defaultdict', (['Actions'], {}), '(Actions)\n', (342, 351), False, 'from collections import defaultdict\n')] |
# Client Transports
import struct
import logging
from . import protocol as PROTOCOL # Constant
# Websockets
import asyncio
import websockets
import websockets.exceptions
import websockets.client
from websockets.client import connect as ws_connect # for autocomplete satisfaction
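# Pre-packed 4-byte big-endian PING opcode, sent periodically as the keep-alive payload.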
pingByte = struct.pack(">i",PROTOCOL.PING)
class Transport:
async def ping(self):
await self.send_raw(pingByte)
def __init__(self,ping_interval = 1000):
self.closed = False
self.lc = None
self.ping_interval = ping_interval
def after_connect(self):
asyncio.create_task(self.perodicPing(self.ping_interval))
def on_close(self):
if self.lc != None:
self.lc.transport_close(self)
def on_close_custom(self):
pass
def set_closed(self, newValue):
if self.closed != newValue:
if newValue:
self.closed = True
self.on_close()
self.on_close_custom()
else:
self.closed = newValue
async def send_internal(self, data):
raise NotImplementedError("Implemented in subclass only")
async def send_raw(self, byteData):
if isinstance(byteData, str):
byteData = bytes(byteData, PROTOCOL.ENCODING)
await self.send_internal(byteData)
async def perodicPing(self, interval):
while not self.closed:
await self.ping()
await asyncio.sleep(interval)
async def close(self):
'''
Note: this does not trigger on close handlers as this is a intentional close
'''
# No close. dummy
pass
async def create_connection(address):
# Mirror method
print("Trying to connect to",address)
connection = await ws_connect(address)
print("Established connection")
return connection
class WebsocketTransport(Transport):
def __init__(self, address):
super(WebsocketTransport, self).__init__()
# Synchronous Init
logging.info("Synchronously Starting Connection")
print("Running create connection")
# connectionCorountine = create_connection(connectionDesc)
# print(":P",connectionCorountine)
# print("waiting for corountine to finish")
# self.ws = asyncio.run_coroutine_threadsafe(create_connection(connectionDesc), asyncio.get_event_loop()).result()
self.address = address
async def connect(self):
self.ws = await ws_connect(self.address)
self.after_connect()
async def send_internal(self, data):
try:
await self.ws.ensure_open()
await self.ws.send(data)
except websockets.exceptions.ConnectionClosed:
# Pretend like the message was lost
self.set_closed(True)
return
async def loop(self):
        async for message in self.ws:
print(message)
async def close(self):
await self.ws.close() | [
"websockets.client.connect",
"logging.info",
"struct.pack",
"asyncio.sleep"
] | [((295, 327), 'struct.pack', 'struct.pack', (['""">i"""', 'PROTOCOL.PING'], {}), "('>i', PROTOCOL.PING)\n", (306, 327), False, 'import struct\n'), ((1784, 1803), 'websockets.client.connect', 'ws_connect', (['address'], {}), '(address)\n', (1794, 1803), True, 'from websockets.client import connect as ws_connect\n'), ((2019, 2068), 'logging.info', 'logging.info', (['"""Synchronously Starting Connection"""'], {}), "('Synchronously Starting Connection')\n", (2031, 2068), False, 'import logging\n'), ((2481, 2505), 'websockets.client.connect', 'ws_connect', (['self.address'], {}), '(self.address)\n', (2491, 2505), True, 'from websockets.client import connect as ws_connect\n'), ((1461, 1484), 'asyncio.sleep', 'asyncio.sleep', (['interval'], {}), '(interval)\n', (1474, 1484), False, 'import asyncio\n')] |
#!/usr/bin/env python
import roslib; roslib.load_manifest('pr2_draw')
import rospy
import draw_control as dc
if __name__ == '__main__':
rospy.init_node('draw_line')
draw_control = dc.DrawController()
orientation = draw_control.home_orientation
"""WARNING: use setup first!"""
draw_control.add_home_goal()
draw_control.add_move_goal((0.79,0,0), orientation, 200)
draw_control.add_move_goal((0.79,0,-0.1), orientation, 100)
draw_control.add_move_goal((0.75,0,-0.1), orientation, 100)
draw_control.add_home_goal()
draw_control.send()
| [
"rospy.init_node",
"draw_control.DrawController",
"roslib.load_manifest"
] | [((37, 69), 'roslib.load_manifest', 'roslib.load_manifest', (['"""pr2_draw"""'], {}), "('pr2_draw')\n", (57, 69), False, 'import roslib\n'), ((142, 170), 'rospy.init_node', 'rospy.init_node', (['"""draw_line"""'], {}), "('draw_line')\n", (157, 170), False, 'import rospy\n'), ((191, 210), 'draw_control.DrawController', 'dc.DrawController', ([], {}), '()\n', (208, 210), True, 'import draw_control as dc\n')] |
#!/usr/bin/env python3
import argparse
from npcaller.fasta import FastaReader, FastaWriter
from npcaller.fast5 import Fast5File
from npcaller.validator import align_to_reference, sam_to_bam
from tempfile import NamedTemporaryFile
from pysam import AlignmentFile
import numpy as np
import pandas
import logging
from multiprocessing import Pool
import itertools
import pickle
logging.basicConfig(level=logging.INFO)
class AlignmentEndException(Exception):
pass
class ModelMaker(object):
"""
Make a model from alignment of events to reference.
"""
def __init__(self, filelist, ref_file, model_basename, ncores, k, graphmap_bin):
"""
Args:
            filelist: file handle listing one fast5 file path per line
            ref_file: path to the reference sequence in fasta format
            model_basename: basename (prefix) for the output model and alignment files
            ncores: number of cores used for the alignment
            k: kmer length of the model
            graphmap_bin: path to the graphmap aligner binary
"""
self.ref_file = ref_file
self.model_basename = model_basename
self.ncores = ncores
self.k = k
self.graphmap_bin = graphmap_bin
self.logger = logging.getLogger("alignment")
self.logger.setLevel(logging.NOTSET)
fr = FastaReader(ref_file)
_, self.ref_seq = next(fr.get_entries())
self.f5files = {}
filelist = [l.strip() for l in filelist.readlines()]
for file in filelist:
f5file = Fast5File(file)
self.f5files[f5file.get_id()] = f5file
def make_all_models(self):
"""
Make the models for both template and complement.
"""
for strand in ["template", "complement"]:
self.make_model(strand)
def make_model(self, strand):
"""
Runner function: make the model for either template or complement strand.
Pickles the model to {basename}.strand.pickle.
Args:
            strand: either "template" or "complement"
"""
self.logger.info("Making model for {0}-strand".format(strand))
alignment = self._make_bam(strand)
correct_kmers = self._find_correct_kmers(alignment, strand)
model = self._make_stats(correct_kmers)
pickle.dump(model, open("{0}.{1}.{2}".format(self.model_basename, strand, "pickle"), 'wb'), protocol=2)
def _make_bam(self, strand):
"""
Extract sequences from fast5 files and map them to the reference sequence
Args:
strand: either template or complement
Returns:
AlignmentFile object of the generated bam-Alignment
"""
# with NamedTemporaryFile() as fasta_file:
with NamedTemporaryFile('w') as fasta_file:
samfile = self.model_basename + ".sam"
bamfile = self.model_basename
fw = FastaWriter(fasta_file)
for f5file in self.f5files.values():
header = f5file.get_id()
seq = f5file.get_seq(strand)
fw.write_entry(header, seq)
fw.flush()
align_to_reference(fasta_file, self.ref_file, samfile,
graphmap_bin=self.graphmap_bin, ncores=self.ncores)
sam_to_bam(samfile, bamfile)
return AlignmentFile(bamfile + ".bam")
def _find_correct_kmers(self, alignment, strand):
"""
Args:
alignment (AlignmentFile): Pysam Object of the bam-Alignment
strand (str): either template or complement
Returns:
list of correctly mapped events.
"""
total_events = 0
result = list()
reads = [x for x in alignment.fetch()]
self.logger.info("{0} reads found in alignment".format(len(reads)))
for read in reads:
f5file = self.f5files[read.query_name]
pairs = [list(t) for t in zip(*read.get_aligned_pairs())]
assert(pairs[0][0] == 0), "alignment is not null-indexed."
correct, total = self._process_events(f5file, pairs, strand)
total_events += total
result.append(correct)
# true_events = list(chain.from_iterable([r.get() for r in result]))
correct_events = [x for x in itertools.chain.from_iterable(result)]
self.logger.info("Identified {0} correct kmers of {1} total kmers. That's {2:%}".format(
len(correct_events), total_events, len(correct_events)/total_events
))
return correct_events
def _make_stats(self, correct_events):
"""
sort the events into their respective kmer-buckets and calculate the target
statistics (mean, sd) for the model
Args:
correct_events (list): list of correctly mapped events
Returns:
Pandas Dataframe containing the model
"""
self.logger.info("started calculating statistics")
all_kmers = ["".join(i) for i in itertools.product("ACGT", repeat=self.k)]
stat_map = {}
for attr in ["mean", "stdv"]:
stat_map[attr] = {kmer: [] for kmer in all_kmers}
for ev in correct_events:
stat_map[attr][ev["kmer"]].append(ev[attr])
# make model file
model = []
for kmer in all_kmers:
model.append({
"kmer": kmer,
"level_mean": np.mean(stat_map["mean"][kmer]),
"level_stdv": np.std(stat_map["mean"][kmer]),
"sd_mean": np.mean(stat_map["stdv"][kmer]),
"sd_stdv": np.std(stat_map["stdv"][kmer]),
"weight": 1000.0 # not implemented in this model, use neutral value
})
return pandas.DataFrame(model)
def _process_events(self, f5file, pairs, strand):
"""
Helper function which processes the events per f5file.
Args:
f5file (Fast5File):
pairs (list): list of pairs (read nt <-> ref nt)
strand: either template or complement
Returns:
correctly mapped events of the given file.
"""
i_seq = 0
correct = list()
total = 0
called_seq = f5file.get_seq(strand)
for ev in f5file.get_corrected_events(strand):
total += 1
i_seq += self._gapmove(ev["move"], pairs[0], i_seq)
try:
ev_index = self._event_indexes(pairs[0], i_seq)
except AlignmentEndException:
# not the whole read is aligned
break
read_kmer = self._get_nt_kmer(ev_index, pairs[0], called_seq)
assert(read_kmer == ev["kmer"]), (i_seq, ev, read_kmer, ev_index)
if self._is_correct_kmer(ev_index, pairs, called_seq):
ev["ref_position"] = pairs[1][ev_index[0]] # first position of kmer in reference
correct.append(ev)
return correct, total
def _event_indexes(self, pairing_seq, offset):
""" get the next entries from the pairing array
such that k non-gap characters are contained"""
count = 0
kmer = []
for i in range(offset, len(pairing_seq)):
if count == self.k: break
if pairing_seq[i] is not None:
count += 1
kmer.append(i)
if len(kmer) != self.k:
raise AlignmentEndException
return kmer
@staticmethod
def _gapmove(to_move, seq, offset):
"""move by 'move' (from metrichor) in the aligned sequence.
additionally increase index to compensate for each gap
"""
move = to_move
for i in seq[offset:]:
if i is None:
move += 1
else:
to_move -= 1
if to_move <= 0:
return move
@staticmethod
def _get_nt_kmer(index, pairs, seq):
"""convert sequence indexes into the corresponding nucleotides.
gaps are converted into ''
"""
seq_index = [pairs[x] for x in index]
nt_kmer = [seq[x] for x in seq_index]
return "".join(nt_kmer)
@staticmethod
def _is_consecutive_seq(seq):
"""check if the sequence 'seq' consists of consecutive numbers"""
return len(set(list(map(lambda ix:ix[1]-ix[0], enumerate(seq))))) <= 1
def _is_correct_kmer(self, ev_index, pairs, read):
"""check if a kmer corresponds completely wit the reference.
This is the case if:
* the read positions are consecutive (no indels)
* the ref positions are consecutive (no indels)
* the nucleotides are idential (no substitutions)
"""
assert(len(ev_index) == self.k), "invalid event index"
read_index = [pairs[0][x] for x in ev_index]
ref_index = [pairs[1][x] for x in ev_index]
if None in read_index or not ModelMaker._is_consecutive_seq(read_index):
"""indel in read"""
return False
if None in ref_index or not ModelMaker._is_consecutive_seq(ref_index):
"""indel in ref"""
return False
read_seq = [read[x] for x in read_index]
ref_seq = [self.ref_seq[x] for x in ref_index]
if read_seq == ref_seq:
"""full_match"""
return True
else:
"""substitution"""
return False
if __name__ == "__main__":
argp = argparse.ArgumentParser("Align processed reads from metrichor to reference; "
"extract correct kmers and calculate mean and stdv for the model. "
"Only data from 2D reads is used.")
argp.add_argument("-f", "--filelist", required=True, type=argparse.FileType('r'),
help="a list of fast5 files to be aligned, one per line")
argp.add_argument("-r", "--reference", required=True, type=argparse.FileType('r'),
help="fasta file with reference sequence")
argp.add_argument("-o", "--output", required=True, type=str,
help="model basename. /path/to/my_model will result in e.g. /path/to/my_model.template.pickle")
argp.add_argument("-n", "--ncores", required=False, type=int,
help="#CPU cores", default=None)
argp.add_argument("-k", "--kmer", required=False, type=int,
help="length of kmer", default=6)
argp.add_argument("-g", "--graphmap", required=False, type=str, default="",
help="Path to graphmap alignment tool")
args = argp.parse_args()
mm = ModelMaker(args.filelist, args.reference, args.output, args.ncores, args.kmer, args.graphmap)
mm.make_all_models() | [
"logging.basicConfig",
"logging.getLogger",
"argparse.FileType",
"numpy.mean",
"npcaller.validator.sam_to_bam",
"argparse.ArgumentParser",
"npcaller.validator.align_to_reference",
"numpy.std",
"itertools.product",
"npcaller.fasta.FastaReader",
"pysam.AlignmentFile",
"itertools.chain.from_iterable",
"tempfile.NamedTemporaryFile",
"pandas.DataFrame",
"npcaller.fast5.Fast5File",
"npcaller.fasta.FastaWriter"
] | [((374, 413), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (393, 413), False, 'import logging\n'), ((9243, 9428), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Align processed reads from metrichor to reference; extract correct kmers and calculate mean and stdv for the model. Only data from 2D reads is used."""'], {}), "(\n 'Align processed reads from metrichor to reference; extract correct kmers and calculate mean and stdv for the model. Only data from 2D reads is used.'\n )\n", (9266, 9428), False, 'import argparse\n'), ((1005, 1035), 'logging.getLogger', 'logging.getLogger', (['"""alignment"""'], {}), "('alignment')\n", (1022, 1035), False, 'import logging\n'), ((1095, 1116), 'npcaller.fasta.FastaReader', 'FastaReader', (['ref_file'], {}), '(ref_file)\n', (1106, 1116), False, 'from npcaller.fasta import FastaReader, FastaWriter\n'), ((3097, 3128), 'pysam.AlignmentFile', 'AlignmentFile', (["(bamfile + '.bam')"], {}), "(bamfile + '.bam')\n", (3110, 3128), False, 'from pysam import AlignmentFile\n'), ((5521, 5544), 'pandas.DataFrame', 'pandas.DataFrame', (['model'], {}), '(model)\n', (5537, 5544), False, 'import pandas\n'), ((1305, 1320), 'npcaller.fast5.Fast5File', 'Fast5File', (['file'], {}), '(file)\n', (1314, 1320), False, 'from npcaller.fast5 import Fast5File\n'), ((2515, 2538), 'tempfile.NamedTemporaryFile', 'NamedTemporaryFile', (['"""w"""'], {}), "('w')\n", (2533, 2538), False, 'from tempfile import NamedTemporaryFile\n'), ((2664, 2687), 'npcaller.fasta.FastaWriter', 'FastaWriter', (['fasta_file'], {}), '(fasta_file)\n', (2675, 2687), False, 'from npcaller.fasta import FastaReader, FastaWriter\n'), ((2902, 3013), 'npcaller.validator.align_to_reference', 'align_to_reference', (['fasta_file', 'self.ref_file', 'samfile'], {'graphmap_bin': 'self.graphmap_bin', 'ncores': 'self.ncores'}), '(fasta_file, self.ref_file, samfile, graphmap_bin=self.\n graphmap_bin, ncores=self.ncores)\n', (2920, 3013), False, 'from npcaller.validator import align_to_reference, sam_to_bam\n'), ((3052, 3080), 'npcaller.validator.sam_to_bam', 'sam_to_bam', (['samfile', 'bamfile'], {}), '(samfile, bamfile)\n', (3062, 3080), False, 'from npcaller.validator import align_to_reference, sam_to_bam\n'), ((9557, 9579), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (9574, 9579), False, 'import argparse\n'), ((9724, 9746), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (9741, 9746), False, 'import argparse\n'), ((4062, 4099), 'itertools.chain.from_iterable', 'itertools.chain.from_iterable', (['result'], {}), '(result)\n', (4091, 4099), False, 'import itertools\n'), ((4766, 4806), 'itertools.product', 'itertools.product', (['"""ACGT"""'], {'repeat': 'self.k'}), "('ACGT', repeat=self.k)\n", (4783, 4806), False, 'import itertools\n'), ((5192, 5223), 'numpy.mean', 'np.mean', (["stat_map['mean'][kmer]"], {}), "(stat_map['mean'][kmer])\n", (5199, 5223), True, 'import numpy as np\n'), ((5255, 5285), 'numpy.std', 'np.std', (["stat_map['mean'][kmer]"], {}), "(stat_map['mean'][kmer])\n", (5261, 5285), True, 'import numpy as np\n'), ((5314, 5345), 'numpy.mean', 'np.mean', (["stat_map['stdv'][kmer]"], {}), "(stat_map['stdv'][kmer])\n", (5321, 5345), True, 'import numpy as np\n'), ((5374, 5404), 'numpy.std', 'np.std', (["stat_map['stdv'][kmer]"], {}), "(stat_map['stdv'][kmer])\n", (5380, 5404), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#****************************************************************************************************************************************************
# Copyright (c) 2016 Freescale Semiconductor, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the Freescale Semiconductor, Inc. nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#****************************************************************************************************************************************************
import os
from FslBuildGen import IOUtil
from FslBuildGen.DataTypes import BuildPlatformType
from FslBuildGen.PackageConfig import PlatformNameString
#from FslBuildGen import PluginSharedValues
__g_posixPlatforms = [PlatformNameString.ANDROID, PlatformNameString.UBUNTU, PlatformNameString.YOCTO, PlatformNameString.QNX]
__g_ntPlatforms = [PlatformNameString.ANDROID, PlatformNameString.WINDOWS]
def AddExtraGenerators(platform: str) -> None:
if DetectBuildPlatformType() != BuildPlatformType.Windows:
return
__g_ntPlatforms.append(platform)
def DetectBuildPlatform() -> str:
sdkPlatformName = IOUtil.TryGetEnvironmentVariable('FSL_PLATFORM_NAME')
if os.name == 'posix':
if not sdkPlatformName:
raise EnvironmentError("Please make sure that the environment variable FSL_PLATFORM_NAME is set")
if sdkPlatformName not in __g_posixPlatforms:
raise EnvironmentError("Please make sure that the environment variable FSL_PLATFORM_NAME is set to one of these {0}".format(__g_posixPlatforms))
elif os.name == 'nt':
if not sdkPlatformName:
return PlatformNameString.WINDOWS
if sdkPlatformName not in __g_ntPlatforms:
raise EnvironmentError("Please make sure that the environment variable FSL_PLATFORM_NAME is set to one of these {0}".format(__g_ntPlatforms))
else:
raise EnvironmentError("Unsupported build environment")
return sdkPlatformName
def DetectBuildPlatformType() -> int:
if os.name == 'posix':
return BuildPlatformType.Unix
elif os.name == 'nt':
return BuildPlatformType.Windows
return BuildPlatformType.Unknown
def TryCheckBuildPlatform(platform: str) -> bool:
buildPlatformType = DetectBuildPlatformType()
if buildPlatformType == BuildPlatformType.Unix and platform in __g_posixPlatforms:
return True
elif buildPlatformType == BuildPlatformType.Windows and platform in __g_ntPlatforms:
return True
return False
def CheckBuildPlatform(platform: str) -> None:
if TryCheckBuildPlatform(platform):
return
raise EnvironmentError("Unsupported build environment for '{0}'".format(platform))
| [
"FslBuildGen.IOUtil.TryGetEnvironmentVariable"
] | [((2538, 2591), 'FslBuildGen.IOUtil.TryGetEnvironmentVariable', 'IOUtil.TryGetEnvironmentVariable', (['"""FSL_PLATFORM_NAME"""'], {}), "('FSL_PLATFORM_NAME')\n", (2570, 2591), False, 'from FslBuildGen import IOUtil\n')] |
from http.server import HTTPServer, BaseHTTPRequestHandler
import ssl
# Simple HTTP server that serves over HTTPS
# Make sure that key.pem and cert.pem are generated.
# You can use the ./generate-certs.sh script provided in this repo.
class web_server(BaseHTTPRequestHandler):
def do_GET(self):
if self.path == '/':
self.path = '/encrypt-file.html'
try:
#Reading the file
file_to_open = open(self.path[1:]).read()
self.send_response(200)
except:
file_to_open = "File not found"
self.send_response(404)
self.end_headers()
self.wfile.write(bytes(file_to_open, 'utf-8'))
httpd = HTTPServer(('192.168.93.122', 4443), web_server)
httpd.socket = ssl.wrap_socket (httpd.socket,
keyfile="./key.pem",
certfile='./cert.pem', server_side=True)
httpd.serve_forever() | [
"http.server.HTTPServer",
"ssl.wrap_socket"
] | [((611, 659), 'http.server.HTTPServer', 'HTTPServer', (["('192.168.93.122', 4443)", 'web_server'], {}), "(('192.168.93.122', 4443), web_server)\n", (621, 659), False, 'from http.server import HTTPServer, BaseHTTPRequestHandler\n'), ((676, 771), 'ssl.wrap_socket', 'ssl.wrap_socket', (['httpd.socket'], {'keyfile': '"""./key.pem"""', 'certfile': '"""./cert.pem"""', 'server_side': '(True)'}), "(httpd.socket, keyfile='./key.pem', certfile='./cert.pem',\n server_side=True)\n", (691, 771), False, 'import ssl\n')] |
from matplotlib import pyplot as plt
from PIL import Image
import pandas as pd
import numpy as np
from tqdm import tqdm
# read pred and gt info
train_best_df = pd.read_csv('train_rank_best_2000_img_id.txt', sep=' ', header=None, index_col=False)
test_best_df = pd.read_csv('test_rank_best_200_img_id.txt', sep=' ', header=None, index_col=False)
test_worst_df = pd.read_csv('test_rank_worst_200_img_id.txt', sep=' ', header=None, index_col=False)
train_worst_df = pd.read_csv('train_rank_worst_2000_img_id.txt', sep=' ', header=None, index_col=False)
train_refer_df = pd.read_csv('train_refer_100.txt', sep=' ', header=None, index_col=False)
test_refer_df = pd.read_csv('test_refer_100.txt', sep=' ', header=None, index_col=False)
gt_anno = pd.read_csv('../data/all_anno_new.txt', sep=' ', header=None, index_col=False)
# plot img and anno
def plot_img_pred_gt_anno_score(input_list, refer_df, gt_anno, img_root, save_root):
# max score and min score refer
for i in tqdm(range(len(input_list))):
fig1 = plt.figure(figsize=(12, 6))
x = np.arange(1, 11)
gt_y = gt_anno[gt_anno[0] == input_list[i]].iloc[0, 1:11].values
gt_score = gt_anno[gt_anno[0] == input_list[i]].iloc[0, 11]
ax1 = fig1.add_subplot(241)
img_data = Image.open(img_root + str(input_list[i]) + '.jpg')
ax1.imshow(img_data)
ax1.axis('off')
pred_title = 'gt_score: ' + str(round(gt_score, 3))
ax1.set_title(pred_title, fontsize=8, color='b')
# plt.bar(x, pred_y, color='r', alpha=0.5, width=0.4, label='pred')
ax2 = fig1.add_subplot(245)
# ax2.axis('off')
plt.bar(x, gt_y, color='g', alpha=0.5, width=0.4, label='gt')
plt.ylim(0, 0.5)
# plt.legend(loc='upper right')
# refer list
refer_list = refer_df[refer_df[0]==input_list[i]].iloc[0, 1:4].values
for j in range(len(refer_list)):
# ax fig
index_str1 = 242 + j
ax = fig1.add_subplot(index_str1)
refer_data = Image.open(img_root + str(int(refer_list[j])) + '.jpg')
ax.imshow(refer_data)
ax.axis('off')
refer_score = gt_anno[gt_anno[0] == refer_list[j]].iloc[0, 11]
refer_title = 'gt_score: ' + str(round(refer_score, 3))
ax.set_title(refer_title, fontsize=8, color='b')
index_str2 = 246 + j
ax2 = fig1.add_subplot(index_str2)
refer_anno = gt_anno[gt_anno[0]==refer_list[j]].iloc[0, 1:11].values
plt.bar(x, refer_anno, color='g', alpha=0.5, width=0.4, label='gt')
plt.ylim(0, 0.5)
# ax2.axis('off')
# plt.show()
save_path = save_root + str(i) + '_' + str(input_list[i])
fig1.savefig(save_path)
img_root = '/home/flyingbird/1_Data/images/'
# test_worst_list = list(test_worst_df[0])
# save_root = 'test_worst_200_refer_anno_score/'
# plot_img_pred_gt_anno_score(test_worst_list, test_refer_df, gt_anno, img_root, save_root)
# test_best
# test_best_list = list(test_best_df[0])
train_best_list = list(train_best_df[0])
save_root = 'train_best_2000_refer_anno_score/'
plot_img_pred_gt_anno_score(train_best_list, train_refer_df, gt_anno, img_root, save_root) | [
"pandas.read_csv",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.ylim",
"numpy.arange"
] | [((161, 250), 'pandas.read_csv', 'pd.read_csv', (['"""train_rank_best_2000_img_id.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('train_rank_best_2000_img_id.txt', sep=' ', header=None,\n index_col=False)\n", (172, 250), True, 'import pandas as pd\n'), ((262, 349), 'pandas.read_csv', 'pd.read_csv', (['"""test_rank_best_200_img_id.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('test_rank_best_200_img_id.txt', sep=' ', header=None,\n index_col=False)\n", (273, 349), True, 'import pandas as pd\n'), ((362, 450), 'pandas.read_csv', 'pd.read_csv', (['"""test_rank_worst_200_img_id.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('test_rank_worst_200_img_id.txt', sep=' ', header=None,\n index_col=False)\n", (373, 450), True, 'import pandas as pd\n'), ((464, 554), 'pandas.read_csv', 'pd.read_csv', (['"""train_rank_worst_2000_img_id.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('train_rank_worst_2000_img_id.txt', sep=' ', header=None,\n index_col=False)\n", (475, 554), True, 'import pandas as pd\n'), ((569, 642), 'pandas.read_csv', 'pd.read_csv', (['"""train_refer_100.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('train_refer_100.txt', sep=' ', header=None, index_col=False)\n", (580, 642), True, 'import pandas as pd\n'), ((659, 731), 'pandas.read_csv', 'pd.read_csv', (['"""test_refer_100.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('test_refer_100.txt', sep=' ', header=None, index_col=False)\n", (670, 731), True, 'import pandas as pd\n'), ((743, 821), 'pandas.read_csv', 'pd.read_csv', (['"""../data/all_anno_new.txt"""'], {'sep': '""" """', 'header': 'None', 'index_col': '(False)'}), "('../data/all_anno_new.txt', sep=' ', header=None, index_col=False)\n", (754, 821), True, 'import pandas as pd\n'), ((1023, 1050), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1033, 1050), True, 'from matplotlib import pyplot as plt\n'), ((1063, 1079), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (1072, 1079), True, 'import numpy as np\n'), ((1645, 1706), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'gt_y'], {'color': '"""g"""', 'alpha': '(0.5)', 'width': '(0.4)', 'label': '"""gt"""'}), "(x, gt_y, color='g', alpha=0.5, width=0.4, label='gt')\n", (1652, 1706), True, 'from matplotlib import pyplot as plt\n'), ((1715, 1731), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (1723, 1731), True, 'from matplotlib import pyplot as plt\n'), ((2535, 2602), 'matplotlib.pyplot.bar', 'plt.bar', (['x', 'refer_anno'], {'color': '"""g"""', 'alpha': '(0.5)', 'width': '(0.4)', 'label': '"""gt"""'}), "(x, refer_anno, color='g', alpha=0.5, width=0.4, label='gt')\n", (2542, 2602), True, 'from matplotlib import pyplot as plt\n'), ((2615, 2631), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(0.5)'], {}), '(0, 0.5)\n', (2623, 2631), True, 'from matplotlib import pyplot as plt\n')] |
import logging
from hashids import Hashids
LOG_FORMAT = '%(levelname) -10s %(asctime)s %(name) -30s %(funcName) -35s %(lineno) -5d: %(message)s'
logger = logging.getLogger(__name__)
class NIdNoise:
NSALT = "dakjsl#^%6bqhcjhb"
HASH_LENGTH = 11
# Here will be the instance stored.
__instance = None
@staticmethod
def get_instance():
""" Static access method. """
if NIdNoise.__instance == None:
NIdNoise()
return NIdNoise.__instance
def __init__(self):
""" Virtually private constructor. """
if NIdNoise.__instance != None:
raise Exception("NIdNoise class is a singleton!")
else:
self.hashids = Hashids(salt=self.NSALT, min_length=self.HASH_LENGTH)
NIdNoise.__instance = self
def ennoise_id(self, id):
if id >= 0:
return self.hashids.encode(id)
else:
return ''
def denoise_id(self, nid):
if nid:
return self.hashids.decode(nid)[0]
else:
return ''
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT)
for i in range(100):
id1 = i
nid1 = NIdNoise.get_instance().ennoise_id(id=id1)
logging.info("id1: " + str(id1) + " --> nid1: " + str(nid1))
nid2 = nid1
id2 = NIdNoise.get_instance().denoise_id(nid=nid2)
logging.info("nid2: " + str(nid2) + " --> id2: " + str(id2))
logging.info("id1 == id2: %s", str(id1 == id2))
| [
"logging.getLogger",
"hashids.Hashids",
"logging.basicConfig"
] | [((155, 182), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (172, 182), False, 'import logging\n'), ((1098, 1157), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': 'LOG_FORMAT'}), '(level=logging.DEBUG, format=LOG_FORMAT)\n', (1117, 1157), False, 'import logging\n'), ((711, 764), 'hashids.Hashids', 'Hashids', ([], {'salt': 'self.NSALT', 'min_length': 'self.HASH_LENGTH'}), '(salt=self.NSALT, min_length=self.HASH_LENGTH)\n', (718, 764), False, 'from hashids import Hashids\n')] |
import time
import json
import logging
import tensorflow as tf
from collections import OrderedDict
from bottle import Bottle, request, response
from icyserver.icy import new_icy
logger = logging.getLogger(__name__)
class App:
def __init__(self):
self.bottle = Bottle()
def load_model(self, model_dir, beam_width, beam_steps, steps, max_context):
self.icy = new_icy(model_dir, beam_width, beam_steps, steps, max_context)
app = App()
def filter_items(items, probs):
counter = OrderedDict()
for item, p in zip(items, probs):
item = item.splitlines(keepends=False)[0].rstrip()
if not item.strip():
continue
if item in counter:
counter[item] += p
else:
counter[item] = p
return sorted(counter.items(), key=lambda x: x[1], reverse=True)
@app.bottle.post('/completions')
def completions():
data = request.body.read()
data = json.loads(data.decode('utf8'))
filepath = data['filepath']
line_num = data['line_num']
column_num = data['column_num']
contents = data['file_data'].get(filepath, {}).get('contents')
if contents is None:
return '[]'
if column_num <= 1:
return '[]'
contents = contents.splitlines(keepends=False)
front_half = contents[:line_num]
front_half[-1] = front_half[-1][:column_num-1]
context = '\n'.join(front_half)
history = data.get('history')
if history: # interactive bash
context = history + context
t0 = time.time()
logger.info(f'context: \n----\n[{context}]\n')
result = app.icy.predict(context, filepath)
if result is None:
return '{}'
n, prefix, items, probs = result
logger.info(f'cost {time.time()-t0} seconds, candidates: {items!r}')
items = filter_items(items, probs)
logger.info(f'final candidates: {items}')
completions = [
{"insertion_text": item, "extra_menu_info": "{: >6.3f}".format(p*100)}
for item, p in items
]
if prefix.strip():
prefix_item = {"insertion_text": prefix.rstrip()}
if len(completions) == 0:
completions.insert(0, prefix_item)
else:
completions.insert(1, prefix_item)
result = {"completions": completions,
"completion_start_column": max(data['column_num'] - n, 0),
"errors": []}
response.set_header('Content-Type', 'application/json')
return json.dumps(result)
def main(model_dir="./hub1000", beam_width=8, beam_steps=3, steps=10, max_context=300):
logging.basicConfig(level=logging.DEBUG)
physical_devices = tf.config.experimental.list_physical_devices('GPU')
if len(physical_devices) > 0:
tf.config.experimental.set_memory_growth(physical_devices[0], True)
app.load_model(model_dir, beam_width, beam_steps, steps, max_context)
app.bottle.run(host='0.0.0.0', port=10086)
if __name__ == '__main__':
import fire
fire.Fire(main)
| [
"logging.getLogger",
"logging.basicConfig",
"collections.OrderedDict",
"bottle.response.set_header",
"fire.Fire",
"tensorflow.config.experimental.set_memory_growth",
"bottle.Bottle",
"json.dumps",
"bottle.request.body.read",
"time.time",
"icyserver.icy.new_icy",
"tensorflow.config.experimental.list_physical_devices"
] | [((190, 217), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (207, 217), False, 'import logging\n'), ((512, 525), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (523, 525), False, 'from collections import OrderedDict\n'), ((910, 929), 'bottle.request.body.read', 'request.body.read', ([], {}), '()\n', (927, 929), False, 'from bottle import Bottle, request, response\n'), ((1519, 1530), 'time.time', 'time.time', ([], {}), '()\n', (1528, 1530), False, 'import time\n'), ((2377, 2432), 'bottle.response.set_header', 'response.set_header', (['"""Content-Type"""', '"""application/json"""'], {}), "('Content-Type', 'application/json')\n", (2396, 2432), False, 'from bottle import Bottle, request, response\n'), ((2444, 2462), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2454, 2462), False, 'import json\n'), ((2557, 2597), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (2576, 2597), False, 'import logging\n'), ((2622, 2673), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (2666, 2673), True, 'import tensorflow as tf\n'), ((2954, 2969), 'fire.Fire', 'fire.Fire', (['main'], {}), '(main)\n', (2963, 2969), False, 'import fire\n'), ((277, 285), 'bottle.Bottle', 'Bottle', ([], {}), '()\n', (283, 285), False, 'from bottle import Bottle, request, response\n'), ((387, 449), 'icyserver.icy.new_icy', 'new_icy', (['model_dir', 'beam_width', 'beam_steps', 'steps', 'max_context'], {}), '(model_dir, beam_width, beam_steps, steps, max_context)\n', (394, 449), False, 'from icyserver.icy import new_icy\n'), ((2716, 2783), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), '(physical_devices[0], True)\n', (2756, 2783), True, 'import tensorflow as tf\n'), ((1734, 1745), 'time.time', 'time.time', ([], {}), '()\n', (1743, 1745), False, 'import time\n')] |
from six import text_type
from django.contrib.auth.tokens import PasswordResetTokenGenerator
class AccountActivation(PasswordResetTokenGenerator):
def _make_hash_value(self, user, timestamp):
return (text_type(user.pk) + text_type(timestamp) +
text_type(user.email_verified))
activater = AccountActivation()
| [
"six.text_type"
] | [((275, 305), 'six.text_type', 'text_type', (['user.email_verified'], {}), '(user.email_verified)\n', (284, 305), False, 'from six import text_type\n'), ((215, 233), 'six.text_type', 'text_type', (['user.pk'], {}), '(user.pk)\n', (224, 233), False, 'from six import text_type\n'), ((236, 256), 'six.text_type', 'text_type', (['timestamp'], {}), '(timestamp)\n', (245, 256), False, 'from six import text_type\n')] |
# Generated by Django 3.2.7 on 2021-09-17 16:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('configuration', '0002_configuration_shipping_origin'),
]
operations = [
migrations.AlterField(
model_name='configuration',
name='currency',
field=models.CharField(blank=True, default='USD', help_text='The iso currency code to use for payments', max_length=6, null=True),
),
migrations.AlterField(
model_name='configuration',
name='currency_html_code',
field=models.CharField(blank=True, default='£', help_text='The HTML code for the currency symbol. Used for display purposes only', max_length=12, null=True),
),
migrations.AlterField(
model_name='configuration',
name='default_shipping_carrier',
field=models.CharField(blank=True, default='Royal Mail', help_text='The default shipping carrier', max_length=32, null=True),
),
migrations.AlterField(
model_name='configuration',
name='default_shipping_rate',
field=models.DecimalField(blank=True, decimal_places=2, default=3.95, help_text='The default shipping rate for countries which have not been configured', max_digits=12, null=True),
),
migrations.AlterField(
model_name='configuration',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
| [
"django.db.models.DecimalField",
"django.db.models.CharField",
"django.db.models.BigAutoField"
] | [((362, 490), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '"""USD"""', 'help_text': '"""The iso currency code to use for payments"""', 'max_length': '(6)', 'null': '(True)'}), "(blank=True, default='USD', help_text=\n 'The iso currency code to use for payments', max_length=6, null=True)\n", (378, 490), False, 'from django.db import migrations, models\n'), ((626, 791), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '"""£"""', 'help_text': '"""The HTML code for the currency symbol. Used for display purposes only"""', 'max_length': '(12)', 'null': '(True)'}), "(blank=True, default='£', help_text=\n 'The HTML code for the currency symbol. Used for display purposes only',\n max_length=12, null=True)\n", (642, 791), False, 'from django.db import migrations, models\n'), ((929, 1052), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'default': '"""Royal Mail"""', 'help_text': '"""The default shipping carrier"""', 'max_length': '(32)', 'null': '(True)'}), "(blank=True, default='Royal Mail', help_text=\n 'The default shipping carrier', max_length=32, null=True)\n", (945, 1052), False, 'from django.db import migrations, models\n'), ((1191, 1373), 'django.db.models.DecimalField', 'models.DecimalField', ([], {'blank': '(True)', 'decimal_places': '(2)', 'default': '(3.95)', 'help_text': '"""The default shipping rate for countries which have not been configured"""', 'max_digits': '(12)', 'null': '(True)'}), "(blank=True, decimal_places=2, default=3.95, help_text=\n 'The default shipping rate for countries which have not been configured',\n max_digits=12, null=True)\n", (1210, 1373), False, 'from django.db import migrations, models\n'), ((1489, 1585), 'django.db.models.BigAutoField', 'models.BigAutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (1508, 1585), False, 'from django.db import migrations, models\n')] |
import sys
__version__ = '0.4.4'
def main():
try:
from baker.commands import execute_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Baker is missing to avoid masking other exceptions.
try:
import baker # noqa: F401
except ImportError:
raise ImportError(
"Couldn't import Baker. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_command_line(sys.argv)
| [
"baker.commands.execute_command_line"
] | [((651, 681), 'baker.commands.execute_command_line', 'execute_command_line', (['sys.argv'], {}), '(sys.argv)\n', (671, 681), False, 'from baker.commands import execute_command_line\n')] |