metadata (dict) | text (stringlengths 60 to 3.49M) |
---|---|
{
"source": "joodo/PARL",
"score": 3
} |
#### File: parl/framework/agent_base.py
```python
import paddle.fluid as fluid
import parl.layers as layers
from parl.framework.algorithm_base import Algorithm
from parl.framework.model_base import Model
from parl.utils import get_gpu_count
__all__ = ['Agent']
class Agent(object):
"""
A Agent is responsible for the general data flow
outside the algorithm.
A Agent is created in a bottom-up way:
a. create a Model
b. create an Algorithm with the model as an input
c. define a Agent with the algorithm
"""
def __init__(self, algorithm, gpu_id=None):
""" build program and run initialization for default_startup_program
Created object:
self.alg: parl.framework.Algorithm
self.gpu_id: int
self.fluid_executor: fluid.Executor
"""
assert isinstance(algorithm, Algorithm)
self.alg = algorithm
self.build_program()
if gpu_id is None:
gpu_id = 0 if get_gpu_count() > 0 else -1
self.gpu_id = gpu_id
place = fluid.CUDAPlace(gpu_id) if gpu_id >= 0 else fluid.CPUPlace()
self.fluid_executor = fluid.Executor(place)
self.fluid_executor.run(fluid.default_startup_program())
def build_program(self):
"""build your training program and prediction program here,
using the functions define_learn and define_predict in algorithm.
To build the program, you may need to do the following:
a. create a new program in fluid with program guard
b. define your data layer
c. build your training/prediction program, pass the data variable
defined in step b to `define_training/define_prediction` of algorithm
"""
raise NotImplementedError
def predict(self, obs):
"""This function will predict the action given current observation of the enviroment.
Note that this function will only do the prediction and it doesn't try any exploration,
To explore in the action space, you should create your process in `sample` function below.
In formally, this function is often used in test process.
"""
raise NotImplementedError
def sample(self, obs):
"""This function will predict the action given current observation of the enviroment.
Additionaly, action will be added noise here to explore a new trajectory. In formally,
this function is often used in training process.
"""
raise NotImplementedError
def learn(self, obs, action, reward, next_obs, terminal):
"""pass data to the training program to update model,
this function is the training interface for Agent.
"""
raise NotImplementedError
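# A minimal, hypothetical sketch of a concrete Agent that follows the
# docstrings above; it is not part of the original PARL file. It assumes the
# wrapped Algorithm exposes `define_predict` returning an action variable and
# that observations are flat float32 vectors of length `obs_dim`.
class ExampleAgent(Agent):
    def __init__(self, algorithm, obs_dim, gpu_id=None):
        self.obs_dim = obs_dim
        super(ExampleAgent, self).__init__(algorithm, gpu_id)

    def build_program(self):
        # a. create a new program in fluid with a program guard
        self.predict_program = fluid.Program()
        with fluid.program_guard(self.predict_program):
            # b. define the data layer for observations
            obs = fluid.layers.data(
                name='obs', shape=[self.obs_dim], dtype='float32')
            # c. pass the data variable to the algorithm's define_predict
            self.pred_act = self.alg.define_predict(obs)

    def predict(self, obs):
        # feed a batch of observations and fetch the predicted actions
        act = self.fluid_executor.run(
            self.predict_program,
            feed={'obs': obs},
            fetch_list=[self.pred_act])[0]
        return act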
``` |
{
"source": "Jooeeee/Excel_Diff",
"score": 2
} |
#### File: Excel_Diff/code/GUI.py
```python
import sys
import os
from PyQt4 import QtGui,QtCore
from ExcelDiff import *
from functools import partial
import time
from threading import Thread
class MainUi(QtGui.QMainWindow):
def __init__(self, parent=None):
super(MainUi, self).__init__(parent)
self.init_ui()
def init_ui(self):
self.setBaseSize(960,700)
self.setWindowTitle("Excel Diff")
self.setWindowOpacity(0.97) # set the window opacity
#self.setAttribute(QtCore.Qt.WA_TranslucentBackground) # make the window background transparent
#main window widget
self.main_widget=QtGui.QWidget()
self.main_layout=QtGui.QGridLayout()
self.main_widget.setLayout(self.main_layout)
#top widget
self.top_widget=QtGui.QWidget()
self.top_widget.setObjectName('top_widget')
self.top_layout=QtGui.QGridLayout()
self.top_widget.setLayout(self.top_layout)
self.top_widget.setStyleSheet('''
QWidget#top_widget{
background:#F0FFF0;
border-top:1px solid white;
border-bottom:1px solid white;
border-left:1px solid white;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
QLabel#left_label{
border:none;
font-size:20px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
''')
#bottom widget
self.bottom_widget = QtGui.QWidget()
self.bottom_widget.setObjectName('bottom_widget')
self.bottom_layout = QtGui.QGridLayout()
self.bottom_widget.setLayout(self.bottom_layout)
#layout management
self.main_layout.addWidget(self.top_widget,0,0,2,14)
self.main_layout.addWidget(self.bottom_widget,2,0,19,14)
self.main_layout.setSpacing(0)
self.setCentralWidget(self.main_widget)
self.make_top()
def make_top(self):
# file selection buttons
self.file_text1 = QtGui.QLineEdit()
self.file_button1 = QtGui.QPushButton(u'请选择文件1: ')
self.file_button1.clicked.connect(partial(self.select_file,self.file_text1))
self.file_text2 = QtGui.QLineEdit()
self.file_button2 = QtGui.QPushButton(u'请选择文件2: ')
self.file_button2.clicked.connect(partial(self.select_file, self.file_text2))
# diff action button
self.diff_button = QtGui.QPushButton(u'diff')
self.diffrst=None
self.diff_button.clicked.connect(self.diff)
self.top_layout.addWidget(self.file_button1,0,0,1,1)
self.top_layout.addWidget(self.file_text1,0,1,1,2)
self.top_layout.addWidget(self.file_button2,0,4,1,1)
self.top_layout.addWidget(self.file_text2,0,5,1,2)
self.top_layout.addWidget(self.diff_button,0,13,1,1)
#progress bar
self.process_bar=QtGui.QProgressBar()
self.process_bar.setValue(0)
self.timer=QtCore.QBasicTimer()
self.timer_step=0
self.process_bar.setFixedHeight(10)
self.top_layout.addWidget(self.process_bar,1,1,1,10)
#build the bottom area
#self.make_bottom()
#file selection action
def select_file(self,txt):
filepath=QtGui.QFileDialog.getOpenFileName(self,u'选择文件','./','Excel(*.xls*);;All Files(*)')
txt.setText(filepath)
#diff action
def diff(self):
self.timer_step=0
if not os.path.isfile(self.file_text1.text().__str__()):
self.file_text1.setText('')
self.file_text1.setPlaceholderText(u"请选择文件")
else:
if not os.path.isfile(self.file_text2.text().__str__()):
self.file_text2.setText('')
self.file_text2.setPlaceholderText(u"请选择文件")
else:
for i in reversed(range(0, self.bottom_layout.count())):
self.bottom_layout.itemAt(i).widget().setParent(None)
self.process_bar.reset()
timeStart=time.time()
diffthread = Thread(target=self.diffThread)
prossbarthread=Thread(target=self.processBarThread)
diffthread.start()
prossbarthread.start()
diffthread.join()
prossbarthread.join()
timeEnd=time.time()
# print u"time consumed %.3fseconds"%(timeEnd-timeStart)
self.position=self.positionFind()
self.make_bottom()
self.process_bar.setValue(100)
# print u'Diff successed'
def diffThread(self):
excelDiff = ExcelDiff()
self.diffrst = excelDiff.main(self.file_text1.text(), self.file_text2.text())
self.timer_step=99
def processBarThread(self):
while self.timer_step<99:
self.timer_step+=1
self.process_bar.setValue(self.timer_step)
time.sleep(0.5)
def timerEvent(self, *args, **kwargs):
if self.timer_step>=99:
self.timer.stop()
return
self.timer_step+=1
self.process_bar.setValue(self.timer_step)
def make_bottom(self):
for i in reversed(range(0, self.bottom_layout.count())):
self.bottom_layout.itemAt(i).widget().setParent(None)
#left widget
self.left_widget=QtGui.QWidget()
self.left_widget.setObjectName('left_widget')
self.left_layout=QtGui.QGridLayout()
self.left_widget.setLayout(self.left_layout)
#right widget
self.right_widget=QtGui.QWidget()
self.right_layout=QtGui.QGridLayout()
self.right_widget.setLayout(self.right_layout)
#widgets showing the two sheets
self.tabWidget1 = QtGui.QTableWidget()
self.tabWidget2 = QtGui.QTableWidget()
#buttons for row/column add/delete and cell changes
self.row_button=QtGui.QPushButton(u'行增删')
self.col_button=QtGui.QPushButton(u'列增删')
self.cell_button=QtGui.QPushButton(u'单元格改动')
self.clearHighlight_button=QtGui.QPushButton(u'清除高亮')
self.detail=QtGui.QTableWidget()
self.right_layout.addWidget(self.tabWidget1,0,0,15,6)
self.right_layout.addWidget(self.tabWidget2,0,6,15,6)
self.right_layout.addWidget(self.row_button,15,0,1,2)
self.right_layout.addWidget(self.col_button,15,2,1,2)
self.right_layout.addWidget(self.cell_button,15,4,1,2)
self.right_layout.addWidget(self.clearHighlight_button,15,6,1,2)
self.right_layout.addWidget(self.detail,16,0,3,12)
#main layout management
self.bottom_layout.addWidget(self.left_widget,0,0,19,1)
self.bottom_layout.addWidget(self.right_widget,0,1,19,13)
self.make_left()
def make_left(self):
#build the sheet selection buttons on the left
for i in reversed(range(0, self.left_layout.count())):
self.left_layout.itemAt(i).widget().setParent(None)
label = QtGui.QLabel('sheetnames')
label.setObjectName('left_label')
self.left_layout.addWidget(label,0,0,1,2,QtCore.Qt.AlignTop)
self.sheet_button_list=[]
for i in range(len(self.diffrst)):
sheet_button = QtGui.QPushButton()
sheet_button.setText(self.diffrst[i][0])
sheet_button.clicked.connect(partial(self.show_sheets,i))
self.left_layout.addWidget(sheet_button,i+1,0,1,1,QtCore.Qt.AlignTop)
self.sheet_button_list.append(sheet_button)
self.left_layout.setAlignment(QtCore.Qt.AlignTop)
self.click_sheet=0
self.show_sheets(self.click_sheet)
self.left_widget.setStyleSheet('''
QWidget#left_widget{
background:gray;
border-top:1px solid white;
border-bottom:1px solid white;
border-left:1px solid white;
border-top-left-radius:10px;
border-bottom-left-radius:10px;
border-top-right-radius:10px;
border-bottom-right-radius:10px;
}
QLabel#left_label{
border:none;
font-size:20px;
font-weight:700;
font-family: "Helvetica Neue", Helvetica, Arial, sans-serif;
}
''')
def show_sheets(self,i):
self.detail.clearContents()
self.sheet_button_list[self.click_sheet].setStyleSheet('background-color:rgb(255,255,255)')
self.click_sheet=i
self.sheet_button_list[i].setStyleSheet('background-color:rgb(100,100,150)')
diff=self.diffrst[i]
self.set_sheets(self.tabWidget1,diff[1],1,i)
self.set_sheets(self.tabWidget2,diff[2],2,i)
self.show_detail(diff[3],i)
def set_sheets(self,tw,diff,num,sn):
lrow=len(diff)
tw.setRowCount(lrow)
if lrow>0:
lcol=max([len(x)for x in diff])
else:
lcol=0
tw.setColumnCount(lcol)
vheadlable=[]
if num >1:
for i in range(lrow):
if self.position[sn][1][1][i]<0:
vheadlable.append(' ')
else:
vheadlable.append(str(self.position[sn][1][1][i]+1))
else:
for i in range(lrow):
if self.position[sn][1][0][i]<0:
vheadlable.append(' ')
else:
vheadlable.append(str(self.position[sn][1][0][i]+1))
tw.setVerticalHeaderLabels(vheadlable)
hheadlable = []
if num > 1:
for i in range(lcol):
if self.position[sn][1][3][i]<0:
hheadlable.append(' ')
else:
hheadlable.append(self.colToString(self.position[sn][1][3][i]))
else:
for i in range(lcol):
if self.position[sn][1][2][i]<0:
hheadlable.append(' ')
else:
hheadlable.append(self.colToString(self.position[sn][1][2][i]))
tw.setHorizontalHeaderLabels(hheadlable)
for i in range(len(diff)):
for j in range(len(diff[i])):
dtxt=diff[i][j][1]
if dtxt or dtxt==0:
txt = self.mtoString(dtxt)#str(dtxt)
else:
txt=' '
twi=QtGui.QTableWidgetItem(txt)
if diff[i][j][0]:
if diff[i][j][0]<0:
twi.setBackground(QtGui.QColor(233,150,122))#red
elif diff[i][j][0]>1:
twi.setBackground(QtGui.QColor(255,255,51))#orange
else:
twi.setBackground(QtGui.QColor(135,206,235))#blue
else:
twi.setBackground(QtGui.QColor(255,255,255))
tw.setItem(i, j,twi)
#convert a column number to its letter label
def colToString(self,n):
ss = ''
m = n // 26
r = n % 26
if m:
ss += self.colToString(m)
ss += chr(65 + r)
return ss
#convert a number to a string
def mtoString(self,input):
try:
# 'NaN' is the one exception when using float()
if input == 'NaN':
return 'NaN'
float(input)
return str(input)
except ValueError:
return input
# set up the detail display area
def show_detail(self,merge,num):
if merge[0]:
if merge[0]<0:
rowE=[-1,[],[]]
colE=[-1,[],[]]
cellE=[-1,[],[]]
elif merge[0]>1:
row1=[]
row2=[]
for i in range(len(merge[1])):
if merge[1][i]:
if merge[1][i] <1:
row1.append(i)
elif merge[1][i] >1:
pass
else:
row2.append(i)
rowE=[0,row1,row2]
row1 = []
row2 = []
for i in range(len(merge[2])):
if merge[2][i]:
if merge[2][i] < 1:
row1.append(i)
elif merge[2][i]> 1:
pass
else:
row2.append(i)
colE=[0,row1,row2]
cellE=[0,merge[3]]
else:
rowE = [1, [], []]
colE = [1, [], []]
cellE = [1, [], []]
else:
rowE = [0, [], []]
colE = [0, [], []]
cellE = [0, [], []]
self.row_button.clicked.connect(partial(self.diff_comment, 1, rowE,num))
self.col_button.clicked.connect(partial(self.diff_comment, -1, colE,num))
self.cell_button.clicked.connect(partial(self.diff_comment_cell,cellE,num))
self.clearHighlight_button.clicked.connect(partial(self.show_sheets, num))
self.diff_comment(1, rowE,num)
#file selection actions
def select_file1(self):
self.file1=QtGui.QFileDialog.getOpenFileName(self,u'选择文件','./','Excel(*.xls*);;All Files(*)')
if self.file1:
self.file_button1.setText(self.file1)
def select_file2(self):
self.file2=QtGui.QFileDialog.getOpenFileName(self,u'选择文件','./','Excel(*.xls*);;All Files(*)')
if self.file2:
self.file_button2.setText(self.file2)
#because diff merges the two sheets into one, keep a position mapping between them
def positionFind(self):
def match(rowmerge ):
rowpre = []
rowpreR = []
rowsuf = []
rowsufR = []
pre = 0
suf = 0
for i in range(len(rowmerge)):
if rowmerge[i]:
if rowmerge[i] < 1:
rowpre.append(i)
rowpreR.append(pre)
rowsufR.append(-1)
pre += 1
elif rowmerge[i] > 1:
rowpre.append(i)
rowpreR.append(pre)
pre += 1
rowsuf.append(i)
rowsufR.append(suf)
suf += 1
else:
rowsuf.append(i)
rowsufR.append(suf)
rowpreR.append(-1)
suf += 1
else:
rowpre.append(i)
rowpreR.append(pre)
rowsuf.append(i)
rowsufR.append(suf)
pre += 1
suf += 1
return rowpre,rowsuf,rowpreR,rowsufR
position=[]
for itr in self.diffrst:
rowpre, rowsuf, rowpreR, rowsufR=match(itr[3][1])
colpre, colsuf, colpreR, colsufR=match(itr[3][2])
#original sheet to display sheet, and display sheet back to original sheet
position.append(([rowpre,rowsuf,colpre,colsuf],[rowpreR,rowsufR,colpreR,colsufR]))
return position
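#illustrative trace (added for clarity, not in the original source): the merge
#vector appears to use 0 for "row present in both files", a value < 1 for
#"only in file 1" and 1 for "only in file 2". With that reading,
#  match([0, -1, 1]) returns
#    rowpre  = [0, 1]      (display rows present in file 1)
#    rowsuf  = [0, 2]      (display rows present in file 2)
#    rowpreR = [0, 1, -1]  (display row -> original row in file 1, -1 if absent)
#    rowsufR = [0, -1, 1]  (display row -> original row in file 2, -1 if absent)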
#show details of row/column changes
#input: (row/column flag, [sheet change flag, deleted row numbers, added row numbers])
def diff_comment(self,sig,diff,num):
self.row_button.setStyleSheet('background-color:rgb(255,255,255)')
self.col_button.setStyleSheet('background-color:rgb(255,255,255)')
self.cell_button.setStyleSheet('background-color:rgb(255,255,255)')
self.detail.clearContents()
self.detail.setRowCount(2)
self.detail.setColumnCount(2)
if sig>0:
self.row_button.setStyleSheet('background-color:rgb(100,100,150)')
hzh=u'行号'
elif sig<0:
self.col_button.setStyleSheet('background-color:rgb(100,100,150)')
hzh =u'列号'
# infolabel = QtGui.QTableWidgetItem(self.diffrst[num][0])
# infolabel.setBackground(QtGui.QColor(100,100,150))
# self.detail.setItem(0,0,infolabel)
# lbadd=QtGui.QTableWidgetItem(u'新增')
# self.detail.setItem(1,0,lbadd)
# lbrm=QtGui.QTableWidgetItem(u'删除')
# self.detail.setItem(2,0,lbrm)
# lbhzh=QtGui.QTableWidgetItem(hzh)
# self.detail.setItem(0, 1,lbhzh)
vheadlable = [u'新增', u'删除']
self.detail.setVerticalHeaderLabels(vheadlable)
hheadlable=[]
if diff[0] < 0:
lbqb=QtGui.QTableWidgetItem(u'全部')
self.detail.setItem(1, 0,lbqb)
elif diff[0]>0:
lbqb =QtGui.QTableWidgetItem(u'全部')
self.detail.setItem(0, 0,lbqb)
else:
allrow=list(set(diff[1]).union(diff[2]))
allrow.sort()
self.detail.setColumnCount(len(allrow))
for i in range(len(allrow)):
# lbhzh = QtGui.QTableWidgetItem(hzh)
# self.detail.setItem(0,i+1,lbhzh)
hheadlable.append(hzh)
if allrow[i] in diff[1]:
delbut=QtGui.QPushButton()
#delbut.setFixedSize(50,20)
if sig>0:
txt=self.position[num][1][0][allrow[i]]+1
delbut.setText(str(txt))
else:
txt = self.position[num][1][2][allrow[i]]+1
txt=self.colToString(txt-1)
delbut.setText(txt)
delbut.clicked.connect(partial(self.highlight,sig,allrow[i],num))
self.detail.setCellWidget(1, i,delbut)
if allrow[i] in diff[2]:
addbut=QtGui.QPushButton()
#addbut.setFixedSize(50,20)
if sig < 0:
txt = self.position[num][1][3][allrow[i]]+1
txt = self.colToString(txt-1)
addbut.setText(txt)
else:
txt = self.position[num][1][1][allrow[i]]+1
addbut.setText(str(txt))
addbut.clicked.connect(partial(self.highlight,sig,allrow[i],num))
self.detail.setCellWidget(0, i,addbut)
self.detail.setHorizontalHeaderLabels(hheadlable)
def diff_comment_cell(self,cell,num):
self.row_button.setStyleSheet('background-color:rgb(255,255,255)')
self.col_button.setStyleSheet('background-color:rgb(255,255,255)')
self.cell_button.setStyleSheet('background-color:rgb(100, 100, 150)')
self.detail.clearContents()
self.detail.setRowCount(1)
self.detail.setColumnCount(len(cell[1]))
headlable=[u'改动']*len(cell[1])
self.detail.setHorizontalHeaderLabels(headlable)
self.detail.setVerticalHeaderLabels([u'单元格'])
# infolabel = QtGui.QTableWidgetItem(self.diffrst[num][0])
# infolabel.setBackground(QtGui.QColor(100, 100, 150))
# self.detail.setItem(0, 0, infolabel)
m=0
for itr in cell[1]:
# txt=itr.__str__().replace('), (',')->(')
# txt=txt[1:-1]
txt='('+str(itr[0][0]+1)+','+str(itr[0][1]+1)+')->('+str(itr[1][0]+1)+','+str(itr[1][1]+1)+')'
inf = QtGui.QTableWidgetItem(txt)
self.detail.setItem(0,m,inf)
m+=1
def highlight(self,sig,n,num):
if sig>0:
for i in range(self.tabWidget1.columnCount()):
self.tabWidget1.item(n,i).setBackground(QtGui.QColor(100,100,150))
for i in range(self.tabWidget2.columnCount()):
self.tabWidget2.item(n,i).setBackground(QtGui.QColor(100,100,150))
self.tabWidget1.verticalScrollBar().setSliderPosition(n)
self.tabWidget2.verticalScrollBar().setSliderPosition(n)
else:
for i in range(self.tabWidget1.rowCount()):
self.tabWidget1.item(i,n).setBackground(QtGui.QColor(100,100,150))
for i in range(self.tabWidget2.rowCount()):
self.tabWidget2.item(i,n).setBackground(QtGui.QColor(100,100,150))
self.tabWidget1.horizontalScrollBar().setSliderPosition(n)
self.tabWidget2.horizontalScrollBar().setSliderPosition(n)
def main():
app = QtGui.QApplication(sys.argv)
mainwindow=MainUi()
mainwindow.show()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
``` |
{
"source": "jo-oe/protonfixes",
"score": 2
} |
#### File: protonfixes/gamefixes/21690.py
```python
import os
import shutil
import subprocess
from protonfixes import util
def main():
""" Installs wmp11
"""
util.protontricks('wmp11')
util.protontricks('gfw')
installpath = os.path.abspath(os.getcwd())
videopath = os.path.join(installpath,'nativePC_MT','movie')
for video in os.listdir(videopath):
if video.endswith(".wmv") and os.path.getsize(os.path.join(videopath, video)) > 0:
shutil.move(os.path.join(videopath, video), os.path.join(videopath, video + '.bak'))
subprocess.call(['touch', os.path.join(videopath, video), os.path.join(videopath)])
```
#### File: protonfixes/gamefixes/429720.py
```python
from protonfixes import util
import os
import getpass
# IMSCARED relies on a folder on the user's Desktop being accessible
# The problem is that all of the folders in Proton are sandboxed
# So this protonfix works around that
def main():
desktoppath = os.path.join(util.protonprefix(), 'drive_c/users/steamuser/Desktop')
if os.path.exists(desktoppath):
if os.path.islink(desktoppath):
os.unlink(desktoppath)
else:
os.rmdir(desktoppath)
dst = '/home/' + getpass.getuser() + '/Desktop/'
os.symlink(dst, desktoppath)
```
#### File: protonfixes/gamefixes/678950.py
```python
from protonfixes import util
def main():
util.replace_command('DBFighterZ.exe', 'RED/Binaries/Win64/RED-Win64-Shipping.exe')
util.append_argument('-eac-nop-loaded')
util.protontricks('hidewineexports=enable')
``` |
{
"source": "jooern81/covid19_networkx",
"score": 3
} |
#### File: jooern81/covid19_networkx/full_network_test3.py
```python
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib import pylab
import random
import pandas as pd
import datetime
# sd = pd.read_excel('station_details.xlsx')
# sd.to_pickle("./station_details.pkl")
sd = pd.read_pickle("./station_details.pkl")
df = pd.read_pickle("./full.pkl")
df2 = pd.read_pickle("./expansion1.pkl")
df3 = pd.read_pickle("./expansion2.pkl")
df4 = pd.read_pickle("./expansion4.pkl")
df5 = pd.read_pickle("./expansion5.pkl")
df = pd.concat([df,df2,df3,df4,df5])
adult_df = df[df['Ticket Category Description'] == 'Adult CSC']
child_df = df[df['Ticket Category Description'] == 'Child/Student CSC']
senior_df = df[df['Ticket Category Description'] == 'Snr Citizen CSC']
# tourist_df = df[df['Ticket Category Description'] == 'Tourist']
def generate_graph(df,sd,date,hour):
#select a specific date and time from the df
subdf = df[df['Business Date'] == date]
subdf = subdf[subdf['Entry Hour'] == hour]
subdf = subdf.groupby(['Entry Station ID', 'Exit Station ID'])[['Ridership NSEWL']].agg('sum')
subdf = subdf.reset_index()
subdf = pd.concat([subdf[subdf['Entry Station ID'].between(1, 48)],subdf[subdf['Entry Station ID'].between(63, 73)]])
subdf = pd.concat([subdf[subdf['Exit Station ID'].between(1, 48)],subdf[subdf['Exit Station ID'].between(63, 73)]])
subdf = subdf[subdf['Ridership NSEWL'].between(int(0.05*max(subdf['Ridership NSEWL'])), max(subdf['Ridership NSEWL']))]
subdf = subdf.reset_index()
#instantiate graph
G = nx.DiGraph()
# G_base = nx.DiGraph()
#use dictionary to set line attribute for stations
node_color = {}
#get the stations from each line and sort them in their running order, paired with their entry/exit ID
ew_stations = sd[['Entry Station ID','EW ID']]
ew_stations = ew_stations[ew_stations['EW ID'] !='x']
ew_stations = ew_stations.sort_values(['EW ID'], ascending=[1])
ns_stations = sd[['Entry Station ID','NS ID']]
ns_stations = ns_stations[ns_stations['NS ID'] !='x']
ns_stations = ns_stations.sort_values(['NS ID'], ascending=[1])
ca_stations = sd[['Entry Station ID','CA ID']]
ca_stations = ca_stations[ca_stations['CA ID'] !='x']
ca_stations = ca_stations.sort_values(['CA ID'], ascending=[1])
#add node positions for visuals
node_positions = {}
for sd_entry in range(0,len(sd)):
node_positions[sd['Entry Station ID'][sd_entry]] = (sd['X'][sd_entry]/100,sd['Y'][sd_entry]/100)
for station in node_positions:
G.add_node(station,pos=node_positions[station])
# G_base.add_node(station,pos=node_positions[station])
#assign attributes
for sd_entry in range(0,len(sd)):
node_color[sd['Entry Station ID'][sd_entry]] = sd['color'][sd_entry]
for station in node_color:
G.add_node(station,color=node_color[station])
# G_base.add_node(station,color=node_color[station])
#add base edges to the graph - ensure the graph is strongly connected
base_edges = []
list_ew_stations = list(ew_stations['Entry Station ID'])
for i in range(0,len(list_ew_stations)):
if list_ew_stations[i] not in [list_ew_stations[len(list_ew_stations)-1]]: #terminal stations
G.add_edge(list_ew_stations[i],list_ew_stations[i+1], color_base='green', weight=1)
base_edges.append((list_ew_stations[i],list_ew_stations[i+1]))
for i in range(len(list_ew_stations)-1,0,-1):
if list_ew_stations[i] not in [list_ew_stations[0]]: #terminal stations
G.add_edge(list_ew_stations[i],list_ew_stations[i-1], color_base='green', weight=1)
base_edges.append((list_ew_stations[i],list_ew_stations[i-1]))
list_ns_stations = list(ns_stations['Entry Station ID'])
for i in range(0,len(list_ns_stations)):
if list_ns_stations[i] not in [list_ns_stations[len(list_ns_stations)-1]]: #terminal stations
G.add_edge(list_ns_stations[i],list_ns_stations[i+1], color_base='red', weight=1)
base_edges.append((list_ns_stations[i],list_ns_stations[i+1]))
for i in range(len(list_ns_stations)-1,0,-1):
if list_ns_stations[i] not in [list_ns_stations[0]]: #terminal stations
G.add_edge(list_ns_stations[i],list_ns_stations[i-1], color_base='red', weight=1)
base_edges.append((list_ns_stations[i],list_ns_stations[i-1]))
list_ca_stations = list(ca_stations['Entry Station ID'])
for i in range(0,len(list_ca_stations)):
if list_ca_stations[i] not in [list_ca_stations[len(list_ca_stations)-1]]: #terminal stations
G.add_edge(list_ca_stations[i],list_ca_stations[i+1], color_base='green', weight=1)
base_edges.append((list_ca_stations[i],list_ca_stations[i+1]))
for i in range(len(list_ca_stations)-1,0,-1):
if list_ca_stations[i] not in [list_ca_stations[0]]: #terminal stations
G.add_edge(list_ca_stations[i],list_ca_stations[i-1], color_base='green', weight=1)
base_edges.append((list_ca_stations[i],list_ca_stations[i-1]))
#building the base network
# list_ew_stations = list(ew_stations['Entry Station ID'])
# for i in range(0,len(list_ew_stations)):
# if list_ew_stations[i] not in [list_ew_stations[len(list_ew_stations)-1]]: #terminal stations
# G_base.add_edge(list_ew_stations[i],list_ew_stations[i+1], color='green', weight=1)
# for i in range(len(list_ew_stations)-1,0,-1):
# if list_ew_stations[i] not in [list_ew_stations[0]]: #terminal stations
# G_base.add_edge(list_ew_stations[i],list_ew_stations[i-1], color='green', weight=1)
# list_ns_stations = list(ns_stations['Entry Station ID'])
# for i in range(0,len(list_ns_stations)):
# if list_ns_stations[i] not in [list_ns_stations[len(list_ns_stations)-1]]: #terminal stations
# G_base.add_edge(list_ns_stations[i],list_ns_stations[i+1], color='red', weight=1)
# for i in range(len(list_ns_stations)-1,0,-1):
# if list_ns_stations[i] not in [list_ns_stations[0]]: #terminal stations
# G_base.add_edge(list_ns_stations[i],list_ns_stations[i-1], color='red', weight=1)
# list_ca_stations = list(ca_stations['Entry Station ID'])
# for i in range(0,len(list_ca_stations)):
# if list_ca_stations[i] not in [list_ca_stations[len(list_ca_stations)-1]]: #terminal stations
# G_base.add_edge(list_ca_stations[i],list_ca_stations[i+1], color='green', weight=1)
# for i in range(len(list_ca_stations)-1,0,-1):
# if list_ca_stations[i] not in [list_ca_stations[0]]: #terminal stations
# G_base.add_edge(list_ca_stations[i],list_ca_stations[i-1], color='green', weight=1)
#extra attention for interchange stations
#add in trip edges
for df_entry in range(0,int(len(subdf))):
G.add_edge(subdf['Entry Station ID'][df_entry],subdf['Exit Station ID'][df_entry],weight=subdf['Ridership NSEWL'][df_entry])
return(G,base_edges)
def save_graph(graph,base_edges,file_name):
#initialize figure
plt.figure(num=None, figsize=(150, 100), dpi=100)
fig = plt.figure(1)
weights = nx.get_edge_attributes(graph,'weight')
pos = nx.get_node_attributes(graph,'pos')
edges, colors = zip(*nx.get_edge_attributes(graph,'weight').items())
node_colors = nx.get_node_attributes(graph,'color')
d = dict(graph.degree)
colors_base = nx.get_edge_attributes(graph,'color_base')
nx.draw(graph, pos, edgelist=base_edges, edge_color=[v for v in colors_base.values()], width=15,alpha=0.2)
nx.draw(graph, pos, edgelist=edges, edge_color=weights.values(), width=[pow(v/max(weights.values()),3)*30 for v in weights.values()], edge_cmap = plt.cm.jet,
vmin = 0.0, vmax = max(weights.values()), alpha=0.9, connectionstyle="arc3,rad=0.2")
sm = plt.cm.ScalarMappable(cmap=plt.cm.jet, norm=plt.Normalize(vmin = 0.0, vmax = max(weights.values())))
sm._A = []
cbar = plt.colorbar(sm,shrink=0.3,pad=0.001)
cbar.ax.tick_params(labelsize=100)
nx.draw_networkx_nodes(graph,pos,nodelist=d.keys(), node_size=[300 for v in d.values()],node_color=[v for v in node_colors.values()])
nx.draw_networkx_labels(graph,pos)
# nx.draw_networkx_edges(graph,pos, edgelist=edges, width=[weight / 1 for weight in weights],connectionstyle=None,edge_color=[v for v in edge_colors.values()])
# out = nx.draw(G,pos,edge_color = weights.values(), edge_cmap = plt.cm.jet, vmin = 0.0, vmax = max(weights.values()))
cut = 1.3
xmax = cut * max(xx for xx, yy in pos.values())
ymax = cut * max(yy for xx, yy in pos.values())
plt.xlim(40, xmax)
plt.ylim(200, ymax)
plt.savefig(file_name)
pylab.close()
del fig
# print("Number of Nodes: " + str(len(graph.nodes())))
# print("Number of Edges: " + str(len(graph.edges())))
# print("Number of Trips: " + str(sum(weights.values())))
# print("Trip Range: " + str(min(weights.values()))+"-"+str(max(weights.values())))
# def save_graph2(graph,file_name):
# #initialze Figure
# plt.figure(num=None, figsize=(150, 100), dpi=100)
# fig = plt.figure(1)
# weights = nx.get_edge_attributes(graph,'weight')
# pos = nx.get_node_attributes(graph,'pos')
# edges = graph.edges
# colors = nx.get_edge_attributes(graph,'color')
# node_colors = nx.get_node_attributes(graph,'color')
# d = dict(graph.degree)
# nx.draw(graph, pos, edgelist=edges, edge_color=[v for v in colors.values()], width=[5 for v in weights.values()])
# # sm = plt.cm.ScalarMappable(cmap=plt.cm.jet, norm=plt.Normalize(vmin = 0.0, vmax = max(weights.values())))
# # sm._A = []
# # cbar = plt.colorbar(sm,shrink=0.3,pad=0.001)
# # cbar.ax.tick_params(labelsize=100)
# nx.draw_networkx_nodes(graph,pos,nodelist=d.keys(), node_size=[300 for v in d.values()],node_color=[v for v in node_colors.values()])
# nx.draw_networkx_labels(graph,pos)
# # nx.draw_networkx_edges(graph,pos, edgelist=edges, width=[weight / 1 for weight in weights],connectionstyle=None,edge_color=[v for v in edge_colors.values()])
# # out = nx.draw(G,pos,edge_color = weights.values(), edge_cmap = plt.cm.jet, vmin = 0.0, vmax = max(weights.values()))
# cut = 1.3
# xmax = cut * max(xx for xx, yy in pos.values())
# ymax = cut * max(yy for xx, yy in pos.values())
# plt.xlim(40, xmax)
# plt.ylim(200, ymax)
# plt.savefig(file_name)
# pylab.close()
# del fig
# print("Number of Nodes: " + str(len(graph.nodes())))
# print("Number of Edges: " + str(len(graph.edges())))
# print("Number of Trips: " + str(sum(weights.values())))
# print("Trip Range: " + str(min(weights.values()))+"-"+str(max(weights.values())))
def centrality_measures(G):
#how to factor in the weights?
nx.degree_centrality(G)
nx.in_degree_centrality(G)
nx.out_degree_centrality(G)
nx.eigenvector_centrality(G)
nx.closeness_centrality(G)
nx.betweenness_centrality(G)
nx.betweenness_centrality_source(G)
nx.harmonic_centrality(G)
nx.load_centrality(G)
nx.voterank(G,10)
def generate_report(G,date,hour,category):
# Create some Pandas dataframes from some data.
weights = nx.get_edge_attributes(G,'weight')
edges, weight = zip(*nx.get_edge_attributes(G,'weight').items())
general_details_df = pd.DataFrame({'number_of_nodes':[str(len(G.nodes()))],'number_of_edges':[str(len(G.edges()))],'number_of_trips':[str(sum(weights.values()))],'trip_range':[str(min(weights.values()))+"-"+str(max(weights.values()))]})
degree_centrality_df = pd.DataFrame({'station_id':list(nx.degree_centrality(G).keys()),'degree_centrality':list(nx.degree_centrality(G).values())})
weighted_degree_df = pd.DataFrame({'station_id':list(dict(G.degree(weight='weight')).keys()),'degree_centrality':list(dict(G.degree(weight='weight')).values())})
weighted_out_degree_df = pd.DataFrame({'station_id':list(dict(G.out_degree(weight='weight')).keys()),'out_degree_centrality':list(dict(G.out_degree(weight='weight')).values())})
weighted_in_degree_df = pd.DataFrame({'station_id':list(dict(G.in_degree(weight='weight')).keys()),'in_degree_centrality':list(dict(G.in_degree(weight='weight')).values())})
in_degree_centrality_df = pd.DataFrame({'station_id':list(nx.in_degree_centrality(G).keys()),'in_degree_centrality':list(nx.in_degree_centrality(G).values())})
out_degree_centrality_df = pd.DataFrame({'station_id':list(nx.out_degree_centrality(G).keys()),'out_degree_centrality':list(nx.out_degree_centrality(G).values())})
edge_weight_df = pd.DataFrame({'edge_tuple':edges,'edge_weight':weight})
# eigenvector_centrality_df = pd.DataFrame({'station_id':list(nx.eigenvector_centrality(G,max_iter=600).keys()),'eigenvector_centrality':list(nx.eigenvector_centrality(G,max_iter=600).values())})
# closeness_centrality_df = pd.DataFrame({'station_id':list(nx.closeness_centrality(G).keys()),'closeness_centrality':list(nx.closeness_centrality(G).values())})
betweenness_centrality_df = pd.DataFrame({'station_id':list(nx.betweenness_centrality(G).keys()),'betweenness_centrality':list(nx.betweenness_centrality(G).values())})
betweenness_centrality_source_df = pd.DataFrame({'station_id':list(nx.betweenness_centrality_source(G).keys()),'betweenness_centrality_source':list(nx.betweenness_centrality_source(G).values())})
# harmonic_centrality_df = pd.DataFrame({'station_id':list(nx.harmonic_centrality(G).keys()),'harmonic_centrality':list(nx.harmonic_centrality(G).values())})
# load_centrality_df = pd.DataFrame({'station_id':list(nx.load_centrality(G).keys()),'load_centrality':list(nx.load_centrality(G).values())})
# voterank_df = pd.DataFrame({'key_stations:':nx.voterank(G)})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter(category+'_centrality_report1_'+date+'_'+str(hour)+'.xlsx')
# Write each dataframe to a different worksheet.
general_details_df.to_excel(writer, sheet_name='general_details')
degree_centrality_df.to_excel(writer, sheet_name='degree_centrality')
weighted_degree_df.to_excel(writer, sheet_name='weighted_degree')
weighted_out_degree_df.to_excel(writer, sheet_name='weighted_out_degree')
weighted_in_degree_df.to_excel(writer, sheet_name='weighted_in_degree')
in_degree_centrality_df.to_excel(writer, sheet_name='in_degree_centrality')
out_degree_centrality_df.to_excel(writer, sheet_name='out_degree_centrality')
edge_weight_df.to_excel(writer, sheet_name='edge_weights')
# eigenvector_centrality_df.to_excel(writer, sheet_name='eigenvector_centrality')
# closeness_centrality_df.to_excel(writer, sheet_name='closeness_centrality')
betweenness_centrality_df.to_excel(writer, sheet_name='betweenness_centrality')
betweenness_centrality_source_df.to_excel(writer, sheet_name='betweenness_centrality_source')
# harmonic_centrality_df.to_excel(writer, sheet_name='harmonic_centrality')
# load_centrality_df.to_excel(writer, sheet_name='load_centrality')
# voterank_df.to_excel(writer, sheet_name='voterank')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
start_date = datetime.date(2019,1,21)
end_date = datetime.date(2019,1,21)
delta = datetime.timedelta(days=1)
list_of_dates = []
list_of_hours = range(7,14)
while start_date <= end_date:
list_of_dates.append(str(start_date))
start_date += delta
for date in list_of_dates:
for hour in list_of_hours:
try:
# category = 'adult'
# G,base_edges = generate_graph(adult_df, sd, date, hour)
# save_graph(G,base_edges,"adult_mrt_base1_"+date+"_"+str(hour)+'.png')
# generate_report(G,date,hour,category)
# category = 'child'
# G,base_edges = generate_graph(child_df, sd, date, hour)
# save_graph(G,base_edges,"child_mrt_base1_"+date+"_"+str(hour)+'.png')
# generate_report(G,date,hour,category)
category = 'senior'
G,base_edges = generate_graph(senior_df, sd, date, hour)
save_graph(G,base_edges,"senior_mrt_base1_"+date+"_"+str(hour)+'.png')
generate_report(G,date,hour,category)
except:
print(str(date)+'_'+str(hour)+'_'+category+' not available.')
# save_graph2(G_base,'mrt_base.pdf')
# generate_report(G_base,date,hour)
``` |
{
"source": "joofeloof/pyaf",
"score": 2
} |
#### File: pyaf/TS/SignalDecomposition_Cycle.py
```python
import pandas as pd
import numpy as np
from . import Time as tsti
from . import Perf as tsperf
from . import Plots as tsplot
from . import Utils as tsutil
# for timing
import time
class cAbstractCycle:
def __init__(self , trend):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mTrend = trend;
self.mTrend_residue_name = self.mTrend.mOutName + '_residue'
self.mFormula = None;
self.mComplexity = None;
def getCycleResidueName(self):
return self.getCycleName() + "_residue";
def plot(self):
tsplot.decomp_plot(self.mCycleFrame, self.mTimeInfo.mNormalizedTimeColumn,
self.mTrend_residue_name, self.getCycleName() , self.getCycleResidueName());
def check_not_nan(self, sig , name):
#print("check_not_nan");
if(np.isnan(sig).any() or np.isinf(sig).any() ):
logger = tsutil.get_pyaf_logger();
logger.error("CYCLE_RESIDUE_WITH_NAN_IN_SIGNAL" + str(sig));
raise tsutil.Internal_PyAF_Error("CYCLE_COLUMN_FOR_TREND_RESIDUE ['" + name + "']");
pass
def computePerf(self):
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleResidueName()], self.getCycleResidueName())
# self.mCycleFrame.to_csv(self.getCycleResidueName() + ".csv");
self.mCycleFitPerf = tsperf.cPerf();
self.mCycleForecastPerf = tsperf.cPerf();
# self.mCycleFrame[[self.mTrend_residue_name, self.getCycleName()]].to_csv(self.getCycleName() + ".csv");
(lFrameFit, lFrameForecast, lFrameTest) = self.mSplit.cutFrame(self.mCycleFrame);
self.mCycleFitPerf.compute(
lFrameFit[self.mTrend_residue_name], lFrameFit[self.getCycleName()], self.getCycleName())
self.mCycleForecastPerf.compute(
lFrameForecast[self.mTrend_residue_name], lFrameForecast[self.getCycleName()], self.getCycleName())
class cZeroCycle(cAbstractCycle):
def __init__(self , trend):
super().__init__(trend);
self.mFormula = "NoCycle"
self.mComplexity = 0;
def getCycleName(self):
return self.mTrend_residue_name + "_zeroCycle";
def fit(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[self.getCycleName()] = np.zeros_like(self.mTrendFrame[self.mTrend_residue_name])
self.mCycleFrame[self.getCycleResidueName()] = self.mCycleFrame[self.mTrend_residue_name];
self.mOutName = self.getCycleName()
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
df[self.getCycleName()] = np.zeros_like(df[self.mTrend_residue_name]);
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
return df;
class cSeasonalPeriodic(cAbstractCycle):
def __init__(self , trend, date_part):
super().__init__(trend);
self.mDatePart = date_part;
self.mEncodedValueDict = {}
self.mFormula = "Seasonal_" + self.mDatePart;
self.mComplexity = 1;
def getCycleName(self):
return self.mTrend_residue_name + "_Seasonal_" + self.mDatePart;
def hasEnoughData(self, iTimeMin, iTimeMax):
lTimeDelta = iTimeMax - iTimeMin;
lDays = lTimeDelta / np.timedelta64(1,'D');
lSeconds = lTimeDelta / np.timedelta64(1,'s');
if(self.mDatePart == "Hour"):
return (lDays >= 10);
if(self.mDatePart == "Minute"):
lHours = lSeconds // 3600;
return (lHours >= 10);
if(self.mDatePart == "Second"):
lMinutes = lSeconds // 60;
return (lMinutes >= 10);
if(self.mDatePart == "DayOfMonth"):
lMonths = lDays // 30;
return (lMonths >= 10);
if(self.mDatePart == "DayOfWeek"):
lWeeks = lDays // 7;
return (lWeeks >= 10);
if(self.mDatePart == "MonthOfYear"):
lYears = lDays // 360;
return (lYears >= 10);
if(self.mDatePart == "WeekOfYear"):
lYears = lDays // 360;
return (lYears >= 10);
return False;
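# Worked examples of the thresholds above (for clarity, not in the original
# source): an "Hour" seasonal needs lDays >= 10, a "DayOfWeek" seasonal needs
# lDays >= 70 (ten full weeks), and a "MonthOfYear" seasonal needs
# lDays >= 3600 (roughly ten years).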
def fit(self):
assert(self.mTimeInfo.isPhysicalTime());
lHor = self.mTimeInfo.mHorizon;
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
lName = self.getCycleName();
self.mCycleFrame[self.mTrend_residue_name] = self.mTrendFrame[self.mTrend_residue_name]
self.mCycleFrame[lName] = self.mTrendFrame[self.mTime].apply(self.get_date_part);
# we encode only using estimation
lCycleFrameEstim = self.mSplit.getEstimPart(self.mCycleFrame);
lTrendMeanEstim = lCycleFrameEstim[self.mTrend_residue_name].mean();
lGroupBy = lCycleFrameEstim.groupby(by=[lName] , sort=False)[self.mTrend_residue_name].mean();
self.mEncodedValueDict = lGroupBy.to_dict()
self.mDefaultValue = lTrendMeanEstim;
# print("cSeasonalPeriodic_DefaultValue" , self.getCycleName(), self.mDefaultValue);
self.mCycleFrame[lName + '_enc'] = self.mCycleFrame[lName].apply(lambda x : self.mEncodedValueDict.get(x , self.mDefaultValue))
self.mCycleFrame[lName + '_enc'].fillna(lTrendMeanEstim, inplace=True);
self.mCycleFrame[self.getCycleResidueName()] = self.mCycleFrame[self.mTrend_residue_name] - self.mCycleFrame[lName + '_enc'];
self.mCycleFrame[lName + '_NotEncoded'] = self.mCycleFrame[lName];
self.mCycleFrame[lName] = self.mCycleFrame[lName + '_enc'];
self.mOutName = self.getCycleName()
#print("encoding '" + lName + "' " + str(self.mEncodedValueDict));
@tsutil.cMemoize
def get_date_part(self, x):
lDatepartComputer = self.mTimeInfo.get_date_part_value_computer(self.mDatePart)
return lDatepartComputer(x)
@tsutil.cMemoize
def get_date_part_encoding(self, x):
lDatepartComputer = self.mTimeInfo.get_date_part_value_computer(self.mDatePart)
dp = lDatepartComputer(x)
return self.mEncodedValueDict.get(dp , self.mDefaultValue)
def transformDataset(self, df):
target = df[self.mTrend_residue_name]
df[self.getCycleName()] = df[self.mTime].apply(self.get_date_part_encoding);
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
return df;
class cBestCycleForTrend(cAbstractCycle):
def __init__(self , trend, criterion):
super().__init__(trend);
self.mCycleFrame = pd.DataFrame()
self.mCyclePerfDict = {}
self.mBestCycleValueDict = {}
self.mBestCycleLength = None
self.mCriterion = criterion
self.mComplexity = 2;
self.mFormula = "BestCycle"
def getCycleName(self):
return self.mTrend_residue_name + "_bestCycle_by" + self.mCriterion;
def dumpCyclePerfs(self):
print(self.mCyclePerfDict);
def computeBestCycle(self):
# self.dumpCyclePerfs();
lCycleFrameEstim = self.mSplit.getEstimPart(self.mCycleFrame);
self.mDefaultValue = lCycleFrameEstim[self.mTrend_residue_name].mean();
self.mBestCycleLength = None;
lBestCycleIdx = None;
lBestCriterion = None;
if(self.mCyclePerfDict):
for k in sorted(self.mCyclePerfDict.keys()):
# smallest cycles are better
if((lBestCriterion is None) or (self.mCyclePerfDict[k] < lBestCriterion)):
lBestCycleIdx = k;
lBestCriterion = self.mCyclePerfDict[k];
if(self.mOptions.mCycle_Criterion_Threshold is None or
(self.mCyclePerfDict[lBestCycleIdx] < self.mOptions.mCycle_Criterion_Threshold)) :
self.mBestCycleLength = lBestCycleIdx
# print("BEST_CYCLE_PERF" , self.mTrend_residue_name, self.mBestCycleLength)
self.transformDataset(self.mCycleFrame);
pass
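# Worked example (for clarity, not in the original source): with
# self.mCyclePerfDict = {7: 0.2, 14: 0.2, 30: 0.5} and
# mCycle_Criterion_Threshold = 0.5, the strict '<' comparison keeps the first
# (smallest) cycle length among ties, so self.mBestCycleLength becomes 7.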
def generate_cycles(self):
self.mTimeInfo.addVars(self.mCycleFrame);
self.mCycleFrame[self.mTrend_residue_name ] = self.mTrendFrame[self.mTrend_residue_name]
lCycleFrameEstim = self.mSplit.getEstimPart(self.mCycleFrame);
self.mDefaultValue = lCycleFrameEstim[self.mTrend_residue_name].mean();
del lCycleFrameEstim;
self.mCyclePerfDict = {}
lMaxRobustCycle = self.mTrendFrame.shape[0]//12;
# print("MAX_ROBUST_CYCLE_LENGTH", self.mTrendFrame.shape[0], lMaxRobustCycle);
lCycleLengths = self.mOptions.mCycleLengths or range(2,lMaxRobustCycle + 1)
lCycleFrame = pd.DataFrame();
lCycleFrame[self.mTrend_residue_name ] = self.mTrendFrame[self.mTrend_residue_name]
for i in lCycleLengths:
if ((i > 1) and (i <= lMaxRobustCycle)):
name_i = self.mTrend_residue_name + '_Cycle';
lCycleFrame[name_i] = self.mCycleFrame[self.mTimeInfo.mRowNumberColumn] % i
lCycleFrameEstim = self.mSplit.getEstimPart(lCycleFrame);
lGroupBy = lCycleFrameEstim.groupby(by=[name_i] , sort=False)[self.mTrend_residue_name].mean();
lEncodedValueDict = lGroupBy.to_dict()
lCycleFrame[name_i + '_enc'] = lCycleFrame[name_i].apply(
lambda x : lEncodedValueDict.get(x , self.mDefaultValue))
self.mBestCycleValueDict[i] = lEncodedValueDict;
lPerf = tsperf.cPerf();
# validate the cycles on the validation part
lValidFrame = self.mSplit.getValidPart(lCycleFrame);
lCritValue = lPerf.computeCriterion(lValidFrame[self.mTrend_residue_name],
lValidFrame[name_i + "_enc"],
self.mCriterion,
"Validation")
self.mCyclePerfDict[i] = lCritValue;
if(self.mOptions.mDebugCycles):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_INTERNAL_CRITERION " + name_i + " " + str(i) + \
" " + self.mCriterion +" " + str(lCritValue))
pass
def fit(self):
# print("cycle_fit" , self.mTrend_residue_name);
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.generate_cycles();
self.computeBestCycle();
self.mOutName = self.getCycleName()
self.mFormula = "Cycle_None"
if(self.mBestCycleLength is not None):
self.mFormula = "Cycle" # + str(self.mBestCycleLength);
self.transformDataset(self.mCycleFrame);
def transformDataset(self, df):
if(self.mBestCycleLength is not None):
lValueCol = df[self.mTimeInfo.mRowNumberColumn].apply(lambda x : x % self.mBestCycleLength);
df['cycle_internal'] = lValueCol;
# print("BEST_CYCLE" , self.mBestCycleLength)
# print(self.mBestCycleValueDict);
lDict = self.mBestCycleValueDict[self.mBestCycleLength];
df[self.getCycleName()] = lValueCol.apply(lambda x : lDict.get(x , self.mDefaultValue));
else:
df[self.getCycleName()] = np.zeros_like(df[self.mTimeInfo.mRowNumberColumn]);
target = df[self.mTrend_residue_name]
df[self.getCycleResidueName()] = target - df[self.getCycleName()].values
if(self.mOptions.mDebug):
self.check_not_nan(self.mCycleFrame[self.getCycleName()].values , self.getCycleName());
return df;
class cCycleEstimator:
def __init__(self):
self.mTimeInfo = tsti.cTimeInfo()
self.mTrendFrame = pd.DataFrame()
self.mCycleFrame = pd.DataFrame()
self.mCycleList = {}
def addSeasonal(self, trend, seas_type, resolution):
if(resolution >= self.mTimeInfo.mResolution):
lSeasonal = cSeasonalPeriodic(trend, seas_type);
if(self.mOptions.mActivePeriodics[lSeasonal.mFormula]):
if(lSeasonal.hasEnoughData(self.mTimeInfo.mTimeMin, self.mTimeInfo.mTimeMax)):
self.mCycleList[trend] = self.mCycleList[trend] + [lSeasonal];
pass
def defineCycles(self):
for trend in self.mTrendList:
self.mCycleList[trend] = [];
if(self.mOptions.mActivePeriodics['NoCycle']):
self.mCycleList[trend] = [cZeroCycle(trend)];
if(self.mOptions.mActivePeriodics['BestCycle']):
self.mCycleList[trend] = self.mCycleList[trend] + [
cBestCycleForTrend(trend, self.mOptions.mCycle_Criterion)];
if(self.mTimeInfo.isPhysicalTime()):
# The order used here is mandatory. see filterSeasonals before changing this order.
self.addSeasonal(trend, "MonthOfYear", tsti.cTimeInfo.sRES_MONTH);
self.addSeasonal(trend, "WeekOfYear", tsti.cTimeInfo.sRES_DAY);
self.addSeasonal(trend, "DayOfMonth", tsti.cTimeInfo.sRES_DAY);
self.addSeasonal(trend, "DayOfWeek", tsti.cTimeInfo.sRES_DAY);
self.addSeasonal(trend, "Hour", tsti.cTimeInfo.sRES_HOUR);
self.addSeasonal(trend, "Minute", tsti.cTimeInfo.sRES_MINUTE);
self.addSeasonal(trend, "Second", tsti.cTimeInfo.sRES_SECOND);
for trend in self.mTrendList:
if(len(self.mCycleList[trend]) == 0):
self.mCycleList[trend] = [cZeroCycle(trend)];
for cycle in self.mCycleList[trend]:
cycle.mTrendFrame = self.mTrendFrame;
cycle.mTimeInfo = self.mTimeInfo;
cycle.mSplit = self.mSplit;
cycle.mOptions = self.mOptions;
def plotCycles(self):
for trend in self.mTrendList:
for cycle in self.mCycleList[trend]:
cycle.plot()
def dumpCyclePerf(self, cycle):
if(self.mOptions.mDebugCycles):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_PERF_DETAIL_COUNT_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mCount) + " %.3f" % (cycle.mCycleForecastPerf.mCount));
logger.debug("CYCLE_PERF_DETAIL_MAPE_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mMAPE)+ " %.3f" % (cycle.mCycleForecastPerf.mMAPE));
logger.debug("CYCLE_PERF_DETAIL_L2_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mL2) + " %.3f" % (cycle.mCycleForecastPerf.mL2));
logger.debug("CYCLE_PERF_DETAIL_R2_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mR2) + " %.3f" % (cycle.mCycleForecastPerf.mR2));
logger.debug("CYCLE_PERF_DETAIL_PEARSONR_FIT_FORECAST " + cycle.mOutName +
" %.3f" % (cycle.mCycleFitPerf.mPearsonR) + " %.3f" % (cycle.mCycleForecastPerf.mPearsonR));
def estimateCycles(self):
self.mTime = self.mTimeInfo.mTime;
self.mSignal = self.mTimeInfo.mSignal;
self.mTimeInfo.addVars(self.mCycleFrame);
for trend in self.mTrendList:
lTrend_residue_name = trend.mOutName + '_residue'
self.mCycleFrame[lTrend_residue_name] = self.mTrendFrame[lTrend_residue_name]
for cycle in self.mCycleList[trend]:
start_time = time.time()
cycle.fit();
if(self.mOptions.mDebugPerformance):
cycle.computePerf();
self.dumpCyclePerf(cycle)
self.mCycleFrame[cycle.getCycleName()] = cycle.mCycleFrame[cycle.getCycleName()]
self.mCycleFrame[cycle.getCycleResidueName()] = cycle.mCycleFrame[cycle.getCycleResidueName()]
if(self.mOptions.mDebug):
cycle.check_not_nan(self.mCycleFrame[cycle.getCycleResidueName()].values ,
cycle.getCycleResidueName())
end_time = time.time()
lTrainingTime = round(end_time - start_time , 2);
if(self.mOptions.mDebugProfile):
logger = tsutil.get_pyaf_logger();
logger.info("CYCLE_TRAINING_TIME_IN_SECONDS '" + cycle.mOutName + "' " + str(lTrainingTime))
pass
def filterSeasonals(self):
logger = tsutil.get_pyaf_logger();
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS_START")
for trend in self.mTrendList:
lPerfs = {}
lTrend_residue_name = trend.mOutName + '_residue'
lCycleList = []
lSeasonals = []
for cycle in self.mCycleList[trend]:
if(isinstance(cycle , cSeasonalPeriodic)):
cycle.computePerf();
lPerfs[cycle.mOutName] = cycle.mCycleForecastPerf.getCriterionValue(self.mOptions.mCycle_Criterion)
lSeasonals = lSeasonals + [cycle]
else:
lCycleList = lCycleList + [cycle]
if(len(lSeasonals) == 0):
return
lBestCriterion = None
lBestSeasonal = None
for (k,cycle) in enumerate(lSeasonals):
lCriterionValue = lPerfs[cycle.mOutName]
if((lBestCriterion is None) or (lCriterionValue < (1.05 * lBestCriterion))):
lBestSeasonal = cycle
lBestCriterion = lCriterionValue;
lCycleList = lCycleList + [lBestSeasonal]
self.mCycleList[trend] = lCycleList
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS " + trend.mOutName + " " + lBestSeasonal.mOutName)
logger.debug("CYCLE_TRAINING_FILTER_SEASONALS_END")
pass
def estimateAllCycles(self):
self.defineCycles();
self.estimateCycles()
if(self.mOptions.mFilterSeasonals):
self.filterSeasonals()
``` |
{
"source": "joofio/distributed-data-benchmark",
"score": 3
} |
#### File: joofio/distributed-data-benchmark/streamlit_app.py
```python
import numpy as np
import pandas as pd
import streamlit as st
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
import statistics as s
#st.set_page_config(layout="wide")
silos=9
n_clusters=2
#metric=c1.selectbox("metric",["Idade Materna","Bishop Score","Cesarianas Anterior","Cesarianas"])
means={}
means["Idade Materna"]=[30.94473361910594, 30.620558542021765, 31.077226489516296, 31.091688089117394, 31.377103122865833, 31.31202023726448, 31.292021688613477, 31.35806504330773, 30.137582625118036]
means["Bishop Score"]=[5.654205607476635, 4.8772040302267, 5.408, 6.2594936708860756, 6.495614035087719, 5.5227272727272725, 5.826347305389222, 5.68, 6.042910447761194]
means["Cesarianas Anterior"]=[1.11864406779661, 0.5793376173999011, 1.1185647425897036, 1.1300813008130082, 0.31453804347826086, 0.5736070381231672, 0.6453608247422681, 0.8116646415552855, 0.7654205607476635]
means["Cesarianas"]=[0.3000612369871402, 0.2559328700668677, 0.24185177496367033, 0.22922022279348758, 0.27533804738866147, 0.29684228890439635, 0.2973147430932094, 0.27259356103938553, 0.22455146364494807]
st.markdown("""Please Select metric to assess in a distributed manner. Yor data will not be shared and only metadata will be collected from peers.""")
def calculate_centroids(seeds,mean,clusters):
d=seeds.flatten()
d=np.append(d,mean)
# print(d)
res=KMeans(n_clusters=clusters, random_state=0).fit(d.reshape(-1, 1))
return res
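# Hypothetical usage sketch (values are illustrative, not from the original
# source): with 9 silos and 2 clusters,
#   calculate_centroids(np.zeros((9, 2)), 31.0, 2)
# pools the 18 seed values with the local mean (19 points), fits KMeans on the
# reshaped 1-D data and returns the fitted estimator, whose cluster_centers_
# has shape (2, 1).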
def convergence_clusters_2(mean,clusters):
new_seeds=np.zeros((silos,n_clusters))
#get initial from all of the rest:
c1_l=[]
c2_l=[]
# n = s.NormalDist(mu=50, sigma=10)
# seeds = np.array(n.samples(n_clusters, seed=42)) # remove seed if desired
seeds=np.array([np.random.randint(limit_rand, size=n_clusters) for i in range(silos)]) #mimic
for i in range(10): #arbitrary, until convergence
c1_=[]
c2_=[]
#create mine
my_centroids=calculate_centroids(seeds,mean,n_clusters)
#my_centroids=KMeans(n_clusters=clusters, random_state=0).fit(data.reshape(-1, 1))
# print(my_centroids.cluster_centers_)
#get all the others
for idx,x in enumerate(means[metric]):
#row_no_null=x[~pd.isnull(x["IDADE_MATERNA"])]["IDADE_MATERNA"]
silo_mean=x
#means.append(silo_mean)
# silo_own=KMeans(n_clusters=clusters, random_state=0).fit(row_no_null.values.reshape(-1, 1))
# print(silo_own.cluster_centers_[:,0])
# print(silo_mean)
#silo_centroids=calculate_centroids(seeds,silo_own.cluster_centers_[:,0],n_clusters)
silo_centroids=calculate_centroids(seeds,silo_mean,n_clusters).cluster_centers_
# print(silo_centroids[:,0])
new_seeds[idx,:]=silo_centroids[:,0]
#print(new_seeds)
c1_.append(silo_centroids.min())
#print(silo_centroids.max())
c2_.append(silo_centroids.max())
seeds=new_seeds
c1_l.append(np.mean(c1_))
c2_l.append(np.mean(c2_))
# print(seeds)
return c1_l,c2_l,seeds,means,my_centroids
def process_data(mean):
print(mean)
seeds=np.array([np.random.randint(100, size=n_clusters) for i in range(silos)])
_,_,seed,means,my_centroids=convergence_clusters_2(mean,n_clusters)
# print(my_centroids.cluster_centers_[:,0])
c1=plt.scatter([0],my_centroids.cluster_centers_[0,0])
c2=plt.scatter([0],my_centroids.cluster_centers_[1,0])
c3=plt.scatter([0],mean)
plt.legend((c1, c2, c3),
('Cluster1', 'Cluster2', 'Means'),
scatterpoints=1,
loc=0,
ncol=3,
fontsize=8)
plt.title(metric)
st.pyplot(plt)
c1,c2,c3=st.columns(3)
metric=c1.selectbox("metric",["Idade Materna","Bishop Score","Cesarianas Anterior","Cesarianas"])
mean=c2.number_input("Mean",min_value=0.0,value=0.0,step=0.1)
limit_rand=c3.number_input("Limit for Random",min_value=0,max_value=1000,value=100)
if st.button("Calculate"):
process_data(mean)
``` |
{
"source": "joofio/fastapi-heroku-obs-ml",
"score": 3
} |
#### File: joofio/fastapi-heroku-obs-ml/help_functions.py
```python
import re
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
FunctionTransformer,
LabelBinarizer,
LabelEncoder,
OneHotEncoder,
OrdinalEncoder,
StandardScaler,
)
def transfrom_array_to_df_onehot(pl,nparray,onehot=True,overal_imp=False):
col_list=[]
col_list_int = pl["preprocessor"].transformers_[0][2] #changes col location
#print(col_list_int)
ordinal_col=pl["preprocessor"].transformers[1][2]
original_col=pl["preprocessor"].transformers[2][2]
col_list=col_list_int+ordinal_col
if onehot:
encoded_col=pl["preprocessor"].transformers_[2][1].named_steps["OneHotEnconding"].get_feature_names_out()
#print(len(encoded_col))
new_enconded_list=[]
for idx,col in enumerate(original_col):
for n_col in encoded_col:
#print(idx,col)
# print("x"+str(idx))
if "x"+str(idx)+"_" in n_col:
# print(col,n_col)
new_enconded_list.append(col+"_"+n_col.split("_")[-1])
col_list=col_list+new_enconded_list
print(col_list)
#print(len(col_list))
else:
col_list=col_list+original_col
if overal_imp==True:
imputed_cols_idx=pl["imputer"].indicator_.features_
imputed_indicator=[col_list[i] for i in imputed_cols_idx]
# print(imputed_indicator)
# print(len(imputed_indicator))
for imp_col in imputed_indicator:
col_list.append(imp_col+"_imput_indicator")
df1 = pd.DataFrame(nparray, columns=col_list)
return df1
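# Hypothetical usage sketch (step names follow the lookups above; the concrete
# pipeline is an assumption, not from the original source): given a fitted
# Pipeline `pl` whose "preprocessor" is a ColumnTransformer with numeric,
# ordinal and categorical transformers (the categorical one ending in an
# "OneHotEnconding" OneHotEncoder step), an "imputer" step fitted with
# add_indicator=True, and a transformed array X_t produced by those steps,
#   transfrom_array_to_df_onehot(pl, X_t, onehot=True, overal_imp=True)
# returns a DataFrame whose columns are the numeric + ordinal columns, one
# "<original_col>_<category>" column per one-hot output, and one
# "<col>_imput_indicator" column per imputed feature.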
``` |
{
"source": "joofio/heroku-data-quality",
"score": 3
} |
#### File: joofio/heroku-data-quality/api_app.py
```python
from fastapi import FastAPI
import numpy as np
import pandas as pd
from pydantic import BaseModel, Field
from enum import Enum
import re
from collections import Counter
import datetime
import math
import json
from typing import List, Optional
import unicodedata
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
from fastapi.staticfiles import StaticFiles
from enum import Enum
from fastapi import Request
app = FastAPI()
templates = Jinja2Templates(directory="templates")
df=pd.read_csv("data-quality-v1.csv")
class Range(BaseModel):
lower: str
upper: str
class Output(str,Enum):
one="Ok"
two="Warning: Possible Error"
three="Error: please check value"
class Columns(str,Enum):
a_para = "A_PARA"
a_gesta = "A_GESTA"
eutocito_anterior = "EUTOCITO_ANTERIOR"
ventosas_anterior = "VENTOSAS_ANTERIOR"
forceps_anterior = "FORCEPS_ANTERIOR"
cesarianas_anterior = "CESARIANAS_ANTERIOR"
idade_materna = "IDADE_MATERNA"
peso_inicial = "PESO_INICIAL"
imc = "IMC"
numero_consultas_pre_natal = "NUMERO_CONSULTAS_PRE_NATAL"
idade_gestacional_admissao = "IDADE_GESTACIONAL_ADMISSAO"
semanas_gestacao_parto = "SEMANAS_GESTACAO_PARTO"
peso_admissao_internamento = "PESO_ADMISSAO_INTERNAMENTO"
estimativa_peso_eco_30 = "ESTIMATIVA_PESO_ECO_30"
estimativa_peso_eco_31 = "ESTIMATIVA_PESO_ECO_31"
estimativa_peso_eco_32 = "ESTIMATIVA_PESO_ECO_32"
estimativa_peso_eco_24 = "ESTIMATIVA_PESO_ECO_24"
estimativa_peso_eco_25 = "ESTIMATIVA_PESO_ECO_25"
estimativa_peso_eco_26 = "ESTIMATIVA_PESO_ECO_26"
estimativa_peso_eco_27 = "ESTIMATIVA_PESO_ECO_27"
estimativa_peso_eco_28 = "ESTIMATIVA_PESO_ECO_28"
estimativa_peso_eco_29 = "ESTIMATIVA_PESO_ECO_29"
estimativa_peso_eco_33 = "ESTIMATIVA_PESO_ECO_33"
estimativa_peso_eco_34 = "ESTIMATIVA_PESO_ECO_34"
estimativa_peso_eco_35 = "ESTIMATIVA_PESO_ECO_35"
estimativa_peso_eco_36 = "ESTIMATIVA_PESO_ECO_36"
estimativa_peso_eco_37 = "ESTIMATIVA_PESO_ECO_37"
estimativa_peso_eco_38 = "ESTIMATIVA_PESO_ECO_38"
estimativa_peso_eco_39 = "ESTIMATIVA_PESO_ECO_39"
estimativa_peso_eco_40 = "ESTIMATIVA_PESO_ECO_40"
estimativa_peso_eco_41 = "ESTIMATIVA_PESO_ECO_41"
class Evaluation(BaseModel):
range: Range
output: Output
class Input(BaseModel):
snomed:str
value:float
unit:str
column: Columns = Field(
"IMC", title="Column name to be evaluated",
)
def checkoutlier(x,iqr,q1,q3):
if pd.isna(x):
return 0,None,None
ll_threshold=q1-iqr*3
uu_threshold=q3+iqr*3
if x<ll_threshold or x>uu_threshold:
return 2,ll_threshold,uu_threshold
l_threshold=q1-iqr*1.5
u_threshold=q3+iqr*1.5
if x<l_threshold or x>u_threshold:
return 1,l_threshold,u_threshold
return 0,l_threshold,u_threshold
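# Worked example (for clarity, not in the original source): with q1=10, q3=20,
# iqr=10, values outside [-20, 50] (3*IQR fences) return (2, -20, 50) and map
# to "Error: please check value"; values outside [-5, 35] (1.5*IQR fences)
# return (1, -5, 35) and map to "Warning: Possible Error"; e.g.
# checkoutlier(25, 10, 10, 20) returns (0, -5, 35), i.e. "Ok".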
@app.get("/",response_class=HTMLResponse)
async def root(request:Request):
return templates.TemplateResponse("index.html",{"request":request})
@app.post("/CorrectnessCheck",response_model=Evaluation)
async def quality_check(input: Input):
sel=df[df["column"]==input.column]
# print(sel)
q1=sel["q1"].values[0]
q3=sel["q3"].values[0]
iqr=sel["iqr"].values[0]
#print(q1,q3,iqr)
output,l,u=checkoutlier(input.value,iqr,q1,q3)
label="Ok"
if output==1:
label="Warning: Possible Error"
if output==2:
label="Error: please check value"
doc={}
doc["output"]=label
doc["range"]={"lower":l,"upper":u}
return doc
@app.post("/comparabilityCheck",response_model=Evaluation)
async def comparability_check():
doc={}
doc["output"]="Ok"
doc["range"]={"lower":0,"upper":100}
return doc
@app.post("/completenessCheck")
async def completeness_check():
doc={}
doc["output"]="Ok"
doc["range"]={"lower":0,"upper":100}
return doc
@app.post("/concordanceCheck")
async def concordance_check():
doc={}
doc["output"]="Ok"
doc["range"]={"lower":0,"upper":100}
return doc
``` |
{
"source": "joofio/heroku-meds-api",
"score": 2
} |
#### File: joofio/heroku-meds-api/api_app.py
```python
from fastapi import FastAPI
import numpy as np
import pandas as pd
from pydantic import BaseModel, Field
from enum import Enum
import re
from collections import Counter
import datetime
import math
from textdistance import damerau_levenshtein,jaro_winkler,jaccard,overlap,lcsseq,lcsstr,ratcliff_obershelp,levenshtein
import json
from typing import List, Optional
app = FastAPI()
errors_dict=pd.read_csv("truth_error.csv")
class Summary(BaseModel):
corpus: str
class Output(BaseModel):
display: str
ID: str
atc: str
class Result(BaseModel):
input: str
output: Output
class Extraction(BaseModel):
results: List[Result]
timestamp: datetime.date
@app.get("/")
async def root():
return {"message": "Hello World"}
@app.post("/extract-meds",response_model=Extraction)
async def extract_meds(summary: Summary):
final_list=[]
for x in summary.corpus.split():
if len(errors_dict[errors_dict["error"]==x.lower()])>0:
key=errors_dict[errors_dict["error"]==x.lower()]["truth"].values[0]
doc={"input":x,"output":{"display":key,"ID":"","atc":""}}
final_list.append(doc)
if len(errors_dict[errors_dict["truth"]==x.lower()])>0:
key=errors_dict[errors_dict["truth"]==x.lower()]["truth"].values[0]
doc={"input":x,"output":{"display":key,"ID":"","atc":""}}
final_list.append(doc)
return {"results":final_list,"timestamp":datetime.datetime.now()}
``` |
{
"source": "joofio/obs-ml-2",
"score": 3
} |
#### File: joofio/obs-ml-2/help_functions.py
```python
import re
import scipy.stats as st
import plotly.express as px
import numpy as np
import pandas as pd
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
FunctionTransformer,
LabelBinarizer,
LabelEncoder,
OneHotEncoder,
OrdinalEncoder,
StandardScaler,
)
import scipy
def to_object(x):
return pd.DataFrame(x).astype(str)
def to_number(x):
return pd.DataFrame(x).astype(float)
def get_ci_model_from_clf(clf):
params = []
for k, v in clf.cv_results_.items():
if k == "params" and type(v) == list:
# print(k,v)
for p in v:
# print(p)
z = []
for d, e in p.items():
z.append(str(d) + "=" + str(e))
# print(d,e)
params.append("|".join(z))
# print(params)
param_train_score = {str(d): [] for d in params}
pattern = "split\d{1,2}_\S+"
for k, v in clf.cv_results_.items():
if re.match(pattern, k):
for idx, para in enumerate(param_train_score):
param_train_score[para].append(v[idx])
train_score_ci = {
        k: st.norm.interval(0.95, loc=np.mean(v), scale=scipy.stats.sem(v))
for k, v in param_train_score.items()
}
return train_score_ci
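# Illustrative usage sketch (assumes `clf` is a fitted sklearn GridSearchCV/RandomizedSearchCV):
# the returned dict maps each parameter combination, keyed like
# "criterion=gini|min_samples_split=20", to a (lower, upper) 95% confidence interval over
# its per-split cross-validation scores, and can be fed to plot_error_bar below.
#
#   ci = get_ci_model_from_clf(clf)
#   fft = plot_error_bar(ci)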
def plot_error_bar(ci_rf):
def color_statistical_sig(x, max_val):
plus = x["plus"]
# print(minus,plus,max_val)
if plus >= max_val:
# print("---",plus,max_val)
return "not sig"
return "sig"
ff = pd.DataFrame(ci_rf)
fft = ff.transpose()
fft.columns = ["minus", "plus"]
fft["mean"] = fft.apply(np.mean, axis=1)
fft["e_plus"] = fft["plus"] - fft["mean"]
fft["e_minus"] = fft["mean"] - fft["minus"]
max_val = fft["plus"].max()
# print(max_val)
min_val = fft[fft["minus"] > 0]["minus"].min()
min_plus_idx = fft[fft["plus"] > 0]["plus"].idxmax()
min_plus = fft.loc[min_plus_idx, "minus"]
# tt.loc['criterion=gini|min_samples_split=20']["minus"]
# print(min_plus)
fft["max"] = fft["plus"].apply(lambda x: "max" if x == max_val else "not max")
fft["significant"] = fft.apply(
lambda x: color_statistical_sig(x, max_val=min_plus), axis=1
)
# print(fft)
fft["hover_data"] = (
round(fft["minus"], 4).astype(str) + " +- " + round(fft["plus"], 4).astype(str)
)
# print(fft["hover_data"])
fig = px.scatter(
fft,
x=fft.index,
y="mean",
error_y="e_plus",
error_y_minus="e_minus",
color="significant",
symbol="max",
hover_data=["hover_data"],
)
fig.update(layout_yaxis_range=[min_val - 0.1, max_val + 0.1])
fig.show()
return fft
def transfrom_array_to_df_onehot(pl, nparray, onehot=True, overal_imp=False):
col_list = []
col_list_int = pl["preprocessor"].transformers_[0][2] # changes col location
# print(col_list_int)
ordinal_col = pl["preprocessor"].transformers[1][2]
original_col = pl["preprocessor"].transformers[2][2]
col_list = col_list_int + ordinal_col
if onehot:
encoded_col = (
pl["preprocessor"]
.transformers_[2][1]
.named_steps["OneHotEnconding"]
.get_feature_names_out()
)
# print(len(encoded_col))
new_enconded_list = []
for idx, col in enumerate(original_col):
for n_col in encoded_col:
# print(idx,col)
# print("x"+str(idx))
if "x" + str(idx) + "_" in n_col:
# print(col,n_col)
new_enconded_list.append(col + "_" + n_col.split("_")[-1])
col_list = col_list + new_enconded_list
print(col_list)
# print(len(col_list))
else:
col_list = col_list + original_col
if overal_imp == True:
imputed_cols_idx = pl["imputer"].indicator_.features_
imputed_indicator = [col_list[i] for i in imputed_cols_idx]
# print(imputed_indicator)
# print(len(imputed_indicator))
for imp_col in imputed_indicator:
col_list.append(imp_col + "_imput_indicator")
print(col_list)
df1 = pd.DataFrame(nparray, columns=col_list)
return df1
def get_transformers_nan(df, pl):
to_list = []
col_list = []
col_list_int = pl["preprocessor"].transformers_[0][2] # changes col location
# print(col_list_int)
ordinal_col = pl["preprocessor"].transformers[1][2]
original_col = pl["preprocessor"].transformers[2][2]
col_list = col_list_int + ordinal_col
tt = pl.transform(df)
imputed_cols_idx = pl["imputer"].indicator_.features_
imputed_indicator = [col_list[i] for i in imputed_cols_idx]
# print(imputed_indicator)
# print(len(imputed_indicator))
for imp_col in imputed_indicator:
to_list.append(imp_col + "_imput_indicator")
# print(to_list)
missing_imp = np.zeros((1, len(to_list)))
# print(missing_imp)
# print(tt)
final_np = np.append(tt, missing_imp)
return final_np
```
#### File: joofio/obs-ml-2/streamlit_app.py
```python
import numpy as np
import pandas as pd
import streamlit as st
import plotly.express as px
from sklearn.compose import ColumnTransformer
import matplotlib.pyplot as pl
import collections
import shap
from shap import Explanation
from help_functions import *
from sklearn.multiclass import OneVsRestClassifier
from sklearn.pipeline import Pipeline, make_pipeline
from sklearn.preprocessing import (
FunctionTransformer,
LabelBinarizer,
LabelEncoder,
OneHotEncoder,
OrdinalEncoder,
StandardScaler,
)
from sklearn import preprocessing
from lightgbm import LGBMClassifier
import json
import joblib
from sklearn.metrics import (
plot_confusion_matrix,roc_curve, auc,
roc_auc_score,
)
st.set_page_config(layout="wide")
st.set_option('deprecation.showPyplotGlobalUse', False)
reversed_label_encoder={1: 'Vaginal', 0: 'Cesariana'}
od = collections.OrderedDict(sorted(reversed_label_encoder.items()))
with open('051_prod_feature_data.json') as json_file:
columns = json.load(json_file)
THRESHOLD_DEF=0.70
prc=pd.read_csv("prc.csv")
tpr=pd.read_csv("tpr.csv",index_col=[0])
auc_data=pd.read_csv("auc.csv")
#print(od)
row={}
pred_cols=[]
total_cols=len(columns)
#print(total_cols//6)
#print(total_cols%6)
cols_dicts={}
#print(columns)
keys=list(columns.keys())
COL_VALUE=6
st.header("Delivery Type Prediction")
st.markdown("""Please select the options for the model to predict the delivery type. Click the button in the sidebar to start prediction""")
for i in range(0,total_cols,COL_VALUE):
cols_dicts["col"+str(i)+"0"],cols_dicts["col"+str(i)+"1"],cols_dicts["col"+str(i)+"2"],cols_dicts["col"+str(i)+"3"],cols_dicts["col"+str(i)+"4"],cols_dicts["col"+str(i)+"5"]=st.columns(COL_VALUE)
for j in range(0,COL_VALUE):
# print(i,j)
if (i+j)>=total_cols:
break
col=keys[i+j]
label=columns[col][2]
# print(col)
value_col=columns[col]
# print(value_col)
ncol=" ".join([c.capitalize() if c!="IMC" else c for c in label.split("_") ])
options=[str(cols).replace("nan","Desconhecido") for cols in value_col[1]]
if value_col[0] in["cat","ord"]:
if options==["0","1"]:
# print(options)
options=["Não","Sim"]
row[col]=[cols_dicts["col"+str(i)+str(j)].selectbox(ncol, options,key=col)]
#
if value_col[0] in["int"]:
max_val=value_col[1][1]
step=1.0
if max_val>1000:
step=100.0
row[col]=[cols_dicts["col"+str(i)+str(j)].number_input(ncol,min_value=value_col[1][0],max_value=max_val,value=value_col[1][2],step=step,key=col)]
filename = '051_prod_lgbm.sav'
loaded_model = joblib.load(filename)
filename = '051_prod_pipeline.sav'
pipeline = joblib.load(filename)
filename = '051_prod_explainer.sav'
explainer = joblib.load(filename)
def predict_with_threshold(x,threshold=0.9):
print(x)
if x>=threshold:
return 1
else:
return 0
def create_outcome(le,arr):
outcome_dict={}
for idx,class_ in le.items():
# print(idx,class_,arr)
# print(arr[0][idx])
outcome_dict[class_]=[str(round(arr[0][idx]*100,2)) +" %"]
outcome_dict["Threshold"]=THRESHOLD
return pd.DataFrame.from_dict(outcome_dict)
make_prediction=st.sidebar.button('Make Prediction')
explaining=st.sidebar.button('Make Prediction with Shap Values')
THRESHOLD=st.sidebar.slider("Threshold for prediction", min_value=0.0, max_value=1.0, value=THRESHOLD_DEF, step=0.01)
with st.expander("Thresholds Definition"):
vcol1, vcol2,vcol3= st.columns(3)
# print(prc.head())
fig = px.area(prc,
x="recall", y="precision",
title=f'Precision-Recall Curve',
labels=dict(x='Recall', y='Precision'),hover_data=["thresholds"],
width=700, height=500
)
fig.add_shape(
type='line', line=dict(dash='dash'),
x0=0, x1=1, y0=1, y1=0
)
fig.update_yaxes(scaleanchor="x", scaleratio=1)
fig.update_xaxes(constrain='domain')
#fig.show()
vcol1.plotly_chart(fig, use_container_width=True)
fig_thresh = px.line(
tpr, title='TPR and FPR at every threshold',
width=700, height=500
)
fig_thresh.update_yaxes(scaleanchor="x", scaleratio=1)
fig_thresh.update_xaxes(range=[0, 1], constrain='domain')
vcol2.plotly_chart(fig_thresh, use_container_width=True)
fpr=auc_data["fpr"].values
tpr=auc_data["tpr"].values
fig_auc= px.area(auc_data,
x="fpr", y="tpr",
title=f'ROC Curve (AUC={auc(fpr, tpr):.4f})',
labels=dict(x='False Positive Rate', y='True Positive Rate'),hover_data=["thresholds"],
width=700, height=500,#hover_data=thresholds
)
fig_auc.add_shape(
type='line', line=dict(dash='dash'),
x0=0, x1=1, y0=0, y1=1
)
fig_auc.update_yaxes(scaleanchor="x", scaleratio=1)
fig_auc.update_xaxes(constrain='domain')
vcol3.plotly_chart(fig_auc, use_container_width=True)
def streamlit_predict(row):
df=pd.DataFrame.from_dict(row)
st.write('Predicting for')
st.dataframe(df)
X=pipeline.transform(df.replace("Desconhecido","nan"))
df1=transfrom_array_to_df_onehot(pipeline,X,onehot=False,overal_imp=True)
pred_proba=loaded_model.predict_proba(X)
pred=predict_with_threshold(pred_proba[0][1],THRESHOLD)
st.markdown("### The prediction is: ")
st.write(reversed_label_encoder[pred])
st.dataframe(create_outcome(od,pred_proba))
return df1,X,pred,pred_proba
if make_prediction:
streamlit_predict(row)
if explaining:
df1,X,pred,pred_proba=streamlit_predict(row)
st.write('Explaining using SHAP values...')
shap_values = explainer.shap_values(X,check_additivity=False)
#Now we can plot relevant plots that will help us analyze the model.
st.subheader("Summary Plot")
shap.summary_plot(shap_values, X, plot_type="bar", class_names= ["Cesariana","Vaginal"], feature_names = df1.columns)
st.pyplot(bbox_inches='tight',dpi=300,pad_inches=0)
pl.clf()
st.subheader("Force Plot")
    shap.force_plot(explainer.expected_value[pred], shap_values[pred], df1, matplotlib=True, show=False, figsize=(30, 10))
st.pyplot(bbox_inches='tight',dpi=300,pad_inches=0)
pl.clf()
#https://github.com/sgoede/streamlit-boston-app/blob/master/boston_xgb_app.py
``` |
{
"source": "joofio/streamlit-drug-pred",
"score": 3
} |
#### File: joofio/streamlit-drug-pred/streamlit_app.py
```python
import numpy as np
import pandas as pd
import streamlit as st
import pickle
from PIL import Image
from rdkit import Chem
from rdkit.Chem import Descriptors
from rdkit.Chem import Draw
import math
######################
# Custom function
######################
## Calculate molecular descriptors
def AromaticProportion(m):
aromatic_atoms = [m.GetAtomWithIdx(i).GetIsAromatic() for i in range(m.GetNumAtoms())]
aa_count = []
for i in aromatic_atoms:
if i==True:
aa_count.append(1)
AromaticAtom = sum(aa_count)
HeavyAtom = Descriptors.HeavyAtomCount(m)
AR = AromaticAtom/HeavyAtom
return AR
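# Quick sanity-check sketch (illustrative, assumes RDKit can parse the SMILES):
# benzene is entirely aromatic, so every heavy atom counts and the proportion is 1.0.
#   AromaticProportion(Chem.MolFromSmiles("c1ccccc1"))  # -> 1.0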
def generate_x(smiles, verbose=False):
moldata= []
for elem in smiles:
mol=Chem.MolFromSmiles(elem)
moldata.append(mol)
desc_MolLogP=[]
desc_MolWt=[]
desc_NumRotatableBonds=[]
desc_AromaticProportion=[]
#print("len:",len(moldata))
for mol in moldata:
desc_MolLogP.append(Descriptors.MolLogP(mol))
desc_MolWt.append(Descriptors.MolWt(mol))
desc_NumRotatableBonds.append(Descriptors.NumRotatableBonds(mol))
desc_AromaticProportion.append(AromaticProportion(mol))
f={"MolLogP":desc_MolLogP,"MolWt":desc_MolWt,"NumRotatableBonds":desc_NumRotatableBonds,"AromaticProportion":desc_AromaticProportion}
descriptors = pd.DataFrame.from_dict(f)
return descriptors
def LogS_to_mg_ml(logs,mw):
"""LogS is directly related to the water solubility of a drug and it is defined as a common solubility
unit corresponding to the 10-based logarithm of the solubility of a molecule measured in mol/L.
The solubility and logS of a drug can be divided in:"""
    mol = 10 ** logs  # solubility in mol/L
    # print(mw)
    # mol/L multiplied by mw (g/mol) gives g/L, and 1 g/L is the same concentration as 1 mg/ml
    return str(round(mol * mw, 3)) + " mg/ml"
def mg_ml_to_logS(sol,mw,sol_unit):
"""LogS is directly related to the water solubility of a drug and it is defined as a common solubility
unit corresponding to the 10-based logarithm of the solubility of a molecule measured in mol/L.
The solubility and logS of a drug can be divided in:"""
    # sol is the solubility in mg/ml and mw is the molecular weight in g/mol
    # (e.g. roughly 180.159 g/mol for aspirin, reported as "less than 1 mg/mL at 73° F").
    # Since g/mol is equivalent to mg/mmol, sol/mw gives mmol/ml, which equals mol/L.
    mol = sol / mw
LogS=math.log10(mol)
return LogS
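# Worked round-trip example (illustrative, aspirin with mw ~180.16 g/mol):
#   LogS_to_mg_ml(-1.0, 180.16)  -> 0.1 mol/L * 180.16 g/mol ≈ 18.016 g/L -> "18.016 mg/ml"
#   mg_ml_to_logS(18.016, 180.16, "mg/ml")  -> log10(18.016 / 180.16) ≈ -1.0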
def create_sum(logs,mws):
f=[]
for l,m in zip(logs,mws):
#print(l,m)
f.append(LogS_to_mg_ml(l,m))
return f
######################
# Page Title
######################
st.write("""
# Molecular Solubility Prediction App
This app predicts the **Solubility (LogS)** values of molecules!
Data obtained from the <NAME>. [ESOL: Estimating Aqueous Solubility Directly from Molecular Structure](https://pubs.acs.org/doi/10.1021/ci034243x). ***J. Chem. Inf. Comput. Sci.*** 2004, 44, 3, 1000-1005.
***
""")
######################
# Input molecules (Side Panel)
######################
st.sidebar.header('User Input Features')
## Read SMILES input
SMILES_input = "NCCCC\nCCC\nCN"
SMILES = st.sidebar.text_area("SMILES input (separate different by new line)", SMILES_input)
#SMILES = "C\n" + SMILES #Adds C as a dummy, first item
SMILES = SMILES.split('\n')
print(SMILES)
st.header('Input SMILES')
#SMILES[1:] # Skips the dummy first item
## Calculate molecular descriptors
st.header('Computed molecular descriptors')
X = generate_x(SMILES)
X
#X[1:] # Skips the dummy first item
######################
# Pre-built model
######################
# Reads in saved model
load_model = pickle.load(open('solubility_model.pkl', 'rb'))
# Apply model to make predictions
prediction = load_model.predict(X)
final_df=pd.DataFrame({"LogS":prediction})
final_df["solubility"]=create_sum(prediction,X.iloc[:,1])
st.header('Predicted Solubility')
#prediction[1:] # Skips the dummy first item
st.dataframe(final_df)
m = Chem.MolFromSmiles(SMILES[0])
im=Draw.MolToImage(m)
st.header('First SMILES Product Visualized')
st.image(im)
``` |
{
"source": "joofio/unitmeasure",
"score": 2
} |
#### File: joofio/unitmeasure/cng_um_bd.py
```python
import requests
import xmltodict
import json
import cx_Oracle
import request_soap
import variables as vb
def getCNGum():
# ##################################### CONNECTION #######################
connection = cx_Oracle.connect(vb.connection)
cursor = connection.cursor()
# ####################################get all ids CNG############################################
prod_pageSize = vb.g_prod_pageSize
prod_pageNumber = vb.g_prod_pageNumber
request1 = request_soap.CNG(1, 1)
encoded_request = request1.encode('utf-8')
head = {"Host": vb.host,
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": str(len(encoded_request))}
resp = requests.post(url=vb.vidal_service + "/merlin-service/services/CommonNameGroupService",
headers=head,
data=encoded_request,
verify=False)
ans = json.dumps(xmltodict.parse(resp.text))
ans = json.loads(ans)
TotalIds = int(ans['soap:Envelope']['soap:Body']['ns1:getAllCommonNameGroupFullsResponse'][
'ns1:pagedResultCommonNameGroupFull']['rowCount']['#text'])
totalPaginas = TotalIds / prod_pageSize + 1
ids = []
while prod_pageNumber < totalPaginas:
i = 0
request2 = request_soap.CNG(prod_pageSize, prod_pageNumber)
encoded_request = request2.encode('utf-8')
head = {"Host": vb.host,
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": str(len(encoded_request))}
resp = requests.post(url=vb.vidal_service + "/merlin-service/services/CommonNameGroupService",
headers=head,
data=encoded_request,
verify=False)
ans = json.dumps(xmltodict.parse(resp.text))
ans = json.loads(ans)
h1 = ans['soap:Envelope']['soap:Body'][
'ns1:getAllCommonNameGroupFullsResponse']['ns1:pagedResultCommonNameGroupFull']
while i < prod_pageSize:
ids.append(h1['result']['commonNameGroupFull']
[i]['commonNameGroup']['id'])
i += 1
prod_pageNumber += 1
# ############################################# DOSE UNITS ###############
for id in ids:
request = request_soap.unit('DoseUnit', 'CommonNameGroup', id)
encoded_request = request.encode('utf-8')
headers = {"Host": vb.host,
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": str(len(encoded_request))}
response = requests.post(url=vb.vidal_service + "/merlin-service/services/PosologyService",
headers=headers,
data=encoded_request,
verify=False)
doc1 = json.dumps(xmltodict.parse(response.text))
doc = json.loads(doc1)
try:
cngdose = doc['soap:Envelope']['soap:Body'][
'ns1:searchDoseUnitByCommonNameGroupIdResponse']['ns1:doseUnitList']['doseUnits']
i = 0
while i <= len(cngdose['doseUnit']):
try:
UnitId = str(vb.g_market) + '0' + str(vb.g_id_standard) + \
cngdose['doseUnit'][i]['id'].rjust(9, '0')
id_product = vb.prefix_level0 + id
insert_stat = "insert into lnk_product_um" + vb.suffix + "(ID_PRODUCT,ID_PRODUCT_SUPPLIER,ID_UNIT_MEASURE,ID_UNIT_MEASURE_CONTEXT,FLG_DEFAULT,FLG_FRACTIONABLE,FLG_STD) values ('" + \
id_product + "','" + vb.g_supplier + "'," + UnitId + ",1,'N','N','Y')"
cursor.execute(insert_stat)
i += 1
except KeyError:
UnitId = str(vb.g_market) + '0' + str(vb.g_id_standard) + \
cngdose['doseUnit']['id'].rjust(9, '0')
id_product = vb.prefix_level0 + id
insert_stat = "insert into lnk_product_um" + vb.suffix + "(ID_PRODUCT,ID_PRODUCT_SUPPLIER,ID_UNIT_MEASURE,ID_UNIT_MEASURE_CONTEXT,FLG_DEFAULT,FLG_FRACTIONABLE,FLG_STD) values ('" + \
id_product + "','" + vb.g_supplier + "'," + UnitId + ",1,'N','N','Y')"
cursor.execute(insert_stat)
i = len(cngdose['doseUnit']) + 1
except IndexError:
i = len(cngdose['doseUnit']) + 1
except KeyError:
pass
except TypeError:
pass
# ############################################### POSOLOGY UNITS #########
for id in ids:
request = request_soap.unit('PosologyUnit', 'CommonNameGroup', id)
encoded_request = request.encode('utf-8')
headers = {"Host": vb.host,
"Content-Type": "text/xml; charset=UTF-8",
"Content-Length": str(len(encoded_request))}
response = requests.post(url=vb.vidal_service + "/merlin-service/services/PosologyService",
headers=headers,
data=encoded_request,
verify=False)
doc1 = json.dumps(xmltodict.parse(response.text))
doc = json.loads(doc1)
try:
cngposol = doc['soap:Envelope']['soap:Body']['ns1:searchPosologyUnitByCommonNameGroupIdResponse'][
'ns1:posologyUnitList']['posologyUnits']
i = 0
while i <= len(cngposol['posologyUnit']):
try:
UnitId = str(vb.g_market) + '0' + str(vb.g_id_standard) + \
cngposol['posologyUnit'][i]['id'].rjust(9, '0')
id_product = vb.prefix_level0 + id
insert_stat = "insert into lnk_product_um" + vb.suffix + "(ID_PRODUCT,ID_PRODUCT_SUPPLIER,ID_UNIT_MEASURE,ID_UNIT_MEASURE_CONTEXT,FLG_DEFAULT,FLG_FRACTIONABLE,FLG_STD) values ('" + \
id_product + "','" + vb.g_supplier + "'," + UnitId + ",2,'N','N','Y')"
cursor.execute(insert_stat)
i += 1
except KeyError:
UnitId = str(vb.g_market) + '0' + str(vb.g_id_standard) + \
cngposol['posologyUnit']['id'].rjust(9, '0')
id_product = vb.prefix_level0 + id
insert_stat = "insert into lnk_product_um" + vb.suffix + "(ID_PRODUCT,ID_PRODUCT_SUPPLIER,ID_UNIT_MEASURE,ID_UNIT_MEASURE_CONTEXT,FLG_DEFAULT,FLG_FRACTIONABLE,FLG_STD) values ('" + \
id_product + "','" + vb.g_supplier + "'," + UnitId + ",2,'N','N','Y')"
cursor.execute(insert_stat)
i = len(cngposol['posologyUnit']) + 1
except IndexError:
i = len(cngposol['posologyUnit']) + 1
except KeyError:
pass
connection.commit()
connection.close()
print ("ALL CNG processed")
``` |
{
"source": "joog-lim/Account-API",
"score": 2
} |
#### File: joog-lim/Account-API/handler.py
```python
import json
from middleware.token import TOKEN_MANAGE
from middleware.mongo import DB_CONNECT
from model.emoji import EmojiModel
from model.token import TokenModel
from model.user import UserModel, UserRegistObject
from util.student import get_generation_from_email, get_is_student_from_email
from util.auth import auth_by_google_token
from util.serverless import createRes, createErrorRes
def hello(event, __):
body = {
"message": "Go Serverless v1.0! Your function executed successfully!",
"input": event,
}
print(1)
return createRes(header={"Content-Type": "application/json"}, body=body)
@DB_CONNECT()
def logout(event, _, DB):
db = DB
token: str = event["headers"]["Authorization"]
if TokenModel(db).delete_by_token(token):
return createRes(header={}, body={})
else:
return createErrorRes(header={}, body={"message": "권한이 없습니다."}, statusCode=401)
@DB_CONNECT()
def login_or_regist(event, _, DB):
decode_token = auth_by_google_token(event["headers"]["Authorization"])
sub: str = decode_token.get("sub")
if sub is None:
return createErrorRes(
header={"Content-Type": "application/json"}, message=decode_token["error"]
)
db = DB
user_collect = UserModel(db)
token_collect = TokenModel(db)
if user_collect.has_account(sub): # 계정 있는지 확인
token: str = token_collect.add(sub) # 있다면 바로 토큰 발급
else: # 없다면 회원가입 진행
email: str = decode_token.get("email")
name: str = decode_token.get("name")
is_student: bool = get_is_student_from_email(email)
if is_student:
generation: int = get_generation_from_email(email)
else:
generation: int = 0
regist_value: UserRegistObject = UserRegistObject(
sub=sub,
email=email,
name=name,
generation=generation,
is_student=is_student,
)
user_collect.register(regist_value)
token: str = token_collect.add(sub)
return createRes(
header={
"Set-Cookie": f"token={token}; Secure; HttpOnly; Domain=server.joog-lim.info; Path=/;"
},
body={"token": token},
)
@DB_CONNECT()
@TOKEN_MANAGE()
def add_emoji(event, _, DB, TOKEN, sub):
emoji: str = event["pathParameters"]["emoji"]
algorithem_num: int = json.loads(event["body"])["num"]
emoji_collect = EmojiModel(DB)
if emoji not in EmojiModel.reaction_list:
return createErrorRes(header={}, message="Bad Request", statusCode=400)
if emoji_collect.add(sub, algorithem_num=algorithem_num, reaction=emoji):
return createRes(
header={
"Set-Cookie": f"token={TOKEN}; Secure; HttpOnly; Domain=joog-lim.info; Path=/;"
},
body={"message": "success"},
)
else:
return createErrorRes(
header={}, message="Already been processed", statusCode=418
)
@DB_CONNECT()
@TOKEN_MANAGE()
def remove_emoji(event, _, DB, TOKEN, sub):
emoji: str = event["pathParameters"]["emoji"]
algorithem_num: int = json.loads(event["body"])["num"]
emoji_collect = EmojiModel(DB)
if emoji not in EmojiModel.reaction_list:
return createErrorRes(header={}, message="Bad Request", statusCode=400)
if emoji_collect.remove(sub, algorithem_num=algorithem_num, reaction=emoji):
return createRes(
header={
"Set-Cookie": f"token={TOKEN}; Secure; HttpOnly; Domain=joog-lim.info; Path=/;"
},
body={"message": "success"},
)
else:
return createErrorRes(
header={}, message="Already been processed", statusCode=418
)
@DB_CONNECT()
def join_emoji(event, _, DB):
algorithem_num: int = int(event["queryStringParameters"]["num"])
emoji = EmojiModel(DB).join_emoji(algorithem_num)
return createRes(header={}, body={i["_id"]: i["count"] for i in emoji})
```
#### File: Account-API/middleware/token.py
```python
from pytz import timezone
from util.serverless import createErrorRes
from model.token import TokenModel
from config import service_timezone
def TOKEN_MANAGE():
def decorator(func):
def wrap(*args, **kwargs):
print(args[0])
if not args[0]["headers"].get("Authorization"):
return createErrorRes(header={}, message="Authorization 헤더를 찾을수가 없습니다.")
token: str = args[0]["headers"]["Authorization"]
token_model = TokenModel(kwargs["DB"])
token_inform = token_model.find(token)
if not token_inform:
return createErrorRes(header={}, message="인가되지않은 토큰입니다.")
expired_at = token_inform["expired_at"].replace(tzinfo=service_timezone)
if token_model.now > expired_at:
token_model.delete(token)
return createErrorRes(header={}, message="토큰이 만료되었습니다.")
renew_able_at = token_inform["renew_able_at"].replace(
tzinfo=service_timezone
)
sub = token_model.decode_token(token)
if renew_able_at < token_model.now:
token_model.delete(token)
new_token: str = token_model.add(sub)
kwargs["TOKEN"] = new_token
else:
kwargs["TOKEN"] = token
kwargs["sub"] = sub
result = func(*args, **kwargs)
return result
return wrap
return decorator
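# Usage sketch (mirrors handler.py): stack after the DB decorator so `DB` is available
# when the token is looked up, and the wrapped handler receives `TOKEN` and `sub` kwargs:
#
#   @DB_CONNECT()
#   @TOKEN_MANAGE()
#   def my_handler(event, context, DB, TOKEN, sub):
#       ...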
``` |
{
"source": "JoogsWasTaken/no-noscript",
"score": 3
} |
#### File: data/scripts/util.py
```python
import os
import sys
results_columns = {
"url": 0,
"noscript": 1,
"scripts": 2,
"js_on_median_load": 3,
"js_on_median_domload": 4,
"js_on_median_idle": 5,
"js_off_median_load": 6,
"js_off_median_domload": 7,
"js_off_median_idle": 8
}
benchmark_columns = {
"url": 0,
"timestamp": 1,
"jsEnabled": 2,
"scriptCount": 3,
"noscript": 4,
"dataFileName": 5
}
def try_remove(lst, item):
"""
Tries to remove the specified item from a list and
silently fails if the item doesn't exist.
"""
try:
lst.remove(item)
except Exception:
pass
def get_paths():
"""
Gets the output path from the command line and formats
the paths to the respective subdirectories.
"""
if len(sys.argv) < 2:
print("usage: {} outputdir [args...]".format(sys.argv[0]))
exit()
base_dir_path = sys.argv[1]
return (os.path.join(base_dir_path, "benchmark.csv"),
os.path.join(base_dir_path, "metrics"),
os.path.join(base_dir_path, "noscript"),
os.path.join(base_dir_path, "screenshots"))
def as_bool(x):
"""
Returns True if the given string in lowercase equals "true",
False otherwise.
"""
return True if x.lower() == "true" else False
def append_to_filename(path, suffix):
"""
Appends a suffix to a filename, e.g. append_to_filename("test.csv", "_2")
will return "test_2.csv".
"""
name, ext = os.path.splitext(path)
return name + suffix + ext
def parse_csv_line(line):
"""
Splits a line of comma seperated values into a list of
values. Removes leading and trailing quotes if there are
any.
"""
parsed_line = []
for x in line.split(","):
if x[0] == "\"" and x[-1] == "\"":
x = x[1:-1]
parsed_line.append(x.strip())
return parsed_line
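if __name__ == "__main__":
    # Tiny self-check sketch (illustrative values only, not part of the analysis scripts).
    print(parse_csv_line('"https://example.com",True, 42 '))  # ['https://example.com', 'True', '42']
    print(as_bool("True"))  # True
    print(append_to_filename("results.csv", "_2"))  # results_2.csv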
``` |
{
"source": "Joohansson/nano-address-notifier",
"score": 2
} |
#### File: Joohansson/nano-address-notifier/notify.py
```python
import os
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import *
import logging
import json
import simplejson
import websockets
import datetime
import asyncio
import time
import numpy as np
from pathlib import Path
# CONFIG THESE
enableEmail = False # if False, it will only log to file
fromEmail = '<EMAIL>' # approved sender in sendgrid '<EMAIL>'
toEmails = ['<EMAIL>'] # comma separated list ['<EMAIL>','<EMAIL>']
sendgridAPIKey = 'xxx' # sendgrid account API key
ws_host = 'wss://socket.nanos.cc'
# New transaction will be emailed directly if this many seconds has passed since last email
# Transactions will be bulk sent in one email if they arrive faster
emailBufferLength = 3600
# Nano amount below this will not be tracked
minAmount = 0.0000001
# Set to True if tracking thousands of accounts, or subscription will fail. Subscribing to all will still work
# but increases the web traffic for both client and server. However NOT available on socket.nanos.cc.
subscribeAll = False
# INPUT AND OUTPUT
accountFile = 'accounts.json' # input accounts to track
logFile = 'events.log'
# CODE - Don't touch
filename = Path(logFile)
filename.touch(exist_ok=True)
logging.basicConfig(level=logging.INFO,filename=logFile, filemode='a+', format='%(name)s - %(levelname)s - %(message)s')
log = logging.getLogger(__name__)
statData = []
accounts = {'account':{}}
accountsIds = []
emailBuffer = []
emailBufferAmounts = {'send': 0, 'receive': 0}
lastEmail = 0
nano = 1000000000000000000000000000000
def timeLog(msg):
return str(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')) + ": " + msg
emails = []
for address in toEmails:
emails.append(To(address))
def format_float(num):
return np.format_float_positional(num, trim='-')
def sendMail(body):
message = Mail(
from_email=fromEmail,
to_emails=emails,
subject='Nano Address Notifier: Transaction events',
html_content='<strong>Transactions occured:</strong><br><br>'+body)
try:
sg = SendGridAPIClient(sendgridAPIKey)
response = sg.send(message)
if response.status_code != 202:
log.error(timeLog('Failed to send email. Status code: ' + response.status_code))
log.error(timeLog(response.body))
#print(response.status_code)
#print(response.body)
#print(response.headers)
except Exception as e:
log.error(e)
def trackAccounts():
global accounts
# read account file
try:
with open(accountFile) as json_file:
inputJson = json.load(json_file)
        # build a lookup of tracked accounts and their aliases
for a in inputJson:
if 'account' in a:
alias = 'N/A'
if 'alias' in a:
alias = str(a['alias'])
accounts[a['account']] = {'alias': alias}
accountsIds.append(a['account'])
except Exception as e:
log.error(timeLog('Could not read account data. Error: %r' %e))
async def connectWebsocket(init = True):
global accounts
global accountsIds
global emailBuffer
global emailBufferAmounts
if init:
trackAccounts()
# Predefined subscription message
msg = {
"action": "subscribe",
"topic": "confirmation",
"ack": "true",
"id": "12345"
}
if not subscribeAll:
msg['options'] = {
"accounts": accountsIds
}
try:
async with websockets.connect(ws_host) as websocket:
log.info(timeLog('Subscribing to websocket and waiting for acknowledge..'))
await websocket.send(json.dumps(msg))
while 1:
try:
rec = json.loads(await websocket.recv())
if 'ack' in rec:
log.info(timeLog('Subscription acknowledged! Waiting for transactions..'))
if 'topic' in rec and rec['topic'] == 'confirmation':
message = rec['message']
text = 'Unknown block'
amount = '0'
okAmount = False
okBlock = False
href = '<a href="https://nanolooker.com/account/'
if int(message['amount']) > 0:
amount = format_float(int(message['amount']) / nano)
if float(amount) >= minAmount:
okAmount = True
## send, receive or change block
if message['block']['account'] in accountsIds:
account = message['account']
textLog = 'Account ' + accounts[account]['alias'] + ' (' + account + ')'
text = 'Account <strong>' + accounts[account]['alias'] + '</strong> (' + href + account + '">' + account + '</a>)'
# send block
if message['block']['subtype'] == 'send':
text = text + ' sent ' + amount + ' NANO to ' + href + message['block']['link_as_account'] + '">' + message['block']['link_as_account'] + '</a>'
textLog = textLog + ' sent ' + amount + ' NANO to ' + message['block']['link_as_account']
okBlock = True
emailBufferAmounts['send'] = emailBufferAmounts['send'] + float(amount)
# receive block
elif message['block']['subtype'] == 'receive':
add = ' received ' + amount + ' NANO'
text = text + add
textLog = textLog + add
okBlock = True
emailBufferAmounts['receive'] = emailBufferAmounts['receive'] + float(amount)
# change block
elif message['block']['subtype'] == 'change':
text = text + ' changed rep to ' + href + message['block']['representative'] + '">' + message['block']['representative'] + '</a>'
textLog = textLog + ' changed rep to ' + message['block']['representative']
okAmount = True
okBlock = True
## incoming block
elif message['block']['link_as_account'] in accounts:
account = message['block']['link_as_account']
textLog = 'Account ' + accounts[account]['alias'] + ' (' + account + ')'
text = 'Account <strong>' + accounts[account]['alias'] + '</strong> (' + href + account + '">' + account + '</a>)'
# incoming block
if message['block']['subtype'] == 'send':
text = text + ' got incoming ' + amount + ' NANO from ' + href + message['block']['account'] + '">' + message['block']['account'] + '</a>'
textLog = textLog + ' got incoming ' + amount + ' NANO from ' + message['block']['account']
okBlock = True
if okBlock and okAmount:
log.info(timeLog(textLog))
emailBuffer.append(text)
except Exception as e:
log.error(timeLog('Error: %r' %e))
await asyncio.sleep(5)
break
await connectWebsocket(False)
except Exception as e:
log.error(timeLog('Websocket connection error. Error: %r' %e))
# wait 5sec and reconnect
await asyncio.sleep(5)
await connectWebsocket(False)
async def emailer():
global emailBuffer
global lastEmail
global emailBufferAmounts
if not enableEmail:
return
while 1:
try:
await asyncio.sleep(1)
if len(emailBuffer) > 0 and int(time.time()) > lastEmail + emailBufferLength:
body = ''
for text in emailBuffer:
body = body + str(datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')) + " (UTC): " + text + '<br><br>'
body = body + '<strong>Total Amounts: </strong>SENT: ' + format_float(emailBufferAmounts['send']) + ' | RECEIVED: ' + format_float(emailBufferAmounts['receive'])
# reset buffers
emailBuffer = []
emailBufferAmounts = {'send': 0, 'receive': 0}
log.info(timeLog('Sending email'))
sendMail(body)
lastEmail = int(time.time())
except Exception as e:
log.error(timeLog('Failed to send email'))
log.error(timeLog('Error: %r' %e))
try:
loop = asyncio.get_event_loop()
futures = [connectWebsocket(), emailer()]
loop.run_until_complete(asyncio.wait(futures))
except KeyboardInterrupt:
pass
``` |
{
"source": "jooh/bindit",
"score": 3
} |
#### File: bindit/bindit/shell.py
```python
import sys
import subprocess
import shlex
def run(*arg, interactive=False):
"""subprocess.run wrapper to handle exceptions, writing to stdout/stderr or not."""
stdout = subprocess.PIPE
stderr = subprocess.PIPE
if interactive:
stdout = None
stderr = None
try:
ret = subprocess.run(
arg, stdout=stdout, stderr=stderr, check=True, shell=False, encoding="utf-8"
)
except subprocess.CalledProcessError as ret:
print(f"command line exception with args: {arg}")
if not interactive:
sys.stdout.write(ret.stdout)
sys.stderr.write(ret.stderr)
sys.exit(ret.returncode)
except BaseException:
raise
return ret
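# Illustrative usage sketch (command names here are just examples):
#   ret = run("docker", "--version")                         # captures stdout/stderr, exits on failure
#   run("docker", "run", "-it", "ubuntu", interactive=True)  # streams output to the terminal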
def join_and_quote(arg_list):
"""return a string of appropriately quoted and escaped arguments from list."""
# need to cast to str because join chokes on pathlib.Path as of python 3.6
# and shlex to get quotes on args with spaces (and escape any nested quotes)
return " ".join([shlex.quote(str(this_arg)) for this_arg in arg_list])
``` |
{
"source": "joohoi/focli",
"score": 3
} |
#### File: focli/focli/exceptions.py
```python
class FoliException(Exception):
""" Base exception """
def __init__(self, message):
self.message = message
class FoliStopNameException(FoliException):
""" Stop name error """
class FoliServerException(FoliException):
""" Stop name error """
class FoliParseDataError(FoliException):
""" Error parsing data """
class FoliTerminalException(FoliException):
""" Error getting terminal info """
```
#### File: focli/focli/__main__.py
```python
"""Command entry point"""
from __future__ import absolute_import
import sys
from focli import focli
def main():
return focli.main()
if __name__ == '__main__':
sys.exit(main())
``` |
{
"source": "joohongpark/RustPython",
"score": 3
} |
#### File: Lib/test/test_base64.py
```python
import unittest
from test import support
import base64
import binascii
import os
from array import array
from test.support import script_helper, os_helper
class LegacyBase64TestCase(unittest.TestCase):
# Legacy API is not as permissive as the modern API
def check_type_errors(self, f):
self.assertRaises(TypeError, f, "")
self.assertRaises(TypeError, f, [])
multidimensional = memoryview(b"1234").cast('B', (2, 2))
self.assertRaises(TypeError, f, multidimensional)
int_data = memoryview(b"1234").cast('I')
self.assertRaises(TypeError, f, int_data)
def test_encodestring_warns(self):
with self.assertWarns(DeprecationWarning):
base64.encodestring(b"www.python.org")
def test_decodestring_warns(self):
with self.assertWarns(DeprecationWarning):
base64.decodestring(b"d3d3LnB5dGhvbi5vcmc=\n")
def test_encodebytes(self):
eq = self.assertEqual
eq(base64.encodebytes(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=\n")
eq(base64.encodebytes(b"a"), b"YQ==\n")
eq(base64.encodebytes(b"ab"), b"YWI=\n")
eq(base64.encodebytes(b"abc"), b"YWJj\n")
eq(base64.encodebytes(b""), b"")
eq(base64.encodebytes(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n")
# Non-bytes
eq(base64.encodebytes(bytearray(b'abc')), b'YWJj\n')
eq(base64.encodebytes(memoryview(b'abc')), b'YWJj\n')
eq(base64.encodebytes(array('B', b'abc')), b'YWJj\n')
self.check_type_errors(base64.encodebytes)
def test_decodebytes(self):
eq = self.assertEqual
eq(base64.decodebytes(b"d3d3LnB5dGhvbi5vcmc=\n"), b"www.python.org")
eq(base64.decodebytes(b"YQ==\n"), b"a")
eq(base64.decodebytes(b"YWI=\n"), b"ab")
eq(base64.decodebytes(b"YWJj\n"), b"abc")
eq(base64.decodebytes(b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n"),
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}")
eq(base64.decodebytes(b''), b'')
# Non-bytes
eq(base64.decodebytes(bytearray(b'YWJj\n')), b'abc')
eq(base64.decodebytes(memoryview(b'YWJj\n')), b'abc')
eq(base64.decodebytes(array('B', b'YWJj\n')), b'abc')
self.check_type_errors(base64.decodebytes)
def test_encode(self):
eq = self.assertEqual
from io import BytesIO, StringIO
infp = BytesIO(b'abcdefghijklmnopqrstuvwxyz'
b'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
b'0123456789!@#0^&*();:<>,. []{}')
outfp = BytesIO()
base64.encode(infp, outfp)
eq(outfp.getvalue(),
b'YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE'
b'RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT'
b'Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==\n')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('abc'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'abc'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('abc'), StringIO())
def test_decode(self):
from io import BytesIO, StringIO
infp = BytesIO(b'd3d3LnB5dGhvbi5vcmc=')
outfp = BytesIO()
base64.decode(infp, outfp)
self.assertEqual(outfp.getvalue(), b'www.python.org')
# Non-binary files
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), BytesIO())
self.assertRaises(TypeError, base64.encode, BytesIO(b'YWJj\n'), StringIO())
self.assertRaises(TypeError, base64.encode, StringIO('YWJj\n'), StringIO())
class BaseXYTestCase(unittest.TestCase):
# Modern API completely ignores exported dimension and format data and
# treats any buffer as a stream of bytes
def check_encode_type_errors(self, f):
self.assertRaises(TypeError, f, "")
self.assertRaises(TypeError, f, [])
def check_decode_type_errors(self, f):
self.assertRaises(TypeError, f, [])
def check_other_types(self, f, bytes_data, expected):
eq = self.assertEqual
b = bytearray(bytes_data)
eq(f(b), expected)
# The bytearray wasn't mutated
eq(b, bytes_data)
eq(f(memoryview(bytes_data)), expected)
eq(f(array('B', bytes_data)), expected)
# XXX why is b64encode hardcoded here?
self.check_nonbyte_element_format(base64.b64encode, bytes_data)
self.check_multidimensional(base64.b64encode, bytes_data)
def check_multidimensional(self, f, data):
padding = b"\x00" if len(data) % 2 else b""
bytes_data = data + padding # Make sure cast works
shape = (len(bytes_data) // 2, 2)
multidimensional = memoryview(bytes_data).cast('B', shape)
self.assertEqual(f(multidimensional), f(bytes_data))
def check_nonbyte_element_format(self, f, data):
padding = b"\x00" * ((4 - len(data)) % 4)
bytes_data = data + padding # Make sure cast works
int_data = memoryview(bytes_data).cast('I')
self.assertEqual(f(int_data), f(bytes_data))
def test_b64encode(self):
eq = self.assertEqual
# Test default alphabet
eq(base64.b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.b64encode(b'\x00'), b'AA==')
eq(base64.b64encode(b"a"), b"YQ==")
eq(base64.b64encode(b"ab"), b"YWI=")
eq(base64.b64encode(b"abc"), b"YWJj")
eq(base64.b64encode(b""), b"")
eq(base64.b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Test with arbitrary alternative characters
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=b'*$'), b'01a*b$cd')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=bytearray(b'*$')),
b'01a*b$cd')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=memoryview(b'*$')),
b'01a*b$cd')
eq(base64.b64encode(b'\xd3V\xbeo\xf7\x1d', altchars=array('B', b'*$')),
b'01a*b$cd')
# Non-bytes
self.check_other_types(base64.b64encode, b'abcd', b'YWJjZA==')
self.check_encode_type_errors(base64.b64encode)
self.assertRaises(TypeError, base64.b64encode, b"", altchars="*$")
# Test standard alphabet
eq(base64.standard_b64encode(b"www.python.org"), b"d3d3LnB5dGhvbi5vcmc=")
eq(base64.standard_b64encode(b"a"), b"YQ==")
eq(base64.standard_b64encode(b"ab"), b"YWI=")
eq(base64.standard_b64encode(b"abc"), b"YWJj")
eq(base64.standard_b64encode(b""), b"")
eq(base64.standard_b64encode(b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}"),
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0NT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==")
# Non-bytes
self.check_other_types(base64.standard_b64encode,
b'abcd', b'YWJjZA==')
self.check_encode_type_errors(base64.standard_b64encode)
# Test with 'URL safe' alternative characters
eq(base64.urlsafe_b64encode(b'\xd3V\xbeo\xf7\x1d'), b'01a-b_cd')
# Non-bytes
self.check_other_types(base64.urlsafe_b64encode,
b'\xd3V\xbeo\xf7\x1d', b'01a-b_cd')
self.check_encode_type_errors(base64.urlsafe_b64encode)
def test_b64decode(self):
eq = self.assertEqual
tests = {b"d3d3LnB5dGhvbi5vcmc=": b"www.python.org",
b'AA==': b'\x00',
b"YQ==": b"a",
b"YWI=": b"ab",
b"YWJj": b"abc",
b"YWJjZGVmZ2hpamtsbW5vcHFyc3R1dnd4eXpBQkNE"
b"RUZHSElKS0xNTk9QUVJTVFVWV1hZWjAxMjM0\nNT"
b"Y3ODkhQCMwXiYqKCk7Ojw+LC4gW117fQ==":
b"abcdefghijklmnopqrstuvwxyz"
b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}",
b'': b'',
}
for data, res in tests.items():
eq(base64.b64decode(data), res)
eq(base64.b64decode(data.decode('ascii')), res)
# Non-bytes
self.check_other_types(base64.b64decode, b"YWJj", b"abc")
self.check_decode_type_errors(base64.b64decode)
# Test with arbitrary alternative characters
tests_altchars = {(b'01a*b$cd', b'*$'): b'\xd3V\xbeo\xf7\x1d',
}
for (data, altchars), res in tests_altchars.items():
data_str = data.decode('ascii')
altchars_str = altchars.decode('ascii')
eq(base64.b64decode(data, altchars=altchars), res)
eq(base64.b64decode(data_str, altchars=altchars), res)
eq(base64.b64decode(data, altchars=altchars_str), res)
eq(base64.b64decode(data_str, altchars=altchars_str), res)
# Test standard alphabet
for data, res in tests.items():
eq(base64.standard_b64decode(data), res)
eq(base64.standard_b64decode(data.decode('ascii')), res)
# Non-bytes
self.check_other_types(base64.standard_b64decode, b"YWJj", b"abc")
self.check_decode_type_errors(base64.standard_b64decode)
# Test with 'URL safe' alternative characters
tests_urlsafe = {b'01a-b_cd': b'\xd3V\xbeo\xf7\x1d',
b'': b'',
}
for data, res in tests_urlsafe.items():
eq(base64.urlsafe_b64decode(data), res)
eq(base64.urlsafe_b64decode(data.decode('ascii')), res)
# Non-bytes
self.check_other_types(base64.urlsafe_b64decode, b'01a-b_cd',
b'\xd3V\xbeo\xf7\x1d')
self.check_decode_type_errors(base64.urlsafe_b64decode)
def test_b64decode_padding_error(self):
self.assertRaises(binascii.Error, base64.b64decode, b'abc')
self.assertRaises(binascii.Error, base64.b64decode, 'abc')
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_b64decode_invalid_chars(self):
# issue 1466065: Test some invalid characters.
tests = ((b'%3d==', b'\xdd'),
(b'$3d==', b'\xdd'),
(b'[==', b''),
(b'YW]3=', b'am'),
(b'3{d==', b'\xdd'),
(b'3d}==', b'\xdd'),
(b'@@', b''),
(b'!', b''),
(b"YWJj\n", b"abc"),
(b'YWJj\nYWI=', b'abcab'))
funcs = (
base64.b64decode,
base64.standard_b64decode,
base64.urlsafe_b64decode,
)
for bstr, res in tests:
for func in funcs:
with self.subTest(bstr=bstr, func=func):
self.assertEqual(func(bstr), res)
self.assertEqual(func(bstr.decode('ascii')), res)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr, validate=True)
with self.assertRaises(binascii.Error):
base64.b64decode(bstr.decode('ascii'), validate=True)
# Normal alphabet characters not discarded when alternative given
res = b'\xFB\xEF\xBE\xFF\xFF\xFF'
self.assertEqual(base64.b64decode(b'++[[//]]', b'[]'), res)
self.assertEqual(base64.urlsafe_b64decode(b'++--//__'), res)
def test_b32encode(self):
eq = self.assertEqual
eq(base64.b32encode(b''), b'')
eq(base64.b32encode(b'\x00'), b'AA======')
eq(base64.b32encode(b'a'), b'ME======')
eq(base64.b32encode(b'ab'), b'MFRA====')
eq(base64.b32encode(b'abc'), b'MFRGG===')
eq(base64.b32encode(b'abcd'), b'MFRGGZA=')
eq(base64.b32encode(b'abcde'), b'MFRGGZDF')
# Non-bytes
self.check_other_types(base64.b32encode, b'abcd', b'MFRGGZA=')
self.check_encode_type_errors(base64.b32encode)
def test_b32decode(self):
eq = self.assertEqual
tests = {b'': b'',
b'AA======': b'\x00',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data), res)
eq(base64.b32decode(data.decode('ascii')), res)
# Non-bytes
self.check_other_types(base64.b32decode, b'MFRGG===', b"abc")
self.check_decode_type_errors(base64.b32decode)
def test_b32decode_casefold(self):
eq = self.assertEqual
tests = {b'': b'',
b'ME======': b'a',
b'MFRA====': b'ab',
b'MFRGG===': b'abc',
b'MFRGGZA=': b'abcd',
b'MFRGGZDF': b'abcde',
# Lower cases
b'me======': b'a',
b'mfra====': b'ab',
b'mfrgg===': b'abc',
b'mfrggza=': b'abcd',
b'mfrggzdf': b'abcde',
}
for data, res in tests.items():
eq(base64.b32decode(data, True), res)
eq(base64.b32decode(data.decode('ascii'), True), res)
self.assertRaises(binascii.Error, base64.b32decode, b'me======')
self.assertRaises(binascii.Error, base64.b32decode, 'me======')
# Mapping zero and one
eq(base64.b32decode(b'MLO23456'), b'b\xdd\xad\xf3\xbe')
eq(base64.b32decode('MLO23456'), b'b\xdd\xad\xf3\xbe')
map_tests = {(b'M1023456', b'L'): b'b\xdd\xad\xf3\xbe',
(b'M1023456', b'I'): b'b\x1d\xad\xf3\xbe',
}
for (data, map01), res in map_tests.items():
data_str = data.decode('ascii')
map01_str = map01.decode('ascii')
eq(base64.b32decode(data, map01=map01), res)
eq(base64.b32decode(data_str, map01=map01), res)
eq(base64.b32decode(data, map01=map01_str), res)
eq(base64.b32decode(data_str, map01=map01_str), res)
self.assertRaises(binascii.Error, base64.b32decode, data)
self.assertRaises(binascii.Error, base64.b32decode, data_str)
def test_b32decode_error(self):
tests = [b'abc', b'ABCDEF==', b'==ABCDEF']
prefixes = [b'M', b'ME', b'MFRA', b'MFRGG', b'MFRGGZA', b'MFRGGZDF']
for i in range(0, 17):
if i:
tests.append(b'='*i)
for prefix in prefixes:
if len(prefix) + i != 8:
tests.append(prefix + b'='*i)
for data in tests:
with self.subTest(data=data):
with self.assertRaises(binascii.Error):
base64.b32decode(data)
with self.assertRaises(binascii.Error):
base64.b32decode(data.decode('ascii'))
def test_b16encode(self):
eq = self.assertEqual
eq(base64.b16encode(b'\x01\x02\xab\xcd\xef'), b'0102ABCDEF')
eq(base64.b16encode(b'\x00'), b'00')
# Non-bytes
self.check_other_types(base64.b16encode, b'\x01\x02\xab\xcd\xef',
b'0102ABCDEF')
self.check_encode_type_errors(base64.b16encode)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_b16decode(self):
eq = self.assertEqual
eq(base64.b16decode(b'0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102ABCDEF'), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(b'00'), b'\x00')
eq(base64.b16decode('00'), b'\x00')
# Lower case is not allowed without a flag
self.assertRaises(binascii.Error, base64.b16decode, b'0102abcdef')
self.assertRaises(binascii.Error, base64.b16decode, '0102abcdef')
# Case fold
eq(base64.b16decode(b'0102abcdef', True), b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode('0102abcdef', True), b'\x01\x02\xab\xcd\xef')
# Non-bytes
self.check_other_types(base64.b16decode, b"0102ABCDEF",
b'\x01\x02\xab\xcd\xef')
self.check_decode_type_errors(base64.b16decode)
eq(base64.b16decode(bytearray(b"0102abcdef"), True),
b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(memoryview(b"0102abcdef"), True),
b'\x01\x02\xab\xcd\xef')
eq(base64.b16decode(array('B', b"0102abcdef"), True),
b'\x01\x02\xab\xcd\xef')
# Non-alphabet characters
self.assertRaises(binascii.Error, base64.b16decode, '0102AG')
# Incorrect "padding"
self.assertRaises(binascii.Error, base64.b16decode, '010')
def test_a85encode(self):
eq = self.assertEqual
tests = {
b'': b'',
b"www.python.org": b'GB\\6`E-ZP=Df.1GEb>',
bytes(range(255)): b"""!!*-'"9eu7#RLhG$k3[W&.oNg'GVB"(`=52*$$"""
b"""(B+<_pR,UFcb-n-Vr/1iJ-0JP==1c70M3&s#]4?Ykm5X@_(6q'R884cE"""
b"""H9MJ8X:f1+h<)lt#=BSg3>[:ZC?t!MSA7]@cBPD3sCi+'.E,fo>FEMbN"""
b"""G^4U^I!pHnJ:W<)KS>/9Ll%"IN/`jYOHG]iPa.Q$R$jD4S=Q7DTV8*TU"""
b"""nsrdW2ZetXKAY/Yd(L?['d?O\\@K2_]Y2%o^qmn*`5Ta:aN;TJbg"GZd"""
b"""*^:jeCE.%f\\,!5gtgiEi8N\\UjQ5OekiqBum-X60nF?)@o_%qPq"ad`"""
b"""r;HT""",
b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
b"0123456789!@#0^&*();:<>,. []{}":
b'@:E_WAS,RgBkhF"D/O92EH6,BF`qtRH$VbC6UX@47n?3D92&&T'
b":Jand;cHat='/U/0JP==1c70M3&r-I,;<FN.OZ`-3]oSW/g+A(H[P",
b"no padding..": b'DJpY:@:Wn_DJ(RS',
b"zero compression\0\0\0\0": b'H=_,8+Cf>,E,oN2F(oQ1z',
b"zero compression\0\0\0": b'H=_,8+Cf>,E,oN2F(oQ1!!!!',
b"Boundary:\0\0\0\0": b'6>q!aA79M(3WK-[!!',
b"Space compr: ": b';fH/TAKYK$D/aMV+<VdL',
b'\xff': b'rr',
b'\xff'*2: b's8N',
b'\xff'*3: b's8W*',
b'\xff'*4: b's8W-!',
}
for data, res in tests.items():
eq(base64.a85encode(data), res, data)
eq(base64.a85encode(data, adobe=False), res, data)
eq(base64.a85encode(data, adobe=True), b'<~' + res + b'~>', data)
self.check_other_types(base64.a85encode, b"www.python.org",
b'GB\\6`E-ZP=Df.1GEb>')
self.assertRaises(TypeError, base64.a85encode, "")
eq(base64.a85encode(b"www.python.org", wrapcol=7, adobe=False),
b'GB\\6`E-\nZP=Df.1\nGEb>')
eq(base64.a85encode(b"\0\0\0\0www.python.org", wrapcol=7, adobe=False),
b'zGB\\6`E\n-ZP=Df.\n1GEb>')
eq(base64.a85encode(b"www.python.org", wrapcol=7, adobe=True),
b'<~GB\\6`\nE-ZP=Df\n.1GEb>\n~>')
eq(base64.a85encode(b' '*8, foldspaces=True, adobe=False), b'yy')
eq(base64.a85encode(b' '*7, foldspaces=True, adobe=False), b'y+<Vd')
eq(base64.a85encode(b' '*6, foldspaces=True, adobe=False), b'y+<U')
eq(base64.a85encode(b' '*5, foldspaces=True, adobe=False), b'y+9')
def test_b85encode(self):
eq = self.assertEqual
tests = {
b'': b'',
b'www.python.org': b'cXxL#aCvlSZ*DGca%T',
bytes(range(255)): b"""009C61O)~M2nh-c3=Iws5D^j+6crX17#SKH9337X"""
b"""AR!_nBqb&%C@Cr{EG;fCFflSSG&MFiI5|2yJUu=?KtV!7L`6nNNJ&ad"""
b"""OifNtP*GA-R8>}2SXo+ITwPvYU}0ioWMyV&XlZI|Y;A6DaB*^Tbai%j"""
b"""czJqze0_d@fPsR8goTEOh>41ejE#<ukdcy;l$Dm3n3<ZJoSmMZprN9p"""
b"""q@|{(sHv)}tgWuEu(7hUw6(UkxVgH!yuH4^z`?@9#Kp$P$jQpf%+1cv"""
b"""(9zP<)YaD4*xB0K+}+;a;Njxq<mKk)=;`X~?CtLF@bU8V^!4`l`1$(#"""
b"""{Qdp""",
b"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"""
b"""0123456789!@#0^&*();:<>,. []{}""":
b"""VPa!sWoBn+X=-b1ZEkOHadLBXb#`}nd3r%YLqtVJM@UIZOH55pPf$@("""
b"""Q&d$}S6EqEFflSSG&MFiI5{CeBQRbjDkv#CIy^osE+AW7dwl""",
b'no padding..': b'Zf_uPVPs@!Zf7no',
b'zero compression\x00\x00\x00\x00': b'dS!BNAY*TBaB^jHb7^mG00000',
b'zero compression\x00\x00\x00': b'dS!BNAY*TBaB^jHb7^mG0000',
b"""Boundary:\x00\x00\x00\x00""": b"""LT`0$WMOi7IsgCw00""",
b'Space compr: ': b'Q*dEpWgug3ZE$irARr(h',
b'\xff': b'{{',
b'\xff'*2: b'|Nj',
b'\xff'*3: b'|Ns9',
b'\xff'*4: b'|NsC0',
}
for data, res in tests.items():
eq(base64.b85encode(data), res)
self.check_other_types(base64.b85encode, b"www.python.org",
b'cXxL#aCvlSZ*DGca%T')
def test_a85decode(self):
eq = self.assertEqual
tests = {
b'': b'',
b'GB\\6`E-ZP=Df.1GEb>': b'www.python.org',
b"""! ! * -'"\n\t\t9eu\r\n7# RL\vhG$k3[W&.oNg'GVB"(`=52*$$"""
b"""(B+<_pR,UFcb-n-Vr/1iJ-0JP==1c70M3&s#]4?Ykm5X@_(6q'R884cE"""
b"""H9MJ8X:f1+h<)lt#=BSg3>[:ZC?t!MSA7]@cBPD3sCi+'.E,fo>FEMbN"""
b"""G^4U^I!pHnJ:W<)KS>/9Ll%"IN/`jYOHG]iPa.Q$R$jD4S=Q7DTV8*TU"""
b"""nsrdW2ZetXKAY/Yd(L?['d?O\\@K2_]Y2%o^qmn*`5Ta:aN;TJbg"GZd"""
b"""*^:jeCE.%f\\,!5gtgiEi8N\\UjQ5OekiqBum-X60nF?)@o_%qPq"ad`"""
b"""r;HT""": bytes(range(255)),
b"""@:E_WAS,RgBkhF"D/O92EH6,BF`qtRH$VbC6UX@47n?3D92&&T:Jand;c"""
b"""Hat='/U/0JP==1c70M3&r-I,;<FN.OZ`-3]oSW/g+A(H[P""":
b'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ01234'
b'56789!@#0^&*();:<>,. []{}',
b'DJpY:@:Wn_DJ(RS': b'no padding..',
b'H=_,8+Cf>,E,oN2F(oQ1z': b'zero compression\x00\x00\x00\x00',
b'H=_,8+Cf>,E,oN2F(oQ1!!!!': b'zero compression\x00\x00\x00',
b'6>q!aA79M(3WK-[!!': b"Boundary:\x00\x00\x00\x00",
b';fH/TAKYK$D/aMV+<VdL': b'Space compr: ',
b'rr': b'\xff',
b's8N': b'\xff'*2,
b's8W*': b'\xff'*3,
b's8W-!': b'\xff'*4,
}
for data, res in tests.items():
eq(base64.a85decode(data), res, data)
eq(base64.a85decode(data, adobe=False), res, data)
eq(base64.a85decode(data.decode("ascii"), adobe=False), res, data)
eq(base64.a85decode(b'<~' + data + b'~>', adobe=True), res, data)
eq(base64.a85decode(data + b'~>', adobe=True), res, data)
eq(base64.a85decode('<~%s~>' % data.decode("ascii"), adobe=True),
res, data)
eq(base64.a85decode(b'yy', foldspaces=True, adobe=False), b' '*8)
eq(base64.a85decode(b'y+<Vd', foldspaces=True, adobe=False), b' '*7)
eq(base64.a85decode(b'y+<U', foldspaces=True, adobe=False), b' '*6)
eq(base64.a85decode(b'y+9', foldspaces=True, adobe=False), b' '*5)
self.check_other_types(base64.a85decode, b'GB\\6`E-ZP=Df.1GEb>',
b"www.python.org")
def test_b85decode(self):
eq = self.assertEqual
tests = {
b'': b'',
b'cXxL#aCvlSZ*DGca%T': b'www.python.org',
b"""009C61O)~M2nh-c3=Iws5D^j+6crX17#SKH9337X"""
b"""AR!_nBqb&%C@Cr{EG;fCFflSSG&MFiI5|2yJUu=?KtV!7L`6nNNJ&ad"""
b"""OifNtP*GA-R8>}2SXo+ITwPvYU}0ioWMyV&XlZI|Y;A6DaB*^Tbai%j"""
b"""czJqze0_d@fPsR8goTEOh>41ejE#<ukdcy;l$Dm3n3<ZJoSmMZprN9p"""
b"""q@|{(sHv)}tgWuEu(7hUw6(UkxVgH!yuH4^z`?@9#Kp$P$jQpf%+1cv"""
b"""(9zP<)YaD4*xB0K+}+;a;Njxq<mKk)=;`X~?CtLF@bU8V^!4`l`1$(#"""
b"""{Qdp""": bytes(range(255)),
b"""VPa!sWoBn+X=-b1ZEkOHadLBXb#`}nd3r%YLqtVJM@UIZOH55pPf$@("""
b"""Q&d$}S6EqEFflSSG&MFiI5{CeBQRbjDkv#CIy^osE+AW7dwl""":
b"""abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"""
b"""0123456789!@#0^&*();:<>,. []{}""",
b'Zf_uPVPs@!Zf7no': b'no padding..',
b'dS!BNAY*TBaB^jHb7^mG00000': b'zero compression\x00\x00\x00\x00',
b'dS!BNAY*TBaB^jHb7^mG0000': b'zero compression\x00\x00\x00',
b"""LT`0$WMOi7IsgCw00""": b"""Boundary:\x00\x00\x00\x00""",
b'Q*dEpWgug3ZE$irARr(h': b'Space compr: ',
b'{{': b'\xff',
b'|Nj': b'\xff'*2,
b'|Ns9': b'\xff'*3,
b'|NsC0': b'\xff'*4,
}
for data, res in tests.items():
eq(base64.b85decode(data), res)
eq(base64.b85decode(data.decode("ascii")), res)
self.check_other_types(base64.b85decode, b'cXxL#aCvlSZ*DGca%T',
b"www.python.org")
def test_a85_padding(self):
eq = self.assertEqual
eq(base64.a85encode(b"x", pad=True), b'GQ7^D')
eq(base64.a85encode(b"xx", pad=True), b"G^'2g")
eq(base64.a85encode(b"xxx", pad=True), b'G^+H5')
eq(base64.a85encode(b"xxxx", pad=True), b'G^+IX')
eq(base64.a85encode(b"xxxxx", pad=True), b'G^+IXGQ7^D')
eq(base64.a85decode(b'GQ7^D'), b"x\x00\x00\x00")
eq(base64.a85decode(b"G^'2g"), b"xx\x00\x00")
eq(base64.a85decode(b'G^+H5'), b"xxx\x00")
eq(base64.a85decode(b'G^+IX'), b"xxxx")
eq(base64.a85decode(b'G^+IXGQ7^D'), b"xxxxx\x00\x00\x00")
def test_b85_padding(self):
eq = self.assertEqual
eq(base64.b85encode(b"x", pad=True), b'cmMzZ')
eq(base64.b85encode(b"xx", pad=True), b'cz6H+')
eq(base64.b85encode(b"xxx", pad=True), b'czAdK')
eq(base64.b85encode(b"xxxx", pad=True), b'czAet')
eq(base64.b85encode(b"xxxxx", pad=True), b'czAetcmMzZ')
eq(base64.b85decode(b'cmMzZ'), b"x\x00\x00\x00")
eq(base64.b85decode(b'cz6H+'), b"xx\x00\x00")
eq(base64.b85decode(b'czAdK'), b"xxx\x00")
eq(base64.b85decode(b'czAet'), b"xxxx")
eq(base64.b85decode(b'czAetcmMzZ'), b"xxxxx\x00\x00\x00")
def test_a85decode_errors(self):
illegal = (set(range(32)) | set(range(118, 256))) - set(b' \t\n\r\v')
for c in illegal:
with self.assertRaises(ValueError, msg=bytes([c])):
base64.a85decode(b'!!!!' + bytes([c]))
with self.assertRaises(ValueError, msg=bytes([c])):
base64.a85decode(b'!!!!' + bytes([c]), adobe=False)
with self.assertRaises(ValueError, msg=bytes([c])):
base64.a85decode(b'<~!!!!' + bytes([c]) + b'~>', adobe=True)
self.assertRaises(ValueError, base64.a85decode,
b"malformed", adobe=True)
self.assertRaises(ValueError, base64.a85decode,
b"<~still malformed", adobe=True)
# With adobe=False (the default), Adobe framing markers are disallowed
self.assertRaises(ValueError, base64.a85decode,
b"<~~>")
self.assertRaises(ValueError, base64.a85decode,
b"<~~>", adobe=False)
base64.a85decode(b"<~~>", adobe=True) # sanity check
self.assertRaises(ValueError, base64.a85decode,
b"abcx", adobe=False)
self.assertRaises(ValueError, base64.a85decode,
b"abcdey", adobe=False)
self.assertRaises(ValueError, base64.a85decode,
b"a b\nc", adobe=False, ignorechars=b"")
self.assertRaises(ValueError, base64.a85decode, b's', adobe=False)
self.assertRaises(ValueError, base64.a85decode, b's8', adobe=False)
self.assertRaises(ValueError, base64.a85decode, b's8W', adobe=False)
self.assertRaises(ValueError, base64.a85decode, b's8W-', adobe=False)
self.assertRaises(ValueError, base64.a85decode, b's8W-"', adobe=False)
def test_b85decode_errors(self):
illegal = list(range(33)) + \
list(b'"\',./:[\\]') + \
list(range(128, 256))
for c in illegal:
with self.assertRaises(ValueError, msg=bytes([c])):
base64.b85decode(b'0000' + bytes([c]))
self.assertRaises(ValueError, base64.b85decode, b'|')
self.assertRaises(ValueError, base64.b85decode, b'|N')
self.assertRaises(ValueError, base64.b85decode, b'|Ns')
self.assertRaises(ValueError, base64.b85decode, b'|NsC')
self.assertRaises(ValueError, base64.b85decode, b'|NsC1')
def test_decode_nonascii_str(self):
decode_funcs = (base64.b64decode,
base64.standard_b64decode,
base64.urlsafe_b64decode,
base64.b32decode,
base64.b16decode,
base64.b85decode,
base64.a85decode)
for f in decode_funcs:
self.assertRaises(ValueError, f, 'with non-ascii \xcb')
def test_ErrorHeritage(self):
self.assertTrue(issubclass(binascii.Error, ValueError))
class TestMain(unittest.TestCase):
def tearDown(self):
if os.path.exists(os_helper.TESTFN):
os.unlink(os_helper.TESTFN)
def get_output(self, *args):
return script_helper.assert_python_ok('-m', 'base64', *args).out
def test_encode_decode(self):
output = self.get_output('-t')
self.assertSequenceEqual(output.splitlines(), (
b"b'Aladdin:open sesame'",
br"b'QWxhZGRpbjpvcGVuIHNlc2FtZQ==\n'",
b"b'Aladdin:open sesame'",
))
def test_encode_file(self):
with open(os_helper.TESTFN, 'wb') as fp:
fp.write(b'a\xffb\n')
output = self.get_output('-e', os_helper.TESTFN)
self.assertEqual(output.rstrip(), b'Yf9iCg==')
def test_encode_from_stdin(self):
with script_helper.spawn_python('-m', 'base64', '-e') as proc:
out, err = proc.communicate(b'a\xffb\n')
self.assertEqual(out.rstrip(), b'Yf9iCg==')
self.assertIsNone(err)
def test_decode(self):
with open(os_helper.TESTFN, 'wb') as fp:
fp.write(b'Yf9iCg==')
output = self.get_output('-d', os_helper.TESTFN)
self.assertEqual(output.rstrip(), b'a\xffb')
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joohoonkim/carsimpy",
"score": 4
} |
#### File: joohoonkim/carsimpy/sensor.py
```python
import pygame
from pygame.math import Vector2
from math import sin, cos, tan, radians, degrees, copysign, pi
from constants import *
import map_model
class Sensor:
def __init__(self, x=0.0, y=0.0, orient=0.0):
self.position = Vector2(x,y)
self.orient = orient
self.length = 0
self.width = 5
def update(self,position,orient):
self.position.x = position.x
self.position.y = position.y
self.orient = orient
def check_sensor(self,env,distance=100,num_intervals=10):
self.length = distance
detected = False
heading = Vector2(cos(self.orient*pi/180.0),sin(-self.orient*pi/180.0))
steps = int(distance/num_intervals)
for i in range(0,num_intervals+1):
sensor_pos = Vector2(self.position.x + (heading.x*i*steps), self.position.y + (heading.y*i*steps))
terrain = env.CheckTerrain(self.position.x,self.position.y,sensor_pos)
if(terrain):
detected = True
                break  # no need to keep sampling once terrain has been detected
return detected
def get_position(self):
return self.position
def get_length(self):
return self.length
def get_width(self):
return self.width
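# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Only `env.CheckTerrain(x, y, sensor_pos)` is evidenced by the code above; the
# environment constructor below is an assumption for illustration and may differ
# from the real map_model API.
#
# env = map_model.Map()  # hypothetical constructor
# sensor = Sensor(x=100.0, y=200.0, orient=45.0)
# sensor.update(Vector2(100.0, 200.0), 45.0)
# hit = sensor.check_sensor(env, distance=100, num_intervals=10)  # 100 px ray, 10 samples
# print("terrain detected:", hit)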
``` |
{
"source": "jooh/py-expcontrol",
"score": 3
} |
#### File: py-expcontrol/expcontrol/eyelinkdep.py
```python
import time
import numpy
import pylink # pylint: disable=import-error
class EyeLinkTracker(object):
'''
Handle common eye tracker tasks with a somewhat more intuitive
interface than stock pylink.
'''
def __init__(self, size=[1024, 768], calibscale=1., ip='192.168.3.11', \
bgcolor=[127, 127, 127], fgcolor=[255, 255, 255], \
targetdiameter=20, targethole=5, calibrationtype='HV9', \
calibrationpacing=.9, viewdistance=None, screenwidth=None):
self.size = tuple(size)
# connect to tracker and do initial config
self.tracker = pylink.EyeLink(ip)
self.eyeused = None
# flush out any pending key presses and get back to offline mode in
# case we crashed out while recording
pylink.flushGetkeyQueue()
self.tracker.setOfflineMode()
self.fgcolor = fgcolor
self.bgcolor = bgcolor
self.targetdiameter = targetdiameter
self.targethole = targethole
# month, day, hour, minute.
self.remotefilename = time.strftime('%m%d%H%M')
self.tracker.openDataFile(self.remotefilename)
self.calibsize = (numpy.array(self.size) * calibscale)
calibarea = numpy.round(numpy.array(self.size) - self.calibsize)
alldims = (calibarea[0], calibarea[1], self.calibsize[0],
self.calibsize[1])
self.tracker.sendCommand('screen_pixel_coords = %d %d %d %d' % alldims)
self.tracker.sendMessage("DISPLAY_COORDS %d %d %d %d" % alldims)
self.tracker.sendMessage("SCREEN_COORDS 0 0 %d %d" % self.size)
# for robustness we set a bunch of other parameters so that any
# weird defaults get overwritten
if viewdistance:
self.tracker.sendCommand('simulation_screen_distance=%d' % \
(viewdistance * 10))
self.tracker.sendMessage('VIEW_DISTANCE %d' % (viewdistance * 10))
self.tracker.sendCommand('automatic_calibration_pacing=%d' % \
(calibrationpacing * 1000))
if screenwidth:
self.tracker.sendMessage('SCREEN_WIDTH %d' % screenwidth)
# NB this command is necessary whenever changing
# screen_pixel_coords
self.tracker.sendCommand('calibration_type=' + calibrationtype)
if self.tracker.getTrackerVersion() == 2:
self.tracker.sendCommand("select_parser_configuration 0")
else:
self.tracker.sendCommand("saccade_velocity_threshold = 35")
self.tracker.sendCommand("saccade_acceleration_threshold = 9500")
self.tracker.setFileEventFilter("LEFT,RIGHT,FIXATION,SACCADE,BLINK,MESSAGE,BUTTON")
self.tracker.setFileSampleFilter("LEFT,RIGHT,GAZE,AREA,GAZERES,STATUS")
self.tracker.setLinkEventFilter("LEFT,RIGHT,FIXATION,SACCADE,BLINK,BUTTON")
self.tracker.setLinkSampleFilter("LEFT,RIGHT,GAZE,GAZERES,AREA,STATUS")
self.tracker.sendCommand("button_function 5 'accept_target_fixation'")
return
def calibrate(self):
'''
Open a pygame window, run a calibration routine and close it.
'''
# start the main calibration/validation interface
pylink.openGraphics(self.size)
# these commands cause a hard crash if sent before openGraphics
pylink.setCalibrationColors(self.fgcolor, self.bgcolor)
pylink.setTargetSize(self.targetdiameter, self.targethole)
self.tracker.doTrackerSetup()
self.eyeused = self.tracker.eyeAvailable()
pylink.closeGraphics()
return
def start(self):
'''
start recording eye tracking data.
'''
err = self.tracker.startRecording(1, 1, 1, 1)
assert not err, 'EyeLink error: ' + err
return
def message(self, msg):
'''
send the str msg to the eye tracker.
'''
self.tracker.sendMessage(msg)
return
def stop(self, outfile):
'''
stop recording and receive the data file if outfile is not None.
'''
# pumpDelay is a lower priority delay which does not block background
# events. msecDelay is more aggressive. Here used to catch last bit of
# data before stopping the recording
pylink.pumpDelay(100)
# idle mode
self.tracker.setOfflineMode()
pylink.msecDelay(500)
# close the file on the tracker HD. Can take a while...
self.tracker.closeDataFile()
if outfile is not None:
self.tracker.receiveDataFile(self.remotefilename, outfile)
self.tracker.close()
return
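# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Typical calibrate / record / stop lifecycle. Running it requires an EyeLink host
# reachable at the configured IP and a local pylink install, so it is illustrative
# only; the message text and output filename are placeholders.
#
# tracker = EyeLinkTracker(size=[1024, 768], viewdistance=60, screenwidth=40)
# tracker.calibrate()              # opens a pygame window for calibration/validation
# tracker.start()                  # begin recording
# tracker.message('TRIAL_START 1')
# # ... run the experiment ...
# tracker.stop('session.edf')      # stop recording and download the EDF file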
```
#### File: py-expcontrol/expcontrol/psychopydep.py
```python
import collections
import numpy
import psychopy.core
import psychopy.visual
import psychopy.logging
import psychopy.event
from psychopy.hardware.emulator import SyncGenerator
class Clock(object):
'''
Time-keeping functionality for expcontrol by wrapping Psychopy's
core.Clock instance.'''
def __init__(self):
'''Initialise a clock instance.'''
self.ppclock = psychopy.core.Clock()
super(Clock, self).__init__()
psychopy.logging.setDefaultClock(self.ppclock)
return
def __call__(self):
'''Return the current time stamp from ppclock.getTime'''
return self.ppclock.getTime()
def start(self):
'''Reset the clock to 0.'''
self.ppclock.reset()
return self()
def wait(self, time):
'''wait for time duration (s).'''
psychopy.core.wait(time)
return
def waituntil(self, time):
'''wait until the clock reaches time.'''
self.wait(time-self())
return
class PulseClock(Clock):
'''
Time-keeping with tracking of pulses (e.g. from a scanner trigger)
through a keyboard button at some interval. Note that time is
still tracked in seconds, not pulses. So on its own, using this class
will ensure that you synchronise your experiment to the first pulse
(see start method), but everything afterwards still runs in seconds as
with the standard Clock class.
    The only further refinement is that the clock will attempt to measure
    the pulse period empirically whenever given a chance (i.e., self.waituntil is
    called with enough remaining time that a pulse is expected during the
    wait). These estimates are stored in self.periodhistory.
'''
def __init__(self, key, period, pulsedur=0.01, tolerance=.1, timeout=20., \
verbose=False, ndummies=0):
self.period = period
self.pulsedur = pulsedur
self.tolerance = tolerance
self.periodhistory = [period]
self.timeout = timeout
self.verbose = verbose
assert ndummies >= 0, 'ndummies must be 0 or greater'
self.ndummies = ndummies
super(PulseClock, self).__init__()
self.keyhand = KeyboardResponse(key, self.ppclock)
return
def waitpulse(self):
'''wait until a pulse is received. An exception is raised if the wait
exceeds self.timeout.'''
key, keytime = self.keyhand.waitkey(self.timeout)
assert key, 'exceeded %.0fs timeout without receiving pulse' % \
self.timeout
# first time of response if we got multiple
keytime = keytime[0]
return keytime
def start(self):
'''reset the clock and return once the correct pulse has been received
(one for each of self.ndummies+1).'''
# need to first reset the second clock to make the timeout counter
# in waitpulse work properly
super(PulseClock, self).start()
# nb +1 so we always wait for a pulse. dummies are in ADDITION to this
for dummy in range(self.ndummies+1):
if self.verbose:
                print('waiting for pulse %d' % dummy)
# but this means that the starttime recorded here is off
starttime = self.waitpulse()
# so we adjust the clock to compensate for starttime (not quite the
# same as zeroing the clock - if time has passed since the pulse
# was received this operation will produce a current clock time >0
self.ppclock.add(starttime)
# return current time after all this
return self()
def waituntil(self, time):
'''wait until time, catching any pulses along the way.'''
# current time
now = self()
nowpulse = now / self.period
timepulse = time / self.period
npulseleft = numpy.floor(timepulse)-numpy.floor(nowpulse)
if npulseleft < 1:
# less than a self.period left, so wait it out using standard
# second clock
super(PulseClock, self).waituntil(time)
return
# if we make it here, there must be pulses to catch
actualtime = self.waitpulse()
# we expect the next pulse to be number
predictpulse = numpy.ceil(now / self.period)
# now we can update our estimate of period like so...
newpulse = actualtime / predictpulse
if numpy.abs(newpulse-self.period) > self.tolerance:
raise Exception('pulse period beyond tolerance: ' +
'expected=%.4f, estimated=%.4f' % (self.period,
newpulse))
self.period = newpulse
if self.verbose:
            print('Pulse at %.2f. tr=%.3f' % (actualtime, newpulse))
self.periodhistory.append(newpulse)
# avoid catching the same pulse twice
if (time-self()) > self.pulsedur:
self.wait(self.pulsedur)
# we recurse with a depth of npulseleft. This is important to
# handle cases where you are waiting n pulses + a bit extra
self.waituntil(time)
return
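# --- Hedged usage sketch (editor addition, not part of the original file) ---
# Shows how PulseClock synchronises an experiment to scanner triggers: start()
# blocks until ndummies+1 pulses have arrived on the given key, after which time
# is tracked in seconds as with the plain Clock. The key name 't' and the 2 s
# period are assumptions for the example.
#
# clock = PulseClock(key='t', period=2.0, ndummies=3, verbose=True)
# t0 = clock.start()           # returns once the 4th pulse has been received
# clock.waituntil(t0 + 10.0)   # waits 10 s, re-estimating the period from pulses caught on the way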
class Window(object):
'''
Display control functionality for expcontrol by wrapping
Psychopy's visual.Window.
'''
def __init__(self, *args, **kwargs):
'''
Initialise a window instance. All input arguments are piped to
psychopy.visual.Window.
'''
self.winhand = psychopy.visual.Window(*args, **kwargs)
# flip a few times because it is thought this helps stabilise
# timings
[self() for flip in range(50)]
return
def __call__(self):
'''flip the screen and return an exact time stamp of when the flip
occurred.'''
return self.winhand.flip()
def close(self):
'''close the screen.'''
self.winhand.close()
return
class KeyboardResponse(object):
'''
Psychopy-based keyboard response checking.
'''
esckey = 'escape'
def __init__(self, keylist, clock):
'''
Initialise a KeyboardResponse instance. keylist is a list of valid keys
(all other inputs are ignored). clock is a handle to a current Psychopy
clock instance.
'''
if not isinstance(keylist, collections.Iterable):
keylist = [keylist]
self.keylist = keylist + [self.esckey]
self.ppclock = clock
return
def __call__(self):
'''Check for responses.'''
ktup = psychopy.event.getKeys(keyList=self.keylist,
timeStamped=self.ppclock)
return self.parsekey(ktup)
def waitkey(self, dur=float('inf')):
'''wait for a key press for a set duration (default inf).'''
ktup = psychopy.event.waitKeys(maxWait=dur, keyList=self.keylist,
timeStamped=self.ppclock)
return self.parsekey(ktup)
def parsekey(self, ktup):
'''Convert timestamped key presses to separate key and time stamp
arrays. Used internally to support __call__ and waitkey.'''
keys = []
timestamps = []
if ktup:
keys, timestamps = zip(*ktup)
if self.esckey in keys:
raise Exception('user pressed escape')
return numpy.array(keys), numpy.array(timestamps)
class PulseEmulator(object):
'''
Simulate pulses at some period. Just a convenience wrapper for
psychopy.hardware.emulator.SynchGenerator.
'''
def __init__(self, *args, **kwargs):
'''Initialise a PulseEmulator instance. All arguments are passed to
SynchGenerator.'''
self.pulsehand = SyncGenerator(*args, **kwargs)
return
def start(self):
'''Start sending pulses.'''
self.pulsehand.start()
psychopy.core.runningThreads.append(self.pulsehand)
return
def stop(self):
'''Stop sending pulses.'''
self.pulsehand.stop()
return
``` |
{
"source": "jooh/sana",
"score": 3
} |
#### File: sana/backend/npbased.py
```python
import numpy as np
from sana.base import n2npairs, npairs2n
import scipy.linalg
def lsbetas(x, y, lapack_driver='gelsy'):
"""return the parameter estimats (betas) from a least squares fit. Wraps
scipy.linalg.lstsq since it seems to outperform np.linalg.lstsq for our typical data
at the moment. You can sometimes tune performance further by switching the
lapack_driver (we override the default gelsd in favour of gelsy, see scipy docs)."""
return scipy.linalg.lstsq(x, y, lapack_driver=lapack_driver)[0]
def square2vec(rdm):
"""map 2D distance matrix to [n,1] vector of unique distances. Returns distances in
same order as scipy.spatial.distance.squareform."""
return rdm[np.triu_indices_from(rdm, k=1)][:, None]
def allpairwisecontrasts(n, dtype=np.float64):
"""return a npair by n matrix of contrast vectors. The result differences should be
compatible with the default scipy distance matrix code (pdist, squareform)."""
outsize = [n, int(n2npairs(n))]
# as in tf.square2vec, use triu instead of tril since numpy is row major and matlab
# column major
mask = np.triu(np.ones((n, n), dtype="bool"), 1)
rows, cols = mask.nonzero()
ind = np.arange(outsize[1])
posind = np.ravel_multi_index((rows, ind), outsize)
negind = np.ravel_multi_index((cols, ind), outsize)
result = np.zeros(outsize)
result[np.unravel_index(posind, outsize)] = 1.
result[np.unravel_index(negind, outsize)] = -1.
return result.T
def sqsigned(x):
"""return signed square transform. Quirky concept to avoid imaginary numbers when
working with multiple regression RSA."""
return np.sign(x) * (x ** 2)
def sqrtsigned(x):
"""return signed square-root transform (ie, square root on abs value, then returned
to original sign). Quirky concept to avoid imaginary numbers when working with
multiple regression RSA."""
return np.sign(x) * np.sqrt(np.abs(x))
def zscore(rdv, axis=0):
return (rdv - np.mean(rdv, axis=axis, keepdims=True)) / np.std(
rdv, axis=axis, keepdims=True
)
def pearsonz_1vN(rdv, rdvn):
"""Fisher Z-transformed pearson correlation between rdv (f by 1 array) and
rdvn (f by n array)."""
return np.arctanh(lsbetas(zscore(rdv), zscore(rdvn)))
# TODO - the two other convenient correlation algorithms - from the covariance
# matrix (to get a full n by n matrix) and from unit length cosines (to get
# pairwise correlations between arbitrary n by f and m by f arrays)
def flatten(resp):
"""flatten dimensions 1: of the input ND array resp."""
return np.reshape(resp, [resp.shape[0], np.prod(resp.shape[1:])])
def euclideansq(resp):
"""squared euclidean distance matrix"""
# resp is exemplar by features
    # reshape to put exemplars in rows, features in columns
resp = flatten(resp)
# sum of squares over feature dim
r = np.sum(resp * resp, axis=1)
rdm = r[:, None] - 2 * np.matmul(resp, resp.T) + r[None, :]
# take advantage of known properties of distance matrices to correct
# rounding error - symmetry
rdm = (rdm + rdm.T) / 2.
# (NB, rdm diagonal won't be exactly zero but those values are not returned so it
# doesn't matter)
return square2vec(rdm)
def vec2square(rdv):
xs = rdv.shape
ncon = npairs2n(xs[0])
assert np.allclose(
ncon, np.round(ncon)
), "rdv is not convertible to square distance matrix"
ncon = int(ncon)
uind = np.triu_indices(ncon, k=1)
rdm = np.zeros([ncon, ncon], rdv.dtype)
rdm[uind] = rdv.flatten()
rdm += rdm.T
return rdm
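# Illustrative round trip between the vector and square RDM forms above (editor
# addition, not part of the original module):
#
# >>> resp = np.random.rand(5, 100)   # 5 conditions, 100 features
# >>> rdv = euclideansq(resp)         # (10, 1) vector of unique squared distances
# >>> rdm = vec2square(rdv)           # symmetric 5 x 5 distance matrix
# >>> np.allclose(square2vec(rdm), rdv)
# True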
def rdmsplitter(rdm):
"""leave-one-out splitter for input rdm (n by ncon by ncon array). Returns trainrdm
(the sum RDM for all-but-one) and testrdm (the left-out RDM) on each call."""
rdm = np.asarray(rdm)
# singleton dimensions are impossible (condition dims are at least 2, and if you
# have less than 2 entries in split dim, you have a problem)
assert np.all(np.array(rdm.shape) > 1)
assert np.ndim(rdm) == 3
# assume stacked in rows
nrdm = rdm.shape[0]
# always leave one out for now
allind = np.arange(nrdm)
for ind in allind:
testrdm = rdm[ind, :, :]
# pearson is invariant to whether you sum or average. And with sum you
# can easily add another rdv later (e.g. in noiseceiling)
trainrdm = np.sum(rdm[np.setdiff1d(allind, ind), :, :], axis=0)
yield trainrdm, testrdm
def noiseceiling(metric, trainrdv, testrdv):
return (metric(trainrdv, testrdv), metric(trainrdv + testrdv, testrdv))
def mrdivide(x, y):
"""matlab-style matrix right division."""
return lsbetas(y.T, x.T).T
def discriminant(x, y, con, covestimator=np.cov):
"""estimate linear discriminant weights according to some covariance estimator. Note
that for neuroimaging data a regularised covariance estimator is a good idea, e.g.
lambda res: sklearn.variance.LedoitWolf().fit(res).covariance_."""
betas = lsbetas(x, y)
conest = con @ betas
yhat = x @ betas
resid = y - yhat
cmat = covestimator(resid)
return mrdivide(conest, cmat)
def discriminantcontrast(x, y, con, w):
"""return discriminant contrast (LDC, crossnobis, CV-Mahalanobis, whatever)."""
betas = lsbetas(x, y)
conest = con @ betas
return np.sum(conest * w, axis=1)
def chunk2bool(chunks, val):
"""return a vector that's true whenever chunks==any(val)."""
return np.any(np.asarray(chunks)[:,None] == np.asarray(val)[None,:], axis=1)
def projectout(x, c):
"""return x after removing the fitted contribution of c."""
return x - c @ lsbetas(c, x)
def polynomialmatrix(nvol, n):
"""return nvol x n+1 matrix of polynomials of degree 0:n. Often used in fMRI GLMs as
a high-pass filter."""
return np.linspace(-1., 1., nvol)[:,None] ** np.arange(n+1)[None,:]
def corrpairs(x, y, axis=0):
"""return the pearson correlation coefficient for each element along axis of x
paired with its corresponding element of y."""
xn = x-x.mean(axis=axis, keepdims=True)
xn /= np.linalg.norm(xn, axis=axis, keepdims=True)
yn = y-y.mean(axis=axis, keepdims=True)
yn /= np.linalg.norm(yn, axis=axis, keepdims=True)
return np.sum(xn.conj()*yn, axis=axis)
```
#### File: sana/tests/util.py
```python
import numpy as np
def assert_shape(arrlike, shape):
np.testing.assert_array_equal(np.array(arrlike).shape, np.array(shape))
def responses(ncon, nfeat=100):
return np.random.rand(ncon, nfeat)
def signeddistancematrix():
"""5x5 matrix with distance from diagonal in increasing positive values below, and
increasing negative values above."""
return np.arange(-2, 3)[:, None] - np.arange(-2, 3)[None, :]
def signeddistancevector():
"""ground truth distance vector form for signeddistancematrix."""
return -1 * np.array([1, 2, 3, 4, 1, 2, 3, 1, 2, 1])[:, None]
``` |
{
"source": "JoohyungLee0106/rectal_MR_volume_classification",
"score": 3
} |
#### File: rectal_MR_volume_classification/model/conv_builder.py
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch._six import container_abcs
from itertools import repeat
def _ntuple(n):
def parse(x):
if isinstance(x, container_abcs.Iterable):
return x
return tuple(repeat(x, n))
return parse
_triple = _ntuple(3)
class Conv3DSimple(nn.Conv3d):
def __init__(self,
in_planes,
out_planes,
stride=1,
kernel_size=3):
padding = (kernel_size - 1) // 2
super(Conv3DSimple, self).__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(3, kernel_size, kernel_size),
stride=(1, stride, stride),
padding=(1, padding, padding),
bias=False)
class Conv2Plus1D(nn.Sequential):
def __init__(self,
in_planes,
out_planes,
stride=1,
kernel_size=3):
padding = (kernel_size - 1) // 2
midplanes = (in_planes * out_planes * 3 * 3 * 3) // (in_planes * 3 * 3 + 3 * out_planes)
super(Conv2Plus1D, self).__init__(
nn.Conv3d(in_planes, midplanes, kernel_size=(1, kernel_size, kernel_size),
stride=(1, stride, stride), padding=(0, padding, padding),
bias=False),
nn.BatchNorm3d(midplanes),
nn.ReLU(inplace=True),
nn.Conv3d(midplanes, out_planes, kernel_size=(kernel_size, 1, 1),
stride=(1, 1, 1), padding=(padding, 0, 0),
bias=False))
class Conv3DNoTemporal(nn.Conv3d):
def __init__(self,
in_planes,
out_planes,
stride=1,
kernel_size=3):
padding = (kernel_size - 1) // 2
super(Conv3DNoTemporal, self).__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(1, kernel_size, kernel_size),
stride=(1, stride, stride),
padding=(0, padding, padding),
bias=False)
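# --- Illustrative shape check (editor addition, not part of the original file) ---
# All three builders above act as drop-in 3D conv blocks with the same spatial
# behaviour; Conv2Plus1D factorises the 3x3x3 kernel into a spatial 1x3x3 conv
# followed by a temporal 3x1x1 conv, with `midplanes` chosen so the parameter
# count roughly matches that of the full 3D convolution.
if __name__ == "__main__":
    x = torch.randn(2, 3, 8, 56, 56)  # (batch, channels, frames, height, width)
    for block in (Conv3DSimple(3, 16), Conv2Plus1D(3, 16), Conv3DNoTemporal(3, 16)):
        print(type(block).__name__, tuple(block(x).shape))  # each -> (2, 16, 8, 56, 56)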
``` |
{
"source": "jookies/txamqp",
"score": 2
} |
#### File: jookies/txamqp/setup.py
```python
import os
from setuptools import setup, find_packages
def parse_requirements(filename):
"""load requirements from a pip requirements file"""
lineiter = (line.strip() for line in open(filename))
return [line for line in lineiter if line and (not line.startswith("#") and not line.startswith('-'))]
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="txAMQP3",
version='0.9.0',
author="<NAME>",
author_email="<EMAIL>",
description="Python3 library for communicating with AMQP peers and brokers using Twisted",
license='Apache License 2.0',
packages=find_packages(exclude=["tests"]),
# long_description=read('README.md'),
keywords="twisted amq",
url="https://github.com/jookies/txamqp",
py_modules=["txAMQP3"],
include_package_data=True,
package_data={'txamqp3': ['README.md']},
install_requires=parse_requirements('requirements.txt'),
classifiers=[
"Development Status :: 5 - Production/Stable",
"Framework :: Twisted",
"Topic :: System :: Networking",
"Operating System :: OS Independent",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Developers",
"Programming Language :: Python",
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
```
#### File: txamqp/txamqp/codec.py
```python
from io import BytesIO
from struct import pack, calcsize, unpack
class EOF(Exception):
pass
class Codec:
def __init__(self, stream):
self.stream = stream
self.nwrote = 0
self.nread = 0
self.incoming_bits = []
self.outgoing_bits = []
def read(self, n):
data = self.stream.read(n)
if n > 0 and len(data) == 0:
raise EOF()
self.nread += len(data)
return data
def write(self, s):
self.flushbits()
self.stream.write(s)
self.nwrote += len(s)
def flush(self):
self.flushbits()
self.stream.flush()
def flushbits(self):
if len(self.outgoing_bits) > 0:
bytes_list = []
index = 0
for b in self.outgoing_bits:
if index == 0:
bytes_list.append(0)
if b:
bytes_list[-1] |= 1 << index
index = (index + 1) % 8
del self.outgoing_bits[:]
for byte in bytes_list:
self.encode_octet(byte)
def pack(self, fmt, *args):
self.write(pack(fmt, *args))
def unpack(self, fmt):
size = calcsize(fmt)
data = self.read(size)
values = unpack(fmt, data)
if len(values) == 1:
return values[0]
else:
return values
def encode(self, field_type, field_value):
getattr(self, "encode_" + field_type)(field_value)
def decode(self, field_type):
return getattr(self, "decode_" + field_type)()
# bit
def encode_bit(self, o):
if o:
self.outgoing_bits.append(True)
else:
self.outgoing_bits.append(False)
def decode_bit(self):
if len(self.incoming_bits) == 0:
bits = self.decode_octet()
for shift in range(8):
self.incoming_bits.append(bits >> shift & 1 != 0)
return self.incoming_bits.pop(0)
# octet
def encode_octet(self, o):
self.pack("!B", o)
def decode_octet(self):
return self.unpack("!B")
# short
def encode_short(self, o):
self.pack("!H", o)
def decode_short(self):
return self.unpack("!H")
# long
def encode_long(self, o):
self.pack("!L", o)
def decode_long(self):
return self.unpack("!L")
# longlong
def encode_longlong(self, o):
self.pack("!Q", o)
def decode_longlong(self):
return self.unpack("!Q")
def enc_str(self, fmt, s):
size = len(s)
self.pack(fmt, size)
if not isinstance(s, bytes):
s = s.encode()
self.write(s)
def enc_bytes(self, fmt, s):
size = len(s)
self.pack(fmt, size)
self.write(s)
def dec_str(self, fmt):
size = self.unpack(fmt)
data = self.read(size)
        # Opportunistic binary decode
try:
data = data.decode()
except UnicodeDecodeError:
pass
return data
def dec_bytes(self, fmt):
size = self.unpack(fmt)
return self.read(size)
# shortstr
def encode_shortstr(self, s):
self.enc_str("!B", s)
def decode_shortstr(self):
return self.dec_str("!B")
# longstr
def encode_longstr(self, s):
if isinstance(s, dict):
self.encode_table(s)
else:
self.enc_str("!L", s)
def encode_longbytes(self, s):
if isinstance(s, dict):
self.encode_table(s)
else:
self.enc_bytes("!L", s)
def decode_longstr(self):
return self.dec_str("!L")
def decode_longbytes(self):
return self.dec_bytes("!L")
# timestamp
def encode_timestamp(self, o):
self.pack("!Q", o)
def decode_timestamp(self):
return self.unpack("!Q")
def _write_value(self, value):
if isinstance(value, (str, bytes)):
self.write(b"S")
self.encode_longstr(value)
elif value is None:
self.encode_void()
elif isinstance(value, list):
self.write(b'A')
self.encode_array(value)
elif isinstance(value, int):
self.write(b"I")
self.encode_long(value)
else:
raise TypeError('Got unknown type %s for encoding' % type(value))
# array
def encode_array(self, arr):
enc = BytesIO()
codec = Codec(enc)
for value in arr:
codec._write_value(value)
s = enc.getvalue()
self.encode_long(len(s))
self.write(s)
# table
def encode_table(self, tbl):
enc = BytesIO()
codec = Codec(enc)
for key, value in tbl.items():
codec.encode_shortstr(key)
codec._write_value(value)
s = enc.getvalue()
self.encode_long(len(s))
self.write(s)
def decode_table(self):
size = self.decode_long()
start = self.nread
result = {}
while self.nread - start < size:
key = self.decode_shortstr()
item_type = self.read(1)
if item_type == b"S":
value = self.decode_longstr()
elif item_type == b"I":
value = self.decode_long()
elif item_type == b"F":
value = self.decode_table()
elif item_type == b"t":
value = (self.decode_octet() != 0)
else:
raise ValueError(repr(item_type))
result[key] = value
return result
# void
def encode_void(self):
self.write(b"V")
def decode_void(self):
return None
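# --- Hedged example (editor addition, not part of the original module) ---
# Round-trips an AMQP field table through the codec using an in-memory stream;
# the table keys and values are arbitrary.
if __name__ == "__main__":
    buf = BytesIO()
    Codec(buf).encode_table({"queue": "orders", "x-message-ttl": 60000})
    buf.seek(0)
    print(Codec(buf).decode_table())
    # -> {'queue': 'orders', 'x-message-ttl': 60000}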
```
#### File: txamqp/txamqp/endpoint.py
```python
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.internet.endpoints import clientFromString
from twisted.web.client import URI
from twisted.web.http import parse_qs
class AMQEndpoint:
"""An endpoint that knows how to connect to AMQP brokers.
    The class implements the same API as IStreamClientEndpoint; however, it
    requires the protocol factory to be able to speak AMQP for performing
    the authentication.
@note: Currently TLS connections are not supported.
"""
def __init__(self, reactor, host, port, username="", password="",
vhost="/", heartbeat=0, auth_mechanism="AMQPLAIN",
timeout=30):
"""
@param reactor: An L{IReactorTCP} provider.
@param username: The username to use when authenticating.
@type username: L{bytes}
@param password: The password to use when authenticating.
@type password: L{bytes}
@param host: Host name or IP address of the AMQP broker.
@type host: L{bytes}
@type port: L{int}
@param port: Port number.
@param vhost: The vhost to open the connection against.
@type vhost: L{bytes}
@param heartbeat: AMQP heartbeat in seconds.
@type heartbeat: L{int}
        @param auth_mechanism: Authentication mechanism. Currently only AMQPLAIN
            and PLAIN are supported.
        @type auth_mechanism: L{bytes}
@param timeout: Number of seconds to wait before assuming the
connection has failed.
@type timeout: int
"""
self._reactor = reactor
self._host = host
self._port = port
self._username = username
self._password = password
self._vhost = vhost
self._heartbeat = heartbeat
self._auth_mechanism = auth_mechanism
self._timeout = timeout
@classmethod
def from_uri(cls, reactor, uri):
"""Return an AMQEndpoint instance configured with the given AMQP uri.
@see: https://www.rabbitmq.com/uri-spec.html
"""
uri = URI.fromBytes(uri.encode(), defaultPort=5672)
kwargs = {}
host = uri.host.decode()
if "@" in host:
auth, host = uri.netloc.decode().split("@")
username, password = auth.split(":")
kwargs.update({"username": username, "password": password})
vhost = uri.path.decode()
if len(vhost) > 1:
vhost = vhost[1:] # Strip leading "/"
kwargs["vhost"] = vhost
params = parse_qs(uri.query)
kwargs.update({name.decode(): value[0].decode() for name, value in params.items()})
if "heartbeat" in kwargs:
kwargs["heartbeat"] = int(kwargs["heartbeat"])
return cls(reactor, host, uri.port, **kwargs)
def connect(self, protocol_factory):
"""
Connect to the C{protocolFactory} to the AMQP broker specified by the
URI of this endpoint.
@param protocol_factory: An L{AMQFactory} building L{AMQClient} objects.
@return: A L{Deferred} that results in an L{AMQClient} upon successful
connection otherwise a L{Failure} wrapping L{ConnectError} or
L{NoProtocol <twisted.internet.error.NoProtocol>}.
"""
# XXX Since AMQClient requires these parameters at __init__ time, we
# need to override them in the provided factory.
protocol_factory.set_vhost(self._vhost)
protocol_factory.set_heartbeat(self._heartbeat)
description = "tcp:{}:{}:timeout={}".format(
self._host, self._port, self._timeout)
endpoint = clientFromString(self._reactor, description)
deferred = endpoint.connect(protocol_factory)
return deferred.addCallback(self._authenticate)
@inlineCallbacks
def _authenticate(self, client):
"""Perform AMQP authentication."""
yield client.authenticate(
self._username, self._password, mechanism=self._auth_mechanism)
returnValue(client)
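# --- Hedged usage sketch (editor addition, not part of the original module) ---
# The broker URI is a placeholder and `amq_factory` stands for an already-built
# AMQFactory instance (see the `connect` docstring); both are assumptions here.
#
# from twisted.internet import reactor
#
# endpoint = AMQEndpoint.from_uri(
#     reactor, "amqp://guest:guest@localhost:5672/myvhost?heartbeat=30")
# deferred = endpoint.connect(amq_factory)
# deferred.addCallback(lambda client: client.channel(1))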
``` |
{
"source": "jookovjook/torch2coreml",
"score": 2
} |
#### File: torch2coreml/test/test_layers.py
```python
import numpy as np
import numpy.testing as npt
import unittest
import sys
import os
import torch
import torch.legacy.nn as nn
from _test_utils import _INPUT_SHAPE
sys.path.append(
os.path.dirname(os.path.realpath(__file__)) + "/../torch2coreml/"
)
class SingleLayerTest(unittest.TestCase):
def setUp(self):
self.input = np.random.ranf(_INPUT_SHAPE)
self.torch_batch_mode = True
self.output_count = 1
def _forward_torch(self, torch_model):
if isinstance(self.input, list):
inputs = [
torch.from_numpy(
np.asarray([inp] if self.torch_batch_mode else inp)
).float()
for inp in self.input
]
result = torch_model.forward(inputs)
else:
input_tensor = torch.from_numpy(
np.asarray(
[self.input] if self.torch_batch_mode else self.input
)
).float()
result = torch_model.forward(input_tensor)
if isinstance(result, list):
return [
(r.numpy()[0] if self.torch_batch_mode else r.numpy())
for r in result
]
else:
r = result.numpy()
return r[0] if self.torch_batch_mode else r
def _forward_coreml(self, torch_model):
from _torch_converter import convert
output_names = ['output']
if self.output_count > 1:
output_names = [
'output_' + str(i)
for i in range(self.output_count)
]
if isinstance(self.input, list):
input_shapes = [inp.shape for inp in self.input]
input_names = ['input_' + str(i) for i in range(len(self.input))]
coreml_model = convert(
torch_model,
input_shapes,
input_names=input_names,
output_names=output_names
)
result = coreml_model.predict(
dict(zip(input_names, self.input)), useCPUOnly=True
)
else:
coreml_model = convert(
torch_model,
[self.input.shape],
output_names=output_names
)
result = coreml_model.predict(
{'input': self.input}, useCPUOnly=True
)
if self.output_count > 1:
return [result[name] for name in output_names]
else:
return result['output']
def _assert_outputs(self, torch_output, coreml_output, decimal):
if isinstance(torch_output, list):
self.assertTrue(isinstance(coreml_output, list))
self.assertEqual(len(torch_output), len(coreml_output))
for i in range(len(torch_output)):
tout = torch_output[i]
cout = coreml_output[i]
self.assertEqual(tout.shape, cout.shape)
npt.assert_almost_equal(cout, tout, decimal=decimal)
else:
self.assertEqual(torch_output.shape, coreml_output.shape)
npt.assert_almost_equal(
coreml_output, torch_output, decimal=decimal
)
def _test_single_layer(self, layer, decimal=7):
torch_model = nn.Sequential()
torch_model.add(layer)
coreml_output = self._forward_coreml(torch_model)
if not isinstance(coreml_output, list):
coreml_output = coreml_output.copy()
# XXX: pytorch legacy.nn has problem with state clearing, so we need to
# do it manually
for l in torch_model.modules:
if isinstance(l.output, torch.Tensor):
l.output = l.output.new()
torch_output = self._forward_torch(torch_model)
if not isinstance(torch_output, list):
torch_output = torch_output.copy()
self._assert_outputs(torch_output, coreml_output, decimal)
def test_elu(self):
self._test_single_layer(nn.ELU())
def test_relu(self):
self._test_single_layer(nn.ReLU())
def test_softmax(self):
self._test_single_layer(nn.SoftMax())
self._test_single_layer(nn.SpatialSoftMax())
def test_convolution(self):
self._test_single_layer(
nn.SpatialConvolution(3, 64, 7, 7, 2, 2, 3, 3),
decimal=6
)
def test_max_pooling(self):
self._test_single_layer(nn.SpatialMaxPooling(3, 3, 1, 1, 1, 1))
def test_avg_pooling(self):
self._test_single_layer(
nn.SpatialAveragePooling(5, 5, 1, 1, 2, 2),
decimal=6
)
def test_linear(self):
self.input = self.input.flatten()
input_size = self.input.shape[0]
self._test_single_layer(nn.Linear(input_size, 3, True), decimal=5)
def test_tanh(self):
self._test_single_layer(nn.Tanh())
def test_mul_constant(self):
self._test_single_layer(nn.MulConstant(3.0))
def test_zero_padding(self):
self._test_single_layer(nn.SpatialZeroPadding(1, 2, 3, 4))
self._test_single_layer(nn.SpatialZeroPadding(-2, -2, -2, -2))
def test_full_convolution(self):
self._test_single_layer(
nn.SpatialFullConvolution(3, 1, 7, 7, 5, 5, 2, 2, 2, 2)
)
def test_batch_norm(self):
self._test_single_layer(nn.SpatialBatchNormalization(3))
def test_narrow(self):
self.torch_batch_mode = False
self._test_single_layer(nn.Narrow(1, 1, 1))
def test_reflection_padding(self):
self._test_single_layer(nn.SpatialReflectionPadding(1, 2, 3, 4))
def test_upsample_nearest(self):
self._test_single_layer(nn.SpatialUpSamplingNearest(2))
def test_cadd_table(self):
self.input = [self.input] * 5
self._test_single_layer(nn.CAddTable())
def test_split_table(self):
self.output_count = 3
self.torch_batch_mode = False
self._test_single_layer(nn.SplitTable(0))
def test_sigmoid(self):
self._test_single_layer(nn.Sigmoid())
def test_power(self):
self._test_single_layer(nn.Power(2))
```
#### File: torch2coreml/torch2coreml/_layers.py
```python
from _utils import _gen_layer_name, _torch_typename
def _convert_sequential(builder, name, layer, input_names, output_names):
layers = layer.modules
n = len(layers)
inputs = input_names
for i in range(n):
l_ = layers[i]
l_outputs = None
l_name = _gen_layer_name(l_)
if i != (n - 1):
if isinstance(l_.output, list):
l_outputs = [
"{}_{}".format(l_name, i)for i in range(len(l_.output))
]
else:
l_outputs = [l_name]
else:
l_outputs = output_names
l_outputs = _convert_layer(builder, l_name, l_, inputs, l_outputs)
inputs = l_outputs
return output_names
def _convert_convolution(builder, name, layer, input_names, output_names):
input_name = input_names[0]
output_name = output_names[0]
k_h, k_w = layer.kH, layer.kW
pad_h, pad_w = layer.padH, layer.padW
weight = layer.weight.numpy().transpose((2, 3, 1, 0))
bias = None
if layer.bias is not None:
bias = layer.bias.numpy()
builder.add_convolution(
name=name,
kernel_channels=layer.nInputPlane,
output_channels=layer.nOutputPlane,
height=k_h,
width=k_w,
stride_height=layer.dH,
stride_width=layer.dW,
border_mode='valid',
groups=1,
W=weight,
b=bias,
has_bias=bias is not None,
is_deconv=False,
output_shape=None,
input_name=input_name,
output_name=output_name,
dilation_factors=[1, 1],
padding_top=pad_h,
padding_bottom=pad_h,
padding_left=pad_w,
padding_right=pad_w
)
return output_names
def _convert_full_convolution(builder, name, layer, input_names, output_names):
input_name = input_names[0]
output_name = output_names[0]
k_h, k_w = layer.kH, layer.kW
pad_h, pad_w = layer.padH, layer.padW
weight = layer.weight.numpy().transpose((2, 3, 0, 1))
bias = None
if layer.bias is not None:
bias = layer.bias.numpy()
add_crop = False
output_ = layer.output.numpy()
output_shape = (
output_.shape[-2] + 2 * pad_h,
output_.shape[-1] + 2 * pad_w
)
if pad_h > 0 or pad_w > 0:
crop_padding_name = _gen_layer_name('padding')
output_name = name + '_output'
add_crop = True
builder.add_convolution(
name=name,
kernel_channels=layer.nInputPlane,
output_channels=layer.nOutputPlane,
height=k_h,
width=k_w,
stride_height=layer.dH,
stride_width=layer.dW,
border_mode='valid',
groups=1,
W=weight,
b=bias,
has_bias=bias is not None,
is_deconv=True,
output_shape=output_shape,
input_name=input_name,
output_name=output_name,
dilation_factors=[1, 1]
)
if add_crop:
builder.add_crop(
name=crop_padding_name,
left=pad_w,
right=pad_w,
top=pad_h,
bottom=pad_h,
offset=0,
input_names=[output_name],
output_name=output_names[0]
)
return output_names
def _convert_elu(builder, name, layer, input_names, output_names):
builder.add_activation(
name=name,
non_linearity='ELU',
input_name=input_names[0],
output_name=output_names[0],
params=layer.alpha
)
return output_names
def _convert_relu(builder, name, layer, input_names, output_names):
builder.add_activation(
name=name,
non_linearity='RELU',
input_name=input_names[0],
output_name=output_names[0]
)
return output_names
def _convert_concat_table(builder, name, layer, input_names, output_names):
layers = layer.modules
result_outputs = []
for l in layers:
l_name = _gen_layer_name(l)
l_outputs = _convert_layer(builder, l_name, l, input_names, [l_name])
result_outputs += l_outputs
return result_outputs
def _convert_parallel_table(builder, name, layer, input_names, output_names):
layers = layer.modules
assert len(input_names) == len(layers)
result_outputs = []
for i in range(len(layers)):
l_ = layers[i]
l_name = _gen_layer_name(l_)
l_outputs = _convert_layer(
builder, l_name, l_, [input_names[i]], [l_name]
)
result_outputs.append(l_outputs[0])
return result_outputs
def _convert_batch_norm(builder, name, layer, input_names, output_names):
epsilon = layer.eps
mean = layer.running_mean.numpy()
variance = layer.running_var.numpy()
weight = layer.weight.numpy()
bias = None
if layer.bias is not None:
bias = layer.bias.numpy()
builder.add_batchnorm(
name=name,
channels=weight.shape[0],
gamma=weight,
beta=bias,
mean=mean,
variance=variance,
input_name=input_names[0],
output_name=output_names[0],
epsilon=epsilon
)
return output_names
def _convert_cadd_table(builder, name, layer, input_names, output_names):
assert len(input_names) > 1
assert len(output_names) == 1
builder.add_elementwise(
name=name,
input_names=input_names,
output_name=output_names[0],
mode='ADD'
)
return output_names
def _convert_cdiv_table(builder, name, layer, input_names, output_names):
assert len(input_names) == 2
assert len(output_names) == 1
inverse_layer_name = _gen_layer_name('inverse')
inverse_layer_output_name = inverse_layer_name + '_output'
builder.add_unary(
name=inverse_layer_name,
input_name=input_names[1],
output_name=inverse_layer_output_name,
mode='inverse'
)
builder.add_elementwise(
name=name,
input_names=[input_names[0], inverse_layer_output_name],
output_name=output_names[0],
mode='MULTIPLY'
)
return output_names
def _convert_cmul_table(builder, name, layer, input_names, output_names):
assert len(input_names) > 1
assert len(output_names) == 1
builder.add_elementwise(
name=name,
input_names=input_names,
output_name=output_names[0],
mode='MULTIPLY'
)
return output_names
def _convert_identity(builder, name, layer, input_names, output_names):
return input_names
def _convert_soft_max(builder, name, layer, input_names, output_names):
builder.add_softmax(
name=name,
input_name=input_names[0],
output_name=output_names[0]
)
return output_names
def _convert_pooling(builder, name, layer, input_names, output_names):
typename = _torch_typename(layer)
exclude_pad_area = True
if typename == 'SpatialMaxPooling':
layer_type = 'MAX'
elif typename == 'SpatialAveragePooling':
layer_type = 'AVERAGE'
exclude_pad_area = not layer.count_include_pad
else:
raise TypeError("Unknown type '{}'".format(typename,))
k_h, k_w = layer.kH, layer.kW
pad_h, pad_w = layer.padH, layer.padW
d_h, d_w = layer.dH, layer.dW
builder.add_pooling(
name=name,
height=k_h,
width=k_w,
stride_height=d_h,
stride_width=d_w,
layer_type=layer_type,
padding_type='VALID',
input_name=input_names[0],
output_name=output_names[0],
exclude_pad_area=exclude_pad_area,
padding_top=pad_h,
padding_bottom=pad_h,
padding_left=pad_w,
padding_right=pad_w
)
return output_names
def _convert_linear(builder, name, layer, input_names, output_names):
weight = layer.weight.numpy()
bias = layer.bias
has_bias = bias is not None
if has_bias:
bias = bias.numpy()
output_channels, input_channels = weight.shape
builder.add_inner_product(
name=name,
W=weight,
b=bias,
input_channels=input_channels,
output_channels=output_channels,
has_bias=has_bias,
input_name=input_names[0],
output_name=output_names[0]
)
return output_names
def _convert_view(builder, name, layer, input_names, output_names):
shape = tuple(layer.size)
if len(shape) == 1 or (len(shape) == 2 and shape[0] == 1):
builder.add_flatten(
name=name,
mode=0,
input_name=input_names[0],
output_name=output_names[0]
)
else:
builder.add_reshape(
name=name,
input_name=input_names[0],
output_name=output_names[0],
target_shape=shape,
mode=0
)
return output_names
def _convert_tanh(builder, name, layer, input_names, output_names):
builder.add_activation(
name=name,
non_linearity='TANH',
input_name=input_names[0],
output_name=output_names[0],
params=None
)
return output_names
def _convert_mul_constant(builder, name, layer, input_names, output_names):
scalar = float(layer.constant_scalar)
builder.add_elementwise(
name=name,
input_names=[input_names[0]],
output_name=output_names[0],
mode='MULTIPLY',
alpha=scalar
)
return output_names
def _convert_zero_padding(builder, name, layer, input_names, output_names):
pad_l = int(layer.pad_l)
pad_r = int(layer.pad_r)
pad_t = int(layer.pad_t)
pad_b = int(layer.pad_b)
if pad_l < 0 and pad_r < 0 and pad_t < 0 and pad_b < 0:
# crop mode
builder.add_crop(
name=name,
left=-pad_l,
right=-pad_r,
top=-pad_t,
bottom=-pad_b,
offset=None,
input_names=input_names,
output_name=output_names[0]
)
else:
builder.add_padding(
name=name,
left=pad_l,
right=pad_r,
top=pad_t,
bottom=pad_b,
value=0.0,
input_name=input_names[0],
output_name=output_names[0]
)
return output_names
def _convert_narrow(builder, name, layer, input_names, output_names):
dimension = layer.dimension
if len(layer.output.numpy().shape) == 4:
# as torch layer works with 4d tensor we should decrement dimension
dimension -= 1
if dimension == 0:
axis = 'channel'
elif dimension == 1:
axis = 'height'
elif dimension == 2:
axis = 'width'
else:
raise ValueError('Only 3d tensors are supported')
index = layer.index
length = layer.length
builder.add_slice(
name=name,
axis=axis,
start_index=index,
end_index=index + length,
stride=1,
input_name=input_names[0],
output_name=output_names[0]
)
return output_names
def _convert_reflection_padding(builder,
name,
layer,
input_names,
output_names):
pad_l = int(layer.pad_l)
pad_r = int(layer.pad_r)
pad_t = int(layer.pad_t)
pad_b = int(layer.pad_b)
builder.add_padding(
name=name,
left=pad_l,
right=pad_r,
top=pad_t,
bottom=pad_b,
input_name=input_names[0],
output_name=output_names[0],
padding_type='reflection'
)
return output_names
def _convert_upsampling_nearest(builder,
name,
layer,
input_names,
output_names):
scale = int(layer.scale_factor)
builder.add_upsample(
name=name,
scaling_factor_h=scale,
scaling_factor_w=scale,
input_name=input_names[0],
output_name=output_names[0],
mode='NN'
)
return output_names
def _convert_split_table(builder, name, layer, input_names, output_names):
dimension = layer.dimension
if len(layer.output[0].numpy().shape) == 3:
# as torch layer works with 4d tensor we should decrement dimension
dimension -= 1
if dimension != 0:
raise ValueError(
'Only channel dimension for Split is supported now.'
)
builder.add_split(
name=name,
input_name=input_names[0],
output_names=output_names
)
return output_names
def _convert_log(builder, name, layer, input_names, output_names):
builder.add_unary(
name=name,
input_name=input_names[0],
output_name=output_names[0],
mode='log'
)
return output_names
def _convert_sigmoid(builder, name, layer, input_names, output_names):
builder.add_activation(
name=name,
non_linearity='SIGMOID',
input_name=input_names[0],
output_name=output_names[0]
)
return output_names
def _convert_power(builder, name, layer, input_names, output_names):
p = layer.pow
if p == -1:
builder.add_unary(
name=name,
input_name=input_names[0],
output_name=output_names[0],
mode='inverse'
)
else:
builder.add_unary(
name=name,
input_name=input_names[0],
output_name=output_names[0],
mode='power',
alpha=float(p)
)
return output_names
_TORCH_LAYER_REGISTRY = {
'Sequential': _convert_sequential,
'SpatialConvolution': _convert_convolution,
'ELU': _convert_elu,
'ConcatTable': _convert_concat_table,
'SpatialBatchNormalization': _convert_batch_norm,
'Identity': _convert_identity,
'CAddTable': _convert_cadd_table,
'SpatialFullConvolution': _convert_full_convolution,
'SpatialSoftMax': _convert_soft_max,
'SoftMax': _convert_soft_max,
'ReLU': _convert_relu,
'SpatialMaxPooling': _convert_pooling,
'SpatialAveragePooling': _convert_pooling,
'View': _convert_view,
'Linear': _convert_linear,
'Tanh': _convert_tanh,
'MulConstant': _convert_mul_constant,
'SpatialZeroPadding': _convert_zero_padding,
'Narrow': _convert_narrow,
'SpatialReflectionPadding': _convert_reflection_padding,
'SpatialUpSamplingNearest': _convert_upsampling_nearest,
'SplitTable': _convert_split_table,
'CDivTable': _convert_cdiv_table,
'Log': _convert_log,
'Sigmoid': _convert_sigmoid,
'ParallelTable': _convert_parallel_table,
'Power': _convert_power,
'CMulTable': _convert_cmul_table,
}
def _get_layer_converter_fn(layer):
"""
Get the right converter function for Torch layer name
"""
name = _torch_typename(layer)
if name in _TORCH_LAYER_REGISTRY:
return _TORCH_LAYER_REGISTRY[name]
else:
unknown_fn = _get_layer_converter_fn.unknown_converter_fn
if unknown_fn is not None:
return unknown_fn
raise TypeError(
"Torch layer of type {} is not supported.".format(name,)
)
def _convert_layer(builder, name, layer, input_names, output_names):
converter_fn = _get_layer_converter_fn(layer)
return converter_fn(builder, name, layer, input_names, output_names)
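# --- Hedged sketch (editor addition, not part of the original module) ---
# A fallback for layers missing from _TORCH_LAYER_REGISTRY can be supplied via
# convert(..., unknown_layer_converter_fn=...). It must use the same signature as
# the converters above and return the list of output blob names. The dropout
# handling below is an illustrative policy, not part of torch2coreml itself.
#
# def _convert_unknown(builder, name, layer, input_names, output_names):
#     if _torch_typename(layer) == 'Dropout':
#         # dropout is a no-op at inference time, so just pass the inputs through
#         return input_names
#     raise TypeError("Unsupported layer: {}".format(_torch_typename(layer)))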
```
#### File: torch2coreml/torch2coreml/_torch_converter.py
```python
import numpy as np
import torch
from torch.utils.serialization import load_lua
from coremltools.models.neural_network import NeuralNetworkBuilder
from coremltools.models import MLModel, datatypes
import _layers
from _layers import _get_layer_converter_fn
from _utils import _gen_layer_name
from _utils import _convert_multiarray_output_to_image
_DEPROCESS_LAYER_NAME = 'deprocess_image'
def _forward_torch_random_input(torch_model, input_shapes, is_batch=False):
input_tensors = []
for shape in input_shapes:
if is_batch:
tensor = torch.rand(1, *shape).float()
else:
tensor = torch.rand(*shape).float()
input_tensors.append(tensor)
if len(input_tensors) == 1:
result = torch_model.forward(input_tensors[0])
else:
result = torch_model.forward(input_tensors)
if isinstance(result, list):
# multi output
output_shapes = []
for tensor in result:
shape = tensor.numpy().shape
if is_batch:
shape = shape[1:]
output_shapes.append(shape)
return output_shapes
else:
# single output
output_shape = result.numpy().shape
if is_batch:
return [output_shape[1:]]
else:
return [output_shape]
def _infer_torch_output_shapes(torch_model, input_shapes):
"""
Forward torch model to infer output shape
"""
try:
return _forward_torch_random_input(
torch_model,
input_shapes,
is_batch=False
)
except:
# try batch mode
return _forward_torch_random_input(
torch_model,
input_shapes,
is_batch=True
)
def _set_deprocessing(is_grayscale,
builder,
deprocessing_args,
input_name,
output_name):
is_bgr = deprocessing_args.get('is_bgr', False)
_convert_multiarray_output_to_image(
builder.spec, output_name, is_bgr=is_bgr
)
image_scale = deprocessing_args.get('image_scale', 1.0)
if is_grayscale:
gray_bias = deprocessing_args.get('gray_bias', 0.0)
W = np.array([image_scale])
b = np.array([gray_bias])
else:
W = np.array([image_scale, image_scale, image_scale])
red_bias = deprocessing_args.get('red_bias', 0.0)
green_bias = deprocessing_args.get('green_bias', 0.0)
blue_bias = deprocessing_args.get('blue_bias', 0.0)
if not is_bgr:
b = np.array([
red_bias,
green_bias,
blue_bias,
])
else:
b = np.array([
blue_bias,
green_bias,
red_bias,
])
builder.add_scale(
name=input_name,
W=W,
b=b,
has_bias=True,
shape_scale=W.shape,
shape_bias=b.shape,
input_name=input_name,
output_name=output_name
)
def convert(model,
input_shapes,
input_names=['input'],
output_names=['output'],
mode=None,
image_input_names=[],
preprocessing_args={},
image_output_names=[],
deprocessing_args={},
class_labels=None,
predicted_feature_name='classLabel',
unknown_layer_converter_fn=None):
"""
Convert Torch7 model to CoreML.
Parameters
----------
model: Torch7 model (loaded with PyTorch) | str
A trained Torch7 model loaded in python using PyTorch or path to file
with model (*.t7).
input_shapes: list of tuples
Shapes of the input tensors.
mode: str ('classifier', 'regressor' or None)
Mode of the converted coreml model:
'classifier', a NeuralNetworkClassifier spec will be constructed.
'regressor', a NeuralNetworkRegressor spec will be constructed.
preprocessing_args: dict
'is_bgr', 'red_bias', 'green_bias', 'blue_bias', 'gray_bias',
'image_scale' keys with the same meaning as
https://apple.github.io/coremltools/generated/coremltools.models.neural_network.html#coremltools.models.neural_network.NeuralNetworkBuilder.set_pre_processing_parameters
deprocessing_args: dict
Same as 'preprocessing_args' but for deprocessing.
class_labels: A string or list of strings.
As a string it represents the name of the file which contains
the classification labels (one per line).
As a list of strings it represents a list of categories that map
the index of the output of a neural network to labels in a classifier.
predicted_feature_name: str
Name of the output feature for the class labels exposed in the Core ML
model (applies to classifiers only). Defaults to 'classLabel'
unknown_layer_converter_fn: function with signature:
(builder, name, layer, input_names, output_names)
builder: object - instance of NeuralNetworkBuilder class
name: str - generated layer name
layer: object - pytorch object for corresponding layer
input_names: list of strings
output_names: list of strings
Returns: list of strings for layer output names
Callback function to handle unknown for torch2coreml layers
Returns
-------
model: A coreml model.
"""
_gen_layer_name.called = 0
_get_layer_converter_fn.unknown_converter_fn = unknown_layer_converter_fn
if isinstance(model, basestring):
torch_model = load_lua(model)
elif isinstance(model, torch.legacy.nn.Sequential):
torch_model = model
else:
raise TypeError(
"Model must be file path to .t7 file or pytorch loaded model \
with torch.legacy.nn.Sequential module as root"
)
torch_model.evaluate()
if not isinstance(input_shapes, list):
raise TypeError("Input shapes should be a list of tuples.")
for shape in input_shapes:
if not isinstance(shape, tuple):
raise TypeError("Input shape should be a tuple.")
if len(input_names) != len(input_shapes):
raise ValueError(
"Input names count must be equal to input shapes count"
)
output_shapes = _infer_torch_output_shapes(
torch_model,
input_shapes
)
if len(output_shapes) != len(output_names):
raise ValueError(
"Model has {} outputs, but you set output_names for {}."
.format(len(output_shapes), len(output_names))
)
# create input/output features
input_features = []
for i in range(len(input_names)):
input_features.append(
(input_names[i], datatypes.Array(*input_shapes[i]))
)
output_features = []
for i in range(len(output_names)):
output_features.append(
(output_names[i], datatypes.Array(*output_shapes[i]))
)
builder = NeuralNetworkBuilder(input_features, output_features, mode)
# build model
layer_name = _gen_layer_name(torch_model)
_output_names = output_names[:]
if len(image_output_names) > 0:
for i in range(len(_output_names)):
if _output_names[i] in image_output_names:
_output_names[i] = _gen_layer_name(_DEPROCESS_LAYER_NAME)
model_output_names = _layers._convert_layer(
builder, layer_name, torch_model, input_names, _output_names
)
# set preprocessing parameters
if len(image_input_names) > 0:
builder.set_pre_processing_parameters(
image_input_names=image_input_names,
is_bgr=preprocessing_args.get('is_bgr', False),
red_bias=preprocessing_args.get('red_bias', 0.0),
green_bias=preprocessing_args.get('green_bias', 0.0),
blue_bias=preprocessing_args.get('blue_bias', 0.0),
gray_bias=preprocessing_args.get('gray_bias', 0.0),
image_scale=preprocessing_args.get('image_scale', 1.0)
)
# set deprocessing parameters
if len(image_output_names) > 0:
for i in range(len(output_names)):
output_name = output_names[i]
if output_name in image_output_names:
output_shape = output_shapes[i]
if len(output_shape) == 2 or output_shape[0] == 1:
is_grayscale = True
elif output_shape[0] == 3:
is_grayscale = False
else:
raise ValueError('Output must be RGB image or Grayscale')
_set_deprocessing(
is_grayscale,
builder,
deprocessing_args,
model_output_names[i],
output_name
)
if class_labels is not None:
if type(class_labels) is str:
labels = [l.strip() for l in open(class_labels).readlines()]
elif type(class_labels) is list:
labels = class_labels
else:
raise TypeError(
"synset variable of unknown type. Type found: {}. \
Expected either string or list of strings."
.format(type(class_labels),))
builder.set_class_labels(
class_labels=labels,
predicted_feature_name=predicted_feature_name
)
return MLModel(builder.spec)
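# --- Hedged usage sketch (editor addition, not part of the original module) ---
# The model path, input shape and pre/deprocessing values are placeholders.
#
# coreml_model = convert(
#     'fast_neural_style.t7',  # any torch.legacy.nn.Sequential saved as .t7
#     input_shapes=[(3, 256, 256)],
#     input_names=['image'],
#     output_names=['stylized'],
#     image_input_names=['image'],
#     image_output_names=['stylized'],
#     preprocessing_args={'is_bgr': False, 'image_scale': 1.0},
#     deprocessing_args={'is_bgr': False, 'image_scale': 1.0},
# )
# coreml_model.save('fast_neural_style.mlmodel')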
``` |
{
"source": "JoOkuma/dexp",
"score": 2
} |
#### File: cli/dexp_commands/add.py
```python
import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels
from dexp.datasets.open_dataset import glob_datasets
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--rename",
"-rc",
default=None,
help="You can rename channels: e.g. if channels are ‘channel1,anotherc’ then ‘gfp,rfp’ would rename the ‘channel1’ channel to ‘gfp’, and ‘anotherc’ to ‘rfp’ ",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--projection", "-p/-np", is_flag=True, default=True, help="If flags should be copied.", show_default=True
)
def add(input_paths, output_path, channels, rename, store, overwrite, projection):
"""Adds the channels selected from INPUT_PATHS to the given output dataset (created if not existing)."""
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_add")
channels = _parse_channels(input_dataset, channels)
if rename is None:
rename = input_dataset.channels()
else:
rename = rename.split(",")
with asection(f"Adding channels: {channels} from: {input_paths} to {output_path}, with new names: {rename}"):
input_dataset.add_channels_to(
output_path,
channels=channels,
rename=rename,
store=store,
overwrite=overwrite,
add_projections=projection,
)
input_dataset.close()
aprint("Done!")
```
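
A quick way to exercise the command above without a shell is click's test runner. The sketch below is illustrative only: the Zarr paths are hypothetical placeholders and the import path is assumed from the file location shown above.

```python
from click.testing import CliRunner

from dexp.cli.dexp_commands.add import add  # assumed module path

runner = CliRunner()
result = runner.invoke(
    add,
    ["input.zarr", "-o", "combined.zarr", "-c", "gfp,rfp", "-w"],
)
print(result.exit_code)
print(result.output)
```
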
#### File: cli/dexp_commands/crop.py
```python
import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import DEFAULT_CLEVEL, DEFAULT_CODEC, DEFAULT_STORE
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_chunks
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.crop import dataset_crop
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when ommited.")
@click.option(
"--quantile",
"-q",
default=0.99,
type=float,
help="Quantile parameter for lower bound of brightness for thresholding.",
show_default=True,
)
@click.option(
"--reference-channel",
"-rc",
default=None,
help="Reference channel to estimate cropping. If no provided it picks the first one.",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option("--chunks", "-chk", default=None, help="Dataset chunks dimensions, e.g. (1, 126, 512, 512).")
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
help="Compression codec: zstd for ’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ ",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--workers",
"-wk",
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n| ",
show_default=True,
) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def crop(
input_paths,
output_path,
channels,
quantile,
reference_channel,
store,
chunks,
codec,
clevel,
overwrite,
workers,
check,
):
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_crop")
channels = _parse_channels(input_dataset, channels)
if reference_channel is None:
reference_channel = input_dataset.channels()[0]
chunks = _parse_chunks(chunks)
with asection(
f"Cropping from: {input_paths} to {output_path} for channels: {channels}, "
f"using channel {reference_channel} as a reference."
):
dataset_crop(
input_dataset,
output_path,
channels=channels,
reference_channel=reference_channel,
quantile=quantile,
store=store,
chunks=chunks,
compression=codec,
compression_level=clevel,
overwrite=overwrite,
workers=workers,
check=check,
)
input_dataset.close()
aprint("Done!")
```
#### File: cli/dexp_commands/stabilize.py
```python
import click
from arbol.arbol import aprint, asection
from dexp.cli.defaults import (
DEFAULT_CLEVEL,
DEFAULT_CODEC,
DEFAULT_STORE,
DEFAULT_WORKERS_BACKEND,
)
from dexp.cli.parsing import _get_output_path, _parse_channels, _parse_slicing
from dexp.datasets.open_dataset import glob_datasets
from dexp.datasets.operations.stabilize import dataset_stabilize
@click.command()
@click.argument("input_paths", nargs=-1) # , help='input path'
@click.option("--output_path", "-o") # , help='output path'
@click.option("--channels", "-c", default=None, help="List of channels, all channels when omitted.")
@click.option(
"--reference-channel", "-rc", default=None, help="Reference channel for single stabilization model computation."
)
@click.option(
"--slicing",
"-s",
default=None,
help="Dataset slice (TZYX), e.g. [0:5] (first five stacks) [:,0:100] (cropping in z) ",
)
@click.option("--store", "-st", default=DEFAULT_STORE, help="Zarr store: ‘dir’, ‘ndir’, or ‘zip’", show_default=True)
@click.option(
"--codec",
"-z",
default=DEFAULT_CODEC,
help="Compression codec: ‘zstd’, ‘blosclz’, ‘lz4’, ‘lz4hc’, ‘zlib’ or ‘snappy’ ",
show_default=True,
)
@click.option("--clevel", "-l", type=int, default=DEFAULT_CLEVEL, help="Compression level", show_default=True)
@click.option("--overwrite", "-w", is_flag=True, help="Forces overwrite of target", show_default=True)
@click.option(
"--maxrange",
"-mr",
type=int,
default=7,
help="Maximal distance, in time points, between pairs of images to registrate.",
show_default=True,
)
@click.option(
"--minconfidence",
"-mc",
type=float,
default=0.5,
help="Minimal confidence for registration parameters, if below that level the registration parameters for previous time points is used.",
show_default=True,
) #
@click.option(
"--com/--no-com",
type=bool,
default=False,
help="Enable center of mass fallback when standard registration fails.",
show_default=True,
)
@click.option(
"--quantile",
"-q",
type=float,
default=0.5,
help="Quantile to cut-off background in center-of-mass calculation.",
show_default=True,
)
@click.option("--tolerance", "-t", type=float, default=1e-7, help="Tolerance for linear solver.", show_default=True)
@click.option(
"--ordererror", "-oe", type=float, default=2.0, help="Order for linear solver error term.", show_default=True
)
@click.option(
"--orderreg", "-or", type=float, default=1.0, help="Order for linear solver regularisation term.", show_default=True
)
@click.option(
"--alphareg",
"-or",
type=float,
default=1e-4,
help="Multiplicative coefficient for regularisation term.",
show_default=True,
)
@click.option(
"--pcsigma",
"-rs",
type=float,
default=2,
help="Sigma for Gaussian smoothing of phase correlogram, zero to disable.",
show_default=True,
)
@click.option(
"--dsigma",
"-ds",
type=float,
default=1.5,
help="Sigma for Gaussian smoothing (crude denoising) of input images, zero to disable.",
show_default=True,
)
@click.option(
"--logcomp",
"-lc",
type=bool,
default=True,
help="Applies the function log1p to the images to compress high-intensities (usefull when very (too) bright structures are present in the images, such as beads.",
show_default=True,
)
@click.option(
"--edgefilter",
"-ef",
type=bool,
default=False,
help="Applies sobel edge filter to input images.",
show_default=True,
)
@click.option(
"--detrend",
"-dt",
type=bool,
is_flag=True,
default=False,
help="Remove linear trend from stabilization result",
show_default=True,
)
@click.option(
"--maxproj/--no-maxproj",
"-mp/-nmp",
type=bool,
default=True,
help="Registers using only the maximum intensity projection from each stack.",
show_default=True,
)
@click.option(
"--model-input-path",
"-mi",
type=str,
default=None,
help="Path to pre-computed model for image registration",
)
@click.option(
"--model-output-path",
"-mo",
type=str,
default="stabilization_model.json",
show_default=True,
help="Output path for computed registration model",
)
@click.option(
"--workers",
"-k",
type=int,
default=-4,
help="Number of worker threads to spawn. Negative numbers n correspond to: number_of _cores / |n|. Be careful, starting two many workers is know to cause trouble (unfortunately unclear why!).",
show_default=True,
)
@click.option(
"--workersbackend",
"-wkb",
type=str,
default=DEFAULT_WORKERS_BACKEND,
help="What backend to spawn workers with, can be ‘loky’ (multi-process) or ‘threading’ (multi-thread) ",
show_default=True,
) #
@click.option("--device", "-d", type=int, default=0, help="Sets the CUDA devices id, e.g. 0,1,2", show_default=True) #
@click.option("--check", "-ck", default=True, help="Checking integrity of written file.", show_default=True) #
def stabilize(
input_paths,
output_path,
channels,
reference_channel,
slicing,
store,
codec,
clevel,
overwrite,
maxrange,
minconfidence,
com,
quantile,
tolerance,
ordererror,
orderreg,
alphareg,
pcsigma,
dsigma,
logcomp,
edgefilter,
detrend,
maxproj,
model_input_path,
model_output_path,
workers,
workersbackend,
device,
check,
):
"""Stabilises dataset against translations across time."""
input_dataset, input_paths = glob_datasets(input_paths)
output_path = _get_output_path(input_paths[0], output_path, "_stabilized")
slicing = _parse_slicing(slicing)
channels = _parse_channels(input_dataset, channels)
with asection(
f"Stabilizing dataset(s): {input_paths}, saving it at: {output_path}, for channels: {channels}, slicing: {slicing} "
):
dataset_stabilize(
input_dataset,
output_path,
channels=channels,
model_output_path=model_output_path,
model_input_path=model_input_path,
reference_channel=reference_channel,
slicing=slicing,
zarr_store=store,
compression_codec=codec,
compression_level=clevel,
overwrite=overwrite,
max_range=maxrange,
min_confidence=minconfidence,
enable_com=com,
quantile=quantile,
tolerance=tolerance,
order_error=ordererror,
order_reg=orderreg,
alpha_reg=alphareg,
phase_correlogram_sigma=pcsigma,
denoise_input_sigma=dsigma,
log_compression=logcomp,
edge_filter=edgefilter,
detrend=detrend,
maxproj=maxproj,
workers=workers,
workers_backend=workersbackend,
device=device,
check=check,
debug_output="stabilization",
)
input_dataset.close()
aprint("Done!")
```
#### File: dexp/datasets/clearcontrol_dataset.py
```python
import os
import re
from fnmatch import fnmatch
from os import listdir
from os.path import exists, join
from typing import Any, List, Sequence, Tuple
import numpy as np
from arbol.arbol import aprint
from cachey import Cache
from dask import array, delayed
from dexp.datasets.base_dataset import BaseDataset
from dexp.io.compress_array import decompress_array
from dexp.utils.config import config_blosc
class CCDataset(BaseDataset):
def __init__(self, path, cache_size=8e9):
super().__init__(dask_backed=False)
config_blosc()
self.folder = path
self._channels = []
self._index_files = {}
all_files = list(listdir(path))
# print(all_files)
for file in all_files:
if fnmatch(file, "*.index.txt"):
if not file.startswith("._"):
channel = file.replace(".index.txt", "")
self._channels.append(channel)
self._index_files[channel] = join(path, file)
# print(self._channels)
# print(self._index_files)
self._nb_time_points = {}
self._times_sec = {}
self._shapes = {}
self._channel_shape = {}
self._time_points = {}
self._is_compressed = self._find_out_if_compressed(path)
for channel in self._channels:
self._parse_channel(channel)
        self.cache = Cache(cache_size)  # in-memory cache of up to cache_size bytes (default: 8 GB)
def _parse_channel(self, channel):
index_file = self._index_files[channel]
with open(index_file) as f:
lines = f.readlines()
lines = [line.strip() for line in lines]
lines = [re.split(r"\t+", line) for line in lines]
self._time_points[channel] = []
self._times_sec[channel] = []
self._shapes[channel] = []
for line in lines:
time_point = int(line[0])
time_sec = float(line[1])
shape = eval("(" + line[2] + ")")[::-1]
self._times_sec[channel].append(time_sec)
self._shapes[channel].append(shape)
if channel in self._channel_shape:
existing_shape = self._channel_shape[channel]
if shape != existing_shape:
aprint(
f"Warning: Channel {channel} has varying stack shape! Shape changes from "
+ f"{existing_shape} to {shape} at time point {time_point}"
)
self._channel_shape[channel] = shape
self._time_points[channel].append(time_point)
self._nb_time_points[channel] = len(self._time_points[channel])
def _get_stack_file_name(self, channel, time_point):
compressed_file_name = join(self.folder, "stacks", channel, str(time_point).zfill(6) + ".blc")
raw_file_name = join(self.folder, "stacks", channel, str(time_point).zfill(6) + ".raw")
if self._is_compressed is None:
if exists(compressed_file_name):
self._is_compressed = True
else:
self._is_compressed = False
if self._is_compressed:
return compressed_file_name
else:
return raw_file_name
def _get_array_for_stack_file(self, file_name, shape=None, dtype=None):
try:
if file_name.endswith(".raw"):
aprint(f"Accessing file: {file_name}")
dt = np.dtype(np.uint16)
dt = dt.newbyteorder("L")
array = np.fromfile(file_name, dtype=dt)
elif file_name.endswith(".blc"):
array = np.empty(shape=shape, dtype=dtype)
with open(file_name, "rb") as binary_file:
# Read the whole file at once
data = binary_file.read()
decompress_array(data, array)
# Reshape array:
if shape is not None:
array = array.reshape(shape)
return array
except FileNotFoundError:
aprint(f"Could not find file: {file_name} for array of shape: {shape}")
return np.zeros(shape, dtype=np.uint16)
def _get_slice_array_for_stack_file_and_z(self, file_name, shape, z):
try:
if file_name.endswith(".raw"):
aprint(f"Accessing file: {file_name} at z={z}")
                length = shape[1] * shape[2]  # number of pixels in a single z-slice
                offset = z * length * np.dtype(np.uint16).itemsize  # byte offset of the requested slice
                dt = np.dtype(np.uint16)
                dt = dt.newbyteorder("L")
                array = np.fromfile(file_name, offset=offset, count=length, dtype=dt)
elif file_name.endswith(".blc"):
raise NotImplementedError("This type of access is not yet supported")
array = array.reshape(shape[1:])
return array
except FileNotFoundError:
aprint(f"Could not find file: {file_name} for array of shape: {shape} at z={z}")
return np.zeros(shape[1:], dtype=np.uint16)
def close(self):
# Nothing to do...
pass
def channels(self) -> List[str]:
return list(self._channels)
def shape(self, channel: str, time_point: int = 0) -> Sequence[int]:
try:
return (self._nb_time_points[channel],) + self._shapes[channel][time_point]
except (IndexError, KeyError):
return ()
def dtype(self, channel: str):
return np.uint16
def info(self, channel: str = None) -> str:
if channel:
info_str = (
f"Channel: '{channel}', nb time points: {self.shape(channel)[0]}, shape: {self.shape(channel)[1:]} "
)
return info_str
else:
return self.tree()
def get_metadata(self):
# TODO: implement this!
return {}
def append_metadata(self, metadata: dict):
raise NotImplementedError("Method append_metadata is not available for a joined dataset!")
def get_array(self, channel: str, per_z_slice: bool = True, wrap_with_dask: bool = False):
# Lazy and memorized version of get_stack:
lazy_get_stack = delayed(self.get_stack, pure=True)
# Lazily load each stack for each time point:
lazy_stacks = [lazy_get_stack(channel, time_point, per_z_slice) for time_point in self._time_points[channel]]
# Construct a small Dask array for every lazy value:
arrays = [
array.from_delayed(lazy_stack, dtype=np.uint16, shape=self._channel_shape[channel])
for lazy_stack in lazy_stacks
]
stacked_array = array.stack(arrays, axis=0) # Stack all small Dask arrays into one
return stacked_array
def get_stack(self, channel, time_point, per_z_slice=True, wrap_with_dask: bool = False):
file_name = self._get_stack_file_name(channel, time_point)
shape = self._shapes[channel][time_point]
if per_z_slice and not self._is_compressed:
lazy_get_slice_array_for_stack_file_and_z = delayed(
self.cache.memoize(self._get_slice_array_for_stack_file_and_z), pure=True
)
# Lazily load each stack for each time point:
lazy_stacks = [lazy_get_slice_array_for_stack_file_and_z(file_name, shape, z) for z in range(0, shape[0])]
arrays = [array.from_delayed(lazy_stack, dtype=np.uint16, shape=shape[1:]) for lazy_stack in lazy_stacks]
stack = array.stack(arrays, axis=0)
else:
stack = self._get_array_for_stack_file(file_name, shape=shape, dtype=np.uint16)
return stack
def add_channel(self, name: str, shape: Tuple[int, ...], dtype, enable_projections: bool = True, **kwargs) -> Any:
raise NotImplementedError("Not implemented!")
def get_projection_array(self, channel: str, axis: int, wrap_with_dask: bool = True) -> Any:
return None
def write_array(self, channel: str, array: np.ndarray):
raise NotImplementedError("Not implemented!")
def write_stack(self, channel: str, time_point: int, stack: np.ndarray):
raise NotImplementedError("Not implemented!")
def check_integrity(self, channels: Sequence[str]) -> bool:
# TODO: actually implement!
return True
def _find_out_if_compressed(self, path):
for root, dirs, files in os.walk(path):
for file in files:
if file.endswith(".blc"):
return True
return False
def time_sec(self, channel: str) -> np.ndarray:
return np.asarray(self._times_sec[channel])
```
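
The dataset class above is typically used read-only: point it at a ClearControl acquisition folder, inspect the channels, then pull single stacks or the whole lazily-stacked time series. In the sketch below the folder path is a hypothetical placeholder and the import path is assumed from the file location.

```python
from dexp.datasets.clearcontrol_dataset import CCDataset  # assumed module path

ds = CCDataset("/path/to/clearcontrol_acquisition")
channel = ds.channels()[0]
print(ds.shape(channel))                             # (nb_time_points, nz, ny, nx)

stack = ds.get_stack(channel, 0, per_z_slice=False)  # one time point, as a numpy array
movie = ds.get_array(channel)                        # full series, as a lazy dask array
print(stack.shape, movie.shape)
```
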
#### File: operations/_test/test_segment.py
```python
import random
from pathlib import Path
import pytest
from arbol import asection
from dexp.cli.parsing import parse_devices
from dexp.datasets import ZDataset
from dexp.datasets.operations.segment import dataset_segment
from dexp.utils.backends.backend import Backend
from dexp.utils.backends.cupy_backend import is_cupy_available
@pytest.mark.parametrize(
"dexp_nuclei_background_data",
[dict(add_noise=True, length_z_factor=4)],
indirect=True,
)
def test_dataset_segment(dexp_nuclei_background_data, tmp_path: Path, display_test: bool):
if not is_cupy_available():
pytest.skip(f"Cupy not found. Skipping {test_dataset_segment.__name__} gpu test.")
# Load
_, _, image = dexp_nuclei_background_data
n_time_pts = 3
z_scale = 4
channels = [f"channels_{i}" for i in range(n_time_pts)]
out_channel = "Segments"
input_path = tmp_path / "in_ds.zarr"
output_path = tmp_path / "out_ds.zarr"
df_path = tmp_path / output_path.name.replace(".zarr", ".csv")
xp = Backend.get_xp_module(image)
images = [xp.zeros_like(image) for _ in range(n_time_pts)]
# adding some jitter to the different channels to emulate chromatic aberration
for im in images:
jitter = [random.randint(0, 5) for _ in range(image.ndim)]
src_slicing = tuple(slice(0, d - j) for j, d in zip(jitter, image.shape))
dst_slicing = tuple(slice(j, None) for j in jitter)
im[src_slicing] = image[dst_slicing]
with asection("Creating temporary zdatasets ..."):
in_ds = ZDataset(input_path, mode="w")
for im, ch in zip(images, channels):
in_ds.add_channel(name=ch, shape=(n_time_pts,) + image.shape, dtype=image.dtype)
for t in range(n_time_pts):
in_ds.write_stack(ch, t, im)
out_ds = ZDataset(output_path, mode="w")
with asection("Executing command `dexp segment ...`"):
dataset_segment(
in_ds,
out_ds,
detection_channels=channels,
features_channels=channels,
out_channel=out_channel,
devices=parse_devices("all"),
z_scale=z_scale,
area_threshold=1e2,
minimum_area=50,
h_minima=1,
compactness=0,
gamma=1,
use_edt=True,
)
assert df_path.exists()
if display_test:
import napari
viewer = napari.Viewer()
colors = ("red", "green", "cyan")
for color, ch in zip(colors, channels):
viewer.add_image(in_ds.get_array(ch), name=ch, scale=(z_scale, 1, 1), blending="additive", colormap=color)
viewer.add_labels(out_ds.get_array(out_channel), name="output", scale=(z_scale, 1, 1))
napari.run()
in_ds.close()
out_ds.close()
if __name__ == "__main__":
from dexp.utils.testing import test_as_demo
# the same as executing from the CLI
# pytest <file name> -s --display True
test_as_demo(__file__)
```
#### File: processing/denoising/j_invariance.py
```python
import itertools
import math
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Union
import numpy as np
from arbol import aprint, asection
from scipy.optimize import minimize, shgo
from dexp.processing.denoising.metrics import mean_squared_error
from dexp.utils import dict_or, xpArray
from dexp.utils.backends import Backend
def calibrate_denoiser(
image: xpArray,
denoise_function: Callable[[Any], xpArray],
denoise_parameters: Dict[str, List[Union[float, int]]],
setup_function: Optional[Callable[[xpArray], Any]] = None,
mode: str = "shgo+lbfgs",
max_evaluations: int = 4096,
stride: int = 4,
loss_function: Callable = mean_squared_error,
display: bool = False,
**other_fixed_parameters,
):
"""
Calibrates denoiser using self-supervised loss from Batson & Royer*
Derived from code here:
https://scikit-image.org/docs/dev/auto_examples/filters/plot_j_invariant_tutorial.html
Reference: "Noise2Self: Blind Denoising by Self-Supervision, International
Conference on Machine Learning, p. 524-533 (2019)"
    Depending on `mode`, the optimisation uses brute-force search, SHGO, L-BFGS,
    or a combination of these. Brute-force is a good choice when the denoiser is
    fast enough and the parameter space to explore is small enough.
Parameters
----------
image: ArrayLike
        Image to calibrate the denoiser with.
denoise_function: Callable
Denosing function to calibrate. Should take an image as first parameter,
all other parameters should have defaults
denoise_parameters:
Dictionary with keys corresponding to parameters of the denoising function.
Values are either: (i) a list of possible values (categorical parameter),
or (ii) a tuple of floats defining the bounds of that numerical parameter.
setup_function : Callable, optional
Function to pre process / setup denoising input.
mode : str
Optimisation mode. Can be: 'bruteforce', 'lbfgs' or 'shgo'.
max_evaluations: int
Maximum number of function evaluations during optimisation.
stride: int
Stride to compute self-supervised loss.
loss_function: Callable
Loss/Error function: takes two arrays and returns a distance-like function.
Can be: structural_error, mean_squared_error, _mean_absolute_error
    display: bool
        If True the denoised images for each set of parameters tested are displayed.
        This will be slow.
other_fixed_parameters: dict
Other fixed parameters to pass to the denoiser function.
Returns
-------
Dictionary with optimal parameters
"""
# Move image to backend:
image = Backend.to_backend(image)
aprint(f"Calibrating denoiser on image of shape: {image.shape}")
aprint(f"Stride for Noise2Self loss: {stride}")
aprint(f"Fixed parameters: {other_fixed_parameters}")
# Pass fixed parameters:
denoise_function = partial(denoise_function, **other_fixed_parameters)
with asection(f"Calibrating denoiser with method: {mode}"):
best_parameters = _calibrate_denoiser_search(
image,
denoise_function,
denoise_parameters=denoise_parameters,
setup_function=setup_function,
mode=mode,
max_evaluations=max_evaluations,
stride=stride,
loss_function=loss_function,
display_images=display,
)
aprint(f"Best parameters are: {best_parameters}")
return dict_or(best_parameters, other_fixed_parameters)
def _interpolate_image(image: xpArray):
# Backend:
sp = Backend.get_sp_module(image)
conv_filter = sp.ndimage.generate_binary_structure(image.ndim, 1).astype(image.dtype)
conv_filter.ravel()[conv_filter.size // 2] = 0
conv_filter /= conv_filter.sum()
interpolation = sp.ndimage.convolve(image, conv_filter, mode="mirror")
return interpolation
def _generate_mask(image: xpArray, stride: int = 4):
# Generate slice for mask:
spatialdims = image.ndim
n_masks = stride ** spatialdims
phases = np.unravel_index(n_masks // 2, (stride,) * len(image.shape[:spatialdims]))
mask = tuple(slice(p, None, stride) for p in phases)
return mask
def _product_from_dict(dictionary: Dict[str, List[Union[float, int]]]):
"""Utility function to convert parameter ranges to parameter combinations.
Converts a dict of lists into a list of dicts whose values consist of the
cartesian product of the values in the original dict.
Parameters
----------
dictionary : dict of lists
Dictionary of lists to be multiplied.
Yields
------
selections : dicts of values
Dicts containing individual combinations of the values in the input
dict.
"""
keys = dictionary.keys()
for element in itertools.product(*dictionary.values()):
yield dict(zip(keys, element))
def _calibrate_denoiser_search(
image: xpArray,
denoise_function: Callable[[Any], xpArray],
denoise_parameters: Dict[str, List[Union[float, int]]],
setup_function: Optional[Callable[[xpArray], Any]],
mode: str,
max_evaluations: int,
stride=4,
loss_function: Callable = mean_squared_error, # _structural_loss, #
display_images: bool = False,
):
"""Return a parameter search history with losses for a denoise function.
Parameters
----------
image : ndarray
Input data to be denoised (converted using `img_as_float`).
denoise_function : Callable
Denoising function to be calibrated.
denoise_parameters : dict of list
Ranges of parameters for `denoise_function` to be calibrated over.
setup_function : Callable, optional
Function to pre process / setup denoising input.
mode : str
Optimisation mode. Can be: "bruteforce", "lbfgs" or "shgo".
max_evaluations: int
Maximum number of function evaluations during optimisation.
stride : int, optional
Stride used in masking procedure that converts `denoise_function`
to J-invariance.
loss_function : Callable
Loss function to use
    display_images : bool
        When True the resulting images are displayed with napari.
Returns
-------
    best_parameters : dict
        The best parameters found for `denoise_function`, as a dictionary of
        kwargs.
"""
# Move image to backend:
image = Backend.to_backend(image)
# Generate mask:
mask = _generate_mask(image, stride)
masked_image = image.copy()
masked_image[mask] = _interpolate_image(image)[mask]
# denoised images are kept here:
denoised_images = []
# Parameter names:
parameter_names = list(denoise_parameters.keys())
# Best parameters (to be found):
best_parameters = None
# Setting up denoising
if setup_function is not None:
denoising_input = setup_function(masked_image)
else:
denoising_input = masked_image
# Function to optimise:
def _loss_func(**_denoiser_kwargs):
# We compute the J-inv loss:
denoised = denoise_function(denoising_input, **_denoiser_kwargs)
loss = loss_function(denoised[mask], image[mask])
if math.isnan(loss) or math.isinf(loss):
loss = math.inf
aprint(f"J-inv loss is: {loss}")
loss = Backend.to_numpy(loss)
if display_images and not (math.isnan(loss) or math.isinf(loss)):
denoised = denoise_function(image, **_denoiser_kwargs)
denoised_images.append(denoised)
return -float(loss)
best_parameters = None
if "bruteforce" in mode:
with asection(f"Searching by brute-force for the best denoising parameters among: {denoise_parameters}"):
num_rounds = 4
best_loss = -math.inf
for _ in range(num_rounds):
# expand ranges:
expanded_denoise_parameters = {n: np.arange(*r) for (n, r) in denoise_parameters.items()}
# Generate all possible combinations:
cartesian_product_of_parameters = list(_product_from_dict(expanded_denoise_parameters))
for denoiser_kwargs in cartesian_product_of_parameters:
with asection(f"computing J-inv loss for: {denoiser_kwargs}"):
loss = _loss_func(**denoiser_kwargs)
if loss > best_loss:
best_loss = loss
best_parameters = denoiser_kwargs
if "shgo" in mode:
with asection(
"Searching by 'simplicial homology global optimization' (SHGO)"
f"the best denoising parameters among: {denoise_parameters}"
):
if best_parameters is None:
x0 = tuple(0.5 * (v[1] - v[0]) for v in denoise_parameters.values())
else:
x0 = tuple(best_parameters[k] for k in denoise_parameters.keys())
bounds = list(v[0:2] for v in denoise_parameters.values())
# Impedance mismatch:
def _func(*_denoiser_kwargs):
param_dict = {n: v for (n, v) in zip(parameter_names, tuple(_denoiser_kwargs[0]))}
value = -_loss_func(**param_dict)
return value
result = shgo(_func, bounds, sampling_method="sobol", options={"maxev": max_evaluations})
aprint(result)
best_parameters = dict({n: v for (n, v) in zip(parameter_names, result.x)})
if "lbfgs" in mode:
with asection(f"Searching by 'Limited-memory BFGS' the best denoising parameters among: {denoise_parameters}"):
if best_parameters is None:
x0 = tuple(0.5 * (v[1] - v[0]) for v in denoise_parameters.values())
else:
x0 = tuple(best_parameters[k] for k in denoise_parameters.keys())
bounds = list(v[0:2] for v in denoise_parameters.values())
# Impedance mismatch:
def _func(*_denoiser_kwargs):
param_dict = {n: v for (n, v) in zip(parameter_names, tuple(_denoiser_kwargs[0]))}
value = -_loss_func(**param_dict)
return value
result = minimize(
fun=_func,
x0=x0,
method="L-BFGS-B",
bounds=bounds,
options=dict(maxfun=max_evaluations, eps=1e-2, ftol=1e-9, gtol=1e-9),
)
aprint(result)
best_parameters = dict({n: v for (n, v) in zip(parameter_names, result.x)})
if display_images:
import napari
viewer = napari.Viewer()
viewer.add_image(Backend.to_numpy(image), name="image")
viewer.add_image(np.stack([Backend.to_numpy(i) for i in denoised_images]), name="denoised")
napari.run()
return best_parameters
```
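
To make the calibration loop above concrete, here is a minimal sketch that tunes the `sigma` of a plain Gaussian filter against a random image. The `NumpyBackend` import, the module path, and the parameter bounds are assumptions for illustration; any callable whose tunable arguments match the keys of `denoise_parameters` works the same way.

```python
import numpy as np
from scipy.ndimage import gaussian_filter

from dexp.processing.denoising.j_invariance import calibrate_denoiser  # assumed module path
from dexp.utils.backends import NumpyBackend                           # assumed import

noisy = np.random.rand(128, 128).astype(np.float32)  # stand-in for a real noisy image

def denoise_gaussian(image, sigma: float = 1.0):
    return gaussian_filter(image, sigma=sigma)

with NumpyBackend():
    best = calibrate_denoiser(
        noisy,
        denoise_gaussian,
        denoise_parameters={"sigma": (0.1, 3.0)},  # (lower, upper) bounds for SHGO
        mode="shgo",
    )
print(best)  # e.g. {'sigma': ...}
```
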
#### File: remove_beads/demo/demo_remove_beads.py
```python
import click
import numpy as np
from arbol import asection
from dexp.processing.remove_beads.beadsremover import remove_beads_by_threshold
from dexp.utils.backends import CupyBackend
@click.command()
@click.argument("input_path", nargs=1)
@click.option("--zero-level", "-zl", default=40, help="Camera noise zero level.")
def main(input_path, zero_level):
import napari
viewer = napari.Viewer()
(layer,) = viewer.open(input_path, name="original data")
with CupyBackend() as bk:
image = bk.to_backend(layer.data)
image = np.clip(image, zero_level, None)
image -= zero_level
with asection("Removing beads"):
clean = remove_beads_by_threshold(image)
clean = bk.to_numpy(clean)
viewer.add_image(clean, name="without beads")
napari.run()
if __name__ == "__main__":
main()
``` |
{
"source": "JoOkuma/DifferentiableSketching",
"score": 2
} |
#### File: dsketch/datasets/quickdraw.py
```python
from typing import Callable, Optional
import torch
from PIL import Image, ImageDraw
from quickdraw import QuickDrawData, QuickDrawing, QuickDrawDataGroup
from torch.types import Number
from torch.utils.data.dataset import Dataset
from torchvision.transforms.functional import to_tensor
from ..raster.composite import softor
from ..raster.disttrans import line_edt2
from ..raster.raster import exp, nearest_neighbour, compute_nearest_neighbour_sigma_bres
# pytorch dataset with all Quickdraw classes, with default of 1000 samples per class
class QuickDrawDataset(Dataset):
def __init__(self,
recognized: Optional[bool] = None,
transform: Callable[[QuickDrawing], torch.Tensor] = None):
self.qd = QuickDrawData()
self.qd_class_names = self.qd.drawing_names
# dictionary of QuickDrawDataGroups based on all possible names, loads 1000 examples from each class, but can
# be changed by specifying max_drawings
self.qd_DataGroups = {name: QuickDrawDataGroup(name, recognized=recognized) for name in self.qd_class_names}
if transform is None:
self.transform = lambda x: x
else:
self.transform = transform
def __getitem__(self, index):
class_index = index//1000
insideclass_index = index%1000
datagroup = self.qd_DataGroups[self.qd_class_names[class_index]]
return self.transform(datagroup.get_drawing(insideclass_index))
def __len__(self):
return len(self.qd_class_names)*1000
# pytorch dataset for a quickdraw data group; uses a 'transform' to convert the
# QuickDrawing object into something more useful
class QuickDrawDataGroupDataset(Dataset):
def __init__(self,
name: str,
max_drawings: int = 1000,
recognized: Optional[bool] = None,
transform: Callable[[QuickDrawing], torch.Tensor] = None):
self.ds = QuickDrawDataGroup(name, max_drawings=max_drawings, recognized=recognized)
if transform is None:
self.transform = lambda x: x
else:
self.transform = transform
def __getitem__(self, index):
return self.transform(self.ds.get_drawing(index))
def __len__(self):
return self.ds.drawing_count
def get_line_segments(qd: QuickDrawing) -> torch.Tensor:
"""Convert a QuickDrawing to a tensor of line segment parameters.
Returned coordinates are in the default [0..256, 0..256] range used by QuickDraw.
Args:
qd: the sketch data to convert
Returns:
A tensor of shape [N, 2, 2] where N is the number of line segments and each row contains
[[start_i, start_j], [end_i, end_j]].
"""
pts = []
for stroke in qd.strokes:
start = stroke[0]
for i in range(1, len(stroke)):
end = stroke[i]
pts.append([[start[1], start[0]], [end[1], end[0]]]) # swap x,y to i,j
start = end
params = torch.tensor(pts, dtype=torch.get_default_dtype())
return params
# class to rasterise a QuickDraw image using the dsketch machinery
class QuickDrawRasterise:
def __init__(self, hard: bool = True, sigma: Number = None, device=None):
a = torch.linspace(0, 255, 256)
grid = torch.meshgrid(a, a)
self.grid = torch.stack(grid, dim=2)
self.hard = hard
self.device = device
if sigma is None and hard is False:
raise ValueError("Sigma must be set for soft rasterisation")
if sigma is None and hard is True:
sigma = compute_nearest_neighbour_sigma_bres(self.grid)
self.sigma = sigma
def __call__(self, qd: QuickDrawing) -> torch.Tensor:
params = get_line_segments(qd).to(self.device)
edts = line_edt2(params, self.grid)
if self.hard:
ras = nearest_neighbour(edts, self.sigma)
else:
ras = exp(edts, self.sigma)
# Render image (compositions work on a batch, so adding extra dim); this extra dim becomes the channel dim
img = softor(ras.unsqueeze(0))
return img
# class to rasterise a QuickDraw image using PIL
class QuickDrawRasterisePIL:
def __init__(self, ret_pil=False, stroke_width=1):
self.stroke_width = stroke_width
self.ret_pil = ret_pil
def __call__(self, qd: QuickDrawing) -> torch.Tensor:
image = Image.new("L", (256, 256), color=0)
image_draw = ImageDraw.Draw(image)
for stroke in qd.strokes:
image_draw.line(stroke, fill=255, width=self.stroke_width)
if self.ret_pil:
return image
return to_tensor(image)
```
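
Putting the pieces above together: the dataset classes stream sketches from Google's QuickDraw collection and the rasterisers turn each `QuickDrawing` into an image tensor, so they drop straight into a `DataLoader`. The import path below is assumed from the file location, and downloading the drawings requires network access.

```python
from torch.utils.data import DataLoader

from dsketch.datasets.quickdraw import (  # assumed module path
    QuickDrawDataGroupDataset,
    QuickDrawRasterisePIL,
)

dataset = QuickDrawDataGroupDataset(
    name="cat",
    max_drawings=100,
    transform=QuickDrawRasterisePIL(stroke_width=2),
)
loader = DataLoader(dataset, batch_size=8, shuffle=True)

batch = next(iter(loader))
print(batch.shape)  # (8, 1, 256, 256), values in [0, 1]
```
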
#### File: characters/models/__init__.py
```python
import torchbearer
from dsketch.experiments.shared.utils import list_class_names
from .encoders import *
from .model_bases import get_model
from .recurrent_decoders import *
from .single_pass_decoders import *
MU, LOGVAR = torchbearer.state_key('mu'), torchbearer.state_key('logvar')
class AutoEncoder(nn.Module):
def __init__(self, encoder, decoder):
super().__init__()
self.encoder = encoder
self.decoder = decoder
def get_feature(self, x):
return self.encoder(x, None)
def forward(self, x, state=None):
x = self.encoder(x, state)
return self.decoder(x, state)
def get_callbacks(self, args):
return [*self.encoder.get_callbacks(args), *self.decoder.get_callbacks(args)]
class VariationalAutoEncoder(nn.Module):
def __init__(self, encoder, decoder, latent_size):
super().__init__()
self.encoder = encoder
self.decoder = decoder
self.latent_size = latent_size # note enc will be made to emit 2x latent size output & we'll split
# Sampling function (using the reparameterisation trick)
def _sample(self, mu, log_sigma2):
if self.training:
eps = torch.randn(mu.shape, device=mu.device)
return mu + torch.exp(log_sigma2 / 2) * eps
else:
return mu
def get_feature(self, x):
return self.encoder(x, None)[:, 0:self.latent_size]
def forward(self, x, state=None):
x = self.encoder(x)
state[MU] = x[:, 0:self.latent_size]
        state[LOGVAR] = x[:, self.latent_size:]
z = self._sample(state[MU], state[LOGVAR])
images = self.decoder(z, state)
return images
def get_callbacks(self, args):
return [*self.encoder.get_callbacks(args), *self.decoder.get_callbacks(args)]
def model_choices(clz):
return list_class_names(clz, __package__)
```
#### File: experiments/shared/metrics.py
```python
import warnings
import torch
from torchbearer import Metric
import torchbearer
from torchbearer.metrics import CategoricalAccuracy, mean, running_mean
from dsketch.experiments.shared import utils
from dsketch.losses import chamfer
from dsketch.utils.mod_haussdorff import binary_image_to_points, mod_hausdorff_distance
HARDRASTER = torchbearer.state_key('hardraster')
SQ_DISTANCE_TRANSFORM = torchbearer.state_key('squared_distance_transform')
Y_PRED_CLASSES = torchbearer.state_key('y_pred_classes')
@running_mean
@mean
class ClassificationMetric(Metric):
def __init__(self, classification_model):
super().__init__("recon_class_acc")
self.classification_model = classification_model
def process(self, state):
y_pred = self.classification_model(state[torchbearer.Y_PRED]) # take the reconstruction and classify it
y_true = state[utils.ORIGINAL_Y_TRUE]
if len(y_true.shape) == 2:
_, y_true = torch.max(y_true, 1)
_, y_pred = torch.max(y_pred, 1)
return (y_pred == y_true).float()
@running_mean
@mean
class ChamferMetric(Metric):
def __init__(self):
super().__init__('chamfer')
self.warn = False
def process(self, state):
if not self.warn and not (state[torchbearer.Y_TRUE] == state[torchbearer.Y_TRUE].bool()).all():
warnings.warn('Using chamfer distance on non-binary target images does not make sense')
self.warn = True
inp = state[SQ_DISTANCE_TRANSFORM]
tgt = state[torchbearer.Y_TRUE]
return chamfer(inp, tgt, dt2_fcn=None, ras_fcn=None, symmetric=False)
@running_mean
@mean
class ModifiedHausdorffMetric(Metric):
def __init__(self):
super().__init__('modified_hausdorff')
self.warn = False
def process(self, state):
if not self.warn and not (state[torchbearer.Y_TRUE] == state[torchbearer.Y_TRUE].bool()).all():
warnings.warn('Using modified Hausdorff distance on non-binary target images does not make sense')
self.warn = True
pred = binary_image_to_points(state[HARDRASTER])
target = binary_image_to_points(state[torchbearer.Y_TRUE])
return mod_hausdorff_distance(pred, target)
```
#### File: dsketch/utils/gaussian.py
```python
import math
import numbers
from typing import Union, Optional, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.types import _int, _size, Number
# noinspection PyTypeChecker
def gaussian_kernel(sigma: Union[_int, _size], dim: _int = 2, kernel_size: Optional[Union[_int, _size]] = None) \
-> torch.Tensor:
"""Compute a n-dimensional Gaussian kernel.
The created Gaussian is axis-aligned, but need not be isotropic.
Args:
sigma: The standard deviation of the Gaussian along each dimension. Can be a single int for an isotropic
kernel or tuple with dim elements.
dim: Number of dimensions.
kernel_size: The size of the kernel tensor. If None it will be set at the next odd integer above
floor(8*sigma+1). If it in an int the kernel will have the same size in all dimensions. Can also be a tuple
with dim elements for a non-square kernel.
Returns:
Gaussian kernel tensor
"""
if kernel_size is None:
kernel_size = int(8 * sigma + 1)
if kernel_size % 2 == 0:
kernel_size += 1
if sigma == 0:
kernel = torch.zeros([kernel_size] * dim)
kernel[(int(kernel_size / 2),) * dim] = 1
return kernel
if isinstance(kernel_size, numbers.Number):
kernel_size = [kernel_size] * dim
if isinstance(sigma, numbers.Number):
sigma = [sigma] * dim
# The kernel is the product of the gaussian of each dimension.
kernel = 1
    meshgrids = torch.meshgrid(
[
torch.arange(size, dtype=torch.float32)
for size in kernel_size
]
)
for size, std, mgrid in zip(kernel_size, sigma, meshgrids):
mean = (size - 1) / 2
kernel *= 1 / (std * math.sqrt(2 * math.pi)) * torch.exp(-((mgrid - mean) / std) ** 2 / 2)
# Normalise sum to 1
kernel = kernel / torch.sum(kernel)
return kernel
def gaussian_pyramid_kernels(octaves: _int = 3, intervals: _int = 1, init_sigma: Number = 0.5, dim: _int = 2,
downsample: bool = False, interval_oversample: _int = 0):
"""Compute gaussian kernels required to incrementally build a pyramid.
Each kernel increases the scale of the input which it is convolved against from its current scale to the next in the
pyramid. Pyramids are defined by octaves (number of doublings of sigma from the init_sigma) and intervals which
defines how many steps are taken within an octave.
Args:
octaves: Number of octaves in the pyramid. Each octave represents a doubling of sigma.
intervals: Number of intervals to break each octave into
init_sigma: Standard deviation blurring of the input images. Default of 0.5 is the minimum that would be
possible without aliasing
dim: number of dimensions for the gaussian
downsample: if true then each octave is subsampled by taking every other pixel; this also halves the blurring
required after each octave
interval_oversample: extra intervals to add beyond the point of doubling
Returns:
A list of kernel tensors which incrementally increase the scale of the image to which they are applied. Note
that these kernels should be applied one after the other to be used correctly: i*k1, i*k1*k2, i*k1*k2*k3, ...
"""
prev_sigma = init_sigma
kernels = []
for j in range(octaves):
for i in range(intervals + interval_oversample):
k = 2 ** (1 / intervals)
sigma = prev_sigma * math.sqrt(k * k - 1) # this is the amount to increase by
prev_sigma = prev_sigma * k
kernels.append(gaussian_kernel(sigma, dim=dim, kernel_size=None))
if downsample:
prev_sigma = init_sigma # downsampling means that effectively each octave starts at init_sigma
else:
if interval_oversample == 0:
assert(abs(prev_sigma - init_sigma * 2 ** (j + 1)) < 1e-7)
prev_sigma = init_sigma * 2 ** (j + 1)
return kernels
def _get_conv(dim):
if dim == 1:
return F.conv1d
elif dim == 2:
return F.conv2d
elif dim == 3:
return F.conv3d
else:
raise RuntimeError(
'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim)
)
class GaussianScaleSpace(nn.Module):
"""Build a Gaussian scale space from a 1d, 2d or 3d tensor.
Filtering is performed separately for each channel in the input using a depthwise convolution.
Optionally downsamples each octave to form a pyramid.
Args:
channels: Number of channels of the input tensors. Output will have this number of channels as well.
octaves: Number of octaves in the pyramid. Each octave represents a doubling of sigma.
intervals: Number of intervals to break each octave into
init_sigma: Standard deviation blurring of the input images. Default of 0.5 is the minimum that would be
possible without aliasing
dim: number of dimensions for the gaussian
downsample: if true then each octave is subsampled by taking every other pixel
interval_oversample: extra intervals to add beyond the point of doubling
Shape:
- Input: :math:`(N, C, *)` where :math:`*` means dim number of additional dimensions
- Output: List of octaves*intervals tensors. If downsample is False, they will each have the same
size as the input tensor. If downsample is True then the first intervals tensors will have the same size
as the input, the next intervals tensors will have half the size in the * dimensions and so forth.
"""
def __init__(self, channels: _int, octaves: _int = 3, intervals: _int = 1, init_sigma: Number = 0.5, dim: _int = 2,
downsample: bool = False, interval_oversample: _int = 0):
super(GaussianScaleSpace, self).__init__()
self.octaves = octaves
self.intervals = intervals
self.groups = channels
self.dim = dim
self.downsample = downsample
self.interval_oversample = interval_oversample
flatkernels = gaussian_pyramid_kernels(octaves, intervals, init_sigma, dim, downsample, interval_oversample)
for i in range(len(flatkernels)):
kernel = flatkernels[i]
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight' + str(i), kernel)
self.conv = _get_conv(dim)
def forward(self, inp: torch.Tensor) -> List[torch.Tensor]:
result = [inp]
c = 0
for o in range(self.octaves):
for i in range(self.intervals + self.interval_oversample):
weight = self.__getattr__('weight' + str(c))
p = int(weight.shape[-1] / 2)
padded = F.pad(result[-1], (p, p) * self.dim)
img = self.conv(padded, weight=weight, groups=self.groups)
result.append(img)
c += 1
if self.downsample:
idx = -1 - self.interval_oversample
if self.dim == 1:
result.append(result[idx][..., 0::2])
if self.dim == 2:
result.append(result[idx][..., 0::2, 0::2])
if self.dim == 3:
result.append(result[idx][..., 0::2, 0::2, 0::2])
return result
class GaussianBlur(nn.Module):
"""Apply gaussian smoothing on a 1d, 2d or 3d tensor.
Filtering is performed separately for each channel in the input using a depthwise convolution.
Args:
channels: Number of channels of the input tensors. Output will have this number of channels as well.
sigma: Standard deviation of gaussian blurring kernel.
dim: number of dimensions for the gaussian.
Shape:
- Input: :math:`(N, C, *)` where :math:`*` means dim number of additional dimensions
- Output: :math:`(N, C, *)` where :math:`*` means dim number of additional dimensions
"""
def __init__(self, channels: _int, sigma: Number, dim: _int = 2):
super(GaussianBlur, self).__init__()
self.groups = channels
self.dim = dim
kernel = gaussian_kernel(sigma, dim=dim)
kernel = kernel.view(1, 1, *kernel.size())
kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1))
self.register_buffer('weight', kernel)
self.conv = _get_conv(dim)
def forward(self, inp: torch.Tensor) -> torch.Tensor:
p = int(self.weight.shape[-1] / 2)
padded = F.pad(inp, (p, p) * self.dim)
result = self.conv(padded, weight=self.weight, groups=self.groups)
return result
class DoGScaleSpace(GaussianScaleSpace):
"""Build a Difference-of-Gaussian scale space from a 1d, 2d or 3d tensor.
Filtering is performed separately for each channel in the input using a depthwise convolution.
Optionally downsamples each octave to form a pyramid.
Args:
channels: Number of channels of the input tensors. Output will have this number of channels as well.
octaves: Number of octaves in the pyramid. Each octave represents a doubling of sigma.
intervals: Number of intervals to break each octave into
init_sigma: Standard deviation blurring of the input images. Default of 0.5 is the minimum that would be
possible without aliasing
dim: number of dimensions for the gaussian
downsample: if true then each octave is subsampled by taking every other pixel
interval_oversample: extra intervals to add beyond the point of doubling
Shape:
- Input: :math:`(N, C, *)` where :math:`*` means dim number of additional dimensions
- Output: List of octaves*intervals tensors. If downsample is False, they will each have the same
size as the input tensor. If downsample is True then the first intervals tensors will have the same size
as the input, the next intervals tensors will have half the size in the * dimensions and so forth.
"""
def __init__(self, channels: _int, octaves: _int = 3, intervals: _int = 1, init_sigma: Number = 0.5, dim: _int = 2,
downsample: bool = False, interval_oversample: _int = 1):
super(DoGScaleSpace, self).__init__(channels, octaves, intervals, init_sigma, dim, downsample, interval_oversample)
def forward(self, inp: torch.Tensor) -> List[torch.Tensor]:
gauss = super(DoGScaleSpace, self).forward(inp)
result = []
c = 1
for o in range(self.octaves):
for i in range(self.intervals + self.interval_oversample):
prev = gauss[c-1]
curr = gauss[c]
if prev.shape == curr.shape:
result.append(prev - curr)
c += 1
return result
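

if __name__ == "__main__":
    # Editor's usage sketch (not part of the original module): blur a random
    # image batch and build a small difference-of-Gaussian scale space with
    # the modules defined above. Shapes and values are illustrative only.
    img = torch.rand(2, 3, 64, 64)  # (N, C, H, W)

    blur = GaussianBlur(channels=3, sigma=2.0, dim=2)
    print(blur(img).shape)          # same spatial size as the input

    dog = DoGScaleSpace(channels=3, octaves=2, intervals=2, init_sigma=0.5, dim=2)
    levels = dog(img)
    print(len(levels), [tuple(level.shape) for level in levels])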
``` |
{
"source": "JoOkuma/napari-interactive-segm",
"score": 2
} |
#### File: napari-interactive-segm/napari_interactive_segm/main.py
```python
import click
import napari
from tifffile import imsave, imread
import numpy as np
from pathlib import Path
@click.command()
@click.option('--im-dir', '-i', required=True, help='Input images directory.')
@click.option('--mk-dir', '-m', required=True, help='Output markers images directory.')
@click.option('--lb-dir', '-l', required=True, help='Output labels images directory')
def main(im_dir: str, mk_dir: str, lb_dir: str) -> None:
im_dir = Path(im_dir)
mk_dir = Path(mk_dir)
lb_dir = Path(lb_dir)
mk_dir.mkdir(exist_ok=True)
lb_dir.mkdir(exist_ok=True)
contrast_limits = (0, 1500)
gamma = 1.0
for im_path in im_dir.glob('*.tif*'):
viewer = napari.Viewer()
viewer.window.add_plugin_dock_widget("napari-interactive-segm")
im = imread(im_path)
mk = np.zeros_like(im, dtype=int)
lb = np.zeros_like(im, dtype=int)
im_layer = viewer.add_image(im, contrast_limits=contrast_limits, gamma=gamma)
lb_layer = viewer.add_labels(lb)
mk_layer = viewer.add_labels(mk)
mk_layer.brush_size = 1
viewer.show(block=True)
contrast_limits = im_layer.contrast_limits
gamma = im_layer.gamma
imsave(lb_dir / im_path.name, lb_layer.data)
imsave(mk_dir / im_path.name, mk_layer.data)
if __name__ == '__main__':
main()
``` |
{
"source": "JoOkuma/napari-plugin-debug",
"score": 2
} |
#### File: napari-plugin-debug/napari_plugin_debug/_writer.py
```python
from typing import Any, Dict, Optional
from napari_plugin_engine import napari_hook_implementation
@napari_hook_implementation
def napari_get_writer():
pass
@napari_hook_implementation
def napari_write_image():
pass
@napari_hook_implementation
def napari_write_tracks(path: str, data: Any, meta: Dict) -> str:
with open(path, mode='w') as f:
for row in data:
row = [str(elem) for elem in row]
f.write(','.join(row) + '\n')
return path
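

if __name__ == "__main__":
    # Editor's usage sketch (not part of the original plugin): the tracks
    # writer above is a plain CSV dump, so it can be exercised directly
    # without napari. The file name and track rows are illustrative only.
    tracks = [
        [0, 0, 10.0, 20.0],  # track_id, t, y, x
        [0, 1, 11.0, 21.0],
    ]
    print(napari_write_tracks("tracks.csv", tracks, {}))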
``` |
{
"source": "JoOkuma/PyIFT",
"score": 3
} |
#### File: demo/DGCI/segment.py
```python
import pyift.pyift as ift
import argparse
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_io_img(s):
if '.png' in s or '.pgm' in s or '.jpg' in s or '.ppm' in s:
return s
else:
raise argparse.ArgumentTypeError('Image must be a .ppm, .pgm, .png or .jpg file.')
def check_aux_img(s):
if '.pgm' in s or s == '':
return s
else:
raise argparse.ArgumentTypeError('Auxiliary images must be a .pgm file.')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='Image segmentation using Dynamic Trees.')
parser.add_argument('-i', '--input-image', type=check_io_img, required=True)
parser.add_argument('-m', '--markers-image', type=check_aux_img, required=True)
parser.add_argument('-o', '--output-image', type=check_io_img, default='output.pgm', required=False)
parser.add_argument('-c', '--closest-root', type=str2bool, default='false', required=False)
parser.add_argument('-gm', '--gamma', type=float, default=0.0, required=False)
parser.add_argument('-s', '--saliency-image', type=check_aux_img, default='', required=False)
parser.add_argument('-a', '--alpha', type=float, default=0.5, required=False,
help='Alpha needs to be tuned according to original image and saliency depth.')
parser.add_argument('-d', '--delta', type=int, default=0, required=False)
args = parser.parse_args()
if args.gamma < 0.0:
raise argparse.ArgumentTypeError('Gamma must be greater than 0.0.')
if args.alpha < 0.0 or args.alpha > 1.0:
raise argparse.ArgumentTypeError('Alpha must be between 0.0 and 1.0.')
orig = ift.ReadImageByExt(args.input_image)
mrk = ift.ReadImageByExt(args.markers_image)
seeds = ift.LabeledSetFromSeedImage(mrk, True)
if args.saliency_image != '':
objmap = ift.ReadImageByExt(args.saliency_image)
else:
objmap = None
mimg = ift.ImageToMImage(orig, ift.LABNorm_CSPACE)
A = ift.Circular(1.0)
if args.closest_root:
segm = ift.DynTreeClosestRoot(mimg, A, seeds, args.delta, args.gamma, objmap, args.alpha)
else:
segm = ift.DynTreeRoot(mimg, A, seeds, args.delta, args.gamma, objmap, args.alpha)
ift.WriteImageByExt(ift.Normalize(segm, 0, 255), args.output_image)
``` |
{
"source": "JoOkuma/pytorch-metric-learning",
"score": 2
} |
#### File: pytorch_metric_learning/losses/fast_ap_loss.py
```python
import torch
from .base_metric_loss_function import BaseMetricLossFunction
from ..utils import loss_and_miner_utils as lmu
class FastAPLoss(BaseMetricLossFunction):
def __init__(self, num_bins, **kwargs):
super().__init__(**kwargs)
self.num_bins = int(num_bins)
self.num_edges = self.num_bins + 1
"""
Adapted from https://github.com/kunhe/FastAP-metric-learning
"""
def compute_loss(self, embeddings, labels, indices_tuple):
miner_weights = lmu.convert_to_weights(indices_tuple, labels)
N = labels.size(0)
a1_idx, p_idx, a2_idx, n_idx = lmu.get_all_pairs_indices(labels)
I_pos = torch.zeros(N, N).to(embeddings.device)
I_neg = torch.zeros(N, N).to(embeddings.device)
I_pos[a1_idx, p_idx] = 1
I_neg[a2_idx, n_idx] = 1
N_pos = torch.sum(I_pos, dim=1)
dist_mat = lmu.dist_mat(embeddings, squared=True)
histogram_max = 4. if self.normalize_embeddings else torch.max(dist_mat).item()
histogram_delta = histogram_max / self.num_bins
mid_points = torch.linspace(0., histogram_max, steps=self.num_edges).view(-1,1,1).to(embeddings.device)
pulse = torch.nn.functional.relu(1 - torch.abs(dist_mat-mid_points)/histogram_delta)
pos_hist = torch.t(torch.sum(pulse * I_pos, dim=2))
neg_hist = torch.t(torch.sum(pulse * I_neg, dim=2))
total_pos_hist = torch.cumsum(pos_hist, dim=1)
total_hist = torch.cumsum(pos_hist + neg_hist, dim=1)
loss = 0
h_pos_product = pos_hist * total_pos_hist
safe_H = (h_pos_product > 0) & (total_hist > 0)
if torch.sum(safe_H) > 0:
FastAP = torch.zeros_like(pos_hist).to(embeddings.device)
FastAP[safe_H] = h_pos_product[safe_H] / total_hist[safe_H]
FastAP = torch.sum(FastAP, dim=1)
safe_N = (N_pos > 0)
if torch.sum(safe_N) > 0:
FastAP = FastAP[safe_N] / N_pos[safe_N]
FastAP = FastAP * miner_weights[safe_N]
loss = 1 - torch.mean(FastAP)
return loss
```
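
As a quick sanity check of the loss above, the sketch below runs it on random embeddings and labels. It assumes that this version of the library exposes `FastAPLoss` from `pytorch_metric_learning.losses` and that the base class forward accepts `(embeddings, labels)`; the batch size, embedding dimension and `num_bins` are arbitrary.

```python
import torch
from pytorch_metric_learning.losses import FastAPLoss  # assumed import path

loss_fn = FastAPLoss(num_bins=10)

embeddings = torch.randn(32, 128, requires_grad=True)
labels = torch.randint(0, 4, (32,))  # 4 classes, so positive pairs exist

loss = loss_fn(embeddings, labels)   # indices_tuple defaults to None (no miner)
loss.backward()
print(loss.item())
```
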
#### File: pytorch_metric_learning/losses/lifted_structure_loss.py
```python
import torch
from .generic_pair_loss import GenericPairLoss
class GeneralizedLiftedStructureLoss(GenericPairLoss):
# The 'generalized' lifted structure loss shown on page 4
# of the "in defense of triplet loss" paper
# https://arxiv.org/pdf/1703.07737.pdf
def __init__(self, neg_margin, **kwargs):
self.neg_margin = neg_margin
super().__init__(use_similarity=False, iterate_through_loss=True, **kwargs)
def pair_based_loss(self, pos_pairs, neg_pairs, pos_pair_anchor_labels, neg_pair_anchor_labels):
neg_margin = self.maybe_mask_param(self.neg_margin, neg_pair_anchor_labels)
loss = torch.tensor(0.).to(pos_pairs.device)
if len(pos_pairs) > 0:
loss += torch.logsumexp(pos_pairs, dim=0)
if len(neg_pairs) > 0:
loss += torch.logsumexp(neg_margin - neg_pairs, dim=0)
return torch.mean(torch.relu(loss))
```
#### File: pytorch_metric_learning/losses/nca_loss.py
```python
from .base_metric_loss_function import BaseMetricLossFunction
from ..utils import loss_and_miner_utils as lmu
import torch
class NCALoss(BaseMetricLossFunction):
def __init__(self, softmax_scale=1, **kwargs):
super().__init__(**kwargs)
self.softmax_scale = softmax_scale
# https://www.cs.toronto.edu/~hinton/absps/nca.pdf
def compute_loss(self, embeddings, labels, indices_tuple):
return self.nca_computation(embeddings, embeddings, labels, labels, indices_tuple)
def nca_computation(self, query, reference, query_labels, reference_labels, indices_tuple):
miner_weights = lmu.convert_to_weights(indices_tuple, query_labels)
x = -lmu.dist_mat(query, reference, squared=True)
if query is reference:
diag_idx = torch.arange(query.size(0))
x[diag_idx, diag_idx] = float('-inf')
same_labels = (query_labels.unsqueeze(1) == reference_labels.unsqueeze(0)).float()
exp = torch.nn.functional.softmax(self.softmax_scale*x, dim=1)
exp = torch.sum(exp * same_labels, dim=1)
exp = exp * miner_weights
non_zero_prob = torch.masked_select(exp, exp != 0)
return -torch.mean(torch.log(non_zero_prob))
```
#### File: pytorch_metric_learning/utils/calculate_accuracies.py
```python
import numpy as np
from sklearn.metrics import normalized_mutual_info_score
import warnings
from . import stat_utils
METRICS = ["NMI", "precision_at_1", "r_precision", "mean_average_r_precision"]
def get_relevance_mask(shape, gt_labels, embeddings_come_from_same_source=False, label_counts=None):
# This assumes that k was set to at least the max number of relevant items
if label_counts is None:
label_counts = {k:v for k,v in zip(*np.unique(gt_labels, return_counts=True))}
    relevance_mask = np.zeros(shape=shape, dtype=int)  # builtin int: np.int is deprecated
for k,v in label_counts.items():
matching_rows = np.where(gt_labels==k)[0]
max_column = v-1 if embeddings_come_from_same_source else v
relevance_mask[matching_rows, :max_column] = 1
return relevance_mask
def r_precision(knn_labels, gt_labels, embeddings_come_from_same_source=False, label_counts=None):
relevance_mask = get_relevance_mask(knn_labels.shape, gt_labels, embeddings_come_from_same_source, label_counts)
matches_per_row = np.sum((knn_labels == gt_labels) * relevance_mask.astype(bool), axis=1)
max_possible_matches_per_row = np.sum(relevance_mask, axis=1)
return np.mean(matches_per_row / max_possible_matches_per_row)
def mean_average_r_precision(knn_labels, gt_labels, embeddings_come_from_same_source=False, label_counts=None):
relevance_mask = get_relevance_mask(knn_labels.shape, gt_labels, embeddings_come_from_same_source, label_counts)
num_samples, num_k = knn_labels.shape
equality = (knn_labels == gt_labels) * relevance_mask.astype(bool)
cumulative_correct = np.cumsum(equality, axis=1)
k_idx = np.tile(np.arange(1, num_k + 1), (num_samples, 1))
precision_at_ks = (cumulative_correct * equality) / k_idx
summed_precision_per_row = np.sum(precision_at_ks * relevance_mask, axis=1)
max_possible_matches_per_row = np.sum(relevance_mask, axis=1)
return np.mean(summed_precision_per_row / max_possible_matches_per_row)
def precision_at_k(knn_labels, gt_labels, k):
"""
Precision at k is the percentage of k nearest neighbors that have the correct
label.
Args:
knn_labels: numpy array of size (num_samples, k)
gt_labels: numpy array of size (num_samples, 1)
"""
curr_knn_labels = knn_labels[:, :k]
precision = np.mean(np.sum(curr_knn_labels == gt_labels, axis=1) / k)
return precision
def mean_average_precision(knn_labels, gt_labels):
"""
See this for an explanation:
https://web.stanford.edu/class/cs276/handouts/EvaluationNew-handout-1-per.pdf
"""
num_samples, num_k = knn_labels.shape
equality = knn_labels == gt_labels
num_correct_per_row = np.sum(equality, axis=1)
cumulative_correct = np.cumsum(equality, axis=1)
k_idx = np.tile(np.arange(1, num_k + 1), (num_samples, 1))
precision_at_ks = (cumulative_correct * equality) / k_idx
summed_precision_per_row = np.sum(precision_at_ks, axis=1)
with np.errstate(divide='ignore', invalid='ignore'):
avg_precision_per_row = summed_precision_per_row / num_correct_per_row
avg_precision_per_row[np.isnan(avg_precision_per_row)] = 0
return np.mean(avg_precision_per_row)
def NMI(input_embeddings, gt_labels):
"""
Returns NMI and also the predicted labels from k-means
"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
num_clusters = len(set(gt_labels.flatten()))
pred_labels = stat_utils.run_kmeans(input_embeddings, num_clusters)
nmi = normalized_mutual_info_score(gt_labels, pred_labels)
return nmi, pred_labels
def compute_accuracies(query_embeddings, knn_labels, query_labels, embeddings_come_from_same_source, label_counts):
"""
Computes clustering quality of query_embeddings.
Computes various retrieval scores given knn_labels (labels of nearest neighbors)
and the ground-truth labels of the query embeddings.
Returns the results in a dictionary.
"""
accuracies = {}
accuracies["NMI"] = NMI(query_embeddings, query_labels)[0]
accuracies["precision_at_1"] = precision_at_k(knn_labels, query_labels[:, None], 1)
accuracies["r_precision"] = r_precision(knn_labels, query_labels[:, None], embeddings_come_from_same_source, label_counts)
accuracies["mean_average_r_precision"] = mean_average_r_precision(knn_labels, query_labels[:, None], embeddings_come_from_same_source, label_counts)
return accuracies
def calculate_accuracy(
query,
reference,
query_labels,
reference_labels,
embeddings_come_from_same_source,
):
"""
Gets k nearest reference embeddings for each element of query.
Then computes various accuracy metrics.
"""
embeddings_come_from_same_source = embeddings_come_from_same_source or (query is reference)
unique_labels, label_counts = np.unique(reference_labels, return_counts=True)
num_k = min(1023, int(np.max(label_counts))) # faiss can only do a max of k=1024, and we have to do k+1
label_counts = {k:v for k,v in zip(unique_labels, label_counts)}
knn_indices = stat_utils.get_knn(
reference,
query,
num_k,
embeddings_come_from_same_source
)
knn_labels = reference_labels[knn_indices]
return compute_accuracies(query, knn_labels, query_labels, embeddings_come_from_same_source, label_counts)
``` |
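A tiny worked example for `precision_at_k` defined above; it needs no faiss or k-means, and the arrays are made up for illustration.
```python
import numpy as np

# Two queries, three retrieved neighbour labels each.
knn_labels = np.array([[0, 0, 1],
                       [1, 0, 1]])
gt_labels = np.array([[0], [1]])

# Row 1 has 2/2 correct in the top 2, row 2 has 1/2, so the mean is 0.75.
print(precision_at_k(knn_labels, gt_labels, k=2))
```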
{
"source": "JoOkuma/torch-em",
"score": 2
} |
#### File: plantseg/ovules/train_contrastive_2d.py
```python
import argparse
import os
from glob import glob
from functools import partial
import torch
import torch_em
from elf.io import open_file
from torch_em.model import UNet2d
ROOT_TRAIN = '/g/kreshuk/wolny/Datasets/Ovules/GT2x/train'
ROOT_VAL = '/g/kreshuk/wolny/Datasets/Ovules/GT2x/val'
# exclude the volumes that don't fit
def get_paths(split, patch_shape, raw_key):
root = ROOT_TRAIN if split == 'train' else ROOT_VAL
paths = glob(os.path.join(root, '*.h5'))
paths = [p for p in paths if all(
sh >= psh for sh, psh in zip(open_file(p, 'r')[raw_key].shape, patch_shape)
)]
return paths
def get_loader(split, patch_shape, batch_size,
n_samples=None, roi=None):
raw_key = 'raw'
label_key = 'label'
paths = get_paths(split, patch_shape, raw_key)
sampler = torch_em.data.MinForegroundSampler(min_fraction=0.1, p_reject=1.)
label_transform = partial(torch_em.transform.label.connected_components, ensure_zero=True)
return torch_em.default_segmentation_loader(
paths, raw_key,
paths, label_key,
batch_size=batch_size,
patch_shape=patch_shape,
label_transform=label_transform,
sampler=sampler,
n_samples=n_samples,
num_workers=8*batch_size,
shuffle=True,
label_dtype=torch.int64,
ndim=2
)
def get_model(n_out):
model = UNet2d(
in_channels=1,
out_channels=n_out,
initial_features=64,
gain=2,
depth=4,
final_activation=None
)
return model
def train_contrastive(args):
model = get_model(args.embed_dim)
patch_shape = [1, 736, 688]
# can train with larger batch sizes for scatter
batch_size = 4 if args.impl == 'scatter' else 1
train_loader = get_loader(
split='train',
patch_shape=patch_shape,
batch_size=1,
n_samples=2500
)
val_loader = get_loader(
split='val',
patch_shape=patch_shape,
batch_size=1,
n_samples=100
)
loss = torch_em.loss.ContrastiveLoss(
delta_var=.75,
delta_dist=2.,
impl=args.impl
)
name = "embedding_model2d_" + args.impl + "_d" + str(args.embed_dim)
trainer = torch_em.default_segmentation_trainer(
name=name,
model=model,
train_loader=train_loader,
val_loader=val_loader,
loss=loss,
metric=loss,
learning_rate=5e-5,
mixed_precision=True,
log_image_interval=50
)
if args.from_checkpoint:
trainer.fit(args.iterations, 'latest')
else:
trainer.fit(args.iterations)
def check(train=True, val=True, n_images=5):
from torch_em.util.debug import check_loader
patch_shape = [1, 512, 512]
if train:
print("Check train loader")
loader = get_loader('train', patch_shape, batch_size=1)
check_loader(loader, n_images)
if val:
print("Check val loader")
loader = get_loader('val', patch_shape, batch_size=1)
check_loader(loader, n_images)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--impl', '-i', default='scatter')
parser.add_argument('--check', '-c', type=int, default=0)
parser.add_argument('--iterations', '-n', type=int, default=int(1e5))
parser.add_argument('-d', '--embed_dim', type=int, default=12)
parser.add_argument('--from_checkpoint', type=int, default=0)
args = parser.parse_args()
if args.check:
check(train=True, val=True)
else:
train_contrastive(args)
```
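For reference, a hedged sketch of launching the script above from Python. The flag names come from its argparse block; the values are examples only, and the hard-coded Ovules dataset paths must exist for training to start.
```python
import subprocess

# Example invocation of the training script (requires torch_em and elf).
subprocess.run([
    "python", "train_contrastive_2d.py",
    "--impl", "expand",        # "scatter" additionally needs pytorch_scatter
    "--embed_dim", "12",
    "--iterations", "100000",
])
```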
#### File: test/loss/test_contrastive.py
```python
import unittest
import numpy as np
import torch
from torch_em.loss import ContrastiveLoss
class TestContrastiveLoss(unittest.TestCase):
def _test_random(self, impl):
loss = ContrastiveLoss(delta_var=1., delta_dist=2., impl=impl)
target_shape = (1, 1, 32, 32)
pred_shape = (1, 8, 32, 32)
x = torch.rand(*pred_shape)
x.requires_grad = True
x.retain_grad = True
y = torch.randint(low=0, high=5, size=target_shape)
lval = loss(x, y)
self.assertNotEqual(lval.item(), 0.)
lval.backward()
grads = x.grad
self.assertEqual(grads.shape, x.shape)
self.assertFalse(np.allclose(grads.numpy(), 0))
def test_expand_random(self):
self._test_random('expand')
@unittest.skipUnless(ContrastiveLoss.has_torch_scatter(), "Need pytorch_scatter")
def test_scatter_random(self):
self._test_random('scatter')
def _test_deterministic(self, impl):
loss = ContrastiveLoss(delta_var=1., delta_dist=2., impl=impl)
target_shape = (1, 1, 32, 32)
pred_shape = (1, 8, 32, 32)
# this should give a small loss
y = torch.randint(low=0, high=5, size=target_shape)
x = 2 * y.expand(pred_shape).to(torch.float32)
lval = loss(x, y)
self.assertLess(lval.item(), 0.2)
# this should give a large loss
y = torch.randint(low=0, high=5, size=target_shape)
x = torch.rand(*pred_shape)
lval = loss(x, y)
self.assertGreater(lval.item(), 1.)
def test_expand_deterministic(self):
self._test_deterministic('expand')
@unittest.skipUnless(ContrastiveLoss.has_torch_scatter(), "Need pytorch_scatter")
def test_scatter_deterministic(self):
self._test_deterministic('scatter')
# TODO
# def _test_ignore(self, impl):
# loss = ContrastiveLoss(delta_var=1., delta_dist=2., impl=impl, ignore_label=0)
# target_shape = (1, 1, 32, 32)
# pred_shape = (1, 8, 32, 32)
# x = torch.rand(*pred_shape)
# x.requires_grad = True
# x.retain_grad = True
# y = torch.randint(low=0, high=5, size=target_shape)
# lval = loss(x, y)
# self.assertNotEqual(lval.item(), 0.)
# lval.backward()
# grads = x.grad
# self.assertEqual(grads.shape, x.shape)
# grads = grads.numpy()
# self.assertFalse(np.allclose(grads, 0))
# ignore_mask = y.numpy() == 0
# self.assertTrue(np.allclose(grads[:, ignore_mask]), 0)
# def test_expand_ignore(self):
# self._test_ignore('expand')
# @unittest.skipUnless(ContrastiveLoss.has_torch_scatter(), "Need pytorch_scatter")
# def test_scatter_ignore(self):
# self._test_ignore('scatter')
def _test_impls(self, device):
target_shape = (1, 1, 32, 32)
pred_shape = (1, 8, 32, 32)
x = torch.rand(*pred_shape).to(device)
x.requires_grad = True
x.retain_grad = True
y = torch.randint(low=0, high=5, size=target_shape).to(device)
# compute the loss for expand implementation
loss = ContrastiveLoss(delta_var=1., delta_dist=2., impl='expand')
lval1 = loss(x, y)
lval1.backward()
grad1 = x.grad.detach().cpu()
self.assertEqual(grad1.shape, x.shape)
self.assertFalse(np.allclose(grad1, 0))
# compute the loss for the scatter implementation
x.grad = None # clear the gradients
loss = ContrastiveLoss(delta_var=1., delta_dist=2., impl='scatter')
lval2 = loss(x, y)
lval2.backward()
# compare the results
self.assertAlmostEqual(lval1.item(), lval2.item(), places=5)
grad2 = x.grad.detach().cpu()
self.assertTrue(np.allclose(grad1, grad2, atol=1e-6))
@unittest.skipUnless(ContrastiveLoss.has_torch_scatter(), "Need pytorch_scatter")
def test_impls(self):
self._test_impls(torch.device('cpu'))
if torch.cuda.is_available():
self._test_impls(torch.device('cuda'))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "joollnl/ISO-DART",
"score": 3
} |
#### File: framework/CAISO/tool_utils.py
```python
import requests
import xml.etree.ElementTree as ET
import csv
import zipfile
import pdb
import io
import os
import datetime
import time
import pandas as pd
import sys
URL = 'http://oasis.caiso.com/oasisapi/SingleZip'
QUERY_DATE_FORMAT = '%Y%m%dT%H:%M-0000'
DATA_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S-00:00'
DATA_DIR = os.path.join(os.getcwd(), 'data')
RAW_DIR = os.path.join(os.getcwd(), 'raw_data')
XML_DIR = os.path.join(os.getcwd(), 'raw_data', 'xml_files')
def write_request(params):
# Perform request
r = requests.get(URL, params=params, stream=True, verify=True)
print(r.url)
# If request is successful
if r.ok:
z = zipfile.ZipFile(io.BytesIO(r.content))
for src_file_name in z.namelist():
dst_file_name = '_'.join(src_file_name.split('_')[0:2]) + '_' + params["queryname"] + '.xml'
fout = open(os.path.join(XML_DIR, dst_file_name), 'wb')
fout.write(z.read(src_file_name))
fout.close()
readXml = str(z.read(src_file_name))
try:
errCode = readXml.split('<m:ERR_CODE>')[1].split('</m:ERR_CODE>')[0]
errMessage = readXml.split('<m:ERR_DESC>')[1].split('</m:ERR_DESC>')[0]
print("WARNING!! ERROR CODE:" + errCode + "\t" + errMessage + "\nProgram End!! Please Try Again.")
errDetector = 1
except:
errDetector = 0
pass
if errDetector == 1:
sys.exit()
else:
print(r.text)
print("WARNING: Request failed!!! with:")
print(r.url)
return dst_file_name
def request_to_csv(xml_file_name, csv_file_name, report='{http://www.caiso.com/soa/OASISReport_v1.xsd}'):
caiso_report = report
# Parse the xml file
tree = ET.parse(xml_file_name)
root = tree.getroot()
# Open the csv file for writing, appending if it already exists
if os.path.isfile(csv_file_name):
build_header = False
csv_handle = open(csv_file_name, 'a')
else:
build_header = True
csv_handle = open(csv_file_name, 'w')
csv_writer = csv.writer(csv_handle)
header = []
try:
if root[1][0][2][0].tag == caiso_report + 'ERR_CODE':
error_code = root[1][0][2][0].text
print(error_code)
return False
except IndexError:
pass
for report in root.iter(caiso_report + 'REPORT_DATA'):
if build_header:
for col in report:
header.append(col.tag.replace(caiso_report, ""))
csv_writer.writerow(header)
build_header = False
row = []
for col in report:
row.append(col.text)
csv_writer.writerow(row)
csv_handle.close()
return True
def get_time_start_end(start_date, duration):
end_date = start_date + datetime.timedelta(days=duration)
time_start = start_date.tz_convert('UTC').strftime(QUERY_DATE_FORMAT)
time_end = end_date.tz_convert('UTC').strftime(QUERY_DATE_FORMAT)
return time_start, time_end
def merge_csv(query_name, date_col):
# Run this from the python prompt to merge all of the annual files
# With date_col as the index column of the date time (i.e., 4 or 5)
# Index should identify the interval end.
# For prices it is index 5
# For load forecast it is index 5,
# For renewable forecasts it's index 4
    dst_file_name = os.path.join(DATA_DIR, query_name) + '.csv'
# Open the dest file for writing, appending if it already exists
build_header = True
dst_handle = open(dst_file_name, 'w')
dst_writer = csv.writer(dst_handle)
for year in ['2013', '2014', '2015', '2016', '2017']:
src_file_name = os.path.join(RAW_DIR, query_name) + '_' + year + '.csv'
        src_handle = open(src_file_name, 'r')
src_reader = csv.reader(src_handle)
header = next(src_reader)
if build_header:
dst_writer.writerow(header)
build_header = False
year = int(year)
for row in src_reader:
if pd.to_datetime(row[date_col], format=DATA_DATE_FORMAT).tz_localize('UTC').tz_convert(
'US/Pacific').year == year:
dst_writer.writerow(row)
src_handle.close()
dst_handle.close()
def order_separate_csv(query_name, market=None):
df = pd.read_csv(os.path.join(RAW_DIR, '{}.csv'.format(query_name)))
if query_name == 'ENE_WIND_SOLAR_SUMMARY':
sorted_df = df.sort_values(['OPR_DATE'])
else:
sorted_df = df.sort_values(['OPR_DATE', 'INTERVAL_NUM'])
os.remove(os.path.join(RAW_DIR, '{}.csv'.format(query_name)))
start = min(df.OPR_DATE)
end = max(df.OPR_DATE)
items = []
for item in df.DATA_ITEM:
if item not in items:
items.append(item)
os.chdir(os.path.join(DATA_DIR, 'CAISO'))
for item in items:
temp_df = sorted_df[sorted_df['DATA_ITEM'] == item]
if market is None:
temp_df.to_csv('{}_to_{}_{}_{}.csv'.format(start, end, query_name, item), index=False)
else:
temp_df.to_csv('{}_to_{}_{}_{}_{}.csv'.format(start, end, market, query_name, item), index=False)
def copy_csv(query_name):
df = pd.read_csv(os.path.join(RAW_DIR, '{}.csv'.format(query_name)))
os.remove(os.path.join(RAW_DIR, '{}.csv'.format(query_name)))
start = min(df.OPR_DATE)
end = max(df.OPR_DATE)
os.chdir(os.path.join(DATA_DIR, 'CAISO'))
df.to_csv('{}_to_{}_{}.csv'.format(start, end, query_name), index=False)
```
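A rough end-to-end sketch of the helpers above. The OASIS query parameters are assumptions (the exact report name and required fields depend on the CAISO API), and the raw_data/xml_files folders must already exist, so treat this as illustrative only.
```python
import os
import pandas as pd

# Hypothetical single-day LMP download: build the time window, fetch the zip,
# then flatten the returned XML report into a CSV.
start = pd.Timestamp('2017-01-01', tz='US/Pacific')
time_start, time_end = get_time_start_end(start, duration=1)
params = {
    'queryname': 'PRC_LMP',      # assumed OASIS report name
    'startdatetime': time_start,
    'enddatetime': time_end,
    'version': '1',              # assumed API version
    'market_run_id': 'DAM',      # assumed market
}
xml_name = write_request(params)
request_to_csv(os.path.join(XML_DIR, xml_name), os.path.join(RAW_DIR, 'PRC_LMP.csv'))
```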
#### File: framework/NYISO/merge.py
```python
import os
import pandas as pd
from dateutil.relativedelta import relativedelta
def merge(path, dataid, start, duration):
destination = os.path.join(os.getcwd(), 'data', 'NYISO')
files = os.listdir(path)
files.sort()
date_list = []
for d in range(duration):
date = start + relativedelta(days=d)
datestring = str(date.year) + '{:02d}'.format(date.month) + '{:02d}'.format(date.day)
date_list.append(datestring)
selected_files = []
for f in files:
if f[0:8] in date_list:
selected_files.append(f)
if len(selected_files[0].split('_')) > 1:
suffix = selected_files[0].split('_')[-1].split('.')[0]
else:
suffix = ''
combined_csv = pd.concat([pd.read_csv(os.path.join(path, f)) for f in selected_files])
os.chdir(destination)
if len(suffix) > 0:
combined_csv.to_csv('{}_to_{}_{}_{}.csv'.format(date_list[0], date_list[-1], dataid, suffix), index=False)
else:
combined_csv.to_csv('{}_to_{}_{}.csv'.format(date_list[0], date_list[-1], dataid), index=False)
``` |
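A small usage sketch for `merge`. The folder layout and the `dataid` value are assumptions about how the raw NYISO files were downloaded, and the destination folder data/NYISO must exist.
```python
import os
from datetime import date

# Combine one week of previously downloaded daily CSVs into a single file.
raw_dir = os.path.join(os.getcwd(), 'raw_data', 'NYISO', 'pal')  # hypothetical path
merge(path=raw_dir, dataid='pal', start=date(2020, 1, 1), duration=7)
```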
{
"source": "joolsa/Automatically-Mapping-Ad-Targeting-Criteria-between-Online-Ad-Platforms",
"score": 3
} |
#### File: joolsa/Automatically-Mapping-Ad-Targeting-Criteria-between-Online-Ad-Platforms/WordNetAnalysis.py
```python
from sematch.semantic.similarity import WordNetSimilarity
import re
import pandas as pd
import codecs
import csv
from nltk.stem import WordNetLemmatizer
wordnet_lemmatizer = WordNetLemmatizer()
csvFile = codecs.open('S:/path/Stopwords.csv', 'rU', 'cp1251')
df = pd.read_csv(csvFile, sep=';',header=None)
stop = df[0].tolist()
stop = "\\b" + "\\b|\\b".join(stop) + "\\b"
# You can use these lists of categories for testing model parameters
categoryList1 = ["Digital Activities / Canvas Gaming / Plays 5 out of 7 days","Mobile Device User / LG / LG V10"]
categoryList2 = ["Apparel & Accessories","Consumer Electronics/Mobile Phones", "Consumer Electronics/Game Consoles"]
# OR
# full data for real calculations (beware that it's rather slow process):
# csvFile = codecs.open('S:/path/Behaviors.csv','rU','cp1251')
# df1 = pd.read_csv(csvFile, sep=';',header=0)
# df1 = df1.fillna('')
# categoryList1 = df1['Facebook'].tolist()
# csvFile = codecs.open('S:/path/In-market audiences.csv','rU','cp1251')
# df2 = pd.read_csv(csvFile, sep=';',header=0)
# df2 = df2.fillna('')
# categoryList2 = df2['Google'].tolist()
def cleanTexts(texts):
clean=[]
for t in texts:
t = str(t)
t=t.lower()
t = re.sub(" ?(f|ht)(tp)(s?)(://)(.*)[.|/](.*)", ' ', t)
t = re.sub("@\w+ ?", ' ', t)
t = re.sub("[^\w\s]|[\d]", ' ', t)
t = re.sub(stop, ' ', t)
t = re.sub("\s+", ' ', t)
t = t.split()
t = [w for w in t if w.isalpha()]
t = [wordnet_lemmatizer.lemmatize(w) for w in t]
clean.append(t)
return clean
cleanCleanCat1=cleanTexts(categoryList1)
cleanCleanCat2=cleanTexts(categoryList2)
wns = WordNetSimilarity()
similarCategories=[]
for cat in cleanCleanCat1:
sims=[]
for t in cleanCleanCat2:
TextSim=[]
for w in cat:
# wdsSim=[1 if w == wr else wns.word_similarity(w, wr, 'li') for wr in t]
wdsSim = [wns.word_similarity(w, wr, 'li') for wr in t]
TextSim.extend(wdsSim)
sims.append((cleanCleanCat2.index(t),sum(TextSim)))
if max(sims,key=lambda x:x[1])[1]>0:
similarCategories.append((max(sims,key=lambda x:x[1])[0],max(sims,key=lambda x:x[1])[1]))
else:
similarCategories.append('')
print('{0} texts out of {1} done'.format(cleanCleanCat1.index(cat)+1, len(cleanCleanCat1)))
with open('S:/path/In-market audiences_sim.csv', 'w', newline='',
encoding='utf-8') as csvfile:
wr = csv.writer(csvfile, delimiter=';',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
for row in similarCategories:
wr.writerow(row)
``` |
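As a quick sanity check of the metric that drives the matching above, `word_similarity` can be probed on its own; the printed values depend on the installed sematch/WordNet versions.
```python
from sematch.semantic.similarity import WordNetSimilarity

# Compare a few word pairs with the same 'li' measure used in the main loop.
wns = WordNetSimilarity()
for a, b in [('phone', 'mobile'), ('phone', 'shoe')]:
    print(a, b, wns.word_similarity(a, b, 'li'))
```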
{
"source": "joomcode/airflow",
"score": 2
} |
#### File: www/views/test_views.py
```python
import os
from unittest import mock
import pytest
from airflow.configuration import initialize_config
from airflow.plugins_manager import AirflowPlugin, EntryPointSource
from airflow.www.views import get_safe_url, truncate_task_duration
from tests.test_utils.config import conf_vars
from tests.test_utils.mock_plugins import mock_plugin_manager
from tests.test_utils.www import check_content_in_response, check_content_not_in_response
def test_configuration_do_not_expose_config(admin_client):
with conf_vars({('webserver', 'expose_config'): 'False'}):
resp = admin_client.get('configuration', follow_redirects=True)
check_content_in_response(
[
'Airflow Configuration',
'# Your Airflow administrator chose not to expose the configuration, '
'most likely for security reasons.',
],
resp,
)
@mock.patch.dict(os.environ, {"AIRFLOW__CORE__UNIT_TEST_MODE": "False"})
def test_configuration_expose_config(admin_client):
    # make sure config is initialized (without unit test mode)
initialize_config()
with conf_vars({('webserver', 'expose_config'): 'True'}):
resp = admin_client.get('configuration', follow_redirects=True)
check_content_in_response(['Airflow Configuration', 'Running Configuration'], resp)
def test_redoc_should_render_template(capture_templates, admin_client):
with capture_templates() as templates:
resp = admin_client.get('redoc')
check_content_in_response('Redoc', resp)
assert len(templates) == 1
assert templates[0].name == 'airflow/redoc.html'
assert templates[0].local_context == {'openapi_spec_url': '/api/v1/openapi.yaml'}
def test_plugin_should_list_on_page_with_details(admin_client):
resp = admin_client.get('/plugin')
check_content_in_response("test_plugin", resp)
check_content_in_response("Airflow Plugins", resp)
check_content_in_response("source", resp)
check_content_in_response("<em>$PLUGINS_FOLDER/</em>test_plugin.py", resp)
def test_plugin_should_list_entrypoint_on_page_with_details(admin_client):
mock_plugin = AirflowPlugin()
mock_plugin.name = "test_plugin"
mock_plugin.source = EntryPointSource(
mock.Mock(), mock.Mock(version='1.0.0', metadata={'name': 'test-entrypoint-testpluginview'})
)
with mock_plugin_manager(plugins=[mock_plugin]):
resp = admin_client.get('/plugin')
check_content_in_response("test_plugin", resp)
check_content_in_response("Airflow Plugins", resp)
check_content_in_response("source", resp)
check_content_in_response("<em>test-entrypoint-testpluginview==1.0.0:</em> <Mock id=", resp)
def test_plugin_endpoint_should_not_be_unauthenticated(app):
resp = app.test_client().get('/plugin', follow_redirects=True)
check_content_not_in_response("test_plugin", resp)
check_content_in_response("Sign In - Airflow", resp)
@pytest.mark.parametrize(
"url, content",
[
(
"/taskinstance/list/?_flt_0_execution_date=2018-10-09+22:44:31",
"List Task Instance",
),
(
"/taskreschedule/list/?_flt_0_execution_date=2018-10-09+22:44:31",
"List Task Reschedule",
),
],
ids=["instance", "reschedule"],
)
def test_task_start_date_filter(admin_client, url, content):
resp = admin_client.get(url)
# We aren't checking the logic of the date filter itself (that is built
# in to FAB) but simply that our UTC conversion was run - i.e. it
# doesn't blow up!
check_content_in_response(content, resp)
@pytest.mark.parametrize(
"test_url, expected_url",
[
("", "/home"),
("http://google.com", "/home"),
("36539'%3balert(1)%2f%2f166", "/home"),
(
"http://localhost:8080/trigger?dag_id=test&origin=36539%27%3balert(1)%2f%2f166&abc=2",
"/home",
),
(
"http://localhost:8080/trigger?dag_id=test_dag&origin=%2Ftree%3Fdag_id%test_dag';alert(33)//",
"/home",
),
(
"http://localhost:8080/trigger?dag_id=test_dag&origin=%2Ftree%3Fdag_id%3Dtest_dag",
"http://localhost:8080/trigger?dag_id=test_dag&origin=%2Ftree%3Fdag_id%3Dtest_dag",
),
],
)
@mock.patch("airflow.www.views.url_for")
def test_get_safe_url(mock_url_for, app, test_url, expected_url):
mock_url_for.return_value = "/home"
with app.test_request_context(base_url="http://localhost:8080"):
assert get_safe_url(test_url) == expected_url
@pytest.mark.parametrize(
"test_duration, expected_duration",
[
(0.12345, 0.123),
(0.12355, 0.124),
(3.12, 3.12),
(9.99999, 10.0),
(10.01232, 10),
],
)
def test_truncate_task_duration(test_duration, expected_duration):
assert truncate_task_duration(test_duration) == expected_duration
``` |
{
"source": "joomcode/SynapseML",
"score": 2
} |
#### File: mmlsparktest/cyber/explain_tester.py
```python
__author__ = 'rolevin'
from typing import Any, Callable, List
from pyspark.ml.param.shared import HasInputCol, HasOutputCol
from mmlsparktest.spark import *
class ExplainTester:
def check_explain(self, explainable: Any, params: List[str], type_count_checker: Callable):
explained = explainable.explainParams()
assert len(explained.split('\n')) == len(params), explained
def to_camel_case(prefix: str, name: str) -> str:
if name == 'inputCol' and isinstance(explained, HasInputCol):
return prefix + 'InputCol'
elif name == 'outputCol' and isinstance(explained, HasOutputCol):
return prefix + 'OutputCol'
else:
parts = name.split('_')
return prefix + ''.join([parts[i][0:1].upper() + parts[i][1:] for i in range(len(parts))])
values = []
for pp in params:
assert pp in explained, explained
getter_method_name = to_camel_case('get', pp)
ret_value = getattr(type(explainable), getter_method_name)(explainable)
values.append(ret_value)
# test setter
setter_method_name = to_camel_case('set', pp)
getattr(type(explainable), setter_method_name)(explainable, ret_value)
re_ret_value = getattr(type(explainable), getter_method_name)(explainable)
# test that value stays the same
assert re_ret_value == ret_value
def count_instance(arr, tt):
return len([vv for vv in arr if (tt is not None and isinstance(vv, tt)) or (tt is None and vv is None)])
assert type_count_checker(count_instance(values, str), str)
assert type_count_checker(count_instance(values, int), int)
assert type_count_checker(count_instance(values, float), float)
assert type_count_checker(count_instance(values, bool), bool)
assert type_count_checker(count_instance(values, None), None)
``` |
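A minimal, self-contained sketch of what `check_explain` exercises. `_Dummy` is a made-up stand-in with one fake parameter; real tests would pass a SynapseML transformer instead.
```python
# Dummy object with explainParams plus getFoo/setFoo accessors, matching the
# reflection pattern check_explain relies on.
class _Dummy:
    def __init__(self):
        self._foo = 3

    def explainParams(self):
        return "foo: a dummy parameter"

    def getFoo(self):
        return self._foo

    def setFoo(self, value):
        self._foo = value

ExplainTester().check_explain(
    _Dummy(),
    params=['foo'],
    type_count_checker=lambda count, t: count in (0, 1),
)
```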
{
"source": "joomladigger/The-Final-Project-for-EPAT",
"score": 2
} |
#### File: joomladigger/The-Final-Project-for-EPAT/code.py
```python
import numpy as np
import pandas as pd
import seaborn as sns
#from CAL.PyCAL import *
import matplotlib as mpl
mpl.style.use('bmh')
#sns.set_style('white')# bmhggplot
import matplotlib.pylab as plt
from datetime import datetime
from pandas import DataFrame, Series
import statsmodels.api as sm
from statsmodels.tsa.stattools import adfuller as ADF
# set the working directory
import os
os.getcwd() # this is to check the current working directory
os.chdir("D://EPAT//09 Final Project//")
# loading data
data = pd.read_csv('price.csv',index_col='id',parse_dates=False)
# using the API of UQER access data
code = list(set(data['exchangeCD'])) #delete the repeated objects
df = DataFrame()
for i in code:
df1 = DataFrame(data[data['exchangeCD']==i]['contractObject'])
df1.columns = [i]
df = pd.concat([df,df1],axis=1)
a1 = list(df['CCFX'])
a2 = list(df['XSGE'])
a3 = list(df['XDCE'])
a4 = list(df['XZCE'])
# access the contracts in CFFEX but not in SHFE
CFFEX = DataFrame(list(set(a1).difference(set(a2))),columns=['CCFX'])
# access the contracts in SHFE but not in CFFEX
SHFE = DataFrame(list(set(a2).difference(set(a1))),columns=['XSGE'])
# access the contracts in DCE but not in ZCE
DCE = DataFrame(list(set(a3).difference(set(a4))),columns=['XDCE'])
# access the contracts in ZCE but not in DCE
ZCE = DataFrame(list(set(a4).difference(set(a3))),columns=['XZCE'])
s = pd.concat([CFFEX,SHFE,DCE,ZCE],axis=0)
s.dropna()
print ('The # of Contracts in CFFEX:',len(CFFEX),'There are:',list(CFFEX['CCFX']))
print ('The # of Contracts in SHFE:',len(SHFE),'There are:',list(SHFE['XSGE']))
print ('The # of Contracts in DCE:',len(DCE),'There are:',list(DCE['XDCE']))
print ('The # of Contracts in ZCE:',len(ZCE),'There are:',list(ZCE['XZCE']))
print ('Delete the repeated Contracts in CFFEX, the remaining:',len(SHFE)+len(DCE)+len(ZCE))
## Find trade pairs
# filer the contract with turnover less than 10000
#data = DataAPI.MktMFutdGet(tradeDate='20171229',mainCon=1,contractMark=u"",contractObject=u"",startDate=u"",endDate=u"",field=[u"contractObject",u"exchangeCD",u"tradeDate",u"closePrice",u"turnoverVol"],pandas="1")
data1 = data[(data.turnoverVol > 10000) & (data.exchangeCD != u'CCFX')]  # exclude contracts from CFFEX
print ('Main Contracts with Turnover Volume more than 10000:', len(data1), 'there are:', list(data1['contractObject']))
contract_set = list(set(data1['contractObject']))
# Find trading pairs
"""
Now that the contracts have been filtered for data availability and daily liquidity,
every possible contract pair will be tested for cointegration.
An Engle-Granger test (an ADF test on the residual spread) is performed; the alternative
hypothesis is that the spread is stationary. The null hypothesis is rejected for p-values below the critical level (0.05 by default).
"""
def find_cointegrated_pairs(dataframe, critial_level = 0.05):
n = dataframe.shape[1] # the length of dateframe
pvalue_matrix = np.ones((n, n)) # initialize the matrix of p
keys = dataframe.keys() # get the column names
pairs = [] # initilize the list for cointegration
for i in range(n):
for j in range(i+1, n): # for j bigger than i
stock1 = dataframe[keys[i]] # obtain the price of two contract
stock2 = dataframe[keys[j]]
result = sm.tsa.stattools.coint(stock1, stock2) # get conintegration
pvalue = result[1] # get the pvalue
pvalue_matrix[i, j] = pvalue
if pvalue < critial_level: # if p-value less than the critical level
pairs.append((keys[i], keys[j], pvalue)) # record the contract with that p-value
return pvalue_matrix, pairs
# Cleanning the data
data = pd.read_csv('price.csv',index_col='tradeDate',parse_dates=True)
df3 = DataFrame()
for i in contract_set:
df4 = DataFrame(data[data['contractObject']==i]['settlePrice'])
df4.columns = [i]
df3 = pd.concat([df3,df4],axis=1)
badFluidity = ['B','BB','FB','FU','JR','LR','PM','RI','RS','SM','WH','WR','TF', 'IH', 'IC', 'T', 'IF']
for i in badFluidity:
if i in df3.columns:
del df3[i]
for i in df3.columns:
if df3[i].dropna().shape[0] <= 500:
del df3[i]
all = df3.dropna().copy()
all.head()
all.to_csv("all_contracts.csv")
fig = plt.figure(figsize=(10,8))
pvalues, pairs = find_cointegrated_pairs(all,0.025)
sns.heatmap(1-pvalues, xticklabels=df3.columns, yticklabels=df3.columns, cmap='RdYlGn_r', mask = (pvalues == 1))
p = DataFrame(pairs,columns=['S1','S2','Pvalue'])
p_sorted = p.sort_values(by='Pvalue')
fig = plt.figure(figsize=(12,8))
stock_df1 = all['TA']
stock_df2 = all['RB']
stock_df1.plot(color='#F4718B')
stock_df2.plot(color='#407CE2')
plt.xlabel("Time"); plt.ylabel("Price")
plt.show()
def print_func(p,all):
for i in range(p.shape[0]):
s1 = p.iloc[i][0]
s2 = p.iloc[i][1]
print (i,s1,s2)
stock_df1 = all[s1]
stock_df2 = all[s2]
fig = plt.figure(figsize=(12,8))
stock_df1.plot(color='#F4718B')
stock_df2.plot(color='#407CE2')
plt.xlabel("Time"); plt.ylabel("Price")
plt.legend([s1, s2])
plt.show()
def print_func_scatter(p,all):
for i in range(p.shape[0]):
s1 = p.iloc[i][0]
s2 = p.iloc[i][1]
#print (i,s1,s2)
stock_df1 = all[s1]
stock_df2 = all[s2]
fig = plt.figure(figsize=(12,8))
plt.scatter(stock_df1,stock_df2)
plt.xlabel(s1); plt.ylabel(s2)
plt.show()
print_func(p_sorted, all)
# scatter
print_func_scatter(p_sorted, all)
p_sorted.to_csv("p_sorted.csv")
``` |
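Before running the pair search on real settlement prices, `find_cointegrated_pairs` can be sanity-checked on synthetic series where the answer is known; the data below is purely illustrative.
```python
# Synthetic check: 'A' and 'C' are cointegrated by construction,
# 'B' is an independent random walk.
np.random.seed(0)
x = np.cumsum(np.random.normal(size=500))
y = np.cumsum(np.random.normal(size=500))
z = x + np.random.normal(scale=0.5, size=500)
demo = DataFrame({'A': x, 'B': y, 'C': z})
_, demo_pairs = find_cointegrated_pairs(demo, critial_level=0.05)
print(demo_pairs)  # expect ('A', 'C', p) with a small p-value
```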
{
"source": "joommf-attic/oommffield",
"score": 3
} |
#### File: oommffield/oommffield/oommffield.py
```python
import random
import numpy as np
import matplotlib.pyplot as plt
import finitedifferencefield
import struct
class Field(finitedifferencefield.Field):
def write_oommf_file(self, filename, datatype='text'):
"""Write the FD field to the OOMMF (omf, ohf) file.
This method writes all necessary data to the omf or ohf file,
so that it can be read by OOMMF.
Args:
filename (str): filename including extension
type(str): Either 'text' or 'binary'
Example:
.. code-block:: python
>>> from oommffield import Field
>>> field = Field((0, 0, 0), (5, 4, 3), (1, 1, 1))
>>> field.set((1, 0, 5))
>>> field.write_oommf_file('fdfield.omf')
"""
oommf_file = open(filename, 'w')
# Define header lines.
header_lines = ['OOMMF OVF 2.0',
'',
'Segment count: 1',
'',
'Begin: Segment',
'Begin: Header',
'',
'Title: Field generated omf file',
'Desc: File generated by Field class',
'meshunit: m',
'meshtype: rectangular',
'xbase: {}'.format(self.d[0]),
'ybase: {}'.format(self.d[1]),
'zbase: {}'.format(self.d[2]),
'xnodes: {}'.format(self.n[0]),
'ynodes: {}'.format(self.n[1]),
'znodes: {}'.format(self.n[2]),
'xstepsize: {}'.format(self.d[0]),
'ystepsize: {}'.format(self.d[1]),
'zstepsize: {}'.format(self.d[2]),
'xmin: {}'.format(self.cmin[0]),
'ymin: {}'.format(self.cmin[1]),
'zmin: {}'.format(self.cmin[2]),
'xmax: {}'.format(self.cmax[0]),
'ymax: {}'.format(self.cmax[1]),
'zmax: {}'.format(self.cmax[2]),
'valuedim: {}'.format(self.dim),
'valuelabels: Magnetization_x Magnetization_y Magnetization_z',
'valueunits: A/m A/m A/m',
'',
'End: Header',
'']
if datatype == 'binary':
header_lines.append('Begin: Data Binary 8')
footer_lines = ['End: Data Binary 8',
'End: Segment']
if datatype == 'text':
header_lines.append('Begin: Data Text')
footer_lines = ['End: Data Text',
'End: Segment']
# Write header lines to OOMMF file.
for line in header_lines:
if line == '':
oommf_file.write('#\n')
else:
oommf_file.write('# ' + line + '\n')
if datatype == 'binary':
# Close the file and reopen with binary write
# appending to end of file.
oommf_file.close()
oommf_file = open(filename, 'ab')
# Add the 8 bit binary check value that OOMMF uses
packarray = [123456789012345.0]
# Write data lines to OOMMF file.
for iz in range(self.n[2]):
for iy in range(self.n[1]):
for ix in range(self.n[0]):
[packarray.append(vi) for vi in self.f[ix, iy, iz, :]]
v_binary = struct.pack('d'*len(packarray), *packarray)
oommf_file.write(v_binary)
oommf_file.close()
oommf_file = open(filename, 'a')
else:
for iz in range(self.n[2]):
for iy in range(self.n[1]):
for ix in range(self.n[0]):
v = [str(vi) for vi in self.f[ix, iy, iz, :]]
for vi in v:
oommf_file.write(' ' + vi)
oommf_file.write('\n')
# Write footer lines to OOMMF file.
for line in footer_lines:
oommf_file.write('# ' + line + '\n')
# Close the file.
oommf_file.close()
def read_oommf_file(filename, name='unnamed'):
try:
f = open(filename)
if 'Begin: Data Text' in f.read():
return read_oommf_file_text(filename, name)
else:
return read_oommf_file_binary(filename, name)
except UnicodeDecodeError:
return read_oommf_file_binary(filename, name)
def read_oommf_file_text(filename, name='unnamed'):
"""Read the OOMMF file and create an Field object.
Args:
filename (str): OOMMF file name
name (str): name of the Field object
Return:
Field object.
Example:
.. code-block:: python
from oommffield import read_oommf_file
oommf_filename = 'vector_field.omf'
field = read_oommf_file(oommf_filename, name='magnetisation')
"""
# Open and read the file.
f = open(filename, 'r')
lines = f.readlines()
f.close()
# Load metadata.
dic = {'xmin': None, 'ymin': None, 'zmin': None,
'xmax': None, 'ymax': None, 'zmax': None,
'xstepsize': None, 'ystepsize': None, 'zstepsize': None,
'xbase': None, 'ybase': None, 'zbase': None,
'xnodes': None, 'ynodes': None, 'znodes': None,
'valuedim': None}
for line in lines[0:50]:
for key in dic.keys():
if line.find(key) != -1:
dic[key] = float(line.split()[2])
cmin = (dic['xmin'], dic['ymin'], dic['zmin'])
cmax = (dic['xmax'], dic['ymax'], dic['zmax'])
d = (dic['xstepsize'], dic['ystepsize'], dic['zstepsize'])
cbase = (dic['xbase'], dic['ybase'], dic['zbase'])
n = (int(round(dic['xnodes'])),
int(round(dic['ynodes'])),
int(round(dic['znodes'])))
dim = int(dic['valuedim'])
field = Field(cmin, cmax, d, dim, name=name)
for j in range(len(lines)):
if lines[j].find('Begin: Data Text') != -1:
data_first_line = j+1
counter = 0
for iz in range(n[2]):
for iy in range(n[1]):
for ix in range(n[0]):
i = (ix, iy, iz)
line_data = lines[data_first_line+counter]
value = [float(vi) for vi in line_data.split()]
field.set_at_index(i, value)
counter += 1
return field
def read_oommf_file_binary(filename, name='unnamed'):
"""Read the OOMMF file and create an Field object.
Args:
filename (str): OOMMF file name
name (str): name of the Field object
Return:
Field object.
Example:
.. code-block:: python
from oommffield import read_oommf_file
oommf_filename = 'vector_field.omf'
field = read_oommf_file(oommf_filename, name='magnetisation')
"""
# Open and read the file.
with open(filename, 'rb') as f:
file = f.read()
lines = file.split(b'\n')
# Load metadata.
dic = {'xmin': None, 'ymin': None, 'zmin': None,
'xmax': None, 'ymax': None, 'zmax': None,
'xstepsize': None, 'ystepsize': None, 'zstepsize': None,
'xbase': None, 'ybase': None, 'zbase': None,
'xnodes': None, 'ynodes': None, 'znodes': None,
'valuedim': None}
for line in lines[0:50]:
for key in dic.keys():
if line.find(bytes(key, 'utf-8')) != -1:
dic[key] = float(line.split()[2])
cmin = (dic['xmin'], dic['ymin'], dic['zmin'])
cmax = (dic['xmax'], dic['ymax'], dic['zmax'])
d = (dic['xstepsize'], dic['ystepsize'], dic['zstepsize'])
cbase = (dic['xbase'], dic['ybase'], dic['zbase'])
n = (int(round(dic['xnodes'])),
int(round(dic['ynodes'])),
int(round(dic['znodes'])))
dim = int(dic['valuedim'])
field = Field(cmin, cmax, d, dim, name=name)
binary_header = b'# Begin: Data Binary '
# Here we find the start and end points of the
# binary data, in terms of byte position.
data_start = file.find(binary_header)
header = file[data_start:data_start + len(binary_header) + 1]
if b'8' in header:
bytesize = 8
elif b'4' in header:
bytesize = 4
data_start += len(b'# Begin: Data Binary 8\n')
data_end = file.find(b'# End: Data Binary ')
if bytesize == 4:
listdata = list(struct.iter_unpack('@f', file[data_start:data_end]))
try:
            assert listdata[0][0] == 1234567.0
except:
raise AssertionError('Something has gone wrong'
' with reading Binary Data')
elif bytesize == 8:
listdata = list(struct.iter_unpack('@d', file[data_start:data_end]))
try:
assert listdata[0][0] == 123456789012345.0
except:
raise AssertionError('Something has gone wrong'
' with reading Binary Data')
counter = 1
for iz in range(n[2]):
for iy in range(n[1]):
for ix in range(n[0]):
i = (ix, iy, iz)
value = (listdata[counter][0],
listdata[counter+1][0],
listdata[counter+2][0])
field.set_at_index(i, value)
counter += 3
return field
``` |
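A short round-trip sketch tying the writer and the readers together, based on the docstring examples above; the filenames are arbitrary and the finitedifferencefield package must be installed.
```python
# Write a small field to disk in both formats and read it back.
field = Field((0, 0, 0), (5, 4, 3), (1, 1, 1))
field.set((1, 0, 5))
field.write_oommf_file('fdfield.omf', datatype='text')
field.write_oommf_file('fdfield_bin.omf', datatype='binary')

text_field = read_oommf_file('fdfield.omf', name='roundtrip')
binary_field = read_oommf_file('fdfield_bin.omf', name='roundtrip')
```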
{
"source": "joommf/joommfutils",
"score": 2
} |
#### File: joommfutils/ubermagutil/__init__.py
```python
import pkg_resources
import pytest
from . import progress
from .basic_logging import setup_logging
from .inherit_docs import inherit_docs
from .tools import changedir, hysteresis_values
__version__ = pkg_resources.get_distribution(__name__).version
def test():
"""Run all package tests.
Examples
--------
1. Run all tests.
>>> import ubermagutil as uu
...
>>> # uu.test()
"""
return pytest.main(["-v", "--pyargs", "ubermagutil", "-l"]) # pragma: no cover
```
#### File: ubermagutil/tests/test_tools.py
```python
import pathlib
import pytest
import ubermagutil as uu
def test_hysteresis_values():
res = uu.hysteresis_values(-1, 1, 1)
assert isinstance(res, list)
assert len(res) == 5
assert abs(res[0] - 1) < 1e-6
assert abs(res[-1] - 1) < 1e-6
assert (res[1] - res[0] - 1) < 1e-6
res = uu.hysteresis_values(-1e6, 1e6, 0.1e6)
assert isinstance(res, list)
assert len(res) == 41
assert abs(res[0] - 1e6) < 1e-6
assert abs(res[-1] - 1e6) < 1e-6
assert (res[1] - res[0] - 0.1e6) < 1e-6
res = uu.hysteresis_values(-1e-6, 1e-6, 0.01e-6)
assert isinstance(res, list)
assert len(res) == 401
assert abs(res[0] - 1e-6) < 1e-9
assert abs(res[-1] - 1e-6) < 1e-9
assert (res[1] - res[0] - 0.01e-6) < 1e-9
# Exception
with pytest.raises(ValueError):
uu.hysteresis_values(-1, 1, 0.3)
def test_changedir(tmp_path):
with uu.changedir(tmp_path):
with open("test.txt", "wt", encoding="utf-8") as f:
f.write("")
assert (tmp_path / "test.txt").exists()
assert not (pathlib.Path() / "test.txt").exists()
```
#### File: ubermagutil/typesystem/descriptors.py
```python
import keyword
import numbers
import numpy as np
class Descriptor:
"""Descriptor base class from which all descriptors in
``ubermagutil.typesystem`` are derived.
Before setting the attribute value of a decorated class is allowed, certain
type and value checks are performed. If they are not according to the
specifications in the ``__set__`` method (defined in the derived class),
``TypeError`` or ``ValueError`` is raised. If ``const=True`` is passed when
the class is instantiated, no value changes are allowed after the initial
assignment. Deleting attributes of a decorated class is never allowed.
Parameters
----------
name : str
Attribute name. It must be a valid Python variable name. Defaults to
``None``.
const : bool, optional
If ``const=True``, the attribute of the decorated class is constant and
its value cannot be changed after the first set.
Example
-------
1. Deriving a descriptor class from the base class ``Descriptor``, which
only allows positive integer values.
>>> import ubermagutil.typesystem as ts
...
>>> class PositiveInt(ts.Descriptor):
... def __set__(self, instance, value):
... if not isinstance(value, int):
... raise TypeError('Allowed only type(value) == int.')
... if value < 0:
... raise ValueError('Allowed only value >= 0.')
... super().__set__(instance, value)
...
>>> @ts.typesystem(myattribute=PositiveInt())
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute=5)
>>> dc.myattribute
5
>>> dc.myattribute = 101 # valid set
>>> dc.myattribute
101
>>> dc.myattribute = -1 # invalid set - negative value
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = 3.14 # invalid set - float value
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute # value has not beed affected by invalid sets
101
"""
def __init__(self, name=None, **kwargs):
self.name = name
for key, value in kwargs.items():
setattr(self, key, value)
def __set__(self, instance, value):
"""If ``self.const=True``, changing the value of a decorated class
attribute after the initial set is not allowed.
Raises
------
AttributeError
If changing the value of a decorated class attribute is attempted.
Example
-------
1. Changing the value of a constant decorated class attribute.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Descriptor(const=True))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute="<NAME>")
>>> dc.myattribute
'<NAME>'
>>> dc.myattribute = '<NAME>'
Traceback (most recent call last):
...
AttributeError: ...
"""
if hasattr(self, "const"):
if not self.const or self.name not in instance.__dict__:
instance.__dict__[self.name] = value
else:
msg = f"Changing {self.name} not allowed."
raise AttributeError(msg)
else:
instance.__dict__[self.name] = value
def __delete__(self, instance):
"""Deleting the decorated class attribute is never allowed and
``AttributeError`` is raised.
Raises
------
AttributeError
If deleting decorated class attribute is attempted.
Example
-------
1. Deleting an attribute of a decorated class.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Descriptor())
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute="<NAME>")
>>> dc.myattribute
'<NAME>'
>>> del dc.myattribute
Traceback (most recent call last):
...
AttributeError: ...
"""
msg = f"Deleting {self.name} not allowed."
raise AttributeError(msg)
class Typed(Descriptor):
"""Descriptor allowing setting attributes only with values of a certain
type.
Parameters
----------
expected_type : type
Allowed type of value.
allow_none : bool
If ``True``, the value can be set with ``None``.
Raises
------
TypeError
If ``type(value) != expected_type``.
Example
-------
1. Usage of ``Typed`` descriptor.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Typed(expected_type=str))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute='<NAME>')
>>> dc.myattribute
'<NAME>'
>>> dc.myattribute = '<NAME>' # valid set
>>> dc.myattribute
'<NAME>'
>>> dc.myattribute = 3.14 # invalid set
Traceback (most recent call last):
...
TypeError: ...
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "allow_none"):
if self.allow_none and value is None:
super().__set__(instance, value)
return None
if not isinstance(value, self.expected_type):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
super().__set__(instance, value)
class Scalar(Descriptor):
"""Descriptor allowing setting attributes only with scalars
(``numbers.Real``).
Parameters
----------
expected_type : int or float type, optional
Allowed type of ``value``. It should be a subset of ``numbers.Real``
(e.g. ``int`` or ``float``).
positive : bool, optional
If ``positive=True``, value must be positive (>0).
unsigned : bool, optional
If ``unsigned=True``, value must be unsigned (>=0).
otherwise : type
This type would also be accepted if specified. It has priority over
other descriptor specification.
Raises
------
TypeError
If ``type(value)`` is neither ``numbers.Real`` nor ``expected_type``
(if passed).
ValueError
If ``value < 0`` and ``unsigned=True`` is passed or ``value <= 0`` and
``positive=True`` is passed.
Example
-------
1. Usage of ``Scalar`` descriptor for defining a positive integer.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Scalar(expected_type=int, positive=True))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute=5)
>>> dc.myattribute
5
>>> dc.myattribute = 10 # valid set
>>> dc.myattribute
10
>>> dc.myattribute = 3.14 # invalid set
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute = 0 # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute # the value was not affected by invalid sets
10
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "otherwise"):
if isinstance(value, self.otherwise):
super().__set__(instance, value)
return None
if not isinstance(value, numbers.Real):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
if hasattr(self, "expected_type"):
if not isinstance(value, self.expected_type):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
if hasattr(self, "unsigned"):
if self.unsigned and value < 0:
msg = f"Cannot set {self.name} with value = {value} < 0."
raise ValueError(msg)
if hasattr(self, "positive"):
if self.positive and value <= 0:
msg = f"Cannot set {self.name} with value = {value} <= 0."
raise ValueError(msg)
super().__set__(instance, value)
class Vector(Descriptor):
"""Descriptor allowing setting attributes only with vectors (``list``,
``tuple``, or ``numpy.ndarray``), whose elements are of ``numbers.Real``
type.
Parameters
----------
component_type : int or float type, optional
Type of the vector components. It should be a subset of
``numbers.Real`` (``int``, ``float``).
size : int, optional
Size (length, number of elements) of the vector.
positive : bool, optional
If ``positive=True``, values of all vector elements must be positive
(>0).
unsigned : bool, optional
If ``unsigned=True``, values of all vector components must be unsigned
(>=0).
otherwise : type
This type would also be accepted if specified. It has priority over
other descriptor specification.
Raises
------
TypeError
If the ``type(value)`` is not ``list``, ``tuple``, or ``numpy.ndarray``
or if the type of vector components is neither ``numbers.Real`` nor
``expected_type`` (if passed).
ValueError
If vector component value is ``value < 0`` and ``unsigned=True`` or
``value <= 0`` and ``positive=True``.
Example
-------
1. Usage of ``Vector`` descriptor for defining a three-dimensional vector,
    whose components are positive integers.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Vector(size=3, component_type=int,
... positive=True))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute=(1, 2, 12))
>>> dc.myattribute
(1, 2, 12)
>>> dc.myattribute = (10, 11, 12) # valid set
>>> dc.myattribute
(10, 11, 12)
>>> dc.myattribute = (11, 12) # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = (0, 1, 2) # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = (1, 3.14, 2) # invalid set
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute # the value was not affected by invalid sets
(10, 11, 12)
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "otherwise"):
if isinstance(value, self.otherwise):
super().__set__(instance, value)
return None
if not isinstance(value, (tuple, list, np.ndarray)):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
if not all(isinstance(i, numbers.Real) for i in value):
msg = "Allowed only type(value[i]) == numbers.Real."
raise TypeError(msg)
if hasattr(self, "size"):
if len(value) != self.size:
msg = f"Cannot set {self.name} with length {len(value)} value."
raise ValueError(msg)
if hasattr(self, "component_type"):
if not all(isinstance(i, self.component_type) for i in value):
msg = f"Allowed only type(value[i]) == {self.component_type}."
raise TypeError(msg)
if hasattr(self, "unsigned"):
if self.unsigned and not all(i >= 0 for i in value):
raise ValueError("Allowed only value[i] >= 0.")
if hasattr(self, "positive"):
if self.positive and not all(i > 0 for i in value):
raise ValueError("Allowed only value[i] > 0.")
super().__set__(instance, value)
class Name(Descriptor):
"""Python identifier descriptor.
It allows setting attributes only with strings representing a valid Python
identifier which is not also a keyword. In other words, it allows valid
Python variable names. If ``allowed_char`` is passed, value is first split
at that character and then individual parts of the string checked.
Parameters
----------
allowed_char : (1,) str
Character allowed in ``value``.
Raises
------
TypeError
If the ``type(value)`` is not ``str``.
ValueError
If the string is not a valid identifier or it is a Python keyword.
Example
-------
1. Usage of ``Name`` descriptor.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Name())
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute='object_name')
>>> dc.myattribute
'object_name'
>>> dc.myattribute = 'newname' # valid set
>>> dc.myattribute
'newname'
>>> dc.myattribute = '123newname' # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = '<NAME>' # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute # the value was not affected by invalid sets
'newname'
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if not isinstance(value, str):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
if hasattr(self, "allowed_char"):
tmp_value = value.split(self.allowed_char)
else:
tmp_value = [value]
for s in tmp_value:
if not s.isidentifier() or keyword.iskeyword(s):
msg = f"{s} is not a valid variable name."
raise ValueError(msg)
super().__set__(instance, value)
class Dictionary(Descriptor):
"""Descriptor allowing setting attributes with a dictionary, which has keys
defined by ``key_descriptor`` and values defined by ``value_descriptor``.
Parameters
----------
key_descriptor : ubermagutil.typesystem.Descriptor or its derived class
Accepted dictionary key type.
value_descriptor : ubermagutil.typesystem.Descriptor or its derived class
Accepted dictionary value type.
allow_empty : bool, optional
If ``allow_empty=True``, the value can be an empty dictionary.
otherwise : type
This type would also be accepted if specified. It has priority over
other descriptor specification.
Raises
------
TypeError
If value passed is not a dictionary.
ValueError
If an empty dictionary is passed or a dictionary with invalid keys or
values.
Example
-------
1. The usage of ``Dictionary`` descriptor allowing keys defined by
``ubermagutil.typesystem.Name`` and values by
``ubermagutil.typesystem.Scalar``.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Dictionary(key_descriptor=ts.Name(),
... value_descriptor=ts.Scalar()))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute={'a': 1, 'b': -1.1})
>>> dc.myattribute
{'a': 1, 'b': -1.1}
>>> dc.myattribute = {'a': 1, 'b': -3} # valid set
>>> dc.myattribute
{'a': 1, 'b': -3}
>>> dc.myattribute = {1: 1, 'b': 3} # invalid set
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute = {'a': 1, 'c': 'd'} # invalid set
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute = {} # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute # the value was not affected by invalid sets
{'a': 1, 'b': -3}
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "otherwise"):
if isinstance(value, self.otherwise):
super().__set__(instance, value)
return None
if not isinstance(value, dict):
msg = f"Cannot set {self.name} with {type(value)}."
raise TypeError(msg)
if not value:
if hasattr(self, "allow_empty"):
if not self.allow_empty:
msg = f"Cannot set {self.name} with an empty dictionary."
raise ValueError(msg)
else:
msg = f"Cannot set {self.name} with an empty dictionary."
raise ValueError(msg)
for key, val in value.items():
self.key_descriptor.__set__(self.key_descriptor, key)
self.value_descriptor.__set__(self.value_descriptor, val)
super().__set__(instance, value)
class Parameter(Descriptor):
"""Descriptor allowing setting attributes with a value described as
``descriptor`` or a dictionary. If a dictionary is passed, dictionary keys
are strings defined by ``ubermagutil.typesystem.Name`` descriptor, and the
values are defined by ``descriptor``.
Parameters
----------
descriptor : ubermagutil.typesystem.Descriptor or its derived class
Accepted value, or if a dictionary is passed, allowed value type.
otherwise : type
This type would also be accepted if specified. It has priority over
other descriptor specification.
Example
-------
1. The usage of ``Property`` descriptor allowing scalars.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Parameter(descriptor=ts.Scalar()))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute=-2)
>>> dc.myattribute
-2
>>> dc.myattribute = {'a': 1, 'b': -3} # valid set
>>> dc.myattribute
{'a': 1, 'b': -3}
>>> dc.myattribute = {'a': 1, 'b': 'abc'} # invalid set
Traceback (most recent call last):
...
TypeError: ...
>>> dc.myattribute = {'a b': 1, 'c': -3} # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute = {} # invalid set
Traceback (most recent call last):
...
ValueError: ...
>>> dc.myattribute # the value was not affected by invalid sets
{'a': 1, 'b': -3}
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "otherwise"):
if isinstance(value, self.otherwise):
super().__set__(instance, value)
return None
if isinstance(value, dict):
dictdescriptor = Dictionary(
key_descriptor=Name(allowed_char=":"), value_descriptor=self.descriptor
)
dictdescriptor.__set__(dictdescriptor, value)
else:
self.descriptor.__set__(self.descriptor, value)
super().__set__(instance, value)
class Subset(Descriptor):
"""Descriptor allowing setting attributes only with a subset of a
predefined set.
Parameters
----------
sample_set : any type
Defines the set of allowed values.
unpack : bool
If ``True``, ``value`` is unpacked as ``set(value)``.
Raises
------
ValueError
If value is not a subset ``sample_set``.
Example
-------
1. Usage of ``Subset`` descriptor.
>>> import ubermagutil.typesystem as ts
...
>>> @ts.typesystem(myattribute=ts.Subset(sample_set='xyz', unpack=True))
... class DecoratedClass:
... def __init__(self, myattribute):
... self.myattribute = myattribute
...
>>> dc = DecoratedClass(myattribute='yx')
>>> dc.myattribute = 'zyyyyx' # valid set
>>> dc.myattribute = 'a' # invalid set
Traceback (most recent call last):
...
ValueError: ...
.. note::
This class was derived from ``ubermagutil.typesystem.Descriptor``
and inherits its functionality.
.. seealso:: :py:class:`~ubermagutil.typesystem.Descriptor`
"""
def __set__(self, instance, value):
if hasattr(self, "otherwise"):
if isinstance(value, self.otherwise):
super().__set__(instance, value)
return None
if self.unpack:
val = set(value)
if not val.issubset(self.sample_set):
msg = f"Cannot set {self.name} with {value}."
raise ValueError(msg)
else:
val = value
if val not in self.sample_set:
msg = f"Cannot set {self.name} with {value}."
raise ValueError(msg)
super().__set__(instance, val)
```
#### File: ubermagutil/units/units.py
```python
import collections
si_prefixes = collections.OrderedDict(
{
"y": 1e-24, # yocto
"z": 1e-21, # zepto
"a": 1e-18, # atto
"f": 1e-15, # femto
"p": 1e-12, # pico
"n": 1e-9, # nano
"u": 1e-6, # micro
"m": 1e-3, # mili
"": 1, # no prefix
"k": 1e3, # kilo
"M": 1e6, # mega
"G": 1e9, # giga
"T": 1e12, # tera
"P": 1e15, # peta
"E": 1e18, # exa
"Z": 1e21, # zetta
"Y": 1e24, # yotta
}
)
rsi_prefixes = {v: k for k, v in si_prefixes.items()}
def si_multiplier(value):
r"""Compute SI multiplier.
SI multiplier of :math:`x` is considered to be a value :math:`m=10^{n}`,
for :math:`n = ..., -6, -3, 0, 3, 6,...`, for which :math:`1 \le x/m
< 10^{3}`.
Parameters
----------
value : numbers.Real
Value for which the multiplier is computed.
Returns
-------
float
Multiplier as :math:`10^{n}`. If multiplier cannot be found, ``None``
is returned.
Examples
--------
1. Find a multiplier.
>>> import ubermagutil.units as uu
...
>>> uu.si_multiplier(5e-9) # value on a nanoscale
1e-09
>>> uu.si_multiplier(500e-6) # value on a microscale
1e-06
>>> uu.si_multiplier(0.5e-9) # value on a picoscale
1e-12
.. seealso:: :py:class:`~ubermagutil.units.si_max_multiplier`
"""
if value == 0:
return 1
else:
for prefix, multiplier in reversed(si_prefixes.items()):
if 1 <= abs(value) / multiplier < 1e3:
return multiplier
else:
return None
def si_max_multiplier(values):
"""Compute maximum SI multiplier for a list of values.
SI multiplier is computed for all elements of ``values`` using
``ubermagutil.units.si_multiplier`` and the largest one is returned.
Parameters
----------
values : list of numbers.Real
Values for which the maximum multiplier is computed.
Returns
-------
float
Multiplier as :math:`10^{n}`. If multiplier cannot be found, ``None``
is returned.
Examples
--------
1. Find a maximum multiplier.
>>> import ubermagutil.units as uu
...
>>> uu.si_max_multiplier([5e-9, 50e-9, 500e-9, 5000e-9])
1e-06
>>> uu.si_max_multiplier([500e-6, 1])
1
>>> uu.si_max_multiplier([500e-12, 1e-11])
1e-12
.. seealso:: :py:class:`~ubermagutil.units.si_multiplier`
"""
return max(list(map(si_multiplier, values)))
``` |
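A small usage sketch (not part of the original module) showing how `si_multiplier` and the reverse lookup table `rsi_prefixes` defined above can be combined to pretty-print a value with its SI prefix; the helper name is illustrative, and it assumes `rsi_prefixes` is importable the same way the doctests import `si_multiplier`:
```python
import ubermagutil.units as uu

def with_prefix(value, unit):
    # Hypothetical helper: scale value by its SI multiplier and attach
    # the matching prefix looked up in rsi_prefixes.
    multiplier = uu.si_multiplier(value)
    if multiplier is None:
        return '{} {}'.format(value, unit)
    return '{:g} {}{}'.format(value / multiplier, uu.rsi_prefixes[multiplier], unit)

print(with_prefix(5e-9, 'm'))    # prints something like '5 nm'
print(with_prefix(500e-6, 's'))  # prints something like '500 us'
```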
{
"source": "joomogmbh/GrillMeister",
"score": 2
} |
#### File: joomogmbh/GrillMeister/grillen.py
```python
from flask import Flask, render_template, request
from sqlalchemy import update
from forms import WurstOrderForm, DeleteOrderForm, IndexForm
import config
import os
#TODO: allow subsequent changes to placed orders
app = Flask(__name__)
app.config['SECRET_KEY'] = config.SECRET_KEY
app.config['SQLALCHEMY_DATABASE_URI'] = config.SQLALCHEMY_DATABASE_URI
# import models AFTER app is initialized
from models import db, DB_Bestellungen, DB_Events
def initEmptyDatabases():
db.create_all()
@app.route('/', methods=['GET', "POST"])
def index():
form=IndexForm(request.form)
if request.method == "POST":
if not os.path.exists(config.EVENTS_FILE):
initEmptyDatabases()
#create event
#create a new database or use the existing one
new_event = DB_Events(name=form.name.data, date=form.date.data, offer=form.offer.data)
db.session.add(new_event)
db.session.commit()
#TODO: rename the database, adjust the config, test the database
return render_template('index.html', created=True, form=form)
return render_template('index.html', form=form)
@app.route('/grillen', methods=['GET', 'POST'])
def wurstOrder():
form=WurstOrderForm(request.form)
print('Valid input: ' + str(form.validate()))
if request.method == 'POST':
if not os.path.exists(config.BESTELLUNGEN_FILE):
initEmptyDatabases()
new_order = DB_Bestellungen(name=form.name.data, bratwurst=form.bratwurst.data, schinkengriller=form.schinkengriller.data, broetchen=form.broetchen.data*(int(form.bratwurst.data)+int(form.schinkengriller.data)), selbstversorger=form.selbstversorger.data)
if DB_Bestellungen.query.filter(DB_Bestellungen.name == form.name.data).one_or_none():
db.session.query(DB_Bestellungen).filter(DB_Bestellungen.name == form.name.data).update({DB_Bestellungen.bratwurst: form.bratwurst.data, DB_Bestellungen.broetchen: form.broetchen.data*(int(form.bratwurst.data)+int(form.schinkengriller.data)), DB_Bestellungen.schinkengriller: form.schinkengriller.data, DB_Bestellungen.selbstversorger: form.selbstversorger.data})
else:
db.session.add(new_order)
db.session.commit()
return render_template('order.html', bestellt=True, form=form)
return render_template('order.html', form=form)
@app.route('/summary', methods=['GET'])
def summary():
if os.path.exists(config.BESTELLUNGEN_FILE):
#namen = db.session.execute("SELECT name FROM bestellungen")
#bestellungen = db.session.execute("SELECT bratwurst FROM bestellungen")
#output = ""
db_req = db.session.execute("SELECT * FROM bestellungen")
keys = db_req.keys()
entries = db_req.fetchall()
print(keys)
print(entries)
#for x in namen.fetchall():
# name += "%s" % (x)
#for y in bestellungen.fetchall():
# bestellung += "%s" % (y)
# output += "<strong>%s</strong>: %s " % (request.keys()[y], x[y])
# output += "<br>"
#output += "<br>Teilnehmeranzahl: %s<br><br>" % x[0]
#for key in request.keys()[2:]:
# output += "%s: %s<br>" % (key, db.session.execute("SELECT SUM(%s) FROM bestellungen" % key).fetchall()[0][0]) #execute funktionert; sum rechnet alle zuammen, [0][0] "entfernt" die liest und tuple
#TODO: correct bread roll counter
#TODO: make the output nicer
return render_template('summary.html', keys=keys, entries=entries)
elif not os.path.exists(config.BESTELLUNGEN_FILE):
return "No orders!"
#return str(output)
@app.route('/delete', methods=['GET', 'POST'])
def deleteOrderForm():
form=DeleteOrderForm(request.form)
if request.method == 'POST':
print(form.delete_secret.data)
print(form.confirm_delete.data)
if form.delete_secret.data == "Mettwoch" and form.confirm_delete.data:
return deleteOrders()
return "Hau ab!"
return render_template('delete_order.html', form=form)
def deleteOrders():
if os.path.exists(config.BESTELLUNGEN_FILE):
os.remove(config.BESTELLUNGEN_FILE)
return("Bestellungen erfolgreich gelöscht.")
return("Keine Bestellungen zum Löschen.")
``` |
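The application above imports a `config` module that is not part of this listing. A minimal sketch of what it could contain, inferred only from the attributes referenced in the code (`SECRET_KEY`, `SQLALCHEMY_DATABASE_URI`, `EVENTS_FILE`, `BESTELLUNGEN_FILE`); every value here is a placeholder, not the project's real configuration:
```python
# config.py -- hypothetical example; all values are placeholders
import os

BASE_DIR = os.path.dirname(os.path.abspath(__file__))

SECRET_KEY = "change-me"                                        # Flask session secret
BESTELLUNGEN_FILE = os.path.join(BASE_DIR, "bestellungen.db")   # orders database file
EVENTS_FILE = os.path.join(BASE_DIR, "events.db")               # events database file
SQLALCHEMY_DATABASE_URI = "sqlite:///" + BESTELLUNGEN_FILE
```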
{
"source": "joon1170/CreditCard-Fraud",
"score": 3
} |
#### File: joon1170/CreditCard-Fraud/01_explore_CDP_AWS.py
```python
from pyspark import SparkConf
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Create a SparkSession:
spark = SparkSession.builder \
.config("spark.hadoop.yarn.resourcemanager.principal", "jkim") \
.config("spark.yarn.access.hadoopFileSystems","s3a://jkim-appsdata/") \
.config("spark.executor.instances", "3") \
.config("spark.executor.cores", "2") \
.config("spark.executor.memory", "4g") \
.appName("jkim_explore01") \
.getOrCreate()
# Check Spark configuration
SparkConf().getAll()
# Load the creditcard data from HDFS:
# df = spark.read.csv("jkim/creditcard/", sep=",", header=True, inferSchema=True)
# Show first 5 lines to see if the delimited lines have been read properly
# df.show(5)
# And Print schema
# df.printSchema()
# Define a new schema
from pyspark.sql.types import *
schema = StructType([
StructField("Time", DoubleType()),
StructField("V1", DoubleType()),
StructField("V2", DoubleType()),
StructField("V3", DoubleType()),
StructField("V4", DoubleType()),
StructField("V5", DoubleType()),
StructField("V6", DoubleType()),
StructField("V7", DoubleType()),
StructField("V8", DoubleType()),
StructField("V9", DoubleType()),
StructField("V10", DoubleType()),
StructField("V11", DoubleType()),
StructField("V12", DoubleType()),
StructField("V13", DoubleType()),
StructField("V14", DoubleType()),
StructField("V15", DoubleType()),
StructField("V16", DoubleType()),
StructField("V17", DoubleType()),
StructField("V18", DoubleType()),
StructField("V19", DoubleType()),
StructField("V20", DoubleType()),
StructField("V21", DoubleType()),
StructField("V22", DoubleType()),
StructField("V23", DoubleType()),
StructField("V24", DoubleType()),
StructField("V25", DoubleType()),
StructField("V26", DoubleType()),
StructField("V27", DoubleType()),
StructField("V28", DoubleType()),
StructField("Amount", DoubleType()),
StructField("Class", IntegerType())
])
df = spark \
.read \
.format("csv") \
.option("sep", ",") \
.option("header", True) \
.schema(schema) \
.load("s3a://jkim-appsdata/ml/creditcard.csv")
df.describe("Time","Amount","Class").show()
# Run some basic checks on the data - any NULL values?
df_nonull = df.dropna()
df_nonull.describe("Time","Amount","Class").show()
# Add a new Category Column "Fraud"
df2 = df.withColumn("Fraud", df.Class == 1)
# Describe the new DataFrame
df2.select("Time", "V1", "V2", "Amount", "Class", "Fraud").show(5)
df2.describe("Time", "V1", "V2", "Amount", "Class").show()
# Load into Panda Dataframe to visualize summary better.
pdf = df2.toPandas()
# pdf.describe()
# Time Column - View distribution
# Plot Time with normal, and plot Time with fraud
# sns.distplot(pdf["Time"], kde=False)
# sns.distplot(pdf["Time"][pdf.Class == 0], kde=False)
# sns.distplot(pdf["Time"][pdf.Class == 1], kde=False)
# Filter "Normal" DataFrame where Class == 0
# and filter "Fraudulent" DataFrame where Class == 1
pdf_normal = pdf[pdf.Class == 0]
# pdf_normal.count()
# Plot distribution of Normal transactions
sns.jointplot(x="Time", y="Amount", data=pdf_normal, height=12, kind="reg")
pdf_fraud = pdf[pdf.Class == 1]
# Plot Distribution of Fraud transactions
sns.jointplot(x="Time", y="Amount", data=pdf_fraud, height=12, kind="reg")
# FacetGrid
def tmp_plot(): # Wrap plot build into function for CDSW
g = sns.FacetGrid(data=pdf, col="Fraud", sharex=True, height=10)  # 'height' replaces the deprecated 'size' argument
g = g.map(plt.scatter, "Time", "Amount")
tmp_plot()
# Explore each "V" features
from pyspark.sql.functions import count, mean
v_list = ["V1","V2","V3","V4","V5","V6","V7","V8","V9","V10", \
"V11","V12","V13","V14","V15","V16","V17","V18","V19","V20", \
"V21","V22","V23","V24","V25","V26","V27","V28"]
def explore(vfeatures):
for v in vfeatures:
df.rollup("Class").agg(count(v), mean(v)).orderBy("Class").show()
explore(v_list)
def tmp_plot2(vfeatures):
for v in vfeatures:
ax = plt.subplot(111)
sns.distplot(pdf[v][pdf.Class == 1], bins=50)
sns.distplot(pdf[v][pdf.Class == 0], bins=50)
ax.set_xlabel('')
ax.set_title('Feature: ' + str(v))
plt.show()
tmp_plot2(v_list)
# When visualizing the distribution of data between "normal" and "fraud" transactions,
# the following columns (features) show very different distribution between the two
# transaction types.
feature_selected = ["V1","V2","V3","V4","V9","V10","V11","V12","V14","V16","V17","V18","V19"]
# Save the data for next phase, Machine Learning
df2.write.parquet("s3a://jkim-appsdata/ml/creditcard/exploredata/", mode="overwrite")
# ## Cleanup
# Stop the SparkSession:
spark.stop()
``` |
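As a sketch of the "next phase" the script saves data for, the Parquet output could be read back and restricted to the selected features. This is illustrative only; it assumes a SparkSession `spark` configured with the same S3 credentials and bucket path as above:
```python
# Hypothetical follow-up step: reload the explored data and keep the selected features.
df_ml = spark.read.parquet("s3a://jkim-appsdata/ml/creditcard/exploredata/")
feature_selected = ["V1", "V2", "V3", "V4", "V9", "V10", "V11", "V12",
                    "V14", "V16", "V17", "V18", "V19"]
df_ml = df_ml.select(feature_selected + ["Amount", "Class"])
df_ml.printSchema()
```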
{
"source": "joon3216/sudsoln",
"score": 3
} |
#### File: sudsoln/sudsoln/sudoku.py
```python
import sudsoln.candidate as candidate
import sudsoln.sarray as sarray
class Sudoku():
'''Sudoku puzzle.'''
def __init__(self, array, empty = '.', elements = None):
'''(Sudoku, 2d-array of object, str[, {objects}]) -> None
Precondition:
1. each element in elements is of length 1 if specified.
2. (elements is None) or (len(elements) >= 4)
3. len(empty) == 1
Initialize Sudoku puzzle.
>>> q_small = [ # not a np.array, but acceptable
... ['1', '.', '3', '.'],
... ['.', '2', '.', '.'],
... ['.', '.', '.', '.'],
... ['.', '.', '.', '4']
... ]
...
>>> q_small = Sudoku(q_small)
>>> q_small.n
2
>>> question1 = [ # mixture of int and str, and not a np.array
... ['.', '.', '.', '.', 2, '.', '.', '.', '.'],
... [ 8, 3, '.', 7, 1, 4, '.', 9, 6],
... ['.', 6, '.', 9, '.', 5, 4, '.', 8],
... ['.', 9, '.', 3, '.', 1, '.', '.', 4],
... ['.', 1, '.', 4, '.', 2, '.', '.', 7],
... ['.', 7, 5, '.', '.', '.', 2, 1, '.'],
... ['.', '.', 4, '.', '.', '.', 7, '.', '.'],
... ['.', '.', '.', 5, '.', 7, '.', '.', '.'],
... ['.', '.', '.', 1, 9, 6, '.', '.', '.']
... ]
...
>>> q1 = Sudoku(question1)
>>> q1.n
3
>>> question_big = [
... ['1', '6', 'F', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'E', 'G', '7'],
... ['.', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'D', '3', 'A', 'F', '8', '.'],
... ['.', '.', '.', '.', '.', '.', '.', 'E', 'B', '5', 'C', 'G', '.', '.', '.', '.'],
... ['.', '.', '.', '.', '.', 'G', '3', 'D', 'A', '1', '.', '.', '.', '.', 'C', '2'],
... ['3', '9', '8', '.', '.', '.', '.', '.', '.', '.', '.', '.', '5', '1', 'B', 'G'],
... ['B', '.', '.', '.', '.', '.', '.', '.', '.', '.', 'E', '6', 'F', '2', 'A', '.'],
... ['.', '.', '.', 'C', '.', '.', '9', 'A', '8', '7', 'B', '2', '.', '.', '.', '.'],
... ['.', 'A', '1', 'E', '.', 'D', '6', 'C', '5', '3', '.', '.', '.', '.', '.', '4'],
... ['F', 'B', '4', '8', '.', '.', '.', '.', '.', '.', '.', '.', '1', '7', 'E', '3'],
... ['C', '.', '.', '.', '.', '.', '.', '.', '.', '.', '.', '8', 'D', '4', '.', '.'],
... ['.', '.', '.', '.', '.', '1', '.', 'G', '3', 'D', '6', '4', '.', '.', '.', '.'],
... ['.', '.', '.', '.', '.', '3', '8', 'F', '7', 'C', '5', '.', '.', '.', '2', '9'],
... ['G', '4', 'D', 'B', '.', '.', '.', '.', '.', '.', '.', '.', '2', '9', 'F', '5'],
... ['.', '.', '.', '.', '.', '.', '.', '4', '.', '.', '7', 'D', 'E', '6', '.', '.'],
... ['.', '.', '.', '.', '.', '.', '2', '5', 'C', 'G', '8', 'E', '7', '.', '.', '.'],
... ['.', '.', '.', '2', '.', '.', 'A', '1', '9', 'F', '.', '.', '.', '.', '3', '8']
... ]
...
>>> q_big = Sudoku(question_big)
>>> q_big.n
4
'''
# Array type and shape
if type(array) == str:
raise TypeError(
'String object is not acceptable. If you want to ' +\
'convert a string representation of Sudoku to ' +\
'Sudoku object, use ' +\
'sudsoln.to_sudoku(sudoku_str, elements, empty) ' +\
'instead.'
)
array = sarray.Array(array)
if len(set(array.shape)) != 1:
raise ValueError(
'The shape of array must be square, ' +\
'i.e. number of rows must be equal to number of columns.'
)
# Size
n = list(set(array.shape))[0] ** .5
if n < 2:
raise ValueError(
'The number of rows of array is too small; ' +\
'it has to be at least 4.'
)
if int(n) != n:
raise ValueError(
'The number of rows of array is not compatible ' +\
'for Sudoku puzzle; it has to be a square ' +\
'of some integer, e.g. 9 = 3 ** 2, 16 = 4 ** 2, etc.'
)
# elements and empty
if type(empty) != str:
raise TypeError('empty must be of type str.')
if len(empty) != 1:
raise ValueError('Length of empty must be 1.')
if elements is None:
elements = set(array.flatten())
if empty not in elements:
try: # assuming it is already an answer
Sudoku(
array = array,
elements = elements,
empty = empty
)
except ValueError:
# It isn't in its answer form.
# i.e. wrong specification of empty
raise KeyError(
"empty = '" + empty + "'" + " does not exist " +\
"in the array. Either specify the correct " +\
"string denoting the emptiness in the array, " +\
"or change the string denoting the emptiness " +\
"in the array by using " +\
"sudsoln.change_empty(array, old, new)."
)
else:
elements.remove(empty)
if len(elements) != n ** 2:
raise ValueError(
'Length of the guessed elements is ' +\
str(len(elements)) + ', not ' + str(int(n ** 2)) +\
'. Either make sure that: ' +\
'1. every element in the current array contains ' +\
'all of your intended elements at least once, or; ' +\
'2. specify elements explicitly, or; ' +\
'3. there is exactly one string, and only one, ' +\
'that denotes the emptiness in the array. ' +\
'For example, if you try to solve a 9-by-9 sudoku ' +\
'whose answer form consists of integers from 1 to ' +\
'9, either make sure that every integer from 1 to ' +\
'9 shows up in the current array at least once, ' +\
'or explicitly specify elements = set([str(i) for ' +\
'i in range(1, 10)]), or see if the array uses ' +\
"'.' and some other string, like ',' or ' ', to " +\
'denote the emptiness.'
)
else:
elements = set([str(item) for item in list(elements)])
el_test = set(array.flatten()).difference(elements.union({empty}))
if el_test != set():
raise ValueError(
'There exists an element in array that is not ' +\
'a member of elements: ' + str(el_test)
)
if len(elements) != n ** 2:
raise ValueError(
'The number of elements in elements must be ' +\
str(int(n ** 2)) + ', not ' + str(len(elements)) + '.'
)
self.show = array
self.n = int(n)
self.elements = elements
self.empty = empty
def __eq__(self, other):
'''(Sudoku, Sudoku) -> bool
Return True iff all the entries of self and other are the same.
'''
return self.show == other.show
def __getitem__(self, key):
'''(Sudoku, ints/lists/slices/tuples) -> str or Array
Return the entry of self at key.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1[(0, 0)]
'.'
>>> q1[(2, 1)]
'6'
'''
return self.show[key]
def __repr__(self):
'''(Sudoku) -> Sudoku
Return the Sudoku representation of self.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q_big = to_sudoku(sq.q7)
>>> q_big
Sudoku(
1 6 F . | . . . . | . . . . | . E G 7
. . . . | . . . . | . . D 3 | A F 8 .
. . . . | . . . E | B 5 C G | . . . .
. . . . | . G 3 D | A 1 . . | . . C 2
------------------------+------------------------+------------------------+------------------------
3 9 8 . | . . . . | . . . . | 5 1 B G
B . . . | . . . . | . . E 6 | F 2 A .
. . . C | . . 9 A | 8 7 B 2 | . . . .
. A 1 E | . D 6 C | 5 3 . . | . . . 4
------------------------+------------------------+------------------------+------------------------
F B 4 8 | . . . . | . . . . | 1 7 E 3
C . . . | . . . . | . . . 8 | D 4 . .
. . . . | . 1 . G | 3 D 6 4 | . . . .
. . . . | . 3 8 F | 7 C 5 . | . . 2 9
------------------------+------------------------+------------------------+------------------------
G 4 D B | . . . . | . . . . | 2 9 F 5
. . . . | . . . 4 | . . 7 D | E 6 . .
. . . . | . . 2 5 | C G 8 E | 7 . . .
. . . 2 | . . A 1 | 9 F . . | . . 3 8
n: 4
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9, A, B, C, D, E, F, G
empty: .
)
'''
n = self.n
headline, endline = 'Sudoku(\n', ')'
midline = ''
sep = '-----' * n + '----'
sepline = (sep + '+') * (n - 1) + sep + '\n'
str_self = str(self) # This is why each element has to be len 1
j = 0
for ind in range(0, len(str_self), n ** 2):
j += 1
str_row = list(str_self[ind:(ind + n ** 2)])
for i in range(len(str_row)):
if i != 0 and i % n == 0:
midline += ' | ' + str_row[i]
elif i == len(str_row) - 1:
if j % n != 0 or j == n ** 2:
midline += ' ' + str_row[i] + '\n'
else:
midline += ' ' + str_row[i] + '\n' + sepline
else:
midline += ' ' + str_row[i]
subsize = 'n: ' + str(self.n) + '\n'
lst_els = list(self.elements)
lst_els.sort()
els = 'elements: '
for item in enumerate(lst_els):
if item[0] != len(lst_els) - 1:
els += item[1] + ', '
else:
els += item[1] + '\n'
emp = 'empty: ' + self.empty + '\n'
return headline + midline + subsize + els + emp + endline
def __setitem__(self, key, value):
'''(Sudoku, (int, int), int/str) -> None
Assign value to self's key.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1[(0, 0)] = 5
>>> q1[(8, 8)] = '5'
>>> q1
Sudoku(
5 . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . 5
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
'''
self.show[key] = value
def __str__(self):
'''(Sudoku) -> str
Return the string representation of self.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> str(q1)
'....2....83.714.96.6.9.54.8.9.3.1..4.1.4.2..7.75...21...4...7.....5.7......196...'
>>> q1.solve_logically()
>>> str(q1)
'549628371832714596761935428298371654613452987475869213154283769986547132327196845'
'''
result = ''
for item in self.show.flatten():
result += item
return result
def all_missings(self):
'''(Sudoku) -> {str: {int: set of str}}
Return all missing values of all submatrices, rows, and columns
of self.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1.all_missings() == {
... 'submatrix': {
... 1: {'9', '7', '1', '2', '5', '4'},
... 2: {'3', '6', '8'},
... 3: {'7', '1', '2', '5', '3'},
... 4: {'8', '2', '3', '6', '4'},
... 5: {'9', '7', '8', '5', '6'},
... 6: {'9', '8', '5', '3', '6'},
... 7: {'9', '7', '1', '8', '2', '5', '3', '6'},
... 8: {'2', '3', '4', '8'},
... 9: {'9', '1', '8', '2', '5', '3', '6', '4'}
... },
... 'row': {
... 0: {'9', '7', '1', '8', '5', '3', '6', '4'},
... 1: {'2', '5'},
... 2: {'3', '7', '2', '1'},
... 3: {'7', '8', '2', '5', '6'},
... 4: {'9', '8', '5', '3', '6'},
... 5: {'9', '8', '3', '6', '4'},
... 6: {'9', '1', '8', '2', '5', '3', '6'},
... 7: {'9', '1', '8', '2', '3', '6', '4'},
... 8: {'7', '8', '2', '5', '3', '4'}
... },
... 'col': {
... 0: {'9', '7', '1', '2', '5', '3', '6', '4'},
... 1: {'2', '5', '4', '8'},
... 2: {'9', '7', '1', '8', '2', '3', '6'},
... 3: {'2', '6', '8'},
... 4: {'7', '8', '5', '3', '6', '4'},
... 5: {'9', '3', '8'},
... 6: {'9', '1', '8', '5', '3', '6'},
... 7: {'7', '8', '2', '5', '3', '6', '4'},
... 8: {'9', '1', '2', '5', '3'}
... }
... }
...
True
'''
n = self.n
result = {'submatrix': {}, 'row': {}, 'col': {}}
for i in range(n ** 2):
result['submatrix'].update({i + 1: self.missing(s = i + 1)})
result['row'].update({i: self.missing(r = i)})
result['col'].update({i: self.missing(c = i)})
return result
def candidates(self):
'''(Sudoku) -> Candidate
Return all numbers that can be entered at each entry of self
if that entry is self.empty.
>>> import sudsoln.questions as sq
>>> q6 = to_sudoku(sq.q6)
>>> q6
Sudoku(
. 3 | . 4
. . | . .
--------------+--------------
. . | 1 .
2 . | . .
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q6.candidates() == candidate.Candidate(
... {
... (0, 0): {'1'},
... (0, 2): {'2'},
... (1, 0): {'1', '4'},
... (1, 1): {'1', '2', '4'},
... (1, 2): {'2', '3'},
... (1, 3): {'1', '2', '3'},
... (2, 0): {'4', '3'},
... (2, 1): {'4'},
... (2, 3): {'2', '3'},
... (3, 1): {'1', '4'},
... (3, 2): {'4', '3'},
... (3, 3): {'3'}
... },
... elements = {1, 2, 3, 4}
... )
...
True
'''
n = self.n
empty = self.empty
elements = self.elements
entries = {}
for i in range(1, n ** 2, n): # e.g. n == 3 => 1, 4, 7
subm, subm_missing = {}, {}
for j in range(n): # define submatrices first
subm[i + j] = self.submatrix(i + j)
subm_missing[i + j] = self.missing(s = i + j)
for K in range(n): # iterate over rows of a binded submatrix
row_missing = self.missing(r = i + K - 1)
subm_index = 0
col_iters = list(range(n - 1, n ** 2, n))
for L in range(n ** 2): # iterate over columns
if self.show[(i + K - 1, L)] == empty:
col_missing = self.missing(c = L)
entries[(i + K - 1, L)] =\
subm_missing[i + subm_index].intersection(\
row_missing, col_missing
)
if L == col_iters[subm_index]:
subm_index += 1
return candidate.Candidate(entries, elements = elements)
def col(self, c):
'''(Sudoku, int) -> Array
Precondition: 0 <= c <= self.n ** 2 - 1
Return one of self.n ** 2 columns of self selected by c.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1.col(3).flatten()
['.', '7', '9', '3', '4', '.', '.', '5', '1']
'''
return self.show[:, c]
def copy(self):
'''(Sudoku) -> Sudoku
Return a deep copy of self.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1_cp = q1.copy()
>>> q1_cp == q1
True
>>> id(q1_cp) != id(q1)
True
>>> q1_cp[0, 0] = 5
>>> q1_cp.row(0)
Array([
['5', '.', '.', '.', '2', '.', '.', '.', '.']
])
>>> q1.row(0)
Array([
['.', '.', '.', '.', '2', '.', '.', '.', '.']
])
'''
puzzle_copy = self.show.copy()
return Sudoku(
puzzle_copy,
elements = self.elements,
empty = self.empty
)
def group(self, by):
'''(Sudoku, str) -> {int: Candidate}
Precondition: by in ['submatrix', 'row', 'col']
Return the candidate values grouped by 'by', which is either
'submatrix', 'row', or 'col'.
>>> import sudsoln.questions as sq
>>> q6 = to_sudoku(sq.q6)
>>> q6
Sudoku(
. 3 | . 4
. . | . .
--------------+--------------
. . | 1 .
2 . | . .
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q6.group(by = 'submatrix') == {
... 1: candidate.Candidate(
... {
... (0, 0): {'1'},
... (1, 0): {'4', '1'},
... (1, 1): {'4', '2', '1'}
... },
... elements = {1, 2, 3, 4}
... ),
... 2: candidate.Candidate(
... {
... (0, 2): {'2'},
... (1, 2): {'3', '2'},
... (1, 3): {'3', '2', '1'}
... },
... elements = {1, 2, 3, 4}
... ),
... 3: candidate.Candidate(
... {
... (2, 0): {'3', '4'},
... (2, 1): {'4'},
... (3, 1): {'4', '1'}
... },
... elements = {1, 2, 3, 4}
... ),
... 4: candidate.Candidate(
... {
... (2, 3): {'3', '2'},
... (3, 2): {'3', '4'},
... (3, 3): {'3'}
... },
... elements = {1, 2, 3, 4}
... )
... }
...
True
>>> q6.group(by = 'row') == {
... 0: candidate.Candidate(
... {
... (0, 0): {'1'},
... (0, 2): {'2'}
... },
... elements = {1, 2, 3, 4}
... ),
... 1: candidate.Candidate(
... {
... (1, 0): {'4', '1'},
... (1, 1): {'4', '2', '1'},
... (1, 2): {'3', '2'},
... (1, 3): {'3', '2', '1'}
... },
... elements = {1, 2, 3, 4}
... ),
... 2: candidate.Candidate(
... {
... (2, 0): {'3', '4'},
... (2, 1): {'4'},
... (2, 3): {'3', '2'}
... },
... elements = {1, 2, 3, 4}
... ),
... 3: candidate.Candidate(
... {
... (3, 1): {'4', '1'},
... (3, 2): {'3', '4'},
... (3, 3): {'3'}
... },
... elements = {1, 2, 3, 4}
... )
... }
...
True
>>> q6.group(by = 'col') == {
... 0: candidate.Candidate(
... {
... (0, 0): {'1'},
... (1, 0): {'4', '1'},
... (2, 0): {'3', '4'}
... },
... elements = {1, 2, 3, 4}
... ),
... 1: candidate.Candidate(
... {
... (1, 1): {'4', '2', '1'},
... (2, 1): {'4'},
... (3, 1): {'4', '1'}
... },
... elements = {1, 2, 3, 4}
... ),
... 2: candidate.Candidate(
... {
... (0, 2): {'2'},
... (1, 2): {'3', '2'},
... (3, 2): {'3', '4'}
... },
... elements = {1, 2, 3, 4}
... ),
... 3: candidate.Candidate(
... {
... (1, 3): {'3', '2', '1'},
... (2, 3): {'3', '2'},
... (3, 3): {'3'}
... },
... elements = {1, 2, 3, 4}
... )
... }
...
True
'''
if by not in ['submatrix', 'row', 'col']:
raise ValueError(
"by must be either 'submatrix', 'row', or 'col'."
)
return self.candidates().group(by)
def is_valid_answer(self):
'''(Sudoku) -> bool
Return True iff self is a valid sudoku answer, and False otherwise.
>>> q_small = [
... [ 1, '.', 3, '.'],
... ['.', 2, '.', '.'],
... ['.', '.', '.', '.'],
... ['.', '.', '.', 4]
... ]
...
>>> q_small = Sudoku(q_small)
>>> q_small
Sudoku(
1 . | 3 .
. 2 | . .
--------------+--------------
. . | . .
. . | . 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q_small.is_valid_answer()
False
>>> q_small.solve_logically()
>>> q_small
Sudoku(
1 4 | 3 2
3 2 | 4 1
--------------+--------------
4 1 | 2 3
2 3 | 1 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q_small.is_valid_answer()
True
'''
n = self.n
empty = self.empty
elements = self.elements
if empty in self.show.flatten(): # not even finished yet
return False
for i in range(n ** 2):
if elements != set(self.submatrix(i + 1).flatten()):
return False
elif elements != set(self.row(i).flatten()):
return False
elif elements != set(self.col(i).flatten()):
return False
return True
def itemset(self, entry, value):
'''(Sudoku, (int, int), int/str) -> None
Precondition:
1. value in self.elements
2. each int in entry is from 0 to self.n ** 2 - 1 inclusive.
Mutate entry number of self to value.
>>> q_small = [
... [ 1, '.', 3, '.'],
... ['.', 2, '.', '.'],
... ['.', '.', '.', '.'],
... ['.', '.', '.', 4]
... ]
...
>>> q_small = Sudoku(q_small)
>>> q_small
Sudoku(
1 . | 3 .
. 2 | . .
--------------+--------------
. . | . .
. . | . 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q_small.itemset((0, 1), 4)
>>> q_small
Sudoku(
1 4 | 3 .
. 2 | . .
--------------+--------------
. . | . .
. . | . 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
'''
self.show.itemset(entry, value)
def itemsets(self, entries):
'''(Sudoku, Candidate or {(int, int): set of ints/strs}) -> None
Precondition: each int in entries is exactly one element of
self.elements.
Mutate entry number of self according to values given in entries
if the value set has length 1.
>>> q_small = [
... [ 1, '.', 3, '.'],
... ['.', 2, '.', '.'],
... ['.', '.', '.', '.'],
... ['.', '.', '.', 4]
... ]
...
>>> q_small = Sudoku(q_small, elements = {'1', '2', '3', '4'})
>>> candids = q_small.candidates()
>>> candids == candidate.Candidate(
... {
... (0, 1): {'4'},
... (0, 3): {'2'},
... (1, 0): {'3', '4'},
... (1, 2): {'1', '4'},
... (1, 3): {'1'},
... (2, 0): {'3', '2', '4'},
... (2, 1): {'3', '1', '4'},
... (2, 2): {'1', '2'},
... (2, 3): {'3', '1', '2'},
... (3, 0): {'3', '2'},
... (3, 1): {'3', '1'},
... (3, 2): {'1', '2'}
... },
... elements = {1, 2, 3, 4}
... )
...
True
>>> q_small
Sudoku(
1 . | 3 .
. 2 | . .
--------------+--------------
. . | . .
. . | . 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q_small.itemsets(candids)
>>> q_small
Sudoku(
1 4 | 3 2
. 2 | . 1
--------------+--------------
. . | . .
. . | . 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
'''
if type(entries) == dict:
if entries == {}:
return None
for entry, values in entries.items():
if len(values) == 1:
self.itemset(entry, list(values)[0])
elif 'Candidate' in str(type(entries)):
elements = self.elements
if entries == candidate.Candidate({}, elements = elements):
return None
for entry, values in entries.items():
if len(values) == 1:
self.itemset(entry, list(values)[0])
def melt(self, include_empty = True):
'''(Sudoku, bool) -> Candidate
Return Candidate form of self, and include empty entries
as well if include_empty is True (by default).
>>> import numpy as np
>>> q_small = np.array([
... [ 1, '.', 3, '.'],
... ['.', 2, '.', '.'],
... ['.', '.', '.', '.'],
... ['.', '.', '.', 4]
... ])
...
>>> q_small = Sudoku(q_small)
>>> q_small.melt() == candidate.Candidate(
... {
... (0, 0): {'1'}, (0, 1): {'.'}, (0, 2): {'3'}, (0, 3): {'.'},
... (1, 0): {'.'}, (1, 1): {'2'}, (1, 2): {'.'}, (1, 3): {'.'},
... (2, 0): {'.'}, (2, 1): {'.'}, (2, 2): {'.'}, (2, 3): {'.'},
... (3, 0): {'.'}, (3, 1): {'.'}, (3, 2): {'.'}, (3, 3): {'4'}
... },
... elements = {1, 2, 3, 4}
... )
...
True
>>> q_small.melt(include_empty = False) == candidate.Candidate(
... {
... (0, 0): {'1'}, (0, 2): {'3'}, (1, 1): {'2'}, (3, 3): {'4'}
... },
... elements = {1, 2, 3, 4}
... )
...
True
'''
n = self.n
empty = self.empty
result = {}
for i in range(n ** 2):
for j in range(n ** 2):
result[(i, j)] = {self.show[(i, j)]}
if not include_empty:
result_copy = result.copy()
for k, v in result_copy.items():
if list(v)[0] == empty:
result.pop(k)
return candidate.Candidate(result, elements = self.elements)
def missing(self, s = None, r = None, c = None):
'''(Sudoku[, int, int, int]) -> set of str
Precondition:
1. 1 <= s <= self.n ** 2
2. 0 <= r <= self.n ** 2 - 1
3. 0 <= c <= self.n ** 2 - 1
Return all missing values of self at the specified submatrix
number s, the specified row number r, or the specified column
number c.
If s is specified, then r and c will be ignored;
if s is None and r is specified, then c will be ignored;
If none of s, r, or c is specified, the function returns None.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1.missing(s = 1) == {'2', '5', '4', '9', '7', '1'}
True
>>> q1.missing(r = 3) == {'6', '2', '8', '5', '7'}
True
>>> q1.missing(c = 8) == {'3', '2', '5', '9', '1'}
True
'''
elements = self.elements
if s is not None:
return elements.difference(set(self.submatrix(s).flatten()))
elif r is not None:
return elements.difference(set(self.row(r).flatten()))
elif c is not None:
return elements.difference(set(self.col(c).flatten()))
def row(self, r):
'''(Sudoku, int) -> Array
Precondition: 0 <= r <= self.n ** 2 - 1
Return one of self.n ** 2 rows of self selected by r.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1.row(2)
Array([
['.', '6', '.', '9', '.', '5', '4', '.', '8']
])
'''
return self.show[r, :]
def solve(self, max_trial = 200, quietly = False, seed = None):
'''(Sudoku, int, bool[, int]) -> str, int
Mutate self to the answer form, or until max_trial is met, and
return the time it took to compute the answer and the number of
trials used. seed can be given for reproducibility. Set
quietly = True to display no messages.
'''
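# Illustrative usage (not part of the original source; the elapsed time and
# the number of trials vary because the brute-force stage is randomized):
#     q1 = to_sudoku(sq.q1)
#     elapsed, trials = q1.solve(quietly = True, seed = 7)
#     q1.is_valid_answer()   # True once solving succeeds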
trial = 0
import datetime
n = self.n
empty = self.empty
start = datetime.datetime.now()
self.solve_logically()
sudoku_copy = self.copy()
sudoku_copy_melted = sudoku_copy.melt()
if empty in self.show.flatten():
if not quietly:
msg = "Logical approaches weren't enough. " +\
"Solving with a brute force..."
print(msg)
trial += self.solve_forcefully(
max_trial = max_trial,
quietly = quietly,
seed = seed
)
end = datetime.datetime.now()
if self.is_valid_answer():
return str(end - start), trial
else:
if not quietly:
print('Mission failed; max_trial of', max_trial, 'met.')
self.itemsets(sudoku_copy_melted)
return str(end - start), max_trial
def solve_by_hidden_pairs(self, by = 'submatrix', start = None):
'''(Sudoku, str[, Candidate]) -> Candidate
Mutate self using the hidden pairs method based on 'by'. Starting
candidate can be specified with 'start' argument; if start is
None, then self.candidates() will be the starting point.
'''
return self.solve_by(
by = by, start = start,
condition = ['contains', 2], deep = True
)
def solve_by(
self,
by,
start = None,
condition = ['contains', 1],
deep = False
):
'''(Sudoku, str[, Candidate, [str, int], bool]) -> Candidate
Precondition: by in ['row', 'col', 'submatrix']
Solve self by methods involving pairs, triples, or a higher order.
'''
elements = self.elements
bases = ['row', 'col', 'submatrix']
bases.remove(by)
names = bases
if start is None:
candidates_global = self.candidates()
candidates_group = self.group(by = by)
else:
candidates_global = start
candidates_group = start.group(by = by)
changing = True
while changing:
sudoku_copy = self.copy()
etm = candidate.Candidate({}, elements = elements)
cg_cp = candidates_group.copy()
for V in cg_cp.values():
appearances = V.appearances(names)
appearances.sieve(condition = condition, deep = deep)
candidates_global.refine(
etm,
appearances = appearances,
condition = condition,
deep = deep
)
self.itemsets(etm)
self.itemsets(candidates_global)
candidates_group = candidates_global.group(by = by)
if self == sudoku_copy and cg_cp == candidates_group:
changing = False
return candidates_global
def solve_by_pointing_pairs(self, by = 'submatrix', start = None):
'''(Sudoku, str[, Candidate]) -> Candidate
Precondition: by in ['row', 'col', 'submatrix']
Say bases = ['row', 'col', 'submatrix'], and one item is removed by
bases.remove(by). Define the two leftovers in bases as group1 and
group2 respectively. This method eliminates candidate numbers in
other entries of the same group1 (e.g. rows) or group2
(e.g. columns) based on entries of 'by' (e.g. submatrix) it
belongs, mutate self into the closest answer form, and return a
refined Candidate (better than self.candidates() in a sense that
it has fewer, equal at worst, candidate numbers at each entry)
based on iterations. Starting candidate can be specified with
'start' argument; if start is None, then self.candidates() will be
the starting point.
'''
return self.solve_by(
by = by, start = start,
condition = ['contains', 1], deep = False
)
def solve_forcefully(
self,
max_trial = 300,
quietly = False,
seed = None
):
'''(Sudoku, int, bool[, int or None]) -> int
Try out candidate numbers in each entry randomly until self is
mutated into the answer form, or until max_trial is met. seed
can be given for reproducibility. Set quietly = True if you don't
want to display any messages.
'''
import random
if seed is not None:
random.seed(seed)
trial = 1
empty = self.empty
sudoku_melt = self.melt()
while empty in self.show.flatten():
if empty not in self.show.flatten():
return trial
entries = self.solve_logically()
if set() in list(entries.values()):
if not quietly:
print(
"Trial number", trial,
"out of", max_trial, "failed;",
"proceeding to the next trial..."
)
trial += 1
if trial == max_trial:
return max_trial
self.itemsets(sudoku_melt)
else:
keys = list(entries.keys()); keys.sort()
guess = random.choice(list(entries[keys[0]]))
self.itemset(keys[0], guess)
self.solve_logically()
if empty not in self.show.flatten() and \
not self.is_valid_answer():
self.itemsets(sudoku_melt)
return trial
def solve_globally(self):
'''(Sudoku) -> None
Find the only possible number at each entry of self, plug it
into that entry, and repeat the process until no new mutation
is made.
'''
changing = True
while changing:
sudoku_copy = self.copy()
possible_numbers = self.candidates()
for k, v in possible_numbers.items():
if len(v) == 1:
self.itemset(k, list(v)[0])
if sudoku_copy == self:
changing = False
def solve_locally(self, by):
'''(Sudoku, str) -> None
Precondition: by in ['submatrix', 'row', 'col']
Find the unique candidate number within each 'by' of self,
plug that number into that entry, and repeat the process across
every other groups until no new mutation is made.
'''
changing = True
while changing:
sudoku_copy = self.copy()
possible_numbers = self.unique_candidates(by = by)
for k, v in possible_numbers.items():
if len(v) == 1:
self.itemset(k, list(v)[0])
if sudoku_copy == self:
changing = False
def solve_logically(self):
'''(Sudoku) -> Candidate or None
Mutate self to the answer form as close as possible (that is,
having the least number of empty's), using only logical
approaches that don't involve randomness or brute force in number
assignment. Return a Candidate if the .solve_by*() methods were involved,
and None otherwise.
'''
empty = self.empty
sudoku_copy = self.copy()
bases = ['submatrix', 'row', 'col']
not_ready = True
there_is_a_progress = True
while there_is_a_progress:
sudoku_copy_after_iter = self.copy()
self.solve_globally()
if empty not in str(self):
return None
for component in bases:
self.solve_locally(by = component)
self.solve_globally()
if empty not in str(self):
return None
start = self.solve_by_pointing_pairs()
for component2 in bases:
self.solve_by_hidden_pairs(by = component2, start = start)
self.solve_by_pointing_pairs(start = start)
if (sudoku_copy == self or sudoku_copy_after_iter == self):
there_is_a_progress = False
return start
def submatrix(self, s):
'''(Sudoku, int) -> Array
Precondition: 1 <= s <= self.n ** 2
Return one of self.n ** 2 submatrices of self selected by s.
>>> import sudsoln.questions as sq
>>> q1 = to_sudoku(sq.q1)
>>> q1
Sudoku(
. . . | . 2 . | . . .
8 3 . | 7 1 4 | . 9 6
. 6 . | 9 . 5 | 4 . 8
-------------------+-------------------+-------------------
. 9 . | 3 . 1 | . . 4
. 1 . | 4 . 2 | . . 7
. 7 5 | . . . | 2 1 .
-------------------+-------------------+-------------------
. . 4 | . . . | 7 . .
. . . | 5 . 7 | . . .
. . . | 1 9 6 | . . .
n: 3
elements: 1, 2, 3, 4, 5, 6, 7, 8, 9
empty: .
)
>>> q1.submatrix(1)
Array([
['.', '.', '.'],
['8', '3', '.'],
['.', '6', '.']
])
>>> q1.submatrix(3)
Array([
['.', '.', '.'],
['.', '9', '6'],
['4', '.', '8']
])
>>> q1.submatrix(4)
Array([
['.', '9', '.'],
['.', '1', '.'],
['.', '7', '5']
])
'''
n = self.n
number = 0
for i in range(n, n ** 2 + 1, n):
for j in range(n, n ** 2 + 1, n):
number += 1
if number == s:
return self.show[(i - n):(i), (j - n):(j)]
def unique_candidates(self, by):
'''(Sudoku, str) -> Candidate
Precondition: by in ['submatrix', 'row', 'col']
Return the unique candidate number at each entry, within each
group of self, grouped by 'by'.
>>> q_small = '1.3..2.........4'
>>> q_small = to_sudoku(q_small, elements = {1, 2, 3, 4})
>>> q_small
Sudoku(
1 . | 3 .
. 2 | . .
--------------+--------------
. . | . .
. . | . 4
n: 2
elements: 1, 2, 3, 4
empty: .
)
>>> q_small.unique_candidates('submatrix') ==\\
... candidate.Candidate({
... (0, 1): set(), (0, 3): {'2'},
... (1, 0): {'3'}, (1, 2): {'4'}, (1, 3): set(),
... (2, 0): set(), (2, 1): set(), (2, 2): set(), (2, 3): {'3'},
... (3, 0): set(), (3, 1): set(), (3, 2): set()
... },
... elements = {1, 2, 3, 4}
... )
...
True
>>> q_small.unique_candidates('row') ==\\
... candidate.Candidate({
... (0, 1): {'4'}, (0, 3): {'2'},
... (1, 0): {'3'}, (1, 2): set(), (1, 3): set(),
... (2, 0): set(), (2, 1): set(), (2, 2): set(), (2, 3): set(),
... (3, 0): set(), (3, 1): set(), (3, 2): set()
... },
... elements = {1, 2, 3, 4}
... )
...
True
>>> q_small.unique_candidates('col') ==\\
... candidate.Candidate({
... (0, 1): set(), (0, 3): set(),
... (1, 0): set(), (1, 2): {'4'}, (1, 3): set(),
... (2, 0): set(), (2, 1): set(), (2, 2): set(), (2, 3): {'3'},
... (3, 0): set(), (3, 1): set(), (3, 2): set()
... },
... elements = {1, 2, 3, 4}
... )
...
True
'''
n = self.n
start = self.group(by = by)
elements = self.elements
result = candidate.Candidate({}, elements = elements)
for V in start.values():
keys = list(V.keys()); keys.sort() # sorting is unnecessary
for i in range(len(keys)):
blacklist, the_rest = [], set()
blacklist.append(keys[i])
for k, v in V.items():
if k not in blacklist:
the_rest.update(v)
possible_nums = V[keys[i]].difference(the_rest)
result.update({keys[i]: possible_nums})
return result
def change_empty(array, old, new):
'''(2d-array of objects, str, str) -> None
Precondition: len(new) == 1
Mutate array by replacing olds with new.
>>> eg = [
... ['1', '.', '3', '.'],
... ['.', '2', '.', '.'],
... ['.', '.', '.', '.'],
... ['.', '.', '.', '4']
... ]
...
>>> change_empty(eg, '.', ' ')
>>> eg == [
... ['1', ' ', '3', ' '],
... [' ', '2', ' ', ' '],
... [' ', ' ', ' ', ' '],
... [' ', ' ', ' ', '4']
... ]
...
True
'''
assert len(new) == 1, 'len(new) != 1'
ch_old = lambda x: new if x == old else x
if 'ndarray' in str(type(array)) or 'list' in str(type(array)):
for i in range(len(array)):
array[i] = list(map(ch_old, array[i]))
elif 'Array' in str(type(array)):
shape = array.shape
for i in range(len(array.show)):
array.show[i] = list(map(ch_old, array.show[i]))
else:
raise TypeError(str(type(array)) + ' not supported')
def fprint(Sudoku):
'''(Sudoku) -> None
Print out the formatted version of Sudoku.
'''
n = Sudoku.n
the_range = range(0, n ** 4 + 1, n ** 2)
item = ''
result = ''
sudoku_lst = list(str(Sudoku))
for c in enumerate(sudoku_lst):
item += c[1]
if c[0] != 0 and c[0] + 1 in the_range:
if c[0] != n ** 4 - 1:
result += item + '\n'
else:
result += item + ''
item = ''
print(result)
def to_sudoku(sudoku_str, elements = None, empty = '.'):
'''(str[, {objects} or None, str]) -> Sudoku
Preconditions if elements is not None:
1. set(list(sudoku_str)).issubset(elements.union(empty))
2. len(elements) == len(sudoku_str) ** .5
3. All elements in elements has len 1, as well as empty.
Return the Sudoku object of sudoku_str if it is a string
representation of Sudoku.
'''
n = int(len(sudoku_str) ** .25)
array = sarray.Array(list(sudoku_str[:(n**4)])).reshape(n**2, n**2)
return Sudoku(
array = array,
elements = elements,
empty = empty
)
if __name__ == '__main__':
import doctest
doctest.testmod()
``` |
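A short end-to-end sketch of the public API defined above. It is illustrative only; the import paths follow the file layout shown (`sudsoln/sudoku.py`, `sudsoln/questions.py`) and may differ in the packaged release:
```python
import sudsoln.questions as sq
from sudsoln.sudoku import to_sudoku, fprint

q = to_sudoku(sq.q1)                             # build a Sudoku from its string form
elapsed, trials = q.solve(quietly=True, seed=7)  # logical pass, then brute force if needed
print(q.is_valid_answer(), elapsed, trials)
fprint(q)                                        # print the solved grid row by row
```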
{
"source": "Joon7891/Competitive-Programming",
"score": 3
} |
#### File: Competitive-Programming/CCC/CCC '08 S3 - Maze.py
```python
y_size, x_size = 0, 0
def in_maze(x, y):
return -1 < x and x < x_size and -1 < y and y < y_size
def execute(maze):
to_visit = [[0, 0]]
visited = []
length = 1
while len(to_visit) > 0:
new_to = []
for node in to_visit:
x, y = node[0], node[1]
#Check
if x == x_size - 1 and y == y_size - 1:
return length
#Up
if (maze[y][x] == '+' or maze[y][x] == '|') and in_maze(x, y - 1) and not [x, y - 1] in visited and maze[y - 1][x] != '*':
if not [x, y - 1] in new_to:
new_to.append([x, y - 1])
#Down
if (maze[y][x] == '+' or maze[y][x] == '|') and in_maze(x, y + 1) and not [x, y + 1] in visited and maze[y + 1][x] != '*':
if not [x, y + 1] in new_to:
new_to.append([x, y + 1])
#Right
if (maze[y][x] == '+' or maze[y][x] == '-') and in_maze(x + 1, y) and not [x + 1, y] in visited and maze[y][x + 1] != '*':
if not [x + 1, y] in new_to:
new_to.append([x + 1, y])
#Left
if (maze[y][x] == '+' or maze[y][x] == '-') and in_maze(x - 1, y) and not [x - 1, y] in visited and maze[y][x - 1] != '*':
if not [x - 1, y] in new_to:
new_to.append([x - 1, y])
visited.append([x, y])
to_visit = new_to[:]
length += 1
return -1
n = int(input())
for i in range(n):
y_size = int(input())
x_size = int(input())
maze = []
for i in range(y_size):
maze.append([char for char in input()])
print(execute(maze))
```
#### File: Competitive-Programming/ECOO/ECOO '15 R1 P4 - Neanderthal Numbers.py
```python
words = ["ook", "ookook", "oog", "ooga", "ug", "mook", "mookmook", "oogam", "oogum", "ugug"]
memo = {}
def combinations(testWord):
if testWord in memo:
return memo[testWord]
newCount = 0
if len(testWord) == 0:
return 1
for newWord in words:
if testWord.startswith(newWord):
newCount += combinations(testWord.replace(newWord, "", 1))
memo[testWord] = newCount
return newCount
for i in range(10):
print(combinations(input()))
```
#### File: Competitive-Programming/ECOO/ECOO '19 R2 P1 - Email.py
```python
def solve():
n = int(input())
total = set()
for i in range(n):
email = input()
at = 0
for i in range(len(email)):
if email[i] == '@':
at = i
new = str()
plus_flag = False
for i in range(len(email)):
if email[i] == '+':
plus_flag = True
elif i < at and email[i] != '.' and not plus_flag:
new += email[i].lower()
elif i >= at:
new += email[i].lower()
total.add(new)
return len(total)
for _ in range(10):
print(solve())
```
#### File: Competitive-Programming/Miscellaneous/Bubble Sort.py
```python
def output(a):
size = len(a)
final = str()
for i in range(size):
final += str(a[i]) + ' '
return final.strip()
n = int(input())
a = [int(x) for x in input().split()]
print(output(a))
for i in range(n):
for j in range(n - 1):
if a[j] > a[j + 1]:
swap = a[j]
a[j] = a[j + 1]
a[j + 1] = swap
print(output(a))
```
#### File: Competitive-Programming/Miscellaneous/Non-Strategic Bombing.py
```python
def area(x1, y1, x2, y2, x3, y3):
return abs((x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)) / 2.0)
def solve(x1, y1, x2, y2, x3, y3, x, y):
total = area (x1, y1, x2, y2, x3, y3)
a1 = area (x, y, x2, y2, x3, y3)
a2 = area (x1, y1, x, y, x3, y3)
a3 = area (x1, y1, x2, y2, x, y)
if total == (a1 + a2 + a3):
return True
return False
n, m = map(int, input().split())
points = []
for i in range(n):
points.append([int(x) for x in input().split()])
for i in range(m):
counter = 0
a = [int(x) for x in input().split()]
for point in points:
if solve(a[0], a[1], a[2], a[3], a[4], a[5], point[0], point[1]):
counter += 1
print(counter)
``` |
{
"source": "Joon7891/Stock-Tracker",
"score": 3
} |
#### File: Joon7891/Stock-Tracker/stock.py
```python
from yahoo_fin import stock_info as si
class Stock:
def __init__(self, ticker):
self.ticker = ticker
self.day_open = round(si.get_quote_table(ticker)['Open'], 3)
self.cur_price = round(si.get_live_price(ticker), 3)
self.day_change = round(self.day_open - self.cur_price, 3)
self.percent_change = 100 * self.day_change / self.day_open  # percentage, consistent with update()
def update(self):
self.cur_price = si.get_live_price(self.ticker)
self.day_change = round(self.day_open - self.cur_price, 3)
self.percent_change = 100 * self.day_change / self.day_open
def __str__(self):
day_open = '{0:.3f}'.format(self.day_open)
cur_price = '{0:.3f}'.format(self.cur_price)
day_change = '{0:.3f}'.format(self.day_change)
percent_change = '{0:.3f}'.format(self.percent_change)
return '{0:>10}{1:>15}{2:>15}{3:>15}{4:>15}'.format(self.ticker, day_open, cur_price, day_change, percent_change)
``` |
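A brief usage sketch (not part of the repository): building `Stock` objects and printing them under a header that matches the column layout of `__str__`. Live quotes require network access and a working `yahoo_fin`, and the tickers are placeholders:
```python
from stock import Stock

# Header matching the field order used in Stock.__str__
print('{0:>10}{1:>15}{2:>15}{3:>15}{4:>15}'.format(
    'Ticker', 'Open', 'Price', 'Change', '% Change'))

for ticker in ('AAPL', 'MSFT'):   # placeholder tickers
    s = Stock(ticker)             # fetches live data via yahoo_fin
    s.update()
    print(s)
```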
{
"source": "Joon7891/UW-Course-Scraper",
"score": 4
} |
#### File: Joon7891/UW-Course-Scraper/main.py
```python
from course import get_course
def main():
print("Course Name:")
code, num = input().split()
course = get_course(code, num)
if course is None:
print("Course Not Found")
else:
print(course)
if __name__ == "__main__":
main()
``` |
{
"source": "joonahn/taxonomy-assign-frontend",
"score": 3
} |
#### File: joonahn/taxonomy-assign-frontend/server.py
```python
import os
import os.path
import shutil
import json
from flask import current_app, request
BASE_DIR = os.path.dirname(__file__)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
UPLOAD_DIRECTORY = os.path.join(MEDIA_ROOT, 'upload')
CHUNKS_DIRECTORY = os.path.join(MEDIA_ROOT, 'chunks')
# Utils
##################
def make_response(status=200, content=None):
""" Construct a response to an upload request.
Success is indicated by a status of 200 and { "success": true }
contained in the content.
Also, content-type is text/plain by default since IE9 and below chokes
on application/json. For CORS environments and IE9 and below, the
content-type needs to be text/html.
"""
return current_app.response_class(json.dumps(content,
indent=None if request.is_xhr else 2), mimetype='text/plain')
def validate(attrs):
""" No-op function which will validate the client-side data.
Werkzeug will throw an exception if you try to access an
attribute that does not have a key for a MultiDict.
"""
try:
#required_attributes = ('qquuid', 'qqfilename')
#[attrs.get(k) for k,v in attrs.items()]
return True
except Exception as e:
return False
def handle_delete(uuid):
""" Handles a filesystem delete based on UUID."""
location = os.path.join(UPLOAD_DIRECTORY, uuid)
print(uuid)
print(location)
shutil.rmtree(location)
def handle_upload(f, attrs):
""" Handle a chunked or non-chunked upload.
"""
chunked = False
dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])
dest = os.path.join(dest_folder, attrs['qqfilename'])
# Chunked
if 'qqtotalparts' in attrs and int(attrs['qqtotalparts']) > 1:
chunked = True
dest_folder = os.path.join(CHUNKS_DIRECTORY, attrs['qquuid'])
dest = os.path.join(dest_folder, attrs['qqfilename'], str(attrs['qqpartindex']))
save_upload(f, dest)
if chunked and (int(attrs['qqtotalparts']) - 1 == int(attrs['qqpartindex'])):
combine_chunks(attrs['qqtotalparts'],
attrs['qqtotalfilesize'],
source_folder=os.path.dirname(dest),
dest=os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'],
attrs['qqfilename']))
shutil.rmtree(os.path.dirname(os.path.dirname(dest)))
def save_upload(f, path):
""" Save an upload.
Uploads are stored in media/uploads
"""
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path))
with open(path, 'wb+') as destination:
destination.write(f.read())
def combine_chunks(total_parts, total_size, source_folder, dest):
""" Combine a chunked file into a whole file again. Goes through each part
, in order, and appends that part's bytes to another destination file.
Chunks are stored in media/chunks
Uploads are saved in media/uploads
"""
if not os.path.exists(os.path.dirname(dest)):
os.makedirs(os.path.dirname(dest))
with open(dest, 'wb+') as destination:
for i in range(int(total_parts)):
part = os.path.join(source_folder, str(i))
with open(part, 'rb') as source:
destination.write(source.read())
``` |
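The module above only defines upload helpers; the Flask routes that call them are not included in this listing. A hypothetical wiring is sketched below — the endpoint names and the `qqfile` field name (Fine Uploader's default) are assumptions, not taken from the project:
```python
# Hypothetical route wiring for the helpers above; names are illustrative only.
from flask import Flask, request
import server   # the module shown above

app = Flask(__name__)

@app.route('/upload', methods=['POST'])
def upload():
    if server.validate(request.form):
        server.handle_upload(request.files['qqfile'], request.form)
        return server.make_response(content={'success': True})
    return server.make_response(status=400, content={'success': False})

@app.route('/upload/<uuid>', methods=['DELETE'])
def delete(uuid):
    server.handle_delete(uuid)
    return server.make_response(content={'success': True})
```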
{
"source": "joonamo/highres-miku",
"score": 2
} |
#### File: highres-miku/server/async_http_client.py
```python
import httpx
import asyncio
_client = None
def getClient():
global _client
if _client is None:
_client = httpx.AsyncClient()
return _client
async def deleteClient():
global _client
if _client is not None:
await _client.aclose()
``` |
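A minimal usage sketch (illustrative): the shared client is created lazily on first use and closed once on shutdown:
```python
import asyncio
from async_http_client import getClient, deleteClient

async def fetch(url):
    client = getClient()              # reuses one shared AsyncClient
    response = await client.get(url)
    return response.status_code

async def main():
    print(await fetch("https://example.com"))   # placeholder URL
    await deleteClient()              # close the shared client on shutdown

asyncio.run(main())
```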
{
"source": "joonamo/hsl-stop-monitor",
"score": 3
} |
#### File: joonamo/hsl-stop-monitor/hsl.py
```python
import requests
import datetime
def parse_vehicle_code(code):
# TODO: Currently everything is a bus
return "%d%s" % (int(code[1:4]), code[4].strip())
def parse_datetime(time, date):
if len(time) == 3:
time = "0%s" % time
# HSL has more hours in a day than a anyone else
delta = datetime.timedelta()
if int(time[0:2]) >= 24:
delta = datetime.timedelta(days = 1)
time = "%02d%s" % (int(time[0:2]) % 24, time[2:])
return datetime.datetime.strptime(
"%s %s" % (time, date),
"%H%M %Y%m%d") + delta
class hsl_system(object):
def __init__(self, username, password):
super(hsl_system, self).__init__()
self.username = str(username)
self.password = str(password)
self.base_url = "http://api.reittiopas.fi/hsl/prod/?user=%s&pass=%s&epsg_in=<PASSWORD>&epsg_out=<PASSWORD>&format=json" % (self.username, self.password)
def get_stop_info(self, stop_code, extra_params = ""):
url = "%s&request=stop&result_contains=stop&code=%s&%s" % (self.base_url, str(stop_code), extra_params)
r = requests.get(url)
if not(r.ok):
raise Exception(
"Failed to get stop info from url '%s'" % url ,
"Reason: %s, Text from HSL: %s" % (r.reason, r.text))
return r.json()[0]
def get_departures(self, stop_code, dep_limit = 10, time_limit = 360):
stop = self.get_stop_info(stop_code, ("dep_limit=%d&time_limit=%d" % (dep_limit, time_limit)))
departures = stop["departures"]
return [{"code": parse_vehicle_code(d["code"]), "time": parse_datetime(str(d["time"]), str(d["date"]))} for d in departures]
``` |
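A usage sketch (illustrative only): it assumes valid credentials for the old HSL/Reittiopas API that the module targets, and the stop code is a placeholder:
```python
from hsl import hsl_system

hsl = hsl_system("my_username", "my_password")                 # placeholder credentials
for departure in hsl.get_departures("1020105", dep_limit=5):   # placeholder stop code
    print(departure["code"], departure["time"].strftime("%H:%M"))
```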
{
"source": "joonamo/maze_robot",
"score": 3
} |
#### File: joonamo/maze_robot/bot_rider_server.py
```python
import urllib.parse
import http.server
import socketserver
import json
import traceback
import sys
import serial
import re
import os
import getopt
PORT = 8000
opts, args = getopt.getopt(sys.argv[1:], "p:", ["cvd="])
for o, a in opts:
if o == "-p":
PORT = int(a)
elif o == "--cvd":
os.chdir(a)
last_request = None
ser = serial.Serial('/dev/ttyAMA0', baudrate = 115200, timeout=0.1)
status_pattern = re.compile("MANUAL: ([-+]?\d+), L ([-+]?\d+), F ([-+]?\d+), R ([-+]?\d+), dir ([-+]?\d+), speed ([-+]?\d+), left_mapped ([-+]?\d+), right_mapped ([-+]?\d+)")
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def do_GET(self):
print("Request!")
print(self.path)
parsed_path = urllib.parse.urlparse(self.path)
if parsed_path.path == "/status":
queries = urllib.parse.parse_qs(parsed_path.query)
response = {}
if "speed" in queries:
print("speed: %s" % str(queries["speed"][0]))
ser.write((u"s%ds" % int(float(queries["speed"][0]))).encode("UTF-8"))
response["speed"] = queries["speed"]
if "dir" in queries:
print("dir: %s" % str(queries["dir"][0]))
ser.write((u"d%dd" % int(float(queries["dir"][0]))).encode("UTF-8"))
response["dir"] = queries["dir"]
ser.reset_input_buffer()
ser.write(b"q")
r = str(ser.readline())
print("Status: %s" % (r,))
status_table = {"raw_status": str(r)}
try:
s = re.search(status_pattern, r).groups()
status_table["manual"] = int(s[0])
status_table["dist_l"] = int(s[1])
status_table["dist_f"] = int(s[2])
status_table["dist_r"] = int(s[3])
status_table["dir"] = int(s[4])
status_table["speed"] = int(s[5])
status_table["left_mapped"] = int(s[6])
status_table["right_mapped"] = int(s[7])
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
traceback.print_exception(exc_type, exc_value, exc_traceback)
response["status"] = status_table
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('Access-Control-Allow-Origin', 'http://127.0.0.1:8000')
self.end_headers()
self.wfile.write(json.dumps(response).encode("UTF-8"))
elif self.path == "/toggle_manual":
ser.write(b"a")
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('Access-Control-Allow-Origin', 'http://127.0.0.1:8000')
self.end_headers()
self.wfile.write("{}".encode("UTF-8"))
else:
http.server.SimpleHTTPRequestHandler.do_GET(self)
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", PORT), RequestHandler)
print("serving at port", PORT)
httpd.serve_forever()
``` |
{
"source": "joonamo/photoplaces",
"score": 2
} |
#### File: photoplaces/photoplaces_web/clustering_db_proxy.py
```python
from django.forms.models import model_to_dict
from copy import copy
class ClusteringDBProxy:
fields_to_save = []
one_to_one_relationships_to_save = []
def __init__(self, db_entry):
self.db_entry = db_entry
self.values = model_to_dict(db_entry)
def set_up_relations(self, **kwargs):
pass
def save_to_db(self):
for field in self.fields_to_save:
setattr(self.db_entry, field, self.values[field])
for rel in self.one_to_one_relationships_to_save:
setattr(self.db_entry, rel, self.values[rel]["id"])
self.db_entry.save()
class RunProxy(ClusteringDBProxy):
def set_up_relations(self, **kwargs):
normalized_centers = kwargs["normalized_centers"]
self.values["normalized_set"] = normalized_centers[self.values["normalized_set"]]
class ClusterProxy(ClusteringDBProxy):
def set_up_relations(self, **kwargs):
normalized_entries = kwargs["normalized_entries"]
relations = []
for e in self.values["normalized_entries"]:
relations.append(normalized_entries[e])
self.values["normalized_entries"] = copy(relations)
photos = kwargs["photos"]
relations = []
for e in self.values["photos"]:
relations.append(photos[e])
self.values["photos"] = copy(relations)
self.values["normalized_centers"] = kwargs["normalized_centers"][self.values["normalized_centers"]]
def save_to_db(self):
super(ClusterProxy, self).save_to_db()
self.db_entry.clear_normalized_entries()
self.db_entry.add_normalized_entries_from_keys(
[e.id for e in self.values["normalized_entries"]],
[e.id for e in self.values["photos"]])
class PhotoProxy(ClusteringDBProxy):
pass
class NormalizedEntryProxy(ClusteringDBProxy):
def set_up_relations(self, **kwargs):
photos = kwargs["photos"]
self.values["actual_photo"] = photos[self.values["actual_photo"]]
class NormalizedSetProxy(ClusteringDBProxy):
pass
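# Illustrative flow (added commentary; the variable names are assumptions based on how
# these proxies are used in k_means.py):
#   run_proxy = RunProxy(run_db_entry)
#   run_proxy.set_up_relations(normalized_centers=normalized_centers_by_id)
#   run_proxy.values["status"] = "W"
#   run_proxy.save_to_db()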
```
#### File: photoplaces/photoplaces_web/k_means.py
```python
from models import *
import traceback
import time
from datetime import datetime
import sys
from django.db import models
import numpy as np
import math_functions.cyclical_math as cm
from Queue import Queue
from threading import Thread
import math_functions.normalization as norm
from django.forms.models import model_to_dict
class KMeans:
def __init__(self, run = None):
if run:
self.run = run
else:
self.run = PhotoClusterRun(
algorithm = "KM")
def write_message(self, msg):
print(msg)
#self.run.write_message(msg)
def set_up(self, qs, k):
# Set up
self.write_message("Setting up...")
self.run.status = "W"
self.run.normalized_set = qs[0].normalized_set
self.run.save()
print("Creating %d clusters with %d entries..." % (k, qs.count()))
# K-means++ initialization
centers = []
# choose the very first cluster center
all_points = qs.all()
if self.run.clusters.all().count() == 0:
first = np.random.choice(qs)
centers.append((first.location_x, first.location_y, first.month, {}))
PhotoCluster.create_cluster(
self.run,
first.actual_photo,
first)
print("First center created")
else:
# For some reason there already is centers, why u set up again?
print("%d clusters already exist" % (self.run.clusters.all().count(),))
for center in self.run.clusters.all():
some_photo = center.normalized_entries.all()[0]
try:
centers.append((some_photo.location_x, some_photo.location_y, some_photo.month, {}))
except Exception as e:
print("Something was pretty wrong with pre-existing center:")
exc_type, exc_value, exc_traceback = sys.exc_info()
for s in traceback.format_exception(exc_type, exc_value, exc_traceback):
self.write_message(s)
while self.run.clusters.all().count() < k:
# calculate distance to closest center
def get_weight(point):
closest = float("inf")
for center in centers:
d = center[3].get(point)
if d is None:
d = np.sqrt(
norm.dist(point.location_x, center[0]) ** 2 +
norm.dist(point.location_y, center[1]) ** 2)
center[3][point] = d
if d < closest:
closest = d
# weight each point by its distance to the nearest existing center (k-means++ seeding)
return closest
# Choose more centers
weights = np.array([get_weight(p) for p in all_points])
weights /= weights.sum()
new_center = np.random.choice(qs, p = weights)
centers.append((new_center.location_x, new_center.location_y, new_center.month, {}))
PhotoCluster.create_cluster(
self.run,
new_center.actual_photo,
new_center)
print("%d/%d cluster centers initialized" % (len(centers), k))
print("Updating centers...")
self.update_all_cluster_centers()
print("Cluster centers created, running one iteration...")
self.process_iteration(normalized_entries = qs.values())
print("\"That took forever!\" - <NAME>")
print("Set up done")
def simple_visualization(self, **kwargs):
import matplotlib.pyplot as plt
x = np.array([])
y = np.array([])
c = np.array([])
center_x = np.array([])
center_y = np.array([])
center_c = np.array([])
center_month = np.array([])
for cluster in self.run.clusters.all().prefetch_related("normalized_entries", "normalized_centers"):
normalized_entries = cluster.normalized_entries.all().values()
x = np.concatenate((x, [e["location_x"] for e in normalized_entries]))
y = np.concatenate((y, [e["location_y"] for e in normalized_entries]))
c = np.concatenate((c, np.ones(len(normalized_entries)) * cluster.pk))
center_x = np.concatenate((center_x, [cluster.normalized_centers.location_x_mean]))
center_y = np.concatenate((center_y, [cluster.normalized_centers.location_y_mean]))
center_c = np.concatenate((center_c, [cluster.pk]))
center_month = np.concatenate((center_month, [cluster.normalized_centers.month_mean_natural]))
fig = plt.gcf()
fig.clear()
plt.scatter(x, y, c = c, hold = True, marker = ".", linewidths = 0)
plt.scatter(center_x, center_y, c = center_c, hold = True, marker = "s")
if kwargs.get("show_months"):
for i in xrange(len(center_x)):
plt.text(center_x[i], center_y[i], np.floor(center_month[i]))
file_name = kwargs.get("file_name")
if file_name is None:
print("showing plot...")
plt.show()
else:
print("saving plot...")
fig.set_size_inches(32,18)
plt.savefig(file_name, bbox_inches='tight')
def update_all_cluster_centers(self, **kwargs):
def worker():
while True:
cluster = q.get()
if cluster is None:
q.put(None)
break
self.update_normalized_center(cluster)
print("Cluster center normalization done, %d in queue" % (q.qsize()))
q.task_done()
threads = []
q = Queue()
for i in xrange(3):
t = Thread(target = worker)
t.daemon = True
t.start()
threads.append(t)
force = kwargs.get("force")
all_clusters = kwargs.get("all_clusters")
if all_clusters is None:
if not force:
all_clusters = self.run.clusters.filter(normalized_centers_dirty = True).prefetch_related("photos", "normalized_entries", "normalized_centers")
else:
all_clusters = self.run.clusters.all().prefetch_related("photos", "normalized_entries", "normalized_centers")
for cluster in all_clusters:
q.put(cluster)
print("Everything in queue, processing...")
q.join()
q.put(None)
for thread in threads:
print("killing thread")
thread.join()
print("it's gone!")
print("Cluster center updates done!")
def update_normalized_center(self, cluster):
# calculate user counts
if len(cluster.normalized_entries.all()) == 0:
return
user_counts = {}
distinct_users = cluster.photos.order_by('username_md5').values('username_md5').distinct()
for user in distinct_users:
user = user["username_md5"]
user_counts[user] = cluster.photos.filter(username_md5 = user).count()
normalized_entries = cluster.normalized_entries.all().values()
entries = cluster.photos.all().values()
if len(entries) == 0:
return
weights = np.array([1.0 / user_counts[e["username_md5"]] for e in entries])
location_xs = np.array([e["location_x"] for e in normalized_entries])
location_ys = np.array([e["location_y"] for e in normalized_entries])
hours = np.array([e["time"].hour for e in entries])
months = np.array([e["time"].month for e in entries])
if not cluster.normalized_centers:
c = NormalizedPhotoSet()
c.save()
cluster.normalized_centers = c
cluster.save()
normalized_set = cluster.normalized_centers
normalized_set.location_x_mean = np.average(location_xs, weights = weights)
normalized_set.location_y_mean = np.average(location_ys, weights = weights)
normalized_set.hour_mean_natural = cm.cycle_avg(hours, 24, weights = weights)
normalized_set.hour_mean = norm.cyclical_z_score(
normalized_set.hour_mean_natural,
self.run.normalized_set.hour_mean,
self.run.normalized_set.hour_deviation,
24)
normalized_set.month_mean_natural = cm.cycle_avg(months, 12, weights = weights)
normalized_set.month_mean = norm.cyclical_z_score(
normalized_set.month_mean_natural,
self.run.normalized_set.month_mean,
self.run.normalized_set.month_deviation,
12)
cluster.normalized_centers_dirty = False
normalized_set.save()
cluster.save()
def process_iteration(self, **kwargs):
normalized_entries = kwargs.get("normalized_entries")
add_normalized_entries_from_clusters = False
if normalized_entries is None:
normalized_entries = []
add_normalized_entries_from_clusters = True
print ("Querying clusters...")
cluster_centers = []
cluster_map = {}
all_clusters = kwargs.get("all_clusters")
if all_clusters is None:
all_clusters = self.run.clusters.all().prefetch_related("normalized_centers", "normalized_entries", "photos")
print("iterating clusters...")
for cluster in all_clusters:
cluster_map[cluster.id] = [[],[]]
d = model_to_dict(cluster.normalized_centers)
d["cluster_id"] = cluster.pk
cluster_centers.append(d)
if add_normalized_entries_from_clusters:
normalized_entries += cluster.normalized_entries.all().values()
print("iterating entries...")
done = 0
count_all = len(normalized_entries)
month_cycle = self.run.normalized_set.month_z_cycle_length
for entry in normalized_entries:
lowest = float("inf")
closest = -1
x = entry["location_x"]
y = entry["location_y"]
month = entry["month"]
for cluster in cluster_centers:
d = norm.dist(cluster["location_x_mean"], x) ** 2 +\
norm.dist(cluster["location_y_mean"], y) ** 2 #+\
#cm.cyclical_distance(cluster["month_mean"], month, month_cycle) ** 2
d = np.sqrt(d)
if d < lowest:
closest = cluster["cluster_id"]
lowest = d
cluster_map[closest][0].append(entry["id"])
cluster_map[closest][1].append(entry["actual_photo_id"])
done += 1
if done % 5000 == 0:
print("%6d/%6d (%3.1f%%) processed" % (done, count_all, 100.0 * done / count_all))
print("All processed... pushing to db...")
q = Queue()
def worker():
while True:
cluster = q.get()
if cluster is None:
q.put(None)
break
retries = 1
while retries >= 0:
try:
cluster_pk = cluster.pk
cluster.clear_normalized_entries()
cluster.add_normalized_entries_from_keys(*cluster_map[cluster_pk])
except Exception as e:
exc_type, exc_value, exc_traceback = sys.exc_info()
for s in traceback.format_exception(exc_type, exc_value, exc_traceback):
self.write_message(s)
retries -= 1
if retries < 0:
raise e
else:
break
try:
self.update_normalized_center(cluster)
except:
exc_type, exc_value, exc_traceback = sys.exc_info()
for s in traceback.format_exception(exc_type, exc_value, exc_traceback):
self.write_message(s)
pass
print("Added %d entries to cluster. %3d left." % (len(cluster_map[cluster.pk][0]), q.qsize()))
q.task_done()
threads = []
for i in xrange(3):
t = Thread(target = worker)
t.daemon = True
t.start()
threads.append(t)
for cluster in all_clusters:
q.put(cluster)
q.join()
q.put(None)
for thread in threads:
print("killing thread")
thread.join()
print("it's gone!")
print("Entries added, updating cluster centers...")
self.update_all_cluster_centers()
print("Iteration done!")
```
#### File: photoplaces/photoplaces_web/photo_entry_normalization.py
```python
from models import PhotoLocationEntry, NormalizedPhotoSet, NormalizedPhotoEntry
import numpy as np
from math_functions.cyclical_math import *
from math_functions.normalization import *
from Queue import Queue
from threading import Thread, Event
def visualize_counts(qs, field):
values = qs.order_by(field).values(field).distinct()
values = [v[field] for v in values]
m = 20000
for value in values:
c = qs.filter(**{field + "__gte": value - 0.001, field + "__lte": value + 0.001}).count()
print( ("%2.6f: %6d " % (value, c)) + "#" * int(float(c) / m * 60))
def normalize_photo_entry(entry, target_set):
e = NormalizedPhotoEntry(
actual_photo = entry,
normalized_set = target_set,
location_x = z_score(entry.location[0], target_set.location_x_mean, target_set.location_x_deviation),
location_y = z_score(entry.location[1], target_set.location_y_mean, target_set.location_y_deviation),
month = cyclical_z_score(entry.time.month, target_set.month_mean, target_set.month_deviation, 12),
hour = cyclical_z_score(entry.time.hour, target_set.hour_mean, target_set.hour_deviation, 24))
e.save()
def normalize_values(normalized_set):
count = PhotoLocationEntry.objects.all().count()
def worker():
while True:
e = q.get()
if NormalizedPhotoEntry.objects.filter(actual_photo = e).count() == 0:
normalize_photo_entry(e, normalized_set)
done = NormalizedPhotoEntry.objects.all().count()
if done % 100 == 0 or done == 1:
print("%d / %d (%3.1f) done" % (done, count, float(done) / count * 100))
q.task_done()
q = Queue()
for i in xrange(4):
t = Thread(target = worker)
t.daemon = True
t.start()
for v in PhotoLocationEntry.objects.all():
q.put(v)
print("All in Queue, waiting...")
q.join()
# Untested, only done in interactive console...
hours = normalized_set.entries.order_by("hour").values("hour").distinct()
normalized_set.hour_z_cycle_length = abs(hours[0]["hour"] - hours[1]["hour"]) * 24
months = normalized_set.entries.order_by("month").values("month").distinct()
normalized_set.month_z_cycle_length = abs(months[0]["month"] - months[1]["month"]) * 12
print("All done")
def create_normalized_set():
print("Getting objects...")
values = PhotoLocationEntry.objects.all()
print("creating NormalizedPhotoSet...")
normalized_set = NormalizedPhotoSet()
print("Calculating mean month...")
months = [v.time.month for v in values]
month_mean = cycle_avg(months, 12)
print("It is %f, saving..." % month_mean)
normalized_set.month_mean = month_mean
print("Calculating mean hour...")
hours = [v.time.hour for v in values]
hour_mean = cycle_avg(hours, 24)
print("It is %f, saving..." % hour_mean)
normalized_set.hour_mean = hour_mean
print("Calculating mean x...")
x = [v.location[0] for v in values]
x_mean = np.mean(x)
print("It is %f, saving..." % x_mean)
normalized_set.location_x_mean = x_mean
print("Calculating mean y...")
y = [v.location[1] for v in values]
y_mean = np.mean(y)
print("It is %f, saving..." % y_mean)
normalized_set.location_y_mean = y_mean
print("Calculating month MAD...")
def dist12(a, b):
return cyclical_distance(a,b,12)
month_mad = mean_absolute_deviation(months, month_mean, dist12)
print("It is %f, saving..." % month_mad)
normalized_set.month_deviation = month_mad
print("Calculating hour MAD...")
def dist24(a, b):
return cyclical_distance(a,b, 24)
hour_mad = mean_absolute_deviation(hours, hour_mean, dist24)
print("It is %f, saving..." % hour_mad)
normalized_set.hour_deviation = hour_mad
print("Calculating x MAD...")
x_mad = mean_absolute_deviation(x, x_mean)
print("It is %f, saving..." % x_mad)
normalized_set.location_x_deviation = x_mad
print("Calculating y MAD...")
y_mad = mean_absolute_deviation(y, y_mean)
print("It is %f, saving..." % y_mad)
normalized_set.location_y_deviation = y_mad
normalized_set.save()
print("All done")
```
#### File: photoplaces/tools/generate_color_scale.py
```python
class color():
def __init__(self,r,g,b):
self.r = float(r)
self.g = float(g)
self.b = float(b)
def __str__(self):
return ("rgb(%d, %d, %d)" % (self.r, self.g, self.b))
def __repr__(self):
return ("rgb(%d, %d, %d)" % (self.r, self.g, self.b))
def __add__(self, other):
return color(self.r + other.r, self.g + other.g, self.b + other.b)
def __sub__(self, other):
return color(self.r - other.r, self.g - other.g, self.b - other.b)
def __mul__(self, other):
if isinstance(other, color):
return color(self.r * other.r, self.g * other.g, self.b * other.b)
else:
other = float(other)
return color(self.r * other, self.g * other, self.b * other)
def __truediv__(self, other):
if isinstance(other, color):
return color(self.r / other.r, self.g / other.g, self.b / other.b)
else:
other = float(other)
return color(self.r / other, self.g / other, self.b / other)
def lerp(v0, v1, t):
return v0 + (v1 - v0) * t
def generate_color_map(colors):
colors.append(colors[0])
result = [None] * 13
result[0] = colors[0]
result[12] = colors[0]
last_idx = 0
for i in xrange(1, len(colors)):
idx = int(float(i) / (len(colors) - 1) * 12)
for j in xrange(last_idx + 1, idx + 1):
result[j] = lerp(colors[i], colors[i - 1], (1.0 - (j - last_idx) / float(idx - last_idx)))
last_idx = idx
out = ""
for i in xrange(12):
out += ".month_%d {fill: %s;}\n" % (i + 1, result[i])
return out
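# Small usage sketch (added for illustration): build the 12-step ".month_N" CSS scale
# from three anchor colors. Run with Python 2, since generate_color_map uses xrange.
if __name__ == "__main__":
    print(generate_color_map([color(231, 76, 60), color(46, 204, 113), color(52, 152, 219)]))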
``` |
{
"source": "joonamo/pocket-game",
"score": 3
} |
#### File: pocket-game/LevelBuilder/levelBuild.py
```python
from imgtogb import read_png
from sourceGenerator import write_tiledata, write_mapdata, write_palette_data
import argparse
import xml.etree.ElementTree as XET
from pathlib import Path
def main():
parser = argparse.ArgumentParser(description="Compiles tiled to gb")
parser.add_argument("infile", help="level file", type=str)
parser.add_argument("outfile", help="Output file", type=str)
parser.add_argument("-b", "--bank", help="bank", type=str, default="3")
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
bank = args.bank
root = XET.parse(infile).getroot()
layer = root.find("layer")
map_width = layer.attrib['width']
map_height = layer.attrib['height']
level_data = [int(v) for v in layer.find('data').text.replace("\n", "").split(",")]
level_gids = set(level_data)
tileset_gid_map = dict()
seen_tiles_id = 0
tiles_data = []
unique_palettes = []
collision_tiles = []
collision_down_tiles = []
tilemap_dir = Path(infile).parent
for tilemap in root.findall("tileset"):
tilemap_gid = int(tilemap.attrib["firstgid"])
tilemap_path = tilemap_dir.joinpath(tilemap.attrib["source"])
print("processing tileset", tilemap_path)
tileroot = XET.parse(tilemap_path).getroot()
root_image = tileroot.find("image")
if root_image is not None:
# This is single-image tiled map
root_name = root_image.attrib["source"]
(root_tile_data, root_palettes, root_palette_data) = read_png(
str(tilemap_path.parent.joinpath(root_name)),
False,
True)
for tile in tileroot.findall("tile"):
tile_id = int(tile.attrib["id"])
tile_gid = tilemap_gid + tile_id
if not tile_gid in level_gids:
continue
tileset_gid_map[tile_gid] = seen_tiles_id
if tile.find("properties/property[@name='collision'][@value='true']") is not None:
collision_tiles.append(seen_tiles_id)
elif tile.find("properties/property[@name='collision_down'][@value='true']") is not None:
collision_down_tiles.append(seen_tiles_id)
if (root_image is None):
image = tile.find("image")
name = image.attrib["source"]
(tile_data, palettes, palette_data) = read_png(
str(tilemap_path.parent.joinpath(name)),
False,
True)
else:
name = root_name + "@" + str(tile_id)
start_offset = tile_id * 16
tile_data = root_tile_data[start_offset : (start_offset + 16)]
palette_start_offset = root_palettes[tile_id]
palette_data = root_palette_data[palette_start_offset : palette_start_offset + 4]
if palette_data in unique_palettes:
palette_idx = unique_palettes.index(palette_data)
else:
palette_idx = len(unique_palettes)
unique_palettes.append(palette_data)
tile_entry = {
"name": name + " gid " + str(tile_gid),
"hexdata": [hex(v).rjust(4, " ") for v in tile_data],
"palette_idx": palette_idx
}
tiles_data.append(tile_entry)
seen_tiles_id += 1
map_name = args.infile.split("/")[-1].split(".")[0]
map_data = ",".join([hex(tileset_gid_map[v]) for v in level_data])
write_mapdata(outfile,
bank,
map_data,
map_width,
map_height,
map_name,
collision_tiles,
collision_down_tiles)
write_tiledata(outfile, bank, tiles_data, map_name)
write_palette_data(outfile, bank, map_name, unique_palettes)
if __name__ == "__main__":
main()
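# Example invocation (illustrative; the file names are placeholders):
#   python levelBuild.py levels/level_01.tmx build/level_01.c -b 4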
```
#### File: pocket-game/LevelBuilder/spriteBuild.py
```python
from imgtogb import read_png
from sourceGenerator import write_tiledata
import argparse
def main():
parser = argparse.ArgumentParser(description="Compiles tiled to gb")
parser.add_argument("infile", help="level file", type=str)
parser.add_argument("outfile", help="Output file", type=str)
parser.add_argument("-b", "--bank", help="bank", type=str, default="3")
parser.add_argument("-pr", "--palette_reference", help="Palette reference file", type=str, default=None)
args = parser.parse_args()
infile = args.infile
outfile = args.outfile
bank = args.bank
palette_reference = args.palette_reference
sprite_name = args.infile.split("/")[-1].split(".")[0]
(tile_data, palettes, palette_data) = read_png(
infile,
False,
True,
palette_reference)
tiles_data = []
for idx, palette in enumerate(palettes):
start_offset = idx * 16
data = tile_data[start_offset : (start_offset + 16)]
tiles_data.append({
"name": sprite_name + "@" + str(idx),
"hexdata": [hex(v).rjust(4, " ") for v in data],
"palette_idx": palette
})
write_tiledata(outfile, bank, tiles_data, sprite_name)
if __name__ == "__main__":
main()
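# Example invocation (illustrative; the file names are placeholders):
#   python spriteBuild.py sprites/player.png build/player.c -b 3 -pr sprites/palette_ref.png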
``` |
{
"source": "joonan30/hail",
"score": 2
} |
#### File: hail/fs/google_fs.py
```python
import os
from typing import Dict, List
import gcsfs
from hurry.filesize import size
from .fs import FS
class GoogleCloudStorageFS(FS):
def __init__(self):
if 'GOOGLE_APPLICATION_CREDENTIALS' not in os.environ:
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = '/gsa-key/privateKeyData'
self.client = gcsfs.core.GCSFileSystem(secure_serialize=True)
def open(self, path: str, mode: str = 'r', buffer_size: int = 2**18):
return self.client.open(path, mode, buffer_size)
def copy(self, src: str, dest: str):
if src.startswith('gs://'):
return self.client.copy(src, dest)
else:
return self.client.put(src, dest)
def exists(self, path: str) -> bool:
return self.client.exists(path)
def is_file(self, path: str) -> bool:
try:
return not self._stat_is_dir(self.client.info(path))
except FileNotFoundError:
return False
def is_dir(self, path: str) -> bool:
try:
return self._stat_is_dir(self.client.info(path))
except FileNotFoundError:
return False
def stat(self, path: str) -> Dict:
return self._process_obj(self.client.info(path))
def _process_obj(self, stats: Dict) -> Dict:
return {
'is_dir': self._stat_is_dir(stats),
'size_bytes': stats['size'],
'size': size(stats['size']),
'path': stats['path'],
'owner': stats['bucket'],
'modification_time': stats.get('updated')
}
def _stat_is_dir(self, stats: Dict) -> bool:
return stats['storageClass'] == 'DIRECTORY' or stats['name'].endswith('/')
def ls(self, path: str) -> List[Dict]:
files = self.client.ls(path, detail=True)
return [self._process_obj(file) for file in files]
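# Illustrative usage sketch (added commentary): the bucket path is a placeholder and the
# calls require valid Google credentials.
# fs = GoogleCloudStorageFS()
# if fs.exists('gs://my-bucket/data/sample.txt'):
#     print(fs.stat('gs://my-bucket/data/sample.txt')['size'])
# for entry in fs.ls('gs://my-bucket/data/'):
#     print(entry['path'], entry['size'])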
```
#### File: hailtop/batch_client/aioclient.py
```python
import math
import random
import asyncio
import aiohttp
import hailtop.gear.auth as hj
from .globals import complete_states
job_array_size = 50
max_job_submit_attempts = 3
def filter_params(complete, success, attributes):
params = None
if complete is not None:
if not params:
params = {}
params['complete'] = '1' if complete else '0'
if success is not None:
if not params:
params = {}
params['success'] = '1' if success else '0'
if attributes is not None:
if not params:
params = {}
for n, v in attributes.items():
params[f'a:{n}'] = v
return params
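# For example (illustrative), filter_params(True, None, {'project': 'demo'}) returns
# {'complete': '1', 'a:project': 'demo'}, which list_batches passes as query parameters.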
class Job:
@staticmethod
def exit_code(job_status):
if 'exit_code' not in job_status or job_status['exit_code'] is None:
return None
exit_codes = job_status['exit_code']
exit_codes = [exit_codes[task] for task in ['setup', 'main', 'cleanup'] if task in exit_codes]
i = 0
while i < len(exit_codes):
ec = exit_codes[i]
if ec is None:
return None
if ec > 0:
return ec
i += 1
return 0
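# For example (illustrative), a status of {'exit_code': {'setup': 0, 'main': 1, 'cleanup': 0}}
# yields 1 (the first non-zero task exit code); if any listed task's exit code is still None,
# the whole result is None.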
@staticmethod
def total_duration(job_status):
if 'duration' not in job_status or job_status['duration'] is None:
return None
durations = job_status['duration']
durations = [durations[task] for task in ['setup', 'main', 'cleanup'] if task in durations]
i = 0
duration = 0
while i < len(durations):
d = durations[i]
if d is None:
return None
duration += d
i += 1
return duration
@staticmethod
def unsubmitted_job(batch_builder, job_id, attributes=None, parent_ids=None):
assert isinstance(batch_builder, BatchBuilder)
_job = UnsubmittedJob(batch_builder, job_id, attributes, parent_ids)
return Job(_job)
@staticmethod
def submitted_job(batch, job_id, attributes=None, parent_ids=None, _status=None):
assert isinstance(batch, Batch)
_job = SubmittedJob(batch, job_id, attributes, parent_ids, _status)
return Job(_job)
def __init__(self, job):
self._job = job
@property
def batch_id(self):
return self._job.batch_id
@property
def job_id(self):
return self._job.job_id
@property
def id(self):
return self._job.id
@property
def attributes(self):
return self._job.attributes
@property
def parent_ids(self):
return self._job.parent_ids
async def is_complete(self):
return await self._job.is_complete()
async def status(self):
return await self._job.status()
@property
def _status(self):
return self._job._status
async def wait(self):
return await self._job.wait()
async def log(self):
return await self._job.log()
async def pod_status(self):
return await self._job.pod_status()
class UnsubmittedJob:
def _submit(self, batch):
return SubmittedJob(batch, self._job_id, self.attributes, self.parent_ids)
def __init__(self, batch_builder, job_id, attributes=None, parent_ids=None):
if parent_ids is None:
parent_ids = []
if attributes is None:
attributes = {}
self._batch_builder = batch_builder
self._job_id = job_id
self.attributes = attributes
self.parent_ids = parent_ids
@property
def batch_id(self):
raise ValueError("cannot get the batch_id of an unsubmitted job")
@property
def job_id(self):
raise ValueError("cannot get the job_id of an unsubmitted job")
@property
def id(self):
raise ValueError("cannot get the id of an unsubmitted job")
async def is_complete(self):
raise ValueError("cannot determine if an unsubmitted job is complete")
async def status(self):
raise ValueError("cannot get the status of an unsubmitted job")
@property
def _status(self):
raise ValueError("cannot get the _status of an unsubmitted job")
async def wait(self):
raise ValueError("cannot wait on an unsubmitted job")
async def log(self):
raise ValueError("cannot get the log of an unsubmitted job")
async def pod_status(self):
raise ValueError("cannot get the pod status of an unsubmitted job")
class SubmittedJob:
def __init__(self, batch, job_id, attributes=None, parent_ids=None, _status=None):
if parent_ids is None:
parent_ids = []
if attributes is None:
attributes = {}
self._batch = batch
self.batch_id = batch.id
self.job_id = job_id
self.id = (self.batch_id, self.job_id)
self.attributes = attributes
self.parent_ids = parent_ids
self._status = _status
async def is_complete(self):
if self._status:
state = self._status['state']
if state in complete_states:
return True
await self.status()
state = self._status['state']
return state in complete_states
async def status(self):
self._status = await self._batch._client._get(f'/api/v1alpha/batches/{self.batch_id}/jobs/{self.job_id}')
return self._status
async def wait(self):
i = 0
while True:
if await self.is_complete():
return self._status
j = random.randrange(math.floor(1.1 ** i))
await asyncio.sleep(0.100 * j)
# max 44.5s
if i < 64:
i = i + 1
async def log(self):
return await self._batch._client._get(f'/api/v1alpha/batches/{self.batch_id}/jobs/{self.job_id}/log')
async def pod_status(self):
return await self._batch._client._get(f'/api/v1alpha/batches/{self.batch_id}/jobs/{self.job_id}/pod_status')
class Batch:
def __init__(self, client, id, attributes):
self._client = client
self.id = id
self.attributes = attributes
async def cancel(self):
await self._client._patch(f'/api/v1alpha/batches/{self.id}/cancel')
async def status(self, limit=None, offset=None):
params = None
if limit is not None:
if not params:
params = {}
params['limit'] = str(limit)
if offset is not None:
if limit is None:
raise ValueError("cannot define 'offset' without a 'limit'")
params['offset'] = str(offset)
return await self._client._get(f'/api/v1alpha/batches/{self.id}', params=params)
async def wait(self):
i = 0
while True:
status = await self.status(limit=0)
if status['complete']:
return await self.status()
j = random.randrange(math.floor(1.1 ** i))
await asyncio.sleep(0.100 * j)
# max 44.5s
if i < 64:
i = i + 1
async def delete(self):
await self._client._delete(f'/api/v1alpha/batches/{self.id}')
class BatchBuilder:
def __init__(self, client, attributes, callback):
self._client = client
self._job_idx = 0
self._job_docs = []
self._jobs = []
self._submitted = False
self.attributes = attributes
self.callback = callback
def create_job(self, image, command=None, args=None, env=None, ports=None,
resources=None, tolerations=None, volumes=None, security_context=None,
service_account_name=None, attributes=None, callback=None, parents=None,
input_files=None, output_files=None, always_run=False, pvc_size=None):
if self._submitted:
raise ValueError("cannot create a job in an already submitted batch")
self._job_idx += 1
if parents is None:
parents = []
parent_ids = []
foreign_batches = []
invalid_job_ids = []
for parent in parents:
job = parent._job
if isinstance(job, UnsubmittedJob):
if job._batch_builder != self:
foreign_batches.append(job)
elif not 0 < job._job_id < self._job_idx:
invalid_job_ids.append(job)
else:
parent_ids.append(job._job_id)
else:
foreign_batches.append(job)
error_msg = []
if len(foreign_batches) != 0:
error_msg.append('Found {} parents from another batch:\n{}'.format(str(len(foreign_batches)),
"\n".join([str(j) for j in foreign_batches])))
if len(invalid_job_ids) != 0:
error_msg.append('Found {} parents with invalid job ids:\n{}'.format(str(len(invalid_job_ids)),
"\n".join([str(j) for j in invalid_job_ids])))
if error_msg:
raise ValueError("\n".join(error_msg))
if env:
env = [{'name': k, 'value': v} for (k, v) in env.items()]
else:
env = []
env.extend([{
'name': 'POD_IP',
'valueFrom': {
'fieldRef': {'fieldPath': 'status.podIP'}
}
}, {
'name': 'POD_NAME',
'valueFrom': {
'fieldRef': {'fieldPath': 'metadata.name'}
}
}])
container = {
'image': image,
'name': 'main'
}
if command:
container['command'] = command
if args:
container['args'] = args
if env:
container['env'] = env
if ports:
container['ports'] = [{
'containerPort': p,
'protocol': 'TCP'
} for p in ports]
if resources:
container['resources'] = resources
if volumes:
container['volumeMounts'] = [v['volume_mount'] for v in volumes]
spec = {
'containers': [container],
'restartPolicy': 'Never'
}
if volumes:
spec['volumes'] = [v['volume'] for v in volumes]
if tolerations:
spec['tolerations'] = tolerations
if security_context:
spec['securityContext'] = security_context
if service_account_name:
spec['serviceAccountName'] = service_account_name
doc = {
'spec': spec,
'parent_ids': parent_ids,
'always_run': always_run,
'job_id': self._job_idx
}
if attributes:
doc['attributes'] = attributes
if callback:
doc['callback'] = callback
if input_files:
doc['input_files'] = input_files
if output_files:
doc['output_files'] = output_files
if pvc_size:
doc['pvc_size'] = pvc_size
self._job_docs.append(doc)
j = Job.unsubmitted_job(self, self._job_idx, attributes, parent_ids)
self._jobs.append(j)
return j
async def _submit_job_with_retry(self, batch_id, docs):
n_attempts = 0
saved_err = None
while n_attempts < max_job_submit_attempts:
try:
return await self._client._post(f'/api/v1alpha/batches/{batch_id}/jobs/create', json={'jobs': docs})
except Exception as err: # pylint: disable=W0703
saved_err = err
n_attempts += 1
await asyncio.sleep(1)
raise saved_err
async def submit(self):
if self._submitted:
raise ValueError("cannot submit an already submitted batch")
self._submitted = True
batch_doc = {}
if self.attributes:
batch_doc['attributes'] = self.attributes
if self.callback:
batch_doc['callback'] = self.callback
batch = None
try:
b = await self._client._post('/api/v1alpha/batches/create', json=batch_doc)
batch = Batch(self._client, b['id'], b.get('attributes'))
docs = []
n = 0
for jdoc in self._job_docs:
n += 1
docs.append(jdoc)
if n == job_array_size:
await self._submit_job_with_retry(batch.id, docs)
n = 0
docs = []
if docs:
await self._submit_job_with_retry(batch.id, docs)
await self._client._patch(f'/api/v1alpha/batches/{batch.id}/close')
except Exception as err: # pylint: disable=W0703
if batch:
await batch.cancel()
raise err
for j in self._jobs:
j._job = j._job._submit(batch)
self._job_docs = []
self._jobs = []
self._job_idx = 0
return batch
class BatchClient:
def __init__(self, session=None, url=None, token_file=None, token=None, headers=None):
if url is None:
url = 'http://batch.default'
self.url = url
if session is None:
session = aiohttp.ClientSession(raise_for_status=True,
timeout=aiohttp.ClientTimeout(total=60))
self._session = session
if token is None:
token = hj.find_token(token_file)
userdata = hj.JWTClient.unsafe_decode(token)
assert "bucket_name" in userdata
self.bucket = userdata["bucket_name"]
self._cookies = None
if headers is None:
headers = {}
headers['Authorization'] = f'Bearer {token}'
self._headers = headers
async def _get(self, path, params=None):
response = await self._session.get(
self.url + path, params=params, cookies=self._cookies, headers=self._headers)
return await response.json()
async def _post(self, path, json=None):
response = await self._session.post(
self.url + path, json=json, cookies=self._cookies, headers=self._headers)
return await response.json()
async def _patch(self, path):
await self._session.patch(
self.url + path, cookies=self._cookies, headers=self._headers)
async def _delete(self, path):
await self._session.delete(
self.url + path, cookies=self._cookies, headers=self._headers)
async def _refresh_k8s_state(self):
await self._post('/refresh_k8s_state')
async def list_batches(self, complete=None, success=None, attributes=None):
params = filter_params(complete, success, attributes)
batches = await self._get('/api/v1alpha/batches', params=params)
return [Batch(self,
b['id'],
attributes=b.get('attributes'))
for b in batches]
async def get_job(self, batch_id, job_id):
b = await self.get_batch(batch_id)
j = await self._get(f'/api/v1alpha/batches/{batch_id}/jobs/{job_id}')
return Job.submitted_job(
b,
j['job_id'],
attributes=j.get('attributes'),
parent_ids=j.get('parent_ids', []),
_status=j)
async def get_batch(self, id):
b = await self._get(f'/api/v1alpha/batches/{id}')
return Batch(self,
b['id'],
attributes=b.get('attributes'))
def create_batch(self, attributes=None, callback=None):
return BatchBuilder(self, attributes, callback)
async def close(self):
await self._session.close()
self._session = None
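# Illustrative usage sketch (added commentary, not in the original file; assumes a valid
# token can be found by hailtop.gear.auth):
# async def example():
#     client = BatchClient()
#     builder = client.create_batch(attributes={'name': 'demo'})
#     builder.create_job('ubuntu:18.04', command=['echo', 'hello'])
#     batch = await builder.submit()
#     print(await batch.wait())
#     await client.close()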
```
#### File: dev/benchmark/cli.py
```python
import sys
import argparse
def print_help():
main_parser = argparse.ArgumentParser(
prog='hailctl dev benchmark',
description='Run and analyze Hail benchmarks.')
subparsers = main_parser.add_subparsers()
subparsers.add_parser(
'run',
help='Run Hail benchmarks locally.',
description='Run Hail benchmarks locally.')
subparsers.add_parser(
'compare',
help='Compare Hail benchmarks.',
description='Run Hail benchmarks.')
main_parser.print_help()
def main(args):
if not args:
print_help()
sys.exit(0)
else:
module = args[0]
args = args[1:]
if module == 'run':
from .run import cli
cli.main(args)
elif module == 'compare':
from .compare import cli
cli.main(args)
elif module in ('-h', '--help', 'help'):
print_help()
else:
sys.stderr.write(f"ERROR: no such module: {module!r}")
print_help()
sys.exit(1)
```
#### File: hailtop/hailctl/__main__.py
```python
import os
import sys
import argparse
import re
import time
from hailtop import hailctl
def print_help():
main_parser = argparse.ArgumentParser(prog='hailctl',
description='Manage and monitor Hail deployments.')
subs = main_parser.add_subparsers()
subs.add_parser('dataproc',
help='Manage Google Dataproc clusters configured for Hail.',
description='Manage Google Dataproc clusters configured for Hail.')
subs.add_parser('dev',
help='Manage Hail development utilities.',
description='Manage Hail development utilities.')
subs.add_parser('version',
help='Print version information and exit.',
description='Print version information and exit.')
subs.add_parser('batch',
help='Manage batches running on the batch service managed by the Hail team.',
description='Manage batches running on the batch service managed by the Hail team.')
main_parser.print_help()
def check_for_update():
try:
check_file = os.path.expanduser('~') + '/.hail_version_check'
if os.path.exists(check_file):
last_modified = os.stat(check_file).st_ctime_ns
delta = time.time() - last_modified / 10 ** 9
assert delta > 0
day = 60 * 60 * 24
check_for_update = delta / day > 1
else:
check_for_update = True
if check_for_update:
open(check_file, 'w').close() # touch the file
import subprocess as sp
try:
pip_out = sp.check_output(['pip', 'search', 'hail'], stderr=sp.STDOUT)
except Exception: # pylint: disable=broad-except
pip_out = sp.check_output(['pip3', 'search', 'hail'], stderr=sp.STDOUT)
latest = re.search(r'hail \((\d+)\.(\d+)\.(\d+).*', pip_out.decode()).groups()
installed = re.search(r'(\d+)\.(\d+)\.(\d+).*', hailctl.version()).groups()
def int_version(version):
return tuple(map(int, version))
def fmt_version(version):
return '.'.join(version)
if int_version(latest) > int_version(installed):
sys.stderr.write(f'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n'
f'You have Hail {fmt_version(installed)} installed, '
f'but a newer version {fmt_version(latest)} exists.\n'
f' To upgrade to the latest version, please run:\n\n'
f' pip3 install -U hail\n\n'
f'~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n')
except Exception: # pylint: disable=broad-except
pass
def print_version():
print(hailctl.version())
def main():
check_for_update()
if len(sys.argv) == 1:
print_help()
sys.exit(0)
else:
module = sys.argv[1]
args = sys.argv[2:]
if module == 'version':
print_version()
elif module == 'dataproc':
from hailtop.hailctl.dataproc import cli
cli.main(args)
elif module == 'dev':
from hailtop.hailctl.dev import cli
cli.main(args)
elif module == 'batch':
from hailtop.hailctl.batch import cli
cli.main(args)
elif module in ('-h', '--help', 'help'):
print_help()
else:
sys.stderr.write(f"ERROR: no such module: {module!r}")
print_help()
sys.exit(1)
if __name__ == '__main__':
main()
```
#### File: hailtop/pipeline/pipeline.py
```python
import os
import re
import uuid
from .backend import LocalBackend, BatchBackend
from .task import Task
from .resource import Resource, InputResourceFile, TaskResourceFile, ResourceGroup
from .utils import PipelineException
class Pipeline:
"""
Object representing the directed acyclic graph (DAG) of jobs to run.
Examples
--------
Create a pipeline object:
>>> p = Pipeline()
Create a new pipeline task that prints hello to a temporary file `t.ofile`:
>>> t = p.new_task()
>>> t.command(f'echo "hello" > {t.ofile}')
Write the temporary file `t.ofile` to a permanent location
>>> p.write_output(t.ofile, 'output/hello.txt')
Execute the DAG:
>>> p.run()
Parameters
----------
name: :obj:`str`, optional
Name of the pipeline.
backend: :func:`.Backend`, optional
Backend used to execute the jobs. Default is :class:`.LocalBackend`
attributes: :obj:`dict` of :obj:`str` to :obj:`str`, optional
Key-value pairs of additional attributes. 'name' is not a valid keyword.
Use the name argument instead.
default_image: :obj:`str`, optional
Docker image to use by default if not specified by a task.
default_memory: :obj:`str`, optional
Memory setting to use by default if not specified by a task. Only
applicable if a docker image is specified for the :class:`.LocalBackend`
or the :class:`.BatchBackend`. Value is in GB.
default_cpu: :obj:`str`, optional
CPU setting to use by default if not specified by a task. Only
applicable if a docker image is specified for the :class:`.LocalBackend`
or the :class:`.BatchBackend`.
default_storage: :obj:`str`, optional
Storage setting to use by default if not specified by a task. Only
applicable for the :class:`.BatchBackend`.
"""
_counter = 0
_uid_prefix = "__PIPELINE__"
_regex_pattern = r"(?P<PIPELINE>{}\d+)".format(_uid_prefix)
@classmethod
def _get_uid(cls):
uid = "{}{}".format(cls._uid_prefix, cls._counter)
cls._counter += 1
return uid
def __init__(self, name=None, backend=None, attributes=None,
default_image=None, default_memory=None, default_cpu=None,
default_storage=None):
self._tasks = []
self._resource_map = {}
self._allocated_files = set()
self._input_resources = set()
self._uid = Pipeline._get_uid()
self.name = name
if attributes is None:
attributes = {}
if 'name' in attributes:
raise PipelineException("'name' is not a valid attribute. Use the name argument instead.")
self.attributes = attributes
self._default_image = default_image
self._default_memory = default_memory
self._default_cpu = default_cpu
self._default_storage = default_storage
if backend:
self._backend = backend
elif os.environ.get('BATCH_URL') is not None:
self._backend = BatchBackend(os.environ.get('BATCH_URL'))
else:
self._backend = LocalBackend()
def new_task(self, name=None, attributes=None):
"""
Initialize a new task object with default memory, docker image,
and CPU settings if specified upon pipeline creation.
Examples
--------
>>> t = p.new_task()
Parameters
----------
name: :obj:`str`, optional
Name of the task.
attributes: :obj:`dict` of :obj:`str` to :obj:`str`, optional
Key-value pairs of additional attributes. 'name' is not a valid keyword.
Use the name argument instead.
Returns
-------
:class:`.Task`
"""
if attributes is None:
attributes = {}
t = Task(pipeline=self, name=name, attributes=attributes)
if self._default_image is not None:
t.image(self._default_image)
if self._default_memory is not None:
t.memory(self._default_memory)
if self._default_cpu is not None:
t.cpu(self._default_cpu)
if self._default_storage is not None:
t.storage(self._default_storage)
self._tasks.append(t)
return t
def _tmp_file(self, prefix=None, suffix=None):
def _get_random_file():
file = '{}{}{}'.format(prefix if prefix else '',
uuid.uuid4().hex[:8],
suffix if suffix else '')
if file not in self._allocated_files:
self._allocated_files.add(file)
return file
return _get_random_file()
return _get_random_file()
def _new_task_resource_file(self, source, value=None):
trf = TaskResourceFile(value if value else self._tmp_file())
trf._add_source(source)
self._resource_map[trf._uid] = trf
return trf
def _new_input_resource_file(self, input_path, value=None):
irf = InputResourceFile(value if value else self._tmp_file())
irf._add_input_path(input_path)
self._resource_map[irf._uid] = irf
self._input_resources.add(irf)
return irf
def _new_resource_group(self, source, mappings):
assert isinstance(mappings, dict)
root = self._tmp_file()
d = {}
new_resource_map = {}
for name, code in mappings.items():
if not isinstance(code, str):
raise PipelineException(f"value for name '{name}' is not a string. Found '{type(code)}' instead.")
r = self._new_task_resource_file(source=source, value=eval(f'f"""{code}"""')) # pylint: disable=W0123
d[name] = r
new_resource_map[r._uid] = r
self._resource_map.update(new_resource_map)
rg = ResourceGroup(source, root, **d)
self._resource_map.update({rg._uid: rg})
return rg
def read_input(self, path, extension=None):
"""
Create a new input resource file object representing a single file.
Examples
--------
Read the file `hello.txt`:
>>> p = Pipeline()
>>> input = p.read_input('hello.txt')
>>> t = p.new_task()
>>> t.command(f"cat {input}")
>>> p.run()
Parameters
----------
path: :obj:`str`
File path to read.
extension: :obj:`str`, optional
File extension to use.
Returns
-------
:class:`.InputResourceFile`
"""
irf = self._new_input_resource_file(path)
if extension is not None:
irf.add_extension(extension)
return irf
def read_input_group(self, **kwargs):
"""
Create a new resource group representing a mapping of identifier to
input resource files.
Examples
--------
Read a binary PLINK file:
>>> p = Pipeline()
>>> bfile = p.read_input_group(bed="data/example.bed",
... bim="data/example.bim",
... fam="data/example.fam")
>>> t = p.new_task()
>>> t.command(f"plink --bfile {bfile} --geno --out {t.geno}")
>>> t.command(f"wc -l {bfile.fam}")
>>> t.command(f"wc -l {bfile.bim}")
>>> p.run()
Read a FASTA file and its index (file extensions matter!):
>>> fasta = p.read_input_group({'fasta': 'data/example.fasta',
... 'fasta.idx': 'data/example.fasta.idx'})
Create a resource group where the identifiers don't match the file extensions:
>>> rg = p.read_input_group(foo='data/foo.txt',
... bar='data/bar.txt')
`rg.foo` and `rg.bar` will not have the `.txt` file extension and
instead will be `{root}.foo` and `{root}.bar` where `{root}` is a random
identifier.
Notes
-----
The identifier is used to refer to
a specific resource file. For example, given the resource group `rg`, you
can use the attribute notation `rg.identifier` or the get item notation
`rg[identifier]`.
The file extensions for each file are derived from the identifier.
This is equivalent to `"{root}.identifier"` from
:meth:`.Task.declare_resource_group`. We are planning on adding flexibility
to incorporate more complicated extensions in the future such as `.vcf.bgz`.
For now, use :func:`ResourceFile.add_extension` to add an extension to a
resource file.
Parameters
----------
kwargs: :obj:`dict` of :obj:`str` to :obj:`str`
Key word arguments where the name/key is the identifier and the value
is the file path.
Returns
-------
:class:`.InputResourceFile`
"""
root = self._tmp_file()
new_resources = {name: self._new_input_resource_file(file, root + '.' + name) for name, file in kwargs.items()}
rg = ResourceGroup(None, root, **new_resources)
self._resource_map.update({rg._uid: rg})
return rg
def write_output(self, resource, dest): # pylint: disable=R0201
"""
Write resource file or resource file group to an output destination.
Examples
--------
Write a single task intermediate to a permanent location:
>>> p = Pipeline()
>>> t = p.new_task()
>>> t.command(f'echo "hello" > {t.ofile}')
>>> p.write_output(t.ofile, 'output/hello.txt')
>>> p.run()
Notes
-----
All :class:`.TaskResourceFile` are temporary files and must be written
to a permanent location using :meth:`.write_output` if the output needs
to be saved.
Parameters
----------
resource: :class:`.ResourceFile` or :class:`.ResourceGroup`
Resource to be written to a file.
dest: :obj:`str`
Destination file path. For a single :class:`.ResourceFile`, this will
simply be `dest`. For a :class:`.ResourceGroup`, `dest` is the file
root and each resource file will be written to `{root}.identifier`
where `identifier` is the identifier of the file in the
:class:`.ResourceGroup` map.
"""
if not isinstance(resource, Resource):
raise PipelineException(f"'write_output' only accepts Resource inputs. Found '{type(resource)}'.")
if isinstance(resource, TaskResourceFile) and resource not in resource._source._mentioned:
name = resource._source._resources_inverse
raise PipelineException(f"undefined resource '{name}'\n"
f"Hint: resources must be defined within the "
"task methods 'command' or 'declare_resource_group'")
resource._add_output_path(dest)
def select_tasks(self, pattern):
"""
Select all tasks in the pipeline whose name matches `pattern`.
Examples
--------
Select tasks in pipeline matching `qc`:
>>> p = Pipeline()
>>> t = p.new_task().name('qc')
>>> qc_tasks = p.select_tasks('qc')
>>> assert qc_tasks == [t]
Parameters
----------
pattern: :obj:`str`
Regex pattern matching task names.
Returns
-------
:obj:`list` of :class:`.Task`
"""
return [task for task in self._tasks if task.name is not None and re.match(pattern, task.name) is not None]
def run(self, dry_run=False, verbose=False, delete_scratch_on_exit=True):
"""
Execute a pipeline.
Examples
--------
Create a simple pipeline and execute it:
>>> p = Pipeline()
>>> t = p.new_task()
>>> t.command('echo "hello"')
>>> p.run()
Parameters
----------
dry_run: :obj:`bool`, optional
If `True`, don't execute code.
verbose: :obj:`bool`, optional
If `True`, print debugging output.
delete_scratch_on_exit: :obj:`bool`, optional
If `True`, delete temporary directories with intermediate files.
"""
dependencies = {task: task._dependencies for task in self._tasks}
ordered_tasks = []
niter = 0
while dependencies:
for task, deps in dependencies.items():
if not deps:
ordered_tasks.append(task)
niter = 0
for task, _ in dependencies.items():
dependencies[task] = dependencies[task].difference(set(ordered_tasks))
for task in ordered_tasks:
if task in dependencies:
del dependencies[task]
niter += 1
if niter == 100:
raise PipelineException("cycle detected in dependency graph")
self._tasks = ordered_tasks
self._backend._run(self, dry_run, verbose, delete_scratch_on_exit)
def __str__(self):
return self._uid
```
#### File: hail/utils/test_google_fs_utils.py
```python
import unittest
import hail as hl
from hail.utils import *
from hail.utils.java import Env
from hail.utils.linkedlist import LinkedList
from hail.fs.hadoop_fs import HadoopFS
from ..helpers import *
import os
setUpModule = startTestHailContext
tearDownModule = stopTestHailContext
BUCKET = os.environ.get("TEST_BUCKET_NAME", None)
class Tests(unittest.TestCase):
@classmethod
def setUpClass(cls):
if BUCKET is None:
raise unittest.case.SkipTest("TEST_BUCKET_NAME not set in env")
if 'HAIL_TEST_SERVICE_BACKEND_URL' not in os.environ:
raise unittest.case.SkipTest("HAIL_TEST_SERVICE_BACKEND_URL not set in env")
def test_hadoop_methods(self):
data = ['foo', 'bar', 'baz']
data.extend(map(str, range(100)))
with hadoop_open(f'{BUCKET}/test_out.txt', 'w') as f:
for d in data:
f.write(d)
f.write('\n')
with hadoop_open(f'{BUCKET}/test_out.txt') as f:
data2 = [line.strip() for line in f]
self.assertEqual(data, data2)
with hadoop_open(f'{BUCKET}/test_out.txt.gz', 'w') as f:
for d in data:
f.write(d)
f.write('\n')
with hadoop_open(f'{BUCKET}/test_out.txt.gz') as f:
data3 = [line.strip() for line in f]
self.assertEqual(data, data3)
hadoop_copy(f'{BUCKET}/test_out.txt.gz',
f'{BUCKET}/test_out.copy.txt.gz')
with hadoop_open(f'{BUCKET}/test_out.copy.txt.gz') as f:
data4 = [line.strip() for line in f]
self.assertEqual(data, data4)
local_fs = HadoopFS()
with local_fs.open(resource('randomBytes'), buffer_size=100) as f:
with hadoop_open(f'{BUCKET}/randomBytesOut', 'w', buffer_size=2**18) as out:
b = f.read()
out.write(b)
with hadoop_open(f'{BUCKET}/randomBytesOut', buffer_size=2**18) as f:
b2 = f.read()
self.assertEqual(b, b2)
with self.assertRaises(Exception):
hadoop_open(f'{BUCKET}/randomBytesOut', 'xb')
def test_hadoop_exists(self):
with hadoop_open(f'{BUCKET}/test_exists.txt', 'w') as f:
f.write("HELLO WORLD")
r_exists = f'{BUCKET}/test_exists.txt'
r_not_exists = f'{BUCKET}/not_exists.txt'
self.assertTrue(hl.hadoop_exists(r_exists))
self.assertFalse(hl.hadoop_exists(r_not_exists))
def test_hadoop_is_file(self):
a_file = f'{BUCKET}/test_hadoop_is_file.txt'
with hadoop_open(a_file, 'w') as f:
f.write("HELLO WORLD")
self.assertTrue(hl.hadoop_is_file(a_file))
self.assertFalse(hl.hadoop_is_file(f'{BUCKET}/'))
self.assertFalse(hl.hadoop_is_file(f'{BUCKET}/invalid-path'))
def test_hadoop_stat(self):
stat1 = hl.hadoop_stat(f'{BUCKET}/')
self.assertEqual(stat1['is_dir'], True)
stat2 = hl.hadoop_stat(f'{BUCKET}/test_out.copy.txt.gz')
self.assertEqual(stat2['size_bytes'], 302)
self.assertEqual(stat2['is_dir'], False)
self.assertTrue('path' in stat2)
self.assertTrue('owner' in stat2)
self.assertTrue('modification_time' in stat2)
```
#### File: notebook/notebook/notebook.py
```python
import aiohttp
import aiohttp_jinja2
import aiohttp_session
import aiohttp_session.cookie_storage
import asyncio
import base64
import jinja2
import kubernetes_asyncio as kube
import logging
import os
import re
import uuid
import uvloop
from cryptography import fernet
from pythonjsonlogger import jsonlogger
class CustomJsonFormatter(jsonlogger.JsonFormatter):
def add_fields(self, log_record, record, message_dict):
super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict)
log_record['funcNameAndLine'] = "{}:{}".format(record.funcName, record.lineno)
def configure_logging():
fmt = CustomJsonFormatter('(levelname) (asctime) (filename) (funcNameAndLine) (message)')
stream_handler = logging.StreamHandler()
stream_handler.setLevel(logging.INFO)
stream_handler.setFormatter(fmt)
logging.basicConfig(handlers=[stream_handler], level=logging.INFO)
configure_logging()
log = logging.getLogger('notebook')
app = aiohttp.web.Application(client_max_size=None)
routes = aiohttp.web.RouteTableDef()
uvloop.install()
def read_string(f):
with open(f, 'r') as f:
return f.read().strip()
app.secret_key = read_string('/notebook-secrets/secret-key')
PASSWORD = read_string('/notebook-secrets/password')
ADMIN_PASSWORD = read_string('/notebook-secrets/admin-password')
INSTANCE_ID = uuid.uuid4().hex
log.info(f'INSTANCE_ID {INSTANCE_ID}')
try:
with open('notebook-worker-images', 'r') as f:
def get_name(line):
return re.search("/([^/:]+):", line).group(1)
WORKER_IMAGES = {get_name(line): line.strip() for line in f}
except FileNotFoundError as e:
raise ValueError(
"working directory must contain a file called `notebook-worker-images' "
"containing the name of the docker image to use for worker pods.") from e
async def start_pod(jupyter_token, image):
pod_id = uuid.uuid4().hex
service_spec = kube.client.V1ServiceSpec(
selector={
'app': 'notebook-worker',
'hail.is/notebook-instance': INSTANCE_ID,
'uuid': pod_id},
ports=[kube.client.V1ServicePort(port=80, target_port=8888)])
service_template = kube.client.V1Service(
metadata=kube.client.V1ObjectMeta(
generate_name='notebook-worker-service-',
labels={
'app': 'notebook-worker',
'hail.is/notebook-instance': INSTANCE_ID,
'uuid': pod_id}),
spec=service_spec)
svc = await app['k8s'].create_namespaced_service(
'default',
service_template
)
pod_spec = kube.client.V1PodSpec(
security_context=kube.client.V1SecurityContext(
run_as_user=1000),
containers=[
kube.client.V1Container(
command=[
'jupyter',
'notebook',
f'--NotebookApp.token={jupyter_token}',
f'--NotebookApp.base_url=/instance/{svc.metadata.name}/'
],
name='default',
image=image,
ports=[kube.client.V1ContainerPort(container_port=8888)],
resources=kube.client.V1ResourceRequirements(
requests={'cpu': '1.601', 'memory': '1.601G'}),
readiness_probe=kube.client.V1Probe(
http_get=kube.client.V1HTTPGetAction(
path=f'/instance/{svc.metadata.name}/login',
port=8888)))])
pod_template = kube.client.V1Pod(
metadata=kube.client.V1ObjectMeta(
generate_name='notebook-worker-',
labels={
'app': 'notebook-worker',
'hail.is/notebook-instance': INSTANCE_ID,
'uuid': pod_id,
}),
spec=pod_spec)
pod = await app['k8s'].create_namespaced_pod(
'default',
pod_template)
return svc, pod
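# Note (added commentary): the pod and its service share the generated 'uuid' label, which is
# what get_all_workers() below uses to pair each worker pod with its service, and the readiness
# probe polls Jupyter's /login page through the per-instance base_url set on the container.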
@routes.get('/healthcheck')
async def healthcheck(request):
del request
return aiohttp.web.Response()
@routes.get('/', name='root')
@aiohttp_jinja2.template('index.html')
async def root(request):
session = await aiohttp_session.get_session(request)
if 'svc_name' not in session:
log.info(f'no svc_name found in session {session.keys()}')
return {'form_action_url': str(request.app.router['new'].url_for()),
'images': list(WORKER_IMAGES),
'default': 'gew2019'}
svc_name = session['svc_name']
jupyter_token = session['jupyter_token']
# str(request.app.router['root'].url_for()) +
url = request.url.with_path(f'instance/{svc_name}/?token={jupyter_token}')
log.info('redirecting to ' + url)
raise aiohttp.web.HTTPFound(url)
@routes.get('/new', name='new')
async def new_get(request):
session = await aiohttp_session.get_session(request)
pod_name = session.get('pod_name')
svc_name = session.get('svc_name')
if pod_name:
await delete_worker_pod(pod_name, svc_name)
session.clear()
raise aiohttp.web.HTTPFound(
request.app.router['root'].url_for())
@routes.post('/new')
async def new_post(request):
session = await aiohttp_session.get_session(request)
log.info('new received')
form = await request.post()
password = form['password']
image = form['image']
if password != PASSWORD or image not in WORKER_IMAGES:
raise aiohttp.web.HTTPForbidden()
jupyter_token = fernet.Fernet.generate_key().decode('ascii')
svc, pod = await start_pod(jupyter_token, WORKER_IMAGES[image])
session['svc_name'] = svc.metadata.name
session['pod_name'] = pod.metadata.name
session['jupyter_token'] = jupyter_token
raise aiohttp.web.HTTPFound(
request.app.router['wait'].url_for())
@routes.get('/wait', name='wait')
@aiohttp_jinja2.template('wait.html')
async def wait_webpage(request):
return {}
@routes.get('/auth/{requested_svc_name}')
async def auth(request):
session = await aiohttp_session.get_session(request)
requested_svc_name = request.match_info['requested_svc_name']
approved_svc_name = session.get('svc_name')
if approved_svc_name and approved_svc_name == requested_svc_name:
return aiohttp.web.Response()
raise aiohttp.web.HTTPForbidden()
async def get_all_workers():
workers = await app['k8s'].list_namespaced_pod(
namespace='default',
watch=False,
label_selector='app=notebook-worker')
workers_and_svcs = []
for w in workers.items:
uuid = w.metadata.labels['uuid']
svcs = (await app['k8s'].list_namespaced_service(
namespace='default',
watch=False,
label_selector='uuid=' + uuid)).items
assert len(svcs) <= 1
if len(svcs) == 1:
workers_and_svcs.append((w, svcs[0]))
else:
log.info(f'assuming pod {w.metadata.name} is getting deleted '
f'because it has no service')
return workers_and_svcs
@routes.get('/workers', name='workers')
@aiohttp_jinja2.template('workers.html')
async def workers(request):
session = await aiohttp_session.get_session(request)
if not session.get('admin'):
raise aiohttp.web.HTTPFound(
request.app.router['admin-login'].url_for())
workers_and_svcs = await get_all_workers()
return {'workers': workers_and_svcs,
'workers_url': str(request.app.router['workers'].url_for()),
'leader_instance': INSTANCE_ID}
@routes.get('/workers/{pod_name}/{svc_name}/delete')
async def workers_delete(request):
session = await aiohttp_session.get_session(request)
pod_name = request.match_info['pod_name']
svc_name = request.match_info['svc_name']
if not session.get('admin'):
raise aiohttp.web.HTTPFound(
request.app.router['admin-login'].url_for())
await delete_worker_pod(pod_name, svc_name)
raise aiohttp.web.HTTPFound(request.app.router['workers'].url_for())
@routes.post('/workers/delete-all-workers')
async def delete_all_workers(request):
session = await aiohttp_session.get_session(request)
if not session.get('admin'):
raise aiohttp.web.HTTPFound(
request.app.router['admin-login'].url_for())
workers_and_svcs = await get_all_workers()
await asyncio.gather(*[
delete_worker_pod(pod.metadata.name, svc.metadata.name)
for pod, svc in workers_and_svcs])
raise aiohttp.web.HTTPFound(request.app.router['workers'].url_for())
async def delete_worker_pod(pod_name, svc_name):
try:
await app['k8s'].delete_namespaced_pod(
pod_name,
'default')
except kube.client.rest.ApiException as e:
log.info(f'pod {pod_name} or associated service already deleted {e}')
try:
await app['k8s'].delete_namespaced_service(
svc_name,
'default')
except kube.client.rest.ApiException as e:
log.info(f'service {svc_name} (for pod {pod_name}) already deleted {e}')
@routes.get('/admin-login', name='admin-login')
@aiohttp_jinja2.template('admin-login.html')
async def admin_login(request):
return {'form_action_url': str(request.app.router['workers'].url_for())}
@routes.post('/admin-login')
async def admin_login_post(request):
session = await aiohttp_session.get_session(request)
form = await request.post()
if form['password'] != <PASSWORD>:
raise aiohttp.web.HTTPForbidden()
session['admin'] = True
raise aiohttp.web.HTTPFound(request.app.router['workers'].url_for())
@routes.get('/worker-image')
async def worker_image(request):
del request
return aiohttp.web.Response(text='\n'.join(WORKER_IMAGES.values()))
@routes.get('/waitws')
async def wait_websocket(request):
session = await aiohttp_session.get_session(request)
ws = aiohttp.web.WebSocketResponse()
await ws.prepare(request)
pod_name = session['pod_name']
svc_name = session['svc_name']
jupyter_token = session['jupyter_token']
log.info(f'received wait websocket for {svc_name} {pod_name}')
while True:
try:
response = await app['client_session'].head(
f'https://notebook.hail.is/instance-ready/{svc_name}/',
timeout=1)
if response.status < 500:
log.info(
f'HEAD on jupyter succeeded for {svc_name} {pod_name} '
f'response: {response}')
# if someone responds with a 2xx, 3xx, or 4xx, the notebook
# server is alive and functioning properly (in particular, our
# HEAD request will return 405 METHOD NOT ALLOWED)
break
# somewhat unusual, means the gateway had an error before we
# timed out, usually means the gateway itself is broken
log.info(f'HEAD on jupyter failed for {svc_name} {pod_name} response: {response}')
except Exception as e:
            log.info(f'HEAD on jupyter failed for {svc_name} {pod_name} {e}')
await asyncio.sleep(1)
notebook_url_scheme = request.url.scheme.replace('ws', 'http')
notebook_url = request.url.with_scheme(notebook_url_scheme)
notebook_url = notebook_url.with_path(
        f'instance/{svc_name}/?token={jupyter_token}')
await ws.send_str(notebook_url)
await ws.close()
log.info(f'notification sent to user for {svc_name} {pod_name}')
return ws
async def setup_k8s(app):
kube.config.load_incluster_config()
app['k8s'] = kube.client.CoreV1Api()
async def cleanup(app):
await app['client_session'].close()
if __name__ == '__main__':
my_path = os.path.dirname(os.path.abspath(__file__))
aiohttp_jinja2.setup(
app,
loader=jinja2.FileSystemLoader(os.path.join(my_path, 'templates')))
routes.static('/static', os.path.join(my_path, 'static'))
app.add_routes(routes)
app.on_startup.append(setup_k8s)
app['client_session'] = aiohttp.ClientSession()
app.on_cleanup.append(cleanup)
fernet_key = fernet.Fernet.generate_key()
secret_key = base64.urlsafe_b64decode(fernet_key)
aiohttp_session.setup(
app,
aiohttp_session.cookie_storage.EncryptedCookieStorage(secret_key))
aiohttp.web.run_app(app, host='0.0.0.0', port=5000)
``` |
{
"source": "joonaskalda/streamlit-heroku",
"score": 3
} |
#### File: template/my_component/__init__.py
```python
import os
import streamlit.components.v1 as components
import streamlit as st
import time
import numpy as np
import IPython.display as ipd
#ipd.Audio(audio, rate=16000)
from online_scd.model import SCDModel
from online_scd.streaming import StreamingDecoder
import timeit
from online_scd.utils import load_wav_file
import multiprocessing
#import playsound
import queue
import time
from typing import List
import numpy as np
import pydub
from pydub.playback import play
import streamlit as st
from streamlit_webrtc import (
ClientSettings,
WebRtcMode,
webrtc_streamer,
)
# Create a _RELEASE constant. We'll set this to False while we're developing
# the component, and True when we're ready to package and distribute it.
# (This is, of course, optional - there are innumerable ways to manage your
# release process.)
_RELEASE = False
# Declare a Streamlit component. `declare_component` returns a function
# that is used to create instances of the component. We're naming this
# function "_component_func", with an underscore prefix, because we don't want
# to expose it directly to users. Instead, we will create a custom wrapper
# function, below, that will serve as our component's public API.
# It's worth noting that this call to `declare_component` is the
# *only thing* you need to do to create the binding between Streamlit and
# your component frontend. Everything else we do in this file is simply a
# best practice.
if not _RELEASE:
_component_func = components.declare_component(
# We give the component a simple, descriptive name ("my_component"
# does not fit this bill, so please choose something better for your
# own component :)
"my_component",
# Pass `url` here to tell Streamlit that the component will be served
# by the local dev server that you run via `npm run start`.
# (This is useful while your component is in development.)
url="http://localhost:8080",
)
model = SCDModel.load_from_checkpoint("template/my_component/test/sample_model/checkpoints/epoch=102.ckpt")
file_name = "template/my_component/frontend/src/audio/3321821.wav"
else:
# When we're distributing a production version of the component, we'll
# replace the `url` param with `path`, and point it to to the component's
# build directory:
parent_dir = os.path.dirname(os.path.abspath(__file__))
build_dir = os.path.join(parent_dir, "frontend/build")
_component_func = components.declare_component("my_component", path=build_dir)
model = SCDModel.load_from_checkpoint("template/my_component/test/sample_model/checkpoints/epoch=102.ckpt")
file_name = "template/my_component/frontend/src/audio/3321821.wav"
# Create a wrapper function for the component. This is an optional
# best practice - we could simply expose the component function returned by
# `declare_component` and call it done. The wrapper allows us to customize
# our component's API: we can pre-process its input args, post-process its
# output value, and add a docstring for users.
def my_component(name, key=None):
"""Create a new instance of "my_component".
Parameters
----------
name: str
The name of the thing we're saying hello to. The component will display
the text "Hello, {name}!"
key: str or None
An optional key that uniquely identifies this component. If this is
None, and the component's arguments are changed, the component will
be re-mounted in the Streamlit frontend and lose its current state.
Returns
-------
int
The number of times the component's "Click Me" button has been clicked.
(This is the value passed to `Streamlit.setComponentValue` on the
frontend.)
"""
# Call through to our private component function. Arguments we pass here
# will be sent to the frontend, where they'll be available in an "args"
# dictionary.
#
# "default" is a special argument that specifies the initial return
# value of the component before the user has interacted with it.
component_value = _component_func(name=name, key=key, default=0)
# We could modify the value returned from the component if we wanted.
# There's no need to do this in our simple example - but it's an option.
return component_value
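# --- Illustrative sketch (added; not part of the original component) --------
# A minimal example of how the wrapper above could be called from a Streamlit
# script. The greeting "World" and the key "demo" are assumed example values.
# Kept inside an uncalled helper so importing this module does not execute it.
def _example_component_usage():
    clicks = my_component("World", key="demo")
    st.write("The component's button was clicked {} times".format(clicks))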
# Add some test code to play with the component while it's in development.
# During development, we can run this just as we would any other Streamlit
# app: `$ streamlit run my_component/__init__.py`
def stream_sample():
st.subheader("Streaming a sample .wav")
# Create a second instance of our component whose `name` arg will vary
# based on a text_input widget.
#
# We use the special "key" argument to assign a fixed identity to this
# component instance. By default, when a component's arguments change,
# it is considered a new instance and will be re-mounted on the frontend
# and lose its current state. In this case, we want to vary the component's
# "name" argument without having it get recreated.
sound = pydub.AudioSegment.from_wav(file_name)
sound = sound.set_channels(1).set_frame_rate(16000)
audio = np.array(sound.get_array_of_samples())/32768
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
text_output = st.empty()
streaming_decoder = StreamingDecoder(model)
frame_number = 0
#p = multiprocessing.Process(target=playsound.playsound, args=(file_name,))
#play_obj = wave_obj.play()
start_0 = timeit.default_timer()
was_clicked = my_component("test", key="foo")
if was_clicked:
for i in range(0, len(audio), 1000):
# while (num_clicks%2 == 0):
# time.sleep(0.1)
start = timeit.default_timer()
for probs in streaming_decoder.process_audio(audio[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
end = timeit.default_timer()
# text_output.markdown(f"{end-start_0} seconds")
time.sleep(max(0,1/16-end+start))
# st.button("Re-run")
def stream_mic():
st.subheader("Streaming from microphone")
webrtc_ctx = webrtc_streamer(
key="speech-<PASSWORD>",
mode=WebRtcMode.SENDONLY,
audio_receiver_size=1024,
client_settings=ClientSettings(
rtc_configuration={
"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
},
media_stream_constraints={"video": False, "audio": True},
),
)
status_indicator = st.empty()
if not webrtc_ctx.state.playing:
return
status_indicator.write("Loading...")
text_output = st.empty()
stream = None
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
streaming_decoder = StreamingDecoder(model)
frame_number = 0
status_indicator.write("Model loaded.")
ct=0
while True:
if webrtc_ctx.audio_receiver:
sound_chunk = pydub.AudioSegment.empty()
try:
audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
except queue.Empty:
time.sleep(0.1)
status_indicator.write("No frame arrived.")
continue
status_indicator.write("Running. Say something!")
for audio_frame in audio_frames:
sound = pydub.AudioSegment(
data=audio_frame.to_ndarray().tobytes(),
sample_width=audio_frame.format.bytes,
frame_rate=audio_frame.sample_rate,
channels=len(audio_frame.layout.channels),
)
sound_chunk += sound
if len(sound_chunk) > 0:
sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
16000
)
buffer = np.array(sound_chunk.get_array_of_samples())
text_output.markdown(f"{ct/16000} seconds")
buffer = np.array(buffer)/32768
ct+=len(buffer)
#text_output.markdown(f"burh{ct}")
for i in range(0, len(buffer), 1000):
for probs in streaming_decoder.process_audio(buffer[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
else:
status_indicator.write("AudioReciver is not set. Abort.")
break
def stream_upload():
st.subheader("Streaming an upload")
# Create a second instance of our component whose `name` arg will vary
# based on a text_input widget.
#
# We use the special "key" argument to assign a fixed identity to this
# component instance. By default, when a component's arguments change,
# it is considered a new instance and will be re-mounted on the frontend
# and lose its current state. In this case, we want to vary the component's
# "name" argument without having it get recreated.
# name_input = st.text_input("Enter a name", value="Streamlit")
uploaded_file = st.file_uploader("Choose a file")
if uploaded_file is not None:
sound = pydub.AudioSegment.from_wav(uploaded_file)
sound = sound.set_channels(1).set_frame_rate(16000)
audio = np.array(sound.get_array_of_samples())/32768
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
text_output = st.empty()
streaming_decoder = StreamingDecoder(model)
frame_number = 0
#p = multiprocessing.Process(target=playsound.playsound, args=(file_name,))
#play_obj = wave_obj.play()
start_0 = timeit.default_timer()
was_clicked = my_component("test", key="foo")
if was_clicked:
for i in range(0, len(audio), 1000):
# while (num_clicks%2 == 0):
# time.sleep(0.1)
start = timeit.default_timer()
for probs in streaming_decoder.process_audio(audio[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
end = timeit.default_timer()
# text_output.markdown(f"{end-start_0} seconds")
time.sleep(max(0,1/16-end+start))
# st.button("Re-run")
def main():
option = st.selectbox(
'Which audio source would you like to use?',
('sample wav (osoon)','microphone', 'upload'), 0)
if option == 'sample wav (osoon)':
#file_name = "3321821.wav"
stream_sample()
elif option == 'microphone':
stream_mic()
elif option == 'upload':
stream_upload()
if __name__ == "__main__":
main()
```
#### File: my_component/online_scd/streaming.py
```python
import torch
import functools
import numpy as np
import librosa
import online_scd.data as data
class InputFrameGenerator(object):
def __init__(self, blocksize, stepsize):
self.blocksize = blocksize
self.stepsize = stepsize
self.buffer = None
def frames(self, frames):
if self.buffer is not None:
stack = np.concatenate([self.buffer, frames])
else:
stack = frames.copy()
stack_length = len(stack)
nb_frames = (
stack_length - self.blocksize + self.stepsize) // self.stepsize
nb_frames = max(nb_frames, 0)
frames_length = nb_frames * self.stepsize + \
self.blocksize - self.stepsize
last_block_size = stack_length - frames_length
self.buffer = stack[int(nb_frames * self.stepsize):]
for index in range(0, int(nb_frames * self.stepsize), int(self.stepsize)):
yield stack[index:index + self.blocksize]
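# --- Illustrative sketch (added; not part of the original file) -------------
# InputFrameGenerator re-blocks an arbitrary-length stream into fixed-size,
# overlapping windows; blocksize=400 and stepsize=160 below correspond to the
# 25 ms / 10 ms framing used at 16 kHz elsewhere in this module. Assumed,
# uncalled example:
def _example_input_frame_generator():
    gen = InputFrameGenerator(blocksize=400, stepsize=160)
    chunk = np.zeros(1000, dtype=np.float32)   # stand-in audio chunk
    return list(gen.frames(chunk))             # four 400-sample windows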
class StreamingSlidingWindowCmn:
def __init__(self, num_feats, cmn_window=600):
self.cmn_window = cmn_window
self.rolling_position = 0
self.rolling_buffer = np.zeros((num_feats, cmn_window))
self.buffer_length = 0
def process(self, frame):
self.rolling_buffer[:, self.rolling_position] = frame
self.rolling_position = (self.rolling_position + 1) % self.cmn_window
self.buffer_length = min(self.buffer_length + 1, self.cmn_window)
return frame - self.rolling_buffer[:, 0:self.buffer_length].mean(1)
class AudioStream2MelSpectrogram:
def __init__(self, sample_rate=16000, num_fbanks=40, cmn_window=600):
self.sample_rate = sample_rate
self.num_fbanks = num_fbanks
self.input_frame_generator = InputFrameGenerator(400, 160)
self.cmn = StreamingSlidingWindowCmn(num_fbanks, cmn_window)
def process_audio(self, audio):
for frames in self.input_frame_generator.frames(audio):
single_feat = librosa.feature.melspectrogram(frames, sr=self.sample_rate,
center=False,
n_fft=int(2.5*self.sample_rate/100.0), hop_length=self.sample_rate//100,
fmin=40, fmax=self.sample_rate//2-400, n_mels=self.num_fbanks)
single_feat = single_feat[:, 0]
single_feat = np.log(np.clip(single_feat, data.EPSILON.numpy(), None))
single_feat = self.cmn.process(single_feat)
yield single_feat
class StreamingDecoder:
def __init__(self, model):
self.model = model
self.model.eval()
self.audio2mel = AudioStream2MelSpectrogram(16000, model.hparams.num_fbanks)
self.mels_to_conv_input = InputFrameGenerator(model.encoder_fov, model.hparams.detection_period)
self.hidden_state = None
self.frame_counter = 0
self.discard_counter = 0
def process_audio(self, audio):
for feature in self.audio2mel.process_audio(audio):
for x in self.mels_to_conv_input.frames(feature.reshape(1, self.model.hparams.num_fbanks)):
x = torch.from_numpy(x).permute(1, 0).unsqueeze(0).float()
x = self.model.encode_windowed_features(x)
y, self.hidden_state = self.model.decode_single_timestep(x, self.hidden_state)
probs = y.log_softmax(dim=-1).exp()
if self.discard_counter < (self.model.hparams.label_delay - self.model.encoder_fov//2) // self.model.hparams.detection_period:
# we discard output for the 1st second (or whatever the label delay is)
self.discard_counter += 1
else:
yield probs.squeeze()
def find_speaker_change_times(self, audio, threshold=0.5):
for y in self.process_audio(audio):
if y[1] > threshold:
change_time = self.frame_counter / 100
if change_time > 0:
yield change_time
self.frame_counter += 10
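# --- Illustrative sketch (added; not part of the original file) -------------
# Typical streaming use, mirroring the Streamlit front-end: feed 16 kHz mono
# audio to the decoder in small chunks and collect the per-step speaker-change
# probabilities it yields. `model` is assumed to be a trained SCDModel.
def _example_streaming_decoder(model, audio):
    decoder = StreamingDecoder(model)
    probs = []
    for i in range(0, len(audio), 1000):
        for p in decoder.process_audio(audio[i: i + 1000]):
            probs.append(p)
    return probs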
```
#### File: my_component/online_scd/test_streaming.py
```python
import unittest
import asyncio
import numpy as np
import torch
from streaming import InputFrameGenerator, AudioStream2MelSpectrogram, StreamingSlidingWindowCmn, StreamingDecoder
import online_scd.trs as trs
import online_scd.data as data
from online_scd.model import SCDModel
from online_scd.trs import Transcritpion
class TestInputFrameGenerator(unittest.TestCase):
def test_frames(self):
raw_input1 = np.arange(500)
raw_input2 = np.arange(500, 1000)
ifg = InputFrameGenerator(400, 160)
result = list(ifg.frames(raw_input1)) + list(ifg.frames(raw_input2))
self.assertEqual(result[0].tolist(), raw_input1[0:400].tolist())
self.assertEqual(result[1].tolist(), raw_input1[160:500].tolist() + raw_input2[0:60].tolist())
def test_2d(self):
raw_input1 = np.random.random((100, 2))
raw_input2 = np.random.random((100, 2))
ifg = InputFrameGenerator(30, 10)
result = list(ifg.frames(raw_input1)) + list(ifg.frames(raw_input2))
np.testing.assert_almost_equal(result[0], raw_input1[0:30, :].tolist())
class TestAudioStream2MelSpectrogram(unittest.TestCase):
def test_features(self):
audio = trs.load_wav_file("test/sample_dataset/3321821.wav", 16000)[0: 16000]
features = data.extract_features(audio, 16000, 40)
a2s = AudioStream2MelSpectrogram(16000, 40)
streamed_features = []
for i in range(0, len(audio), 1000):
for feature in a2s.process_audio(audio[i: i+1000]):
streamed_features.append(feature)
self.assertEqual(len(features), len(streamed_features))
#breakpoint()
np.testing.assert_almost_equal(features[-1].tolist(), streamed_features[-1].tolist(), decimal=3)
class TestStreamingSlidingWindowCmn(unittest.TestCase):
def test_sliding_window_cmn(self):
cmn = StreamingSlidingWindowCmn(num_feats=2, cmn_window=5)
input_data = np.random.random((2, 100))
output_data = np.zeros((2, 100))
for i in range(input_data.shape[1]):
output_data[:, i] = cmn.process(input_data[:, i])
np.testing.assert_almost_equal(output_data[:, 9], input_data[:, 9] - input_data[:, 5:10].mean(1))
class TestModel(unittest.TestCase):
def test_decoding(self):
model = SCDModel.load_from_checkpoint("test/sample_model/checkpoints/epoch=102.ckpt")
transcription = Transcritpion("test/sample_dataset/71_ID117_344945.wav", "test/sample_dataset/71_ID117_344945.trs")
speech_sections = transcription.get_speech_sections()
audio = speech_sections[0].wav_tensor[0:16000*100]
mel_spec = data.extract_features(audio, 16000, model.hparams.num_fbanks).unsqueeze(0)
mel_spec_length = torch.tensor(mel_spec.shape[-2]).unsqueeze(0)
output_enc_padded = model._encode_features(mel_spec, mel_spec_length)
logits = model._decode(output_enc_padded, (mel_spec_length - model.encoder_fov) // model.hparams.detection_period)
nonstreaming_breaks = logits.log_softmax(dim=-1).squeeze().argmax(1).nonzero(as_tuple=True)[0]
streaming_model = StreamingDecoder(model)
streaming_outputs = []
for i in range(0, len(audio), 1000):
for output in streaming_model.process_audio(audio[i: i+1000]):
streaming_outputs.append(output)
streaming_breaks = torch.stack(streaming_outputs).squeeze().argmax(1).nonzero(as_tuple=True)[0] \
+ (model.hparams.label_delay - model.encoder_fov//2) // model.hparams.detection_period
# Assert that the overlap between streaming and non-streaming is more than 90%
print("Breaks from non-streaming decoding:", nonstreaming_breaks)
print("Breaks from streaming decoding:", streaming_breaks)
self.assertTrue(len(np.intersect1d(nonstreaming_breaks.numpy(), streaming_breaks.numpy())) / len(streaming_breaks) > 0.9)
self.assertTrue(len(np.intersect1d(nonstreaming_breaks.numpy(), streaming_breaks.numpy())) / len(nonstreaming_breaks) > 0.9)
def test_streaming_with_times(self):
model = SCDModel.load_from_checkpoint("test/sample_model/checkpoints/epoch=102.ckpt")
transcription = Transcritpion("test/sample_dataset/71_ID117_344945.wav", "test/sample_dataset/71_ID117_344945.trs")
speech_sections = transcription.get_speech_sections()
audio = speech_sections[0].wav_tensor
print("True speaker change points: ", speech_sections[0].relative_speaker_change_points)
streaming_decoder = StreamingDecoder(model)
streaming_outputs = []
for i in range(0, len(audio), 1000):
for time in streaming_decoder.find_speaker_change_times(audio[i: i+1000]):
print("Found speaker change point: ", time)
if __name__ == '__main__':
unittest.main()
```
#### File: my_component/online_scd/utils.py
```python
from collections import OrderedDict, defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import numpy as np
from scipy.io import wavfile
def tp_fp_fn(preds, targets, tolerance=50):
"""
Returns a tuple of true positives, false positives and false negatives given
predictions and target values.
"""
preds_idx = np.where(preds)[0]
targets_idx = np.where(targets)[0]
n = len(targets_idx)
m = len(preds_idx)
if (m==0):
return 0.0, 0.0, n
elif (n==0):
return 0.0, m, 0.0
delta = np.zeros((n, m))
for i in range(n):
for j in range(m):
delta[i, j] = abs(targets_idx[i]-preds_idx[j])
delta[np.where(delta > tolerance)] = np.inf
# h always contains the minimum value in delta matrix
# h == np.inf means that no boundary can be matched
h = np.amin(delta)
n_matches = 0.
# while there are still boundaries to match
while h < np.inf:
# increment match count
n_matches += 1
# find boundaries to match
k = np.argmin(delta)
i = k // m
j = k % m
# make sure they cannot be matched again
delta[i, :] = np.inf
delta[:, j] = np.inf
# update minimum value in delta
h = np.amin(delta)
return n_matches, m-n_matches, n-n_matches
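# --- Illustrative sketch (added; not part of the original file) -------------
# Assumed example of the boundary-matching metric above: a predicted boundary
# within `tolerance` frames of a target boundary counts as a true positive,
# and every unmatched prediction/target becomes a false positive/negative.
def _example_tp_fp_fn():
    preds = np.zeros(100)
    targets = np.zeros(100)
    targets[40] = 1
    preds[45] = 1   # 5 frames away from the target -> matched (true positive)
    preds[95] = 1   # 55 frames away, beyond the tolerance -> false positive
    return tp_fp_fn(preds, targets, tolerance=50)  # (1.0, 1.0, 0.0)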
def load_wav_file(sound_file_path, sample_rate):
"""Load the wav file at the given file path and return a float32 numpy array."""
with open(sound_file_path, 'rb') as f:
wav_sample_rate, sound_np = wavfile.read(f)
# FIXME: resample is necessary
assert(sample_rate == wav_sample_rate)
if sound_np.dtype != np.float32:
assert sound_np.dtype == np.int16
sound_np = np.divide(
sound_np, 32768, dtype=np.float32
) # ends up roughly between -1 and 1
assert(len(sound_np.shape) == 1)
return sound_np
```
#### File: template/my_component/streamlit.py
```python
import streamlit as st
import time
import numpy as np
import IPython.display as ipd
#ipd.Audio(audio, rate=16000)
from online_scd.model import SCDModel
from online_scd.streaming import StreamingDecoder
import timeit
from online_scd.utils import load_wav_file
import multiprocessing
import playsound
import queue
import time
from typing import List
import numpy as np
import pydub
from pydub.playback import play
import streamlit as st
from streamlit_webrtc import (
ClientSettings,
WebRtcMode,
webrtc_streamer,
)
def stream_mic():
webrtc_ctx = webrtc_streamer(
key="speech-to-text",
mode=WebRtcMode.SENDONLY,
audio_receiver_size=1024,
client_settings=ClientSettings(
rtc_configuration={
"iceServers": [{"urls": ["stun:stun.l.google.com:19302"]}]
},
media_stream_constraints={"video": False, "audio": True},
),
)
status_indicator = st.empty()
if not webrtc_ctx.state.playing:
return
status_indicator.write("Loading...")
text_output = st.empty()
stream = None
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
model = SCDModel.load_from_checkpoint("test/sample_model/checkpoints/epoch=102.ckpt")
streaming_decoder = StreamingDecoder(model)
frame_number = 0
status_indicator.write("Model loaded.")
ct=0
while True:
if webrtc_ctx.audio_receiver:
sound_chunk = pydub.AudioSegment.empty()
try:
audio_frames = webrtc_ctx.audio_receiver.get_frames(timeout=1)
except queue.Empty:
time.sleep(0.1)
status_indicator.write("No frame arrived.")
continue
status_indicator.write("Running. Say something!")
for audio_frame in audio_frames:
sound = pydub.AudioSegment(
data=audio_frame.to_ndarray().tobytes(),
sample_width=audio_frame.format.bytes,
frame_rate=audio_frame.sample_rate,
channels=len(audio_frame.layout.channels),
)
sound_chunk += sound
if len(sound_chunk) > 0:
sound_chunk = sound_chunk.set_channels(1).set_frame_rate(
16000
)
buffer = np.array(sound_chunk.get_array_of_samples())
text_output.markdown(f"{ct/16000} seconds")
buffer = np.array(buffer)/32768
ct+=len(buffer)
#text_output.markdown(f"burh{ct}")
for i in range(0, len(buffer), 1000):
for probs in streaming_decoder.process_audio(buffer[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
else:
status_indicator.write("AudioReciver is not set. Abort.")
break
# rerun.
st.button("Re-run")
def stream(file_name):
sound = pydub.AudioSegment.from_wav(file_name)
sound = sound.set_channels(1).set_frame_rate(16000)
audio = np.array(sound.get_array_of_samples())/32768
last_rows = np.zeros((1,1))
chart = st.line_chart(last_rows)
text_output = st.empty()
model = SCDModel.load_from_checkpoint("test/sample_model/checkpoints/epoch=102.ckpt")
streaming_decoder = StreamingDecoder(model)
frame_number = 0
#p = multiprocessing.Process(target=playsound.playsound, args=(file_name,))
import simpleaudio as sa
wave_obj = sa.WaveObject.from_wave_file(file_name)
#play_obj = wave_obj.play()
p = multiprocessing.Process(target=lambda x:x.play(), args=(wave_obj,))
p.start()
start_0 = timeit.default_timer()
for i in range(0, len(audio), 1000):
start = timeit.default_timer()
for probs in streaming_decoder.process_audio(audio[i: i+1000]):
new_rows = np.zeros((1, 1))
new_rows[0,0] = probs[1].detach().numpy()
chart.add_rows(new_rows)
frame_number += 1
end = timeit.default_timer()
text_output.markdown(f"{end-start_0} seconds")
time.sleep(max(0,1/16-end+start))
st.button("Re-run")
def main():
option = st.selectbox(
'Which audio source would you like to use?',
('microphone', 'sample wav (osoon)'), 0)
if option == 'sample wav (osoon)':
file_name = "3321821.wav"
stream(file_name)
elif option == 'microphone':
stream_mic()
if __name__ == "__main__":
main()
``` |
{
"source": "joonaslomps/hiragana-ocr",
"score": 2
} |
#### File: hiragana-ocr/code/test.py
```python
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
from random import shuffle
from matplotlib import pyplot as plt
from os import listdir
from os.path import isfile, join
letters = ["a","i","u","e","o","ka","ki","ku","ke","ko","sa","shi","su","se","so","fu","ha","hi","ho","he","ma","mi","mu","me","mo","n","na","ni","no","nu","ne","ra","ri","ru","re","ro","ta","chi","to","te","tsu","wa","wo","ya","yo","yu"]
lettersN = range(46)
filePrefixes = ["a","i","u","e","o","ka","ki","ku","ke","ko","sa","shi","su","se","so","fu","ha","hi","ho","he","ma","mi","mu","me","mo","n_","na","ni","no","nu","ne","ra","ri","ru","re","ro","ta","chi","to","te","tsu","wa","wo","ya","yo","yu", "da", "ji_", "du", "de", "do","zo","ji(shi)","zu","ze","zo","ba","bi","bu","be","bo","pa","pi","pu","pe","po", "ga","gi","gu","ge","go"]
SZ=50
bin_n = 16 # Number of bins
affine_flags = cv2.WARP_INVERSE_MAP|cv2.INTER_LINEAR
SVM_GAMMA = 5.383
SVM_C = 2.67
def deskew(img):
m = cv2.moments(img)
if abs(m['mu02']) < 1e-2:
return img.copy()
skew = m['mu11']/m['mu02']
M = np.float32([[1, skew, -0.5*SZ*skew], [0, 1, 0]])
img = cv2.warpAffine(img,M,(SZ, SZ),flags=affine_flags)
return img
def hog(img):
gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
mag, ang = cv2.cartToPolar(gx, gy)
# quantizing binvalues in (0...16)
bins = np.int32(bin_n*ang/(2*np.pi))
x = 25
# Divide to 4 sub-squares
bin_cells = bins[:x,:x], bins[x:,:x], bins[:x,x:], bins[x:,x:]
mag_cells = mag[:x,:x], mag[x:,:x], mag[:x,x:], mag[x:,x:]
hists = [np.bincount(b.ravel(), m.ravel(), bin_n) for b, m in zip(bin_cells, mag_cells)]
hist = np.hstack(hists)
return hist
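# --- Illustrative sketch (added; not part of the original file) -------------
# Assumed helper showing the per-character feature pipeline used by the
# classifiers below: binarize and dilate a 50x50 grayscale image, deskew and
# compute HOG row-wise, then flatten to the 3200-dimensional vector.
def _example_hog_descriptor(image_50x50):
    thresh = cv2.threshold(image_50x50, 100, 255, cv2.THRESH_BINARY_INV)[1]
    thresh = cv2.dilate(thresh, np.ones((3, 3), np.uint8), iterations=2)
    stacked = np.array([thresh])
    deskewed = [map(deskew, row) for row in stacked]
    hogdata = [map(hog, row) for row in deskewed]
    return np.array(hogdata).reshape(-1, 3200).astype(np.float32)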
def printNthInList(list, n):
text = ""
for i in range(2500):
if i % 50 > 0:
if list[n][i] != 0:
text += "+"
else:
text += " "
else:
text += "\n"
print text
# CREATE NxNpx data from picture
def make_unified_data(N):
for letter in letters:
for i in range(10):
image = cv2.imread("../data/"+letter+"/"+str(i)+".png")
image = cv2.resize(image, (N,N))
cv2.imwrite("../data/"+letter+"/"+str(i)+"_"+str(N)+"x"+str(N)+".png", image)
def make_usable_data(x, dataN, offset):
onePicPx = len(x[0]) * len(x[0][0])
# Make each pictures to 1-dim array (train data) 8 picture per letter
offset = offset
data = []
for i in range(len(x)/10):
for i in range(dataN):
data.append(x[offset+i])
offset += 10
data = np.array(data)
data = data.reshape(-1,onePicPx).astype(np.float32)
return data
# Load in the letters
def generate_image_data():
cells = []
for letter in letters:
for i in range(10):
if letter == "sa" and i == 6:
image = cv2.imread("../data/"+letter+"/"+str(i)+"_50x50.png",0)
thresh = cv2.threshold(image,100,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
cells.append(thresh)
else:
image = cv2.imread("../data/"+letter+"/"+str(i)+"_50x50.png",0)
thresh = cv2.threshold(image,100,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
cells.append(thresh)
# Make images to np array
x = np.array(cells)
deskewed = [map(deskew,row) for row in x]
hogdata = [map(hog,row) for row in deskewed]
return x,hogdata
######################################################
# SVM
######################################################
def test_SVM_accuracy(x, trainN, testN, name):
## TRAINING ###
# Make each pictures to 1-dim array (train data) trainN picture per letter
train = make_usable_data(x, trainN, 0)
# Generate integer values for letters
train_labels = np.repeat(lettersN, trainN)[:,np.newaxis]
# Make svm
svm = cv2.ml.SVM_create()
svm.setGamma(SVM_GAMMA)
svm.setC(SVM_C)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
ok = svm.train(train,cv2.ml.ROW_SAMPLE,train_labels)
### TESTING ###
# Make each pictures to 1-dim array (test data) testN pictures per letter
test = make_usable_data(x, testN, trainN)
# Generate integer values for letters
test_labels = np.repeat(lettersN, testN)[:,np.newaxis]
result = svm.predict(test)
### CHECK ACCURACY ###
mask = result[1]==test_labels
correct = np.count_nonzero(mask)
accuracy = correct*100.0/result[1].size
print name + str(accuracy)
######################################################
# SVM
######################################################
######################################################
# k-Nearest Neighbour - with picture
# x = Array of characters in format of [[px,px,px],[px,px,px],[px,px,px]] - 3x3px image.
#
######################################################
def test_kNN_accuracy(x, trainN, testN, name):
## TRAINING ###
# Make each pictures to 1-dim array (train data) trainN picture per letter
train = make_usable_data(x, trainN, 0)
print len(train)
print len(train[0])
# Generate integer values for letters
train_labels = np.repeat(lettersN, trainN)[:,np.newaxis]
# Do the real k-nearest neighbour search
knn = cv2.ml.KNearest_create()
knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)
### TESTING ###
# Make each pictures to 1-dim array (test data) testN pictures per letter
test = make_usable_data(x, testN, trainN)
ret,result,neighbours,dist = knn.findNearest(test,k=4)
test_labels = np.repeat(lettersN, testN)[:,np.newaxis]
### CHECK ACCURACY ###
matches = result==test_labels
correct = np.count_nonzero(matches)
accuracy = correct*100.0/result.size
print name + str(accuracy)
######################################################
# k-Nearest Neighbour - with picture
######################################################
# Merges each pair of rectangles into one
def pair_pairs(pairs, rects):
for pair in pairs:
upper = None
lower = None
if pair[0][1] > pair[1][1]:
upper = pair[1]
lower = pair[0]
else:
upper = pair[0]
lower = pair[1]
x = min(upper[0], lower[0])
y = upper[1]
w = abs(lower[0] - upper[0]) + max(upper[2], lower[2])
h = lower[1] - upper[1] + lower[3]
rects.append((x,y,w,h))
return rects
def find_pairs(rects, offset):
pairs = []
changed = []
# Fix contours for side by side
for i in range(len(rects)):
for j in range(len(rects)):
if j <= i:
continue
c_1 = rects[i]
c_2 = rects[j]
if abs(c_1[0]-c_2[0]) <= offset:
pairs.append([c_1,c_2])
changed.append(c_1)
changed.append(c_2)
return pairs, changed
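# --- Illustrative sketch (added; not part of the original file) -------------
# find_pairs() groups bounding rectangles whose x coordinates lie within
# `offset` pixels of each other (e.g. two strokes of one character), and
# pair_pairs() replaces each pair with a single enclosing rectangle.
# Assumed example values:
def _example_merge_rects():
    rects = [(10, 10, 20, 20), (12, 40, 20, 20), (100, 10, 20, 20)]
    pairs, changed = find_pairs(rects, 10)    # pairs the first two rectangles
    merged = pair_pairs(pairs, list(rects))   # appends their enclosing box
    return merged, changed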
def rec_from_image(fileLocation, rawdata, hogdata):
image = cv2.imread(fileLocation,0)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
white = np.copy(image)
for i in range(len(white)):
for j in range(len(white[0])):
white[i][j] = 255
blur = cv2.GaussianBlur(image, (3, 3), 0)
frameDelta = cv2.absdiff(white, blur)
thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
thresh = cv2.dilate(thresh, None, iterations=2)
_, cnts, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
rects = []
for c in cnts:
rects.append(cv2.boundingRect(c))
# Fix contours for up and down
pairs, changed = find_pairs(rects, 10)
rects = pair_pairs(pairs, rects)
for c in changed:
if c in rects:
rects.remove(c)
pairs, changed = find_pairs(rects, 50)
rects = pair_pairs(pairs, rects)
for c in changed:
if c in rects:
rects.remove(c)
knnRawImage = np.copy(image)
knnHOGImage = np.copy(image)
SVMRawImage = np.copy(image)
SVMHOGImage = np.copy(image)
train_labels = np.repeat(lettersN, 10)[:,np.newaxis]
trainRaw = make_usable_data(rawdata, 10, 0)
trainHOG = make_usable_data(hogdata, 10, 0)
# ### Train kNN-raw
knnRaw = cv2.ml.KNearest_create()
knnRaw.train(trainRaw, cv2.ml.ROW_SAMPLE, train_labels)
print "kNN-Raw trained"
# ### Train kNN-HOG
knnHOG = cv2.ml.KNearest_create()
knnHOG.train(trainHOG, cv2.ml.ROW_SAMPLE, train_labels)
print "kNN-HOG trained"
# ### Train SVM-raw
svmRAW = cv2.ml.SVM_create()
svmRAW.setGamma(SVM_GAMMA)
svmRAW.setC(SVM_C)
svmRAW.setKernel(cv2.ml.SVM_LINEAR)
svmRAW.setType(cv2.ml.SVM_C_SVC)
ok = svmRAW.train(trainRaw,cv2.ml.ROW_SAMPLE,train_labels)
print "SVM-HOG trained"
# ### Train SVM-raw
svmHOG = cv2.ml.SVM_create()
svmHOG.setGamma(SVM_GAMMA)
svmHOG.setC(SVM_C)
svmHOG.setKernel(cv2.ml.SVM_LINEAR)
svmHOG.setType(cv2.ml.SVM_C_SVC)
ok = svmHOG.train(trainHOG,cv2.ml.ROW_SAMPLE,train_labels)
print "SVM-HOG trained"
for rect in rects:
# compute the bounding box for the contour, draw it on the frame,
# and update the text
(x, y, w, h) = rect
rectImage = image[y:h+y,x:w+x]
rectImage = cv2.resize(rectImage, (50,50))
thresh = cv2.threshold(rectImage,100,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)
test_raw = np.array([thresh])
deskewed = [map(deskew,row) for row in test_raw]
test_hogdata = [map(hog,row) for row in deskewed]
test_hogdata = np.array(test_hogdata)
test_raw = test_raw.reshape(-1,2500).astype(np.float32)
test_hogdata = test_hogdata.reshape(-1,3200).astype(np.float32)
ret,result,neighbours,dist = knnRaw.findNearest(test_raw, k=4)
cv2.putText(knnRawImage,letters[int(result[0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
ret,result,neighbours,dist = knnHOG.findNearest(test_hogdata, k=4)
cv2.putText(knnHOGImage,letters[int(result[0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
result = svmRAW.predict(test_raw)
cv2.putText(SVMRawImage,letters[int(result[1][0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
result = svmHOG.predict(test_hogdata)
cv2.putText(SVMHOGImage,letters[int(result[1][0][0])],(x+w/2,y+h+20), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,255,0),2)
cv2.imshow("image", image)
cv2.imshow("knnRawImage", knnRawImage)
cv2.imshow("knnHOGImage", knnHOGImage)
cv2.imshow("SVMRawImage", SVMRawImage)
cv2.imshow("SVMHOGImage", SVMHOGImage)
# TEST
def test_kNN_HOG_accuracy_full(test_amount):
knnRaw = cv2.ml.KNearest_create()
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
thresh = np.array([thresh])
deskewed = [map(deskew,row) for row in thresh]
hogData = [map(hog,row) for row in deskewed]
if(len(nameFiles) - test_amount < j):
test.append(hogData)
test_labels.append(filePrefixes.index(name))
else:
train.append(hogData)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,3200).astype(np.float32)
knnRaw.train(x, cv2.ml.ROW_SAMPLE, np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,3200).astype(np.float32)
ret,result,neighbours,dist = knnRaw.findNearest(y,k=4)
correct = 0
for i in range(len(neighbours)):
# print str(neighbours[i]) + " - " + str(test_labels[i]) + " - " + str(result[i])
if test_labels[i] == result[i][0]:
correct = correct + 1
accuracy = correct*100.0/result.size
print "kNN - HOG: " + str(accuracy) + "%"
def test_kNN_RAW_accuracy_full(test_amount):
knnRaw = cv2.ml.KNearest_create()
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
if(len(nameFiles) - test_amount <= j):
test.append(thresh)
test_labels.append(filePrefixes.index(name))
else:
train.append(thresh)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,2500).astype(np.float32)
knnRaw.train(x, cv2.ml.ROW_SAMPLE, np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,2500).astype(np.float32)
ret,result,neighbours,dist = knnRaw.findNearest(y,k=4)
correct = 0
for i in range(len(neighbours)):
# print str(neighbours[i]) + " - " + str(test_labels[i]) + " - " + str(result[i])
if test_labels[i] == result[i][0]:
correct = correct + 1
accuracy = correct*100.0/result.size
print "kNN - RAW: " + str(accuracy) + "%"
def test_SVM_RAW_accuracy_full(test_amount):
svm = cv2.ml.SVM_create()
svm.setGamma(SVM_GAMMA)
svm.setC(SVM_C)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
if(len(nameFiles) - test_amount <= j):
test.append(thresh)
test_labels.append(filePrefixes.index(name))
else:
train.append(thresh)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,2500).astype(np.float32)
ok = svm.train(x,cv2.ml.ROW_SAMPLE,np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,2500).astype(np.float32)
result = svm.predict(y)
correct = 0
for i in range(len(result[1])):
# print str(test_labels[i]) + " - " + str(result[1][i][0])
if test_labels[i] == result[1][i][0]:
correct = correct + 1
accuracy = correct*100.0/result[1].size
print "SVM - RAW: " + str(accuracy) + "%"
def test_SVM_HOG_accuracy_full(test_amount):
svm = cv2.ml.SVM_create()
svm.setGamma(SVM_GAMMA)
svm.setC(SVM_C)
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
folder = "../data/templates/singles_50x50/"
files = [f for f in listdir(folder) if isfile(join(folder, f))]
fileCounts = []
train = []
train_labels = []
test = []
test_labels = []
i = 1
j = 0
for name in filePrefixes:
nameFiles = [k for k in files if k.startswith(name)]
shuffle(nameFiles)
fileCounts.append(len(nameFiles))
for fileName in nameFiles:
image = cv2.imread(folder + fileName,0)
thresh = cv2.threshold(image,220,255,cv2.THRESH_BINARY_INV)[1]
thresh = cv2.dilate(thresh, np.ones((3,3),np.uint8), iterations=2)
thresh = np.array([thresh])
deskewed = [map(deskew,row) for row in thresh]
hogData = [map(hog,row) for row in deskewed]
if(len(nameFiles) - test_amount < j):
test.append(hogData)
test_labels.append(filePrefixes.index(name))
else:
train.append(hogData)
train_labels.append(filePrefixes.index(name))
j = j+1
j=0
# print i/71.0 * 100.0
i = i+1
# print fileCounts
# # Make images to np array
x = np.array(train)
x = x.reshape(-1,3200).astype(np.float32)
ok = svm.train(x,cv2.ml.ROW_SAMPLE,np.array(train_labels))
y = np.array(test)
y = y.reshape(-1,3200).astype(np.float32)
result = svm.predict(y)
correct = 0
for i in range(len(result[1])):
# print str(test_labels[i]) + " - " + str(result[1][i][0])
if test_labels[i] == result[1][i][0]:
correct = correct + 1
accuracy = correct*100.0/result[1].size
print "SVM - HOG: " + str(accuracy) + "%"
####################################################################
# From https://gist.github.com/moshekaplan/5106221#file-test_surf-py
def filter_matches(kp1, kp2, matches, ratio = 0.75):
mkp1, mkp2 = [], []
for m in matches:
if len(m) == 2 and m[0].distance < m[1].distance * ratio:
m = m[0]
mkp1.append( kp1[m.queryIdx] )
mkp2.append( kp2[m.trainIdx] )
kp_pairs = zip(mkp1, mkp2)
return kp_pairs
def explore_match(win, img1, img2, kp_pairs, status = None, H = None):
h1, w1 = img1.shape[:2]
h2, w2 = img2.shape[:2]
vis = np.zeros((max(h1, h2), w1+w2), np.uint8)
vis[:h1, :w1] = img1
vis[:h2, w1:w1+w2] = img2
vis = cv2.cvtColor(vis, cv2.COLOR_GRAY2BGR)
if H is not None:
corners = np.float32([[0, 0], [w1, 0], [w1, h1], [0, h1]])
corners = np.int32( cv2.perspectiveTransform(corners.reshape(1, -1, 2), H).reshape(-1, 2) + (w1, 0) )
cv2.polylines(vis, [corners], True, (255, 255, 255))
if status is None:
status = np.ones(len(kp_pairs), np.bool_)
p1 = np.int32([kpp[0].pt for kpp in kp_pairs])
p2 = np.int32([kpp[1].pt for kpp in kp_pairs]) + (w1, 0)
green = (0, 255, 0)
red = (0, 0, 255)
white = (255, 255, 255)
kp_color = (51, 103, 236)
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
col = green
cv2.circle(vis, (x1, y1), 2, col, -1)
cv2.circle(vis, (x2, y2), 2, col, -1)
else:
col = red
r = 2
thickness = 3
cv2.line(vis, (x1-r, y1-r), (x1+r, y1+r), col, thickness)
cv2.line(vis, (x1-r, y1+r), (x1+r, y1-r), col, thickness)
cv2.line(vis, (x2-r, y2-r), (x2+r, y2+r), col, thickness)
cv2.line(vis, (x2-r, y2+r), (x2+r, y2-r), col, thickness)
vis0 = vis.copy()
for (x1, y1), (x2, y2), inlier in zip(p1, p2, status):
if inlier:
cv2.line(vis, (x1, y1), (x2, y2), green)
cv2.imshow(win, vis)
def draw_matches(window_name, kp_pairs, img1, img2):
"""Draws the matches for """
mkp1, mkp2 = zip(*kp_pairs)
p1 = np.float32([kp.pt for kp in mkp1])
p2 = np.float32([kp.pt for kp in mkp2])
if len(kp_pairs) >= 4:
H, status = cv2.findHomography(p1, p2, cv2.RANSAC, 5.0)
#print '%d / %d inliers/matched' % (np.sum(status), len(status))
else:
H, status = None, None
#print '%d matches found, not enough for homography estimation' % len(p1)
if len(p1):
explore_match(window_name, img1, img2, kp_pairs, status, H)
####################################################################
# x, hogdata = generate_image_data()
# test_kNN_accuracy(x,8,2, "kNN: Raw-Data accuracy: ") # kNN with raw pixel data
# test_kNN_accuracy(hogdata,8,2, "kNN: HOG data accuracy: ") # kNN with HOG data
# test_SVM_accuracy(x,8,2, "SVM: Raw-Data data accuracy: ") # SVM with raw pixel data
# test_SVM_accuracy(hogdata,8,2, "SVM: HOG data accuracy: ") # SVM with HOG data
# testFile = "../data/long/nihon.png"
# rec_from_image(testFile, x, hogdata)
# Test with whole dataset.
# test_kNN_HOG_accuracy_full(80)
# test_kNN_RAW_accuracy_full(80)
# test_SVM_RAW_accuracy_full(80)
# test_SVM_HOG_accuracy_full(80)
folder = "../data/templates/singles_50x50/"
surf = cv2.SURF(100)
surf.extended = True
files = ["ba_86.png","go_172.png","po_64.png","hi_157.png","de_28.png","go_111.png","ho_91.png","ya_134.png","to_169.png","ki_166.png"]
matcher = cv2.BFMatcher(cv2.NORM_L2)
# for file in files:
# image = cv2.imread(folder + file, 0)
# kp, des = surf.detectAndCompute(image, None)
# print len(kp)
# img2 = cv2.drawKeypoints(image,kp,None,(255,0,0),4)
image1 = cv2.imread(folder + files[1], 0)
kp1, des1 = surf.detectAndCompute(image1, None)
# image1keypoints = cv2.drawKeypoints(image1,kp1,None,(255,0,0),4)
image2 = cv2.imread(folder + files[3], 0)
kp2, des2 = surf.detectAndCompute(image2, None)
# image2keypoints = cv2.drawKeypoints(image2,kp2,None,(255,0,0),4)
print len(kp1)
print len(kp2)
print len(des1)
print len(des2)
raw_matches = matcher.knnMatch(des1, trainDescriptors = des2, k = 2) #2
kp_pairs = filter_matches(kp1, kp2, raw_matches)
draw_matches("test", kp_pairs, image1, image2)
cv2.waitKey(0)
``` |
{
"source": "joonaspessi/acoustic-scene-2018",
"score": 3
} |
#### File: joonaspessi/acoustic-scene-2018/utils.py
```python
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from time import time
from sklearn.model_selection import GridSearchCV
from sklearn.preprocessing import Normalizer
from scipy.spatial.distance import pdist
from sklearn.model_selection import PredefinedSplit
def training_data():
# X training set
X_train = np.load("data/X_train.npy")
# X test set
X_test = np.load("data/X_test.npy")
# Y training set
y_train_csv = np.array(pd.read_csv("data/y_train.csv"))
# Create an index of class names
# ---------------------------------------------------------------------
# Label encoder for Y scene_label column
y_label_encoder = preprocessing.LabelEncoder()
y_label_encoder.fit(y_train_csv[0:,1])
y_train = y_label_encoder.transform(y_train_csv[0:, 1])
return X_train, y_train, X_test, y_label_encoder
def extract_feature(train, test, method="B", scale=False):
# Extract features
# ---------------------------------------------------------------------
X_train = []
X_test = []
if (method == "A"):
# A:
# 20040-dimensional vector from each sample
X_train = train.reshape(-1, train.shape[1] * train.shape[2])
X_test = test.reshape(-1, train.shape[1] * train.shape[2])
elif(method == "B"):
# B:
# (4500,40) data matrix, each of the 4500 samples is a vector of 40 frequency bins averaged over time
X_train = np.mean(train,axis=2)
X_test = np.mean(test,axis=2)
elif(method == "C"):
# C:
# (4500,501) data matrix, each of the 4500 samples is a vector of 501 time points averaged frequencies
X_train = np.mean(train,axis=1)
X_test = np.mean(test,axis=1)
elif method == "D":
# D:
# (4500,780) data matrix, each of the 4500 samples is a vectorized distance matrix of the cosine/correlation/euclidean/seuclidean...
# distances between time point vectors on each frequency bin
for index in range(np.shape(train)[0]):
observations = train[index,:,:]
dist_train = pdist(observations, 'seuclidean')
X_train.append(dist_train)
for index in range(np.shape(test)[0]):
observations = test[index,:,:]
dist_test = pdist(observations, 'seuclidean')
X_test.append(dist_test)
X_train = np.array(X_train)
X_test = np.array(X_test)
elif method == "plain":
X_train = train
X_test = test
# Scale
if scale:
scaler = preprocessing.MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
return X_train, X_test
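# --- Illustrative sketch (added; not part of the original file) -------------
# Assumed end-to-end example: load the data, average each (40, 501)
# mel-spectrogram over time (method "B") and min-max scale the 40 features.
def _example_extract_features_b():
    X_train, y_train, X_test, y_label_encoder = training_data()
    X_train_b, X_test_b = extract_feature(X_train, X_test, method="B", scale=True)
    return X_train_b.shape, X_test_b.shape  # (4500, 40) and (n_test, 40)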
def write_submission(name, y):
filename = "".join(["submission_", name, ".csv"])
with open(filename, "w") as fp:
fp.write("Id,Scene_label\n")
for i, label in enumerate(y):
fp.write("%d,%s\n" % (i, label))
def report(results, name, n_top=3):
print(name,"Grid scores on development set:")
for i in range(1, n_top + 1):
candidates = np.flatnonzero(results['rank_test_score'] == i)
for candidate in candidates:
print("Model with rank: {0}".format(i))
print("Mean validation score: {0:.3f} (std: {1:.3f})".format(
results['mean_test_score'][candidate],
results['std_test_score'][candidate]))
print(name, "Parameters: {0}".format(results['params'][candidate]))
print("")
def split_data(X_train, y_train):
y_cv_split = np.array(pd.read_csv("data/crossvalidation_train.csv"))
train_indices = y_cv_split[y_cv_split[:,2] == "train"][:,0].astype(int)
test_indices = y_cv_split[y_cv_split[:,2] == "test"][:,0].astype(int)
X_test = X_train[train_indices, :]
X_cv = X_train[test_indices, :]
y_test = y_train[train_indices]
y_cv = y_train[test_indices]
# Array which contains "test" values marked as True and "train" as false
mask = y_cv_split[:, 2] == "test"
mask = mask.astype(int)
# "test" values marked as 0 and train values "-1"
mask -= 1
cv_split_indices = PredefinedSplit(mask).split()
return X_test, X_cv, y_test, y_cv, cv_split_indices
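# --- Illustrative sketch (added; not part of the original file) -------------
# split_data() applies the predefined split from data/crossvalidation_train.csv
# and also returns a PredefinedSplit iterator that GridSearchCV can consume
# directly, as test_classifier() below does. Assumed, uncalled example:
def _example_split_usage(X_train, y_train):
    X_t, X_cv, y_t, y_cv, cv_indices = split_data(X_train, y_train)
    return X_t.shape, X_cv.shape, y_t.shape, y_cv.shape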
def test_classifier(Clf, param_grid, feature_extract):
X_train, y_train, X_test, y_label_encoder = training_data()
X_train, X_test = extract_feature(X_train, X_test, feature_extract)
X_t, X_cv, y_t, y_cv, cv_indices = split_data(X_train, y_train)
clf = Clf()
name = Clf.__name__
grid_search = GridSearchCV(clf, param_grid=param_grid, cv=cv_indices)
start = time()
grid_search.fit(X_train, y_train)
print("GridSearchCV took %.2f seconds for %d candidate parameter settings." % (time() - start, len(grid_search.cv_results_['params'])))
report(grid_search.cv_results_, name)
    # not necessary, but good to check that the result is similar to the grid search
testModel = Clf(**grid_search.best_params_)
testModel.fit(X_t, y_t)
score = accuracy_score(y_cv, testModel.predict(X_cv))
print()
print("--------------------")
print(name)
print("Best estimator crossValidation score:", score)
print("Parameters: {0}".format(grid_search.best_params_))
print("--------------------")
print()
# Predict against test set
y_pred_test = grid_search.predict(X_test)
# write submission file for prediction
y_pred_labels = list(y_label_encoder.inverse_transform(y_pred_test))
write_submission(name, y_pred_labels)
return clf, score
``` |
{
"source": "joonaspessi/courses",
"score": 4
} |
#### File: SGN-41007/Week4/ex4_5.py
```python
from math import sqrt, pi
from numpy import log, exp, linspace
import matplotlib.pyplot as plt
def gaussian(x, mu, sigma):
a = 1 / (sqrt(2 * pi) * sigma)
b = exp(-pow(x - mu, 2) / (2 * pow(sigma, 2)))
return a * b
def log_gaussian(x, mu, sigma):
a = log(1 / (sqrt(2 * pi) * sigma))
b = -pow(x - mu, 2) / (2 * pow(sigma, 2))
return a + b
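# --- Illustrative note (added; not part of the original file) ---------------
# log_gaussian() is the logarithm of gaussian() written in closed form,
#   log N(x; mu, sigma) = -log(sqrt(2*pi)*sigma) - (x - mu)**2 / (2*sigma**2),
# which avoids underflow far from the mean. Quick numerical check with
# assumed example values:
def _check_log_gaussian(x=1.5, mu=0.0, sigma=1.0):
    return abs(log(gaussian(x, mu, sigma)) - log_gaussian(x, mu, sigma)) < 1e-9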
if __name__ == "__main__":
x = linspace(-5, 5, num=1000)
y1 = gaussian(x, 0, 1)
y2 = log_gaussian(x, 0, 1)
plt.plot(x, y1, 'r', label='gaussian')
plt.plot(x, y2, 'b', label='log_gaussian')
plt.show()
f, axarr = plt.subplots(2)
axarr[0].plot(x, y1, 'r', label='gaussian')
axarr[1].plot(x, y2, 'b', label='log_gaussian')
plt.show()
exit()
```
#### File: SGN-41007/Week6/utils.py
```python
from os.path import join, abspath
from os import walk
def absolute_file_paths(directories):
for directory in directories:
for dir_path, _, file_names in walk(directory):
for f in file_names:
yield abspath(join(dir_path, f))
``` |
{
"source": "joonaspu/ViControl",
"score": 3
} |
#### File: ViControl/examples/pygame_test.py
```python
import argparse
import platform
import pygame
from connection import Connection
if platform.system() == "Windows":
PYGAME_WINDOW = "python.exe"
else:
PYGAME_WINDOW = "pygame test"
parser = argparse.ArgumentParser()
parser.add_argument("x", nargs="?", type=int, help="X resolution", default=800)
parser.add_argument("y", nargs="?", type=int, help="Y resolution", default=600)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-f", "--fullscreen",
action="store_const",
dest="mode",
const="f",
help="Start the testing app in fullscreen mode"
)
group.add_argument(
"-b", "--borderless",
action="store_const",
dest="mode",
const="b",
help="Start the testing app in a borderless window"
)
class LimList():
""" List with a max size """
def __init__(self, max_size=10):
self.ls = []
self.max_size = max_size
def __iter__(self):
return iter(self.ls)
def add(self, item):
self.ls.insert(0, item)
if len(self.ls) > self.max_size:
self.ls.pop()
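# --- Illustrative sketch (added; not part of the original file) -------------
# LimList keeps only the `max_size` most recent items, newest first, which is
# how the key-press history lines drawn below stay bounded. Assumed example:
def _example_limlist():
    history = LimList(max_size=3)
    for key in ["a", "b", "c", "d"]:
        history.add(key)
    return list(history)  # ["d", "c", "b"]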
def main(args):
# Start pygame
pygame.init()
res = (args.x, args.y)
print(args.mode)
if args.mode == "f":
screen = pygame.display.set_mode(res, flags=pygame.FULLSCREEN)
elif args.mode == "b":
screen = pygame.display.set_mode(res, flags=pygame.NOFRAME)
else:
screen = pygame.display.set_mode(res, flags=0)
pygame.display.set_caption("pygame test")
pygame.mouse.set_pos(400, 300)
font = pygame.font.SysFont(None, 30)
# Start client connection
c = Connection()
key_d_log = LimList(10)
key_u_log = LimList(10)
keys_held = set()
click_pos = None
mouse_pos = None
mouse_rel = None
frame = 0
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE:
exit()
name = pygame.key.name(event.key)
keys_held.add(name)
key_d_log.add(name)
if event.type == pygame.KEYUP:
name = pygame.key.name(event.key)
keys_held.discard(name)
key_u_log.add(name)
if event.type == pygame.MOUSEBUTTONDOWN:
name = "mouse {}".format(event.button)
keys_held.add(name)
key_d_log.add(name)
click_pos = event.pos
if event.type == pygame.MOUSEBUTTONUP:
name = "mouse {}".format(event.button)
keys_held.discard(name)
key_u_log.add(name)
click_pos = event.pos
if event.type == pygame.MOUSEMOTION:
mouse_pos = event.pos
mouse_rel = event.rel
screen.fill((0, 0, 0))
# Draw text
surface = font.render("Down: " + ", ".join(key_d_log), True, (255, 255, 255))
screen.blit(surface, (10, 10))
surface = font.render("Up: " + ", ".join(key_u_log), True, (255, 255, 255))
screen.blit(surface, (10, 40))
surface = font.render("Held: " + ", ".join(sorted(keys_held)), True, (255, 255, 255))
screen.blit(surface, (10, 70))
surface = font.render("Click: " + str(click_pos), True, (255, 255, 255))
screen.blit(surface, (10, 100))
surface = font.render("Mouse: {} (rel: {})".format(mouse_pos, mouse_rel), True, (255, 255, 255))
screen.blit(surface, (10, 130))
surface = font.render("Frame: {}".format(frame), True, (255, 255, 255))
screen.blit(surface, (10, 160))
pygame.display.flip()
# Send/receive requests
# Set image quality
c.req.quality = 80
# Press keys
if frame == 1000:
c.req.press_keys.append("x")
c.req.press_keys.append("left shift")
c.req.press_keys.append("semicolon")
c.req.press_keys.append("numpad 0")
# Take screenshot
if frame == 1001:
c.req.get_image = True
c.req.process_name = PYGAME_WINDOW
# Release keys
if frame == 1100:
c.req.release_keys.append("x")
c.req.release_keys.append("left shift")
c.req.release_keys.append("semicolon")
c.req.release_keys.append("numpad 0")
# Take screenshot
if frame == 1101:
c.req.get_image = True
c.req.process_name = PYGAME_WINDOW
# Move mouse
if frame == 1200:
c.req.mouse.x = 1
c.req.mouse.y = -25
# Take screenshot
if frame == 1201:
c.req.get_image = True
c.req.process_name = PYGAME_WINDOW
resp = c.send_request()
if resp is not False:
if len(resp.image) > 0:
with open("frame_{}.jpg".format(frame), "wb") as f:
f.write(resp.image)
frame += 1
if __name__ == "__main__":
args = parser.parse_args()
main(args)
``` |
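The frame-numbered requests above exercise the `Connection` request/response API that `record_human_play.py` further below imports from `video_game_env.connection`. As a minimal, hypothetical sketch (the window title and output file name are placeholders, not taken from the original code), a single screenshot request could look like:
```python
from video_game_env.connection import Connection

c = Connection()
c.req.get_image = True
c.req.quality = 80                        # JPEG quality, as in the loop above
c.req.process_name = "some_window_title"  # placeholder window/process name
resp = c.send_request()
if resp is not False and len(resp.image) > 0:
    with open("screenshot.jpg", "wb") as f:
        f.write(resp.image)
```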
{
"source": "joonaspu/video-game-behavioural-cloning",
"score": 3
} |
#### File: joonaspu/video-game-behavioural-cloning/play_atari.py
```python
import os
from argparse import ArgumentParser
from multiprocessing import Process, Queue, set_start_method
from queue import Empty
import multiprocessing
from random import randint
from functools import reduce
import statistics
parser = ArgumentParser("Play and evaluate Atari games with a trained network.")
parser.add_argument("models", type=str, nargs="+",
help="Path of the file(s) where the model will be loaded from.")
parser.add_argument("--save", "-s", type=str, nargs="?", default="./results",
help="Path where the results of the evaluation will be saved.")
parser.add_argument("--env", type=str, default="SpaceInvaders-v0",
help="Name of the Atari environment to use")
parser.add_argument("--framestack", type=int, default=3,
help="Number of frames to stack (must match the number used in model)")
parser.add_argument("--merge", action="store_true",
help="Merge stacked frames into one image.")
parser.add_argument("--width", "-x", type=int, default=84,
help="Width of the image")
parser.add_argument("--height", "-y", type=int, default=84,
help="Height of the image")
parser.add_argument("--display", action="store_true",
help="Display gameplay in a window")
parser.add_argument("--processes", type=int, default=1,
help="How many parallel processes to run.")
parser.add_argument("--games", type=int, default=1,
help="How many games (per process) to run.")
parser.add_argument("--action", type=str, default="sampling",
choices=["sampling", "argmax"],
help="Use random sampling or argmax to pick actions.")
parser.add_argument("--no-op", type=int, default=0,
help="Maximum number of no-op actions at the beginning of each game.")
parser.add_argument("--max-frames", type=int, default=40000,
help="Maximum number of frames to run the game for before ending evaluation.")
parser.add_argument("--no-cuda", action="store_true",
help="Disable CUDA")
parser.add_argument("--random", action="store_true",
help="Ignore model and just pick random actions.")
args = parser.parse_args()
import numpy as np
from PIL import Image, ImageChops
import gym
import torch
import torch.nn as nn
import torch.nn.functional as F
from utils.networks import Mnih2015
if args.no_cuda:
device = torch.device("cpu")
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def play_game(model_name, env_name, game_queue, reward_queue, index):
"""Plays one game with the given model and gym environment
and returns the final score (i.e. cumulative reward)"""
print("Starting process #{}..".format(index))
if not args.random:
model = torch.load(model_name, map_location=device)
model.eval()
env = gym.make(env_name, full_action_space=True)
rng = np.random.default_rng()
while not game_queue.empty():
try:
game = game_queue.get(False, None)
except Empty:
print("Game queue empty")
return
# Pick a random number of no-ops to perform
no_ops = randint(0, args.no_op)
no_ops_done = 0
o = env.reset()
r, d, i = (0.0, False, None)
total_reward = 0
total_frames = 0
# Create a frame stack and fill it with zeros (black images)
stack = []
for _ in range(args.framestack):
stack.append(np.zeros((args.width, args.height, 3), dtype=np.uint8))
while True:
if args.display:
env.render()
# Resize image
img = Image.fromarray(o)
img = img.resize((args.width, args.height), Image.BILINEAR)
img = np.asarray(img)
# Update the frame stack
stack.insert(0, img)
while len(stack) > args.framestack:
stack.pop()
# Make sure we have enough frames stacked
if len(stack) != args.framestack:
continue
if args.merge:
# Convert numpy arrays to images
image_stack = map(Image.fromarray, stack)
# Get lightest pixel values from the stack
img = reduce(ImageChops.lighter, image_stack)
np_stack = np.asarray(img, dtype=np.float32)
np_stack = np.expand_dims(np_stack, axis=0)
else:
# Convert stack to numpy array with correct dimensions and type
np_stack = np.concatenate(stack, axis=2)
np_stack = np.expand_dims(np_stack, axis=0)
np_stack = np_stack.astype(np.float32)
# Normalize
np_stack /= 255
if no_ops_done < no_ops:
# Send a no-op action if we haven't done enough no-ops yet
o, r, d, i = env.step(0)
no_ops_done += 1
elif not args.random:
prediction = model(torch.Tensor(np.swapaxes(np_stack, 1, 3)).to(device)).detach().cpu()
prediction = F.softmax(prediction, dim=1)
if args.action == "argmax":
prediction = np.argmax(prediction)
elif args.action == "sampling":
# Perform a weighted selection from the indices
prediction = np.array(prediction[0])
p = prediction/np.sum(prediction)
prediction = rng.choice(list(range(len(prediction))), p=p)
o, r, d, i = env.step(prediction)
elif args.random:
o, r, d, i = env.step(np.random.randint(18))
total_reward += r
total_frames += 1
# Stop evaluation if game reaches terminal state or
# maximum number of frames is exceeded
if d or total_frames > args.max_frames:
reward_queue.put(total_reward)
break
print("#{} finished game {}".format(index, game))
def main():
set_start_method("spawn")
for model in args.models:
# Get model name from path
model_name = os.path.basename(os.path.normpath(model))
# Make sure results directory exists
results_path = os.path.normpath(args.save)
if not os.path.exists(results_path):
os.mkdir(results_path)
# Path to the results file
results_name = "{}.txt".format(model_name)
results_file = os.path.normpath(os.path.join(results_path, results_name))
print("Evaluating model {}".format(model))
# Queue for holding the rewards from processes
rewards = multiprocessing.Manager().Queue(1000000)
# Queue for holding remaining game IDs
games = multiprocessing.Manager().Queue(1000000)
for i in range(args.games):
games.put(i)
procs = []
# Start processes
# Using threads doesn't work as the OpenAI Atari gym crashes if run
# from multiple threads at the same time. Processes work fine though.
for i in range(args.processes):
proc = Process(target=play_game, args=(model, args.env, games, rewards, i))
proc.start()
procs.append(proc)
print("Processes started")
# Wait for processes to finish
for k, proc in enumerate(procs):
print("Waiting to join process #{}".format(k))
proc.join()
print("Joined process #{}".format(k))
print("Processes joined")
# Collect results from processes
with open(results_file, "w") as f:
rewards_list = []
while not rewards.empty():
r = rewards.get()
rewards_list.append(r)
f.write("{}\n".format(r))
print(r)
if len(rewards_list) <= 1:
avg = 0
std = 0
minim = 0
maxim = 0
else:
avg = round(statistics.mean(rewards_list), 1)
std = round(statistics.stdev(rewards_list), 1)
minim = min(rewards_list)
maxim = max(rewards_list)
f.write("Avg: {}".format(avg))
print("Avg: {}, std: {}, min: {}, max: {}".format(avg, std, minim, maxim))
if __name__ == "__main__":
main()
```
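As an aside on the `--merge` option handled above, the `reduce(ImageChops.lighter, ...)` idiom collapses a frame stack into a single image by taking the per-pixel maximum. A small standalone sketch (not part of the repository) with synthetic frames:
```python
from functools import reduce

import numpy as np
from PIL import Image, ImageChops

# Three fake 84x84 RGB frames, each with a bright square in a different position
stack = [np.zeros((84, 84, 3), dtype=np.uint8) for _ in range(3)]
for i, frame in enumerate(stack):
    frame[10 * i:10 * i + 10, 10 * i:10 * i + 10] = 255

image_stack = map(Image.fromarray, stack)
merged = reduce(ImageChops.lighter, image_stack)  # per-pixel maximum over the stack
print(np.asarray(merged).shape)  # (84, 84, 3): one image showing all three squares
```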
#### File: joonaspu/video-game-behavioural-cloning/plot_atari_eval.py
```python
from argparse import ArgumentParser
import statistics
import os
import re
import matplotlib.pyplot as plt
def get_avg_from_file(file_path):
with open(file_path) as f:
avg_line = f.readlines()[-1]
match = re.match(r"Avg: (.*)", avg_line)
return float(match.group(1))
def get_stdev_from_file(file_path):
values = get_datapoints_from_file(file_path)
return statistics.stdev(values)
def get_datapoints_from_file(file_path):
with open(file_path) as f:
lines = f.readlines()
values = []
for line in lines:
try:
values.append(float(line))
except ValueError:
pass
return values
PLOT_DIR = "plots"
if __name__ == "__main__":
parser = ArgumentParser("Plot Atari evaluation results.")
parser.add_argument("path", type=str,
help="Path to the directory where the result files are loaded from.")
parser.add_argument("name", type=str,
help="Name of the evaluation to plot.")
parser.add_argument("--show", action="store_true",
help="Show the figure on screen.")
parser.add_argument("--save", action="store_true",
help="Save the figure on disk.")
parser.add_argument("--noplot", action="store_true",
help="Do not do plotting.")
parser.add_argument("--repeats", type=int, default=3,
help="Number of repeated experiments.")
args = parser.parse_args()
averages = []
for repeat in range(1, args.repeats + 1):
# Regex for finding the result files
# 1st group: name
# 2nd group: epoch number
r = re.compile(r"(.*)_{}_([0-9]{{1,4}})\.pt\.txt".format(repeat))
files = []
path = os.path.normpath(args.path)
# Find matching files
for entry in os.listdir(path):
full_entry = os.path.join(path, entry)
if os.path.isfile(full_entry):
match = r.match(entry)
if match is not None and match.group(1) == args.name:
epoch = int(match.group(2))
files.append((
epoch,
get_avg_from_file(full_entry),
get_stdev_from_file(full_entry),
get_datapoints_from_file(full_entry)
))
# Sort the file list by epoch
files.sort(key=lambda x: x[0])
x, y, yerr, points = zip(*files)
x = list(x)
y = list(y)
yerr = list(yerr)
for epoch, entry, stdev, _ in files:
print("{}: {} (std {})".format(epoch, entry, stdev))
# Average of the final three
avrg_of_last_three = statistics.mean(y[-3:])
averages.append(avrg_of_last_three)
print("Average of final three eval points: ", avrg_of_last_three)
if args.noplot:
continue
plt.figure()
plt.rcParams["figure.figsize"] = (8, 6)
for i, v in enumerate(x):
for _y in points[i]:
plt.scatter(v, _y, marker="_", c="#00000028", linewidths=1)
plt.errorbar(x, y, yerr=yerr)
plt.title("{}_{}, max: {}: avrg[-3:]: {}".format(
args.name,
repeat,
round(max(y), 2),
round(avrg_of_last_three, 2)
))
if args.save:
if not os.path.exists(PLOT_DIR):
os.makedirs(PLOT_DIR)
file_name = os.path.basename(os.path.normpath("{}_{}".format(args.name, repeat)))
plt.savefig(os.path.join(PLOT_DIR, "{}.png".format(file_name)))
if args.show:
plt.show()
print("{}: ${} \pm {}$".format(
args.name,
round(statistics.mean(averages), 1),
round(statistics.stdev(averages), 1)
))
```
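For reference, the result files the script above looks for follow a `<name>_<repeat>_<epoch>.pt.txt` pattern. A quick, illustrative check of the regex (the file name is made up):
```python
import re

repeat = 1
r = re.compile(r"(.*)_{}_([0-9]{{1,4}})\.pt\.txt".format(repeat))
match = r.match("breakout_bc_1_50.pt.txt")
print(match.group(1), match.group(2))  # "breakout_bc" and epoch "50"
```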
#### File: joonaspu/video-game-behavioural-cloning/record_human_play.py
```python
import argparse
import time
import os
import json
from video_game_env.connection import Connection
# Note on the recording:
# Human actions per request are button presses/mouse movements
# that happened between last and current request.
# I.e. Human actions come delayed by one frame. Record
# accordingly.
# This code records a human player of an arbitrary game:
# the images (as seen on screen) are saved on disk
# along with actions and rewards for imitation learning.
# Actions and rewards are stored in a .json file in
# [args.output]/trajectories_pressed_buttons/[env_name]/[timestamp].json
# and frames are stored in
# [args.output]/screens/[env_name]/[timestamp]/
# as #.jpg images, where # is the timestep in the environment.
# Structure of the .json file:
#
# {
# "steps": [
# {"b": buttons pressed in step 0, "m": mouse movement in step 0, "t": time since start in ms},
# {"b": buttons pressed in step 1, "m": mouse movement in step 1, "t": time since start in ms},
# ...
# For [num_images] - 1 frames (last image does not have an action)
# ]
# }
parser = argparse.ArgumentParser("""Record humans playing video games.
Hotkeys:
- Page Up + Q: Quit
- Page Up + R: Start recording, or
stop and start new recording
- Page Up + S: Stop recording
""")
parser.add_argument("--dont-start-binary", action="store_true",
help="Do not start the recorder binary.")
parser.add_argument("--binary", default="video_game_env/main",
help="Path to the recorder binary.")
parser.add_argument("-f", "--framerate", type=int, default=20,
help="At what FPS we should store experiences (default: 20)")
parser.add_argument("-q", "--quality", type=int, default=80,
help="JPEG compression quality (default: 80)")
parser.add_argument("process_name", type=str,
help="Name of process to be recorded.")
parser.add_argument("env_name", type=str,
help="Name to be used when storing samples.")
parser.add_argument("output", type=str,
help="Root directory for saved recordings.")
def finish_recording(recording_path, env_name, unique_id, data):
"""Store recorded data into a json file"""
trajectory_file = os.path.join(
recording_path,
"trajectories_pressed_buttons",
"{}".format(env_name),
"{}.json".format(unique_id)
)
with open(trajectory_file, "w") as f:
json.dump(data, f)
def start_recording(recording_path, env_name):
"""
Create and initialize any directories/files
for recording, and return unique
ID for this recording (timestamp).
"""
unique_id = str(int(time.time()))
screens_dir = os.path.join(
recording_path,
"screens",
"{}".format(env_name),
unique_id
)
trajectories_dir = os.path.join(
recording_path,
"trajectories_pressed_buttons",
"{}".format(env_name)
)
os.makedirs(screens_dir)
os.makedirs(trajectories_dir, exist_ok=True)
return unique_id, screens_dir
def main(args):
c = Connection(
start_binary=not args.dont_start_binary,
binary_path=args.binary
)
record = False
# ID for current recording directories
recording_id = None
# Directory where to save images
image_directory = None
# Actions and other metadata per frame. To be
# stored in a JSON file.
recorded_data = []
recording_index = 0
recording_start_time = None
# Store previous response
# for storing actions with one frame delay
previous_response = None
# Also store when last response happened
previous_frame_time = None
frame_time = None
target_time_per_frame = 1.0 / args.framerate
print("Ready to record (Page Up + r)...")
# KeyboardInterrupt catch for saving
# unsaved data.
try:
while True:
frame_time = time.time()
# TODO check that there is a frame
c.req.get_keys = True
c.req.get_mouse = True
c.req.get_image = True
c.req.quality = args.quality
c.req.process_name = args.process_name
response = c.send_request()
# Hotkeys for Record, Stop and Quit
if "page up" in response.pressed_keys:
if "q" in response.pressed_keys:
# Make sure we do not discard
# any samples
if record:
finish_recording(
args.output,
args.env_name,
recording_id,
recorded_data
)
exit()
if "r" in response.pressed_keys:
# If recording, save current frames.
# Make sure we have some frames recorded,
# because otherwise this triggers too soon
if record and recording_index > args.framerate:
finish_recording(
args.output,
args.env_name,
recording_id,
recorded_data
)
print("Saved {} frames".format(recording_index))
elif record and recording_index < args.framerate:
continue
if not record:
# Show helpful info
print("Recording started (Page Up + s to stop)...")
print("Or Page Up + r to save current frames.")
record = True
recorded_data = []
previous_response = None
previous_frame_time = None
recording_id = None
recording_index = 0
recording_start_time = time.time()
recording_id, image_directory = start_recording(
args.output,
args.env_name
)
continue
elif "s" in response.pressed_keys:
if record:
record = False
finish_recording(
args.output,
args.env_name,
recording_id,
recorded_data
)
print("Recording done with {} frames".format(recording_index))
# Store actions and current image
if record:
# Store image
image = response.image
with open(os.path.join(image_directory, "{}.jpg".format(recording_index)), "wb") as f:
f.write(image)
recording_index += 1
# If we had previous_response, store actions.
# This will delay actions by one frame (to align them),
# and also will cause one frame to be without actions (final)
if previous_response:
x, y = previous_response.mouse.x, previous_response.mouse.y
pressed_keys = tuple(previous_response.pressed_keys)
                    # Get the timing of the previous frame (the time when the screenshot
                    # was taken; actions happen between frames, not at specific times)
recording_time_ms = int((previous_frame_time - recording_start_time) * 1000)
recorded_data.append({
"m": (x, y),
"b": pressed_keys,
# Time when frame was recorded
"t": recording_time_ms
})
previous_frame_time = frame_time
previous_response = response
# Sleep between requests, aiming for
# the desired framerate
sleep_time = target_time_per_frame - time.time() + frame_time
if sleep_time <= 0.0:
# Using standard print so we know how often
# we are missing frames
print("[Warning] Can not keep up with the desired framerate.")
sleep_time = 0.0
else:
time.sleep(sleep_time)
except KeyboardInterrupt:
# Save if recording
if record:
print("Saving current data to disk...")
finish_recording(
args.output,
                args.env_name,
recording_id,
recorded_data
)
if __name__ == '__main__':
args = parser.parse_args()
main(args)
``` |
{
"source": "joonas-yoon/hexagrid",
"score": 4
} |
#### File: hexagrid/classes/grid.py
```python
from math import sqrt
from classes.cell import Cell
from tkinter import CURRENT
class Grid:
def __init__(self, canvas, rows, cols):
self.canvas = canvas
self.rows, self.cols = rows, cols
self.cell_size = cell_size = 30
self.grid = [[Cell(x, y, cell_size) for x in range(cols)] for y in range(rows)]
self.canvas.bind("<ButtonPress-1>", lambda event: Grid.show_dfs(event, self))
self.canvas.bind("<ButtonPress-3>", lambda event: Grid.delete_cell(event, self))
for r in range(rows):
for c in range(cols):
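                # The six neighbour offsets of a hex cell; the same offsets are applied to every row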
dy = [-1, -1, 0, 0, 1, 1]
dx = [0, 1, -1, 1, 0, 1]
for d in range(6):
ny = r + dy[d]
nx = c + dx[d]
if ny < 0 or ny >= rows or nx < 0 or nx >= cols:
continue
self.grid[r][c].add_adjacent(self.grid[ny][nx])
self.cell_by_item_tag = dict()
def draw(self):
cy = self.canvas.winfo_reqheight() / 2 - self.rows * self.cell_size * 1.5 / 2
for row in range(self.rows):
cx = self.canvas.winfo_reqwidth() / 2 - self.cols * self.cell_size * sqrt(3) / 2
if row % 2:
cx -= self.cell_size * sqrt(3) / 2
for col in range(self.cols):
cell = self.grid[row][col]
cell.set_xy(cx, cy)
cell.draw(self.canvas)
cell.id = self.canvas.find_closest(cx, cy)
self.cell_by_item_tag[cell.id] = cell
cx += self.cell_size * sqrt(3)
cy += self.cell_size * 1.5
@staticmethod
def delete_cell(event, grid):
canvas = grid.canvas
if canvas.find_withtag(CURRENT):
item = canvas.find_closest(event.x, event.y)
cell = grid.cell_by_item_tag[item]
cell.enable(not cell.enabled)
cell.update(canvas)
@staticmethod
def show_dfs(event, grid):
canvas = grid.canvas
if canvas.find_withtag(CURRENT):
item = canvas.find_closest(event.x, event.y)
cell = grid.cell_by_item_tag[item]
if cell.enabled:
Grid.dfs(canvas, cell, None)
@staticmethod
def dfs(canvas, cell, visited):
if visited is None:
visited = set()
elif cell.id in visited:
return None
visited.add(cell.id)
canvas.update_idletasks()
canvas.after(25)
canvas.itemconfig(cell.id, fill="red")
for next_cell in cell.adjacent_cells:
if next_cell.enabled:
Grid.dfs(canvas, next_cell, visited)
canvas.update_idletasks()
canvas.after(25)
canvas.itemconfig(cell.id, fill="#ccc")
``` |
{
"source": "JoonatanL/orix",
"score": 2
} |
#### File: orix/crystal_map/crystal_map_properties.py
```python
import numpy as np
class CrystalMapProperties(dict):
"""A class to store properties with in a CrystalMap instance.
This class is a thin wrapper around :class:`dict`. It overrides setting
and getting property arrays in the `dict` to handle a data mask
correctly, i.e. whether data points are considered to be in the data.
Attributes
----------
id : numpy.ndarray
1D integer array with the id of each point in the data.
is_in_data : numpy.ndarray
1D boolean array with True for points in the data, of the same size
as the data.
"""
def __init__(self, dictionary, id, is_in_data=None):
"""Create a `CrystalMapProperties` object.
Parameters
----------
dictionary : dict
Dictionary of properties with `key` equal to the property name
and `value` as the numpy array.
id : numpy.ndarray
1D integer array with the id of each point in the entire data,
i.e. not just points in the data.
is_in_data : numpy.ndarray, optional
1D boolean array with True for points in the data. If ``None``
is passed (default), all points are considered to be in the
data.
"""
super().__init__(**dictionary)
self.id = id
if is_in_data is None:
self.is_in_data = np.ones(id.size, dtype=bool)
else:
self.is_in_data = is_in_data
def __setitem__(self, key, value):
"""Add a 1D array to or update an existing array in the
dictionary. If `key` is the name of an existing array, only the
points in the data (where `self.is_in_data` is True) are set.
"""
# Get array values if `key` already present, or zeros
array = self.setdefault(key, np.zeros(self.is_in_data.size))
# Determine array data type from input
if hasattr(value, "__iter__"):
value_type = type(value[0])
else:
value_type = type(value)
array = array.astype(value_type)
array[self.is_in_data] = value
super().__setitem__(key, array)
def __getitem__(self, item):
"""Return a dictionary entry, ensuring that only points in the data
are returned.
"""
array = super().__getitem__(item)
return array[self.is_in_data]
```
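A minimal usage sketch of the masking behaviour above (illustrative, not from the orix test suite): values assigned to a property are written only at the points flagged as being in the data, while a full-size array is kept underneath.
```python
import numpy as np

from orix.crystal_map.crystal_map_properties import CrystalMapProperties

ids = np.arange(5)
mask = np.array([True, True, False, True, True])
props = CrystalMapProperties({}, id=ids, is_in_data=mask)

props["iq"] = [10, 20, 30, 40]        # one value per point in the data
print(props["iq"])                    # [10 20 30 40]
print(dict.__getitem__(props, "iq"))  # full-size array: [10 20  0 30 40]
```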
#### File: io/plugins/ang.py
```python
import re
import warnings
from diffpy.structure import Lattice, Structure
import numpy as np
from orix.crystal_map import CrystalMap, PhaseList
from orix.quaternion.rotation import Rotation
# MTEX has this format sorted out, check out their readers when fixing
# issues and adapting to other versions of this file format in the future:
# https://github.com/mtex-toolbox/mtex/blob/develop/interfaces/loadEBSD_ang.m
# https://github.com/mtex-toolbox/mtex/blob/develop/interfaces/loadEBSD_ACOM.m
# Plugin description
format_name = "ang"
file_extensions = ["ang"]
writes = False
writes_this = CrystalMap
def file_reader(filename):
"""Return a :class:`~orix.crystal_map.crystal_map.CrystalMap` object
    from a file in EDAX TSL's .ang format. The map in the input is assumed
to be 2D.
Many vendors produce an .ang file. Supported vendors are:
* EDAX TSL
* NanoMegas ASTAR Index
* EMsoft (from program `EMdpmerge`)
All points satisfying the following criteria are classified as not
indexed:
* EDAX TSL: confidence index == -1
Parameters
----------
filename : str
Path and file name.
Returns
-------
CrystalMap
"""
# Get file header
with open(filename) as f:
header = _get_header(f)
# Get phase names and crystal symmetries from header (potentially empty)
phase_names, symmetries, lattice_constants = _get_phases_from_header(header)
structures = []
for name, abcABG in zip(phase_names, lattice_constants):
structures.append(Structure(title=name, lattice=Lattice(*abcABG)))
# Read all file data
file_data = np.loadtxt(filename)
# Get vendor and column names
n_rows, n_cols = file_data.shape
vendor, column_names = _get_vendor_columns(header, n_cols)
# Data needed to create a CrystalMap object
data_dict = {
"euler1": None,
"euler2": None,
"euler3": None,
"x": None,
"y": None,
"phase_id": None,
"prop": {},
}
for column, name in enumerate(column_names):
if name in data_dict.keys():
data_dict[name] = file_data[:, column]
else:
data_dict["prop"][name] = file_data[:, column]
# Add phase list to dictionary
unique_phase_ids = np.unique(data_dict["phase_id"]).astype(int)
data_dict["phase_list"] = PhaseList(
names=phase_names,
point_groups=symmetries,
structures=structures,
ids=unique_phase_ids,
)
# Set which data points are not indexed
if vendor == "tsl":
data_dict["phase_id"][np.where(data_dict["prop"]["ci"] == -1)] = -1
# TODO: Add not-indexed convention for INDEX ASTAR
# Set scan unit
if vendor in ["tsl", "emsoft"]:
scan_unit = "um"
else: # NanoMegas
scan_unit = "nm"
data_dict["scan_unit"] = scan_unit
# Create rotations
data_dict["rotations"] = Rotation.from_euler(
np.column_stack(
(data_dict.pop("euler1"), data_dict.pop("euler2"), data_dict.pop("euler3"))
)
)
return CrystalMap(**data_dict)
def _get_header(file):
"""Return the first lines starting with '#' in an .ang file.
Parameters
----------
file : _io.TextIO
File object.
Returns
-------
header : list
List with header lines as individual elements.
"""
header = []
line = file.readline()
while line.startswith("#"):
header.append(line.rstrip())
line = file.readline()
return header
def _get_vendor_columns(header, n_cols_file):
"""Return the .ang file column names and vendor, determined from the
header.
Parameters
----------
header : list
List with header lines as individual elements.
n_cols_file : int
Number of file columns.
Returns
-------
vendor : str
Determined vendor ("tsl", "astar", or "emsoft").
column_names : list of str
List of column names.
"""
# Assume EDAX TSL by default
vendor = "tsl"
# Determine vendor by searching for the vendor footprint in the header
vendor_footprint = {
"emsoft": "EMsoft",
"astar": "ACOM",
}
for name, footprint in vendor_footprint.items():
for line in header:
if footprint in line:
vendor = name
break
# Vendor column names
column_names = {
"unknown": [
"euler1",
"euler2",
"euler3",
"x",
"y",
"unknown1",
"unknown2",
"phase_id",
],
"tsl": [
"euler1",
"euler2",
"euler3",
"x",
"y",
"iq", # Image quality from Hough transform
"ci", # Confidence index
"phase_id",
"unknown1",
"fit", # Pattern fit
"unknown2",
"unknown3",
"unknown4",
"unknown5",
],
"emsoft": [
"euler1",
"euler2",
"euler3",
"x",
"y",
"iq", # Image quality from <NAME>'s method
"dp", # Dot product
"phase_id",
],
"astar": [
"euler1",
"euler2",
"euler3",
"x",
"y",
"ind", # Correlation index
"rel", # Reliability
"phase_id",
"relx100", # Reliability x 100
],
}
n_cols_expected = len(column_names[vendor])
if n_cols_file != n_cols_expected:
warnings.warn(
f"Number of columns, {n_cols_file}, in the file is not equal to "
f"the expected number of columns, {n_cols_expected}, for the \n"
f"assumed vendor '{vendor}'. Will therefore assume the following "
"columns: euler1, euler2, euler3, x, y, unknown1, unknown2, "
"phase_id, unknown3, unknown4, etc."
)
vendor = "unknown"
n_cols_unknown = len(column_names["unknown"])
if n_cols_file > n_cols_unknown:
# Add potential extra columns to properties
for i in range(n_cols_file - n_cols_unknown):
column_names["unknown"].append("unknown" + str(i + 3))
return vendor, column_names[vendor]
def _get_phases_from_header(header):
"""Return phase names and symmetries detected in an .ang file
header.
Parameters
----------
header : list
List with header lines as individual elements.
Returns
-------
phase_names : list of str
List of names of detected phases.
phase_point_groups : list of str
List of point groups of detected phase.
lattice_constants : list of list of floats
List of list of lattice parameters of detected phases.
Notes
-----
Regular expressions are used to collect phase name, formula and
    point group. This function has been tested with files from the
    following vendors' formats: EDAX TSL OIM Data Collection v7, ASTAR
Index, and EMsoft v4/v5.
"""
regexps = {
"name": "# MaterialName([ \t]+)([A-z0-9 ]+)",
"formula": "# Formula([ \t]+)([A-z0-9 ]+)",
"point_group": "# Symmetry([ \t]+)([A-z0-9 ]+)",
"lattice_constants": r"# LatticeConstants([ \t+])(.*)",
}
phases = {"name": [], "formula": [], "point_group": [], "lattice_constants": []}
for line in header:
for key, exp in regexps.items():
match = re.search(exp, line)
if match:
group = re.split("[ \t]", match.group(2).lstrip(" ").rstrip(" "))
group = list(filter(None, group))
if key == "lattice_constants":
group = [float(i) for i in group]
else:
group = group[0]
phases[key].append(group)
# Check if formula is empty (sometimes the case for ASTAR Index)
names = phases["formula"]
if len(names) == 0 or any([i != "" for i in names]):
names = phases["name"]
return names, phases["point_group"], phases["lattice_constants"]
```
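A small illustrative check of the header parsing above (synthetic header, not from the test suite). With no `# Formula` line, the phase name falls back to `# MaterialName`:
```python
from orix.io.plugins.ang import _get_phases_from_header

header = [
    "# MaterialName  Austenite",
    "# Symmetry      43",
    "# LatticeConstants      3.600 3.600 3.600 90.000 90.000 90.000",
]
names, point_groups, lattice_constants = _get_phases_from_header(header)
print(names)              # ['Austenite']
print(point_groups)       # ['43']
print(lattice_constants)  # [[3.6, 3.6, 3.6, 90.0, 90.0, 90.0]]
```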
#### File: io/plugins/emsoft_h5ebsd.py
```python
import re
from diffpy.structure import Lattice, Structure
from h5py import File
import numpy as np
from orix.crystal_map import CrystalMap, Phase, PhaseList
from orix.quaternion.rotation import Rotation
# Plugin description
format_name = "emsoft_h5ebsd"
file_extensions = ["h5", "hdf5", "h5ebsd"]
writes = False
writes_this = CrystalMap
footprint = ["Scan 1"] # Unique HDF5 footprint
def file_reader(filename, refined=False, **kwargs):
"""Return a :class:`~orix.crystal_map.crystal_map.CrystalMap` object
from a file in EMsoft's dictionary indexing dot product file format.
Parameters
----------
filename : str
Path and file name.
refined : bool, optional
Whether to return refined orientations (default is False).
kwargs
Keyword arguments passed to :func:`h5py.File`.
Returns
-------
CrystalMap
"""
mode = kwargs.pop("mode", "r")
f = File(filename, mode=mode, **kwargs)
# Get groups for convenience
ebsd_group = f["Scan 1/EBSD"]
data_group = ebsd_group["Data"]
header_group = ebsd_group["Header"]
phase_group = header_group["Phase/1"]
# Get map shape and step sizes
ny = header_group["nRows"][:][0]
nx = header_group["nColumns"][:][0]
step_y = header_group["Step Y"][:][0]
map_size = ny * nx
# Some of the data needed to create a CrystalMap object
phase_name, point_group, structure = _get_phase(phase_group)
data_dict = {
# Get map coordinates ("Y Position" data set is not correct in EMsoft as of
# 2020-04, see:
# https://github.com/EMsoft-org/EMsoft/blob/7762e1961508fe3e71d4702620764ceb98a78b9e/Source/EMsoftHDFLib/EMh5ebsd.f90#L1093)
"x": data_group["X Position"][:],
# y = data_group["Y Position"][:]
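        # Rebuild y from the map shape: each of the ny values 0, step_y, 2*step_y, ... repeated nx times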
"y": np.sort(np.tile(np.arange(ny) * step_y, nx)),
# Get phase IDs
"phase_id": data_group["Phase"][:],
# Get phase name, point group and structure (lattice)
"phase_list": PhaseList(
Phase(name=phase_name, point_group=point_group, structure=structure)
),
"scan_unit": "um",
}
# Get rotations
if refined:
euler = data_group["RefinedEulerAngles"][:]
else: # Get n top matches for each pixel
top_match_idx = data_group["TopMatchIndices"][:][:map_size] - 1
dictionary_size = data_group["FZcnt"][:][0]
dictionary_euler = data_group["DictionaryEulerAngles"][:][:dictionary_size]
euler = dictionary_euler[top_match_idx, :]
data_dict["rotations"] = Rotation.from_euler(euler)
# Get number of top matches kept per data point
n_top_matches = f["NMLparameters/EBSDIndexingNameListType/nnk"][:][0]
data_dict["prop"] = _get_properties(
data_group=data_group, n_top_matches=n_top_matches, map_size=map_size,
)
f.close()
return CrystalMap(**data_dict)
def _get_properties(data_group, n_top_matches, map_size):
"""Return a dictionary of properties within an EMsoft h5ebsd file, with
property names as the dictionary key and arrays as the values.
Parameters
----------
data_group : h5py.Group
HDF5 group with the property data sets.
n_top_matches : int
Number of rotations per point.
map_size : int
Data size.
Returns
-------
properties : dict
Property dictionary.
"""
expected_properties = [
"AvDotProductMap",
"CI",
"CIMap",
"IQ",
"IQMap",
"ISM",
"ISMap",
"KAM",
"OSM",
"RefinedDotProducts",
"TopDotProductList",
"TopMatchIndices",
]
# Get properties
properties = {}
for property_name in expected_properties:
if property_name in data_group.keys():
prop = data_group[property_name][:]
if prop.shape[-1] == n_top_matches:
prop = prop[:map_size].reshape((map_size,) + (n_top_matches,))
else:
prop = prop.reshape(map_size)
properties[property_name] = prop
return properties
def _get_phase(data_group):
"""Return phase information from a phase data group in an EMsoft dot
product file.
Parameters
----------
data_group : h5py.Group
HDF5 group with the property data sets.
Returns
-------
name : str
Phase name.
point_group : str
Phase point group.
structure : diffpy.structure.Structure
Phase structure.
"""
name = re.search(r"([A-z0-9]+)", data_group["MaterialName"][:][0].decode()).group(1)
point_group = re.search(
r"\[([A-z0-9]+)\]", data_group["Point Group"][:][0].decode()
).group(1)
lattice = Lattice(
*tuple(
data_group[f"Lattice Constant {i}"][:]
for i in ["a", "b", "c", "alpha", "beta", "gamma"]
)
)
structure = Structure(title=name, lattice=lattice)
return name, point_group, structure
```
#### File: orix/plot/crystal_map_plot.py
```python
import warnings
import matplotlib.font_manager as fm
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
from matplotlib.axes import Axes
from matplotlib.image import AxesImage
from matplotlib.projections import register_projection
from mpl_toolkits.axes_grid1 import make_axes_locatable
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import numpy as np
from orix.scalar import Scalar
from orix.vector import Vector3d
class CrystalMapPlot(Axes):
"""2D plotting of :class:`~orix.crystal_map.crystal_map.CrystalMap`
objects.
"""
name = "plot_map"
_data_axes = None
_data_slices = None
_data_shape = None
def plot_map(
self,
crystal_map,
value=None,
scalebar=True,
scalebar_properties=None,
legend=True,
legend_properties=None,
axes=None,
depth=None,
**kwargs,
) -> AxesImage:
"""Plot a 2D map with any CrystalMap attribute as map values.
Wraps :meth:`matplotlib.axes.Axes.imshow`, see that method for
relevant keyword arguments.
Parameters
----------
crystal_map : orix.crystal_map.CrystalMap
Crystal map object to obtain data to plot from.
value : numpy.ndarray, optional
Attribute array to plot. If value is None (default), a phase
map is plotted.
scalebar : bool, optional
Whether to add a scalebar (default is True) along the
horizontal map dimension.
scalebar_properties : dict
Dictionary of keyword arguments passed to
:func:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.
legend : bool, optional
Whether to add a legend to the plot. This is only implemented
for a phase plot (in which case default is True).
legend_properties : dict
Dictionary of keyword arguments passed to
:meth:`matplotlib.axes.legend`.
axes : tuple of ints, optional
Which data axes to plot if data has more than two dimensions.
The index of data to plot in the final dimension is determined
by `depth`. If None (default), data along the two last axes is
plotted.
depth : int, optional
Which layer along the third axis to plot if data has more than
two dimensions. If None (default), data in the first index
(layer) is plotted.
kwargs
Keyword arguments passed to
:meth:`matplotlib.axes.Axes.imshow`.
Returns
-------
im : matplotlib.image.AxesImage
Image object, to be used further to get data from etc.
See Also
--------
matplotlib.axes.Axes.imshow
orix.plot.CrystalMapPlot.add_scalebar
orix.plot.CrystalMapPlot.add_overlay
orix.plot.CrystalMapPlot.add_colorbar
Examples
--------
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> from orix import plot
>>> from orix.io import load
Import a crystal map
>>> cm = load("/some/directory/data.ang")
Plot a phase map
>>> fig = plt.figure() # Get figure
>>> ax = fig.add_subplot(projection="plot_map") # Get axes
>>> im = ax.plot_map(cm) # Get image
Add an overlay
>>> ax.add_overlay(cm, cm.iq)
Plot an arbitrary map property, also changing scalebar location
>>> ax = plt.subplot(projection="plot_map")
>>> ax.plot_map(
... cm, cm.dp, cmap="cividis", scalebar_properties={"loc": 4})
Add a colorbar
>>> cbar = ax.add_colorbar("Dot product") # Get colorbar
Plot orientation angle in degrees of one phase
>>> cm2 = cm["austenite"]
>>> austenite_angles = cm2.orientations.angle.data * 180 / np.pi
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection="plot_map")
>>> im = ax.plot_map(cm2, austenite_angles)
>>> ax.add_colorbar("Orientation angle [$^{\circ}$]")
Remove all figure and axes padding
>>> ax.remove_padding()
Write annotated figure to file
>>> fig.savefig(
... "/some/directory/image.png",
... pad_inches=0,
... bbox_inches="tight"
...)
Write un-annotated image to file
>>> plt.imsave("/some/directory/image2.png", im.get_array())
"""
self._set_plot_shape(crystal_map=crystal_map, axes=axes, depth=depth)
patches = None
if value is None: # Phase map
# Color each map pixel with corresponding phase color RGB tuple
phase_id = crystal_map.get_map_data("phase_id")
phase_id = phase_id[self._data_slices]
unique_phase_ids = np.unique(phase_id[~np.isnan(phase_id)])
data = np.ones(phase_id.shape + (3,))
for i, color in zip(
unique_phase_ids, crystal_map.phases_in_data.colors_rgb
):
mask = phase_id == int(i)
data[mask] = data[mask] * color
# Add legend patches to plot
patches = []
for _, p in crystal_map.phases_in_data:
patches.append(mpatches.Patch(color=p.color_rgb, label=p.name))
else: # Create masked array of correct shape
if isinstance(value, (Scalar, Vector3d)):
value = value.data
data = crystal_map.get_map_data(value)
data = data[self._data_slices]
# Squeeze 1-dimensions
data = np.squeeze(data)
# Legend
if legend and isinstance(patches, list):
if legend_properties is None:
legend_properties = {}
self._add_legend(patches, **legend_properties)
# Scalebar
if scalebar:
if scalebar_properties is None:
scalebar_properties = {}
_ = self.add_scalebar(crystal_map, **scalebar_properties)
im = self.imshow(X=data, **kwargs)
im = self._override_status_bar(im, crystal_map)
return im
def add_scalebar(self, crystal_map, **kwargs):
"""Add a scalebar to the axes object via `AnchoredSizeBar`.
To find an appropriate scalebar width, this snippet from MTEX
written by <NAME> and <NAME> is used:
https://github.com/mtex-toolbox/mtex/blob/b8fc167d06d453a2b3e212b1ac383acbf85a5a27/plotting/scaleBar.m,
Parameters
----------
crystal_map : orix.crystal_map.CrystalMap
Crystal map object to obtain necessary data from.
kwargs
Keyword arguments passed to
:func:`mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar`.
`alpha` can also be passed, to set the scalebar transparency.
Returns
-------
bar : mpl_toolkits.axes_grid1.anchored_artists.AnchoredSizeBar
Scalebar.
Examples
--------
>>> cm
Phase Orientations Name Symmetry Color
1 5657 (48.4%) austenite 432 tab:blue
2 6043 (51.6%) ferrite 432 tab:orange
Properties: iq, dp
Scan unit: um
Create a phase map without a scale bar and add it afterwards
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection="plot_map")
>>> im = ax.plot_map(cm, scalebar=False)
>>> sbar = ax.add_scalebar(cm, loc=4, frameon=False)
"""
last_axis = crystal_map.ndim - 1
horizontal = crystal_map._coordinate_axes[last_axis] # Get whether z, y or x
map_width = crystal_map.shape[last_axis]
step_size = crystal_map._step_sizes[horizontal]
scan_unit = crystal_map.scan_unit
# Initial scalebar width should be approximately 1/10 of map width
scalebar_width = 0.1 * map_width * step_size
# Ensure a suitable number is used, e.g. going from 1000 nm to 1 um
scalebar_width, scan_unit, factor = convert_unit(scalebar_width, scan_unit)
# This snippet for finding a suitable scalebar width is taken from MTEX:
# https://github.com/mtex-toolbox/mtex/blob/b8fc167d06d453a2b3e212b1ac383acbf85a5a27/plotting/scaleBar.m,
# written by <NAME> and <NAME>. We want a round, not too high
# number without decimals
good_values = np.array(
[1, 2, 5, 10, 15, 20, 25, 50, 75, 100, 125, 150, 200, 500, 750], dtype=int,
)
        # Find the good value closest to the initial scalebar width
difference = abs(scalebar_width - good_values)
good_value_idx = np.where(difference == difference.min())[0][0]
scalebar_width = good_values[good_value_idx]
# Scale width by factor from above conversion (usually factor = 1.0)
scalebar_width = scalebar_width * factor
scalebar_width_px = scalebar_width / step_size
# Allow for a potential decimal in scalebar number if something didn't go as
# planned
if scalebar_width.is_integer():
scalebar_width = int(scalebar_width)
else:
warnings.warn(f"Scalebar width {scalebar_width} is not an integer.")
if scan_unit == "um":
scan_unit = "\u03BC" + "m"
# Set up arguments to AnchoredSizeBar() if not already present in kwargs
d = {
"loc": 3,
"pad": 0.2,
"sep": 3,
"frameon": True,
"borderpad": 0.5,
"size_vertical": scalebar_width_px / 12,
"fontproperties": fm.FontProperties(size=11),
}
[kwargs.setdefault(k, v) for k, v in d.items()]
alpha = kwargs.pop("alpha", 0.6)
# Create scalebar
bar = AnchoredSizeBar(
transform=self.axes.transData,
size=scalebar_width_px,
label=str(scalebar_width) + " " + scan_unit,
**kwargs,
)
bar.patch.set_alpha(alpha)
self.axes.add_artist(bar)
return bar
def add_overlay(self, crystal_map, item):
"""Use a crystal map property as gray scale values of a phase map.
The property's range is adjusted to [0, 1] for maximum contrast.
Parameters
----------
crystal_map : orix.crystal_map.CrystalMap
Crystal map object to obtain necessary data from.
item : str
Name of map property to scale phase array with. The property
range is adjusted for maximum contrast.
Examples
--------
>>> cm
Phase Orientations Name Symmetry Color
1 5657 (48.4%) austenite 432 tab:blue
2 6043 (51.6%) ferrite 432 tab:orange
Properties: iq, dp
Scan unit: um
Plot a phase map with a map property as overlay
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection="plot_map")
>>> im = ax.plot_map(cm)
>>> ax.add_overlay(cm, cm.dp)
"""
image = self.images[0]
image_data = image.get_array()
if image_data.ndim < 3:
# Adding overlay to a scalar plot (should this be allowed?)
image_data = image.to_rgba(image_data)[:, :, :3] # No alpha
# Scale prop to [0, 1] to maximize image contrast
overlay = crystal_map.get_map_data(item)
overlay_min = np.nanmin(overlay)
rescaled_overlay = (overlay - overlay_min) / (np.nanmax(overlay) - overlay_min)
n_channels = 3
for i in range(n_channels):
image_data[:, :, i] *= rescaled_overlay
image.set_data(image_data)
def add_colorbar(self, label=None, **kwargs):
"""Add an opinionated colorbar to the figure.
Parameters
----------
label : str, optional
Colorbar title, default is ``None``.
kwargs
Keyword arguments passed to
:meth:`mpl_toolkits.axes_grid1.make_axes_locatable.append_axes`.
Returns
-------
cbar : matplotlib.colorbar
Colorbar.
Examples
--------
>>> cm
Phase Orientations Name Symmetry Color
1 5657 (48.4%) austenite 432 tab:blue
2 6043 (51.6%) ferrite 432 tab:orange
Properties: iq, dp
Scan unit: um
Plot a map property and add a colorbar
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection="plot_map")
>>> im = ax.plot_map(cm, cm.dp, cmap="inferno")
>>> cbar = ax.add_colorbar("Dot product")
If the default options are not satisfactory, the colorbar can be
updated
>>> cbar.ax.set_ylabel(ylabel="dp", rotation=90)
"""
# Keyword arguments
d = {"position": "right", "size": "5%", "pad": 0.1}
[kwargs.setdefault(k, v) for k, v in d.items()]
# Add colorbar
divider = make_axes_locatable(self)
cax = divider.append_axes(**kwargs)
cbar = self.figure.colorbar(self.images[0], cax=cax)
# Set label with padding
cbar.ax.get_yaxis().labelpad = 15
cbar.ax.set_ylabel(label, rotation=270)
return cbar
def remove_padding(self):
"""Remove all white padding outside of the figure.
Examples
--------
>>> cm
Phase Orientations Name Symmetry Color
1 5657 (48.4%) austenite 432 tab:blue
2 6043 (51.6%) ferrite 432 tab:orange
Properties: iq, dp
Scan unit: um
Remove all figure and axes padding of a phase map
>>> fig = plt.figure()
>>> ax = fig.add_subplot(projection="plot_map")
>>> ax.plot_map(cm)
>>> ax.remove_padding()
"""
self.set_axis_off()
self.margins(0, 0)
# Tune subplot layout
cbar = self.images[0].colorbar
if cbar is not None:
right = self.figure.subplotpars.right
else:
right = 1
self.figure.subplots_adjust(top=1, bottom=0, right=right, left=0)
def _set_plot_shape(self, crystal_map, axes=None, depth=None):
"""Set `CrystalMapPlot` attributes describing which data axes to
plot.
Parameters
----------
crystal_map : orix.crystal_map.CrystalMap
Map to determine plotting axes and slices from.
axes : list of ints, optional
Data axes to plot. If ``None``, the last two data axes are
plotted (default).
depth : int, optional
Which data layer to plot along the final axis not in `axes` if
data is 3D. If ``None``, this is set to zero, i.e. the first
layer (default).
"""
ndim = crystal_map.ndim
# Get data axes to plot
if axes is None:
axes = [ndim - 2, ndim - 1]
axes = list(axes)
axes.sort()
self._data_axes = axes[:2] # Can only plot two axes!
if depth is None: # Plot first layer
depth = 0
# Get data slices to plot
slices = []
data_shape = []
for data_axis, axis_size in zip(
crystal_map._coordinate_axes.keys(), crystal_map._original_shape
):
data_slice = slice(depth, depth + 1, None)
for plot_axis in self._data_axes:
if data_axis == plot_axis:
data_slice = slice(None, None, None)
data_shape.append(axis_size)
slices.append(data_slice)
self._data_slices = tuple(slices)
self._data_shape = tuple(data_shape)
def _add_legend(self, patches, **kwargs):
"""Add a legend to the axes object.
Parameters
----------
patches : list of matplotlib.patches.Patch
Patches with color code and name.
kwargs
Keyword arguments passed to :meth:`matplotlib.axes.legend`.
"""
d = {
"borderpad": 0.3,
"handlelength": 0.75,
"handletextpad": 0.3,
"framealpha": 0.6,
"prop": fm.FontProperties(size=11),
}
[kwargs.setdefault(k, v) for k, v in d.items()]
self.legend(handles=patches, **kwargs)
def _override_status_bar(self, image, crystal_map):
"""Display coordinates, a property value (if scalar values are
plotted), and Euler angles (in radians) per data point in the
status bar.
This is done by overriding
:meth:`matplotlib.images.AxesImage.get_cursor_data`,
:meth:`matplotlib.images.AxesImage.format_cursor_data` and
:meth:`matplotlib.axes.Axes.format_coord`.
Parameters
----------
image : matplotlib.images.AxesImage
Image object.
crystal_map : orix.crystal_map.CrystalMap
Crystal map object to obtain necessary data from.
Returns
-------
image : matplotlib.images.AxesImage
Image object where the above mentioned methods are overridden.
"""
# Get data shape to plot
n_rows, n_cols = self._data_shape
# Get rotations, ensuring correct masking
# TODO: Show orientations in Euler angles (computationally
# intensive...)
r = crystal_map.get_map_data("rotations", decimals=3)
r = r[self._data_slices].squeeze()
# Get image data, overwriting potentially masked regions set to 0.0
image_data = image.get_array() # numpy.masked.MaskedArray
image_data[image_data.mask] = np.nan
def status_bar_data(event):
col = int(event.xdata + 0.5)
row = int(event.ydata + 0.5)
return row, col, r[row, col], image_data[row, col]
# Set width of status bar fields
x_width = len(str(n_cols - 1))
y_width = len(str(n_rows - 1))
scalar_width = len(str(np.nanmax(image_data)))
# Override
image.get_cursor_data = status_bar_data
self.axes.format_coord = lambda x, y: ""
def format_status_bar_data_rgb(data):
"""Status bar format for RGB plots."""
return (
f"(y,x):({data[0]:{y_width}},{data[1]:{x_width}})"
f" rot:({data[2][0]:5},{data[2][1]:5},{data[2][2]:5})"
)
def format_status_bar_data_scalar(data):
"""Status bar format for scalar plots."""
return (
f"(y,x):({data[0]:{y_width}},{data[1]:{x_width}})"
f" val:{data[3]:{scalar_width}}"
f" rot:({data[2][0]:5},{data[2][1]:5},{data[2][2]:5})"
)
# Pick status bar format and override this as well
if image_data.ndim > 2 and image_data.shape[-1] == 3:
image.format_cursor_data = format_status_bar_data_rgb
else:
image.format_cursor_data = format_status_bar_data_scalar
return image
register_projection(CrystalMapPlot)
def convert_unit(value, unit):
"""Return the data with a suitable, not too large, unit.
This algorithm is taken directly from MTEX [Bachmann2010]_
https://github.com/mtex-toolbox/mtex/blob/a74545383160610796b9525eedf50a241800ffae/plotting/plotting_tools/switchUnit.m.
Parameters
----------
value : float
The data to convert.
unit : str
The data unit, e.g. um. If `px` is passed, `um` is assumed.
Returns
-------
new_value : float
The input data converted to the suitable unit.
new_unit : str
A (possibly) more suitable unit than the input.
factor : float
Factor to multiple `new_value` with to get the input data.
Examples
--------
>>> convert_unit(17.55 * 1e3, 'nm')
17.55 um 999.9999999999999
>>> convert_unit(17.55 * 1e-3, 'mm')
17.55 um 0.001
"""
unit_is_px = False
if unit == "px":
unit = "um"
unit_is_px = True
# Create lookup-table with units and power
lookup_table = []
letters = "yzafpnum kMGTPEZY"
new_unit_idx = None
for i, letter in enumerate(letters):
# Ensure 'm' is entered correctly
current_unit = (letter + "m").strip(" ")
lookup_table.append((current_unit, 10 ** (3 * i - 24)))
if unit == current_unit:
new_unit_idx = i
# Find the lookup-table index of the most suitable unit
value_in_metres = value * lookup_table[new_unit_idx][1]
power_of_value = np.floor(np.log10(value_in_metres))
suitable_unit_idx = int(np.floor(power_of_value / 3) + 8)
# Calculate new data, unit and the conversion factor
new_value = value_in_metres / lookup_table[suitable_unit_idx][1]
new_unit = lookup_table[suitable_unit_idx][0]
factor = lookup_table[suitable_unit_idx][1] / lookup_table[new_unit_idx][1]
if unit_is_px:
new_unit = "px"
return new_value, new_unit, factor
```
#### File: orix/plot/rotation_plot.py
```python
from matplotlib import projections
from mpl_toolkits.mplot3d import Axes3D
from orix.vector.neo_euler import Rodrigues, AxAngle
class RotationPlot(Axes3D):
name = None
transformation_class = None
def transform(self, xs):
from orix.quaternion.rotation import Rotation
if isinstance(xs, Rotation):
transformed = self.transformation_class.from_rotation(xs.get_plot_data())
else:
transformed = self.transformation_class(xs)
x, y, z = transformed.xyz
return x, y, z
def scatter(self, xs, **kwargs):
x, y, z = self.transform(xs)
return super().scatter(x, y, z, **kwargs)
def plot(self, xs, **kwargs):
x, y, z = self.transform(xs)
return super().plot(x, y, z, **kwargs)
def plot_wireframe(self, xs, **kwargs):
x, y, z = self.transform(xs)
return super().plot_wireframe(x, y, z, **kwargs)
class RodriguesPlot(RotationPlot):
"""Plot rotations in a Rodrigues-Frank projection."""
name = "rodrigues"
transformation_class = Rodrigues
class AxAnglePlot(RotationPlot):
"""Plot rotations in an Axes-Angle projection."""
name = "axangle"
transformation_class = AxAngle
projections.register_projection(RodriguesPlot)
projections.register_projection(AxAnglePlot)
```
#### File: orix/quaternion/orientation_region.py
```python
import itertools
import numpy as np
from orix.quaternion import Quaternion
from orix.quaternion.rotation import Rotation
from orix.quaternion.symmetry import C1, get_distinguished_points
from orix.vector.neo_euler import Rodrigues, AxAngle
EPSILON = 1e-9 # small number to avoid round off problems
def _get_large_cell_normals(s1, s2):
dp = get_distinguished_points(s1, s2)
normals = Rodrigues.zero(dp.shape + (2,))
planes1 = dp.axis * np.tan(dp.angle.data / 4)
planes2 = -dp.axis * np.tan(dp.angle.data / 4) ** -1
planes2.data[np.isnan(planes2.data)] = 0
normals[:, 0] = planes1
normals[:, 1] = planes2
normals: Rotation = Rotation.from_neo_euler(normals).flatten().unique(
antipodal=False
)
if not normals.size:
return normals
_, inv = normals.axis.unique(return_inverse=True)
axes_unique = []
angles_unique = []
for i in np.unique(inv):
n = normals[inv == i]
axes_unique.append(n.axis.data[0])
angles_unique.append(n.angle.data.max())
normals = Rotation.from_neo_euler(
AxAngle.from_axes_angles(np.array(axes_unique), angles_unique)
)
return normals
def get_proper_groups(Gl, Gr):
"""Return the appropriate groups for the asymmetric domain calculation.
Parameters
----------
Gl, Gr : Symmetry
Returns
-------
Gl, Gr : Symmetry
The proper subgroup(s) or proper inversion subgroup(s) as appropriate.
Raises
------
NotImplementedError
If both groups are improper and neither contain an inversion, special
consideration is needed which is not yet implemented in orix.
"""
if Gl.is_proper and Gr.is_proper:
return Gl, Gr
elif Gl.is_proper and not Gr.is_proper:
return Gl, Gr.proper_subgroup
elif not Gl.is_proper and Gr.is_proper:
return Gl.proper_subgroup, Gr
else:
if Gl.contains_inversion and Gr.contains_inversion:
return Gl.proper_subgroup, Gr.proper_subgroup
elif Gl.contains_inversion and not Gr.contains_inversion:
return Gl.proper_subgroup, Gr.laue_proper_subgroup
elif not Gl.contains_inversion and Gr.contains_inversion:
return Gl.laue_proper_subgroup, Gr.proper_subgroup
else:
raise NotImplementedError(
"Both groups are improper, " "and do not contain inversion."
)
class OrientationRegion(Rotation):
"""A set of :class:`~orix.quaternion.rotation.Rotation` which are the
normals of an orientation region.
"""
@classmethod
def from_symmetry(cls, s1, s2=C1):
"""The set of unique (mis)orientations of a symmetrical object.
Parameters
----------
s1, s2 : Symmetry
"""
s1, s2 = get_proper_groups(s1, s2)
large_cell_normals = _get_large_cell_normals(s1, s2)
disjoint = s1 & s2
fundamental_sector = disjoint.fundamental_sector()
fundamental_sector_normals = Rotation.from_neo_euler(
AxAngle.from_axes_angles(fundamental_sector, np.pi)
)
normals = Rotation(
np.concatenate([large_cell_normals.data, fundamental_sector_normals.data])
)
orientation_region = cls(normals)
vertices = orientation_region.vertices()
if vertices.size:
orientation_region = orientation_region[
np.any(
np.isclose(orientation_region.dot_outer(vertices).data, 0), axis=1
)
]
return orientation_region
def vertices(self):
"""The vertices of the asymmetric domain.
Returns
-------
Rotation
"""
normal_combinations = list(itertools.combinations(self, 3))
if len(normal_combinations) < 1:
return Rotation.empty()
c1, c2, c3 = zip(*normal_combinations)
c1, c2, c3 = (
Rotation.stack(c1).flatten(),
Rotation.stack(c2).flatten(),
Rotation.stack(c3).flatten(),
)
v = Rotation.triple_cross(c1, c2, c3)
v = v[~np.any(np.isnan(v.data), axis=-1)]
v = v[v < self].unique()
surface = np.any(np.isclose(v.dot_outer(self).data, 0), axis=1)
return v[surface]
def faces(self):
normals = Rotation(self)
vertices = self.vertices()
faces = []
for n in normals:
faces.append(vertices[np.isclose(vertices.dot(n).data, 0)])
faces = [f for f in faces if f.size > 2]
return faces
def __gt__(self, other):
"""Overridden greater than method. Applying this to an Orientation
        will return only those orientations that lie within the OrientationRegion.
"""
c = Quaternion(self).dot_outer(Quaternion(other)).data
inside = np.logical_or(
np.all(np.greater_equal(c, -EPSILON), axis=0),
np.all(np.less_equal(c, +EPSILON), axis=0),
)
return inside
def get_plot_data(self):
from orix.vector import Vector3d
theta = np.linspace(0, 2 * np.pi - EPSILON, 361)
rho = np.linspace(0, np.pi - EPSILON, 181)
theta, rho = np.meshgrid(theta, rho)
g = Vector3d.from_polar(rho, theta)
n = Rodrigues.from_rotation(self).norm.data[:, np.newaxis, np.newaxis]
if n.size == 0:
return Rotation.from_neo_euler(AxAngle.from_axes_angles(g, np.pi))
d = (-self.axis).dot_outer(g.unit).data
x = n * d
x = 2 * np.arctan(x ** -1)
x[x < 0] = np.pi
x = np.min(x, axis=0)
r = Rotation.from_neo_euler(AxAngle.from_axes_angles(g.unit, x))
return r
```
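Tying the two preceding modules together, a hedged sketch (not from the orix documentation) of plotting a fundamental zone with the "axangle" projection registered in `rotation_plot.py`; it assumes the point group `O` (432) is exposed by `orix.quaternion.symmetry`:
```python
import matplotlib.pyplot as plt

from orix.plot.rotation_plot import AxAnglePlot  # noqa: F401, registers "axangle"
from orix.quaternion.orientation_region import OrientationRegion
from orix.quaternion.symmetry import O  # assumed available (432)

fundamental_zone = OrientationRegion.from_symmetry(O)

fig = plt.figure()
ax = fig.add_subplot(projection="axangle")
ax.plot_wireframe(fundamental_zone, color="gray", linewidth=0.5)
plt.show()
```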
#### File: orix/tests/test_io.py
```python
from contextlib import contextmanager
from collections import OrderedDict
from io import StringIO
from numbers import Number
import os
import sys
from diffpy.structure import Lattice, Structure
from diffpy.structure.spacegroups import GetSpaceGroup
from h5py import File
import pytest
import numpy as np
from orix import __version__ as orix_version
from orix.crystal_map import CrystalMap, Phase, PhaseList
from orix.io import (
load,
save,
loadang,
loadctf,
_plugin_from_footprints,
_overwrite_or_not,
)
from orix.io.plugins.ang import (
_get_header,
_get_phases_from_header,
_get_vendor_columns,
)
from orix.io.plugins.orix_hdf5 import (
hdf5group2dict,
dict2crystalmap,
dict2phaselist,
dict2phase,
dict2structure,
dict2lattice,
dict2atom,
dict2hdf5group,
crystalmap2dict,
phaselist2dict,
phase2dict,
structure2dict,
lattice2dict,
atom2dict,
)
from orix.io.plugins import ang, emsoft_h5ebsd, orix_hdf5
from orix.quaternion.rotation import Rotation
from orix.tests.conftest import (
ANGFILE_TSL_HEADER,
ANGFILE_ASTAR_HEADER,
ANGFILE_EMSOFT_HEADER,
)
plugin_list = [ang, emsoft_h5ebsd, orix_hdf5]
@contextmanager
def replace_stdin(target):
orig = sys.stdin
sys.stdin = target
yield
sys.stdin = orig
def assert_dictionaries_are_equal(input_dict, output_dict):
for key in output_dict.keys():
output_value = output_dict[key]
input_value = input_dict[key]
if isinstance(output_value, (dict, OrderedDict)):
assert_dictionaries_are_equal(input_value, output_value)
else:
if isinstance(output_value, (np.ndarray, Number)):
assert np.allclose(input_value, output_value)
elif isinstance(output_value, Rotation):
assert np.allclose(input_value.to_euler(), output_value.to_euler())
elif isinstance(output_value, Phase):
assert_dictionaries_are_equal(
input_value.__dict__, output_value.__dict__
)
elif isinstance(output_value, PhaseList):
assert_dictionaries_are_equal(input_value._dict, output_value._dict)
elif isinstance(output_value, Structure):
assert np.allclose(output_value.xyz, input_value.xyz)
assert str(output_value.element) == str(input_value.element)
assert np.allclose(output_value.occupancy, input_value.occupancy)
else:
assert input_value == output_value
class TestGeneralIO:
def test_load_no_filename_match(self):
fname = "what_is_hip.ang"
with pytest.raises(IOError, match=f"No filename matches '{fname}'."):
_ = load(fname)
@pytest.mark.parametrize("temp_file_path", ["ctf"], indirect=["temp_file_path"])
def test_load_unsupported_format(self, temp_file_path):
np.savetxt(temp_file_path, X=np.random.rand(100, 8))
with pytest.raises(IOError, match=f"Could not read "):
_ = load(temp_file_path)
@pytest.mark.parametrize(
"top_group, expected_plugin",
[("Scan 1", emsoft_h5ebsd), ("crystal_map", orix_hdf5), ("Scan 2", None)],
)
def test_plugin_from_footprints(self, temp_file_path, top_group, expected_plugin):
with File(temp_file_path, mode="w") as f:
f.create_group(top_group)
assert (
_plugin_from_footprints(
temp_file_path, plugins=[emsoft_h5ebsd, orix_hdf5]
)
is expected_plugin
)
def test_overwrite_or_not(self, crystal_map, temp_file_path):
save(temp_file_path, crystal_map)
with pytest.warns(UserWarning, match="Not overwriting, since your terminal "):
_overwrite_or_not(temp_file_path)
@pytest.mark.parametrize(
"answer, expected", [("y", True), ("n", False), ("m", None)]
)
def test_overwrite_or_not_input(
self, crystal_map, temp_file_path, answer, expected
):
save(temp_file_path, crystal_map)
if answer == "m":
with replace_stdin(StringIO(answer)):
with pytest.raises(EOFError):
_overwrite_or_not(temp_file_path)
else:
with replace_stdin(StringIO(answer)):
assert _overwrite_or_not(temp_file_path) is expected
@pytest.mark.parametrize("temp_file_path", ["angs", "hdf4", "h6"])
def test_save_unsupported_raises(self, temp_file_path, crystal_map):
_, ext = os.path.splitext(temp_file_path)
with pytest.raises(IOError, match=f"'{ext}' does not correspond to any "):
save(temp_file_path, crystal_map)
def test_save_overwrite_raises(self, temp_file_path, crystal_map):
with pytest.raises(ValueError, match="`overwrite` parameter can only be "):
save(temp_file_path, crystal_map, overwrite=1)
@pytest.mark.parametrize(
"overwrite, expected_phase_name", [(True, "hepp"), (False, "")]
)
def test_save_overwrite(
self, temp_file_path, crystal_map, overwrite, expected_phase_name
):
assert crystal_map.phases[0].name == ""
save(temp_file_path, crystal_map)
assert os.path.isfile(temp_file_path) is True
crystal_map.phases[0].name = "hepp"
save(temp_file_path, crystal_map, overwrite=overwrite)
crystal_map2 = load(temp_file_path)
assert crystal_map2.phases[0].name == expected_phase_name
@pytest.mark.parametrize(
"angfile_astar, expected_data",
[
(
(
(2, 5),
(1, 1),
np.ones(2 * 5, dtype=int),
np.array(
[
[4.485496, 0.952426, 0.791507],
[1.343904, 0.276111, 0.825890],
[1.343904, 0.276111, 0.825890],
[1.343904, 0.276111, 0.825890],
[4.555309, 2.895152, 3.972020],
[1.361357, 0.276111, 0.825890],
[4.485496, 0.220784, 0.810182],
[0.959931, 2.369110, 4.058938],
[0.959931, 2.369110, 4.058938],
[4.485496, 0.220784, 0.810182],
],
),
),
np.array(
[
[0.77861956, -0.12501022, 0.44104243, 0.42849224],
[0.46256046, -0.13302712, -0.03524667, -0.87584204],
[0.46256046, -0.13302712, -0.03524667, -0.87584204],
[0.46256046, -0.13302712, -0.03524667, -0.87584204],
[0.05331986, 0.95051048, 0.28534763, -0.11074093],
[0.45489991, -0.13271448, -0.03640618, -0.87984517],
[0.8752001, -0.02905178, 0.10626836, 0.47104969],
[0.3039118, 0.01972273, -0.92612154, 0.22259272],
[0.3039118, 0.01972273, -0.92612154, 0.22259272],
[0.8752001, -0.02905178, 0.10626836, 0.47104969],
]
),
),
],
indirect=["angfile_astar"],
)
def test_loadang(angfile_astar, expected_data):
loaded_data = loadang(angfile_astar)
assert np.allclose(loaded_data.data, expected_data)
def test_loadctf():
""" Crude test of the ctf loader """
z = np.random.rand(100, 8)
fname = "temp.ctf"
np.savetxt(fname, z)
_ = loadctf(fname)
os.remove(fname)
class TestAngPlugin:
@pytest.mark.parametrize(
"angfile_tsl, map_shape, step_sizes, phase_id, n_unknown_columns, example_rot",
[
(
# Read by angfile_tsl() via request.param (passed via `indirect` below)
(
(5, 3), # map_shape
(0.1, 0.1), # step_sizes
np.zeros(5 * 3, dtype=int), # phase_id
5, # n_unknown_columns
np.array(
[[1.59942, 2.37748, 4.53419], [1.59331, 2.37417, 4.53628]]
), # rotations as rows of Euler angle triplets
),
(5, 3),
(0.1, 0.1),
np.zeros(5 * 3, dtype=int),
5,
np.array(
[[1.59942, 2.37748, -1.74690], [1.59331, 2.37417, -1.74899]]
), # rotations as rows of Euler angle triplets
),
(
(
(8, 4), # map_shape
(1.5, 1.5), # step_sizes
np.zeros(8 * 4, dtype=int), # phase_id
5, # n_unknown_columns
np.array(
[[5.81107, 2.34188, 4.47345], [6.16205, 0.79936, 1.31702]]
), # rotations as rows of Euler angle triplets
),
(8, 4),
(1.5, 1.5),
np.zeros(8 * 4, dtype=int),
5,
np.array(
[[-0.12113, 2.34188, 1.31702], [-0.47211, 0.79936, -1.80973]]
), # rotations as rows of Euler angle triplets
),
],
indirect=["angfile_tsl"],
)
def test_load_ang_tsl(
self,
angfile_tsl,
map_shape,
step_sizes,
phase_id,
n_unknown_columns,
example_rot,
):
cm = load(angfile_tsl)
# Fraction of non-indexed points
non_indexed_fraction = int(np.prod(map_shape) * 0.1)
assert non_indexed_fraction == np.sum(~cm.is_indexed)
# Properties
assert list(cm.prop.keys()) == [
"iq",
"ci",
"unknown1",
"fit",
"unknown2",
"unknown3",
"unknown4",
"unknown5",
]
# Coordinates
ny, nx = map_shape
dy, dx = step_sizes
assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))
# Map shape and size
assert cm.shape == map_shape
assert cm.size == np.prod(map_shape)
# Attributes are within expected ranges or have a certain value
assert cm.prop["ci"].max() <= 1
assert cm["indexed"].fit.max() <= 3
assert all(cm["not_indexed"].fit == 180)
assert all(cm["not_indexed"].ci == -1)
# Phase IDs (accounting for non-indexed points)
phase_id[cm["not_indexed"].id] = -1
assert np.allclose(cm.phase_id, phase_id)
# Rotations
rot_unique = np.unique(cm["indexed"].rotations.to_euler(), axis=0)
assert np.allclose(
np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-5
)
assert np.allclose(
cm["not_indexed"].rotations.to_euler()[0],
np.array([np.pi, 0, np.pi]),
atol=1e-5,
)
# Phases
assert cm.phases.size == 2 # Including non-indexed
assert cm.phases.ids == [-1, 0]
phase = cm.phases[0]
assert phase.name == "Aluminum"
assert phase.point_group.name == "432"
@pytest.mark.parametrize(
"angfile_astar, map_shape, step_sizes, phase_id, example_rot",
[
(
# Read by angfile_astar() via request.param (passed via `indirect`
# below)
(
(9, 3), # map_shape
(4.5, 4.5), # step_sizes
np.ones(9 * 3, dtype=int), # phase_id
np.array(
[
[1.895079, 0.739496, 1.413542],
[1.897871, 0.742638, 1.413717],
]
),
),
(9, 3),
(4.5, 4.5),
np.ones(9 * 3, dtype=int),
np.array(
[[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]
),
),
(
(
(11, 13), # map_shape
(10, 10), # step_sizes
np.ones(11 * 13, dtype=int), # phase_id
np.array(
[
[1.621760, 2.368935, 4.559324],
[1.604481, 2.367539, 4.541870],
]
),
),
(11, 13),
(10, 10),
np.ones(11 * 13, dtype=int),
np.array(
[[1.621760, 2.368935, -1.723861], [1.604481, 2.367539, -1.741315]]
),
),
],
indirect=["angfile_astar"],
)
def test_load_ang_astar(
self, angfile_astar, map_shape, step_sizes, phase_id, example_rot,
):
cm = load(angfile_astar)
# Properties
assert list(cm.prop.keys()) == ["ind", "rel", "relx100"]
# Coordinates
ny, nx = map_shape
dy, dx = step_sizes
assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))
# Map shape and size
assert cm.shape == map_shape
assert cm.size == np.prod(map_shape)
# Attributes are within expected ranges or have a certain value
assert cm.prop["ind"].max() <= 100
assert cm.prop["rel"].max() <= 1
assert cm.prop["relx100"].max() <= 100
relx100 = (cm.prop["rel"] * 100).astype(int)
assert np.allclose(cm.prop["relx100"], relx100)
# Phase IDs
assert np.allclose(cm.phase_id, phase_id)
# Rotations
rot_unique = np.unique(cm.rotations.to_euler(), axis=0)
assert np.allclose(
np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-6
)
# Phases
assert cm.phases.size == 1
assert cm.phases.ids == [1]
phase = cm.phases[1]
assert phase.name == "Nickel"
assert phase.point_group.name == "432"
@pytest.mark.parametrize(
"angfile_emsoft, map_shape, step_sizes, phase_id, example_rot",
[
(
# Read by angfile_emsoft() via request.param (passed via `indirect`
# below)
(
(10, 11), # map_shape
(4.5, 4.5), # step_sizes
np.concatenate(
(
np.ones(int(np.ceil((10 * 11) / 2))),
np.ones(int(np.floor((10 * 11) / 2))) * 2,
)
), # phase_id
np.array(
[
[1.895079, 0.739496, 1.413542],
[1.897871, 0.742638, 1.413717],
]
),
),
(10, 11),
(4.5, 4.5),
np.concatenate(
(
np.ones(int(np.ceil((10 * 11) / 2))),
np.ones(int(np.floor((10 * 11) / 2))) * 2,
)
),
np.array(
[[1.895079, 0.739496, 1.413542], [1.897871, 0.742638, 1.413717]]
),
),
(
(
(3, 6), # map_shape
(10, 10), # step_sizes
np.concatenate(
(
np.ones(int(np.ceil((3 * 6) / 2))),
np.ones(int(np.floor((3 * 6) / 2))) * 2,
)
), # phase_id
np.array(
[[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]]
),
),
(3, 6),
(10, 10),
np.concatenate(
(
np.ones(int(np.ceil((3 * 6) / 2))),
np.ones(int(np.floor((3 * 6) / 2))) * 2,
)
),
np.array([[1.62176, 2.36894, -1.72386], [1.60448, 2.36754, -1.72386]]),
),
],
indirect=["angfile_emsoft"],
)
def test_load_ang_emsoft(
self, angfile_emsoft, map_shape, step_sizes, phase_id, example_rot,
):
cm = load(angfile_emsoft)
# Properties
assert list(cm.prop.keys()) == ["iq", "dp"]
# Coordinates
ny, nx = map_shape
dy, dx = step_sizes
assert np.allclose(cm.x, np.tile(np.arange(nx) * dx, ny))
assert np.allclose(cm.y, np.sort(np.tile(np.arange(ny) * dy, nx)))
# Map shape and size
assert cm.shape == map_shape
assert cm.size == np.prod(map_shape)
# Attributes are within expected ranges or have a certain value
assert cm.prop["iq"].max() <= 100
assert cm.prop["dp"].max() <= 1
# Phase IDs
assert np.allclose(cm.phase_id, phase_id)
# Rotations
rot_unique = np.unique(cm.rotations.to_euler(), axis=0)
assert np.allclose(
np.sort(rot_unique, axis=0), np.sort(example_rot, axis=0), atol=1e-5
)
# Phases (change if file header is changed!)
phases_in_data = cm["indexed"].phases_in_data
assert phases_in_data.size == 2
assert phases_in_data.ids == [1, 2]
assert phases_in_data.names == ["austenite", "ferrite"]
assert [i.name for i in phases_in_data.point_groups] == ["432"] * 2
def test_get_header(self, temp_ang_file):
temp_ang_file.write(ANGFILE_ASTAR_HEADER)
temp_ang_file.close()
assert _get_header(open(temp_ang_file.name)) == [
"# File created from ACOM RES results",
"# ni-dislocations.res",
"# ".rstrip(),
"# ".rstrip(),
"# MaterialName Nickel",
"# Formula",
"# Symmetry 43",
"# LatticeConstants 3.520 3.520 3.520 90.000 90.000 90.000",
"# NumberFamilies 4",
"# hklFamilies 1 1 1 1 0.000000",
"# hklFamilies 2 0 0 1 0.000000",
"# hklFamilies 2 2 0 1 0.000000",
"# hklFamilies 3 1 1 1 0.000000",
"#",
"# GRID: SqrGrid#",
]
@pytest.mark.parametrize(
"expected_vendor, expected_columns, vendor_header",
[
(
"tsl",
[
"iq",
"ci",
"phase_id",
"unknown1",
"fit",
"unknown2",
"unknown3",
"unknown4",
"unknown5",
],
ANGFILE_TSL_HEADER,
),
("astar", ["ind", "rel", "phase_id", "relx100"], ANGFILE_ASTAR_HEADER),
("emsoft", ["iq", "dp", "phase_id"], ANGFILE_EMSOFT_HEADER),
],
)
def test_get_vendor_columns(
self, expected_vendor, expected_columns, vendor_header, temp_ang_file
):
expected_columns = ["euler1", "euler2", "euler3", "x", "y"] + expected_columns
n_cols_file = len(expected_columns)
temp_ang_file.write(vendor_header)
temp_ang_file.close()
header = _get_header(open(temp_ang_file.name))
vendor, column_names = _get_vendor_columns(header, n_cols_file)
assert vendor == expected_vendor
assert column_names == expected_columns
@pytest.mark.parametrize("n_cols_file", [15, 20])
def test_get_vendor_columns_unknown(self, temp_ang_file, n_cols_file):
temp_ang_file.write("Look at me!\nI'm Mr. .ang file!\n")
temp_ang_file.close()
header = _get_header(open(temp_ang_file.name))
with pytest.warns(UserWarning, match=f"Number of columns, {n_cols_file}, "):
vendor, column_names = _get_vendor_columns(header, n_cols_file)
assert vendor == "unknown"
expected_columns = [
"euler1",
"euler2",
"euler3",
"x",
"y",
"unknown1",
"unknown2",
"phase_id",
] + ["unknown" + str(i + 3) for i in range(n_cols_file - 8)]
assert column_names == expected_columns
@pytest.mark.parametrize(
"header_phase_part, expected_names, expected_point_groups, "
"expected_lattice_constants",
[
(
[
[
"# MaterialName Nickel",
"# Formula",
"# Symmetry 43",
"# LatticeConstants 3.520 3.520 3.520 90.000 90.000 "
"90.000",
],
[
"# MaterialName Aluminium",
"# Formula Al",
"# Symmetry m3m",
"# LatticeConstants 3.520 3.520 3.520 90.000 90.000 "
"90.000",
],
],
["Nickel", "Aluminium"],
["43", "m3m"],
[[3.52, 3.52, 3.52, 90, 90, 90], [3.52, 3.52, 3.52, 90, 90, 90]],
),
],
)
def test_get_phases_from_header(
self,
header_phase_part,
expected_names,
expected_point_groups,
expected_lattice_constants,
):
# Create header from parts
header = [
"# File created from ACOM RES results",
"# ni-dislocations.res",
"# ",
"# ",
]
hkl_families = [
"# NumberFamilies 4",
"# hklFamilies 1 1 1 1 0.000000",
"# hklFamilies 2 0 0 1 0.000000",
"# hklFamilies 2 2 0 1 0.000000",
"# hklFamilies 3 1 1 1 0.000000",
]
for phase in header_phase_part:
header += phase + hkl_families
header += [
"#",
"# GRID: SqrGrid#",
]
names, point_groups, lattice_constants = _get_phases_from_header(header)
assert names == expected_names
assert point_groups == expected_point_groups
assert np.allclose(lattice_constants, expected_lattice_constants)
class TestEMsoftPlugin:
@pytest.mark.parametrize(
(
"temp_emsoft_h5ebsd_file, map_shape, step_sizes, example_rot, "
"n_top_matches, refined"
),
[
(
(
(7, 3), # map_shape
(1.5, 1.5), # step_sizes
np.array(
[
[6.148271, 0.792205, 1.324879],
[6.155951, 0.793078, 1.325229],
]
), # rotations as rows of Euler angle triplets
50, # n_top_matches
True, # refined
),
(7, 3),
(1.5, 1.5),
np.array(
[[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229],]
),
50,
True,
),
(
(
(5, 17),
(0.5, 0.5),
np.array(
[
[6.148271, 0.792205, 1.324879],
[6.155951, 0.793078, 1.325229],
]
),
20,
False,
),
(5, 17),
(0.5, 0.5),
np.array(
[[6.148271, 0.792205, 1.324879], [6.155951, 0.793078, 1.325229],]
),
20,
False,
),
],
indirect=["temp_emsoft_h5ebsd_file"],
)
def test_load_emsoft(
self,
temp_emsoft_h5ebsd_file,
map_shape,
step_sizes,
example_rot,
n_top_matches,
refined,
):
cm = load(temp_emsoft_h5ebsd_file.filename, refined=refined)
assert cm.shape == map_shape
assert (cm.dy, cm.dx) == step_sizes
if refined:
n_top_matches = 1
assert cm.rotations_per_point == n_top_matches
# Properties
expected_props = [
"AvDotProductMap",
"CI",
"CIMap",
"IQ",
"IQMap",
"ISM",
"ISMap",
"KAM",
"OSM",
"TopDotProductList",
"TopMatchIndices",
]
if refined:
expected_props += ["RefinedDotProducts"]
actual_props = list(cm.prop.keys())
actual_props.sort()
expected_props.sort()
assert actual_props == expected_props
assert cm.phases["austenite"].structure == Structure(
title="austenite",
lattice=Lattice(a=3.595, b=3.595, c=3.595, alpha=90, beta=90, gamma=90),
)
class TestOrixHDF5Plugin:
def test_file_writer(self, crystal_map, temp_file_path):
save(filename=temp_file_path, object2write=crystal_map)
with File(temp_file_path, mode="r") as f:
assert f["manufacturer"][()][0].decode() == "orix"
assert f["version"][()][0].decode() == orix_version
@pytest.mark.parametrize(
"crystal_map_input",
[
((4, 4, 3), (1, 1.5, 1.5), 1, [0, 1]),
((2, 4, 3), (1, 1.5, 1.5), 2, [0, 1, 2]),
],
indirect=["crystal_map_input"],
)
def test_write_read_masked(self, crystal_map_input, temp_file_path):
cm = CrystalMap(**crystal_map_input)
save(filename=temp_file_path, object2write=cm[cm.x > 2])
cm2 = load(temp_file_path)
assert cm2.size != cm.size
with pytest.raises(ValueError, match="operands could not be broadcast"):
_ = np.allclose(cm2.x, cm.x)
cm2.is_in_data = cm.is_in_data
assert cm2.size == cm.size
assert np.allclose(cm2.x, cm.x)
def test_file_writer_raises(self, temp_file_path, crystal_map):
with pytest.raises(OSError, match="Cannot write to the already open file "):
with File(temp_file_path, mode="w") as _:
save(temp_file_path, crystal_map, overwrite=True)
def test_dict2hdf5group(self, temp_file_path):
with File(temp_file_path, mode="w") as f:
group = f.create_group(name="a_group")
with pytest.warns(UserWarning, match="The orix HDF5 writer could not"):
dict2hdf5group(
dictionary={"a": [np.array(24.5)], "c": set()}, group=group
)
def test_crystalmap2dict(self, temp_file_path, crystal_map_input):
cm = CrystalMap(**crystal_map_input)
cm_dict = crystalmap2dict(cm)
this_dict = {"hello": "there"}
cm_dict2 = crystalmap2dict(cm, dictionary=this_dict)
cm_dict2.pop("hello")
assert_dictionaries_are_equal(cm_dict, cm_dict2)
assert np.allclose(cm_dict["data"]["x"], crystal_map_input["x"])
assert cm_dict["header"]["z_step"] == cm.dz
def test_phaselist2dict(self, phase_list):
pl_dict = phaselist2dict(phase_list)
this_dict = {"hello": "there"}
this_dict = phaselist2dict(phase_list, dictionary=this_dict)
this_dict.pop("hello")
assert_dictionaries_are_equal(pl_dict, this_dict)
def test_phase2dict(self, phase_list):
phase_dict = phase2dict(phase_list[0])
this_dict = {"hello": "there"}
this_dict = phase2dict(phase_list[0], dictionary=this_dict)
this_dict.pop("hello")
assert_dictionaries_are_equal(phase_dict, this_dict)
def test_phase2dict_spacegroup(self):
"""Space group is written to dict as an int or "None"."""
sg100 = 100
phase = Phase(space_group=sg100)
phase_dict1 = phase2dict(phase)
assert phase_dict1["space_group"] == sg100
sg200 = GetSpaceGroup(200)
phase.space_group = sg200
phase_dict2 = phase2dict(phase)
assert phase_dict2["space_group"] == sg200.number
phase.space_group = None
phase_dict3 = phase2dict(phase)
assert phase_dict3["space_group"] == "None"
def test_structure2dict(self, phase_list):
structure = phase_list[0].structure
structure_dict = structure2dict(structure)
this_dict = {"hello": "there"}
this_dict = structure2dict(structure, this_dict)
this_dict.pop("hello")
lattice1 = structure_dict["lattice"]
lattice2 = this_dict["lattice"]
assert np.allclose(lattice1["abcABG"], lattice2["abcABG"])
assert np.allclose(lattice1["baserot"], lattice2["baserot"])
assert_dictionaries_are_equal(structure_dict["atoms"], this_dict["atoms"])
def test_hdf5group2dict_update_dict(self, temp_file_path, crystal_map):
save(temp_file_path, crystal_map)
with File(temp_file_path, mode="r") as f:
this_dict = {"hello": "there"}
this_dict = hdf5group2dict(f["crystal_map"], dictionary=this_dict)
assert this_dict["hello"] == "there"
assert this_dict["data"] == f["crystal_map/data"]
assert this_dict["header"] == f["crystal_map/header"]
def test_file_reader(self, crystal_map, temp_file_path):
save(filename=temp_file_path, object2write=crystal_map)
cm2 = load(filename=temp_file_path)
assert_dictionaries_are_equal(crystal_map.__dict__, cm2.__dict__)
def test_dict2crystalmap(self, crystal_map):
cm2 = dict2crystalmap(crystalmap2dict(crystal_map))
assert_dictionaries_are_equal(crystal_map.__dict__, cm2.__dict__)
def test_dict2phaselist(self, phase_list):
phase_list2 = dict2phaselist(phaselist2dict(phase_list))
assert phase_list.size == phase_list2.size
assert phase_list.ids == phase_list2.ids
assert phase_list.names == phase_list2.names
assert phase_list.colors == phase_list2.colors
assert [
s1.name == s2.name
for s1, s2 in zip(phase_list.point_groups, phase_list2.point_groups)
]
def test_dict2phase(self, phase_list):
phase1 = phase_list[0]
phase2 = dict2phase(phase2dict(phase1))
assert phase1.name == phase2.name
assert phase1.color == phase2.color
assert phase1.space_group.number == phase2.space_group.number
assert phase1.point_group.name == phase2.point_group.name
assert phase1.structure.lattice.abcABG() == phase2.structure.lattice.abcABG()
def test_dict2phase_spacegroup(self):
"""Space group number int or None is properly parsed from a dict.
"""
phase1 = Phase(space_group=200)
phase_dict = phase2dict(phase1)
phase2 = dict2phase(phase_dict)
assert phase1.space_group.number == phase2.space_group.number
phase_dict.pop("space_group")
phase3 = dict2phase(phase_dict)
assert phase3.space_group is None
def test_dict2structure(self, phase_list):
structure1 = phase_list[0].structure
structure2 = dict2structure(structure2dict(structure1))
lattice1 = structure1.lattice
lattice2 = structure2.lattice
assert lattice1.abcABG() == lattice2.abcABG()
assert np.allclose(lattice1.baserot, lattice2.baserot)
assert str(structure1.element) == str(structure2.element)
assert np.allclose(structure1.xyz, structure2.xyz)
def test_dict2lattice(self, phase_list):
lattice = phase_list[0].structure.lattice
lattice2 = dict2lattice(lattice2dict(lattice))
assert lattice.abcABG() == lattice2.abcABG()
assert np.allclose(lattice.baserot, lattice2.baserot)
def test_dict2atom(self, phase_list):
atom = phase_list[0].structure[0]
atom2 = dict2atom(atom2dict(atom))
assert str(atom.element) == str(atom2.element)
assert np.allclose(atom.xyz, atom2.xyz)
def test_read_point_group_from_v0_3_x(self, temp_file_path, crystal_map):
crystal_map.phases[0].point_group = "1"
save(filename=temp_file_path, object2write=crystal_map)
# First, ensure point group data set name is named "symmetry", as in v0.3.0
with File(temp_file_path, mode="r+") as f:
for phase in f["crystal_map/header/phases"].values():
phase["symmetry"] = phase["point_group"]
del phase["point_group"]
# Then, make sure it can still be read
cm2 = load(filename=temp_file_path)
# And that the symmetry operations are the same, for good measure
print(crystal_map)
print(cm2)
assert np.allclose(
crystal_map.phases[0].point_group.data, cm2.phases[0].point_group.data
)
```
#### File: orix/tests/test_neoeuler.py
```python
import pytest
import numpy as np
from orix.vector.neo_euler import Rodrigues, Homochoric
from orix.quaternion.rotation import Rotation
""" Rodrigues """
@pytest.mark.parametrize(
"rotation, expected",
[
(Rotation([1, 0, 0, 0]), [0, 0, 0]),
(Rotation([0.9239, 0.2209, 0.2209, 0.2209]), [0.2391, 0.2391, 0.2391]),
],
)
def test_from_rotation(rotation, expected):
rodrigues = Rodrigues.from_rotation(rotation)
assert np.allclose(rodrigues.data, expected, atol=1e-4)
@pytest.mark.parametrize(
"rodrigues, expected", [(Rodrigues([0.2391, 0.2391, 0.2391]), np.pi / 4),]
)
def test_angle(rodrigues, expected):
angle = rodrigues.angle
assert np.allclose(angle.data, expected, atol=1e-3)
""" Homochoric"""
@pytest.mark.parametrize(
"rotation", [Rotation([1, 0, 0, 0]), Rotation([0.9239, 0.2209, 0.2209, 0.2209])]
)
def test_Homochoric_from_rotation(rotation):
h = Homochoric.from_rotation(rotation)
return None
@pytest.mark.parametrize(
"rotation", [Rotation([1, 0, 0, 0]), Rotation([0.9239, 0.2209, 0.2209, 0.2209])]
)
@pytest.mark.xfail(strict=True, reason="AttributeError")
def test_Homochoric_angle(rotation):
h = Homochoric.from_rotation(rotation)
h.angle
```
#### File: orix/tests/test_rotation.py
```python
from diffpy.structure.spacegroups import sg225
from math import cos, sin, tan, pi
import numpy as np
import pytest
from orix.quaternion import Quaternion
from orix.quaternion.rotation import Rotation
from orix.vector import Vector3d
rotations = [
(0.707, 0.0, 0.0, 0.707),
(0.5, -0.5, -0.5, 0.5),
(0.0, 0.0, 0.0, 1.0),
(1.0, 1.0, 1.0, 1.0),
((0.5, -0.5, -0.5, 0.5), (0.0, 0.0, 0.0, 1.0),),
Rotation([(2, 4, 6, 8), (-1, -2, -3, -4)]),
np.array((4, 3, 2, 1)),
]
quaternions = [
(0.881, 0.665, 0.123, 0.517),
(0.111, 0.222, 0.333, 0.444),
((1, 0, 0.5, 0), (3, 1, -1, -2),),
[
[[0.343, 0.343, 0, -0.333], [-7, -8, -9, -10],],
[[0.00001, -0.0001, 0.001, -0.01], [0, 0, 0, 0]],
],
]
vectors = [(1, 0, 0), (1, 1, 0), (0.7, 0.8, 0.9), [[1, 1, 1], [0.4, 0.5, -0.6],]]
@pytest.fixture(params=rotations)
def rotation(request):
return Rotation(request.param)
rotation_2 = rotation
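# Binding the fixture to a second module-level name registers it again as "rotation_2", so a
# test (or indirect parametrization) can request two independently parametrized Rotations.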
@pytest.fixture(params=quaternions)
def quaternion(request):
return Quaternion(request.param)
@pytest.fixture(params=vectors)
def vector(request):
return Vector3d(request.param)
def test_init(rotation):
assert np.allclose(rotation.norm.data, 1)
assert rotation.improper.shape == rotation.shape
assert np.all(rotation.improper == False)
def test_slice(rotation):
r = rotation[0]
assert np.allclose(r.data, rotation.data[0])
assert r.improper.shape == r.shape
def test_unit(rotation):
assert isinstance(rotation.unit, Rotation)
assert np.allclose(rotation.unit.norm.data, 1)
@pytest.mark.parametrize(
"rotation, quaternion, expected",
[
([0.5, 0.5, 0.5, 0.5], [1, 0, 0, 0], [0.5, 0.5, 0.5, 0.5]),
(
[0.5, -0.5, -0.5, 0.5],
[0, cos(pi / 4), sin(pi / 4), 0],
[cos(pi / 4), 0, sin(pi / 4), 0],
),
(
[0.794743, 0.50765, -0.33156, 0.0272659],
[0.545394, 0.358915, 0.569472, 0.499427],
[0.426441, 0.380997, 0.0280051, 0.819881],
),
],
indirect=["rotation", "quaternion"],
)
def test_mul_quaternion(rotation, quaternion, expected):
r = rotation * quaternion
assert isinstance(r, Quaternion)
assert np.allclose(r.data, expected)
rotation.improper = 1
ri = rotation * quaternion
assert np.allclose(r.data, ri.data)
@pytest.mark.parametrize(
"r1, i1, r2, i2, expected, expected_i",
[
([0.5, 0.5, 0.5, 0.5], 0, [0.5, 0.5, 0.5, 0.5], 0, [-0.5, 0.5, 0.5, 0.5], 0),
([0.5, 0.5, 0.5, 0.5], 1, [0.5, 0.5, 0.5, 0.5], 0, [-0.5, 0.5, 0.5, 0.5], 1),
(
[0.285883, 0.726947, 0.611896, -0.124108],
0,
[-0.247817, -0.574353, 0.594154, 0.505654],
1,
[0.0458731, 0.0387992, -0.278082, 0.958677],
1,
),
(
[tan(pi / 6), 0, -tan(pi / 6), tan(pi / 6)],
1,
[0.5, -0.5, -0.5, 0.5],
1,
[-0.288675, -0.288675, -0.866025, 0.288675],
0,
),
],
)
def test_mul_rotation(r1, i1, r2, i2, expected, expected_i):
r1 = Rotation(r1)
r1.improper = i1
r2 = Rotation(r2)
r2.improper = i2
r = r1 * r2
assert isinstance(r, Rotation)
assert np.allclose(r.data, expected)
assert np.all(r.improper == expected_i)
@pytest.mark.parametrize(
"rotation, i, vector, expected",
[
([0.5, 0.5, 0.5, 0.5], 0, [1, 1, 0], [0, 1, 1]),
([0.5, 0.5, 0.5, 0.5], 1, [1, 1, 0], [0, -1, -1]),
(
[-0.172767, -0.346157, 0.664402, -0.63945],
0,
[0.237425, -0.813408, 0.531034],
[0.500697, -0.524764, 0.688422],
),
(
[-0.172767, -0.346157, 0.664402, -0.63945],
1,
[0.237425, -0.813408, 0.531034],
[-0.500697, 0.524764, -0.688422],
),
],
indirect=["rotation", "vector"],
)
def test_mul_vector(rotation, i, vector, expected):
rotation.improper = i
v = rotation * vector
assert isinstance(v, Vector3d)
assert np.allclose(v.data, expected)
@pytest.mark.parametrize(
"rotation, i, number, expected_i",
[
([0.5, 0.5, 0.5, 0.5], 0, 1, 0),
([0.5, 0.5, 0.5, 0.5], 1, 1, 1),
([0.5, 0.5, 0.5, 0.5], 1, -1, 0),
([[0, 1, 0, 0], [0, 0, 1, 0]], [0, 1], [-1, 1], [1, 1]),
([[0, 1, 0, 0], [0, 0, 1, 0]], [1, 0], [-1, 1], [0, 0]),
pytest.param([0.5, 0.5, 0.5, 0.5], 1, 2, 0, marks=pytest.mark.xfail),
pytest.param(
[0.545394, 0.358915, 0.569472, 0.499427], 0, -2, 0, marks=pytest.mark.xfail
),
],
indirect=["rotation"],
)
def test_mul_number(rotation, i, number, expected_i):
rotation.improper = i
r = rotation * number
assert np.allclose(rotation.data, r.data)
assert np.allclose(r.improper, expected_i)
@pytest.mark.xfail(strict=True, reason=TypeError)
def test_mul_failing(rotation):
_ = rotation * "cant-mult-by-this"
@pytest.mark.parametrize(
"rotation, i, expected_i",
[([0.5, 0.5, 0.5, 0.5], 0, 1), ([0.5, 0.5, 0.5, 0.5], 1, 0),],
indirect=["rotation"],
)
def test_neg(rotation, i, expected_i):
rotation.improper = i
r = -rotation
assert np.allclose(r.improper, expected_i)
""" these tests address .to_euler() and .from_euler()"""
@pytest.fixture()
def e():
e = np.random.rand(10, 3)
return e
def test_to_from_euler(e):
""" Checks that going euler2quat2euler gives no change """
r = Rotation.from_euler(e)
e2 = r.to_euler()
assert np.allclose(e.data, e2.data)
def test_direction_kwarg(e):
r = Rotation.from_euler(e, direction="lab2crystal")
def test_Krakow_Hielscher(e):
r = Rotation.from_euler(e, convention="Krakow_Hielscher")
@pytest.mark.xfail()
def test_direction_kwarg_dumb(e):
r = Rotation.from_euler(e, direction="dumb_direction")
@pytest.mark.xfail()
def test_unsupported_conv_to(e):
r = Rotation.from_euler(e)
r.to_euler(convention="unsupported")
@pytest.mark.xfail()
def test_unsupported_conv_from(e):
r = Rotation.from_euler(e, convention="unsupported")
def test_edge_cases_to_euler():
x = np.sqrt(1 / 2)
q = Rotation(np.asarray([x, 0, 0, x]))
e = q.to_euler()
q = Rotation(np.asarray([0, x, 0, 0]))
e = q.to_euler()
@pytest.mark.parametrize(
"rotation, improper, expected, improper_expected",
[
(
np.array([[0.5, 0.5, 0.5, 0.5], [1, 0, 0, 1],]),
[0, 0],
np.array([[0.5, 0.5, 0.5, 0.5], [0.707106, 0, 0, 0.707106],]),
[0, 0],
),
(
np.array([[0.5, 0.5, 0.5, 0.5], [1, 0, 0, 1],]),
[0, 1],
np.array([[0.5, 0.5, 0.5, 0.5], [0.707106, 0, 0, 0.707106],]),
[0, 1],
),
(
np.array([[0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5],]),
[0, 0],
np.array([[0.5, 0.5, 0.5, 0.5],]),
[0],
),
(
np.array([[0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5],]),
[0, 1],
np.array([[0.5, 0.5, 0.5, 0.5], [0.5, 0.5, 0.5, 0.5],]),
[0, 1],
),
],
indirect=["rotation"],
)
def test_unique(rotation, improper, expected, improper_expected):
rotation.improper = improper
u = rotation.unique()
assert np.allclose(u.data, expected, atol=1e-6)
assert np.allclose(u.improper, improper_expected)
def test_kwargs_unique(rotation):
""" return_index and return_inverse edge cases"""
rotation.unique(return_index=True, return_inverse=True)
rotation.unique(return_index=True, return_inverse=False)
rotation.unique(return_index=False, return_inverse=True)
@pytest.mark.parametrize(
"rotation, improper, expected, improper_expected",
[
(
np.array(
[
[0.231386, 0.270835, 0.779474, 0.515294],
[-0.515294, -0.779474, 0.270835, 0.231386],
]
),
[0, 1],
np.array(
[
[0.231386, -0.270835, -0.779474, -0.515294],
[-0.515294, 0.779474, -0.270835, -0.231386],
]
),
[0, 1],
),
],
indirect=["rotation"],
)
def test_inv(rotation, improper, expected, improper_expected):
rotation.improper = improper
r = ~rotation
assert np.allclose(r.data, expected, atol=1e-6)
assert np.allclose(r.improper, improper_expected)
@pytest.mark.parametrize(
"rotation, improper, rotation_2, improper_2, expected",
[
(
np.array(
[
[-0.192665, -0.7385, 0.605678, -0.22506],
[0.194855, -0.0613995, 0.814759, -0.542614],
[-0.440859, -0.61701, -0.305151, 0.576042],
]
),
[0, 0, 0],
np.array(
[
[0.311833, -0.670051, -0.635546, -0.22332],
[-0.0608553, -0.380776, -0.662, 0.642699],
]
),
[0, 1],
np.array([[0.1001, 0], [0.2947, 0], [0.3412, 0],]),
),
(
np.array(
[
[
[0.75175, 0.250266, -0.352737, 0.49781],
[0.242073, -0.698966, 0.315235, -0.594537],
[0.46822, 0.43453, -0.653468, 0.40612],
[0.472186, -0.414235, -0.552524, -0.547875],
[0.767081, -0.320688, 0.0707849, 0.551122],
],
[
[-0.507603, -0.63199, -0.441212, 0.385045],
[0.775813, 0.122649, -0.616902, -0.0500386],
[0.243256, 0.243706, 0.919676, 0.18876],
[0.472742, 0.453436, 0.677063, -0.335405],
[0.0951788, -0.0223328, 0.924478, -0.368487],
],
]
),
np.array([[1, 0, 0, 1, 0], [1, 1, 0, 1, 1]]),
np.array(
[
[0.733623, -0.289254, -0.51314, -0.338846],
[0.654535, 0.491901, 0.544886, -0.180876],
[0.529135, 0.166796, -0.329274, 0.764051],
]
),
[0, 0, 1],
np.array(
[
[
[0, 0, 0.9360],
[0.4195, 0.0939, 0],
[0.4155, 0.0907, 0],
[0, 0, 0.0559],
[0.4324, 0.2832, 0],
],
[
[0, 0, 0.0655],
[0, 0, 0.5959],
[0.4279, 0.7461, 0],
[0, 0, 0.1534],
[0, 0, 0.5393],
],
]
),
),
],
indirect=["rotation", "rotation_2"],
)
def test_dot_outer_rot(rotation, improper, rotation_2, improper_2, expected):
rotation.improper = improper
rotation_2.improper = improper_2
cosines = rotation.dot_outer(rotation_2)
assert cosines.shape == rotation.shape + rotation_2.shape
assert np.allclose(cosines.data, expected, atol=1e-4)
@pytest.mark.parametrize(
"rotation, improper, quaternion, expected",
[
(
np.array(
[
[0.915014, 0.033423, -0.292416, 0.275909],
[0.117797, -0.260041, -0.54774, 0.786437],
[0.301376, 0.818476, 0.482242, 0.0819321],
]
),
[0, 0, 1],
np.array(
[
[0.15331, -0.0110295, -0.17113, 0.973185],
[0.969802, 0.089686, 0.186519, -0.12904],
]
),
np.array([[0.4585, 0.8002], [0.8800, 0.1127], [0, 0],]),
),
],
indirect=["rotation", "quaternion"],
)
def test_dot_outer_quat(rotation, improper, quaternion, expected):
rotation.improper = improper
cosines = rotation.dot_outer(quaternion)
assert cosines.shape == rotation.shape + quaternion.shape
assert np.allclose(cosines.data, expected, atol=1e-4)
@pytest.mark.parametrize(
"rotation, expected",
[
([1, 0, 0, 0], [0, 0, 1]),
([-1, 0, 0, 0], [0, 0, -1]),
([0, 0.5 ** 0.5, 0.5 ** 0.5, 0], [0.5 ** 0.5, 0.5 ** 0.5, 0]),
([[1, 0, 0, 0], [-1, 0, 0, 0],], [[0, 0, 1], [0, 0, -1]]),
],
indirect=["rotation"],
)
def test_axis(rotation, expected):
ax = rotation.axis
assert np.allclose(ax.data, expected)
@pytest.mark.parametrize(
"rotation, improper",
[([(1, 0, 0, 0), (1, 0, 0, 0)], [0, 1]), ([(0.5 ** 0.5, 0, 0, 0.5 ** 0.5)], [1]),],
)
def test_antipodal(rotation, improper):
rotation = Rotation(rotation)
rotation.improper = improper
a = rotation.antipodal
assert np.allclose(a[0].data, rotation.data)
assert np.allclose(a[1].data, -rotation.data)
assert np.allclose(a[0].improper, rotation.improper)
assert np.allclose(a[1].improper, rotation.improper)
@pytest.mark.parametrize("shape, reference", [((1,), (1, 0, 0, 0))])
def test_random_vonmises(shape, reference):
r = Rotation.random_vonmises(shape, 1.0, reference)
assert r.shape == shape
assert isinstance(r, Rotation)
class TestFromToMatrix:
def test_to_matrix(self):
r = Rotation([[1, 0, 0, 0], [3, 0, 0, 0], [0, 1, 0, 0], [0, 2, 0, 0]]).reshape(
2, 2
)
om = np.array(
[np.eye(3), np.eye(3), np.diag([1, -1, -1]), np.diag([1, -1, -1])]
)
assert np.allclose(r.to_matrix(), om.reshape((2, 2, 3, 3)))
def test_from_matrix(self):
r = Rotation([[1, 0, 0, 0], [3, 0, 0, 0], [0, 1, 0, 0], [0, 2, 0, 0]])
om = np.array(
[np.eye(3), np.eye(3), np.diag([1, -1, -1]), np.diag([1, -1, -1])]
)
assert np.allclose(Rotation.from_matrix(om).data, r.data)
assert np.allclose(
Rotation.from_matrix(om.reshape((2, 2, 3, 3))).data, r.reshape(2, 2).data
)
def test_from_to_matrix(self):
om = np.array(
[np.eye(3), np.eye(3), np.diag([1, -1, -1]), np.diag([1, -1, -1])]
)
assert np.allclose(Rotation.from_matrix(om).to_matrix(), om)
def test_from_to_matrix2(self, e):
r = Rotation.from_euler(e.reshape((5, 2, 3)))
assert np.allclose(Rotation.from_matrix(r.to_matrix()).data, r.data)
def test_get_rotation_matrix_from_diffpy(self):
"""Checking that getting rotation matrices from diffpy.structure
works without issue.
"""
r = Rotation.from_matrix([i.R for i in sg225.symop_list])
assert not np.isnan(r.data).any()
``` |
{
"source": "joonatant/gauss-elim-generator",
"score": 3
} |
#### File: gauss-elim-generator/python/csv_parser.py
```python
import pandas as pd
import re
import math
# Works for str inputs of the form 'n' and 'n/m', where n and m are integers.
def str_to_tuple(entry):
entry = str(entry)
if ("/" in entry):
temp = entry.split("/")
if (not len(temp)==2):
raise Exception("Wrong type of input.")
else:
return (int(temp[0]), int(temp[1]))
else:
return (int(entry), 1)
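# Example: str_to_tuple("3/4") -> (3, 4); str_to_tuple("5") -> (5, 1).
# csv_to_matrix below reads a CSV of such entries (ignoring any other characters) and returns
# the matrix as a list of lists of (numerator, denominator) tuples.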
def csv_to_matrix(fileName="gauss.csv"):
df = pd.read_csv(fileName, header=None)
data = df.values.tolist()
allowed = "-0123456789/"
for n in range(0, len(data)):
for m in range(0, len(data[n])):
temp=""
#print(data[n][m])
for l in range(0, len(str(data[n][m]))):
k = str(data[n][m])[l]
if(k in allowed):
temp = temp + k
data[n][m] = str_to_tuple(temp)
return data
```
#### File: gauss-elim-generator/python/matrix_to_tex.py
```python
import math
import sys
global global_inv
global_inv = len(sys.argv) > 2 and sys.argv[2] == "i"
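# Passing "i" as the second CLI argument splits the array spec into two equal halves separated
# by a vertical bar -- presumably for augmented [A|I] layouts when generating an inverse.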
def matrix_latex_gen(A, num=0):
rows = len(A)
rowlen = len(A[0])
output = ""
if(global_inv):
output = "\\[\n\\left[\n\\begin{array}{" + (int(rowlen/2))*"c" + "|"+ (int(rowlen/2))*"c"+"}\n"
else:
output = "\\[\n\\left[\n\\begin{array}{" + (rowlen-1)*"c" + "|c}\n"
for n in range(0, rows):
temp = ""
for m in range(0, rowlen):
if(m == rowlen - 1):
temp = temp + tuple_to_tex(A[n][m]) + " \\\\\n"
else:
temp = temp + tuple_to_tex(A[n][m]) + " & "
output = output + temp
title = ""
if(num>0):
title = "(" + str(num) + ")"
output = output + "\\end{array}\n\\right]"+ title +"\n\\]\n"
return output
def full_latex(total, fileName="gauss.tex"):
begin = "\\documentclass[12pt]{article}\n\\usepackage{amsmath}\n\\usepackage[margin=1in]{geometry}\n\\begin{document}\n"
matricies = [matrix_latex_gen(total[i], i+1) for i in range(0, len(total))]
end = "\\end{document}"
file = open(fileName, "w")
file.write(begin)
file.writelines(matricies)
file.write(end)
file.close()
def tuple_to_tex(a):
if(a[1]==1):
return str(a[0])
else:
return "\\dfrac{"+str(a[0])+"}{"+str(a[1])+"}"
``` |
{
"source": "jooncco/is_A",
"score": 2
} |
#### File: jooncco/is_A/build_KB.py
```python
from fastText import load_model
from sklearn.metrics.pairwise import cosine_similarity
# globals
model = load_model('wiki.ko.bin')
category = ['학교', '음식', '회사', '식물', '동물', '인물', '숫자', '음료', '색상', '국가', '도시', '의류', '프로그램', '사이트', '행성', '단체', '도구', '게임', '언어', '기계', '서적', '영화', '건축물']
category_idx = dict()
for i in range(0, len(category)):
category_idx[category[i]] = i
def initialize():
magic_words = ''
magic_words = magic_words+ '연세\t학교\t1\n'+'대학교\t학교\t1\n'+'고등학교\t학교\t1\n'+'초등학교\t학교\t1\n'+'서울\t학교\t1\n'+'유치원\t학교\t1\n'+'성당\t학교\t1\n'
magic_words = magic_words + '포도\t음식\t1\n'+'햄버거\t음식\t1\n'+'국수\t음식\t1\n'+'김치\t음식\t1\n'+'콜라\t음식\t1\n'
magic_words = magic_words + '삼성\t회사\t1\n'+'구글\t회사\t1\n'+'페이스북\t회사\t1\n'+'네이버\t회사\t1\n'+'아마존\t회사\t1\n'+'네이트\t회사\t1\n'+'트위터\t회사\t1\n'
magic_words = magic_words + '나무\t식물\t1\n'+'장미\t식물\t1\n'+'꽃\t식물\t1\n'+'풀\t식물\t1\n'+'데이지\t식물\t1\n'+'나팔꽃\t식물\t1\n'+'라플레시아\t식물\t1\n'+'튤립\t식물\t1\n'
magic_words = magic_words + '사자\t동물\t1\n'+'인간\t동물\t1\n'+'호랑이\t동물\t1\n'+'기린\t동물\t1\n'+'사람\t동물\t1\n'+'게\t동물\t1\n'+'토끼\t동물\t1\n'+'돼지\t동물\t1\n'
magic_words = magic_words + '문재인\t인물\t1\n'+'범키\t인물\t1\n'+'로꼬\t인물\t1\n'+'씨엔블루\t인물\t1\n'+'에디킴\t인물\t1\n'+'마마무\t인물\t1\n'+'폴김\t인물\t1\n'+'김연아\t인물\t1\n'
magic_words = magic_words + '1\t숫자\t1\n'+'2\t숫자\t1\n'+'3\t숫자\t1\n'+'4\t숫자\t1\n'+'5\t숫자\t1\n'+'7\t숫자\t1\n'+'열\t숫자\t1\n'+'하나\t숫자\t1\n'+'둘\t숫자\t1\n'+'셋\t숫자\t1\n'
magic_words = magic_words + '콜라\t음료\t1\n'+'사이다\t음료\t1\n'+'쥬스\t음료\t1\n'+'커피\t음료\t1\n'+'참이슬\t음료\t1\n'+'맥주\t음료\t1\n'+'카스\t음료\t1\n'+'오렌지주스\t음료\t1\n'
magic_words = magic_words + '하얀색\t색상\t1\n'+'검정\t색상\t1\n'+'빨강\t색상\t1\n'+'노랑\t색상\t1\n'+'초록\t색상\t1\n'+'파랑\t색상\t1\n'+'주황\t색상\t1\n'+'보라\t색상\t1\n'
magic_words = magic_words + '대한민국\t국가\t1\n'+'미국\t국가\t1\n'+'러시아\t국가\t1\n'+'중국\t국가\t1\n'+'일본\t국가\t1\n'+'캐나다\t국가\t1\n'+'터키\t국가\t1\n'+'스위스\t국가\t1\n'+'독일\t국가\t1\n'
magic_words = magic_words + '서울\t도시\t1\n'+'인천\t도시\t1\n'+'베를린\t도시\t1\n'+'베이징\t도시\t1\n'+'도쿄\t도시\t1\n'+'부산\t도시\t1\n'+'홍콩\t도시\t1\n'+'런던\t도시\t1\n'
magic_words = magic_words + '드레스\t의류\t1\n'+'후드티\t의류\t1\n'+'모자\t의류\t1\n'+'티셔츠\t의류\t1\n'+'청바지\t의류\t1\n'+'모자\t의류\t1\n'+'운동화\t의류\t1\n'
magic_words = magic_words + '게임\t프로그램\t1\n'+'백신\t프로그램\t1\n'+'윈도우\t프로그램\t1\n'+'포토샵\t프로그램\t1\n'+'크롬\t프로그램\t1\n'+'인터넷\t프로그램\t1\n'
magic_words = magic_words + '구글\t사이트\t1\n'+'아마존\t사이트\t1\n'+'네이버\t사이트\t1\n'+'페이스북\t사이트\t1\n'+'다음\t사이트\t1\n'+'네이트\t사이트\t1\n'+'옥션\t사이트\t1\n'
magic_words = magic_words + '지구\t행성\t1\n'+'토성\t행성\t1\n'+'금성\t행성\t1\n'+'화성\t행성\t1\n'+'목성\t행성\t1\n'+'수성\t행성\t1\n'+'토성\t행성\t1\n'+'천왕성\t행성\t1\n'+'해왕성\t행성\t1\n'
magic_words = magic_words + '국경없는의사회\t단체\t1\n'+'녹십자\t단체\t1\n'+'적십자\t단체\t1\n'+'유엔\t단체\t1\n'+'동아리\t단체\t1\n'+'노동자연대\t단체\t1\n'+'팀\t단체\t1\n'
magic_words = magic_words + '가위\t도구\t1\n'+'컴퓨터\t도구\t1\n'+'연필\t도구\t1\n'+'펜\t도구\t1\n'+'라디오\t도구\t1\n'+'빨대\t도구\t1\n'+'컵\t도구\t1\n'+'계산기\t도구\t1\n'
magic_words = magic_words + '디아블로\t게임\t1\n'+'오버워치\t게임\t1\n'+'롤\t게임\t1\n'+'배틀그라운드\t게임\t1\n'+'워크래프트\t게임\t1\n'+'포커\t게임\t1\n'+'크레이지 아케이드\t게임\t1\n'+'메이플스토리\t게임\t1\n'
magic_words = magic_words + '한국어\t언어\t1\n'+'영어\t언어\t1\n'+'중국어\t언어\t1\n'+'라틴어\t언어\t1\n'+'스페인어\t언어\t1\n'+'라틴어\t언어\t1\n'+'러시아어\t언어\t1\n'+'프랑스어\t언어\t1\n'
magic_words = magic_words + '엔진\t기계\t1\n'+'자동차\t기계\t1\n'+'제초기\t기계\t1\n'+'컴퓨터\t기계\t1\n'+'포크레인\t기계\t1\n'+'휴대폰\t기계\t1\n'+'트럭\t기계\t1\n'+'노트북\t기계\t1\n'
magic_words = magic_words + '책\t서적\t1\n'+'위인전\t서적\t1\n'+'자기개발서\t서적\t1\n'+'소설\t서적\t1\n'+'시집\t서적\t1\n'+'동화책\t서적\t1\n'+'수필\t서적\t1\n'+'산문\t서적\t1\n'+'추리소설\t서적\t1\n'
magic_words = magic_words + '공포\t영화\t1\n'+'해리포터\t영화\t1\n'+'반지의제왕\t영화\t1\n'+'타임패러독스\t영화\t1\n'+'액션\t영화\t1\n'+'호러\t영화\t1\n'+'로맨스\t영화\t1\n'+'코믹\t영화\t1\n'
magic_words = magic_words + '롯데타워\t건축물\t1\n'+'63빌딩\t건축물\t1\n'+'청와대\t건축물\t1\n'+'백주년 기념관\t건축물\t1\n'+'경복궁\t건축물\t1\n'+'건물\t건축물\t1\n'+'빌딩\t건축물\t1\n'+'조각상\t건축물\t1\n'+'육교\t건축물\t1\n'
with open('basis.txt', 'w') as kb:
kb.write(magic_words)
kb.close()
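# grow(): for every word in fileName, average its cosine similarity against the seed entities
# of each category in basis.txt (weighted by their scores) and append "word<TAB>category<TAB>score"
# to loco_kb.txt whenever the averaged score exceeds 0.3.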
def grow(fileName, initialGrow=False):
basis_file, output_file = 'basis.txt', 'loco_kb.txt'
with open(fileName, 'r') as data, open(output_file, 'a') as kb:
# copy basis on first growth.
if initialGrow:
with open(basis_file, 'r') as basis:
for line in basis:
kb.write(line)
# grow
for row in data:
word = row.strip()
word_vec = model.get_word_vector(word)
score_vec = [0]*23 # scores for each category.
cur_concept = ''
count = -1 # number of entities in one category in basis.txt
with open(basis_file, 'r') as basis:
for line in basis:
entity, concept, score = line.split('\t')
score = score.strip()
# calculate similarity between (entity, word)
sim = cosine_similarity([word_vec], [model.get_word_vector(entity)])[0][0]
score_vec[category_idx[concept]] = score_vec[category_idx[concept]] + sim*float(score)
if cur_concept != concept:
if count > 0:
score_vec[category_idx[cur_concept]] = score_vec[category_idx[cur_concept]] / float(count)
cur_concept = concept
count = 1
else:
count = count + 1
# normalize the scores of the last category block read from basis.txt
score_vec[category_idx[cur_concept]] = score_vec[category_idx[cur_concept]] / float(count)
for idx in range(0, len(category)):
if score_vec[idx] > 0.3:
kb.write(word+'\t'+category[idx]+'\t'+str(score_vec[idx])+'\n')
initialize()
grow('test.txt', initialGrow=True)
``` |
{
"source": "jooner/lab",
"score": 3
} |
#### File: jooner/lab/train_embedding.py
```python
import numpy as np
from tqdm import tqdm, tnrange, tqdm_notebook
from pandas import read_csv
from nltk import word_tokenize
import torch
from torch import optim
import torch.nn as nn
import torch.nn.functional as F
whitepapers = read_csv('whitepapers.csv')
# merge description and document text
whitepapers['text'] = whitepapers.description + ' ' +\
whitepapers.document_text
# filter down to relevant entries
df = whitepapers.drop(columns=['description',
'document_text',
'document_tokens'])
del whitepapers
# tokenize (aka .split()++, thank you nltk)
train_txt = ''
for _, row in df.iterrows():
train_txt += row['text'].lower()
tokens = word_tokenize(train_txt)
del df
# word2idx and idx2word setup
unique_tokens = set(tokens)
w2x = {word: idx for (idx, word) in enumerate(unique_tokens)}
x2w = {idx: word for (idx, word) in enumerate(unique_tokens)}
indices = [w2x[w] for w in tokens]
# generate training data
window = 2
train_data = []
for idx in range(len(indices)):
for r in range(-window, window + 1):
cxt = idx + r
if not ((cxt < 0) or (cxt >= len(indices)) or (idx == cxt)):
train_data.append([indices[idx], indices[cxt]])
train_data = np.array(train_data)
train_data = torch.LongTensor(train_data)
# record vocab_size
vocab_size = len(unique_tokens)
# sanity check
for [x,y] in train_data[200100:200105]:
print(x2w[int(x)], x2w[int(y)])
# clean memory
del indices
del tokens
# Continuous Bag-of-Words Model
class CBOW(nn.Module):
def __init__(self, vocab_size, embedding_dim, hidden_dim,
context_size, batch_size):
super(CBOW, self).__init__()
self.batch_size = batch_size
self.embed = nn.Embedding(vocab_size, embedding_dim)
self.fc1 = nn.Linear(embedding_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, vocab_size)
self.out = nn.Softmax(dim=2)
def forward(self, x):
x = self.embed(x).view(self.batch_size, 1, -1)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return self.out(x).squeeze()
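# Note: context_size is accepted but never used by the model. Each training example feeds a
# single word id through the embedding, and the softmax outputs a distribution over the whole
# vocabulary for its neighbouring word (see the train_data construction above).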
model = CBOW(vocab_size=vocab_size, embedding_dim=100, hidden_dim=128,
context_size=2, batch_size=256).cuda()
def one_hot(idx_batch):
one_hot_mat = torch.zeros((len(idx_batch), vocab_size)).float()
indices = torch.LongTensor(idx_batch).view(-1, 1)
one_hot_mat.scatter_(1, indices, 1.0)
return one_hot_mat
def mat_loss(pred, gt):
delta = pred.float() - gt.float()
norm = torch.norm(delta, p=2, dim=1)
return (torch.sum(norm) / gt.shape[1])
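# batchify: trim the trailing remainder so the (word, neighbour) pairs split evenly into
# fixed-size batches; inputs are optionally moved to the GPU while targets stay on the CPU
# until one_hot() builds the target matrix.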
def batchify(data, batch_size, use_cuda=False):
rm_size = len(data) % batch_size
x, y = data[:len(data) - rm_size, 0], data[:len(data) - rm_size, 1]  # avoids data[:-0] being empty when batch_size divides len(data)
if use_cuda:
x = x.view(-1, batch_size).cuda()
else:
x = x.view(-1, batch_size)
y = y.view(-1, batch_size)
return x, y
x, y = batchify(train_data, batch_size=256, use_cuda=True)
def train(x_train, y_train, num_epochs, use_cuda=False):
loss_fn = mat_loss
optimizer = optim.SGD(model.parameters(), lr=1e-2)
scheduler = optim.lr_scheduler.StepLR(optimizer,
step_size=10,
gamma=0.5)
for epoch in range(num_epochs):
total_loss = 0
for batch_idx in tqdm(range(x_train.shape[0])):
x = x_train[batch_idx, :]
y = y_train[batch_idx, :]
model.zero_grad()
log_prob = model(x)
gt = one_hot(y)
if use_cuda:
gt = gt.cuda()
loss = loss_fn(log_prob, gt)
loss.backward()
optimizer.step()  # apply the accumulated gradients to the model weights
scheduler.step()
total_loss += loss.data
print(total_loss)
torch.save(model, 'models/model_{}'.format(total_loss))
print("Successfully Saved model_{}!".format(total_loss))
train(x, y, num_epochs=100, use_cuda=True)
``` |
{
"source": "jooneyp/it-ebooksDownloader",
"score": 3
} |
#### File: jooneyp/it-ebooksDownloader/pdfCrawler.py
```python
from bs4 import BeautifulSoup as bs
from urllib2 import urlopen
import mechanize
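# down(id): fetch the it-ebooks.info book page, find the direct download link (the "filepi"
# URL) and save it as "<title>.pdf"; the download host appears to check the Referer header,
# hence br.addheaders below.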
def down(id) :
br.addheaders = [('Referer', 'http://it-ebooks.info/book/' + str(id) + '/')]
url = ("http://it-ebooks.info/book/" + str(id) + "/")
soup = bs(urlopen(url).read())
title = str(soup.html.head.title)[7:-36]
if title != title.replace('/', '-') :
title = title.replace('/', '-')
print "!! replaced to " + title
As = soup.find_all('a')
for a in As :
strLink = str(a.get('href'))
downLink_index = strLink.find("filepi")
if downLink_index != -1 :
print str(id) + " # " + str(title) + " ... "
br.retrieve(strLink, title + '.pdf')[0]
print str(id) + " # Done."
br = mechanize.Browser()
br.set_handle_robots(False)
start = 0
end = 100 #<- put id-number here
for i in xrange(start, end) :
down(i)
``` |
{
"source": "joongang-poc/gcp-dataflow",
"score": 2
} |
#### File: gcp-dataflow/src/col_article_basic_body.py
```python
import apache_beam as beam
import csv
import json
import os
from apache_beam.io.gcp.bigquery_tools import parse_table_schema_from_json
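# preProcessing: parse one JSON line (a MongoDB export record) and backfill every field the
# downstream transform reads, so that missing keys default to None/empty instead of raising
# KeyError.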
def preProcessing(fields):
fields = fields.replace("$","")
json_data = json.loads(fields)
if "press_date" not in json_data :
json_data["press_date"] = {}
if "date" not in json_data["press_date"] :
json_data["press_date"]["date"] = None
if "totalid" not in json_data :
json_data["totalid"] = None
if "section" not in json_data or json_data["section"] is None :
json_data["section"] = []
section_dict = {}
section_dict["cat1"]=None
section_dict["cat2"]=None
section_dict["cat3"]=None
json_data["section"].append(section_dict)
if "article_title" not in json_data :
json_data["article_title"] = None
if "urlpath" not in json_data :
json_data["urlpath"] = None
if "jamid" not in json_data :
json_data["jamid"] = None
if "box_article_count" not in json_data:
json_data["box_article_count"] = None
if "image_cnt" not in json_data:
json_data["image_cnt"] = None
if "sns_cnt" not in json_data:
json_data["sns_cnt"] = None
if "vod_cnt" not in json_data:
json_data["vod_cnt"] = None
if "article_type" not in json_data:
json_data["article_type"] = None
if "keyword" not in json_data:
json_data["keyword"] = None
if "origin_article_id" not in json_data:
json_data["origin_article_id"] = None
if "release_department" not in json_data:
json_data["release_department"] = None
if "source_code" not in json_data:
json_data["source_code"] = None
if "source_name" not in json_data:
json_data["source_name"] = None
if "bulk_site" not in json_data:
json_data["bulk_site"] = None
if "article_flag" not in json_data:
json_data["article_flag"] = None
if "text_cnt" not in json_data:
json_data["text_cnt"] = None
if "create_date" not in json_data :
json_data["create_date"] = {}
if "date" not in json_data["create_date"] :
json_data["create_date"]["date"] = None
if "_id" not in json_data :
json_data["_id"] = {}
if "oid" not in json_data["_id"] :
json_data["_id"]["oid"] = None
if "service_date" not in json_data:
json_data["service_date"] = {}
if "date" not in json_data["service_date"]:
json_data["service_date"]["date"] = None
if "reporter" not in json_data:
json_data["reporter"] = {}
if "reporter_seq" not in json_data["reporter"]:
json_data["reporter"]["reporter_seq"] = None
if "reporter_name" not in json_data["reporter"]:
json_data["reporter"]["reporter_name"] = None
if "department_name" not in json_data["reporter"]:
json_data["reporter"]["department_name"] = None
if "article_body" not in json_data :
json_data["article_body"] = None
if "first_img_src" not in json_data :
json_data["first_img_src"] = None
if "embed_youtube" not in json_data :
json_data["embed_youtube"] = None
if "embed_ooyala" not in json_data :
json_data["embed_ooyala"] = None
yield json_data
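# DataTransformation loads the BigQuery table schema (JSON) bundled under resource/;
# PTransform below flattens each cleaned record into a single BigQuery row per article.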
class DataTransformation:
def __init__(self):
dir_path = os.path.abspath(os.path.dirname(__file__))
self.schema_str=""
schema_file = os.path.join(dir_path, "resource", "[SCHEMA_FILE]")
with open(schema_file) as f:
data = f.read()
self.schema_str = '{"fields": ' + data + '}'
class PTransform(beam.DoFn) :
def __init__(self, default=""):
self.default = default
def process(self, fields):
com = []
com2 = {}
com3 = ""
if type(fields["reporter"]) == type(com):
if len(fields["reporter"])==0 :
reporter_seq = None
reporter_name = None
department_name = None
else :
reporter_seq = fields["reporter"][0]["reporter_seq"]
reporter_name = fields["reporter"][0]["reporter_name"]
department_name = fields["reporter"][0]["department_name"]
if type(fields["reporter"]) == type(com2):
reporter_seq = None
reporter_name = None
department_name = None
section_cat1 = []
section_cat2 = []
section_cat3 = []
recu_sect = []
if type(fields["section"] == type(com)) :
if type(fields["section"][0]) == type(com3) :
real_for = len(fields["section"])
for i in range(real_for) :
sub_recu_cat = {}  # fresh dict per section entry so each appended row is independent
sub_recu_cat["cat1"] = fields["section"][i]
sub_recu_cat["cat2"] = None
sub_recu_cat["cat3"] = None
recu_sect.append(sub_recu_cat)
if type(fields["section"][0]) == type(com2) :
for i in range(len(fields["section"])) :
sub_recu_cat = {}  # fresh dict per section entry
if 'cat1' in fields["section"][i] :
sub_recu_cat["cat1"] = fields["section"][i]["cat1"]
if 'cat2' in fields["section"][i] :
sub_recu_cat["cat2"] = fields["section"][i]["cat2"]
if 'cat3' in fields["section"][i] :
sub_recu_cat["cat3"] = fields["section"][i]["cat3"]
recu_sect.append(sub_recu_cat)
embed_ovpl = []
if fields["embed_ooyala"] is not None :
embed_ovpl = fields["embed_ooyala"].split("@@")
embed_youtubel = []
if fields["embed_youtube"] is not None :
embed_youtubel = fields["embed_youtube"].split("@@")
create_date_variable = None
if fields["create_date"]["date"] is not None :
create_date_variable = fields["create_date"]["date"][0:19]
service_date_variable = None
if fields["service_date"]["date"] is not None :
service_date_variable = fields["service_date"]["date"][0:19]
press_date_variable = None
if fields["press_date"]["date"] is not None :
press_date_variable = fields["press_date"]["date"][0:19]
tablerow = {}
sub_reporter_dict = {}
sub_reporter_dict["reporter_seq"] = reporter_seq
sub_reporter_dict["reporter_name"] = reporter_name
sub_reporter_dict["department_name"] = department_name
tablerow["reporter"] = sub_reporter_dict
tablerow["totalid"] = fields["totalid"]
tablerow["article_title"] = fields["article_title"]
tablerow["urlpath"] = fields["urlpath"]
tablerow["jamid"] = fields["jamid"]
tablerow["bulk_site"] = fields["bulk_site"]
tablerow["article_flag"] = fields["article_flag"]
tablerow["text_cnt"] = fields["text_cnt"]
tablerow["create_date"] = create_date_variable
tablerow["box_article_count"] = fields["box_article_count"]
tablerow["image_cnt"] = fields["image_cnt"]
tablerow["sns_cnt"] = fields["sns_cnt"]
tablerow["vod_cnt"] = fields["vod_cnt"]
tablerow["article_type"] = fields["article_type"]
tablerow["keyword"] = fields["keyword"]
tablerow["release_department"] = fields["release_department"]
tablerow["origin_article_id"] = fields["origin_article_id"]
tablerow["service_date"] = service_date_variable
tablerow["section"] = recu_sect
tablerow["source_code"] = fields["source_code"]
tablerow["source_name"] = fields["source_name"]
tablerow["press_date"] = press_date_variable
tablerow["article_body"] = fields["article_body"]
tablerow["first_img_src"] = fields["first_img_src"]
tablerow["embed_youtube"] = embed_youtubel
tablerow["embed_ovp"] = embed_ovpl
yield tablerow
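# run(): assemble the Dataflow pipeline -- read newline-delimited JSON from GCS, backfill
# missing fields, flatten each record into a BigQuery row, and truncate-write the rows into
# the target table using the bundled schema.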
def run(project, bucket, dataset) :
argv = [
"--project={0}".format(project),
"--job_name=col-article-basic-body",
"--save_main_session",
"--region=asia-northeast1",
"--staging_location=gs://{0}/staging/".format(bucket),
"--temp_location=gs://{0}/temp-location/".format(bucket),
"--max_num_workers=8",
"--autoscaling_algorithm=THROUGHPUT_BASED",
"--runner=DataflowRunner",
"--worker_region=asia-northeast3"
]
events_output = "{}:{}.[DATASET_NAME]".format(project, dataset)
filename = "gs://{}/[FILE_NAME]".format(bucket)
pipeline = beam.Pipeline(argv=argv)
ptransform = (pipeline
| "Read from GCS" >> beam.io.ReadFromText(filename)
| "Pre-Processing" >> beam.FlatMap(preProcessing)
| "PTransform" >> beam.ParDo(PTransform())
)
data_ingestion = DataTransformation()
schema = parse_table_schema_from_json(data_ingestion.schema_str)
(ptransform
| "events:out" >> beam.io.Write(
beam.io.BigQuerySink(
events_output, schema=schema,
write_disposition=beam.io.BigQueryDisposition.WRITE_TRUNCATE,
create_disposition=beam.io.BigQueryDisposition.CREATE_IF_NEEDED)
))
pipeline.run()
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Run pipeline on the cloud")
parser.add_argument("--project", dest="project", help="Unique project ID", required=True)
parser.add_argument("--bucket", dest="bucket", help="Bucket where your data were ingested", required=True)
parser.add_argument("--dataset", dest="dataset", help="BigQuery dataset")
args = vars(parser.parse_args())
print("Correcting timestamps and writing to BigQuery dataset {}".format(args["dataset"]))
run(project=args["project"], bucket=args["bucket"], dataset=args["dataset"])
``` |
{
"source": "joongbo/tta",
"score": 2
} |
#### File: joongbo/tta/run_unsupervisedstsb.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import collections
import modeling
import tensorflow as tf
import tokenization
import numpy as np
import scipy as sp
import csv
from sklearn.metrics.pairwise import cosine_similarity
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string("config_file", "",
"The config json file corresponding to the trained BERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("model_checkpoint", "",
"checkpoint")
flags.DEFINE_string("vocab_file", "",
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_integer("max_seq_length", 128, "The length of maximum sequence.")
class TestingInstance(object):
"""A single test instance (sentence pair)."""
def __init__(self, tokens):
self.tokens = tokens
self.input_tokens = tokens
self.target_tokens = tokens
def __str__(self):
s = ""
s += "tokens: %s\n" % (" ".join(
[tokenization.printable_text(x) for x in self.tokens]))
s += "\n"
return s
def __repr__(self):
return self.__str__()
def create_testing_instances(sentence, tokenizer, max_seq_length=128):
"""Create `TestInstance`s from raw text."""
max_token_num = max_seq_length - 2
tokens = tokenizer.tokenize(sentence)
if len(tokens) > max_token_num:
tokens = tokens[:max_token_num]
if tokens[0] != "[SOS]":
tokens.insert(0, "[SOS]")
if tokens[-1] != "[EOS]":
tokens.append("[EOS]")
instances = []
instances.append(create_instances_from_tokens(tokens))
return instances
def create_instances_from_tokens(tokens):
"""Creates `TestInstance`s for a single sentence."""
instance = TestingInstance(tokens)
return instance
# load tokenizer
tokenizer = tokenization.FullTokenizer(
vocab_file = FLAGS.vocab_file,
do_lower_case=True)
word_to_id = tokenizer.vocab
# load trained model
config = modeling.BertConfig.from_json_file(FLAGS.config_file)
tf.reset_default_graph()
session_config = tf.ConfigProto()
session_config.gpu_options.allow_growth = True
sess = tf.Session(config=session_config)
input_ids = tf.placeholder(dtype=tf.int32, shape=[None, None])
input_mask = tf.placeholder(dtype=tf.int32, shape=[None, None])
model = modeling.BertModel(
config=config,
is_training=False,
input_ids=input_ids,
input_mask=input_mask,
use_one_hot_embeddings=False)
input_tensor = model.get_sequence_output()
input_embeddings = model.get_embedding_output()
input_shape = modeling.get_shape_list(input_tensor, expected_rank=3)
input_tensor = tf.reshape(input_tensor, [input_shape[0]*input_shape[1], input_shape[2]])
saver = tf.train.Saver()
saver.restore(sess, FLAGS.model_checkpoint)
print()
# load STSb-dev-set
labels = []
refs = []
hyps = []
with open('data/stsbenchmark/sts-dev.csv') as f:
reader = csv.reader(f, delimiter='\n')
dev_list = []
for line in reader:
dev = line[0].split('\t')
labels.append(float(dev[4]))
refs.append(dev[5])
hyps.append(dev[6])
# calculate correlation
print('Get scores on STSb-dev. Processing ..')
similarity_scores_representation = []
# similarity_scores_embeddings = []
for cnt, (ref, hyp) in enumerate(zip(refs, hyps)):
if (cnt+1) % 200 == 0:
print(cnt+1, end=', ')
instances = create_testing_instances(ref, tokenizer,
FLAGS.max_seq_length)
batch_input_ids = []
batch_input_mask = []
for _instance in instances:
_input_ids = [word_to_id[_token] for _token in _instance.input_tokens]
_input_mask = [1] * len(_input_ids)
batch_input_ids.append(_input_ids)
batch_input_mask.append(_input_mask)
feed_dict = {input_ids : batch_input_ids,
input_mask : batch_input_mask,
}
[representations_ref, embeddings_ref] = sess.run([input_tensor, input_embeddings], feed_dict=feed_dict)
instances = create_testing_instances(hyp, tokenizer,
FLAGS.max_seq_length)
batch_input_ids = []
batch_input_mask = []
for _instance in instances:
_input_ids = [word_to_id[_token] for _token in _instance.input_tokens]
_input_mask = [1] * len(_input_ids)
batch_input_ids.append(_input_ids)
batch_input_mask.append(_input_mask)
feed_dict = {input_ids : batch_input_ids,
input_mask : batch_input_mask,
}
[representations_hyp, embeddings_hyp] = sess.run([input_tensor, input_embeddings], feed_dict=feed_dict)
sentence_representation_mean_ref = np.mean(representations_ref[1:-1], axis=0)
sentence_representation_mean_hyp = np.mean(representations_hyp[1:-1], axis=0)
score = cosine_similarity([sentence_representation_mean_ref], [sentence_representation_mean_hyp])
similarity_scores_representation.append(score[0][0])
# sentence_embeddings_mean_ref = np.mean(embeddings_ref[0][1:-1], axis=0)
# sentence_embeddings_mean_hyp = np.mean(embeddings_hyp[0][1:-1], axis=0)
# score = cosine_similarity([sentence_embeddings_mean_ref], [sentence_embeddings_mean_hyp])
# similarity_scores_embeddings.append(score[0][0])
print('')
print('STSb-dev (context):', sp.stats.pearsonr(labels, similarity_scores_representation)[0])
# print('STSb-dev (embed) :', sp.stats.pearsonr(labels, similarity_scores_embeddings)[0])
# load STSb-test-set
labels = []
refs = []
hyps = []
with open('data/stsbenchmark/sts-test.csv') as f:
reader = csv.reader(f, delimiter='\n')
test_list = []
for line in reader:
test = line[0].split('\t')
labels.append(float(test[4]))
refs.append(test[5])
hyps.append(test[6])
# calculate correlation
print('Get scores on STSb-test. Processing ..')
similarity_scores_representation = []
# similarity_scores_embeddings = []
for cnt, (ref, hyp) in enumerate(zip(refs, hyps)):
if (cnt+1) % 200 == 0:
print(cnt+1, end=', ')
instances = create_testing_instances(ref, tokenizer,
FLAGS.max_seq_length)
batch_input_ids = []
batch_input_mask = []
for _instance in instances:
_input_ids = [word_to_id[_token] for _token in _instance.input_tokens]
_input_mask = [1] * len(_input_ids)
batch_input_ids.append(_input_ids)
batch_input_mask.append(_input_mask)
feed_dict = {input_ids : batch_input_ids,
input_mask : batch_input_mask,
}
[representations_ref, embeddings_ref] = sess.run([input_tensor, input_embeddings], feed_dict=feed_dict)
instances = create_testing_instances(hyp, tokenizer,
FLAGS.max_seq_length)
batch_input_ids = []
batch_input_mask = []
for _instance in instances:
_input_ids = [word_to_id[_token] for _token in _instance.input_tokens]
_input_mask = [1] * len(_input_ids)
batch_input_ids.append(_input_ids)
batch_input_mask.append(_input_mask)
feed_dict = {input_ids : batch_input_ids,
input_mask : batch_input_mask,
}
[representations_hyp, embeddings_hyp] = sess.run([input_tensor, input_embeddings], feed_dict=feed_dict)
sentence_representation_mean_ref = np.mean(representations_ref[1:-1], axis=0)
sentence_representation_mean_hyp = np.mean(representations_hyp[1:-1], axis=0)
score = cosine_similarity([sentence_representation_mean_ref], [sentence_representation_mean_hyp])
similarity_scores_representation.append(score[0][0])
# sentence_embeddings_mean_ref = np.mean(embeddings_ref[0][1:-1], axis=0)
# sentence_embeddings_mean_hyp = np.mean(embeddings_hyp[0][1:-1], axis=0)
# score = cosine_similarity([sentence_embeddings_mean_ref], [sentence_embeddings_mean_hyp])
# similarity_scores_embeddings.append(score[0][0])
print('')
print('STSb-test (context):', sp.stats.pearsonr(labels, similarity_scores_representation)[0])
# print('STSb-test (embed) :', sp.stats.pearsonr(labels, similarity_scores_embeddings)[0])
``` |
{
"source": "joongh/configbuilder",
"score": 3
} |
#### File: joongh/configbuilder/validator.py
```python
import ast
import socket
import os
class Validator(object):
def __init__(self):
self.TYPE_PREFIX = {
'list of': self.get_validate_list_of,
'config in': self.get_validate_config_in,
}
def get_validator(self, t):
validator_name = 'validate_%s' % t.strip().lower()
try:
return getattr(self, validator_name)
except AttributeError as err:
for key, val in self.TYPE_PREFIX.iteritems():
if t.lower().startswith(key):
return self.TYPE_PREFIX[key](t[len(key):].strip())
raise err
def get_validate_list_of(self, subtype):
name = 'validate_list_of_%s' % subtype.strip().lower()
validate_func = lambda v: self._validate_list_of(v, subtype)
setattr(self, name, validate_func)
return validate_func
def get_validate_config_in(self, configname):
name = 'validate_config_in_%s' % configname.strip().lower()
validate_func = lambda v: self._validate_config_in(v, configname)
setattr(self, name, validate_func)
return validate_func
def validate_string(self, value):
if not isinstance(value, basestring):
raise ValueError('Value should be a string.')
return value.strip()
def validate_boolean(self, value):
if isinstance(value, basestring):
value = ast.literal_eval(value)
if type(value) is not bool:
raise ValueError('Value should be a boolean.')
return value
def validate_integer(self, value):
return int(value)
def validate_ip(self, value):
if not isinstance(value, basestring):
raise ValueError('Value should be a IP address string.')
try:
socket.inet_pton(socket.AF_INET, value)
except socket.error as err:
raise ValueError(err.message)
else:
return value.strip()
def validate_path(self, value):
if not isinstance(value, basestring):
raise ValueError('Value should be a path string.')
return os.path.normpath(value)
def validate_existingpath(self, value):
value = self.validate_path(value)
if not os.path.exists(value):
raise ValueError('%s does not exist.' % value)
return value
def validate_filepath(self, value):
value = self.validate_existingpath(value)
if not os.path.isfile(value):
raise ValueError('%s does not a file.' % value)
return value
def validate_directorypath(self, value):
value = self.validate_existingpath(value)
if not os.path.isdir(value):
raise ValueError('%s does not a directory.' % value)
return value
def validate_list(self, value):
if type(value) is not list:
raise ValueError('Value should be a list')
return value
def _validate_list_of(self, value, subtype):
subvalidator = self.get_validator(subtype)
value = self.validate_list(value)
return [subvalidator(v) for v in value]
def _validate_choiceses(self, value, choices):
value = self.validate_string(value)
if value not in choices:
raise ValueError('Value should be in %s' % choices)
return value
def _validate_config_in(self, value, configname):
return '%s/%s' % (configname, value.strip())
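# Usage sketch (hypothetical values, not part of the original module): type strings are
# resolved dynamically, including the prefixed forms handled above.
#   v = Validator()
#   v.get_validator('integer')('42')              # -> 42
#   v.get_validator('list of ip')(['127.0.0.1'])  # -> ['127.0.0.1']
#   v.get_validator('config in network')('eth0')  # -> 'network/eth0'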
``` |
{
"source": "joonholee-research/MUDA",
"score": 3
} |
#### File: MUDA/toy/dataset.py
```python
import torch
from torch.utils.data import Dataset
class MyDataset(Dataset):
def __init__(self, data, target, transform=None):
self.data = torch.from_numpy(data).float()
self.target = torch.from_numpy(target).long()
self.transform = transform
def __getitem__(self, index):
x = self.data[index]
y = self.target[index]
if self.transform:
x = self.transform(x)
return x, y
def __len__(self):
return len(self.data)
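# Usage sketch (hypothetical arrays, not part of the original file): wrap numpy data so a
# torch DataLoader can batch it.
#   import numpy as np
#   ds = MyDataset(np.random.randn(10, 2), np.zeros(10, dtype=np.int64))
#   x, y = ds[0]  # x: FloatTensor of shape (2,), y: LongTensor scalar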
``` |
{
"source": "JoonHong-Kim/KoDALLE",
"score": 2
} |
#### File: JoonHong-Kim/KoDALLE/loader.py
```python
from random import randint, choice
from pathlib import Path
from typing import Tuple
from PIL import Image, UnidentifiedImageError
import torch
from torch.utils.data import Dataset
from torchvision.transforms import transforms
from transformers import AutoTokenizer
from preprocess import remove_style, remove_subj
class TextImageDataset(Dataset):
def __init__(
self,
text_folder: str,
image_folder: str,
text_len: int,
image_size: int,
truncate_captions: bool,
resize_ratio: float,
tokenizer: AutoTokenizer = None,
shuffle: bool = False,
) -> None:
"""
@param folder: Folder containing images and text files matched by their paths' respective "stem"
@param truncate_captions: Rather than throw an exception, captions which are too long will be truncated.
"""
super().__init__()
self.shuffle = shuffle
# path = Path(folder)
self.tokenizer = tokenizer
text_path = Path(text_folder)
text_files = [*text_path.glob("**/*[0-9].txt")]
image_folder = image_folder
image_path = Path(image_folder)
image_files = [
*image_path.glob("**/*[0-9].png"),
*image_path.glob("**/*[0-9].jpg"),
*image_path.glob("**/*[0-9].jpeg"),
]
text_files = {text_file.stem: text_file for text_file in text_files}
image_files = {image_file.stem: image_file for image_file in image_files}
keys = image_files.keys() & text_files.keys()
self.keys = list(keys)
self.text_files = {k: v for k, v in text_files.items() if k in keys}
self.image_files = {k: v for k, v in image_files.items() if k in keys}
self.text_len = text_len
self.truncate_captions = truncate_captions
self.resize_ratio = resize_ratio
self.tokenizer = tokenizer
self.image_transform = transforms.Compose(
[
transforms.Lambda(
lambda img: img.convert("RGB") if img.mode != "RGB" else img
),
transforms.Resize([image_size, image_size]),
transforms.ToTensor(),
]
)
def __len__(self) -> int:
return len(self.keys)
def __getitem__(self, ind: int) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:
key = self.keys[ind]
text_file = self.text_files[key]
image_file = self.image_files[key]
descriptions = text_file.read_text(encoding="utf-8")
descriptions = remove_style(descriptions).split("\n")
descriptions = list(filter(lambda t: len(t) > 0, descriptions))
try:
description = choice(descriptions)
except IndexError as zero_captions_in_file_ex:
print(f"An exception occurred trying to load file {text_file}.")
print(f"Skipping index {ind}")
return self.skip_sample(ind)
# ADD PREPROCESSING FUNCTION HERE
encoded_dict = self.tokenizer(
description,
return_tensors="pt",
padding="max_length",
truncation=True,
max_length=self.text_len,
add_special_tokens=True,
return_token_type_ids=False, # for RoBERTa
)
# flattens nested 2D tensor into 1D tensor
flattened_dict = {i: v.squeeze() for i, v in encoded_dict.items()}
input_ids = flattened_dict["input_ids"]
attention_mask = flattened_dict["attention_mask"]
try:
image_tensor = self.image_transform(Image.open(image_file))
except (UnidentifiedImageError, OSError) as corrupt_image_exceptions:
print(f"An exception occurred trying to load file {image_file}.")
print(f"Skipping index {ind}")
return self.skip_sample(ind)
return input_ids, image_tensor, attention_mask
def random_sample(self):
return self.__getitem__(randint(0, self.__len__() - 1))
def sequential_sample(self, ind):
if ind >= self.__len__() - 1:
return self.__getitem__(0)
return self.__getitem__(ind + 1)
def skip_sample(self, ind):
if self.shuffle:
return self.random_sample()
return self.sequential_sample(ind=ind)
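# Usage sketch for TextImageDataset (hypothetical paths and tokenizer name, not from the repo):
# text and image files are matched by filename stem, and each item yields
# (input_ids, image_tensor, attention_mask) for DALLE-style training.
#   tokenizer = AutoTokenizer.from_pretrained("klue/roberta-base")
#   ds = TextImageDataset("texts/", "images/", text_len=64, image_size=256,
#                         truncate_captions=True, resize_ratio=0.75,
#                         tokenizer=tokenizer, shuffle=True)
#   input_ids, image_tensor, attention_mask = ds[0]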
class ImgDatasetExample(Dataset):
"""only for baseline cropped images"""
def __init__(
self, image_folder: str, image_transform: transforms.Compose = None,
) -> None:
self.image_transform = image_transform
self.image_path = Path(image_folder)
self.image_files = [
*self.image_path.glob("**/*.png"),
*self.image_path.glob("**/*.jpg"),
*self.image_path.glob("**/*.jpeg"),
]
def __getitem__(self, index: int) -> torch.tensor:
image = Image.open(self.image_files[index])
if self.image_transform:
image = self.image_transform(image)
return torch.tensor(image)
def __len__(self) -> int:
return len(self.image_files)
``` |
{
"source": "JoonHong-Kim/T2I_CL",
"score": 3
} |
#### File: DM-GAN+CL/code/masks.py
```python
import torch
def mask_correlated_samples(args):
mask = torch.ones((args.batch_size * 2, args.batch_size * 2), dtype=bool)
mask = mask.fill_diagonal_(0)
for i in range(args.batch_size):
mask[i, args.batch_size + i] = 0
mask[args.batch_size + i, i] = 0
return mask
def mask_correlated_samples_2(batch_size):
mask = torch.ones((batch_size * 2, batch_size * 2), dtype=bool)
mask = mask.fill_diagonal_(0)
for i in range(batch_size):
mask[i, batch_size + i] = 0
mask[batch_size + i, i] = 0
return mask
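# Shape sketch (illustrative values only): for batch_size=4 the mask is an 8x8 boolean
# matrix that is True for valid negatives and False on the diagonal and at the
# positive pairs (i, batch_size + i), as used in NT-Xent style contrastive losses.
#   m = mask_correlated_samples_2(4)   # m.shape == torch.Size([8, 8]); m[0, 4] is False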
``` |
{
"source": "JoonHyeongPark/GeneMethyl",
"score": 3
} |
#### File: GeneMethyl/Source code/BetavalueDistribution.py
```python
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot
matplotlib.pyplot.style.use('ggplot')
import seaborn as sns
def DrawDensityPlot(disease, roughness_cutoff, whether_histogram) :
def GetOnlyValidBetavalueRow(line, length) :
betavalue_row = []
for k in range(length) :
if(line[k] == "NA") : continue
betavalue_row.append(float(line[k]))
return betavalue_row
input_tumor = open(disease + ".DNA_methylation_450K.tsv", 'r')
input_tumor.readline() # sample line
section_summation = [0 for i in range(int(1 / roughness_cutoff) + 1)]
while(True) :
line1 = input_tumor.readline().replace("\t\n", "\tNA\n").replace("\t\t", "\tNA\t").replace("\t\t", "\tNA\t").split()
if(len(line1) == 0) : break
site_id = line1.pop(0)
betavalue_row = GetOnlyValidBetavalueRow(line1, len(line1)) # getting betavalue for each cpg site
for value in betavalue_row : section_summation[int(value / roughness_cutoff)] += 1
path = os.getcwd() + "/Result"
if not os.path.exists("Result/DistributionPlot") : os.mkdir("Result/DistributionPlot")
sns.set()
sns.distplot(section_summation, kde = True, hist = whether_histogram)
matplotlib.pyplot.title(disease + " Betavalue Distribution Plot")
matplotlib.pyplot.xlabel("Betavalue")
matplotlib.pyplot.grid(True)
figure = matplotlib.pyplot.gcf()
matplotlib.pyplot.show()
figure.savefig(path + "/DistributionPlot/" + disease + ".Betavalue.Distribution.Plot.pdf")
return section_summation
#roughness_cutoff = float(0.001)
#disease = "PANCANCER"
#whether_histogram = True
#Draw(disease, roughness_cutoff, True)
``` |
{
"source": "JoonHyeongPark/IMMethyl",
"score": 3
} |
#### File: IMMethyl/Betavalue Count In Segment/Histogram.py
```python
import pandas as pd
import matplotlib.pyplot as plt
gap = [0.05, 0.025, 0.01, 0.001, 0.0001]
def Draw(cutoff_gap) :
input_file = open(str(cutoff_gap) + "cutoff.count_array.txt", 'r')
input_table = input_file.readlines()
histogram_vector = []
for line in input_table : histogram_vector.append(int(line.split()[2]))
plt.hist(histogram_vector)
plt.show()
input_file.close()
return
Draw(0.05)
```
#### File: CpG site Correlation/Debug/Debug_Specific_CpGsite_Pancancer.py
```python
from operator import itemgetter
from scipy import stats
import numpy as np
betavalue_arr = []
cytoact_arr = []
probe_name = []
sample_id = []
######################################################################################################################################################
def getting_cytoact() :
cytoact_file = open("TCGA_methylation_cowork_1.txt", 'r')
header = cytoact_file.readline().split() # read the header line
id_posit = header.index("id") # sample ID positioning
cytoact_posit = header.index("CytAct") # CytAct positioning
cytodata = cytoact_file.readlines() # read the whole data table at once
cytoact_file.close()
for line in cytodata :
line = line.split()
sample_id.append(line[id_posit].replace('_', '')) # extract sample IDs (used as the template)
sample_count = len(sample_id)
for i in range(0, sample_count) : cytoact_arr.append(None) # initialize the CytAct value table
for line in cytodata :
line = line.split() # split one sample's row to extract its CytAct value
if(line[cytoact_posit] != "NA") : # if the CytAct value is not missing
sample_posit = sample_id.index(line[id_posit].replace('_', ''))
cytoact_arr[sample_posit] = float(line[cytoact_posit]) # store it
return;
######################################################################################################################################################
getting_cytoact()
print("CytAct_Completed")
######################################################################################################################################################
def reset_betavalue() :
del betavalue_arr[:]
for reset_x in range(0, probe_separation_number) : betavalue_arr.append({})
return
######################################################################################################################################################
output = open("debug2.txt", 'w')
filename1 = open("PANCANCER.humanmethylation450.tumor.txt", 'r') # cancer name별로 파일명이 다름을 고려해줌
sample_name = filename1.readline().split(); filename1.readline()
del sample_name[0]; del sample_name[0]
now_target = filename1.readline().split()
probe_name = now_target.pop(0)
output.write("%s\n" % probe_name)
column1 = []
column2 = []
for i in range(0, len(sample_name)) :
sample_name[i] = sample_name[i][:15].replace('-', '')
if(sample_name[i] in sample_id and now_target[i] != "NA") :
posit = sample_id.index(sample_name[i])
printline = "%s\t%s\t%s\n" % (sample_name[i], now_target[i], cytoact_arr[posit])
column1.append(float(now_target[i]))
column2.append(float(cytoact_arr[posit]))
output.write(printline)
cor = stats.spearmanr(column1, column2)
lastprint = "%f\t%f\n" % (cor[0], cor[1])
output.write(lastprint)
output.close()
print("END")
```
#### File: IMMethyl/Filtering Invalid Samples/ValidSampleByCytAct.py
```python
cancerlist = ["ACC", "BLCA", "BRCA", "CESC", "CHOL", "COAD", "DLBC", "ESCA", "GBM", "HNSC", "KICH", "KIRC", "KIRP", "LGG", "LIHC", "LUAD", "LUSC", "MESO", "OV", "PAAD", "PCPG", "PRAD", "READ", "SARC", "SKCM", "STAD", "TGCT", "THCA", "THYM", "UCEC", "UCS", "UVM", "PANCANCER"]
input_file1 = []
probe_count = 485577
sample_id = []
cytoact = []
sample_index = []
def GetSample() :
cytoact_file = open("PANCANCER.CytAct.txt", 'r')
header = cytoact_file.readline().split() # getting header
id_posit = header.index("id") # sample ID positioning
cytoact_posit = header.index("CytAct") # CytAct positioning
cytodata = cytoact_file.readlines() # read data table
cytoact_file.close()
count = 0
global sample_id
global cytoact
for line in cytodata :
line = line.split()
sample_id.append(line[id_posit].replace('-', '')) # sample ID extraction
cytoact.append(float(line[cytoact_posit])) # CytAct extraction
count += 1
return count # Sample number return
sample_number = GetSample()
for i in range(0, len(cancerlist)) :
input_tumor = open(cancerlist[i] + ".humanmethylation450.tumor.txt", 'r')
sample_header1 = input_tumor.readline().split() # sample line
input_tumor.readline() # junk line
del sample_header1[0]; del sample_header1[0]
sample_index = []
sample_binary_table = []
length = len(sample_header1)
invalid_sample = []
for j in range(0, length) :
sample_header1[j] = sample_header1[j][:15].replace('-', '')
if(sample_header1[j] in sample_id) : sample_index.append(sample_id.index(sample_header1[j]))
else :
invalid_sample.append(sample_header1[j])
sample_index.append(-1)
print(len(invalid_sample))
output_file = open(cancerlist[i] + ".Invalid.Samples.By.CytAct.txt", 'w')
for invalid_sample_id in invalid_sample : output_file.write(invalid_sample_id + "\n")
``` |
{
"source": "joonhyuk96sj/pequod",
"score": 3
} |
#### File: pequod/scripts/pq-data-process.py
```python
import sys
import fileinput
import operator
users = 1800000
def init_dict(dict_data):
global users
for i in range(users):
dict_data[i] = 0
def print_dict(dict_type, dict_data):
print(dict_type)
for key, value in sorted(dict_data.items()):
print(key, value)
print("\n")
def print_dict_all(dict_type, dict_pos, dict_sub, dict_sub_r, dict_sscan, dict_pscan):
print(dict_type)
for key, value in sorted(dict_pos.items(), key=operator.itemgetter(1)):
print(key, value, dict_sub[key], dict_sub_r[key], dict_sscan[key], dict_pscan[key])
print("\n")
if __name__ == "__main__":
if len(sys.argv) != 2:
sys.exit()
fname = sys.argv[1]
d_sub = {}
d_sub_r = {}
d_pos = {}
d_sscan = {}
d_pscan = {}
init_dict(d_sub)
init_dict(d_sub_r)
init_dict(d_pos)
init_dict(d_sscan)
init_dict(d_pscan)
# ----------------------------------
for line in fileinput.input([fname]):
parsing = line.split('\n')[0].split()
if len(parsing) <= 1:
continue
if parsing[0] != "[DB]":
continue
if parsing[1] != "PUT" and parsing[1] != "SCAN":
continue
key = parsing[2]
key_info = key.split('|')
key_type = key_info[0]
key_user = int(key_info[1])
if parsing[1] == "PUT":
if key_type == 's':
cnt = d_sub.get(key_user)
d_sub[key_user] = cnt+1
key_user_2 = int(key_info[2])
cnt = d_sub_r.get(key_user_2)
d_sub_r[key_user_2] = cnt+1
elif key_type == 'p':
cnt = d_pos.get(key_user)
d_pos[key_user] = cnt+1
key_time = int(key_info[2])
if key_time >= 999999999:
break
else:
print("Invalid Type")
break
else:
if key_type == 's':
cnt = d_sscan.get(key_user)
d_sscan[key_user] = cnt+1
elif key_type == 'p':
cnt = d_pscan.get(key_user)
d_pscan[key_user] = cnt+1
else:
print("Invalid Type")
break
# ----------------------------------
#print_dict("sub dict", d_sub)
#print_dict("sub-r dict", d_sub_r)
#print_dict("pos dict", d_pos)
#print_dict("sscan dict", d_sscan)
#print_dict("pscan dict", d_pscan)
print_dict_all("ALL dict", d_pos, d_sub, d_sub_r, d_sscan, d_pscan)
``` |
{
"source": "jooni22/fabric-bolt",
"score": 2
} |
#### File: accounts/tests/test_views.py
```python
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import Group
from django.contrib.auth import get_user_model
User = get_user_model()
class TestAccountViews(TestCase):
def setUp(self):
password = '<PASSWORD>'
self.user = User.objects.create_superuser(email='<EMAIL>', password=password, first_name='ted')
self.user_john = User.objects.create_superuser(email='<EMAIL>', password=password, first_name='john')
# You'll need to log him in before you can send requests through the client
self.client.login(email=self.user.email, password=password)
def test_accounts_user_change(self):
g = Group.objects.get(name='Admin')
view = reverse('accounts_user_change', args=(self.user.pk,))
get_response = self.client.get(view)
self.assertTrue(get_response.status_code, 200)
post_response = self.client.post(view, {
'first_name': 'sue',
'email': '<EMAIL>',
'is_active': True,
'user_level': g.pk
})
u = User.objects.get(pk=self.user.pk)
self.assertEqual(u.first_name, 'sue')
def test_accounts_user_delete(self):
self.assertEqual(User.objects.all().count(), 2)
view = reverse('accounts_user_delete', args=(self.user_john.pk,))
post_response = self.client.post(view)
self.assertTrue(post_response.status_code, 302)
self.assertEqual(User.objects.all().count(), 1)
def test_accounts_user_view(self):
view = reverse('accounts_user_view', args=(self.user_john.pk,))
get_response = self.client.get(view)
self.assertTrue(get_response.status_code, 200)
self.assertTrue('deployment_table' in get_response.context)
self.assertEqual(len(get_response.context['deployment_table'].data), 0)
```
#### File: core/mixins/storages.py
```python
from django.core.files.storage import FileSystemStorage
class FileStorageCHMOD600(FileSystemStorage):
def __init__(self, location=None, base_url=None, file_permissions_mode=None, directory_permissions_mode=None):
return super(FileStorageCHMOD600, self).__init__(location=location, base_url=base_url,
file_permissions_mode=0o600, directory_permissions_mode=directory_permissions_mode)
```
#### File: fabric-bolt/fabric_bolt/fabfile.py
```python
import cgi
import datetime
import time
from tempfile import NamedTemporaryFile
from fabric.api import *
from fabric import colors
@task
def update():
"""Requires code_root env variable. Does a git pull and restarts the web server"""
require('code_root')
git_pull()
restart_web_server()
@task
def git_pull():
"""Does a git stash then a git pull on the project"""
run('cd %s; git stash; git pull' % (env.code_root))
@task
def restart_web_server():
"""Restart the web server"""
run('%s/apache2/bin/restart' % env.code_root_parent)
@task
def migrate():
"""Runs python manage.py migrate"""
run('cd %s; python manage.py migrate --settings=%s' % (env.code_root, env.settings_file))
@task
def collect_static():
"""Runs python manage.py collect_static --noinput"""
run('cd %s; python manage.py collectstatic --settings=%s --noinput' % (env.code_root, env.settings_file))
@task
def pip_install():
"""Runs pip install -r requirements/frozen.txt (for example site)"""
run('cd %s; pip install -r requirements/frozen.txt' % (env.code_root))
@task
def publish_changes():
"""Runs these functions in order (git_pull, pip_install, migrate, collect_static, restart_web_server)"""
git_pull()
pip_install()
migrate()
collect_static()
restart_web_server()
@task
def do_nothing():
for x in range(0, 20):
print 'nothing {}'.format(x)
time.sleep(0.2)
input = prompt('Enter something:')
for x in range(0, 20):
print 'nothing {} - {}'.format(x, input)
time.sleep(0.2)
@task
def color_test():
number = 1
for x in range(0, 2):
print colors.blue('{}: Blue text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.cyan('{}: cyan text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.green('{}: green text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.magenta('{}: magenta text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.red('{}: red text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.white('{}: white text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.yellow('{}: yellow text'.format(number), bold=False)
number += 1
time.sleep(0.2)
print colors.blue('{}: Blue text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.cyan('{}: cyan text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.green('{}: green text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.magenta('{}: magenta text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.red('{}: red text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.white('{}: white text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print colors.yellow('{}: yellow text bold'.format(number), bold=True)
number += 1
time.sleep(0.2)
print
@task
def test_env(argument="nothing"):
print("Task Arguments:")
print argument
print
print("Task Env:")
for x, y in env.iteritems():
print '{}: {}'.format(x, y)
@task
def update_sandbox_site(comment_text):
"""put's a text file on the server"""
file_to_deliver = NamedTemporaryFile(delete=False)
file_text = "Deployed at: {} <br /> Comment: {}".format(datetime.datetime.now().strftime('%c'), cgi.escape(comment_text))
file_to_deliver.write(file_text)
file_to_deliver.close()
put(file_to_deliver.name, '/var/www/html/index.html', use_sudo=True)
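# Invocation sketch (assuming the standard Fabric 1.x CLI; host and comment are placeholders):
#   fab -H deploy@example.com publish_changes
#   fab -H deploy@example.com update_sandbox_site:"new build"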
```
#### File: hosts/tests/test_general.py
```python
from django.test import TestCase
from fabric_bolt.hosts.models import full_domain_validator, ValidationError, Host
class SimpleTest(TestCase):
def test_full_domain_validator_wrong(self):
def validate():
full_domain_validator('http://www.google.com/')
self.assertRaises(ValidationError, validate)
def test_full_domain_validator_too_long(self):
def validate():
full_domain_validator('adawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawadawdawdawdawdawaaaaaadawdaw.com')
self.assertRaises(ValidationError, validate)
def test_full_domain_validator_label_too_long(self):
def validate():
full_domain_validator('example.comcomcomcomcomcomcomcomcomcomcomcomcomcomcomcomcomcomcomcomcomc')
self.assertRaises(ValidationError, validate)
def test_full_domain_validator_bad_characters(self):
def validate():
full_domain_validator('exa#mple.com')
self.assertRaises(ValidationError, validate)
def test_full_domain_validator_no_domain(self):
ret = full_domain_validator(False)
self.assertEqual(ret, None)
def test_full_domain_validator_valid(self):
full_domain_validator('google.com')
def test_full_domain_validator_valid_trailing_dot(self):
full_domain_validator('google.com.')
def test_hook_unicode(self):
host = Host()
host.name = '127.0.0.1'
host.alias = u'code'
self.assertEqual(unicode(host), u'code')
```
#### File: fabric_bolt/hosts/utils.py
```python
from Crypto.PublicKey import RSA
from django.core.files.base import ContentFile
from fabric_bolt.hosts import models
def create_ssh_config(remote_user='root', name='Auto Generated SSH Key',
file_name='fabricbolt_private.key', email='<EMAIL>', public_key_text=None,
private_key_text=None):
"""Create SSH Key"""
if not private_key_text and not public_key_text:
key = RSA.generate(2048)
pubkey = key.publickey()
private_key_text = key.exportKey('PEM')
public_key_text = pubkey.exportKey('OpenSSH')
ssh_config = models.SSHConfig()
ssh_config.name = name
ssh_config.private_key_file.save(file_name, ContentFile(private_key_text))
ssh_config.public_key = '{} {}'.format(public_key_text, email)
ssh_config.remote_user = remote_user
ssh_config.save()
return ssh_config
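# Usage sketch (hypothetical values): with no key texts a fresh 2048-bit RSA pair is
# generated and saved; pass both key texts to register an existing pair instead.
#   ssh_config = create_ssh_config(remote_user='deploy', email='deploy@example.com')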
```
#### File: fabric_bolt/hosts/views.py
```python
from django.views.generic import CreateView, UpdateView, DeleteView, DetailView, TemplateView, FormView
from django.core.urlresolvers import reverse_lazy, reverse
from django.contrib import messages
from django_tables2.views import SingleTableView
from fabric_bolt.core.mixins.views import MultipleGroupRequiredMixin, GroupRequiredMixin
from fabric_bolt.hosts import models, tables, forms
from fabric_bolt.hosts.utils import create_ssh_config
class HostList(MultipleGroupRequiredMixin, SingleTableView):
group_required = ['Admin', 'Deployer', ]
table_class = tables.HostTable
model = models.Host
class HostDetail(MultipleGroupRequiredMixin, DetailView):
group_required = ['Admin', 'Deployer', ]
model = models.Host
class HostCreate(MultipleGroupRequiredMixin, CreateView):
"""View for creating a host. Hosts let us know where we can shovel code to."""
group_required = ['Admin', 'Deployer', ]
model = models.Host
form_class = forms.HostCreateForm
template_name_suffix = '_create'
def form_valid(self, form):
"""First call the parent's form valid then let the user know it worked."""
form_valid_from_parent = super(HostCreate, self).form_valid(form)
messages.success(self.request, 'Host {} Successfully Created'.format(self.object))
return form_valid_from_parent
def get_success_url(self):
"""Send them back to the detail view for that host"""
return reverse('hosts_host_detail', kwargs={'pk': self.object.pk})
class HostUpdate(GroupRequiredMixin, UpdateView):
group_required = 'Admin'
model = models.Host
form_class = forms.HostUpdateForm
template_name_suffix = '_update'
def form_valid(self, form):
"""First call the parent's form valid then let the user know it worked."""
form_valid_from_parent = super(HostUpdate, self).form_valid(form)
messages.success(self.request, 'Host {} Successfully Updated'.format(self.object))
return form_valid_from_parent
def get_success_url(self):
""""""
return reverse('hosts_host_detail', kwargs={'pk': self.object.pk})
class HostDelete(GroupRequiredMixin, DeleteView):
group_required = 'Admin'
model = models.Host
success_url = reverse_lazy('hosts_host_list')
def delete(self, request, *args, **kwargs):
messages.success(self.request, 'Host {} Successfully Deleted'.format(self.get_object()))
return super(HostDelete, self).delete(self, request, *args, **kwargs)
class SSHKeys(TemplateView):
template_name = 'hosts/ssh_configs.html'
def get_view(self, *args, **kwargs):
return super(SSHKeys, self).get(self.request, *args, **kwargs)
def post(self, *args, **kwargs):
"""Create the SSH file & then return the normal get method..."""
existing_ssh = models.SSHConfig.objects.all()
if existing_ssh.exists():
return self.get_view()
remote_user = self.request.POST.get('remote_user', 'root')
create_ssh_config(remote_user=remote_user)
return self.get_view()
def get_context_data(self, **kwargs):
ssh_configs = models.SSHConfig.objects.all()
return {
'ssh_configs': ssh_configs,
}
class SSHKeysCreate(FormView):
form_class = forms.CreateSSHConfig
template_name = 'hosts/host_ssh_config_create.html'
success_url = reverse_lazy('hosts_ssh_config')
def form_valid(self, form):
create_ssh_config(
name=form.cleaned_data.get('name'),
private_key_text=form.cleaned_data.get('private_key'),
public_key_text=form.cleaned_data.get('public_key'),
remote_user=form.cleaned_data.get('remote_user'),
)
return super(SSHKeysCreate, self).form_valid(form)
class SSHKeyDelete(DeleteView):
model = models.SSHConfig
success_url = reverse_lazy('hosts_ssh_config')
```
#### File: fabric_bolt/projects/model_managers.py
```python
from django.db import models
class ActiveManager(models.Manager):
def get_queryset(self):
return super(ActiveManager, self).get_queryset().filter(date_deleted__isnull=True)
class ActiveDeploymentManager(models.Manager):
def get_queryset(self):
return super(ActiveDeploymentManager, self).get_queryset()\
.filter(date_deleted__isnull=True,
stage__date_deleted__isnull=True,
stage__project__date_deleted__isnull=True)
```
#### File: task_runners/basic/__init__.py
```python
from django.conf.urls import url
from ..base import BaseTaskRunnerBackend
class BasicStreamBackend(BaseTaskRunnerBackend):
def get_detail_template(self):
return 'task_runners/deployment_detail_basic.html'
def get_urls(self):
from .views import DeploymentOutputStream
return [
url(r'^output/$', DeploymentOutputStream.as_view(), name='projects_deployment_output'),
]
```
#### File: task_runners/basic/views.py
```python
import subprocess
import ansiconv
import sys
from django.conf import settings
from django.http import StreamingHttpResponse
from django.shortcuts import get_object_or_404
from django.views.generic import View
from fabric_bolt.projects.models import Deployment
from fabric_bolt.projects.signals import deployment_finished
from fabric_bolt.projects.views import StageSubPageMixin
from .. import backend
class DeploymentOutputStream(StageSubPageMixin, View):
"""
Deployment view does the heavy lifting of calling Fabric Task for a Project Stage
"""
def output_stream_generator(self):
if backend.get_task_details(self.project, self.object.task.name) is None:
return
try:
process = subprocess.Popen(
backend.build_command(self.project, self.object, self.request.session),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True,
executable=getattr(settings, 'SHELL', '/bin/bash'),
)
all_output = ''
yield '<link rel="stylesheet" type="text/css" href="/static/css/console-style.css">'
while True:
nextline = process.stdout.readline()
if nextline == '' and process.poll() is not None:
break
all_output += nextline
nextline = '<span class="output-line">{}</span>'.format(ansiconv.to_html(nextline))
yield nextline + ' '*1024
sys.stdout.flush()
self.object.status = self.object.SUCCESS if process.returncode == 0 else self.object.FAILED
yield '<span id="finished" style="display:none;">{}</span> {}'.format(self.object.status, ' '*1024)
self.object.output = all_output
self.object.save()
deployment_finished.send(self.object, deployment_id=self.object.pk)
except Exception as e:
message = "An error occurred: " + e.message
yield '<span class="output-line">{}</span>'.format(message) + ' '*1024
yield '<span id="finished" style="display:none;">failed</span> {}'.format('*1024')
def get(self, request, *args, **kwargs):
self.object = get_object_or_404(
Deployment,
stage=self.stage,
pk=int(kwargs['pk']),
status=Deployment.PENDING
)
resp = StreamingHttpResponse(self.output_stream_generator())
return resp
```
#### File: task_runners/channels/consumers.py
```python
import json
import os
import subprocess
from importlib import import_module
import ansiconv
import sys
import fcntl
from channels import Group
from channels.auth import channel_session_user_from_http, channel_session_user
from channels.sessions import channel_session
from django.conf import settings
from fabric_bolt.projects.models import Project, Deployment
from fabric_bolt.projects.signals import deployment_finished
from .. import backend
import time
def start_task(message):
time.sleep(1)
project = Project.objects.get(id=message.content['project_id'])
deployment = Deployment.objects.get(id=message.content['deployment_id'])
deployment.output = ''
deployment.save()
engine = import_module(settings.SESSION_ENGINE)
SessionStore = engine.SessionStore
session = SessionStore(message.content['session_key'])
if backend.get_task_details(project, deployment.task.name) is None:
return
process = subprocess.Popen(
backend.build_command(project, deployment, session),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
shell=True,
executable=getattr(settings, 'SHELL', '/bin/sh'),
close_fds=True
)
fd = process.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
while True:
try:
nextline = process.stdout.readline()
except IOError as e:
nextline = ''
if nextline == '' and process.poll() is not None:
break
#
# next_input = deployment.get_next_input()
# if next_input:
# process.stdin.write(next_input + '\n')
if nextline:
Group("deployment-{}".format(deployment.id)).send({
"text": json.dumps({
'status': 'pending',
'text': str('<span class="output-line">{}</span>'.format(ansiconv.to_html(nextline)))
}),
}, immediately=True)
deployment.add_output(nextline)
sys.stdout.flush()
Deployment.objects.filter(pk=deployment.id).update(
status=deployment.SUCCESS if process.returncode == 0 else deployment.FAILED
)
Group("deployment-{}".format(deployment.id)).send({
"text": json.dumps({
'status': deployment.SUCCESS if process.returncode == 0 else deployment.FAILED,
'text': ''
}),
}, immediately=True)
deployment_finished.send(deployment, deployment_id=deployment.pk)
@channel_session_user_from_http
def ws_connect(message):
message.reply_channel.send({"accept": True})
# Work out room name from path (ignore slashes)
deployment_id = message.content['path'].strip("/")
# Save room in session and add us to the group
message.channel_session['deployment_id'] = deployment_id
Group("deployment-{}".format(deployment_id)).add(message.reply_channel)
deployment = Deployment.objects.filter(pk=deployment_id)[0]
Group("deployment-{}".format(deployment_id)).send({
"text": json.dumps({
"text": deployment.get_formatted_output(),
'status': deployment.status
})
}, immediately=True)
# @channel_session
# def ws_receive(message):
# deployment = Deployment.objects.filter(pk=message.channel_session['deployment_id'])[0]
# deployment.add_input(message.content['text'])
@channel_session_user
def ws_disconnect(message):
Group("deployment-{}".format(message.channel_session['deployment_id'])).discard(message.reply_channel)
```
#### File: task_runners/channels/__init__.py
```python
from channels import Channel
from ..base import BaseTaskRunnerBackend
class ChannelsBackend(BaseTaskRunnerBackend):
def get_detail_template(self):
return 'task_runners/deployment_detail_channels.html'
def pre_start_task(self, deployment, project, request):
Channel("start_task").send({
"deployment_id": deployment.id,
"project_id": project.id,
"session_key": request.session.session_key
})
```
#### File: web_hooks/tests/test_forms.py
```python
from django.test import TestCase
from django.contrib.auth import get_user_model
from fabric_bolt.projects import models
from fabric_bolt.web_hooks import models as hook_models
from fabric_bolt.web_hooks import forms
User = get_user_model()
class TestHooksForms(TestCase):
project_type = None
project = None
stage = None
configuration = None
task = None
deployment = None
def setUp(self):
password = '<PASSWORD>'
self.user = User.objects.create_superuser(email='<EMAIL>', password=password)
# You'll need to log him in before you can send requests through the client
self.client.login(email=self.user.email, password=password)
self._create_project()
def _create_project(self):
# Bare bones project
project = models.Project()
project.name = 'TEST_PROJECT'
project.description = 'TEST_DESCRIPTION'
project.save()
# Bare bones stage
stage = models.Stage()
stage.project = project
stage.name = 'Production'
stage.save()
self.stage = stage
# Bare bones configuration
configuration = models.Configuration()
configuration.project = project
configuration.stage = stage
configuration.key = 'KEY'
configuration.value = 'VALUE'
configuration.prompt_me_for_input = True
configuration.save()
self.configuration = configuration
# Bare bones task
task = models.Task()
task.name = 'TASK_NAME'
task.save()
self.task = task
# Bare bones deployment
deployment = models.Deployment()
deployment.user = self.user
deployment.stage = stage
deployment.comments = 'COMMENTS'
deployment.output = 'OUTPUT'
deployment.task = task
deployment.save()
# Setup Hook
hook = hook_models.Hook()
hook.url = 'http://example.com'
hook.save()
project_hook = hook_models.Hook()
project_hook.url = 'http://example.com/project/hook/'
project_hook.project = project
project_hook.save()
self.deployment = deployment
self.hook = hook
self.project_hook = project_hook
self.project = project
def test_hook_create_form(self):
hook_form = forms.HookCreateForm(data={'project': self.project.pk, 'url': 'http://www.example.com'})
hook_form.save()
def test_hook_create_form_clean_project(self):
hook_form = forms.HookCreateForm(data={'project': self.project.pk, 'url': 'http://www.example.com'})
hook_form.cleaned_data = { 'project': self.project.pk}
p = hook_form.clean_project()
self.assertEqual(self.project.pk, p.pk)
def test_hook_create_form_clean_project_none(self):
hook_form = forms.HookCreateForm(data={'project': self.project.pk, 'url': 'http://www.example.com'})
hook_form.cleaned_data = { 'project': None}
p = hook_form.clean_project()
self.assertEqual(p, None)
def test_hook_update_form(self):
hook_form = forms.HookUpdateForm(instance=self.project, data={'project': self.project.pk, 'url': 'http://www.example.com'})
hook_form.is_valid()
hook_form.save()
```
#### File: web_hooks/tests/test_hooks.py
```python
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from model_mommy import mommy
from fabric_bolt.projects import models
from fabric_bolt.web_hooks import models as hook_models
from fabric_bolt.web_hooks.tasks import DeliverHook
import mock
User = get_user_model()
class TestHooks(TestCase):
project_type = None
project = None
stage = None
configuration = None
task = None
deployment = None
def setUp(self):
password = '<PASSWORD>'
self.user = User.objects.create_superuser(email='<EMAIL>', password=password)
# You'll need to log him in before you can send requests through the client
self.client.login(email=self.user.email, password=password)
self._create_project()
def _create_project(self):
# Bare bones project
project = models.Project()
project.name = 'TEST_PROJECT'
project.description = 'TEST_DESCRIPTION'
project.save()
# Bare bones stage
stage = models.Stage()
stage.project = project
stage.name = 'Production'
stage.save()
self.stage = stage
# Bare bones configuration
configuration = models.Configuration()
configuration.project = project
configuration.stage = stage
configuration.key = 'KEY'
configuration.value = 'VALUE'
configuration.prompt_me_for_input = True
configuration.save()
self.configuration = configuration
# Bare bones task
task = models.Task()
task.name = 'TASK_NAME'
task.save()
self.task = task
# Bare bones deployment
deployment = models.Deployment()
deployment.user = self.user
deployment.stage = stage
deployment.comments = 'COMMENTS'
deployment.output = 'OUTPUT'
deployment.task = task
deployment.save()
# Setup Hook
hook = hook_models.Hook()
hook.url = 'http://example.com'
hook.save()
project_hook = hook_models.Hook()
project_hook.url = 'http://example.com/project/hook/'
project_hook.project = project
project_hook.save()
self.deployment = deployment
self.hook = hook
self.project_hook = project_hook
self.project = project
def test_web_hooks(self):
self.assertEqual(2, self.project.web_hooks().count())
def test_global_web_hooks(self):
global_hooks = hook_models.Hook.objects.filter(project=None)
self.assertEqual(1, global_hooks.count())
def test_project_web_hooks(self):
project_hooks = hook_models.Hook.objects.filter(project=self.project)
self.assertEqual(1, project_hooks.count())
@mock.patch('fabric_bolt.web_hooks.tasks.requests')
def test_task_post_data(self, mock_requests):
mock_requests.post.return_value.status_code = 200
d = DeliverHook()
ret = d.post_data('http://www.example.com', {'junk': 'payload'})
self.assertEqual(ret.status_code, 200)
# def test_task_post_data_run(self):
#
# d = DeliverHook()
# ret = d.run('http://www.example.com', {'junk': 'payload'})
@mock.patch('fabric_bolt.web_hooks.tasks.requests')
def test_task_delete_hook_410(self, mock_requests):
# post_data deletes hooks when the status code is 410
mock_requests.post.return_value.status_code = 410
h = hook_models.Hook()
h.url = 'http://example.com/project/delete/me/'
h.project = self.project
h.save()
hook_id = h.pk
d = DeliverHook()
ret = d.post_data('http://example.com/api/123', {'junk': 'payload'}, hook_id)
def look_up_error(hook_id):
hook_models.Hook.objects.get(pk=hook_id)
self.assertRaises(hook_models.Hook.DoesNotExist, look_up_error, hook_id)
@mock.patch('fabric_bolt.web_hooks.tasks.requests')
def test_task_delete_hook(self, mock_requests):
# post_data deletes hooks when the status code is 410
mock_requests.post.return_value.status_code = 410
h = hook_models.Hook()
h.url = 'http://example.com/project/delete/me/'
h.project = self.project
h.save()
d = DeliverHook()
# We're testing we don't have hook deleted, since we're not passing in the hook id
ret = d.post_data('http://example.com/api/123', {'junk': 'payload'})
hook_models.Hook.objects.get(pk=h.pk)
# @override_settings(CELERY_EAGER_PROPAGATES_EXCEPTIONS=True,
# CELERY_ALWAYS_EAGER=True,
# BROKER_BACKEND='memory')
# def test_task_wrapper(self):
# from fabric_bolt.web_hooks.tasks import deliver_hook_wrapper
#
# deliver_hook_wrapper('http://www.example.com', {'dummy': 'payload'})
``` |
{
"source": "joonilahn/Deep-Classifier",
"score": 3
} |
#### File: joonilahn/Deep-Classifier/calculate_mean_std.py
```python
import os
import sys
import torch
from torch.utils.data import DataLoader
from torch.utils.data.dataset import Dataset
from torchvision import transforms
from datasets.dataset import CustomDataset
from transforms.custom_transforms import InvertColor
def online_mean_and_sd(loader):
"""Compute the mean and sd in an online fashion
Var[x] = E[X^2] - E^2[X]
"""
cnt = 0
use_gpu = True
fst_moment = torch.zeros(3)  # start the running moments at zero so the first update is exact
snd_moment = torch.zeros(3)
if use_gpu:
fst_moment = fst_moment.cuda()
snd_moment = snd_moment.cuda()
for i, data in enumerate(loader):
imgs = data[0]
imgs *= 255.0
if use_gpu:
imgs = imgs.cuda()
b, c, h, w = imgs.shape
nb_pixels = b * h * w
sum_ = imgs.sum(dim=0).sum(dim=-1).sum(dim=-1)
sum_of_square = (imgs ** 2).sum(dim=0).sum(dim=-1).sum(dim=-1)
fst_moment = (cnt * fst_moment + sum_) / (cnt + nb_pixels)
snd_moment = (cnt * snd_moment + sum_of_square) / (cnt + nb_pixels)
cnt += nb_pixels
print("Calculated batch {}/{}".format(i + 1, len(loader)), end="\r")
return fst_moment, torch.sqrt(snd_moment - fst_moment ** 2)
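# The two running moments above realize mean = E[X] and std = sqrt(E[X^2] - (E[X])^2),
# accumulated per channel over every pixel so the dataset never has to fit in memory.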
if __name__ == "__main__":
maindir = sys.argv[1]
outputfile = sys.argv[2]
train_transform = transforms.Compose(
[transforms.Resize((224, 224)),
transforms.ToTensor()]
)
dataset = CustomDataset(os.path.abspath(sys.argv[1]), transform=train_transform)
loader = DataLoader(dataset, batch_size=1600, pin_memory=False)
mean, std = online_mean_and_sd(loader)
print("Mean: {}, Std: {}".format(mean.data, std.data))
with open(outputfile, "w") as f:
f.write("Mean: {}, Std: {}".format(mean.data, std.data))
```
#### File: deep_classifier/logger/logger.py
```python
import logging
import logging.handlers
def CreateLogger(logger_name, filename):
# Create Logger
logger = logging.getLogger(logger_name)
# Check handler exists
if len(logger.handlers) > 0:
return logger # Logger already exists
logger.setLevel(logging.DEBUG)
logger.propagate = False
# formatter = logging.Formatter('\n[%(levelname)s|%(name)s|%(filename)s:%(lineno)s] %(asctime)s > %(message)s')
# Create Handlers
streamHandler = logging.StreamHandler()
streamHandler.setLevel(logging.INFO)
fileHandler = logging.FileHandler(filename)
fileHandler.setLevel(logging.DEBUG)
# streamHandler.setFormatter(formatter)
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
return logger
```
#### File: deep_classifier/models/basiccnn.py
```python
import torch
import torch.nn as nn
basic_cfg = [32, "M", 64, "M", 128, 128, "M", 256, 256, "M", 256, 256, "M"]
super_basic_cfg = [8, "M", 16, "M", 32, 32, "M"]
# super_basic_cfg2 = [16, "M", 32, "M", 64, 64, "M", 128, 128, "M"]
# super_basic_cfg3 = [8, "M", 16, 16, "M"]
class BasicCNN(nn.Module):
def __init__(self, features, num_classes=2, init_weights=True):
super(BasicCNN, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
self.classifier = nn.Sequential(
nn.Linear(256 * 7 * 7, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, 2048),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(2048, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
class SuperBasicCNN(nn.Module):
def __init__(self, features, num_classes=2, init_weights=True):
super(SuperBasicCNN, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
num_hidden = 1024
self.classifier = nn.Sequential(
nn.Linear(32 * 7 * 7, num_hidden),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(num_hidden, num_hidden),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(num_hidden, num_classes),
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, in_channels=1, batch_norm=False):
layers = []
in_channels = in_channels
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
def basic_cnn(num_classes=2, cfg=basic_cfg, pretrained=None):
model = BasicCNN(make_layers(cfg), num_classes=num_classes)
if pretrained:
model.load_state_dict(torch.load(pretrained))
print("Loaded %s" % pretrained)
return model
def super_basic_cnn(num_classes=2, cfg=super_basic_cfg, pretrained=None):
model = SuperBasicCNN(make_layers(cfg, in_channels=3), num_classes=num_classes)
if pretrained:
model.load_state_dict(torch.load(pretrained))
print("Loaded %s" % pretrained)
return model
```
#### File: deep_classifier/models/loader.py
```python
from .basiccnn import basic_cnn, super_basic_cnn
from .densenet import customDenseNet
from .inception import customInception
from .inceptionv4 import inception_v4
from .mobilenet import mobilenet
from .pnasnet import pnasnet5
from .resnet import customResNet, resnet18_gray
from .resnext import resnext
from .efficientnet import efficientNet
import re
def load_model(
modelname, num_classes, pretrained, batch_norm=True, finetune_from=False
):
if modelname.lower() == "inception-v4":
print("Loading %s" % modelname)
model = inception_v4(num_classes=num_classes, pretrained=pretrained)
return model
elif modelname.lower() == "inception-v3":
print("Loading %s" % modelname)
model = customInception(num_classes=num_classes, pretrained=pretrained)
return model
elif modelname.lower() == "pnasnet5":
print("Loading %s" % modelname)
model = pnasnet5(
num_classes=num_classes, pretrained=pretrained, finetune_from=finetune_from
)
return model
elif "resnet" in modelname.lower():
print("Loading %s" % modelname)
if "gray" in modelname.lower():
model = resnet18_gray(num_classes=num_classes, pretrained=pretrained)
else:
num_layers = int(re.search(r"\d+", modelname).group())
model = customResNet(
num_layers=num_layers,
num_classes=num_classes,
pretrained=pretrained,
finetune_from=finetune_from,
)
return model
elif "resnext" in modelname.lower():
print("Loading %s" % modelname)
model = resnext(
num_classes=num_classes,
modelname=modelname,
pretrained=pretrained,
finetune_from=finetune_from,
)
return model
elif "densenet" in modelname.lower():
print("Loading %s" % modelname)
num_layers = int(re.search(r"\d+", modelname).group())
model = customDenseNet(num_layers, num_classes, pretrained=pretrained)
return model
elif modelname.lower() == "mobilenet":
print("Loading %s" % modelname)
model = mobilenet(num_classes=num_classes, pretrained=pretrained)
return model
elif modelname.lower() == "basiccnn":
print("Loading %s" % modelname)
model = basic_cnn(num_classes=num_classes, pretrained=pretrained)
return model
elif modelname.lower() == "superbasiccnn":
print("Loading %s" % modelname)
model = super_basic_cnn(num_classes=num_classes, pretrained=pretrained)
return model
elif "efficientnet" in modelname.lower():
"""
modelname: efficientnet-b{number}
e.g. efficientnet-b6
"""
print("Loading %s" % modelname)
if modelname.endswith("gray"):
modelname = re.search(r"(efficientnet-b\d{1}).*", modelname).group(1)
model = efficientNet(
modelname,
num_classes,
in_channels=1,
pretrained=pretrained,
use_batchnorm=batch_norm,
)
else:
model = efficientNet(
modelname, num_classes, pretrained=pretrained, use_batchnorm=batch_norm
)
return model
else:
raise NotImplementedError
```
#### File: deep_classifier/optim/__init__.py
```python
from .lr_scheduler import GradualWarmupScheduler
from .radam import RAdam
import torch.optim
def get_optimizer(parameters, cfg):
"""
Get optimizer by name
- SGD: Typical Stochastic Gradient Descent. Default is SGD with nesterov momentum of 0.9.
- ADAM: Adam optimizer
- RADAM: Rectified Adam
"""
if cfg.SOLVER.OPTIMIZER == "SGD":
optimizer = torch.optim.SGD(
filter(lambda p: p.requires_grad, parameters),
lr=cfg.SOLVER.BASE_LR,
momentum=0.9,
nesterov=True,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
elif cfg.SOLVER.OPTIMIZER == "ADAM":
optimizer = torch.optim.Adam(
filter(lambda p: p.requires_grad, parameters),
lr=cfg.SOLVER.BASE_LR,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
elif cfg.SOLVER.OPTIMIZER == "RADAM":
from optim.radam import RAdam
optimizer = RAdam(
filter(lambda p: p.requires_grad, parameters),
lr=cfg.SOLVER.BASE_LR,
weight_decay=cfg.SOLVER.WEIGHT_DECAY,
)
return optimizer
def get_scheduler(optimizer, cfg):
"""
Get scheduler by name
- STATIC: No learning rate scheduling
- EXP_DECAY: Decay the learning rate exponentially by rate of lr_gamma
- COSINE: Scheduler the learning rate per cosine annealing.
"""
if cfg.SOLVER.LR_SCHEDULER == "STATIC":
scheduler = None
elif cfg.SOLVER.LR_SCHEDULER == "EXP_DECAY":
scheduler = torch.optim.lr_scheduler.ExponentialLR(
optimizer, cfg.SOLVER.LR_GAMMA
)
elif cfg.SOLVER.LR_SCHEDULER == "COSINE":
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, cfg.SOLVER.NUM_EPOCHS
)
elif cfg.SOLVER.LR_SCHEDULER == "MULTISTEP":
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, cfg.SOLVER.LR_MILESTONES, cfg.SOLVER.MULTISTEP_GAMMA
)
return scheduler
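# Usage sketch (assumes a cfg object exposing the SOLVER.* fields referenced above):
#   optimizer = get_optimizer(model.parameters(), cfg)
#   scheduler = get_scheduler(optimizer, cfg)  # None when SOLVER.LR_SCHEDULER == "STATIC"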
```
#### File: deep_classifier/transforms/get_transform.py
```python
from torchvision.transforms import (
Compose,
Normalize,
RandomHorizontalFlip,
Resize,
ToTensor,
)
from .custom_transforms import *
def get_test_transform(cfg):
transforms_dict = {
"Resize": Resize((cfg.DATASETS.IMG_HEIGHT, cfg.DATASETS.IMG_WIDTH)),
"RandomHorizontalFlip": RandomHorizontalFlip(0.5),
"ToTensor": ToTensor(),
"Normalize": Normalize(mean=cfg.DATASETS.MEAN, std=cfg.DATASETS.STD),
"InvertColor": InvertColor(),
"UpperLeftCrop": UpperLeftCrop(),
"UpperRightCrop": UpperRightCrop(),
"BottomLeftCrop": BottomLeftCrop(),
"ExpandTensorCH": ExpandTensorCH(),
"RightBottomCrop": RightBottomCrop()
}
test_transform_list = []
# get train_transform_list
for transform_type in cfg.DATASETS.TEST_TRANSFORM_TYPES:
test_transform_list.append(transforms_dict[transform_type])
return Compose(test_transform_list)
def get_train_val_transform(cfg):
"""
Define how images are transformed before feeding into a model.
Args:
- transforms_types(list(str))
"""
transforms_dict = {
"Resize": Resize((cfg.DATASETS.IMG_HEIGHT, cfg.DATASETS.IMG_WIDTH)),
"RandomHorizontalFlip": RandomHorizontalFlip(0.5),
"ToTensor": ToTensor(),
"Normalize": Normalize(mean=cfg.DATASETS.MEAN, std=cfg.DATASETS.STD),
"InvertColor": InvertColor(),
"UpperLeftCrop": UpperLeftCrop(),
"UpperRightCrop": UpperRightCrop(),
"BottomLeftCrop": BottomLeftCrop(),
"ExpandTensorCH": ExpandTensorCH(),
"RandomSwapImageRatio": RandomSwapImageRatio(),
"RandomRotation": RandomRotation(cfg.DATASETS.RANDOM_ROTATION),
"RightBottomCrop": RightBottomCrop()
}
train_transform_list = []
val_transform_list = []
# get train_transform_list
for transform_type in cfg.DATASETS.TRAIN_TRANSFORM_TYPES:
train_transform_list.append(transforms_dict[transform_type])
# get val_transform_list
for transform_type in cfg.DATASETS.TEST_TRANSFORM_TYPES:
val_transform_list.append(transforms_dict[transform_type])
# define transform
train_transform = Compose(train_transform_list)
val_transform = Compose(val_transform_list)
train_val_transforms = (train_transform, val_transform)
return train_val_transforms
```
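Likewise, a hedged sketch of driving the transform factory from a bare config object. Only the `DATASETS` fields read by the function are provided, the values (image size, normalization statistics, transform names) are illustrative, and the import path is an assumption based on the repository layout.
```python
from types import SimpleNamespace

from deep_classifier.transforms.get_transform import get_train_val_transform  # assumed path

cfg = SimpleNamespace(
    DATASETS=SimpleNamespace(
        IMG_HEIGHT=224,
        IMG_WIDTH=224,
        MEAN=[0.485, 0.456, 0.406],
        STD=[0.229, 0.224, 0.225],
        RANDOM_ROTATION=10,
        TRAIN_TRANSFORM_TYPES=["Resize", "RandomHorizontalFlip", "ToTensor", "Normalize"],
        TEST_TRANSFORM_TYPES=["Resize", "ToTensor", "Normalize"],
    )
)

train_tf, val_tf = get_train_val_transform(cfg)
# Both are torchvision Compose pipelines; apply them to PIL images,
# e.g. tensor = train_tf(pil_image), inside the dataset's __getitem__.
```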
#### File: joonilahn/Deep-Classifier/train.py
```python
import argparse
import os
import pathlib
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torchvision.transforms
from deep_classifier.config.defaults import get_cfg_defaults
from deep_classifier.datasets import get_dataloader
from deep_classifier.logger.logger import CreateLogger
from deep_classifier.logger.logger_utils import (make_dir, save_configs,
save_model, save_params,
savelogs)
from deep_classifier.models import load_model
from deep_classifier.optim import get_optimizer, get_scheduler
from tensorboardX import SummaryWriter
from torch.cuda.amp import GradScaler, autocast
from tqdm import tqdm
warnings.simplefilter("ignore", FutureWarning)
warnings.simplefilter("ignore", UserWarning)
# Parse console arguments
parser = argparse.ArgumentParser()
parser.add_argument(
"config", type=str, help="yaml type config file to be used for training"
)
parser.add_argument(
"--resume-train",
action="store_true",
help="if true, resume training from the checkpoint",
)
parser.add_argument(
"--fp16",
action="store_true",
help="Use mixed precision in training"
)
parser.add_argument("--checkpoint", type=str, help="a path for the checkpoint")
parser.add_argument(
"--opts",
help="Modify config options using the command-line",
default=[],
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
# Get configs from a config file
CFG = get_cfg_defaults()
CFG.merge_from_file(args.config)
CFG.merge_from_list(args.opts)
device_ids = ",".join(str(d) for d in CFG.SYSTEM.DEVICE_IDS)
os.environ["CUDA_VISIBLE_DEVICES"] = device_ids
# Create system logger
CFG.SOLVER.CKPT_DIR = os.path.join("checkpoint", CFG.SOLVER.SAVEDIR)
pathlib.Path(CFG.SOLVER.CKPT_DIR).mkdir(parents=True, exist_ok=True)
MY_LOGGER = CreateLogger("deep-classifier", CFG.SOLVER.CKPT_DIR + "/train_result.log")
MY_LOGGER.info(CFG)
def save_configfile(save_dir, config_filename):
savefile = os.path.join(save_dir, os.path.basename(config_filename))
with open(savefile, "w") as f:
f.write(CFG.dump())
def test_model(model, testloader, epoch, best_acc, num_classes, writer):
model.eval()
running_corrects = 0
num_data = 0
MY_LOGGER.info(
"==================================Validation phase=================================="
)
with torch.no_grad():
for data in tqdm(testloader, total=len(testloader)):
inputs, labels = data
inputs = inputs.cuda()
labels = labels.cuda()
# forward pass
outputs = model(inputs)
_, preds = torch.max(outputs.data, 1)
running_corrects += torch.sum(preds == labels.data).item()
num_data += inputs.size(0)
test_acc = running_corrects / num_data * 100.0
MY_LOGGER.info(
"Validation Accuracy for the {:d} test images: {:.2f}%".format(
num_data, test_acc
)
)
# tensorboard logging for every epoch
writer.add_scalar("Epoch Val accuracy", test_acc, epoch + 1)
if test_acc > best_acc:
best_acc = test_acc
best_model_wts = model.state_dict()
save_params(best_model_wts, CFG.SOLVER.CKPT_DIR, "best_weights.pth")
return test_acc, best_acc
# train model function
def train_model(
model,
criterion,
optimizer,
scheduler,
dataloaders,
num_classes,
start_epoch,
writer,
use_fp16=False
):
since = time.time()
best_model_wts = model.state_dict()
best_acc = 0.0
# Set iterations and epochs
global_iterations = 0
num_train = len(dataloaders["train"].dataset)
num_val = len(dataloaders["val"].dataset)
iter_per_epoch = len(dataloaders["train"])
num_epochs = CFG.SOLVER.NUM_EPOCHS
if use_fp16:
MY_LOGGER.info("Use automatic mixed precision for training.")
scaler = GradScaler()
for epoch in range(num_epochs):
epoch += start_epoch
MY_LOGGER.info("Epoch {}/{}".format(epoch + 1, num_epochs + start_epoch))
MY_LOGGER.info("-" * 10)
# Iterate over data
if epoch % CFG.SOLVER.VALID_INTERVAL == 0:
phases = ["train", "val"]
else:
phases = ["train"]
for phase in phases:
if phase == "train":
model.train(True)
if scheduler:
scheduler.step()
elif phase == "val":
val_acc, best_acc = test_model(
model, dataloaders[phase], epoch, best_acc, num_classes, writer
)
break
epoch_lr = optimizer.param_groups[0]["lr"]
running_loss = 0.0
running_num_examples = 0
running_corrects = 0
i_start_time = time.time()
for i, data in enumerate(dataloaders[phase]):
global_iterations += 1
# log start time for ith iteration
if i % CFG.SOLVER.PRINT_INTERVAL == 0:
i_start_time = time.time()
# get the inputs and labels
inputs, labels = data
inputs = inputs.cuda()
labels = labels.cuda()
model = model.cuda()
# zero the parameter gradients every iteration
optimizer.zero_grad()
# forward pass
with autocast(enabled=use_fp16):
if CFG.MODEL.BACKBONE == "inception-v3":
if phase == "train":
logit, aux_logit = model.forward(inputs)
else:
logit = model.forward(inputs)
else:
logit = model.forward(inputs)
_, preds = torch.max(logit.data, 1)
# compute loss
loss = criterion(logit, labels)
# backward + optimize only if in training phase
if phase == "train":
if use_fp16:
scaler.scale(loss).backward()
scaler.step(optimizer)
scaler.update()
else:
loss.backward()
optimizer.step()
# compute statistics
batch_loss = loss.item()
batch_corrects = torch.sum(preds == labels.data).item()
batch_acc = batch_corrects / inputs.size(0) * 100
running_loss += batch_loss
running_corrects += batch_corrects
running_num_examples += inputs.size(0)
# log key statistics with MY_LOGGER
if i % CFG.SOLVER.PRINT_INTERVAL == 0:
time_elapsed = ( time.time() - i_start_time ) / CFG.SOLVER.PRINT_INTERVAL
MY_LOGGER.info(
"Epoch: {0:}/{1:}, Iterations: {2:}/{3:}, {4:} loss: {5:6.4f}, accuracy: {6:.2f}%, lr: {7:.6f} time elapsed: {8:6.4f}".format(
epoch + 1,
num_epochs + start_epoch,
i + 1,
iter_per_epoch,
phase,
batch_loss,
batch_acc,
epoch_lr,
time_elapsed % 60,
)
)
i_start_time = time.time()
if i % 100 == 0:
# ============ TensorBoard logging ============#
# Save scalar logs every 100 iterations
# (1) Log the scalar values
avg_loss = running_loss / (i + 1)
avg_acc = running_corrects / running_num_examples * 100
savelogs(writer, phase, epoch, avg_loss, avg_acc, global_iterations)
epoch_loss = running_loss / len(dataloaders[phase])
epoch_acc = running_corrects / running_num_examples * 100
MY_LOGGER.info(
"Epoch {0:}/{1:} {2:} Loss: {3:.4f}, Accuracy: {4:.2f}% [{5:}/{6:}]".format(
epoch + 1,
num_epochs + start_epoch,
phase,
epoch_loss,
epoch_acc,
running_corrects,
running_num_examples,
)
)
# tensorboard logging for every epoch
writer.add_scalar("Epoch " + phase + " loss", epoch_loss, epoch + 1)
writer.add_scalar("Epoch " + phase + " accuracy", epoch_acc, epoch + 1)
writer.add_scalar("learning rate", epoch_lr, epoch + 1)
# deep copy the model
save_model(
model, optimizer, epoch, CFG.SOLVER.CKPT_DIR, "last_checkpoint.pth.tar"
)
time_elapsed = time.time() - since
MY_LOGGER.info(
"Training complete in {:.0f}m {:.0f}s".format(
time_elapsed // 60, time_elapsed % 60
)
)
def main():
# save config file
save_configfile(CFG.SOLVER.CKPT_DIR, args.config)
# load dataloader
dataloaders, num_classes = get_dataloader(CFG)
MY_LOGGER.info("Number of classes for the dataset is %d" % num_classes)
MY_LOGGER.info(dataloaders["train"].dataset._labeldict)
# load model
model = load_model(CFG.MODEL.BACKBONE, num_classes, CFG.MODEL.PRETRAINED,
batch_norm=CFG.MODEL.BATCH_NORM)
model = model.cuda()
MY_LOGGER.info(model)
# loss functions and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = get_optimizer(model.parameters(), CFG)
if CFG.SYSTEM.MULTI_GPU:
MY_LOGGER.info("Using DataParallel")
model = torch.nn.DataParallel(model)
scheduler = get_scheduler(optimizer, CFG)
if CFG.SOLVER.LR_WARMUP:
from deep_classifier.optim.lr_scheduler import GradualWarmupScheduler
scheduler = GradualWarmupScheduler(
optimizer,
multiplier=CFG.SOLVER.LR_MULTIPLIER,
total_epoch=CFG.SOLVER.WARMUP_EPOCHS,
after_scheduler=scheduler,
)
MY_LOGGER.info(
"Warmup lr for %d epochs.\n Initial LR: %f, LR after warmup %f"
% (
CFG.SOLVER.WARMUP_EPOCHS,
CFG.SOLVER.BASE_LR,
CFG.SOLVER.BASE_LR * CFG.SOLVER.LR_MULTIPLIER,
)
)
start_epoch = 0
# Load model state if resuming the train
if args.resume_train:
ckpt = torch.load(args.checkpoint)
model.load_state_dict(ckpt["state_dict"])
optimizer = ckpt["optimizer"]
start_epoch = ckpt["epoch"]
MY_LOGGER.info(
"Resuming the training. Start epoch is {}".format(start_epoch + 1)
)
# set tensorboard logger
logdir = "logs/" + CFG.SOLVER.SAVEDIR
pathlib.Path(logdir).mkdir(parents=True, exist_ok=True)
writer = SummaryWriter(logdir)
# Train the model
train_model(
model,
criterion,
optimizer,
scheduler,
dataloaders,
num_classes,
start_epoch,
writer,
use_fp16=args.fp16
)
if __name__ == "__main__":
main()
``` |
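For reference, a hedged sketch of the resume path: the checkpoint written by `save_model` each epoch is read back with the keys used in `main()` when `--resume-train` is passed. The config and checkpoint paths below are placeholders.
```python
# Typical invocations (arguments defined by the argparse block above):
#   python train.py configs/my_config.yaml --fp16
#   python train.py configs/my_config.yaml --resume-train \
#       --checkpoint checkpoint/<SAVEDIR>/last_checkpoint.pth.tar
import torch

ckpt = torch.load("checkpoint/<SAVEDIR>/last_checkpoint.pth.tar", map_location="cpu")  # placeholder path
model_state = ckpt["state_dict"]   # weights, as read in main()
optimizer_obj = ckpt["optimizer"]  # optimizer object, as read in main()
start_epoch = ckpt["epoch"]        # epoch counter used to offset the epoch loop
```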
{
"source": "joonion/algorithms-lying-down",
"score": 3
} |
#### File: algorithms-lying-down/SlidingWindows/LeetCode.53.MaximumSubarray.4.py
```python
from typing import List
def max_to_left(mid, low, S):
lmax, lsum = S[mid], 0
for i in range(mid, low - 1, -1):
lsum += S[i]
lmax = max(lmax, lsum)
return lmax
def max_to_right(mid, high, S):
rmax, rsum = S[mid], 0
for i in range(mid, high + 1):
rsum += S[i]
rmax = max(rmax, rsum)
return rmax
class Solution:
def maxSubArray(self, nums: List[int]) -> int:
if len(nums) == 0:
return 0
elif len(nums) == 1:
return nums[0]
else:
mid = len(nums) // 2
print(nums[:mid], nums[mid:])
lmax = max_to_left(mid - 1, 0, nums)
rmax = max_to_right(mid, len(nums) - 1, nums)
print(lmax, rmax, lmax + rmax)
return max(lmax + rmax, \
self.maxSubArray(nums[:mid]), \
self.maxSubArray(nums[mid:]))
s = Solution()
# n = [31, -41, 59, 26, -53, 58, 97, -93, -23, 84]
n = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
print(s.maxSubArray(n))
```
#### File: algorithms-lying-down/String/LeetCode.3.LongestSubstring.py
```python
class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
longest, current = "", ""
for j in range(len(s)):
i = current.find(s[j])
if i >= 0:
current = current[i + 1:]
current += s[j]
if len(longest) < len(current):
longest = current[0:len(current)]
print(longest, current)
return len(longest)
sol = Solution()
# sin = "abcabcbb"
sin = "pwwkew"
print(sol.lengthOfLongestSubstring(sin))
``` |
{
"source": "joonion/coding-for-problem-solving",
"score": 4
} |
#### File: coding-for-problem-solving/CodingTest.04/4.2.py
```python
def collatz(n):
# recursion: a recursive function
if n == 1:
return [1]
else:
if n % 2 == 0:
return [n] + collatz(n // 2)
else:
return [n] + collatz(3 * n + 1)
n = int(input())
seq = collatz(n)
print(seq)
for i in range(len(seq)):
print(seq[i], end=" ")
print(len(seq))
```
#### File: coding-for-problem-solving/CodingTest.04/4.3.py
```python
def collatz(n):
seq = []
while n > 1:
seq.append(n)
if n % 2 == 0:
n = n // 2
else:
n = 3 * n + 1
seq.append(n)
return seq
N, M = list(map(int, input().split()))
longest = 0
for i in range(N, M + 1):
seq = collatz(i)
if longest < len(seq):
longest = len(seq)
print(longest)
```
#### File: coding-for-problem-solving/Lecture.01/sum.2.py
```python
def sum2(n):
if n == 1:
return 1
else:
return n + sum2(n - 1)
N = 10
S = sum2(N)
print(S)
```
#### File: coding-for-problem-solving/Lecture.01/sum.4.py
```python
def sum4(n):
return n * (n + 1) // 2
N = 10
S = sum4(N)
print(S)
```
#### File: coding-for-problem-solving/Lecture.03/problem.1.py
```python
import random
def find_missing(S):
n = len(S) + 1
return (n**2 + n) // 2 - sum(S)
def find_missing_r(S, i):
if i not in S:
return i
else:
return find_missing_r(S, i-1)
N = int(input("Enter a natural number: "))
nums = [i for i in range(1, N + 1)]
S = random.sample(nums, len(nums) - 1)
print(S)
missing = find_missing(S)
print(missing)
```
#### File: coding-for-problem-solving/Lecture.04/find_largest.3.py
```python
import random
def find_largest(nums):
if len(nums) == 1:
return nums[0]
else:
largest = find_largest(nums[1:])
if nums[0] < largest:
return largest
else:
return nums[0]
sequence = list(range(10, 100))
N = 10
nums = random.sample(sequence, N)
MAX = find_largest(nums)
print(nums)
print(MAX, max(nums))
```
#### File: coding-for-problem-solving/Lecture.10/1.roman.1.py
```python
def to_arabic(char):
if char == "I":
return 1
elif char == "V":
return 5
elif char == "X":
return 10
elif char == "L":
return 50
elif char == "C":
return 100
elif char == "D":
return 500
elif char == "M":
return 1000
romans = "IVXLCDM"
for char in romans:
print(char, ":", to_arabic(char))
```
#### File: coding-for-problem-solving/Lecture.11/date.1.py
```python
year, month, day = 2021, 11, 25
n = 1000
def leapyear(y):
return (y % 4 == 0 and y % 100 != 0) or y % 400 == 0
def invalid(year, month, day):
num_of_days = 30 if month in [4, 6, 9, 11] else 31
if month == 2:
num_of_days = 29 if leapyear(year) else 28
return day > num_of_days
for _ in range(n):
day += 1
if invalid(year, month, day):
day, month = 1, month + 1
if month > 12:
month, year = 1, year + 1
print(year, month, day)
```
#### File: coding-for-problem-solving/Lecture.14/fibonacci.4.py
```python
def fib(n):
F = [0, 1]
if n <= 1:
return F[n]
else:
for i in range(2, n + 1):
F.append(F[i - 1] + F[i - 2])
return F[n]
for n in range(101):
print(n, ":", fib(n))
```
#### File: coding-for-problem-solving/Lecture.16/josephus.1.py
```python
def josephus(n, k):
q = [i for i in range(1, n + 1)]
while len(q) > 1:
for i in range(k - 1):
q.append(q.pop(0))
q.pop(0)
return q[0]
print(josephus(41, 3))
```
#### File: coding-for-problem-solving/Lecture.17/cryptanalyze.py
```python
from collections import OrderedDict
def is_valid(letters, words):
a, b, c = words
n = len(c)
carry = 0
for i in range(n - 1, -1, -1):
if any(letters[word[i]] is None for word in words):
return True
elif letters[a[i]] + letters[b[i]] + carry == letters[c[i]]:
carry = 0
elif letters[a[i]] + letters[b[i]] + carry == 10 + letters[c[i]]:
carry = 1
else:
return False
return True
def solve(letters, unassigned, nums, words):
if len(unassigned) == 0:
if (is_valid(letters, words)):
return letters
else:
return None
char = unassigned[0]
for num in list(nums):  # iterate over a copy, since nums is mutated inside the loop
letters[char] = num
nums.remove(num)
if is_valid(letters, words):
solution = solve(letters, unassigned[1:], nums, words)
if solution:
return solution
nums.add(num)
letters[char] = None
return False
def normalize(word, n):
diff = n - len(word)
return ['#'] * diff + word
def order_letters(words):
n = len(words[2])
letters = OrderedDict()
for i in range(n - 1, -1, -1):
for word in words:
if word[i] not in letters:
letters[word[i]] = None
return letters
def cryptarithm(problem):
words = list(map(list, problem))
n = len(words[2])
words[0] = normalize(words[0], n)
words[1] = normalize(words[1], n)
letters = order_letters(words)
unassigned = [c for c in letters if c != '#']
nums = set(range(0, 10))
return solve(letters, unassigned, nums, words)
words = ["SEND", "MORE", "MONEY"]
solution = cryptarithm(words)
print(solution)
for word in words:
for c in word:
print(c, solution[c])
```
#### File: coding-for-problem-solving/Lecture.17/cryptarithm.2.py
```python
from itertools import permutations
from time import time
def toint(s, map):
value = 0
for i in range(len(s)):
value += map[s[i]] * (10**i)
return value
def promising(i, x, y, z, map):
a = toint(x[:(i+1)], map)
b = toint(y[:(i+1)], map)
c = toint(z[:(i+1)], map)
limit = 10 ** (max(len(str(a)), len(str(b))))
return (a + b) % limit == c % limit
def is_valid(x, y, z, map):
if 0 in [map[x[-1]], map[y[-1]], map[z[-1]]]:
return False
a = str(toint(x, map))
b = str(toint(y, map))
c = str(toint(z, map))
return int(a) + int(b) == int(c)
def solveto(i, x, y, z, map):
global solved
print(i, map, solved)
if i == max(len(x), len(y)):
if is_valid(x, y, z, map):
solved.append(map)
print(solved)
else:
letters = set(x[i] + y[i] + z[i]) - map.keys()
digits = set(i for i in range(10)) - set(map.values())
perms = list(permutations(digits, len(letters)))
for perm in perms:
for (k, v) in zip(letters, perm):
map[k] = v
if promising(i, x, y, z, map):
solveto(i + 1, x, y, z, map)
for k in letters:
map.pop(k)
def solve(n, m, s):
empty = {}
return solveto(0, n[::-1], m[::-1], s[::-1], empty)
solved = []
if __name__=='__main__':
n, m, s = "SEND", "MORE", "MONEY"
start = time()
solve(n, m, s)
print(solved)
duration = time() - start
print("Elapsed:", duration)
for map in solved:
print(n, toint(n, map))
print(m, toint(m, map))
print(s, toint(s, map))
``` |
{
"source": "joonion/daily-coding-problems",
"score": 3
} |
#### File: daily-coding-problems/Chap.17/17.4.py
```python
def nth_sevenish_number(n):
answer, bit_place = 0, 0
while n > 0:
if n & 1 == 1:
answer += 7 ** bit_place
n >>= 1
bit_place += 1
return answer
# n = 1
# print(nth_sevenish_number(n))
for n in range(1, 10):
print(nth_sevenish_number(n))
``` |
{
"source": "joonion/fibona-chicken-number",
"score": 4
} |
#### File: joonion/fibona-chicken-number/FibonaChicken.2.py
```python
from math import sqrt, floor
phi = (1 + sqrt(5)) / 2
phi_ = 1 - phi
def Binet(i):
return round((phi ** i - phi_ ** i) / sqrt(5))
# n = 50
# for i in range(n + 1):
# print(i, Binet(i))
def FibonacciSequence(n):
F = [0, 1]
for i in range(2, n + 1):
F.append(F[i - 1] + F[i - 2])
return F
n = 1000
F = FibonacciSequence(n)
for i in range(n + 1):
if F[i] != Binet(i):
print("Oops!", i, F[i], Binet(i))
break
```
#### File: joonion/fibona-chicken-number/FibonaChicken.5.py
```python
from math import sqrt, log, floor
phi = (1 + sqrt(5)) / 2
phi_ = 1 - phi
def Binet(i):
return round((phi ** i - phi_ ** i) / sqrt(5))
def inverse_fibonacci(N):
return round(log(sqrt(5) * N) / log(phi))
def is_perfect(n):
rootn = floor(sqrt(n))
return True if rootn * rootn == n else False
def is_fibonacci(N):
x, y = 5 * N * N + 4, 5 * N * N - 4
return is_perfect(x) or is_perfect(y)
def FibonaChicken(N):
if is_fibonacci(N):
return Binet(inverse_fibonacci(N) - 1)
N = int(input("O benevolent one, how many people do you wish to feed? "))
C = FibonaChicken(N)
print("Then order", C, "chickens;")
print("it will be more than enough to feed", N, "people.")
``` |
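The `is_fibonacci` check above relies on the standard identity that a positive integer N is a Fibonacci number exactly when 5N²+4 or 5N²−4 is a perfect square. A small self-contained cross-check of that identity against a brute-force Fibonacci list:
```python
from math import isqrt  # Python 3.8+

def is_fib_by_identity(n):
    # N is Fibonacci  <=>  5*N^2 + 4 or 5*N^2 - 4 is a perfect square
    for m in (5 * n * n + 4, 5 * n * n - 4):
        r = isqrt(m)
        if r * r == m:
            return True
    return False

fibs, a, b = {1}, 1, 1
while b < 10000:
    a, b = b, a + b
    fibs.add(b)

assert all(is_fib_by_identity(n) == (n in fibs) for n in range(1, 10000))
```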
{
"source": "joonion/leetcode-in-python",
"score": 3
} |
#### File: leetcode-in-python/Array/0189.RotateArray.1.py
```python
from typing import List
class Solution:
def rotate(self, nums: List[int], k: int) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
n = len(nums)
t = nums[:]
for i in range(n):
nums[(i + k) % n] = t[i]
s = Solution()
# L, k = [1, 2, 3, 4, 5, 6, 7], 3
L, k = [-1, -100, 3, 99], 2
s.rotate(L, k)
print(L)
``` |
{
"source": "joonion/Project-Euler",
"score": 3
} |
#### File: Project-Euler/Python/010.py
```python
def solve(n):
sieve = [True] * (n + 1)
s = 0
for i in range(2, n + 1):
if sieve[i] == True:
s += i
for j in range(i + i, n + 1, i):
sieve[j] = False
return s
# n = 10
n = 2000000
print(solve(n))
```
#### File: Project-Euler/Python/011.py
```python
def solve(n, d, A):
largest, direction, maxi, maxj = 0, 0, 0, 0
for i in range(n - d + 1):
for j in range(n - d + 1):
hori, vert, diagup, diagdn = 1, 1, 1, 1
for k in range(d):
hori *= A[i][j + k]
vert *= A[i + k][j]
diagup *= A[i + k][j + k]
diagdn *= A[i + k][j - k + d - 1]
if largest < hori:
largest, direction, maxi, maxj = hori, 0, i, j
if largest < vert:
largest, direction, maxi, maxj = vert, 1, i, j
if largest < diagup:
largest, direction, maxi, maxj = diagup, 2, i, j
if largest < diagdn:
largest, direction, maxi, maxj = diagdn, 3, i, j + d - 1
return largest, direction, maxi, maxj
n, d = 20, 4
A = [[] for _ in range(n)]
for i in range(n):
A[i] = list(map(int, input().split()))
answer, direction, i, j = solve(n, d, A)
print(answer, direction, i, j)
```
#### File: Project-Euler/Python/016.py
```python
from math import factorial
def path(x, y, n):
if x > n or y > n:
return 0
elif x == n and y == n:
return 1
else:
return path(x + 1, y, n) + path(x, y + 1, n)
def path2(x, y, n, D):
if x > n or y > n:
return 0
elif x == n and y == n:
D[x][y] = 1
elif D[x][y] == -1:
D[x][y] = path2(x + 1, y, n, D) + path2(x, y + 1, n, D)
return D[x][y]
# returns binomial(2n, n): Draw the Pascal's triangle!
def path3(n):
return factorial(2 * n) // (factorial(n) ** 2)
def solve(n):
D = [[-1 for _ in range(n + 1)] for _ in range(n + 1)]
return path2(0, 0, n, D)
# n = 2
n = 20
answer = path3(n)
print(answer)
```
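As the comment on `path3` says, the number of monotone lattice paths across an n×n grid is the central binomial coefficient C(2n, n); a quick standalone cross-check of the closed form against the factorial version used above:
```python
from math import comb, factorial  # comb requires Python 3.8+

def count_paths(n):
    # C(2n, n) = (2n)! / (n! * n!)
    return comb(2 * n, n)

for n in range(1, 11):
    assert count_paths(n) == factorial(2 * n) // (factorial(n) ** 2)
print(count_paths(20))  # 137846528820 paths through the 20x20 grid
```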
#### File: Project-Euler/Python/025.py
```python
def solve(n):
F = [1, 1]
while len(str(F[-1])) < n:
F.append(F[-1] + F[-2])
return len(F)
# n = 3
n = 1000
answer = solve(n)
print(answer)
```
#### File: Project-Euler/Python/028.py
```python
def direction(dir, n, row, col):
if row == col and row <= 0:
return 0 # turn to the right
elif row == col and row > 0:
return 1 # turn to the left
elif row + col == 0 and col < 0:
return 2 # turn to the top
elif row + col == 1 and col > 0:
return 3 # turn to the bottom
return dir
def solve(n):
move = [(0, 1), (0, -1), (-1, 0), (1, 0)]
M = [[0 for _ in range(n)] for _ in range(n)]
row, col, dir = 0, 0, 0
for i in range(1, n * n + 1):
M[row + n // 2][col + n // 2] = i
dir = direction(dir, n, row, col)
row, col = row + move[dir][0], col + move[dir][1]
# returns the sum of the numbers on the diagonal
s = 0
for i in range(n):
s += M[i][i] + M[i][n - 1 - i]
return s - 1
# n = 5
n = 1001
answer = solve(n)
print(answer)
```
#### File: Project-Euler/Python/029.py
```python
def solve(n, m):
S = set([])
for a in range(n, m + 1):
for b in range(n, m + 1):
S.add(a ** b)
return len(S)
# n, m = 2, 5
n, m = 2, 100
answer = solve(n, m)
print(answer)
```
#### File: Project-Euler/Python/042.py
```python
from math import floor, sqrt
def is_triangle(word):
s = 0
for i in range(len(word)):
s += ord(word[i]) - ord('A') + 1
n = 8 * s + 1
if n == floor(sqrt(n)) ** 2:
return True
else:
return False
def solve(words):
count = 0
for i in range(len(words)):
if is_triangle(words[i]):
count += 1
return count
def trim(s):
return s.replace("\"", "")
words = list(map(trim, input().split(",")))
answer = solve(words)
print(answer)
```
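The square-root test in `is_triangle` comes from solving t = k(k+1)/2 for k: a positive integer t is triangular exactly when 8t+1 is a perfect square. A short standalone check of that equivalence:
```python
from math import isqrt

def is_triangular(t):
    # t = k*(k+1)/2 for some positive integer k  <=>  8*t + 1 is a perfect square
    m = 8 * t + 1
    r = isqrt(m)
    return r * r == m

triangulars = {k * (k + 1) // 2 for k in range(1, 201)}
assert all(is_triangular(t) == (t in triangulars) for t in range(1, 20101))
```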
#### File: Project-Euler/Python/043.py
```python
from itertools import permutations
def is_divisible(nstr):
p = [2, 3, 5, 7, 11, 13, 17]
for i in range(len(p)):
if int(nstr[(i + 1):(i + 4)]) % p[i] != 0:
return False
return True
def solve():
s = "0123456789"
perms = list(permutations(s))
total = 0
for i in range(len(perms)):
nstr = "".join(perms[i])
if is_divisible(nstr):
total += int(nstr)
return total
answer = solve()
print(answer)
```
#### File: Project-Euler/Python/057.py
```python
def solve(N):
count = 0
n, d = 1, 1
for _ in range(N):
n, d = n + 2 * d, n + d
if len(str(n)) > len(str(d)):
count += 1
return count
n = 1000
print(solve(n))
# n = 8
# for i in range(1, n + 1):
# solve(i)
# print()
```
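The update `n, d = n + 2*d, n + d` steps through the numerators and denominators of the continued-fraction convergents of √2 (3/2, 7/5, 17/12, …), which is the expansion Problem 57 asks about. A short check that the ratio converges:
```python
from math import sqrt

n, d = 1, 1
for _ in range(15):
    n, d = n + 2 * d, n + d
print(n, d, n / d, sqrt(2))  # the ratio n/d rapidly approaches sqrt(2)
```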
#### File: Project-Euler/Python/087.py
```python
from math import sqrt
def is_prime(n):
for i in range(2, int(sqrt(n)) + 1):
if n % i == 0:
return False
return True
def find_primes(maxp):
primes = [2]
n = 3
while n <= maxp:
if is_prime(n):
primes.append(n)
n += 2
return primes
def check_prime_power(primes, check):
for a in range(len(primes)):
if primes[a] ** 2 >= len(check): break
for b in range(len(primes)):
if primes[a] ** 2 + primes[b] ** 3 >= len(check): break
for c in range(len(primes)):
n = primes[a] ** 2 + primes[b] ** 3 + primes[c] ** 4
if n >= len(check): break
check[n] = 1
return sum(check)
def solve(n):
maxp = int(sqrt(n))
primes = find_primes(maxp)
check = [0] * n
return check_prime_power(primes, check)
# n = 50
n = 50000000
print(solve(n))
``` |
{
"source": "joonion/the-art-of-coding",
"score": 4
} |
#### File: Level.1/Step.02/2.3_SelectionSort.py
```python
def find_smallest(S):
smallest, position = S[0], 0
for i in range(len(S)):
if S[i] < smallest:
smallest, position = S[i], i
return position
def selection_sort(S):
sorted = []
while len(S) > 0:
position = find_smallest(S)
smallest = S.pop(position)
sorted.append(smallest)
return sorted
S = list(map(int, input().split()))
sorted = selection_sort(S)
print(sorted)
print(sorted[0], sorted[-1])
``` |
{
"source": "joonjCode/django-todo-rest-ajax",
"score": 2
} |
#### File: todo_drf/frontend/views.py
```python
from django.shortcuts import render
# Create your views here.
def list(req):
return render(req, 'frontend/list.html')
``` |
{
"source": "joonjCode/python",
"score": 3
} |
#### File: python/anthony/dec_t.py
```python
import functools
def dec(func):
@functools.wraps(func)
def dec_inner(*args, **kwargs):
print(f'got {args} {kwargs}')
ret = func(*args, **kwargs)
print('after')
return ret
return dec_inner
def dec2(greeting, farewell):
def dec2_decorator(func):
@functools.wraps(func)
def dec2_inner(*args, **kwargs):
print(greeting)
ret = func(*args, **kwargs)
print(farewell)
return ret
return dec2_inner
return dec2_decorator
@dec2('hello', 'goodbye')
def f(x:int) -> None:
print(f'hello {x}')
def main():
breakpoint()
f(1)
if __name__ == '__main__':
exit(main())
```
#### File: python/anthony/hello_test.py
```python
import hello
import pytest
def test_main(capsys):
hello.main(['Joon'])
out, err = capsys.readouterr()
assert out == 'Hello Joon\n'
assert err == ''
def test_main_error_with_emptystring(capsys):
assert hello.main([''])
out, err = capsys.readouterr()
assert out == ''
assert err == "Persons's name must not be empty\n"
```
#### File: python/async-webscraping/ascrape-multi.py
```python
from aiohttp import ClientSession
import asyncio
import pathlib
async def fetch(url, session, year):
async with session.get(url) as resp:
html_body = await resp.read()
return {'body':html_body, 'year':year}
async def main(start_year:int= 2020, years_ago:int=5):
html_body = ''
tasks= []
async with ClientSession() as session:
for i in range(0, years_ago):
year = start_year - i
url = f'https://www.boxofficemojo.com/year/{year}/'
print(f'year : {year} {url}')
tasks.append(
asyncio.create_task(
fetch(url, session, year)
)
)
pages_content = await asyncio.gather(*tasks)
return pages_content
results = asyncio.run(main())
# print(results)
output_dir = pathlib.Path().resolve() / 'snapshots'
output_dir.mkdir(parents=True, exist_ok=True)
for result in results:
curr_year = result.get('year')
html_data = result.get('body')
output_file = output_dir/f'{curr_year}.html'
output_file.write_text(html_data.decode())
```
#### File: python/command-line-tool/cli_fire.py
```python
import fire
from getpass import getpass
def hello(name = 'world'):
return f'hello {name}'
def login(name = None):
if name == None:
name = input('what is your name\n')
pw = getpass('<PASSWORD>')
return name, pw
if __name__ == '__main__':
fire.Fire(hello)
```
#### File: src/niz_os/main.py
```python
import pathlib
from flask import Flask
from .resources import get_resource_path
BASE_DIR = pathlib.Path(__file__).resolve().parent
DATA_DIR = get_resource_path('data')
IMG_PATH = DATA_DIR / 'python.jpg'
# init app
web_app = Flask(__name__)
@web_app.route('/', methods=['GET']) # http://localhost:5000
def index():
return {'dir': str(BASE_DIR), 'data_dir': str(DATA_DIR), 'IMG_PATH': str(IMG_PATH)}, 200
```
#### File: basics/generator_basic/basic.py
```python
def generator_func(num):
for i in range(num):
yield i*2
# g = generator_func(100)
# print(g) # object
# print(next(g)) # 0
# print(next(g)) # 2 4 6
for item in generator_func(100):
print(item)
def make_list(num):
result = []
for i in range(num):
result.append(i*2)
return result
```
#### File: python-scripting/network-programming/client.py
```python
import socket
import threading
nickname = input('Choose a nickname: ')
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(('127.0.0.1', 55555))
def receive():
while True:
try:
message = client.recv(1024).decode('ascii')
if message == 'Joon':
client.send(nickname.encode('ascii'))
else:
print(message)
except :
print('An error occurred')
client.close()
break
def write():
while True:
message = f'{nickname} : {input("")}'
client.send(message.encode('ascii'))
receive_thread = threading.Thread(target=receive)
receive_thread.start()
write_thread = threading.Thread(target=write)
write_thread.start()
```
#### File: python-scripting/network-programming/tcp-chatroom.py
```python
import threading
import socket
host = '127.0.0.1'
port = 55555
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((host, port))
server.listen()
clients = []
nicknames = []
def broadcast(message):
for client in clients:
client.send(message)
def handle(client):
while True:
try:
message = client.recv(1024)
broadcast(message)
except :
index = clients.index(client)
clients.remove(client)
client.close()
nickname = nicknames[index]
broadcast(f'{nickname} has left the chat'.encode('ascii'))
nicknames.remove(nickname)
break
def receive():
while True:
client, address = server.accept()
print(f'Connected with {str(address)}')
client.send('Joon'.encode('ascii'))
nickname = client.recv(1024).decode('ascii')
nicknames.append(nickname)
clients.append(client)
print(f'Nickname of the client is {nickname}')
broadcast(f'{nickname} joined the chat'.encode('ascii'))
client.send('Connected to the server'.encode('ascii'))
thread = threading.Thread(target=handle, args=(client,))
thread.start()
print('Server is listening')
receive()
```
#### File: python-scripting/pdf-script/pdf_merger.py
```python
import PyPDF2
import sys
inputs = sys.argv[1:]
def pdf_combiner(pdf_list):
merger = PyPDF2.PdfFileMerger()
for pdf in pdf_list:
merger.append(pdf)
merger.write('pdf/merged.pdf')
pdf_combiner(inputs)
```
#### File: python/python-to-db/crud.py
```python
from dataclasses import dataclass
import sqlalchemy
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
# @dataclass
class Movie(Base):
# create table
__tablename__ = 'movies'
# Create column
id = Column(Integer, primary_key=True)
name = Column(String)
genre = Column(String)
desc = Column(String)
year = Column(Integer, nullable=True)
def __repr__ (self):
return f'<movie name = {self.name}'
engine = create_engine('sqlite:///app.db') # mysql, postgres
Session = sessionmaker(bind = engine)
my_sess = Session()
# Add table to db
# Base.metadata.create_all(engine)
# movie1 = Movie(name='interstellar', genre='scifi')
# movie2 = Movie(name='Martian', genre='scifi')
# # prepare
# my_sess.add(movie1, movie2)
# # commit
# my_sess.commit()
# Retrieve
# movie_a = my_sess.query(Movie).get(1)
# print(movie_a)
# Get all
# qs = my_sess.query(Movie).all()
# print(qs)
# filter by column value
# qs = my_sess.query(Movie).filter_by(name = 'interstellar').all()
# filter by column value containing sth
# qs = my_sess.query(Movie).filter(Movie.name.contains('Inter')).all()
# Update
movie_a = my_sess.query(Movie).get(1)
movie_a.desc = 'great movie'
# my_sess.commit()
print(movie_a.id, movie_a.desc)
# qs = my_sess.query(Movie).filter(Movie.name.contains('Inter')).all()
# print(qs)
# Delete
# my_sess.delete(movie_a)
# session.commit()
# session.flush()
``` |
{
"source": "joonjCode/serverless-fastapi",
"score": 2
} |
#### File: serverless-fastapi/app/main.py
```python
from fastapi import FastAPI
from app.core import config
from app.api.api_v1.api import router as api_router
from mangum import Mangum
app = FastAPI()
@app.get("/")
async def root():
return {"message" : f"This is our secret_key : {config.settings.secret_key}"}
app.include_router(api_router, prefix=config.settings.prefix)
handler = Mangum(app)
``` |
{
"source": "joonjCode/web-crawling-tutorial",
"score": 3
} |
#### File: web-crawling-tutorial/beautifulsoup/save.py
```python
import csv
'''
Try to use
with open('jobs.csv', 'w') as f:
fields = ['title','company','location','link']
w = csv.DictWriter(f,fieldnames = fields)
w.writeheader()
w.writerow()
'''
def save_to_csv(jobs):
file = open('indeed_python_jobs.csv', mode = 'w', encoding='utf8')
writer = csv.writer(file)
writer.writerow(['title', 'company', 'location','link'])
for job in jobs:
writer.writerow(list(job.values()))
file.close()
return
``` |
{
"source": "Joon-Jung/HoloLensForCV",
"score": 3
} |
#### File: Samples/py/recorder_console.py
```python
import os
import sys
import glob
import tarfile
import argparse
import sqlite3
import shutil
import json
import subprocess
import urllib.request
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--dev_portal_address", default="127.0.0.1:10080",
help="The IP address for the HoloLens Device Portal")
parser.add_argument("--dev_portal_username", required=True,
help="The username for the HoloLens Device Portal")
parser.add_argument("--dev_portal_password", required=True,
help="The password for the HoloLens Device Portal")
parser.add_argument("--workspace_path", required=True,
help="Path to workspace folder used for downloading "
"recordings and reconstruction using COLMAP")
parser.add_argument("--colmap_path", help="Path to COLMAP.bat executable")
parser.add_argument("--ref_camera_name", default="vlc_ll")
parser.add_argument("--frame_rate", type=int, default=5)
parser.add_argument("--start_frame", type=int, default=-1)
parser.add_argument("--max_num_frames", type=int, default=-1)
parser.add_argument("--num_refinements", type=int, default=3)
args = parser.parse_args()
return args
def mkdir_if_not_exists(path):
if not os.path.exists(path):
os.makedirs(path)
def rotmat2qvec(rotmat):
Rxx, Ryx, Rzx, Rxy, Ryy, Rzy, Rxz, Ryz, Rzz = rotmat.flat
K = np.array([
[Rxx - Ryy - Rzz, 0, 0, 0],
[Ryx + Rxy, Ryy - Rxx - Rzz, 0, 0],
[Rzx + Rxz, Rzy + Ryz, Rzz - Rxx - Ryy, 0],
[Ryz - Rzy, Rzx - Rxz, Rxy - Ryx, Rxx + Ryy + Rzz]]) / 3.0
eigvals, eigvecs = np.linalg.eigh(K)
qvec = eigvecs[[3, 0, 1, 2], np.argmax(eigvals)]
if qvec[0] < 0:
qvec *= -1
return qvec
class DevicePortalBrowser(object):
def connect(self, address, username, password):
print("Connecting to HoloLens Device Portal...")
self.url = "http://{}".format(address)
password_manager = urllib.request.HTTPPasswordMgrWithDefaultRealm()
password_manager.add_password(None, self.url, username, password)
handler = urllib.request.HTTPBasicAuthHandler(password_manager)
opener = urllib.request.build_opener(handler)
opener.open(self.url)
urllib.request.install_opener(opener)
print("=> Connected to HoloLens at address:", self.url)
print("Searching for CV: Recorder application...")
response = urllib.request.urlopen(
"{}/api/app/packagemanager/packages".format(self.url))
packages = json.loads(response.read().decode())
self.package_full_name = None
for package in packages["InstalledPackages"]:
if package["Name"] == "CV: Recorder":
self.package_full_name = package["PackageFullName"]
break
assert self.package_full_name is not None, \
"CV: Recorder package must be installed on HoloLens"
print("=> Found CV: Recorder application with name:",
self.package_full_name)
print("Searching for recordings...")
response = urllib.request.urlopen(
"{}/api/filesystem/apps/files?knownfolderid="
"LocalAppData&packagefullname={}&path=\\\\TempState".format(
self.url, self.package_full_name))
recordings = json.loads(response.read().decode())
self.recording_names = []
for recording in recordings["Items"]:
# Check if the recording contains any file data.
response = urllib.request.urlopen(
"{}/api/filesystem/apps/files?knownfolderid="
"LocalAppData&packagefullname={}&path=\\\\TempState\\{}".format(
self.url, self.package_full_name, recording["Id"]))
files = json.loads(response.read().decode())
if len(files["Items"]) > 0:
self.recording_names.append(recording["Id"])
self.recording_names.sort()
print("=> Found a total of {} recordings".format(
len(self.recording_names)))
def list_recordings(self, verbose=True):
for i, recording_name in enumerate(self.recording_names):
print("[{: 6d}] {}".format(i, recording_name))
if len(self.recording_names) == 0:
print("=> No recordings found on device")
def get_recording_name(self, recording_idx):
try:
return self.recording_names[recording_idx]
except IndexError:
print("=> Recording does not exist")
def download_recording(self, recording_idx, workspace_path):
recording_name = self.get_recording_name(recording_idx)
if recording_name is None:
return
recording_path = os.path.join(workspace_path, recording_name)
mkdir_if_not_exists(recording_path)
print("Downloading recording {}...".format(recording_name))
response = urllib.request.urlopen(
"{}/api/filesystem/apps/files?knownfolderid="
"LocalAppData&packagefullname={}&path=\\\\TempState\\{}".format(
self.url, self.package_full_name, recording_name))
files = json.loads(response.read().decode())
for file in files["Items"]:
if file["Type"] != 32:
continue
destination_path = os.path.join(recording_path, file["Id"])
if os.path.exists(destination_path):
print("=> Skipping, already downloaded:", file["Id"])
continue
print("=> Downloading:", file["Id"])
urllib.request.urlretrieve(
"{}/api/filesystem/apps/file?knownfolderid=LocalAppData&" \
"packagefullname={}&filename=\\\\TempState\\{}\\{}".format(
self.url, self.package_full_name,
recording_name, file["Id"]), destination_path)
def delete_recording(self, recording_idx):
recording_name = self.get_recording_name(recording_idx)
if recording_name is None:
return
print("Deleting recording {}...".format(recording_name))
response = urllib.request.urlopen(
"{}/api/filesystem/apps/files?knownfolderid="
"LocalAppData&packagefullname={}&path=\\\\TempState\\{}".format(
self.url, self.package_full_name, recording_name))
files = json.loads(response.read().decode())
for file in files["Items"]:
if file["Type"] != 32:
continue
print("=> Deleting:", file["Id"])
urllib.request.urlopen(urllib.request.Request(
"{}/api/filesystem/apps/file?knownfolderid=LocalAppData&" \
"packagefullname={}&filename=\\\\TempState\\{}\\{}".format(
self.url, self.package_full_name,
recording_name, file["Id"]), method="DELETE"))
self.recording_names.remove(recording_name)
def read_sensor_poses(path, identity_camera_to_image=False):
poses = {}
with open(path, "r") as fid:
header = fid.readline()
for line in fid:
line = line.strip()
if not line:
continue
elems = line.split(",")
assert len(elems) == 50
time_stamp = int(elems[0])
# Compose the absolute camera pose from the two relative
# camera poses provided by the recorder application.
# The absolute camera pose defines the transformation from
# the world to the camera coordinate system.
frame_to_origin = np.array(list(map(float, elems[2:18])))
frame_to_origin = frame_to_origin.reshape(4, 4).T
camera_to_frame = np.array(list(map(float, elems[18:34])))
camera_to_frame = camera_to_frame.reshape(4, 4).T
if abs(np.linalg.det(frame_to_origin[:3, :3]) - 1) < 0.01:
if identity_camera_to_image:
camera_to_image = np.eye(4)
else:
camera_to_image = np.array(
[[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
poses[time_stamp] = np.dot(
camera_to_image,
np.dot(camera_to_frame, np.linalg.inv(frame_to_origin)))
return poses
def read_sensor_images(recording_path, camera_name):
image_poses = read_sensor_poses(os.path.join(
recording_path, camera_name + ".csv"))
image_paths = sorted(glob.glob(
os.path.join(recording_path, camera_name, "*.pgm")))
paths = []
names = []
time_stamps = []
poses = []
for image_path in image_paths:
basename = os.path.basename(image_path)
name = os.path.join(camera_name, basename).replace("\\", "/")
time_stamp = int(os.path.splitext(basename)[0])
if time_stamp in image_poses:
paths.append(image_path)
names.append(name)
time_stamps.append(time_stamp)
poses.append(image_poses[time_stamp])
return paths, names, np.array(time_stamps), poses
def synchronize_sensor_frames(args, recording_path, output_path, camera_names):
# Collect all sensor frames.
images = {}
for camera_name in camera_names:
images[camera_name] = read_sensor_images(recording_path, camera_name)
# Synchronize the frames based on their time stamps.
ref_image_paths, ref_image_names, ref_time_stamps, ref_image_poses = \
images[args.ref_camera_name]
ref_time_diffs = np.diff(ref_time_stamps)
assert np.all(ref_time_stamps >= 0)
time_per_frame = 10**7 / 30.0
time_per_frame_sampled = 30.0 / args.frame_rate * time_per_frame
ref_image_paths_sampled = []
ref_image_names_sampled = []
ref_time_stamps_sampled = []
ref_image_poses_sampled = []
ref_prev_time_stamp = ref_time_stamps[0]
for i in range(1, len(ref_time_stamps)):
if ref_time_stamps[i] - ref_prev_time_stamp >= time_per_frame_sampled:
ref_image_paths_sampled.append(ref_image_paths[i])
ref_image_names_sampled.append(ref_image_names[i])
ref_time_stamps_sampled.append(ref_time_stamps[i])
ref_image_poses_sampled.append(ref_image_poses[i])
ref_prev_time_stamp = ref_time_stamps[i]
ref_image_paths, ref_image_names, ref_time_stamps, ref_image_poses = \
(ref_image_paths_sampled, ref_image_names_sampled,
ref_time_stamps_sampled, ref_image_poses_sampled)
if args.max_num_frames > 0:
assert args.start_frame < len(ref_image_paths)
end_frame = min(len(ref_image_paths),
args.start_frame + args.max_num_frames)
ref_image_paths = ref_image_paths[args.start_frame:end_frame]
ref_image_names = ref_image_names[args.start_frame:end_frame]
ref_time_stamps = ref_time_stamps[args.start_frame:end_frame]
sync_image_paths = {}
sync_image_names = {}
sync_image_poses = {}
for image_path, image_name, image_pose in zip(ref_image_paths,
ref_image_names,
ref_image_poses):
sync_image_paths[image_name] = [image_path]
sync_image_names[image_name] = [image_name]
sync_image_poses[image_name] = [image_pose]
max_sync_time_diff = time_per_frame / 5
for camera_name, (image_paths, image_names, time_stamps, image_poses) \
in images.items():
if camera_name == args.ref_camera_name:
continue
for image_path, image_name, time_stamp, image_pose in \
zip(image_paths, image_names, time_stamps, image_poses):
time_diffs = np.abs(time_stamp - ref_time_stamps)
min_time_diff_idx = np.argmin(time_diffs)
min_time_diff = time_diffs[min_time_diff_idx]
if min_time_diff < max_sync_time_diff:
sync_ref_image_name = ref_image_names[min_time_diff_idx]
sync_image_paths[sync_ref_image_name].append(image_path)
sync_image_names[sync_ref_image_name].append(image_name)
sync_image_poses[sync_ref_image_name].append(image_pose)
# Copy the frames to the output directory.
for camera_name in camera_names:
mkdir_if_not_exists(os.path.join(output_path, camera_name))
sync_frames = []
sync_poses = []
for ref_image_name, ref_time_stamp in zip(ref_image_names, ref_time_stamps):
image_basename = "{}.pgm".format(ref_time_stamp)
frame_images = []
frame_poses = []
for image_path, image_name, image_pose in \
zip(sync_image_paths[ref_image_name],
sync_image_names[ref_image_name],
sync_image_poses[ref_image_name]):
if len(sync_image_paths[ref_image_name]) == 4:
camera_name = os.path.dirname(image_name)
new_image_path = os.path.join(
output_path, camera_name, image_basename)
if not os.path.exists(new_image_path):
shutil.copyfile(image_path, new_image_path)
new_image_name = os.path.join(camera_name, image_basename)
frame_images.append(new_image_name.replace("\\", "/"))
frame_poses.append(image_pose)
sync_frames.append(frame_images)
sync_poses.append(frame_poses)
return sync_frames, sync_poses
def extract_recording(recording_path):
print("Extracting recording data...")
for file_name in glob.glob(os.path.join(recording_path, "*.tar")):
print("=> Extracting tarfile:", file_name)
tar = tarfile.open(file_name)
tar.extractall(path=recording_path)
tar.close()
def reconstruct_recording(args, recording_path, dense=True):
reconstruction_path = os.path.join(recording_path, "reconstruction")
database_path = os.path.join(reconstruction_path, "database.db")
image_path = os.path.join(reconstruction_path, "images")
image_list_path = os.path.join(reconstruction_path, "image_list.txt")
sparse_colmap_path = os.path.join(reconstruction_path, "sparse_colmap")
sparse_hololens_path = \
os.path.join(reconstruction_path, "sparse_hololens")
dense_path = os.path.join(reconstruction_path, "dense")
rig_config_path = os.path.join(reconstruction_path, "rig_config.json")
mkdir_if_not_exists(reconstruction_path)
extract_recording(recording_path)
camera_names = ("vlc_ll", "vlc_lf", "vlc_rf", "vlc_rr")
print("Syncrhonizing sensor frames...")
frames, poses = synchronize_sensor_frames(
args, recording_path, image_path, camera_names)
with open(image_list_path, "w") as fid:
for frame in frames:
for image_name in frame:
fid.write("{}\n".format(image_name))
subprocess.call([
args.colmap_path, "feature_extractor",
"--image_path", image_path,
"--database_path", database_path,
"--image_list_path", image_list_path,
])
# These OpenCV camera model parameters were determined for a specific
# HoloLens using the self-calibration capabilities of COLMAP.
# The parameters should be sufficiently accurate as an initialization and
# the parameters will be refined during the COLMAP reconstruction process.
camera_model_id = 4
camera_model_name = "OPENCV"
camera_width = 640
camera_height = 480
camera_params = {
"vlc_ll": "450.072070 450.274345 320 240 "
"-0.013211 0.012778 -0.002714 -0.003603",
"vlc_lf": "448.189452 452.478090 320 240 "
"-0.009463 0.003013 -0.006169 -0.008975",
"vlc_rf": "449.435779 453.332057 320 240 "
"-0.000305 -0.013207 0.003258 0.001051",
"vlc_rr": "450.301002 450.244147 320 240 "
"-0.010926 0.008377 -0.003105 -0.004976",
}
mkdir_if_not_exists(sparse_hololens_path)
cameras_file = open(os.path.join(sparse_hololens_path, "cameras.txt"), "w")
images_file = open(os.path.join(sparse_hololens_path, "images.txt"), "w")
points_file = open(os.path.join(sparse_hololens_path, "points3D.txt"), "w")
connection = sqlite3.connect(database_path)
cursor = connection.cursor()
camera_ids = {}
for camera_name in camera_names:
camera_params_list = \
list(map(float, camera_params[camera_name].split()))
camera_params_float = np.array(camera_params_list, dtype=np.double)
cursor.execute("INSERT INTO cameras"
"(model, width, height, params, prior_focal_length) "
"VALUES(?, ?, ?, ?, ?);",
(camera_model_id, camera_width,
camera_height, camera_params_float, 1))
camera_id = cursor.lastrowid
camera_ids[camera_name] = camera_id
cursor.execute("UPDATE images SET camera_id=? "
"WHERE name LIKE '{}%';".format(camera_name),
(camera_id,))
connection.commit()
cameras_file.write("{} {} {} {} {}\n".format(
camera_id, camera_model_name,
camera_width, camera_height,
camera_params[camera_name]))
for image_names, image_poses in zip(frames, poses):
for image_name, image_pose in zip(image_names, image_poses):
camera_name = os.path.dirname(image_name)
camera_id = camera_ids[camera_name]
cursor.execute(
"SELECT image_id FROM images WHERE name=?;", (image_name,))
image_id = cursor.fetchone()[0]
qvec = rotmat2qvec(image_pose[:3, :3])
tvec = image_pose[:, 3]
images_file.write("{} {} {} {} {} {} {} {} {} {}\n\n".format(
image_id, qvec[0], qvec[1], qvec[2], qvec[3],
tvec[0], tvec[1], tvec[2], camera_id, image_name
))
connection.close()
cameras_file.close()
images_file.close()
points_file.close()
subprocess.call([
args.colmap_path, "exhaustive_matcher",
"--database_path", database_path,
"--SiftMatching.guided_matching", "true",
])
with open(rig_config_path, "w") as fid:
fid.write("""[
{{
"ref_camera_id": {},
"cameras":
[
{{
"camera_id": {},
"image_prefix": "vlc_ll"
}},
{{
"camera_id": {},
"image_prefix": "vlc_lf"
}},
{{
"camera_id": {},
"image_prefix": "vlc_rf"
}},
{{
"camera_id": {},
"image_prefix": "vlc_rr"
}}
]
}}
]""".format(camera_ids[args.ref_camera_name],
camera_ids["vlc_ll"],
camera_ids["vlc_lf"],
camera_ids["vlc_rf"],
camera_ids["vlc_rr"]))
for i in range(args.num_refinements):
if i == 0:
sparse_input_path = sparse_hololens_path
else:
sparse_input_path = sparse_colmap_path + str(i - 1)
sparse_output_path = sparse_colmap_path + str(i)
mkdir_if_not_exists(sparse_output_path)
subprocess.call([
args.colmap_path, "point_triangulator",
"--database_path", database_path,
"--image_path", image_path,
"--input_path", sparse_input_path,
"--output_path", sparse_output_path,
])
subprocess.call([
args.colmap_path, "rig_bundle_adjuster",
"--input_path", sparse_output_path,
"--output_path", sparse_output_path,
"--rig_config_path", rig_config_path,
"--BundleAdjustment.max_num_iterations", str(25),
"--BundleAdjustment.max_linear_solver_iterations", str(100),
])
if not dense:
return
subprocess.call([
args.colmap_path, "image_undistorter",
"--image_path", image_path,
"--input_path", sparse_output_path,
"--output_path", dense_path,
])
subprocess.call([
args.colmap_path, "patch_match_stereo",
"--workspace_path", dense_path,
"--PatchMatchStereo.geom_consistency", "0",
"--PatchMatchStereo.min_triangulation_angle", "2",
])
subprocess.call([
args.colmap_path, "stereo_fusion",
"--workspace_path", dense_path,
"--StereoFusion.min_num_pixels", "15",
"--input_type", "photometric",
"--output_path", os.path.join(dense_path, "fused.ply"),
])
def print_help():
print("Available commands:")
print(" help: Print this help message")
print(" exit: Exit the console loop")
print(" list: List all recordings")
print(" list device: List all recordings on the HoloLens")
print(" list workspace: List all recordings in the workspace")
print(" download X: Download recording X from the HoloLens")
print(" delete X: Delete recording X from the HoloLens")
print(" delete all: Delete all recordings from the HoloLens")
print(" extract X: Extract recording X in the workspace")
print(" reconstruct X: Perform sparse and dense reconstruction "
"of recording X in the workspace")
print(" reconstruct sparse X: Perform sparse reconstruction "
"of recording X in the workspace")
def list_workspace_recordings(workspace_path):
recording_names = sorted(os.listdir(workspace_path))
for i, recording_name in enumerate(recording_names):
print("[{: 6d}] {}".format(i, recording_name))
if len(recording_names) == 0:
print("=> No recordings found in workspace")
def parse_command_and_index(command, num_commands=2):
error_in_command = False
sub_commands = command.split()
if len(sub_commands) != num_commands:
error_in_command = True
else:
try:
index = int(sub_commands[-1])
except:
error_in_command = True
if error_in_command:
print("=> Invalid command, expected a recording index")
return
return index
def main():
args = parse_args()
mkdir_if_not_exists(args.workspace_path)
dev_portal_browser = DevicePortalBrowser()
dev_portal_browser.connect(args.dev_portal_address,
args.dev_portal_username,
args.dev_portal_password)
print()
print_help()
print()
while True:
try:
command = input(">>> ").strip().lower()
except EOFError:
break
if command == "help":
print_help()
if command == "exit":
break
elif command == "list":
print("Device recordings:")
dev_portal_browser.list_recordings()
print("Workspace recordings:")
list_workspace_recordings(args.workspace_path)
elif command == "list device":
dev_portal_browser.list_recordings()
elif command == "list workspace":
list_workspace_recordings(args.workspace_path)
elif command.startswith("download"):
recording_idx = parse_command_and_index(command)
if recording_idx is not None:
dev_portal_browser.download_recording(
recording_idx, args.workspace_path)
elif command.startswith("delete"):
if command == "delete all":
for _ in range(len(dev_portal_browser.recording_names)):
dev_portal_browser.delete_recording(0)
else:
recording_idx = parse_command_and_index(command)
if recording_idx is not None:
dev_portal_browser.delete_recording(recording_idx)
elif command.startswith("extract"):
recording_idx = parse_command_and_index(command)
if recording_idx is not None:
try:
recording_names = sorted(os.listdir(args.workspace_path))
recording_name = recording_names[recording_idx]
except IndexError:
print("=> Recording does not exist")
else:
extract_recording(
os.path.join(args.workspace_path, recording_name))
elif command.startswith("reconstruct"):
if not args.colmap_path:
print("=> Cannot reconstruct, "
"because path to COLMAP is not specified")
continue
if "sparse" in command:
recording_idx = parse_command_and_index(command, num_commands=3)
else:
recording_idx = parse_command_and_index(command)
if recording_idx is not None:
try:
recording_names = sorted(os.listdir(args.workspace_path))
recording_name = recording_names[recording_idx]
except IndexError:
print("=> Recording does not exist")
else:
dense = "sparse" not in command
reconstruct_recording(
args, os.path.join(args.workspace_path, recording_name),
dense=dense)
else:
print("=> Command not found")
print()
print_help()
if __name__ == "__main__":
main()
``` |
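A hedged sketch of driving the same workflow without the interactive console loop; the calls mirror `DevicePortalBrowser` above, while the device address, credentials, and workspace path are placeholders. Reconstruction still goes through `reconstruct_recording`, which expects the parsed `args` namespace.
```python
# Equivalent console invocation (arguments from parse_args above):
#   python recorder_console.py --dev_portal_username <user> --dev_portal_password <pw> \
#       --workspace_path <workspace> --colmap_path <path to COLMAP.bat>

browser = DevicePortalBrowser()
browser.connect("192.168.1.50:10080", "<user>", "<password>")  # placeholder address/credentials
browser.list_recordings()

workspace_path = "C:/hololens_workspace"  # placeholder
if browser.recording_names:
    last_idx = len(browser.recording_names) - 1
    browser.download_recording(last_idx, workspace_path)
    browser.delete_recording(last_idx)
```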
{
"source": "Joonkkyo/KoBART-translation-for-IPU",
"score": 3
} |
#### File: Joonkkyo/KoBART-translation-for-IPU/torchlight_test.py
```python
import pytorch_lightning as pl
import torch
import torchvision
import torchvision.transforms as transforms
from simple_torch_model import SimpleTorchModel
# This class shows a minimal lightning example. This example uses our own
# SimpleTorchModel which is a basic 2 conv, 2 FC torch network. It can be
# found in simple_torch_model.py.
class SimpleLightning(pl.LightningModule):
def __init__(self):
super().__init__()
self.model = SimpleTorchModel()
def training_step(self, batch, _):
x, label = batch
prediction = self.model(x)
loss = torch.nn.functional.nll_loss(prediction, label)
return loss
def validation_step(self, batch, _):
x, label = batch
prediction = self.model(x)
preds = torch.argmax(prediction, dim=1)
acc = torch.sum(preds==label).float() / len(label)
return acc
# PopTorch doesn't currently support logging within steps. Use the Lightning
# callback hooks instead.
def on_train_batch_end(self,outputs, batch, batch_idx, dataloader_idx):
self.log('StepLoss', outputs["loss"])
def validation_epoch_end(self, outputs):
self.log('val_acc', torch.stack(outputs).mean(), prog_bar=True)
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.parameters(), lr=0.01)
return optimizer
if __name__ == '__main__':
# Create the model as usual.
model = SimpleLightning()
# Normal PyTorch dataset.
train_set = torchvision.datasets.FashionMNIST("FashionMNIST",
train=True,
download=True,
transform=transforms.Compose(
[transforms.ToTensor()]))
# Normal PyTorch dataloader.
train_loader = torch.utils.data.DataLoader(train_set,
batch_size=16,
shuffle=True)
# Run on IPU using IPUs=1. This will run on IPU but will not include any custom
# PopTorch Options. Changing ipus=1 to ipus=N will replicate the graph N
# times. This can lead to issues with the DataLoader batching - the script
# ipu_options_and_dataloading.py shows how these can be avoided through the
# use of IPUOptions.
trainer = pl.Trainer(ipus=1,
max_epochs=3,
progress_bar_refresh_rate=20,
log_every_n_steps=1)
# When fit is called the model will be compiled for IPU and will run on the available IPU devices.
trainer.fit(model, train_loader)
``` |
{
"source": "joonkyu4220/bullet3",
"score": 2
} |
#### File: deep_mimic/env/humanoid_pose_interpolator.py
```python
import numpy as np
from pybullet_utils import bullet_client
import math
from .math_utils import *
class HumanoidPoseInterpolator(object):
# REPRESENTATION_MODE_CHECKPOINT
# def __init__(self):
def __init__(self, arg_parser = None):
# REPRESENTATION_MODE_CHECKPOINT
# self.state_representation_mode = "Quaternion"
# self.action_representation_mode = "AxisAngle"
# self.state_representation_mode = "6D"
# self.action_representation_mode = "6D"
# REPRESENTATION_MODE_CHECKPOINT
self._arg_parser = arg_parser
self.state_representation_mode = self._arg_parser.parse_string('state_repr', default="Quaternion")
self.action_representation_mode = self._arg_parser.parse_string('action_repr', default="AxisAngle")
if self.action_representation_mode == "Quaternion":
self.action_dim = 4
elif self.action_representation_mode == "Euler":
self.action_dim = 3
elif self.action_representation_mode == "AxisAngle":
self.action_dim = 4
elif self.action_representation_mode == "RotVec":
self.action_dim = 3
elif self.action_representation_mode == "RotMat":
self.action_dim = 9
elif self.action_representation_mode == "6D":
self.action_dim = 6
pass
def Reset(self,
basePos=[0, 0, 0],
baseOrn=[0, 0, 0, 1],
chestRot=[0, 0, 0, 1],
neckRot=[0, 0, 0, 1],
rightHipRot=[0, 0, 0, 1],
rightKneeRot=[0],
rightAnkleRot=[0, 0, 0, 1],
rightShoulderRot=[0, 0, 0, 1],
rightElbowRot=[0],
leftHipRot=[0, 0, 0, 1],
leftKneeRot=[0],
leftAnkleRot=[0, 0, 0, 1],
leftShoulderRot=[0, 0, 0, 1],
leftElbowRot=[0],
baseLinVel=[0, 0, 0],
baseAngVel=[0, 0, 0],
chestVel=[0, 0, 0],
neckVel=[0, 0, 0],
rightHipVel=[0, 0, 0],
rightKneeVel=[0],
rightAnkleVel=[0, 0, 0],
rightShoulderVel=[0, 0, 0],
rightElbowVel=[0],
leftHipVel=[0, 0, 0],
leftKneeVel=[0],
leftAnkleVel=[0, 0, 0],
leftShoulderVel=[0, 0, 0],
leftElbowVel=[0]):
self._basePos = basePos
self._baseLinVel = baseLinVel
#print("HumanoidPoseInterpolator.Reset: baseLinVel = ", baseLinVel)
self._baseOrn = baseOrn
self._baseAngVel = baseAngVel
self._chestRot = chestRot
self._chestVel = chestVel
self._neckRot = neckRot
self._neckVel = neckVel
self._rightHipRot = rightHipRot
self._rightHipVel = rightHipVel
self._rightKneeRot = rightKneeRot
self._rightKneeVel = rightKneeVel
self._rightAnkleRot = rightAnkleRot
self._rightAnkleVel = rightAnkleVel
self._rightShoulderRot = rightShoulderRot
self._rightShoulderVel = rightShoulderVel
self._rightElbowRot = rightElbowRot
self._rightElbowVel = rightElbowVel
self._leftHipRot = leftHipRot
self._leftHipVel = leftHipVel
self._leftKneeRot = leftKneeRot
self._leftKneeVel = leftKneeVel
self._leftAnkleRot = leftAnkleRot
self._leftAnkleVel = leftAnkleVel
self._leftShoulderRot = leftShoulderRot
self._leftShoulderVel = leftShoulderVel
self._leftElbowRot = leftElbowRot
self._leftElbowVel = leftElbowVel
def ComputeLinVel(self, posStart, posEnd, deltaTime):
vel = [(posEnd[0] - posStart[0]) / deltaTime, (posEnd[1] - posStart[1]) / deltaTime,
(posEnd[2] - posStart[2]) / deltaTime]
return vel
def ComputeAngVel(self, ornStart, ornEnd, deltaTime, bullet_client):
dorn = bullet_client.getDifferenceQuaternion(ornStart, ornEnd)
axis, angle = bullet_client.getAxisAngleFromQuaternion(dorn)
angVel = [(axis[0] * angle) / deltaTime, (axis[1] * angle) / deltaTime,
(axis[2] * angle) / deltaTime]
return angVel
def ComputeAngVelRel(self, ornStart, ornEnd, deltaTime, bullet_client):
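    # Angular velocity expressed relative to the starting orientation: compose the conjugate
    # of ornStart with ornEnd, convert the difference to axis-angle, and divide by deltaTime.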
ornStartConjugate = [-ornStart[0], -ornStart[1], -ornStart[2], ornStart[3]]
pos_diff, q_diff = bullet_client.multiplyTransforms([0, 0, 0], ornStartConjugate, [0, 0, 0],
ornEnd)
axis, angle = bullet_client.getAxisAngleFromQuaternion(q_diff)
angVel = [(axis[0] * angle) / deltaTime, (axis[1] * angle) / deltaTime,
(axis[2] * angle) / deltaTime]
return angVel
def NormalizeVector(self, vec):
# length2 = vec[0] * vec[0] + vec[1] * vec[1] + vec[2] * vec[2]
length2 = self.DotProduct(vec, vec)
if (length2 > 0):
length = math.sqrt(length2)
vec[0] /= length
vec[1] /= length
vec[2] /= length
return vec
def NormalizeQuaternion(self, orn):
# length2 = orn[0] * orn[0] + orn[1] * orn[1] + orn[2] * orn[2] + orn[3] * orn[3]
length2 = self.DotProduct(orn, orn)
if (length2 > 0):
length = math.sqrt(length2)
orn[0] /= length
orn[1] /= length
orn[2] /= length
orn[3] /= length
return orn
#print("Normalize? length=",length)
def PostProcessMotionData(self, frameData):
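    # Re-packs the mocap quaternions from the frame's (w, x, y, z) order into pybullet's
    # (x, y, z, w) order; in this snippet the values are only read, not modified further.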
baseOrn1Start = [frameData[5], frameData[6], frameData[7], frameData[4]]
chestRotStart = [frameData[9], frameData[10], frameData[11], frameData[8]]
neckRotStart = [frameData[13], frameData[14], frameData[15], frameData[12]]
rightHipRotStart = [frameData[17], frameData[18], frameData[19], frameData[16]]
rightAnkleRotStart = [frameData[22], frameData[23], frameData[24], frameData[21]]
rightShoulderRotStart = [frameData[26], frameData[27], frameData[28], frameData[25]]
leftHipRotStart = [frameData[31], frameData[32], frameData[33], frameData[30]]
leftAnkleRotStart = [frameData[36], frameData[37], frameData[38], frameData[35]]
leftShoulderRotStart = [frameData[40], frameData[41], frameData[42], frameData[39]]
def GetPose(self):
pose = [
# these 7 elements will be zero-ed out in pybullet_deep_mimic_env.set_action()
self._basePos[0], self._basePos[1], self._basePos[2],
self._baseOrn[0], self._baseOrn[1], self._baseOrn[2], self._baseOrn[3],
# these values will be given in ConvertFromAction()
self._chestRot[0], self._chestRot[1], self._chestRot[2], self._chestRot[3],
self._neckRot[0], self._neckRot[1], self._neckRot[2], self._neckRot[3],
self._rightHipRot[0], self._rightHipRot[1], self._rightHipRot[2], self._rightHipRot[3],
self._rightKneeRot[0],
self._rightAnkleRot[0], self._rightAnkleRot[1], self._rightAnkleRot[2], self._rightAnkleRot[3],
self._rightShoulderRot[0], self._rightShoulderRot[1], self._rightShoulderRot[2], self._rightShoulderRot[3],
self._rightElbowRot[0],
self._leftHipRot[0], self._leftHipRot[1], self._leftHipRot[2], self._leftHipRot[3],
self._leftKneeRot[0],
self._leftAnkleRot[0], self._leftAnkleRot[1], self._leftAnkleRot[2], self._leftAnkleRot[3],
self._leftShoulderRot[0], self._leftShoulderRot[1], self._leftShoulderRot[2], self._leftShoulderRot[3],
self._leftElbowRot[0]
]
return pose
def Slerp(self, frameFraction, frameData, frameDataNext, bullet_client):
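    # Interpolate between two mocap keyframes at the given fraction: base position and 1-DoF
    # joints linearly, spherical joints via quaternion slerp; velocities are finite
    # differences over the keyframe duration.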
keyFrameDuration = frameData[0]
basePos1Start = [frameData[1], frameData[2], frameData[3]]
basePos1End = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]
self._basePos = [
basePos1Start[0] + frameFraction * (basePos1End[0] - basePos1Start[0]),
basePos1Start[1] + frameFraction * (basePos1End[1] - basePos1Start[1]),
basePos1Start[2] + frameFraction * (basePos1End[2] - basePos1Start[2])
]
self._baseLinVel = self.ComputeLinVel(basePos1Start, basePos1End, keyFrameDuration)
baseOrn1Start = [frameData[5], frameData[6], frameData[7], frameData[4]]
baseOrn1Next = [frameDataNext[5], frameDataNext[6], frameDataNext[7], frameDataNext[4]]
self._baseOrn = bullet_client.getQuaternionSlerp(baseOrn1Start, baseOrn1Next, frameFraction)
self._baseAngVel = self.ComputeAngVel(baseOrn1Start, baseOrn1Next, keyFrameDuration,
bullet_client)
##pre-rotate to make z-up
#y2zPos=[0,0,0.0]
#y2zOrn = p.getQuaternionFromEuler([1.57,0,0])
#basePos,baseOrn = p.multiplyTransforms(y2zPos, y2zOrn,basePos1,baseOrn1)
chestRotStart = [frameData[9], frameData[10], frameData[11], frameData[8]]
chestRotEnd = [frameDataNext[9], frameDataNext[10], frameDataNext[11], frameDataNext[8]]
self._chestRot = bullet_client.getQuaternionSlerp(chestRotStart, chestRotEnd, frameFraction)
self._chestVel = self.ComputeAngVelRel(chestRotStart, chestRotEnd, keyFrameDuration,
bullet_client)
neckRotStart = [frameData[13], frameData[14], frameData[15], frameData[12]]
neckRotEnd = [frameDataNext[13], frameDataNext[14], frameDataNext[15], frameDataNext[12]]
self._neckRot = bullet_client.getQuaternionSlerp(neckRotStart, neckRotEnd, frameFraction)
self._neckVel = self.ComputeAngVelRel(neckRotStart, neckRotEnd, keyFrameDuration,
bullet_client)
rightHipRotStart = [frameData[17], frameData[18], frameData[19], frameData[16]]
rightHipRotEnd = [frameDataNext[17], frameDataNext[18], frameDataNext[19], frameDataNext[16]]
self._rightHipRot = bullet_client.getQuaternionSlerp(rightHipRotStart, rightHipRotEnd,
frameFraction)
self._rightHipVel = self.ComputeAngVelRel(rightHipRotStart, rightHipRotEnd, keyFrameDuration,
bullet_client)
rightKneeRotStart = [frameData[20]]
rightKneeRotEnd = [frameDataNext[20]]
self._rightKneeRot = [
rightKneeRotStart[0] + frameFraction * (rightKneeRotEnd[0] - rightKneeRotStart[0])
]
self._rightKneeVel = [(rightKneeRotEnd[0] - rightKneeRotStart[0]) / keyFrameDuration]
rightAnkleRotStart = [frameData[22], frameData[23], frameData[24], frameData[21]]
rightAnkleRotEnd = [frameDataNext[22], frameDataNext[23], frameDataNext[24], frameDataNext[21]]
self._rightAnkleRot = bullet_client.getQuaternionSlerp(rightAnkleRotStart, rightAnkleRotEnd,
frameFraction)
self._rightAnkleVel = self.ComputeAngVelRel(rightAnkleRotStart, rightAnkleRotEnd,
keyFrameDuration, bullet_client)
rightShoulderRotStart = [frameData[26], frameData[27], frameData[28], frameData[25]]
rightShoulderRotEnd = [
frameDataNext[26], frameDataNext[27], frameDataNext[28], frameDataNext[25]
]
self._rightShoulderRot = bullet_client.getQuaternionSlerp(rightShoulderRotStart,
rightShoulderRotEnd, frameFraction)
self._rightShoulderVel = self.ComputeAngVelRel(rightShoulderRotStart, rightShoulderRotEnd,
keyFrameDuration, bullet_client)
rightElbowRotStart = [frameData[29]]
rightElbowRotEnd = [frameDataNext[29]]
self._rightElbowRot = [
rightElbowRotStart[0] + frameFraction * (rightElbowRotEnd[0] - rightElbowRotStart[0])
]
self._rightElbowVel = [(rightElbowRotEnd[0] - rightElbowRotStart[0]) / keyFrameDuration]
leftHipRotStart = [frameData[31], frameData[32], frameData[33], frameData[30]]
leftHipRotEnd = [frameDataNext[31], frameDataNext[32], frameDataNext[33], frameDataNext[30]]
self._leftHipRot = bullet_client.getQuaternionSlerp(leftHipRotStart, leftHipRotEnd,
frameFraction)
self._leftHipVel = self.ComputeAngVelRel(leftHipRotStart, leftHipRotEnd, keyFrameDuration,
bullet_client)
leftKneeRotStart = [frameData[34]]
leftKneeRotEnd = [frameDataNext[34]]
self._leftKneeRot = [
leftKneeRotStart[0] + frameFraction * (leftKneeRotEnd[0] - leftKneeRotStart[0])
]
self._leftKneeVel = [(leftKneeRotEnd[0] - leftKneeRotStart[0]) / keyFrameDuration]
leftAnkleRotStart = [frameData[36], frameData[37], frameData[38], frameData[35]]
leftAnkleRotEnd = [frameDataNext[36], frameDataNext[37], frameDataNext[38], frameDataNext[35]]
self._leftAnkleRot = bullet_client.getQuaternionSlerp(leftAnkleRotStart, leftAnkleRotEnd,
frameFraction)
self._leftAnkleVel = self.ComputeAngVelRel(leftAnkleRotStart, leftAnkleRotEnd,
keyFrameDuration, bullet_client)
leftShoulderRotStart = [frameData[40], frameData[41], frameData[42], frameData[39]]
leftShoulderRotEnd = [
frameDataNext[40], frameDataNext[41], frameDataNext[42], frameDataNext[39]
]
self._leftShoulderRot = bullet_client.getQuaternionSlerp(leftShoulderRotStart,
leftShoulderRotEnd, frameFraction)
self._leftShoulderVel = self.ComputeAngVelRel(leftShoulderRotStart, leftShoulderRotEnd,
keyFrameDuration, bullet_client)
leftElbowRotStart = [frameData[43]]
leftElbowRotEnd = [frameDataNext[43]]
self._leftElbowRot = [
leftElbowRotStart[0] + frameFraction * (leftElbowRotEnd[0] - leftElbowRotStart[0])
]
self._leftElbowVel = [(leftElbowRotEnd[0] - leftElbowRotStart[0]) / keyFrameDuration]
pose = self.GetPose()
return pose
def ConvertFromAction(self, pybullet_client, action):
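    # The action is laid out joint by joint: action_dim values per spherical joint
    # (interpreted according to action_representation_mode) and a single angle for each
    # revolute joint (knees and elbows).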
#turn action into pose
self.Reset() #?? needed?
index = 0
self._chestRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._neckRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._rightHipRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._rightKneeRot = [action[index]]
index += 1
self._rightAnkleRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._rightShoulderRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._rightElbowRot = [action[index]]
index += 1
self._leftHipRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._leftKneeRot = [action[index]]
index += 1
self._leftAnkleRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._leftShoulderRot = getQuaternionFromAction(action[index:index+self.action_dim], self.action_representation_mode)
index += self.action_dim
self._leftElbowRot = [action[index]]
index += 1
# if self.action_representation_mode == "6D":
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._chestRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._neckRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._rightHipRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# angle = action[index]
# self._rightKneeRot = [angle]
# index += 1
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._rightAnkleRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._rightShoulderRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# angle = action[index]
# self._rightElbowRot = [angle]
# index += 1
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._leftHipRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# angle = action[index]
# self._leftKneeRot = [angle]
# index += 1
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._leftAnkleRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# sixdim = action[index:index + 6]
# axis, angle = getAxisAngleFromSixDim(sixdim)
# self._leftShoulderRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# index += 6
# angle = action[index]
# self._leftElbowRot = [angle]
# index += 1
# elif self.action_representation_mode == "AxisAngle":
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._chestRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# #print("pose._chestRot=",pose._chestRot)
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._neckRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._rightHipRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# index += 1
# self._rightKneeRot = [angle]
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._rightAnkleRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._rightShoulderRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# index += 1
# self._rightElbowRot = [angle]
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._leftHipRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# index += 1
# self._leftKneeRot = [angle]
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._leftAnkleRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# axis = [action[index + 1], action[index + 2], action[index + 3]]
# index += 4
# self._leftShoulderRot = pybullet_client.getQuaternionFromAxisAngle(axis, angle)
# angle = action[index]
# index += 1
# self._leftElbowRot = [angle]
pose = self.GetPose()
return pose
```
#### File: deep_mimic/env/humanoid_stable_pd.py
```python
from pybullet_utils import pd_controller_stable
from pybullet_envs.deep_mimic.env import humanoid_pose_interpolator
import math
import numpy as np
from .math_utils import *
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
rightWrist = 8
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
leftWrist = 14
jointFrictionForce = 0
class HumanoidStablePD(object):
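  # Wraps a simulated humanoid driven by (stable) PD control together with a transparent
  # kinematic reference humanoid that replays the mocap clip, and provides the
  # DeepMimic-style state, reward and termination logic.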
def __init__( self, pybullet_client, mocap_data, timeStep,
useFixedBase=True, arg_parser=None, useComReward=False):
# REPRESENTATION_MODE_CHECKPOINT
# self.state_representation_mode = "Quaternion"
# self.action_representation_mode = "AxisAngle"
# self.state_representation_mode = "6D"
# self.action_representation_mode = "6D"
self._pybullet_client = pybullet_client
self._mocap_data = mocap_data
self._arg_parser = arg_parser
# REPRESENTATION_MODE_CHECKPOINT
self.state_representation_mode = self._arg_parser.parse_string('state_repr', default="Quaternion")
self.action_representation_mode = self._arg_parser.parse_string('action_repr', default="AxisAngle")
if self.state_representation_mode == "Quaternion":
self.state_dim = 4
elif self.state_representation_mode == "Euler":
self.state_dim = 3
elif self.state_representation_mode == "AxisAngle":
self.state_dim = 4
elif self.state_representation_mode == "RotVec":
self.state_dim = 3
elif self.state_representation_mode == "RotMat":
self.state_dim = 9
elif self.state_representation_mode == "6D":
self.state_dim = 6
# DISCONTINUITY_CHECKPOINT
self.state_discontinuity_encounter = 0
print("LOADING humanoid!")
    flags = (self._pybullet_client.URDF_MAINTAIN_LINK_ORDER +
             self._pybullet_client.URDF_USE_SELF_COLLISION +
             self._pybullet_client.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
self._sim_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.889540259, 0],
globalScaling=0.25,
useFixedBase=useFixedBase,
flags=flags)
#self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,-1,collisionFilterGroup=0,collisionFilterMask=0)
#for j in range (self._pybullet_client.getNumJoints(self._sim_model)):
# self._pybullet_client.setCollisionFilterGroupMask(self._sim_model,j,collisionFilterGroup=0,collisionFilterMask=0)
self._end_effectors = [5, 8, 11, 14] #ankle and wrist, both left and right
self._kin_model = self._pybullet_client.loadURDF(
"humanoid/humanoid.urdf", [0, 0.85, 0],
globalScaling=0.25,
useFixedBase=True,
flags=self._pybullet_client.URDF_MAINTAIN_LINK_ORDER)
self._pybullet_client.changeDynamics(self._sim_model, -1, lateralFriction=0.9)
for j in range(self._pybullet_client.getNumJoints(self._sim_model)):
self._pybullet_client.changeDynamics(self._sim_model, j, lateralFriction=0.9)
self._pybullet_client.changeDynamics(self._sim_model, -1, linearDamping=0, angularDamping=0)
self._pybullet_client.changeDynamics(self._kin_model, -1, linearDamping=0, angularDamping=0)
#todo: add feature to disable simulation for a particular object. Until then, disable all collisions
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
-1,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
-1,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
alpha = 0.4
self._pybullet_client.changeVisualShape(self._kin_model, -1, rgbaColor=[1, 1, 1, alpha])
for j in range(self._pybullet_client.getNumJoints(self._kin_model)):
self._pybullet_client.setCollisionFilterGroupMask(self._kin_model,
j,
collisionFilterGroup=0,
collisionFilterMask=0)
self._pybullet_client.changeDynamics(
self._kin_model,
j,
activationState=self._pybullet_client.ACTIVATION_STATE_SLEEP +
self._pybullet_client.ACTIVATION_STATE_ENABLE_SLEEPING +
self._pybullet_client.ACTIVATION_STATE_DISABLE_WAKEUP)
self._pybullet_client.changeVisualShape(self._kin_model, j, rgbaColor=[1, 1, 1, alpha])
# REPRESENTATION_MODE_CHECKPOINT
# self._poseInterpolator = humanoid_pose_interpolator.HumanoidPoseInterpolator()
self._poseInterpolator = humanoid_pose_interpolator.HumanoidPoseInterpolator(self._arg_parser)
for i in range(self._mocap_data.NumFrames() - 1):
frameData = self._mocap_data._motion_data['Frames'][i]
self._poseInterpolator.PostProcessMotionData(frameData)
self._stablePD = pd_controller_stable.PDControllerStableMultiDof(self._pybullet_client)
self._timeStep = timeStep
self._kpOrg = [
0, 0, 0,
0, 0, 0, 0,
1000, 1000, 1000, 1000,
100, 100, 100, 100,
500, 500, 500, 500,
500,
400, 400, 400, 400,
400, 400, 400, 400,
300,
500, 500, 500, 500,
500,
400, 400, 400, 400,
400, 400, 400, 400,
300
]
self._kdOrg = [
0, 0, 0,
0, 0, 0, 0,
100, 100, 100, 100,
10, 10, 10, 10,
50, 50, 50, 50,
50,
40, 40, 40, 40,
40, 40, 40, 40,
30,
50, 50, 50, 50,
50,
40, 40, 40, 40,
40, 40, 40, 40,
30
]
self._jointIndicesAll = [
chest, neck, rightHip, rightKnee, rightAnkle, rightShoulder, rightElbow, leftHip, leftKnee,
leftAnkle, leftShoulder, leftElbow
]
for j in self._jointIndicesAll:
#self._pybullet_client.setJointMotorControlMultiDof(self._sim_model, j, self._pybullet_client.POSITION_CONTROL, force=[1,1,1])
self._pybullet_client.setJointMotorControl2(self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=jointFrictionForce)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, jointFrictionForce])
self._pybullet_client.setJointMotorControl2(self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=0,
positionGain=0,
targetVelocity=0,
force=0)
self._pybullet_client.setJointMotorControlMultiDof(
self._kin_model,
j,
self._pybullet_client.POSITION_CONTROL,
targetPosition=[0, 0, 0, 1],
targetVelocity=[0, 0, 0],
positionGain=0,
velocityGain=1,
force=[jointFrictionForce, jointFrictionForce, 0])
self._jointDofCounts = [4, 4, # chest, neck
4, 1, 4, 4, 1, # right
4, 1, 4, 4, 1 # left
]
#only those body parts/links are allowed to touch the ground, otherwise the episode terminates
fall_contact_bodies = []
if self._arg_parser is not None:
fall_contact_bodies = self._arg_parser.parse_ints("fall_contact_bodies")
self._fall_contact_body_parts = fall_contact_bodies
#[x,y,z] base position and [x,y,z,w] base orientation!
self._totalDofs = 7
for dof in self._jointDofCounts:
self._totalDofs += dof
self.setSimTime(0)
self._useComReward = useComReward
self.resetPose()
def resetPose(self):
#print("resetPose with self._frame=", self._frame, " and self._frameFraction=",self._frameFraction)
pose = self.computePose(self._frameFraction)
self.initializePose(self._poseInterpolator, self._sim_model, initBase=True)
self.initializePose(self._poseInterpolator, self._kin_model, initBase=False)
def initializePose(self, pose, phys_model, initBase, initializeVelocity=True):
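    # Write the interpolated pose (and, optionally, the velocities) into the given pybullet
    # body - used both for the simulated model and for the kinematic reference model.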
useArray = True
if initializeVelocity:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
self._pybullet_client.resetBaseVelocity(phys_model, pose._baseLinVel, pose._baseAngVel)
if useArray:
indices = [chest,neck,rightHip,rightKnee,
rightAnkle, rightShoulder, rightElbow,leftHip,
leftKnee, leftAnkle, leftShoulder,leftElbow]
jointPositions = [pose._chestRot, pose._neckRot, pose._rightHipRot, pose._rightKneeRot,
pose._rightAnkleRot, pose._rightShoulderRot, pose._rightElbowRot, pose._leftHipRot,
pose._leftKneeRot, pose._leftAnkleRot, pose._leftShoulderRot, pose._leftElbowRot]
jointVelocities = [pose._chestVel, pose._neckVel, pose._rightHipVel, pose._rightKneeVel,
pose._rightAnkleVel, pose._rightShoulderVel, pose._rightElbowVel, pose._leftHipVel,
pose._leftKneeVel, pose._leftAnkleVel, pose._leftShoulderVel, pose._leftElbowVel]
self._pybullet_client.resetJointStatesMultiDof(phys_model, indices,
jointPositions, jointVelocities)
else:
self._pybullet_client.resetJointStateMultiDof(phys_model, chest, pose._chestRot,
pose._chestVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, neck, pose._neckRot, pose._neckVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightHip, pose._rightHipRot,
pose._rightHipVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightKnee, pose._rightKneeRot,
pose._rightKneeVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightAnkle, pose._rightAnkleRot,
pose._rightAnkleVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightShoulder,
pose._rightShoulderRot, pose._rightShoulderVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, rightElbow, pose._rightElbowRot,
pose._rightElbowVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftHip, pose._leftHipRot,
pose._leftHipVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftKnee, pose._leftKneeRot,
pose._leftKneeVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftAnkle, pose._leftAnkleRot,
pose._leftAnkleVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftShoulder,
pose._leftShoulderRot, pose._leftShoulderVel)
self._pybullet_client.resetJointStateMultiDof(phys_model, leftElbow, pose._leftElbowRot,
pose._leftElbowVel)
else:
if initBase:
self._pybullet_client.resetBasePositionAndOrientation(phys_model, pose._basePos,
pose._baseOrn)
if useArray:
indices = [chest,neck,rightHip,rightKnee,
rightAnkle, rightShoulder, rightElbow,leftHip,
leftKnee, leftAnkle, leftShoulder,leftElbow]
jointPositions = [pose._chestRot, pose._neckRot, pose._rightHipRot, pose._rightKneeRot,
pose._rightAnkleRot, pose._rightShoulderRot, pose._rightElbowRot, pose._leftHipRot,
pose._leftKneeRot, pose._leftAnkleRot, pose._leftShoulderRot, pose._leftElbowRot]
self._pybullet_client.resetJointStatesMultiDof(phys_model, indices,jointPositions)
else:
self._pybullet_client.resetJointStateMultiDof(phys_model, chest, pose._chestRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, neck, pose._neckRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightHip, pose._rightHipRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightKnee, pose._rightKneeRot, [0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightAnkle, pose._rightAnkleRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightShoulder,
pose._rightShoulderRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, rightElbow, pose._rightElbowRot,
[0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftHip, pose._leftHipRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftKnee, pose._leftKneeRot, [0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftAnkle, pose._leftAnkleRot,
[0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftShoulder,
pose._leftShoulderRot, [0, 0, 0])
self._pybullet_client.resetJointStateMultiDof(phys_model, leftElbow, pose._leftElbowRot, [0])
def calcCycleCount(self, simTime, cycleTime):
phases = simTime / cycleTime
count = math.floor(phases)
loop = True
#count = (loop) ? count : cMathUtil::Clamp(count, 0, 1);
return count
def getCycleTime(self):
keyFrameDuration = self._mocap_data.KeyFrameDuration()
cycleTime = keyFrameDuration * (self._mocap_data.NumFrames() - 1)
return cycleTime
def setSimTime(self, t):
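    # Map simulation time onto the mocap clip: cycle count, current and next frame indices,
    # and the fraction used for interpolation between them.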
self._simTime = t
#print("SetTimeTime time =",t)
keyFrameDuration = self._mocap_data.KeyFrameDuration()
cycleTime = self.getCycleTime()
#print("self._motion_data.NumFrames()=",self._mocap_data.NumFrames())
self._cycleCount = self.calcCycleCount(t, cycleTime)
#print("cycles=",cycles)
frameTime = t - self._cycleCount * cycleTime
if (frameTime < 0):
frameTime += cycleTime
#print("keyFrameDuration=",keyFrameDuration)
#print("frameTime=",frameTime)
self._frame = int(frameTime / keyFrameDuration)
#print("self._frame=",self._frame)
self._frameNext = self._frame + 1
if (self._frameNext >= self._mocap_data.NumFrames()):
self._frameNext = self._frame
self._frameFraction = (frameTime - self._frame * keyFrameDuration) / (keyFrameDuration)
def computeCycleOffset(self):
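    # Root translation accumulated over one full mocap cycle; used to shift the base
    # position when the clip loops.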
firstFrame = 0
lastFrame = self._mocap_data.NumFrames() - 1
frameData = self._mocap_data._motion_data['Frames'][0]
frameDataNext = self._mocap_data._motion_data['Frames'][lastFrame]
basePosStart = [frameData[1], frameData[2], frameData[3]]
basePosEnd = [frameDataNext[1], frameDataNext[2], frameDataNext[3]]
self._cycleOffset = [
basePosEnd[0] - basePosStart[0], basePosEnd[1] - basePosStart[1],
basePosEnd[2] - basePosStart[2]
]
return self._cycleOffset
def computePose(self, frameFraction):
frameData = self._mocap_data._motion_data['Frames'][self._frame]
frameDataNext = self._mocap_data._motion_data['Frames'][self._frameNext]
self._poseInterpolator.Slerp(frameFraction, frameData, frameDataNext, self._pybullet_client)
#print("self._poseInterpolator.Slerp(", frameFraction,")=", pose)
self.computeCycleOffset()
oldPos = self._poseInterpolator._basePos
self._poseInterpolator._basePos = [
oldPos[0] + self._cycleCount * self._cycleOffset[0],
oldPos[1] + self._cycleCount * self._cycleOffset[1],
oldPos[2] + self._cycleCount * self._cycleOffset[2]
]
pose = self._poseInterpolator.GetPose()
return pose
def convertActionToPose(self, action):
pose = self._poseInterpolator.ConvertFromAction(self._pybullet_client, action)
return pose
def computeAndApplyPDForces(self, desiredPositions, maxForces):
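    # Build per-joint target positions, gains and force limits (4-DoF spherical joints vs.
    # 1-DoF revolute joints) and hand them to pybullet's built-in STABLE_PD_CONTROL.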
dofIndex = 7
scaling = 1
indices = []
forces = []
targetPositions=[]
targetVelocities=[]
kps = []
kds = []
for index in range(len(self._jointIndicesAll)):
jointIndex = self._jointIndicesAll[index]
indices.append(jointIndex)
kps.append(self._kpOrg[dofIndex])
kds.append(self._kdOrg[dofIndex])
if self._jointDofCounts[index] == 4:
force = [
scaling * maxForces[dofIndex + 0],
scaling * maxForces[dofIndex + 1],
scaling * maxForces[dofIndex + 2]
]
targetVelocity = [0,0,0]
targetPosition = [
desiredPositions[dofIndex + 0],
desiredPositions[dofIndex + 1],
desiredPositions[dofIndex + 2],
desiredPositions[dofIndex + 3]
]
if self._jointDofCounts[index] == 1:
force = [scaling * maxForces[dofIndex]]
targetPosition = [desiredPositions[dofIndex + 0]]
targetVelocity = [0]
forces.append(force)
targetPositions.append(targetPosition)
targetVelocities.append(targetVelocity)
dofIndex += self._jointDofCounts[index]
#static char* kwlist[] = { "bodyUniqueId",
#"jointIndices",
#"controlMode", "targetPositions", "targetVelocities", "forces", "positionGains", "velocityGains", "maxVelocities", "physicsClientId", NULL };
self._pybullet_client.setJointMotorControlMultiDofArray(self._sim_model,
indices,
self._pybullet_client.STABLE_PD_CONTROL,
targetPositions = targetPositions,
targetVelocities = targetVelocities,
forces=forces,
positionGains = kps,
velocityGains = kds,
)
def computePDForces(self, desiredPositions, desiredVelocities, maxForces):
"""Compute torques from the PD controller."""
    if desiredVelocities is None:
desiredVelocities = [0] * self._totalDofs
taus = self._stablePD.computePD(bodyUniqueId=self._sim_model,
jointIndices=self._jointIndicesAll,
desiredPositions=desiredPositions,
desiredVelocities=desiredVelocities,
kps=self._kpOrg,
kds=self._kdOrg,
maxForces=maxForces,
timeStep=self._timeStep)
return taus
def applyPDForces(self, taus):
"""Apply pre-computed torques."""
dofIndex = 7
scaling = 1
useArray = True
indices = []
forces = []
if (useArray):
for index in range(len(self._jointIndicesAll)):
jointIndex = self._jointIndicesAll[index]
indices.append(jointIndex)
if self._jointDofCounts[index] == 4:
force = [
scaling * taus[dofIndex + 0], scaling * taus[dofIndex + 1],
scaling * taus[dofIndex + 2]
]
if self._jointDofCounts[index] == 1:
force = [scaling * taus[dofIndex]]
#print("force[", jointIndex,"]=",force)
forces.append(force)
dofIndex += self._jointDofCounts[index]
self._pybullet_client.setJointMotorControlMultiDofArray(self._sim_model,
indices,
self._pybullet_client.TORQUE_CONTROL,
forces=forces)
else:
for index in range(len(self._jointIndicesAll)):
jointIndex = self._jointIndicesAll[index]
if self._jointDofCounts[index] == 4:
force = [
scaling * taus[dofIndex + 0], scaling * taus[dofIndex + 1],
scaling * taus[dofIndex + 2]
]
#print("force[", jointIndex,"]=",force)
self._pybullet_client.setJointMotorControlMultiDof(self._sim_model,
jointIndex,
self._pybullet_client.TORQUE_CONTROL,
force=force)
if self._jointDofCounts[index] == 1:
force = [scaling * taus[dofIndex]]
#print("force[", jointIndex,"]=",force)
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
jointIndex,
controlMode=self._pybullet_client.TORQUE_CONTROL,
force=force)
dofIndex += self._jointDofCounts[index]
def setJointMotors(self, desiredPositions, maxForces):
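    # Alternative to the explicit PD paths: drive each joint with pybullet's built-in
    # position control toward the interpolated pose.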
controlMode = self._pybullet_client.POSITION_CONTROL
startIndex = 7
chest = 1
neck = 2
rightHip = 3
rightKnee = 4
rightAnkle = 5
rightShoulder = 6
rightElbow = 7
leftHip = 9
leftKnee = 10
leftAnkle = 11
leftShoulder = 12
leftElbow = 13
# position gain
kp = 0.2
forceScale = 1
#self._jointDofCounts=[4,4,4,1,4,4,1,4,1,4,4,1]
maxForce = [
forceScale * maxForces[startIndex], forceScale * maxForces[startIndex + 1],
forceScale * maxForces[startIndex + 2], forceScale * maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
chest,
controlMode,
targetPosition=self._poseInterpolator._chestRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
neck,
controlMode,
targetPosition=self._poseInterpolator._neckRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightHip,
controlMode,
targetPosition=self._poseInterpolator._rightHipRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightKnee,
controlMode,
targetPosition=self._poseInterpolator._rightKneeRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightAnkle,
controlMode,
targetPosition=self._poseInterpolator._rightAnkleRot,
positionGain=kp,
force=maxForce)
maxForce = [
forceScale * maxForces[startIndex], forceScale * maxForces[startIndex + 1],
forceScale * maxForces[startIndex + 2], forceScale * maxForces[startIndex + 3]
]
startIndex += 4
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
rightElbow,
controlMode,
targetPosition=self._poseInterpolator._rightElbowRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftHip,
controlMode,
targetPosition=self._poseInterpolator._leftHipRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftKnee,
controlMode,
targetPosition=self._poseInterpolator._leftKneeRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftAnkle,
controlMode,
targetPosition=self._poseInterpolator._leftAnkleRot,
positionGain=kp,
force=maxForce)
maxForce = [
maxForces[startIndex], maxForces[startIndex + 1], maxForces[startIndex + 2],
maxForces[startIndex + 3]
]
startIndex += 4
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftShoulder,
controlMode,
targetPosition=self._poseInterpolator._leftShoulderRot,
positionGain=kp,
force=maxForce)
maxForce = [forceScale * maxForces[startIndex]]
startIndex += 1
self._pybullet_client.setJointMotorControlMultiDof(
self._sim_model,
leftElbow,
controlMode,
targetPosition=self._poseInterpolator._leftElbowRot,
positionGain=kp,
force=maxForce)
#print("startIndex=",startIndex)
def getPhase(self):
keyFrameDuration = self._mocap_data.KeyFrameDuration()
cycleTime = keyFrameDuration * (self._mocap_data.NumFrames() - 1)
phase = self._simTime / cycleTime
phase = math.fmod(phase, 1.0)
if (phase < 0):
phase += 1
return phase
def buildHeadingTrans(self, rootOrn):
#align root transform 'forward' with world-space x axis
eul = self._pybullet_client.getEulerFromQuaternion(rootOrn)
refDir = [1, 0, 0]
rotVec = self._pybullet_client.rotateVector(rootOrn, refDir)
heading = math.atan2(-rotVec[2], rotVec[0])
heading2 = eul[1]
# print("heading=",heading)
headingOrn = self._pybullet_client.getQuaternionFromAxisAngle([0, 1, 0], -heading)
return headingOrn
def buildOriginTrans(self):
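    # Build the inverse transform that moves the root's horizontal position to the origin
    # (height is kept) and removes the heading rotation, so the state can be expressed in a
    # root-relative, heading-aligned frame.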
rootPos, rootOrn = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
# print("rootPos=",rootPos)
# print("rootOrn=",rootOrn)
invRootPos = [-rootPos[0], 0, -rootPos[2]]
#invOrigTransPos, invOrigTransOrn = self._pybullet_client.invertTransform(rootPos,rootOrn)
headingOrn = self.buildHeadingTrans(rootOrn)
# print("headingOrn=",headingOrn)
headingMat = self._pybullet_client.getMatrixFromQuaternion(headingOrn)
# print("headingMat=",headingMat)
#dummy, rootOrnWithoutHeading = self._pybullet_client.multiplyTransforms([0,0,0],headingOrn, [0,0,0], rootOrn)
#dummy, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0,0,0],rootOrnWithoutHeading, invOrigTransPos, invOrigTransOrn)
invOrigTransPos, invOrigTransOrn = self._pybullet_client.multiplyTransforms([0, 0, 0],
headingOrn,
invRootPos,
[0, 0, 0, 1])
# print("invOrigTransPos=",invOrigTransPos)
# print("invOrigTransOrn=",invOrigTransOrn)
invOrigTransMat = self._pybullet_client.getMatrixFromQuaternion(invOrigTransOrn)
# print("invOrigTransMat =",invOrigTransMat )
return invOrigTransPos, invOrigTransOrn
def getState(self):
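    # State vector layout: phase, root height, then for every link its local position and
    # orientation (in the heading-aligned root frame), followed by every link's local
    # linear and angular velocity.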
stateVector = []
phase = self.getPhase()
# print("phase=",phase)
stateVector.append(phase)
rootTransPos, rootTransOrn = self.buildOriginTrans() # transformation: rootPos -> [0, height, 0], forward -> [1, 0, 0]
basePos, baseOrn = self._pybullet_client.getBasePositionAndOrientation(self._sim_model) # transformation: [0, 0, 0] -> rootPos, [0, 0, 0, 1] -> rootOrn
rootPosRel, dummy = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn,
basePos, [0, 0, 0, 1]) # transformation: [0, 0, 0] -> rootPos -> [0, height, 0], forward -> [1, 0, 0]
# print("!!!rootPosRel =",rootPosRel )
# print("rootTransPos=",rootTransPos)
# print("basePos=",basePos)
localPos, localOrn = self._pybullet_client.multiplyTransforms(rootTransPos, rootTransOrn,
basePos, baseOrn) # transformation: [0, 0, 0] -> rootPos -> [0, height, 0], [0, 0, 0, 1] -> rootOrn -> towards[1, 0, 0]
localPos = [
localPos[0] - rootPosRel[0], localPos[1] - rootPosRel[1], localPos[2] - rootPosRel[2]
] # always [0, 0, 0]
# print("localPos=",localPos)
stateVector.append(rootPosRel[1]) # root height
#self.pb2dmJoints=[0,1,2,9,10,11,3,4,5,12,13,14,6,7,8]
self.pb2dmJoints = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
linkIndicesSim = []
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
linkIndicesSim.append(self.pb2dmJoints[pbJoint])
linkStatesSim = self._pybullet_client.getLinkStates(self._sim_model, linkIndicesSim, computeForwardKinematics=True, computeLinkVelocity=True)
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
#print("joint order:",j)
#ls = self._pybullet_client.getLinkState(self._sim_model, j, computeForwardKinematics=True)
ls = linkStatesSim[pbJoint]
linkPos = ls[0]
linkOrn = ls[1]
linkPosLocal, linkOrnLocal = self._pybullet_client.multiplyTransforms(
rootTransPos, rootTransOrn, linkPos, linkOrn) # local position and orientation, origin at PROJECTED root
if (linkOrnLocal[3] < 0): # ***
linkOrnLocal = [-linkOrnLocal[0], -linkOrnLocal[1], -linkOrnLocal[2], -linkOrnLocal[3]]
linkPosLocal = [
linkPosLocal[0] - rootPosRel[0], linkPosLocal[1] - rootPosRel[1],
linkPosLocal[2] - rootPosRel[2]
] # local position, origin at root (NOT PROJECTED)
for l in linkPosLocal:
stateVector.append(l)
#re-order the quaternion, DeepMimic uses w,x,y,z
# stateVector.append(linkOrnLocal[3])
# stateVector.append(linkOrnLocal[0])
# stateVector.append(linkOrnLocal[1])
# stateVector.append(linkOrnLocal[2])
# unnecessary, because of ***
if (linkOrnLocal[3] < 0):
linkOrnLocal[0] *= -1
linkOrnLocal[1] *= -1
linkOrnLocal[2] *= -1
linkOrnLocal[3] *= -1
# if self.state_representation_mode == "Quaternion":
# stateVector.append(linkOrnLocal[3])
# stateVector.append(linkOrnLocal[0])
# stateVector.append(linkOrnLocal[1])
# stateVector.append(linkOrnLocal[2])
# elif self.state_representation_mode == "6D":
# linkOrnLocal = self._pybullet_client.getMatrixFromQuaternion(linkOrnLocal)
# stateVector.append(linkOrnLocal[0])
# stateVector.append(linkOrnLocal[1])
# stateVector.append(linkOrnLocal[2])
# stateVector.append(linkOrnLocal[3])
# stateVector.append(linkOrnLocal[4])
# stateVector.append(linkOrnLocal[5])
if abs(linkOrnLocal[3]) < eps:
self.state_discontinuity_encounter += 1
linkOrnLocal = getStateFromQuaternion(linkOrnLocal, self.state_representation_mode)
for elem in linkOrnLocal:
stateVector.append(elem)
# print(type(self._pybullet_client.getMatrixFromQuaternion([0, 0, 0, 1])))
# print(self._pybullet_client.getMatrixFromQuaternion([0, 0, 0.7071, 0.7071]))
# print(type(self._pybullet_client.getQuaternionFromAxisAngle([1, 0, 0], 0)))
# print(self._pybullet_client.getQuaternionFromAxisAngle([1, 0, 0], 0))
# print(type(self._pybullet_client.getEulerFromQuaternion([0, 0, 0, 1])))
# print(self._pybullet_client.getEulerFromQuaternion([0, 0, 0, 1]))
# print(type(self._pybullet_client.getQuaternionFromEuler([0, 0, 0])))
# print(self._pybullet_client.getQuaternionFromEuler([0, 0, 0]))
for pbJoint in range(self._pybullet_client.getNumJoints(self._sim_model)):
j = self.pb2dmJoints[pbJoint]
#ls = self._pybullet_client.getLinkState(self._sim_model, j, computeLinkVelocity=True)
ls = linkStatesSim[pbJoint]
# print(pbJoint)
# print("!!!!!!!!!!!!!!!!!!")
# print(ls[0]) # linkWorldPosition
# print(ls[1]) # linkWorldOrientation
# print(ls[2]) # localInertialFramePosition
# print(ls[3]) # localInertialFrameOrientation
# print(ls[4]) # worldLinkFramePosition
# print(ls[5]) # worldLinkFrameOrientation
# print(ls[6]) # worldLinkLinearVelocity
# print(ls[7]) # worldLinkAngularVelocity
linkLinVel = ls[6]
linkAngVel = ls[7]
linkLinVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
linkLinVel, [0, 0, 0, 1]) # because translation transformation doesn't affect linear velocity
#linkLinVelLocal=[linkLinVelLocal[0]-rootPosRel[0],linkLinVelLocal[1]-rootPosRel[1],linkLinVelLocal[2]-rootPosRel[2]]
linkAngVelLocal, unused = self._pybullet_client.multiplyTransforms([0, 0, 0], rootTransOrn,
                                                                         linkAngVel, [0, 0, 0, 1])  # nor the angular velocity
for l in linkLinVelLocal:
stateVector.append(l)
for l in linkAngVelLocal:
stateVector.append(l)
#print("stateVector len=",len(stateVector))
#for st in range (len(stateVector)):
# print("state[",st,"]=",stateVector[st])
return stateVector
def terminates(self):
#check if any non-allowed body part hits the ground
terminates = False
pts = self._pybullet_client.getContactPoints()
for p in pts:
part = -1
#ignore self-collision
if (p[1] == p[2]):
continue
if (p[1] == self._sim_model):
part = p[3]
if (p[2] == self._sim_model):
part = p[4]
if (part >= 0 and part in self._fall_contact_body_parts):
#print("terminating part:", part)
terminates = True
return terminates
def quatMul(self, q1, q2):
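    # Hamilton product of two quaternions stored in [x, y, z, w] order.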
return [
q1[3] * q2[0] + q1[0] * q2[3] + q1[1] * q2[2] - q1[2] * q2[1],
q1[3] * q2[1] + q1[1] * q2[3] + q1[2] * q2[0] - q1[0] * q2[2],
q1[3] * q2[2] + q1[2] * q2[3] + q1[0] * q2[1] - q1[1] * q2[0],
q1[3] * q2[3] - q1[0] * q2[0] - q1[1] * q2[1] - q1[2] * q2[2]
]
def calcRootAngVelErr(self, vel0, vel1):
diff = [vel0[0] - vel1[0], vel0[1] - vel1[1], vel0[2] - vel1[2]]
return diff[0] * diff[0] + diff[1] * diff[1] + diff[2] * diff[2]
def calcRootRotDiff(self, orn0, orn1):
orn0Conj = [-orn0[0], -orn0[1], -orn0[2], orn0[3]]
q_diff = self.quatMul(orn1, orn0Conj)
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(q_diff)
return angle * angle
def getReward(self, pose):
"""Compute and return the pose-based reward."""
#from DeepMimic double cSceneImitate::CalcRewardImitate
#todo: compensate for ground height in some parts, once we move to non-flat terrain
# not values from the paper, but from the published code.
pose_w = 0.5
vel_w = 0.05
end_eff_w = 0.15
# does not exist in paper
root_w = 0.2
if self._useComReward:
com_w = 0.1
else:
com_w = 0
total_w = pose_w + vel_w + end_eff_w + root_w + com_w
pose_w /= total_w
vel_w /= total_w
end_eff_w /= total_w
root_w /= total_w
com_w /= total_w
pose_scale = 2
vel_scale = 0.1
end_eff_scale = 40
root_scale = 5
com_scale = 10
err_scale = 1 # error scale
reward = 0
pose_err = 0
vel_err = 0
end_eff_err = 0
root_err = 0
com_err = 0
heading_err = 0
#create a mimic reward, comparing the dynamics humanoid with a kinematic one
#pose = self.InitializePoseFromMotionData()
#print("self._kin_model=",self._kin_model)
#print("kinematicHumanoid #joints=",self._pybullet_client.getNumJoints(self._kin_model))
#self.ApplyPose(pose, True, True, self._kin_model, self._pybullet_client)
#const Eigen::VectorXd& pose0 = sim_char.GetPose();
#const Eigen::VectorXd& vel0 = sim_char.GetVel();
#const Eigen::VectorXd& pose1 = kin_char.GetPose();
#const Eigen::VectorXd& vel1 = kin_char.GetVel();
#tMatrix origin_trans = sim_char.BuildOriginTrans();
#tMatrix kin_origin_trans = kin_char.BuildOriginTrans();
#
#tVector com0_world = sim_char.CalcCOM();
if self._useComReward:
comSim, comSimVel = self.computeCOMposVel(self._sim_model)
comKin, comKinVel = self.computeCOMposVel(self._kin_model)
#tVector com_vel0_world = sim_char.CalcCOMVel();
#tVector com1_world;
#tVector com_vel1_world;
#cRBDUtil::CalcCoM(joint_mat, body_defs, pose1, vel1, com1_world, com_vel1_world);
#
root_id = 0
#tVector root_pos0 = cKinTree::GetRootPos(joint_mat, pose0);
#tVector root_pos1 = cKinTree::GetRootPos(joint_mat, pose1);
#tQuaternion root_rot0 = cKinTree::GetRootRot(joint_mat, pose0);
#tQuaternion root_rot1 = cKinTree::GetRootRot(joint_mat, pose1);
#tVector root_vel0 = cKinTree::GetRootVel(joint_mat, vel0);
#tVector root_vel1 = cKinTree::GetRootVel(joint_mat, vel1);
#tVector root_ang_vel0 = cKinTree::GetRootAngVel(joint_mat, vel0);
#tVector root_ang_vel1 = cKinTree::GetRootAngVel(joint_mat, vel1);
mJointWeights = [
0.20833, 0.10416, 0.0625, 0.10416, 0.0625, 0.041666666666666671, 0.0625, 0.0416, 0.00,
0.10416, 0.0625, 0.0416, 0.0625, 0.0416, 0.0000
]
num_end_effs = 0
num_joints = 15
root_rot_w = mJointWeights[root_id]
rootPosSim, rootOrnSim = self._pybullet_client.getBasePositionAndOrientation(self._sim_model)
rootPosKin, rootOrnKin = self._pybullet_client.getBasePositionAndOrientation(self._kin_model)
linVelSim, angVelSim = self._pybullet_client.getBaseVelocity(self._sim_model)
#don't read the velocities from the kinematic model (they are zero), use the pose interpolator velocity
#see also issue https://github.com/bulletphysics/bullet3/issues/2401
linVelKin = self._poseInterpolator._baseLinVel
angVelKin = self._poseInterpolator._baseAngVel
root_rot_err = self.calcRootRotDiff(rootOrnSim, rootOrnKin)
pose_err += root_rot_w * root_rot_err
root_vel_diff = [
linVelSim[0] - linVelKin[0], linVelSim[1] - linVelKin[1], linVelSim[2] - linVelKin[2]
]
root_vel_err = root_vel_diff[0] * root_vel_diff[0] + root_vel_diff[1] * root_vel_diff[
1] + root_vel_diff[2] * root_vel_diff[2]
root_ang_vel_err = self.calcRootAngVelErr(angVelSim, angVelKin)
vel_err += root_rot_w * root_ang_vel_err
useArray = True
if useArray:
jointIndices = range(num_joints)
simJointStates = self._pybullet_client.getJointStatesMultiDof(self._sim_model, jointIndices)
kinJointStates = self._pybullet_client.getJointStatesMultiDof(self._kin_model, jointIndices)
if useArray:
linkStatesSim = self._pybullet_client.getLinkStates(self._sim_model, jointIndices)
linkStatesKin = self._pybullet_client.getLinkStates(self._kin_model, jointIndices)
for j in range(num_joints):
curr_pose_err = 0
curr_vel_err = 0
w = mJointWeights[j]
if useArray:
simJointInfo = simJointStates[j]
else:
simJointInfo = self._pybullet_client.getJointStateMultiDof(self._sim_model, j)
#print("simJointInfo.pos=",simJointInfo[0])
#print("simJointInfo.vel=",simJointInfo[1])
if useArray:
kinJointInfo = kinJointStates[j]
else:
kinJointInfo = self._pybullet_client.getJointStateMultiDof(self._kin_model, j)
#print("kinJointInfo.pos=",kinJointInfo[0])
#print("kinJointInfo.vel=",kinJointInfo[1])
if (len(simJointInfo[0]) == 1):
angle = simJointInfo[0][0] - kinJointInfo[0][0]
curr_pose_err = angle * angle
velDiff = simJointInfo[1][0] - kinJointInfo[1][0]
curr_vel_err = velDiff * velDiff
if (len(simJointInfo[0]) == 4):
#print("quaternion diff")
diffQuat = self._pybullet_client.getDifferenceQuaternion(simJointInfo[0], kinJointInfo[0])
axis, angle = self._pybullet_client.getAxisAngleFromQuaternion(diffQuat)
curr_pose_err = angle * angle
diffVel = [
simJointInfo[1][0] - kinJointInfo[1][0], simJointInfo[1][1] - kinJointInfo[1][1],
simJointInfo[1][2] - kinJointInfo[1][2]
]
curr_vel_err = diffVel[0] * diffVel[0] + diffVel[1] * diffVel[1] + diffVel[2] * diffVel[2]
pose_err += w * curr_pose_err
vel_err += w * curr_vel_err
is_end_eff = j in self._end_effectors
if is_end_eff:
if useArray:
linkStateSim = linkStatesSim[j]
linkStateKin = linkStatesKin[j]
else:
linkStateSim = self._pybullet_client.getLinkState(self._sim_model, j)
linkStateKin = self._pybullet_client.getLinkState(self._kin_model, j)
linkPosSim = linkStateSim[0]
linkPosKin = linkStateKin[0]
linkPosDiff = [
linkPosSim[0] - linkPosKin[0], linkPosSim[1] - linkPosKin[1],
linkPosSim[2] - linkPosKin[2]
]
curr_end_err = linkPosDiff[0] * linkPosDiff[0] + linkPosDiff[1] * linkPosDiff[
1] + linkPosDiff[2] * linkPosDiff[2]
end_eff_err += curr_end_err
num_end_effs += 1
if (num_end_effs > 0):
end_eff_err /= num_end_effs
#double root_ground_h0 = mGround->SampleHeight(sim_char.GetRootPos())
#double root_ground_h1 = kin_char.GetOriginPos()[1]
#root_pos0[1] -= root_ground_h0
#root_pos1[1] -= root_ground_h1
root_pos_diff = [
rootPosSim[0] - rootPosKin[0], rootPosSim[1] - rootPosKin[1], rootPosSim[2] - rootPosKin[2]
]
root_pos_err = root_pos_diff[0] * root_pos_diff[0] + root_pos_diff[1] * root_pos_diff[
1] + root_pos_diff[2] * root_pos_diff[2]
#
#root_rot_err = cMathUtil::QuatDiffTheta(root_rot0, root_rot1)
#root_rot_err *= root_rot_err
#root_vel_err = (root_vel1 - root_vel0).squaredNorm()
#root_ang_vel_err = (root_ang_vel1 - root_ang_vel0).squaredNorm()
root_err = root_pos_err + 0.1 * root_rot_err + 0.01 * root_vel_err + 0.001 * root_ang_vel_err
# COM error in initial code -> COM velocities
if self._useComReward:
com_err = 0.1 * np.sum(np.square(comKinVel - comSimVel))
# com_err = 0.1 * np.sum(np.square(comKin - comSim))
#com_err = 0.1 * (com_vel1_world - com_vel0_world).squaredNorm()
#print("pose_err=",pose_err)
#print("vel_err=",vel_err)
pose_reward = math.exp(-err_scale * pose_scale * pose_err)
vel_reward = math.exp(-err_scale * vel_scale * vel_err)
end_eff_reward = math.exp(-err_scale * end_eff_scale * end_eff_err)
root_reward = math.exp(-err_scale * root_scale * root_err)
com_reward = math.exp(-err_scale * com_scale * com_err)
reward = pose_w * pose_reward + vel_w * vel_reward + end_eff_w * end_eff_reward + root_w * root_reward + com_w * com_reward
# pose_reward,vel_reward,end_eff_reward, root_reward, com_reward);
#print("reward=",reward)
#print("pose_reward=",pose_reward)
#print("vel_reward=",vel_reward)
#print("end_eff_reward=",end_eff_reward)
#print("root_reward=",root_reward)
#print("com_reward=",com_reward)
info_rew = dict(
pose_reward=pose_reward,
vel_reward=vel_reward,
end_eff_reward=end_eff_reward,
root_reward=root_reward,
com_reward=com_reward
)
info_errs = dict(
pose_err=pose_err,
vel_err=vel_err,
end_eff_err=end_eff_err,
root_err=root_err,
com_err=com_err
)
return reward
def computeCOMposVel(self, uid: int):
"""Compute center-of-mass position and velocity."""
pb = self._pybullet_client
num_joints = 15
jointIndices = range(num_joints)
link_states = pb.getLinkStates(uid, jointIndices, computeLinkVelocity=1)
link_pos = np.array([s[0] for s in link_states])
link_vel = np.array([s[-2] for s in link_states])
tot_mass = 0.
masses = []
for j in jointIndices:
mass_, *_ = pb.getDynamicsInfo(uid, j)
masses.append(mass_)
tot_mass += mass_
masses = np.asarray(masses)[:, None]
com_pos = np.sum(masses * link_pos, axis=0) / tot_mass
com_vel = np.sum(masses * link_vel, axis=0) / tot_mass
return com_pos, com_vel
``` |
{
"source": "joonleesky/cs294",
"score": 3
} |
#### File: cs294/hw1/network.py
```python
import tensorflow as tf
import numpy as np
import tf_util
class CloneNetwork:
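    # A small fully-connected network (two ReLU hidden layers, linear output) trained with a
    # squared-error loss to map observations to actions (behavioral cloning).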
def __init__(self, input_size, output_size, config):
self.input_size = input_size
self.output_size = output_size
self.hidden_sizes = config.hidden_sizes
self.learning_rate = config.learning_rate
self.build_network()
def build_network(self):
self.add_placeholder()
self.add_embedding()
self.pred = self.add_prediction_op()
self.loss = self.add_loss_op(self.pred)
self.train_op = self.add_train_op(self.loss)
self.add_summary()
def train(self, sess, x, y):
        _, summary = sess.run([self.train_op, self.merged],
                              feed_dict={self.X: x, self.Y: y})
        return summary
def predict(self, sess, x):
return sess.run(self.pred, feed_dict = {self.X:x})
def add_placeholder(self):
self.X = tf.placeholder(tf.float32, [None, self.input_size])
self.Y = tf.placeholder(tf.float32, [None, self.output_size])
def add_embedding(self):
self.global_step = tf.Variable(0, trainable = False, name = 'global_step')
self.W1 = tf.get_variable("W1", shape=[self.input_size, self.hidden_sizes[0]],
initializer=tf.contrib.layers.xavier_initializer())
self.b1 = tf.zeros([self.hidden_sizes[0]])
self.W2 = tf.get_variable("W2", shape=[self.hidden_sizes[0], self.hidden_sizes[1]],
initializer=tf.contrib.layers.xavier_initializer())
self.b2 = tf.zeros([self.hidden_sizes[1]])
self.W3 = tf.get_variable("W3", shape=[self.hidden_sizes[1], self.output_size],
initializer=tf.contrib.layers.xavier_initializer())
self.b3 = tf.zeros([self.output_size])
def add_prediction_op(self):
h1 = tf.nn.relu(tf.matmul(self.X,self.W1) + self.b1)
h2 = tf.nn.relu(tf.matmul(h1,self.W2) + self.b2)
pred = tf.matmul(h2, self.W3) + self.b3
return pred
def add_loss_op(self, pred):
return tf.reduce_sum(tf.square(self.Y - pred))
def add_train_op(self, loss):
        return tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(loss, global_step=self.global_step)
def add_summary(self):
tf.summary.scalar('loss',self.loss)
self.merged = tf.summary.merge_all()
``` |
{
"source": "joonro/pyprocessmacro",
"score": 3
} |
#### File: pyprocessmacro/pyprocessmacro/models.py
```python
import numpy as np
from .utils import fast_OLS, fast_optimize, bootstrap_sampler, eval_expression, bias_corrected_ci, z_score, \
percentile_ci
import scipy.stats as stats
from numpy.linalg import inv, LinAlgError
from numpy import dot
from itertools import product, combinations
import pandas as pd
from functools import partial
import warnings
class BaseLogit(object):
"""
A convenience parent class for the methods used in Logistic models.
"""
def __init__(self, endog, exog, options):
self._endog = endog
self._exog = exog
self._n_obs = exog.shape[0]
self._n_vars = exog.shape[1]
if not options:
options = {}
self._options = options
@staticmethod
def _cdf(X):
"""
        The CDF of the logistic function, evaluated element-wise.
        :param X: An array of values
        :return: An array of probabilities strictly between 0 and 1
"""
idx = X > 0
out = np.empty(X.size, dtype=float)
with warnings.catch_warnings():
warnings.filterwarnings('error')
try:
out[idx] = 1 / (1 + np.exp(-X[idx]))
exp_X = np.exp(X[~idx])
out[~idx] = exp_X / (1 + exp_X)
return out
except RuntimeWarning:
raise RuntimeError
def _loglike(self, params):
return np.sum(self._loglikeobs(params))
def _loglikeobs(self, params):
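        # Per-observation log-likelihood, using q = 2y - 1 so that P(y|x) = sigmoid(q * x'b).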
q = 2 * self._endog - 1
X = self._exog
return np.log(self._cdf(q * dot(X, params)))
def _score(self, params):
z = dot(self._exog, params)
L = self._cdf(z)
return dot(self._endog - L, self._exog)
def _hessian(self, params):
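        # Returns X' W X with W = diag(L(1 - L)), i.e. the negative Hessian (observed
        # information) of the log-likelihood; the sign is handled in _optimize().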
X = self._exog
L = self._cdf(dot(X, params))
return dot(L * (1 - L) * X.T, X)
def _optimize(self):
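        # Newton-Raphson on the mean log-likelihood: iterate until the parameter change
        # falls below the convergence tolerance or the iteration limit is reached.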
max_iter = self._options["iterate"]
tolerance = self._options["convergence"]
iterations = 0
score = lambda params: self._score(params) / self._n_obs
hess = lambda params: -self._hessian(params) / self._n_obs
oldparams = np.inf
newparams = np.repeat(0, self._n_vars)
while iterations < max_iter and np.any(np.abs(newparams - oldparams) > tolerance):
oldparams = newparams
H = hess(oldparams)
newparams = oldparams - dot(inv(H), score(oldparams))
iterations += 1
return newparams
class NullLogitModel(BaseLogit):
def __init__(self, endog, options=None):
n_obs = endog.shape[0]
exog = np.ones((n_obs, 1))
if not options:
options = {}
super().__init__(endog, exog, options)
class ParallelMediationModel(object):
"""
A class describing a parallel mediation model between an endogenous variable Y, one or several mediators M, and a
set of exogenous predictors for the endogenous variable and the mediators.
"""
def __init__(self, data, exog_terms_y, exog_terms_m, mod_symb, spot_values,
n_meds, analysis_list, symb_to_ind, symb_to_var, options=None):
"""
:param data: array
An NxK array of data
:param exog_terms_y: list of strings
Symbols of exogenous terms for the estimation of the outcome Y
:param exog_terms_m: list of strings
Symbols of exogenous terms for the estimation of the mediator(s) M (same for all mediators)
:param mod_symb: list of strings
Symbols of the moderator(s) of the path from X to the mediator(s) M and of the path from M to Y
:param spot_values: dict of lists
The spotlight values of the moderator(s)
:param n_meds: int
Number of mediator(s)
:param analysis_list: list of ["PMM", "CMM", "MMM"]
The list of additional analysis to conduct.
:param symb_to_ind: dict of int
Dictionary mapping the symbols to the indices of the variable in the data
:param symb_to_var:
Dictionary mapping the symbols to the actual names of the variable in the data
:param options: dict
Dictionary of options, from the Process object
"""
self._data = data
self._exog_terms_y = exog_terms_y
self._exog_terms_m = exog_terms_m
self._n_meds = n_meds
self._symb_to_ind = symb_to_ind
self._symb_to_var = symb_to_var
self._n_obs = data.shape[0]
if not options:
options = {}
self._options = options
self._vars_y = [i for i in self._exog_terms_y if (("*" not in i) & (i != "Cons"))]
self._ind_y = self._symb_to_ind["y"]
self._exog_inds_y = [self._symb_to_ind[var] for var in self._exog_terms_y]
self._vars_m = [i for i in self._exog_terms_m if (("*" not in i) & (i != "Cons"))]
self._endog_vars_m = ["m{}".format(i + 1) for i in range(self._n_meds)]
self._inds_m = [self._symb_to_ind[m] for m in self._endog_vars_m]
self._exog_inds_m = [self._symb_to_ind[var] for var in self._exog_terms_m]
self._compute_betas_m = fast_OLS
if self._options["logit"]:
max_iter = self._options["iterate"]
tolerance = self._options["convergence"]
self._compute_betas_y = partial(fast_optimize, n_obs=self._n_obs, n_vars=len(self._exog_inds_y),
max_iter=max_iter, tolerance=tolerance)
else:
self._compute_betas_y = fast_OLS
self._true_betas_y, self._true_betas_m = self._estimate_true_params()
self._boot_betas_y, self._boot_betas_m, self._n_fail_samples = self._estimate_bootstrapped_params()
self._base_derivs = self._gen_derivatives()
self._moderators_symb = mod_symb
self._moderators_values = [spot_values.get(i, [0]) for i in self._moderators_symb]
self._has_moderation = True if mod_symb else False
self._analysis_list = analysis_list
if self._has_moderation:
self.estimation_results = self._cond_ind_effects()
else:
self.estimation_results = self._simple_ind_effects()
def _estimate_true_params(self):
"""
Compute the true parameters for:
* The path from the predictors to Y (computed using OLS/Logit, depending on the nature of Y)
* The path(s) from the predictors to the mediator(s) M (computed using OLS)
:return: A tuple of (true_betas_y, true_betas_m)
* true_betas_y is a vector of size n_params_y
* true_betas_m is a list of vectors of size n_params_m
"""
# True betas of the path from Ms to Y
endog_y = self._data[:, self._ind_y]
exog_y = self._data[:, self._exog_inds_y]
true_betas_y = self._compute_betas_y(endog_y, exog_y)
# For each mediator Mi, true betas from X to Mi
true_betas_m = []
m_exog = self._data[:, self._exog_inds_m]
for m_ind in self._inds_m:
m_endog = self._data[:, m_ind]
betas = self._compute_betas_m(m_endog, m_exog)
true_betas_m.append(betas)
return true_betas_y, true_betas_m
def _estimate_bootstrapped_params(self):
"""
Compute the bootstrapped parameters for:
* The path from the predictors to Y (computed using OLS/Logit, depending on the nature of Y)
* The path(s) from the predictors to the mediator(s) M (computed using OLS)
:return: A tuple of (boot_betas_y, boot_betas_m, n_fail_samples)
* boot_betas_y is a matrix of size n_boots x n_params_y
* boot_betas_m is a list of matrices of size n_boots x n_params_m
* n_fail_samples is the number of bootstrap samples discarded because the estimation failed
"""
n_boots = self._options["boot"]
seed = self._options["seed"]
boot_betas_y = np.empty((n_boots, len(self._exog_terms_y)))
boot_betas_m = np.empty((self._n_meds, n_boots, len(self._exog_terms_m)))
n_fail_samples = 0
boot_ind = 0
sampler = bootstrap_sampler(self._n_obs, seed)
while boot_ind < n_boots:
ind = next(sampler)
data_boot = self._data[ind, :]
y_e = data_boot[:, self._ind_y]
y_x = data_boot[:, self._exog_inds_y]
try:
y_b = self._compute_betas_y(y_e, y_x)
m_x = data_boot[:, self._exog_inds_m]
boot_betas_y[boot_ind] = y_b
for j, m_ind in enumerate(self._inds_m):
m_e = data_boot[:, m_ind]
m_b = self._compute_betas_m(m_e, m_x)
boot_betas_m[j][boot_ind] = m_b
boot_ind += 1
except LinAlgError: # Hessian (Logit) or X'X (OLS) cannot be inverted
n_fail_samples += 1
return boot_betas_y, boot_betas_m, n_fail_samples
def _gen_derivatives(self):
"""
Generate the list of symbolic derivatives for the indirect path(s) from X to Y. The derivative of the path from
X to M is taken with respect to X, and the derivative of the path to Y is taken with respect to M.
For instance (Model 21), we consider the equation of x_to_m:
* The equation of x_to_m is: aConstant + bX + cW + dX*W. Rearranging for X: 1*(aConstant + cW) + X*(b + dW).
* The derivative of this expression is: (b + dW), or in matrix form: [0, 1, 0, W] * [a, b, c, d]
The first vector depends on the value of the moderator W: therefore, it cannot be represented numerically.
Instead, we express derivative using the following technique:
* Each term in the equation (i.e. Constant, X, W, X*W) is represented by a row.
* Each variable is represented by a column.
* The column for X (the variable with respect to which the equation is derivated) is equal to 0 if the
term does not contain X, and 1 otherwise
* The other columns are equal to the variable if the term contains the variable, and to 1 otherwise.
That way, the product of the columns is equal to the value of each term in the derivative:
X W
[[ 0, 1 ], # Value of the Constant term : 0*1 = 0
[ 1, 1 ], # Value of X term : 1*1 = 1
[ 0, W ], # Value of the W term: 0*W = 0
[ 1, W ]] # Value of the X*W: 1*W = W
The advantage of this matrix is that it is a symbolic expression, in which we can substitute for the values of
the moderators, and then take the product of columns to obtain the numerical representation of the derivative
as a vector.
:return: dict of matrices
A dictionary with keys 'x_to_m' and 'm_to_y':
'x_to_m' is the symbolic derivative of X to the mediator(s) M (one derivative)
'm_to_y' is the list of symbolic derivative(s) from the mediator(s) M to Y (n_meds derivative(s))
"""
derivs = {}
# Derivative of X to M
vars_m = self._vars_m
exog_terms_m = self._exog_terms_m
x_to_m = np.empty((len(vars_m), len(exog_terms_m)), dtype="object")
for j, var in enumerate(vars_m):
if var == "x":
x_to_m[j] = [1 if var in term else 0 for term in exog_terms_m]
else:
x_to_m[j] = [var if var in term else 1 for term in exog_terms_m]
derivs['x_to_m'] = x_to_m.T
list_m_to_y = []
for i in range(self._n_meds): # For all the mediators...
# ... derivate the path from M to Y (unique to each mediator)
vars_y = self._vars_y
exog_terms_y = self._exog_terms_y
m_to_y = np.empty((len(vars_y), len(exog_terms_y)), dtype="object")
for j, var in enumerate(vars_y):
if var == "m{}".format(i + 1):
m_to_y[j] = [1 if var in term else 0 for term in exog_terms_y]
else:
m_to_y[j] = [var if var in term else 1 for term in exog_terms_y]
list_m_to_y.append(m_to_y.T)
derivs['m_to_y'] = list_m_to_y
return derivs
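# Worked illustration (added; not part of the original source) of the docstring above: with
# exogenous terms [Cons, x, w, x*w] and a moderator value w = 2, the symbolic matrix
# [[0, 1], [1, 1], [0, w], [1, w]] becomes [[0, 1], [1, 1], [0, 2], [1, 2]]. Taking the product
# across each row gives [0, 1, 0, 2], and its dot product with the coefficients [a, b, c, d]
# is b + 2*d, i.e. the derivative of the equation with respect to x evaluated at w = 2.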
def _indirect_effect_at(self, med_index, mod_dict):
"""
Compute the indirect effect through a specific mediator at specific value(s) of the moderator(s)
:param med_index: int
Index of the mediator.
:param mod_dict: dict
None, or a mod_name:mod_value dictionary of moderator values.
:return: e: scalar
Effect at the moderator values
be: array
Effects for all bootstrap samples (N_Boots x 1)
se: scalar
Standard error based on bootstrap samples
llci: scalar
Lower level of CI based on bootstrap samples
ulci: scalar
Upper level of CI based on bootstrap samples
"""
conf = self._options["conf"]
der_x_to_m = self._base_derivs["x_to_m"]
der_m_to_y = self._base_derivs["m_to_y"][med_index]
expr_x_to_m = eval_expression(der_x_to_m, mod_dict)
expr_m_to_y = eval_expression(der_m_to_y, mod_dict)
# Generation of the effects and bootstrapped effects: product of m_der and y_der
e = dot(self._true_betas_y, expr_m_to_y) * dot(self._true_betas_m[med_index], expr_x_to_m)
be = dot(self._boot_betas_y, expr_m_to_y) * dot(self._boot_betas_m[med_index], expr_x_to_m)
se = be.std(ddof=1)
if self._options["percent"]:
llci, ulci = percentile_ci(be, conf)
else:
llci, ulci = bias_corrected_ci(e, be, conf)
return e, be, se, llci, ulci
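# Clarifying note (added; not part of the original source): in a simple mediation model with no
# moderators, expr_x_to_m reduces to a selector for the coefficient of x in the mediator equation
# (the "a" path) and expr_m_to_y to a selector for the coefficient of m_i in the outcome equation
# (the "b" path), so e above is the classic product-of-coefficients indirect effect a * b, and be
# is the same product computed in every bootstrap sample.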
def _get_conditional_indirect_effects(self, med_index, mod_symb, mod_values):
"""
Return the indirect effects for all combinations of the moderators mod_symb specified in mod_values.
:param med_index: int
Index of the mediator.
:param mod_symb: array
An array of moderator symbols
:param mod_values: matrix
A (N_Comb x N_Mods) matrix of all combinations of values for all moderator(s)
:return: e: array
Effects for all combinations of the moderator values (N_Comb x 1)
be: matrix
Effects for all combinations of the moderator values for all bootstrap samples (N_Comb x N_Boots)
se: array
SE based on bootstrap samples for all combinations of the moderator values (N_Comb x 1)
llci: array
LLCI based on bootstrap samples for all combinations of the moderator values (N_Comb x 1)
ulci: array
ULCI based on bootstrap samples for all combinations of the moderator values (N_Comb x 1)
"""
n_boots = self._options["boot"]
n_comb = len(mod_values)
e, se, llci, ulci = np.empty((4, n_comb))
be = np.empty((n_comb, n_boots))
for i, vals in enumerate(mod_values):
mod_dict = {k: v for k, v in zip(mod_symb, vals)}
e[i], be[i], se[i], llci[i], ulci[i] = self._indirect_effect_at(med_index, mod_dict)
return e, be, se, llci, ulci
def _simple_ind_effects(self):
"""
Generate the indirect effects.
This is done only if the indirect path from X to Y through M is not moderated.
If the option "total" is set to 1, then the total indirect effect is estimated.
If the option "contrast" is set to 1, then the pairwise contrasts between the different mediators are estimated.
:return: dict
A dictionary of lists "effect", "se", "llci", and "ulci".
"""
conf = self._options["conf"]
n_boots = self._options["boot"]
e = np.empty(self._n_meds)
be = np.empty((self._n_meds, n_boots))
for i in range(self._n_meds):
e[i], be[i], *_ = self._indirect_effect_at(i, {})
effects = []
se = []
llci, ulci = [], []
if self._options["total"]:
total_e = e.sum()
boot_total_e = be.sum(axis=0)
total_se = boot_total_e.std(ddof=1)
if self._options["percent"]:
total_ci = percentile_ci(boot_total_e, conf)
else:
total_ci = bias_corrected_ci(total_e, boot_total_e, conf)
effects.append(total_e)
se.append(total_se)
llci.append(total_ci[0])
ulci.append(total_ci[1])
for i in range(self._n_meds):
effects.append(e[i])
se.append(be[i].std(ddof=1))
if self._options["percent"]:
ci = percentile_ci(be[i], conf)
else:
ci = bias_corrected_ci(e[i], be[i], conf)
llci.append(ci[0])
ulci.append(ci[1])
if self._options["contrast"]:
inds = [i for i in range(self._n_meds)]
contrasts = combinations(inds, 2)
for i1, i2 in contrasts:
cont_e = e[i1] - e[i2]
boot_cont_e = be[i1] - be[i2]
cont_se = boot_cont_e.std(ddof=1)
if self._options["percent"]:
cont_ci = percentile_ci(boot_cont_e, conf)
else:
cont_ci = bias_corrected_ci(cont_e, boot_cont_e, conf)
effects.append(cont_e)
se.append(cont_se)
llci.append(cont_ci[0])
ulci.append(cont_ci[1])
statistics = [np.array(i).flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _cond_ind_effects(self):
"""
Generate the conditional indirect effects for all mediators.
:return: dict
A dictionary "effect", "se", "llci", and "ulci" of (N_Meds x N_Comb) matrices, corresponding to the
statistics for the N_Meds mediators at the N_Comb different levels of the moderator(s).
"""
mod_values = [i for i in product(*self._moderators_values)]
mod_symb = self._moderators_symb
n_combs = len(mod_values)
effects, se, llci, ulci = np.empty((4, self._n_meds, n_combs))
for i in range(self._n_meds):
effects[i], _, se[i], llci[i], ulci[i] = self._get_conditional_indirect_effects(i, mod_symb, mod_values)
statistics = [i.flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _PMM_index(self):
"""
The Partial Moderated Mediation (PMM) index is only computed when exactly two moderators are present on the
mediation path.
It represents the marginal impact of one moderator (i.e. the impact of an increase in one unit for this
moderator on the indirect effect), conditional on a value of zero for the other moderator.
"""
if "PMM" not in self._analysis_list:
raise ValueError("This model does not report the Index for Partial Moderated Mediation.")
conf = self._options["conf"]
n_boots = self._options["boot"]
mod1, mod2 = self._moderators_symb # Only two moderators
dict_baseline = dict([[mod1, 0], [mod2, 0]])
e_baseline, be_baseline = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod1 = dict([[mod1, 1], [mod2, 0]])
e_mod1, be_mod1 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod2 = dict([[mod1, 0], [mod2, 1]])
e_mod2, be_mod2 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
effects, se, llci, ulci = np.empty((4, 2, self._n_meds))
for i in range(self._n_meds):
e_baseline[i], be_baseline[i], *_ = self._indirect_effect_at(i, dict_baseline)
e_mod1[i], be_mod1[i], *_ = self._indirect_effect_at(i, dict_mod1)
e_mod2[i], be_mod2[i], *_ = self._indirect_effect_at(i, dict_mod2)
e_pmm1 = e_mod1[i] - e_baseline[i] # Moderator1 at 1 vs. Moderator1 at 0
e_pmm2 = e_mod2[i] - e_baseline[i] # Moderator2 at 1 vs. Moderator2 at 0
be_pmm1 = be_mod1[i] - be_baseline[i]
be_pmm2 = be_mod2[i] - be_baseline[i]
effects[0][i] = e_pmm1
se[0][i] = be_pmm1.std(ddof=1)
if self._options["percent"]:
llci[0][i], ulci[0][i] = percentile_ci(be_pmm1, conf)
else:
llci[0][i], ulci[0][i] = bias_corrected_ci(e_pmm1, be_pmm1, conf)
effects[1][i] = e_pmm2
se[1][i] = be_pmm2.std(ddof=1)
if self._options["percent"]:
llci[1][i], ulci[1][i] = percentile_ci(be_pmm2, conf)
else:
llci[1][i], ulci[1][i] = bias_corrected_ci(e_pmm2, be_pmm2, conf)
statistics = [i.flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _MMM_index(self):
"""
"""
if "MMM" not in self._analysis_list:
raise ValueError("This model does not report the Index for Moderated Moderated Mediation.")
conf = self._options["conf"]
n_boots = self._options["boot"]
mod1, mod2 = self._moderators_symb # Only two moderators
dict_baseline = dict([[mod1, 1], [mod2, 1]])
e_baseline, be_baseline = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod1 = dict([[mod1, 2], [mod2, 0]])
e_mod1, be_mod1 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
dict_mod2 = dict([[mod1, 0], [mod2, 2]])
e_mod2, be_mod2 = np.empty(self._n_meds), np.empty((self._n_meds, n_boots))
effects, se, llci, ulci = np.empty((4, 1, self._n_meds))
for i in range(self._n_meds):
e_baseline[i], be_baseline[i], *_ = self._indirect_effect_at(i, dict_baseline)
e_mod1[i], be_mod1[i], *_ = self._indirect_effect_at(i, dict_mod1)
e_mod2[i], be_mod2[i], *_ = self._indirect_effect_at(i, dict_mod2)
e_pmm = e_baseline[i] - (e_mod1[i] + e_mod2[i]) / 2
be_pmm = be_baseline[i] - (be_mod1[i] + be_mod2[i]) / 2  # mirror the point estimate above in each bootstrap sample
effects[0][i] = e_pmm
se[0][i] = be_pmm.std(ddof=1)
if self._options["percent"]:
llci[0][i], ulci[0][i] = percentile_ci(be_pmm, conf)
else:
llci[0][i], ulci[0][i] = bias_corrected_ci(e_pmm, be_pmm, conf)
statistics = [i.flatten() for i in [effects, se, llci, ulci]]
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _CMM_index(self):
"""
"""
if "CMM" not in self._analysis_list:
raise ValueError("This model does not report the Index for Conditional Moderated Mediation.")
conf = self._options["conf"]
mod1, mod2 = self._moderators_symb
mod1_val, mod2_val = self._moderators_values
n_levels_mod1 = len(mod1_val)
n_levels_mod2 = len(mod2_val)
effects_mod1, se_mod1, llci_mod1, ulci_mod1 = np.empty((4, self._n_meds, n_levels_mod1))
effects_mod2, se_mod2, llci_mod2, ulci_mod2 = np.empty((4, self._n_meds, n_levels_mod2))
for i in range(self._n_meds):
for j, val in enumerate(mod1_val):  # CMM index of Moderator 2, at the values of Moderator 1
dict_off = dict([[mod1, val], [mod2, 0]])
dict_on = dict([[mod1, val], [mod2, 1]])
e_off, be_off, *_ = self._indirect_effect_at(i, dict_off)
e_on, be_on, *_ = self._indirect_effect_at(i, dict_on)
e_cmm = e_on - e_off
be_cmm = be_on - be_off
effects_mod1[i][j] = e_cmm
se_mod1[i][j] = be_cmm.std(ddof=1)
if self._options["percent"]:
llci_mod1[i][j], ulci_mod1[i][j] = percentile_ci(be_cmm, conf)
else:
llci_mod1[i][j], ulci_mod1[i][j] = bias_corrected_ci(e_cmm, be_cmm, conf)
for j, val in enumerate(mod2_val):  # CMM index of Moderator 1, at the values of Moderator 2
dict_off = dict([[mod1, 0], [mod2, val]])
dict_on = dict([[mod1, 1], [mod2, val]])
e_off, be_off, *_ = self._indirect_effect_at(i, dict_off)
e_on, be_on, *_ = self._indirect_effect_at(i, dict_on)
e_cmm = e_on - e_off
be_cmm = be_on - be_off
effects_mod2[i][j] = e_cmm
se_mod2[i][j] = be_cmm.std(ddof=1)
if self._options["percent"]:
llci_mod2[i][j], ulci_mod2[i][j] = percentile_ci(be_cmm, conf)
else:
llci_mod2[i][j], ulci_mod2[i][j] = bias_corrected_ci(e_cmm, be_cmm, conf)
stats_mod1 = [i.flatten() for i in [effects_mod1, se_mod1, llci_mod1, ulci_mod1]]
stats_mod2 = [i.flatten() for i in [effects_mod2, se_mod2, llci_mod2, ulci_mod2]]
statistics = np.concatenate([stats_mod1, stats_mod2], axis=1)
return {k: v for k, v in zip(["effect", "se", "llci", "ulci"], statistics)}
def _cond_ind_effects_wrapper(self):
"""
A wrapper for the conditional indirect effects.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the conditional indirect effects.
"""
symb_to_var = self._symb_to_var
results = self.estimation_results
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Effect", "Boot SE", "BootLLCI", "BootULCI"]
mod_values = self._moderators_values
med_values = [[symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]]
values = med_values + mod_values
rows_levels = np.array([i for i in product(*values)])
cols_levels = ["Mediator"] + [symb_to_var.get(x, x) for x in self._moderators_symb]
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _simple_ind_effects_wrapper(self):
"""
A wrapper for the indirect effects (and for total/contrast effects if specified)
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the simple/total/constrasts of indirect effects.
"""
symb_to_var = self._symb_to_var
results = self.estimation_results
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
med_names = [symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]
rows_levels = []
if self._options["total"]:
rows_levels += ["TOTAL"]
rows_levels += med_names
if self._options["contrast"]:
contrasts = ["Contrast: {} vs. {}".format(a, b) for a, b in combinations(med_names, 2)]
rows_levels += contrasts
rows_levels = np.array(rows_levels).reshape(-1, 1)
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = ["", "Effect", "Boot SE", "BootLLCI", "BootULCI"]
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _PMM_index_wrapper(self):
"""
A wrapper for the Partial Moderated Mediation index.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the PMM index.
"""
symb_to_var = self._symb_to_var
results = self._PMM_index()
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Index", "Boot SE", "LLCI", "ULCI"]
mod_names = [[symb_to_var.get(i, i) for i in self._moderators_symb]]
med_names = [[symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]]
values = mod_names + med_names
rows_levels = np.array([i for i in product(*values)])
cols_levels = ["Moderator", "Mediator"]
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _CMM_index_wrapper(self):
"""
A wrapper for the Conditional Moderated Mediation index.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the CMM index.
"""
symb_to_var = self._symb_to_var
results = self._CMM_index()
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Index", "Boot SE", "BootLLCI", "BootULCI"]
mod1_name, mod2_name = [symb_to_var.get(i, i) for i in self._moderators_symb]
mod1_values, mod2_values = self._moderators_values
med_names = [symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]
rows_modname = [mod2_name] * len(mod1_values) * self._n_meds + [mod1_name] * len(mod2_values) * self._n_meds
rows_modname = np.reshape(rows_modname, (-1, 1))
rows_medname = np.concatenate([np.repeat(med_names, len(mod1_values)), np.repeat(med_names, len(mod2_values))])
rows_medname = np.reshape(rows_medname, (-1, 1))
rows_modvalues = np.concatenate([np.tile(mod1_values, self._n_meds), np.tile(mod2_values, self._n_meds)])
rows_modvalues = np.reshape(rows_modvalues, (-1, 1))
cols_levels = ["Focal Mod", "Mediator", "Other Mod At"]
rows_levels = np.concatenate([rows_modname, rows_medname, rows_modvalues], axis=1)
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def _MMM_index_wrapper(self):
"""
A wrapper for the Moderated Moderated Mediation index.
:return: pd.DataFrame
A DataFrame of effects, se, llci, and ulci, for the CMM index.
"""
symb_to_var = self._symb_to_var
results = self._MMM_index()
rows_stats = np.array([results["effect"], results["se"], results["llci"], results["ulci"]]).T
cols_stats = ["Index", "Boot SE", "BootLLCI", "BootULCI"]
med_names = [[symb_to_var.get('m{}'.format(i + 1), 'm{}'.format(i + 1)) for i in range(self._n_meds)]]
rows_levels = np.array([i for i in product(*med_names)])
cols_levels = ["Mediator"]
rows = np.concatenate([rows_levels, rows_stats], axis=1)
cols = cols_levels + cols_stats
df = pd.DataFrame(rows, columns=cols, index=[""] * rows.shape[0])
return df.apply(pd.to_numeric, args=["ignore"])
def MMM_index_summary(self):
if "MMM" in self._analysis_list:
return self._MMM_index_wrapper()
else:
raise NotImplementedError("This model does not reported the Moderated Moderated Mediation index.")
def PMM_index_summary(self):
if "PMM" in self._analysis_list:
return self._PMM_index_wrapper()
else:
raise NotImplementedError("This model does not reported the Partial Moderated Mediation index.")
def CMM_index_summary(self):
if "CMM" in self._analysis_list:
return self._CMM_index_wrapper()
else:
raise NotImplementedError("This model does not reported the Conditional Moderated Mediation index.")
def coeff_summary(self):
"""
Get the summary of the indirect effect(s).
:return: The appropriate moderated/unmoderated effect(s).
"""
return self._cond_ind_effects_wrapper() if self._has_moderation else self._simple_ind_effects_wrapper()
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the coefficients in a nicer way.
:return: A string to display.
"""
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
analysis_func = {"PMM": ('PARTIAL MODERATED MEDIATION', self._PMM_index_wrapper),
"MMM": ('MODERATED MODERATED MEDIATION', self._MMM_index_wrapper),
"CMM": ('CONDITIONAL MODERATED MEDIATION', self._CMM_index_wrapper)}
symb_to_var = self._symb_to_var
if self._has_moderation:
basestr = "Conditional indirect effect(s) of {x} on {y} at values of the moderator(s):\n\n" \
"{coeffs}\n\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
else:
basestr = "Indirect effect of {x} on {y}:\n\n" \
"{coeffs}\n\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
for a in self._analysis_list:
name, get_results = analysis_func[a]
results = get_results()
basestr += "**************** INDEX OF {name} ******************\n\n" \
"{results}\n\n".format(name=name, results=results.to_string(float_format=float_format))
return basestr
def __str__(self):
return self.summary()
class BaseOutcomeModel(object):
"""
A statistical model reflecting the path from independent predictors (X, or X and M)
to an endogenous outcome (Y, or M).
"""
def __init__(self, data, endogvar, exogvars, symb_to_ind, symb_to_var, options=None):
"""
Instantiate the model.
:param data: np.array
A NxK array of data
:param endogvar: string
The name of the endogenous variable.
:param exogvars: list of strings
The names of the exogenous variables.
:param symb_to_ind: dict of int
A dictionary mapping variable symbols to indices.
:param symb_to_var: dict of strings
A dictionary mapping variable symbols to names.
:param options: dict
A dictionary of options.
"""
if options is None:
options = {}
self._data = data
self._endogvar = endogvar
self._exogvars = exogvars
self._symb_to_ind = symb_to_ind
self._symb_to_var = symb_to_var
if not options:
options = {}
self._options = options
endog_ind = self._symb_to_ind[self._endogvar]
exog_ind = [self._symb_to_ind[var] for var in self._exogvars]
self._endog = data[:, endog_ind]
self._exog = data[:, exog_ind]
self._n_obs = self._exog.shape[0]
self._n_vars = self._exog.shape[1]
self._varnames = [i for i in self._exogvars if (("*" not in i) & (i != "Cons"))]
self._derivative = self._gen_derivative(wrt="x")
self.estimation_results = self._estimate()
def _gen_derivative(self, wrt):
"""
Generate a symbolic derivative of the equation with respect to the variable 'wrt', and stores it in a matrix.
For instance (Model 21), we consider the equation aConstant + bX + cW + dX*W, that we derivate wrt to X:
* The rearranged equation for X is: 1*(aConstant + cW) + X*(b + dW).
* The derivative of this expression is: (b + dW), or in matrix form: [0, 1, 0, W] * [a, b, c, d]
The first vector depends on the value of the moderator W: therefore, it cannot be represented numerically.
Instead, we express derivative using the following technique:
* Each term in the equation (i.e. Constant, X, W, X*W) is represented by a row.
* Each variable is represented by a column.
* The column for X (the variable with respect to which the equation is derivated) is equal to 0 if the
term does not contain X, and 1 otherwise
* The other columns are equal to the variable if the term contains the variable, and to 1 otherwise.
That way, the product of the columns is equal to the value of each term in the derivative:
X W
[[ 0, 1 ], # Value of the Constant term : 0*1 = 0
[ 1, 1 ], # Value of X term : 1*1 = 1
[ 0, W ], # Value of the W term: 0*W = 0
[ 1, W ]] # Value of the X*W: 1*W = W
The advantage of this matrix is that it is a symbolic expression, in which we can substitute for the values of
the moderators, and then take the product of columns to obtain the numerical representation of the derivative
as a vector.
:return: A matrix of size (n_terms x n_vars)
"""
deriv = np.empty((len(self._varnames), len(self._exogvars)), dtype="object")
for i, var in enumerate(self._varnames):
if var == wrt:
deriv[i] = [1 if var in term else 0 for term in self._exogvars]
else:
deriv[i] = [var if var in term else 1 for term in self._exogvars]
return deriv.T
def coeff_summary(self):
"""
Get the estimates of the terms in the model.
:return: A DataFrame of betas, se, t (or z), p, llci, ulci for all variables of the model.
"""
results = self.estimation_results
if results:
if "t" in results.keys(): # Model has t-stats rather than z-stats
coeffs = np.array(
[results["betas"], results["se"], results["t"], results["p"], results["llci"], results["ulci"]]).T
df = pd.DataFrame(coeffs, index=results["names"],
columns=["coeff", "se", "t", "p", "LLCI", "ULCI"])
else: # Model has z-stats.
coeffs = np.array(
[results["betas"], results["se"], results["z"], results["p"], results["llci"], results["ulci"]]).T
df = pd.DataFrame(coeffs, index=results["names"],
columns=["coeff", "se", "Z", "p", "LLCI", "ULCI"])
else:
raise NotImplementedError(
"The model has not been estimated yet. Please estimate the model first."
)
return df
def _estimate(self):
pass
class OLSOutcomeModel(BaseOutcomeModel):
"""
An OLS subclass for OutcomeModels. Implement methods specific to the OLS estimation.
"""
def _estimate(self):
"""
Estimate the coefficients and statistics of the OLS model, and store the results in a dictionary of
estimation_results.
:return: self
"""
y = self._endog
x = self._exog
n_obs = self._n_obs
n_vars = self._n_vars
inv_xx = inv(dot(x.T, x))
xy = dot(x.T, y)
betas = dot(inv_xx, xy)
df_e = n_obs - n_vars
df_r = n_vars - 1
resid = y - dot(x, betas)
mse = (resid ** 2).sum() / df_e
sse = dot(resid.T, resid) / df_e
errortype = "standard" if self._options["hc3"] == 1 else "HC3"
if errortype == 'standard':
vcv = np.true_divide(1, n_obs - n_vars) * dot(resid.T, resid) * inv_xx
elif errortype == 'HC0':
sq_resid = (resid ** 2).squeeze()
vcv = dot(dot(dot(inv_xx, x.T) * sq_resid, x), inv_xx)
elif errortype == 'HC1':
sq_resid = (resid ** 2).squeeze()
vcv = np.true_divide(n_obs, n_obs - n_vars - 1) * \
dot(dot(dot(inv_xx, x.T) * sq_resid, x), inv_xx)
elif errortype == 'HC2':
sq_resid = (resid ** 2).squeeze()
H = (x.dot(inv_xx) * x).sum(axis=-1)
vcv = dot(dot(dot(inv_xx, x.T) * (sq_resid / (1 - H)), x), inv_xx)
elif errortype == 'HC3':
sq_resid = (resid ** 2).squeeze()
H = (x.dot(inv_xx) * x).sum(axis=-1)
vcv = dot(dot(dot(inv_xx, x.T) * (sq_resid / ((1 - H) ** 2)), x), inv_xx)
else:
raise ValueError("The covariance type {} is not supported. Please specify 'standard', 'HC0'"
"'HC1', 'HC2', or 'HC3".format(errortype))
betas = betas.squeeze()
se = np.sqrt(np.diagonal(vcv)).squeeze()
t = betas / se
p = stats.t.sf(np.abs(t), df_e) * 2
conf = self._options["conf"]
zscore = z_score(conf)
R2 = 1 - resid.var() / y.var()
adjR2 = 1 - (1 - R2) * ((n_obs - 1) / (n_obs - n_vars - 1))
F = (R2 / df_r) / ((1 - R2) / df_e)
F_pval = 1 - stats.f._cdf(F, df_r, df_e)
llci = betas - (se * zscore)
ulci = betas + (se * zscore)
names = [self._symb_to_var.get(x, x) for x in self._exogvars]
estimation_results = {"betas": betas,
"se": se,
"vcv": vcv,
"t": t,
"p": p,
"R2": R2,
"adjR2": adjR2,
"df_e": int(df_e),
"df_r": int(df_r),
"mse": mse,
"F": F,
"sse": sse,
"F_pval": F_pval,
"llci": llci,
"ulci": ulci,
"names": names,
"n": int(n_obs)}
return estimation_results
def model_summary(self):
"""
The summary of the model statistics: R², F-stats, etc...
:return: A DataFrame of model statistics
"""
results = self.estimation_results
stats = ["R2", "adjR2", "mse", "F", "df_r", "df_e", "F_pval"]
row = [[results[s] for s in stats]]
df = pd.DataFrame(row, index=[""], columns=["R²", "Adj. R²", "MSE", "F", "df1", "df2", "p-value"])
return df
def coeff_summary(self):
"""
The summary of the OLS estimates for the model: betas, se, t, p-values, etc...
:return: A DataFrame of coefficient statistics
"""
return super().coeff_summary()
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the model and coefficients in a nicer way.
:return: A string to display.
"""
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
basestr = ("Outcome = {} \n"
"OLS Regression Summary\n\n{}\n\n"
"Coefficients\n\n{}".format(self._symb_to_var[self._endogvar],
self.model_summary().to_string(float_format=float_format),
self.coeff_summary().to_string(float_format=float_format)))
return basestr
def __str__(self):
return self.summary()
class LogitOutcomeModel(BaseOutcomeModel, BaseLogit):
"""
A Logit subclass for OutcomeModels. Implement methods specific to the Logistic estimation.
"""
def _estimate(self):
"""
Estimate the coefficients and statistics of the Logistic model, and store the results in a dictionary of
estimation_results.
:return: self
"""
betas = self._optimize()
vcv = inv(self._hessian(betas))
se = np.sqrt(np.diagonal(vcv)).squeeze()
z = betas / se
p = stats.norm.sf(np.abs(z)) * 2
conf = self._options["conf"]
zscore = z_score(conf)
llci = betas - (se * zscore)
ulci = betas + (se * zscore)
# GOF statistics
llmodel = self._loglike(betas)
lmodel = np.exp(llmodel)
minus2ll = -2 * llmodel
null_model = NullLogitModel(self._endog, self._options)
betas_null = null_model._optimize()
llnull = null_model._loglike(betas_null)
lnull = np.exp(llnull)
d = 2 * (llmodel - llnull)
pvalue = stats.chi2.sf(d, self._n_vars - 1)
mcfadden = 1 - llmodel / llnull
coxsnell = 1 - (lnull / lmodel) ** (2 / self._n_obs)
nagelkerke = coxsnell / (1 - lnull ** (2 / self._n_obs))
names = [self._symb_to_var.get(x, x) for x in self._exogvars]
estimation_results = {"betas": betas,
"se": se,
"vcv": vcv,
"z": z,
"p": p,
"llci": llci,
"ulci": ulci,
"mcfadden": mcfadden,
"coxsnell": coxsnell,
"nagelkerke": nagelkerke,
"d": d,
"minus2ll": minus2ll,
"pvalue": pvalue,
"n": int(self._n_obs),
"names": names}
return estimation_results
def model_summary(self):
"""
The summary of the model statistics: Model LL, pseudo R², etc...
:return: A DataFrame of model statistics
"""
results = self.estimation_results
row = [[results[i] for i in ["minus2ll", "d", "pvalue", "mcfadden", "coxsnell", "nagelkerke", "n"]]]
return pd.DataFrame(row, index=[""],
columns=["-2LL", "Model LL", "p-value", "McFadden", "Cox-Snell", "Nagelkerke", "n"])
def coeff_summary(self):
"""
The summary of the OLS estimates for the model: betas, se, t, p-values, etc...
:return: A DataFrame of coefficient statistics
"""
return super().coeff_summary()
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the model and coefficients in a nicer way.
:return: A string to display.
"""
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
basestr = ("\n**************************************************************************\n"
"Outcome = {} \n"
"Logistic Regression Summary\n\n{}\n\n"
"Coefficients\n\n{}".format(self._symb_to_var[self._endogvar],
self.model_summary().to_string(float_format=float_format),
self.coeff_summary().to_string(float_format=float_format)))
return basestr
def __str__(self):
return self.summary()
class DirectEffectModel(object):
def __init__(self, model, mod_symb, spot_values, has_mediation, symb_to_var, options=None):
"""
A container for the direct effect of the variable X on the outcome Y. If the model includes one or several
moderators of X, this container returns the conditional direct effects.
:param model: process.OutcomeModel
The OutcomeModel object of the outcome Y.
:param mod_symb: list of string
The symbols of the moderators of the direct effect.
:param symb_to_var: dict of string
The dictionary mapping each symbol to a variable name.
:param options: dict
The options of the model.
"""
self._model = model
self._is_logit = isinstance(model, LogitOutcomeModel)
self._symb_to_var = symb_to_var
self._derivative = self._model._derivative
self._has_mediation = has_mediation
self._moderators_symb = mod_symb
self._moderators_values = [spot_values.get(i, [0]) for i in self._moderators_symb]
self._has_moderation = True if self._moderators_symb else False
if not options:
options = {}
self._options = options
self._estimation_results = self._estimate()
def _estimate(self):
"""
Estimates the direct effect of X on Y, and return the results into as a dictionary.
:return: dict
A dictionary of parameters and model estimates.
"""
mod_values = [i for i in product(*self._moderators_values)]
mod_symb = self._moderators_symb
betas, se, llci, ulci = self._get_conditional_direct_effects(mod_symb, mod_values)
t = betas / se
if self._is_logit:
p = stats.norm.sf(np.abs(t)) * 2
else:
df_e = self._model.estimation_results["df_e"]
p = stats.t.sf(np.abs(t), df_e) * 2
estimation_results = {"betas": betas,
"se": se,
"t": t,
"p": p,
"llci": llci,
"ulci": ulci}
return estimation_results
def _get_conditional_direct_effects(self, mod_symb, mod_values):
"""
Estimates the conditional direct effects of X on Y, at different values of the moderator(s)
:param mod_symb: list of string
A list of moderator symbols
:param mod_values: array of int/float
A list of lists of spotlight values for each moderator.
:return:
"""
betas, se, llci, ulci = np.zeros((4, len(mod_values)))
for i, val in enumerate(mod_values): # All possible products of level(s) of moderator(s)
mod_dict = {n: v for n, v in zip(mod_symb, val)}
betas[i], se[i], llci[i], ulci[i] = self._direct_effect_at(mod_dict)
return betas, se, llci, ulci
def _direct_effect_at(self, mod_dict):
"""
Compute the direct effect at specific value(s) of the moderator(s)
:param mod_dict: dict
None, or a mod_symb:mod_value dictionary of moderator values.
:return: e: scalar
Effect at the moderator values
se: scalar
Standard error
llci: scalar
Lower level of CI based on normal theory
ulci: scalar
Upper level of CI based on normal theory
"""
conf = self._options["conf"]
b = self._model.estimation_results["betas"]
vcv = self._model.estimation_results["vcv"]
deriv = self._derivative
grad = eval_expression(deriv, mod_dict) # Gradient at level(s) of the moderator(s)
betas = dot(grad, b) # Estimate is dot product of gradient and coefficients
var = dot(dot(grad, vcv), np.transpose(grad)) # V(Grad(X)) = Grad(X).V(X).Grad'(X)
se = np.sqrt(var)
zscore = z_score(conf)
llci = betas - (se * zscore)
ulci = betas + (se * zscore)
return betas, se, llci, ulci
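# Worked illustration (added; not part of the original source): for an outcome equation
# y = b0 + b1*x + b2*w + b3*x*w, the gradient of the effect of x at w = W is [0, 1, 0, W], so the
# conditional direct effect is b1 + b3*W and its variance is Var(b1) + W**2 * Var(b3) + 2*W*Cov(b1, b3),
# which is exactly the quadratic form dot(dot(grad, vcv), grad) computed above.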
def coeff_summary(self):
"""
The summary of the direct effect(s): betas, se, t, p-values, etc...
:return: pd.DataFrame
A DataFrame of coefficient statistics
"""
if self._estimation_results:
symb_to_var = self._symb_to_var
results = self._estimation_results
statistics = [results["betas"], results["se"], results["t"], results["p"], results["llci"],
results["ulci"]]
coeffs_rows = np.array([i.flatten() for i in statistics]).T
if self._is_logit:
coeffs_columns = ["Effect", "SE", "Z", "p", "LLCI", "ULCI"]
else:
coeffs_columns = ["Effect", "SE", "t", "p", "LLCI", "ULCI"]
mod_rows = np.array([i for i in product(*self._moderators_values)])
mod_columns = [symb_to_var.get(x, x) for x in self._moderators_symb]
rows = np.concatenate([mod_rows, coeffs_rows], axis=1)
columns = mod_columns + coeffs_columns
df = pd.DataFrame(rows, columns=columns, index=[""] * rows.shape[0])
return df
else:
raise NotImplementedError("The model has not been estimated yet. Please estimate the model first.")
def summary(self):
"""
Pretty-print the summary with text. Used by Process to display the coefficients in a nicer way.
:return: string
The text summary of the model.
"""
symb_to_var = self._symb_to_var
prec = self._options["precision"]
float_format = partial('{:.{prec}f}'.format, prec=prec)
if self._has_mediation:
if self._has_moderation:
basestr = "Conditional direct effect(s) of {x} on {y} at values of the moderator(s):\n\n" \
"{coeffs}\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
else:
basestr = "Direct effect of {x} on {y}:\n\n" \
"{coeffs}\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
else:
basestr = "Conditional effect(s) of {x} on {y} at values of the moderator(s):\n\n" \
"{coeffs}\n".format(x=symb_to_var["x"], y=symb_to_var["y"],
coeffs=self.coeff_summary().to_string(float_format=float_format))
return basestr
def __str__(self):
return self.summary()
``` |
{
"source": "joonsauce/sus-bot",
"score": 3
} |
#### File: joonsauce/sus-bot/redditAPI.py
```python
from setting import *
# sets parameters for using AsyncPRAW
reddit = asyncpraw.Reddit(
client_id = reddit_id,
client_secret = reddit_secret,
user_agent = reddit_agent,
)
# susmeme command; sends random meme from r/amongusmemes
@bot.command()
async def susmeme(ctx):
# sets the subreddit the bot pulls the information from, just change the string to the subreddit you want
subreddit = await reddit.subreddit("amongusmemes")
# gets a random post from the subreddit
image_link = await subreddit.random()
# makes bot verify if the post is stickied or nsfw and that the link is an image
if not image_link.stickied and image_link.over_18 is False \
and image_link.url.endswith(('jpg', 'jpeg', 'png')):
# sets embed as discord embed
embed = discord.Embed(
# sets color theme for embed
colour=discord.Colour.red()
)
# sets title of embed as the title of the post
embed.set_author(name=image_link.title)
# sets the image as the image linked with the random post
embed.set_image(url=image_link.url)
# makes bot send the embed as a message
await ctx.send(embed=embed)
# if verification fails, send out an error message; this may be changed to repeat until it works
else:
# makes bot send error message
await ctx.send("An error has occured, please try again later. Code: sbsusm_verificationFailed")
```
#### File: joonsauce/sus-bot/roll.py
```python
from setting import *
from roll_functions import *
# roll command; makes bot run a simulated gamble
@bot.command()
async def roll(ctx, *, msg=''):
if not msg:
await ctx.send("Please enter the amount of susCash you wish to use. Code: sbroll_nosusCashEntered")
else:
try:
bet = int(msg)
except ValueError:
await ctx.send("The amount of susCash you entered is not a number. Code: sbroll_wrongsusCash")
else:
if bet <= 0:
await ctx.send("Please enter a positive number to bet.")
else:
data = getRollData()
if data == -1:
await ctx.send("There has been an error. Please try again later. Code: sbroll_getRollData")
else:
user_there = findUser(str(ctx.author.id))
if user_there == -1:
await ctx.send("There has been an error. Please try again later. Code: sbroll_findUser")
elif user_there == -2:
response = addUser(str(ctx.author.id))
if response == -1:
await ctx.send("There has been an error. Please try again later. Code: sbroll_addUser")
else:
user_there = findUser(str(ctx.author.id))
if user_there == -1:
await ctx.send("There has been an error. Please try again later. Code: sbroll_findUser")
elif user_there == -2:
await ctx.send(
"There has been an error. Please try again later. Code: sbroll_unknownfindUser")
else:
data = getRollData()
if data == -1:
await ctx.send(
"There has been an error. Please try again later. Code: sbroll_getRollData")
else:
result = verifyResults(data, user_there, bet)
await ctx.send(result)
else:
result = verifyResults(data, user_there, bet)
await ctx.send(result)
# bal command; allows user to check how much susCash they have
@bot.command()
async def bal(ctx):
data = getRollData()
if data == -1:
await ctx.send("There has been an error. Please try again later. Code: sbbal_getRollData")
else:
user_there = findUser(str(ctx.author.id))
if user_there == -1:
await ctx.send("There has been an error. Please try again later. Code: sbbal_findUser")
elif user_there == -2:
response = addUser(str(ctx.author.id))
if response == -1:
await ctx.send("There has been an error. Please try again later. Code: sbbal_addUser")
else:
user_there = findUser(str(ctx.author.id))
if user_there == -1:
await ctx.send("There has been an error. Please try again later. Code: sbroll_findUser")
else:
pass
data = getRollData()
if data == -1:
await ctx.send("There has been an error. Please try again later. Code: sbroll_getRollData")
else:
total = int(data["records"][user_there]["fields"]["sus"])
await ctx.send("You currently have {0} susCash under your account.".format(total))
``` |
{
"source": "JoonseoKang/mcan-cap",
"score": 2
} |
#### File: JoonseoKang/mcan-cap/cal_sim.py
```python
from core.data.ans_punct import prep_ans
import numpy as np
import en_vectors_web_lg, random, re, json
import json
from core.data.data_utils import ques_load
import pandas as pd
from numpy import dot
from numpy.linalg import norm
import numpy as np
stat_ques_list = \
json.load(open('./datasets/caption/train_cap2.json', 'r'))['data'] + \
json.load(open('./datasets/caption/val_cap.json', 'r'))['data'] + \
json.load(open('./datasets/caption/test_cap.json', 'r'))['data']
def tokenize(stat_ques_list, use_glove):
token_to_ix = {
'PAD': 0,
'UNK': 1,
}
spacy_tool = None
pretrained_emb = []
if use_glove:
spacy_tool = en_vectors_web_lg.load()
pretrained_emb.append(spacy_tool('PAD').vector)
pretrained_emb.append(spacy_tool('UNK').vector)
for ques in stat_ques_list:
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
ques['question'].lower()
).replace('-', ' ').replace('/', ' ').split()
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
for ques in stat_ques_list:
words = re.sub(
r"([.,'!?\"()*#:;])",
'',
ques['caption'].lower()
).replace('-', ' ').replace('/', ' ').split()
for word in words:
if word not in token_to_ix:
token_to_ix[word] = len(token_to_ix)
if use_glove:
pretrained_emb.append(spacy_tool(word).vector)
pretrained_emb = np.array(pretrained_emb)
return token_to_ix, pretrained_emb
token_to_ix, pretrained_emb = tokenize(stat_ques_list, True)
with open('datasets/caption/train_cap2.json') as train_cap:
train_cap = json.load(train_cap)
with open('datasets/caption/val_cap.json') as val_cap:
val_cap = json.load(val_cap)
with open('datasets/caption/test_cap.json') as test_cap:
test_cap = json.load(test_cap)
# df_train = pd.DataFrame(train_cap['data'])
# df_val = pd.DataFrame(val_cap['data'])
# df_test = pd.DataFrame(test_cap['data'])
from core.data.ans_punct import prep_ans
def txt2vec(sentence):
# s = sentence.split()
tt = []
new_i = re.sub(
r"([.,'!?\"()*#:;])",
'',
sentence.lower()
).replace('-', ' ').replace('/', ' ').split()
for i in new_i:
if i in token_to_ix:
num = token_to_ix[i]
tt.append(pretrained_emb[num])
else:
# num = token_to_ix['UNK']
tt.append(pretrained_emb[1])
return tt
# bundesbahn
# txt2vec('bundesbahn')
# token_to_ix['bundesbahn']
# txt2vec('junseo')
def cos_sim(A, B):
return np.matmul(A, np.transpose(B)) / (norm(A) * norm(B))
def word_sim(w1,w2): #word simiarity
s = 0.5 * (1+ cos_sim(w1,w2))
return s
# word_sim(pretrained_emb[1281], pretrained_emb[2154])
# token_to_ix['bad']
# token_to_ix['good']
# cos_sim(pretrained_emb[1073], pretrained_emb[168])
# word_sim(pretrained_emb[1073], pretrained_emb[168])
def sent_sim(sent1, sent2):
sent2vec1 = txt2vec(sent1) #question
sent2vec2 = txt2vec(sent2) #caption
global sent_similarity
sent_tmp = []
for i in sent2vec1:
vec_tmp = []
for j in sent2vec2:
tmp_sim = word_sim(i, j)
vec_tmp.append(tmp_sim)
sent_tmp.append(max(vec_tmp))
sent_similarity = sum(sent_tmp) / len(sent2vec1)
return sent_similarity
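# Clarifying example (added; not in the original script): sent_sim is a greedy, asymmetric
# word-alignment score. Every word of the first sentence is matched to its most similar word in
# the second sentence (via word_sim), and the matches are averaged. Assuming all words have
# non-zero GloVe vectors:
#   sent_sim('a red apple', 'a red apple')    # -> 1.0, since every word matches itself
#   sent_sim('a red apple', 'a green apple')  # < 1.0, since 'red' only finds an imperfect match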
# sent_sim('is there a apple?', 'an apple is on the table')
# sent_sim('hello', 'hello')
# train_cap['data'][0]['question']
# train_cap['data'][0]['caption']
# sent_sim(train_cap['data'][0]['question'], train_cap['data'][0]['caption'])
# train_cap['data'][0]['similarity'] = sent_sim(train_cap['data'][0]['question'], train_cap['data'][0]['caption'])
######train_answer similarity########
# with open('./datasets/caption/train_qacap.json') as train_qacap:
# train_qacap = json.load(train_qacap)
#
# # def ans_stat(json_file):
# # ans_to_ix, ix_to_ans = json.load(open(json_file, 'r'))
# #
# # return ans_to_ix, ix_to_ans
#
# # ans_to_ix, ix_to_ans = ans_stat('./core/data/answer_dict.json')
#
# for i in train_qacap['data']:
# i['answer'] = prep_ans(i['multiple_choice_answer'])
#
# for i in train_qacap['data']:
# del i['multiple_choice_answer']
# # del i['index']
# with open('datasets/caption/train_qacap.json', 'w') as f:
# json.dump(train_qacap, f)
with open('./datasets/caption/val_qacap.json') as val_qacap:
val_qacap = json.load(val_qacap)
for i in val_qacap['data']:
i['q_similarity'] = sent_sim(i['question'], i['caption'])
for i in val_qacap['data']:
i['a_similarity'] = sent_sim(i['multiple_choice_answer'], i['caption'])
for i in val_qacap['data']:
i['total_similarity'] = (i['a_similarity'] + i['q_similarity'] ) / 2
with open('datasets/caption/val_qacap_sim.json', 'w') as f:
json.dump(val_qacap, f)
with open('./datasets/caption/train_qacap_sim.json') as train_qacap:
train_qacap = json.load(train_qacap)
df_sim = pd.DataFrame(val_qacap['data'])
# import matplotlib.pyplot as plt
# # plt.hist([df_sim['similarity'], df_val['similarity'], df_test['similarity']], label=['train', 'val', 'test'])
# plt.hist(df_sim['a_similarity'],color='blue', label='train', alpha=0.5)
# plt.hist(df_sim['q_similarity'],color='red', label='val', alpha=0.5)
# plt.hist(df_sim['total_similarity'], color='green', label='test', alpha=0.5)
# plt.legend(loc='upper right')
# plt.show()
df_sim2 = df_sim.sort_values(by='total_similarity',ascending=False)
df_sim2['total_similarity'].isnull().sum()
df_sim2.iloc[0]
del df_sim2['index']
del df_sim2['image_id']
del df_sim2['question']
del df_sim2['question_id']
del df_sim2['caption']
del df_sim2['multiple_choice_answer']
df_sim2.describe()
#####################################
for i in train_cap['data']:
i['q_similarity'] = sent_sim(i['question'], i['caption'])
for i in val_cap['data']:
i['similarity'] = sent_sim(i['question'], i['caption'])
for i in test_cap['data']:
i['similarity'] = sent_sim(i['question'], i['caption'])
for i in train_cap['data']:
del i['index']
for i in val_cap['data']:
del i['index']
for i in test_cap['data']:
del i['index']
with open('datasets/caption/train_cap.json', 'w') as f:
json.dump(train_cap, f)
with open('datasets/caption/val_cap.json', 'w') as f2:
json.dump(val_cap, f2)
with open('datasets/caption/test_cap.json', 'w') as f3:
json.dump(test_cap, f3)
########################################################################################################################
"""similarity distribution check"""
df_train = pd.DataFrame(train_cap['data'])
df_val = pd.DataFrame(val_cap['data'])
df_test = pd.DataFrame(test_cap['data'])
import matplotlib.pyplot as plt
plt.hist([df_train['similarity'], df_val['similarity'], df_test['similarity']], label=['train', 'val', 'test'])
# plt.hist(df_train['similarity'],color='blue', label='train', alpha=0.5)
# plt.hist(df_val['similarity'],color='red', label='val', alpha=0.5)
# plt.hist(df_test['similarity'], color='green', label='test', alpha=0.5)
plt.legend(loc='upper right')
plt.show()
########################################################################################################################
"""train+val"""
df_tv = pd.concat([df_train, df_val], ignore_index=True)
df_tv = df_tv.drop(['image_id', 'question_id'], axis='columns')
df_tv = df_tv.sort_values(by='similarity',ascending=False)
df_tv['similarity'].isnull().sum()  # 413 missing values
df_tv = df_tv.fillna(0)
df_tv.describe() # q3 is 0.83 mean is 0.80 q1 is 0.77
"""similarity check"""
# df_t = df_train.drop(['image_id', 'question_id'], axis='columns')
# df_t.sort_values(by='similarity')
# df_t['similarity'].isnull().sum()  # 275 missing values
# df_t = df_t.fillna(0)
# df_t = df_t.sort_values(by='similarity', ascending=False)
# df_t.iloc[0]
# df_t.iloc[15]
#
# df_t.describe()
# sent_sim('Where are they riding a skylift?', 'a man and a woman posing for a picture')
#
# txt2vec('skylift') is an all-zero matrix
# word_sim(txt2vec('skylift'), txt2vec('picture'))
########################################################################################################################
# from core.data.ans_punct import prep_ans
# import numpy as np
# import en_vectors_web_lg, random, re, json
# import json
# from core.data.data_utils import ques_load
# img_feat_path_list = []
# # split_list = __C.SPLIT[__C.RUN_MODE].split('+')
#
# stat_ques_list = \
# json.load(open('./datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json', 'r'))['questions'] + \
# json.load(open('./datasets/vqa/v2_OpenEnded_mscoco_val2014_questions.json', 'r'))['questions'] + \
# json.load(open('./datasets/vqa/v2_OpenEnded_mscoco_test2015_questions.json', 'r'))['questions']
#
#
# # qid_to_ques = ques_load(ques_list)
#
# def tokenize(stat_ques_list, use_glove):
# token_to_ix = {
# 'PAD': 0,
# 'UNK': 1,
# }
#
# spacy_tool = None
# pretrained_emb = []
# if use_glove:
# spacy_tool = en_vectors_web_lg.load()
# pretrained_emb.append(spacy_tool('PAD').vector)
# pretrained_emb.append(spacy_tool('UNK').vector)
#
# for ques in stat_ques_list:
# words = re.sub(
# r"([.,'!?\"()*#:;])",
# '',
# ques['question'].lower()
# ).replace('-', ' ').replace('/', ' ').split()
#
# for word in words:
# if word not in token_to_ix:
# token_to_ix[word] = len(token_to_ix)
# if use_glove:
# pretrained_emb.append(spacy_tool(word).vector)
#
# pretrained_emb = np.array(pretrained_emb)
#
# return token_to_ix, pretrained_emb
#
# token_to_ix, pretrained_emb = tokenize(stat_ques_list, True)
#
# token_to_ix
# len(pretrained_emb)
#
# stat_ques_list[1]
# words = re.sub(
# r"([.,'!?\"()*#:;])",
# '',
# stat_ques_list[2]['question'].lower()
# ).replace('-', ' ').replace('/', ' ').split()
#
# token_to_ix = {
# 'PAD': 0,
# 'UNK': 1,
# }
# spacy_tool = None
# pretrained_emb = []
# spacy_tool = en_vectors_web_lg.load()
# pretrained_emb.append(spacy_tool('PAD').vector)
# pretrained_emb.append(spacy_tool('UNK').vector)
#
# for word in words:
# if word not in token_to_ix:
# token_to_ix[word] = len(token_to_ix)
# pretrained_emb.append(spacy_tool(word).vector)
#
# pretrained_emb = np.array(pretrained_emb)
#
# tmp_cap ="a baseball player standing next to home plate"
#
# words_cap = re.sub(
# r"([.,'!?\"()*#:;])",
# '',
# tmp_cap.lower()
# ).replace('-', ' ').replace('/', ' ').split()
#
# token_to_ix_cap = {
# 'PAD': 0,
# 'UNK': 1,
# }
#
# pretrained_emb_cap = []
# pretrained_emb_cap.append(spacy_tool('PAD').vector)
# pretrained_emb_cap.append(spacy_tool('UNK').vector)
#
# for word in words_cap:
# if word not in token_to_ix_cap:
# token_to_ix_cap[word] = len(token_to_ix_cap)
# pretrained_emb_cap.append(spacy_tool(word).vector)
#
# pretrained_emb_cap = np.array(pretrained_emb_cap)
#
# pretrained_emb[2]
# pretrained_emb_cap[2]
################################## compute similarity #################################################################
# len(words)
# len(words_cap)
# len(pretrained_emb)
# len(pretrained_emb[2:])
#
#
# tt = []
# for i in pretrained_emb[2:]:
# tmp = []
# for j in pretrained_emb_cap[2:]:
# tmtm = word_sim(i,j)
# tmp.append(tmtm)
# print(max(tmp))
# tt.append(max(tmp))
# s = sum(tt)
# sim = s / len(pretrained_emb[2:])
#
# sim
##########################################################
# from core.data.ans_punct import prep_ans
# import numpy as np
# import en_vectors_web_lg, random, re, json
# import json
# from core.data.data_utils import ques_load
#
# stat_ques_list = \
# json.load(open('./datasets/caption/train_cap.json', 'r'))['data'] + \
# json.load(open('./datasets/caption/val_cap.json', 'r'))['data'] + \
# json.load(open('./datasets/caption/test_cap.json', 'r'))['data']
#
# def tokenize(stat_ques_list, use_glove):
# token_to_ix = {
# 'PAD': 0,
# 'UNK': 1,
# }
#
# spacy_tool = None
# pretrained_emb = []
# if use_glove:
# spacy_tool = en_vectors_web_lg.load()
# pretrained_emb.append(spacy_tool('PAD').vector)
# pretrained_emb.append(spacy_tool('UNK').vector)
#
# for ques in stat_ques_list:
# words = re.sub(
# r"([.,'!?\"()*#:;])",
# '',
# ques['question'].lower()
# ).replace('-', ' ').replace('/', ' ').split()
#
# for word in words:
# if word not in token_to_ix:
# token_to_ix[word] = len(token_to_ix)
# if use_glove:
# pretrained_emb.append(spacy_tool(word).vector)
# for ques in stat_ques_list:
# words = re.sub(
# r"([.,'!?\"()*#:;])",
# '',
# ques['caption'].lower()
# ).replace('-', ' ').replace('/', ' ').split()
#
# for word in words:
# if word not in token_to_ix:
# token_to_ix[word] = len(token_to_ix)
# if use_glove:
# pretrained_emb.append(spacy_tool(word).vector)
#
# pretrained_emb = np.array(pretrained_emb)
#
# return token_to_ix, pretrained_emb
#
# token_to_ix, pretrained_emb = tokenize(stat_ques_list, True)
#
# ###########################################################
# from numpy import dot
# from numpy.linalg import norm
# import numpy as np
#
# def cos_sim(A, B):
# return dot(A, B) / (norm(A) * norm(B))
#
# def word_sim(w1,w2): #word simiarity
# s = 0.5 * (1+ cos_sim(w1,w2))
# return s
#
#
# def txt2vec(sentence):
# s = sentence.split()
# tt = []
# for i in s:
# new_i = re.sub(
# r"([.,'!?\"()*#:;])",
# '',
# i.lower()
# )
# num = token_to_ix[new_i]
# tt.append(pretrained_emb[num])
# return tt
#
# def sent_sim(ss1, ss2): #sentence simiarity
# s1 = txt2vec(ss1)
# s2 = txt2vec(ss2)
# t = []
# for i in s1[2:]: #question 0,1 are PAD, UNK
# tmp = []
# for j in s2[2:]: #caption
# tmp_sim = word_sim(i,j)
# tmp.append(tmp_sim)
# t.append(max(tmp))
# sent_sim = sum(t) / len(s1[2:])
# return sent_sim
#
#
#
# sent = 'i like a girl'
# s = sent.split()
# s[0]
# token_to_ix[s[0]]
# pretrained_emb[token_to_ix[s[0]]]
# a = txt2vec('i like a girl')
# b = txt2vec('a girl is standing')
``` |
{
"source": "joonseok-kim/simplification",
"score": 3
} |
#### File: simplification/prism/ring.py
```python
from prism.segment import Segment
__all__ = ['Ring']
class Ring:
def __init__(self, coordinates):
self._segments = []
for i in range(len(coordinates)-1):
self._segments.append(Segment(coordinates[i], coordinates[i + 1]))
for i in range(len(self._segments)-1):
self._segments[i].prev_seg = self._segments[i - 1]
self._segments[i].next_seg = self._segments[i + 1]
self._segments[-1].prev_seg = self._segments[-2]
self._segments[-1].next_seg = self._segments[0]
@property
def segments(self):
return self._segments
def merge(self, seg):
"""
Merge the segment with next segment.
:param seg: segment to merge
:return: None
"""
if seg in self.segments:
seg.prev_seg.next_seg = seg.next_seg
seg.next_seg.prev_seg = seg.prev_seg
seg.next_seg.sp = seg.prev_seg.ep
self.segments.remove(seg)
def update(self, seg, sp, ep):
"""
Update the segment with new start and end points.
:param seg: segment to update
:param sp: start point
:param ep: end point
:return: new segment
"""
if seg in self.segments:
seg.sp = sp
seg.ep = ep
seg.prev_seg.ep = seg.sp
seg.next_seg.sp = seg.ep
return seg
def remove(self, seg, q):
"""
Remove the segment and join two neighboring segments on q.
:param seg: segment to remove
:param q: new point
:return: None
"""
if seg in self.segments:
seg.prev_seg.next_seg = seg.next_seg
seg.next_seg.prev_seg = seg.prev_seg
seg.prev_seg.ep = q
seg.next_seg.sp = q
self.segments.remove(seg)
def __getitem__(self, index):
return self.segments[index]
def __len__(self):
return len(self.segments)
    def __repr__(self):
        _msg = ''
        for i in range(len(self.segments)):
            _msg += str(self.segments[i]) + ',' if i < len(self.segments) - 1 else str(self.segments[i])
        return _msg
@property
def coordinates(self):
_coordinates = []
for seg in self.segments:
_coordinates.append(seg.sp)
_coordinates.append(self.segments[-1].ep)
return _coordinates
```
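A short usage sketch of `Ring` (assuming the `prism` package above is importable and that `Segment` exposes the `sp`/`ep`/`prev_seg`/`next_seg` attributes `Ring` relies on):

```python
from prism.ring import Ring

# a closed square: the first and last coordinates coincide
coords = [(0, 0), (2, 0), (2, 2), (0, 2), (0, 0)]
ring = Ring(coords)

print(len(ring))          # 4 segments
print(ring.coordinates)   # round-trips back to the closed coordinate list

# merge the first segment into its successor, dropping the shared vertex (2, 0)
ring.merge(ring[0])
print(len(ring), ring.coordinates)
```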
#### File: simplification/prism/simplify.py
```python
from heapq import heappush, heappop, heapify
import math
from math import pi, isinf
from prism.ring import Ring
from shapely.geometry import LinearRing
from shapely.geometry import LineString
from shapely.geometry import Point
from shapely.geometry import Polygon
from shapely.wkt import loads
__all__ = ['simplify', 'simplify_ring']
# internal use for debugging
_debug_mode = False
def simplify(polygon, tau=1, epsilon=pi/36, delta=pi/180, gamma=None, merge_first=False):
# type: (Polygon, float, float, float, float, bool) -> Polygon
"""
Returns a simplified polygon using a simplification method considering to preserve important spatial properties.
:param polygon: polygon to simplify
:param tau: tolerance distance
:param epsilon: tolerance angle
:param delta: angle threshold used to determine if consecutive segments are collinear
:param gamma: distance threshold used to determine whether to join neighboring segments
:param merge_first: condition whether or not it merges neighbors first when possible
:return: a simplified polygon
"""
exterior = simplify_ring(polygon.exterior, tau, epsilon, delta, gamma, merge_first)
interiors = []
for ring in polygon.interiors:
interiors.append(simplify_ring(ring, tau, epsilon, delta, gamma, merge_first))
if exterior is None:
return None
return Polygon(exterior, interiors)
def simplify_ring(linear_ring, tau=1, epsilon=pi/36, delta=pi/180, gamma=None, merge_first=False):
# type: (LinearRing, float, float, float, float, bool) -> LinearRing
"""
Returns a simplified ring using a simplification method considering to preserve important spatial properties.
:param linear_ring: ring to simplify
:param tau: tolerance distance
:param epsilon: tolerance angle
:param delta: angle threshold used to determine if consecutive segments are collinear
:param gamma: distance threshold used to determine whether to join neighboring segments
:param merge_first: condition whether or not it merges neighbors first when possible
:return: a simplified ring
"""
# Initialize a priority queue
queue = []
push = heappush
pop = heappop
# deep copy from the linear ring
_coordinates = []
for coord in linear_ring.coords:
_x, _y = coord
_coordinates.append((_x, _y))
ring = Ring(_coordinates)
def remove_from_queue(seg):
"""
Remove a segment from the queue.
:param seg: segment to remove from the queue
:return: None
"""
try:
queue.remove(seg)
heapify(queue) # it is required to keep the heap structure
except ValueError:
# it is possible because some segment can be removed from the queue
pass
def enqueue(seg):
"""
Enqueue a segment. If the segment exists in the queue, the enqueuing is ignored.
:param seg: segment to enqueue
:return: None
"""
if seg in queue:
# avoid adding the existing segment in the queue
pass
else:
push(queue, seg)
# Enqueue all segments
for line_segment in ring:
enqueue(line_segment)
def remove_middle_point(seg):
"""
Remove the middle point between a segment and its next segment
:param seg: segment
:return: None
"""
remove_from_queue(seg.next_seg)
ring.merge(seg)
enqueue(seg.next_seg)
if _debug_mode:
print('remove_middle_point:', seg.next_seg)
def project(px, py, x, y, tan):
"""
Return a point projected from (px,py) on the line that passes through (x, y) with the tangent.
:param px: x coordinate of a point to project
:param py: y coordinate of a point to project
:param x: x coordinate of a line
:param y: y coordinate of a line
:param tan: tangent of a line
:return: the projected point
"""
if tan == 0:
new_x = px
new_y = y
elif isinf(tan):
new_x = x
new_y = py
else:
cot = 1.0 / tan
new_x = (px + tan * tan * x + tan * (py - y)) / (1 + tan * tan)
new_y = (py + cot * cot * y + cot * (px - x)) / (1 + cot * cot)
return new_x, new_y
def intersection2(seg, x, y, tan):
"""
Returns intersection of one line extending from a segment
with the line that passes through (x, y) with the tangent.
:param seg: segment
:param x: x coordinate of a line
:param y: y coordinate of a line
:param tan: tangent of a line
:return: intersection point. If two lines are parallel, returns None.
"""
s1 = seg.sp
e1 = seg.ep
s2 = (x, y)
e2 = (x + 1, y + tan)
a1 = e1[1] - s1[1]
b1 = s1[0] - e1[0]
c1 = a1 * s1[0] + b1 * s1[1]
a2 = e2[1] - s2[1]
b2 = s2[0] - e2[0]
c2 = a2 * s2[0] + b2 * s2[1]
dt = a1 * b2 - a2 * b1
if dt == 0:
return None
new_x = (b2 * c1 - b1 * c2) / dt
new_y = (a1 * c2 - a2 * c1) / dt
return new_x, new_y
def intersection(a, b):
"""
Returns intersection of two lines extending from two segments.
:param a: segment a
:param b: segment b
:return: intersection point. If two lines are parallel, returns None.
"""
t = (a.sp[0] - a.ep[0]) * (b.sp[1] - b.ep[1]) - (a.sp[1] - a.ep[1]) * (b.sp[0] - b.ep[0])
x = (a.sp[0] * a.ep[1] - a.sp[1] * a.ep[0]) * (b.sp[0] - b.ep[0]) - (a.sp[0] - a.ep[0]) * \
(b.sp[0] * b.ep[1] - b.sp[1] * b.ep[0])
y = (a.sp[0] * a.ep[1] - a.sp[1] * a.ep[0]) * (b.sp[1] - b.ep[1]) - (a.sp[1] - a.ep[1]) * \
(b.sp[0] * b.ep[1] - b.sp[1] * b.ep[0])
if t == 0:
# if two lines are parallel
return None
return x / t, y / t
def conditional_segment_regression(seg):
"""
Remove a middle point if the merge_first flag is set and it is appropriate.
Otherwise, find a segment to consider both length and angle of the previous and next segments.
:param seg: segment to regress
:return: None
"""
if merge_first:
if seg.prev_seg.length() < tau and seg.next_seg.length() < tau:
if seg.prev_seg.length() < seg.next_seg.length():
if LineString([seg.prev_seg.sp, seg.next_seg.sp]).length < tau:
remove_from_queue(seg.prev_seg)
remove_middle_point(seg.prev_seg)
return
else:
if LineString([seg.prev_seg.ep, seg.next_seg.ep]).length < tau:
remove_middle_point(seg)
return
segment_regression(seg)
def segment_regression(seg):
"""
Find a segment to consider both length and angle of the previous and next segments.
:param seg: segment to regress
:return: None
"""
remove_from_queue(seg.prev_seg)
remove_from_queue(seg.next_seg)
ratio = seg.prev_seg.length()/(seg.prev_seg.length() + seg.next_seg.length())
line = LineString([seg.sp, seg.ep])
p = line.interpolate(ratio, normalized=True)
a1 = seg.prev_seg.slope_as_angle()
a2 = seg.next_seg.slope_as_angle()
if abs(a1-a2) > math.pi:
if a1 > a2:
a2 += math.pi * 2
else:
a1 += math.pi * 2
angle = a1 * ratio + a2 * (1 - ratio)
angle = angle if angle <= 2 * math.pi else angle - (2 * math.pi)
theta = math.tan(angle)
prev2 = seg.prev_seg.prev_seg
next2 = seg.next_seg.next_seg
# Intersection of the previous of the previous segment with the line through p with slope theta.
q1 = intersection2(prev2, p.xy[0][0], p.xy[1][0], theta)
if q1 is None or LineString([prev2.sp, prev2.ep]).distance(Point(q1)) > seg.length():
# Intersection of the previous segment with the line through p with slope theta if q1 is too far.
q1 = project(seg.prev_seg.sp[0], seg.prev_seg.sp[1], p.xy[0][0], p.xy[1][0], theta)
# Intersection of the next of the next segment with the line through p with slope theta.
q2 = intersection2(next2, p.xy[0][0], p.xy[1][0], theta)
if q2 is None or LineString([next2.sp, next2.ep]).distance(Point(q2)) > seg.length():
# Intersection of the next segment with the line through p with slope theta if q2 is too far.
q2 = project(seg.next_seg.ep[0], seg.next_seg.ep[1], p.xy[0][0], p.xy[1][0], theta)
# update the segment with new two points
seg = ring.update(seg, q1, q2)
enqueue(seg.prev_seg)
enqueue(seg)
enqueue(seg.next_seg)
if _debug_mode:
print('regression:', p.coords.xy, theta, seg, q1, q2)
def join_segment(seg, p):
"""
Remove a segment and join the previous and next segments with point p
:param seg: target segment
:param p: join point
:return: None
"""
remove_from_queue(seg.prev_seg)
remove_from_queue(seg.next_seg)
ring.remove(seg, p)
enqueue(seg.prev_seg)
enqueue(seg.next_seg)
if _debug_mode:
print('join:', p)
def translate_segment(seg):
"""
Translate segments depending on the length of the previous and next segments
:param seg: target segment
:return: None
"""
remove_from_queue(seg.prev_seg)
remove_from_queue(seg.next_seg)
prev_length = seg.prev_seg.length()
next_length = seg.next_seg.length()
p = 'same length'
if prev_length < next_length:
p = seg.ep[0] - (seg.prev_seg.ep[0] - seg.prev_seg.sp[0]), seg.ep[1] - \
(seg.prev_seg.ep[1] - seg.prev_seg.sp[1])
ring.update(seg, seg.prev_seg.sp, p)
ring.update(seg.next_seg, p, seg.next_seg.ep)
ring.remove(seg.prev_seg, seg.sp)
enqueue(seg)
enqueue(seg.next_seg)
elif prev_length > next_length:
p = seg.sp[0] + (seg.next_seg.ep[0] - seg.next_seg.sp[0]), seg.sp[1] + \
(seg.next_seg.ep[1] - seg.next_seg.sp[1])
ring.update(seg.prev_seg, seg.prev_seg.sp, p)
ring.update(seg, p, seg.next_seg.ep)
ring.remove(seg.next_seg, seg.ep)
enqueue(seg)
enqueue(seg.prev_seg)
else:
ring.update(seg, seg.prev_seg.sp, seg.next_seg.ep)
ring.remove(seg.next_seg, seg.ep)
ring.remove(seg.prev_seg, seg.sp)
enqueue(seg)
if _debug_mode:
print('translate:', p)
# main iteration for simplification
while len(queue) > 0 and len(ring) >= 3:
s = pop(queue) # de-queue the next segment
if _debug_mode:
print('de-queue:', len(queue), s.length(), s, s.angle())
dirty = True # flag used to check if the ring changes
if pi - delta < s.angle() < pi + delta:
# if two segments are approximately collinear.
remove_middle_point(s)
elif s.length() <= tau:
_a1 = s.prev_seg.slope_as_angle()
_a2 = s.next_seg.slope_as_angle()
if abs(_a1 - _a2) > math.pi:
if _a1 > _a2:
_a2 += math.pi * 2
else:
_a1 += math.pi * 2
alpha = abs(_a1 - _a2)
alpha = min(alpha, abs(alpha - pi*2))
if 0 <= alpha <= epsilon:
conditional_segment_regression(s)
elif pi - alpha <= epsilon:
translate_segment(s)
else:
# Intersection of two lines obtained by extending the previous and next segments
q = intersection(s.prev_seg, s.next_seg)
_gamma = s.length() if gamma is None else gamma
_gamma = min(_gamma, tau)
if q is not None and LineString([s.sp, s.ep]).distance(Point(q)) <= _gamma:
join_segment(s, q)
elif s.prev_seg.length() < s.next_seg.length():
remove_from_queue(s.prev_seg)
remove_middle_point(s.prev_seg)
else:
remove_middle_point(s)
else:
dirty = False
if _debug_mode:
# print(queue)
if dirty:
print(Polygon(ring.coordinates).wkt)
if len(ring.coordinates) < 3:
return None
return LinearRing(ring.coordinates)
def _test():
"""
Test simplification with a simple polygon.
:return:
"""
polygon = loads('POLYGON ((0 0, 2 0, 2 -1.1, 2.1 -1.1, 2.1 0, 4 0, 1 1.0001, 0 2, -1 1, -1 0.99, -2 0, 0 0))')
print(polygon.wkt)
new_polygon = simplify(polygon)
print(new_polygon.wkt)
hausdorff = polygon.hausdorff_distance(new_polygon)
print('Hausdorff Distance', hausdorff)
union = polygon.union(new_polygon)
intersection = polygon.intersection(new_polygon)
area_ratio = intersection.area/union.area
print('Jaccard Index', area_ratio)
if __name__ == '__main__':
_debug_mode = False
_test()
``` |
{
"source": "JoonSeongLee/dss7-coupon",
"score": 2
} |
#### File: dss7-coupon/jw/custom_ml.py
```python
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import numpy as np
import scipy as sp
import pandas as pd
import statsmodels.api as sm
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
import sklearn as sk
import matplotlib as mpl
import matplotlib.pylab as plt
import matplotlib.font_manager as fm
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
sns.set()
sns.set_style("whitegrid")
sns.set_color_codes()
# Model
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
# preprocss with etc
from sklearn.model_selection import train_test_split
from sklearn.metrics import *
from sklearn.preprocessing import label_binarize
from sklearn.multiclass import OneVsRestClassifier
from scipy import interp
def df_to_nparray():
return 0
def cls_conf(X, y, model, test_ratio=0.5):
'''
a, b = cls_conf(X,y,GaussianNB(),0.5)
a, b = cls_conf(X,y,QuadraticDiscriminantAnalysis(store_covariance=True),0.5)
a, b = cls_conf(X,y,LinearDiscriminantAnalysis(n_components=3,
solver="svd", store_covariance=True),0.5)
a, b = cls_conf(X,y,LogisticRegression(), 0.5)
    a, b = cls_conf(X,y,svm.SVC(gamma=0.0026, C=10), 0.5)
'''
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=0)
model = model.fit(X_train, y_train)
y_pred = model.predict(X_test)
return confusion_matrix(y_test, y_pred), classification_report(y_test, y_pred)
def bin_roc(X,y,model,test_ratio=0.5):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=0)
classifier = model.fit(X_train, y_train)
fpr, tpr, threshold = roc_curve(y_test, classifier.decision_function(X_test))
plt.figure(figsize=(10, 10))
lw = 2
plt.plot(fpr, tpr, label = 'auc area = {}'.format(auc(fpr,tpr)))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.1, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate',fontsize=20)
plt.ylabel('True Positive Rate',fontsize=20)
plt.title('ROC',fontsize=20)
plt.legend(loc='lower right',fontsize=20)
plt.show()
def multi_roc(X, y, model, test_ratio=0.5):
y = label_binarize(y, classes=np.unique(y))
n_classes = y.shape[1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio,
random_state=0)
ovr = OneVsRestClassifier(model)
y_score = ovr.fit(X_train, y_train).predict_proba(X_test)
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
plt.figure(figsize=(10, 10))
lw = 2
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], lw=lw,
label='class {} (auc area = {})'.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([-0.1, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate',fontsize=20)
plt.ylabel('True Positive Rate',fontsize=20)
plt.title('ROC',fontsize=20)
plt.legend(loc='lower right',fontsize=20)
plt.show()
``` |
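A quick way to exercise these helpers end to end (a sketch using scikit-learn's bundled iris data; it assumes this file is importable as `custom_ml` and that its imports — seaborn, statsmodels, scikit-learn — are installed):

```python
from sklearn.datasets import load_iris
from sklearn.naive_bayes import GaussianNB
from custom_ml import cls_conf, multi_roc

X, y = load_iris(return_X_y=True)

# confusion matrix and per-class precision/recall on a 50/50 split
conf, report = cls_conf(X, y, GaussianNB(), 0.5)
print(conf)
print(report)

# one-vs-rest ROC curves for the three iris classes
multi_roc(X, y, GaussianNB(), 0.5)
```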
{
"source": "Joonsey/jae-website",
"score": 3
} |
#### File: Joonsey/jae-website/server.py
```python
from flask import Flask, render_template, make_response, request
import os
from fetchDB import fetchAllData
app = Flask(__name__, static_folder=os.path.abspath('static'))
data = fetchAllData()
@app.route('/')
def index():
return render_template('index.html')
@app.route('/rev')
def news():
return data[0].headerText + " " + data[0].contentText
@app.route('/contact')
def contact():
return render_template('contact.html')
@app.route('/about')
def about():
return render_template('about.html')
if __name__ == '__main__':
app.run(debug=True, port=80)
``` |
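Once the app is running locally (note it binds to port 80 with `debug=True`, so this is a development setup), the routes can be smoke-tested with a few GET requests:

```python
import requests

base = "http://localhost:80"
for path in ("/", "/rev", "/contact", "/about"):
    resp = requests.get(base + path)
    print(path, resp.status_code)
```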
{
"source": "joon-solutions/airbyte",
"score": 2
} |
#### File: source-emarsys/source_emarsys/streams.py
```python
import base64
import hashlib
import os
import re
from abc import ABC
from binascii import hexlify
from datetime import datetime
from typing import Any, Iterable, List, Mapping, MutableMapping, Optional, Tuple
from urllib.parse import parse_qs, urljoin, urlparse
import requests
from airbyte_cdk.models.airbyte_protocol import SyncMode
from airbyte_cdk.sources.streams.http import HttpStream
from requests import models
from requests.auth import AuthBase
PATTERN_ALL = "^.*$"
class EmarsysAuthenticator(AuthBase):
def __init__(self, username, password) -> None:
self.username = username
self._password = password
def _get_wsse(self):
"""Create X-WSSE header value from username & password.
Returns:
str: Header value.
"""
nonce = hexlify(os.urandom(16)).decode("utf-8")
created = datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S+00:00")
sha1 = hashlib.sha1(str.encode(nonce + created + self._password)).hexdigest()
password_digest = bytes.decode(base64.b64encode(str.encode(sha1)))
return ('UsernameToken Username="{}", ' + 'PasswordDigest="{}", Nonce="{}", Created="{}"').format(
self.username, password_digest, nonce, created
)
def __call__(self, r: models.PreparedRequest) -> models.PreparedRequest:
r.headers["X-WSSE"] = self._get_wsse()
return r
class EmarsysStream(HttpStream, ABC):
@property
def url_base(self) -> str:
return "https://api.emarsys.net/api/v2/"
def request_headers(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Mapping[str, Any]:
"""
Default headers.
"""
return {"Accept": "application/json", "Content-Type": "application/json"}
def backoff_time(self, response: requests.Response) -> Optional[float]:
headers = response.headers
try:
reset_ts = datetime.utcfromtimestamp((int(headers.get("X-Ratelimit-Reset"))))
current_ts = datetime.utcnow()
if reset_ts >= current_ts:
# Pause at least 1 second
pause_secs = max((reset_ts - current_ts).total_seconds(), 1)
self.logger.info("Delay API call for %s seconds", pause_secs)
return pause_secs
except ValueError:
self.logger.warning("Could not parse X-Ratelimit-Reset timestamp. Fallback to exponential backoff.")
return None
return 1
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
"""
Parse data from response
:return an iterable containing each record in the response
"""
data = response.json().get("data", [])
yield from data
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
class PaginatedEmarsysStream(EmarsysStream):
def __init__(self, authenticator=None, limit=10000):
super().__init__(authenticator)
self.limit = limit
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
"""
Return next offset if returned response contains data.
:param response: the most recent response from the API
:return Return None if there is no data in the response; elsewise, return last offset + # data records
"""
data = response.json().get("data", [])
if not data:
return None
queries = parse_qs(urlparse(response.request.url).query)
offset = int(queries.get("offset", [0])[0]) + len(data)
self.logger.info("Next offset: %s", offset)
return {"offset": offset}
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
"""
Override this method to define the query parameters that should be set on an outgoing HTTP request given the inputs.
E.g: you might want to define query parameters for paging if next_page_token is not None.
"""
params = {"offset": 0}
if next_page_token:
params["offset"] = next_page_token.get("offset", 0)
if self.limit > 0:
params["limit"] = self.limit
return params
class Fields(EmarsysStream):
primary_key = "id"
def path(
self,
*,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> str:
return "field"
class ContactLists(EmarsysStream):
primary_key = "id"
def __init__(self, pattern_list_text: str, **kwargs):
super().__init__(**kwargs)
self.pattern_list = pattern_list_text.split(",") if pattern_list_text else [PATTERN_ALL]
def path(
self,
*,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> str:
return "contactlist"
    @property
    def use_cache(self) -> bool:
        # declared as a property so callers reading self.use_cache get a boolean, not a bound method
        return True
def _match_record_by_pattern_list(self,record_list):
matched_name_set = set()
for pattern in self.pattern_list:
self.logger.info(f"Match pattern {pattern}")
compiled_pattern = re.compile(pattern)
matched = {record["name"] for record in record_list if compiled_pattern.match(record["name"])}
matched_name_set.update(matched)
matched_record_list = [record for record in record_list if record["name"] in matched_name_set]
self.logger.info(f"Found {len(matched_record_list)} matched records")
return matched_record_list
def parse_response(self, response: requests.Response, **kwargs) -> Iterable[Mapping]:
"""
Parse data from response
:return an iterable containing each record in the response
"""
data = response.json().get("data", [])
if data:
matched_data = self._match_record_by_pattern_list(data)
# matched_data = data
yield from matched_data
class Segments(EmarsysStream):
primary_key = "id"
def path(
self,
*,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> str:
return "filter"
class SubPaginatedEmarsysStream(PaginatedEmarsysStream):
def __init__(self, parent: HttpStream, **kwargs):
"""
:param parent: should be the instance of HttpStream class
"""
super().__init__(**kwargs)
self.parent = parent
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
# iterate over all parent stream_slices
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh,
cursor_field=cursor_field,
stream_slice=stream_slice,
stream_state=stream_state,
)
# iterate over all parent records with current stream_slice
for index, record in enumerate(parent_records):
self.logger.info("Start slice #%s: %s", index + 1, record)
yield {"parent": record}
self.logger.info("Finished slice #%s: %s", index + 1, record)
class ContactListMemberships(SubPaginatedEmarsysStream):
primary_key = ["contact_list_id", "id"]
def path(
self,
*,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> str:
return f"contactlist/{stream_slice['parent']['id']}/"
def parse_response(
self,
response: requests.Response,
*,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
contact_list_id = stream_slice["parent"]["id"]
data = response.json()["data"]
for contact_id in data:
yield {"id": contact_id, "contact_list_id": contact_list_id}
class Contacts(SubPaginatedEmarsysStream):
primary_key = "id"
def __init__(self, parent: HttpStream, fields: List, recur_list_patterns=None, **kwargs):
super().__init__(parent, **kwargs)
self.field_string_ids = fields
self._field_dict = None
self._field_string_2_id = None
self.yielded_contact_ids = set()
self.recur_list_patterns = recur_list_patterns or []
@property
def field_dict(self):
if not self._field_dict:
self._field_dict, self._field_string_2_id = self._build_field_mapping()
return self._field_dict
@property
def field_string_2_id(self):
if not self._field_string_2_id:
self._field_dict, self._field_string_2_id = self._build_field_mapping()
return self._field_string_2_id
def _filter_recur_lists(self, records: List[Mapping[str, Any]]) -> List[Mapping[str, Any]]:
"""Filter any recurring contact list record that matchs pattern and is not the latest.
Args:
records (List[Mapping[str, Any]]): List of records
Returns:
List[Mapping[str, Any]]: List of records after filtering
"""
no_recurs = []
recurs = {}
for record in records:
matched = False
for pattern in self.recur_list_patterns:
# Use only the latest list if name matchs pattern
if re.match(pattern, record["name"]):
matched = True
match_list = recurs.setdefault(pattern, [])
match_list.append(record)
if not matched:
no_recurs.append(record)
for pattern, match_list in recurs.items():
match_list.sort(key=lambda x: x["created"], reverse=True)
self.logger.info("For pattern %s, use the latest list %s", pattern, match_list[0])
ignores = match_list[1:]
if ignores:
self.logger.info(
"And ignore %s lists from %s to %s", len(ignores), ignores[-1]["name"], ignores[0]["name"]
)
# Unique latest recurring contact lists
unique_recurs = {match_list[0]["id"]: match_list[0] for match_list in recurs.values() if len(match_list) > 0}
return no_recurs + list(unique_recurs.values())
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = list(
self.parent.read_records(
sync_mode=SyncMode.full_refresh,
cursor_field=cursor_field,
stream_slice=stream_slice,
stream_state=stream_state,
)
)
parent_records = self._filter_recur_lists(parent_records)
for index, record in enumerate(parent_records):
self.logger.info("Start slice #%s: %s", index + 1, record)
yield {"parent": record}
self.logger.info("Finished slice #%s: %s", index + 1, record)
def _build_field_mapping(self) -> Tuple[Mapping[str, Any], Mapping[str, Any]]:
"""Build field dictionary and mapping from field string_id to id.
Returns:
Tuple[Mapping[str, Any], Mapping[str, Any]]: Tuple of field dict & mapping
"""
url = urljoin(self.url_base, "field")
response = self._session.get(url, headers={"Accept": "application/json", "Content-Type": "application/json"})
data = response.json()["data"]
field_dict = {}
field_string_2_id = {}
for field in data:
field_dict[str(field["id"])] = field
field_string_2_id[field["string_id"]] = str(field["id"])
return field_dict, field_string_2_id
def path(
self,
*,
stream_state: Mapping[str, Any] = None,
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> str:
return f"contactlist/{stream_slice['parent']['id']}/contacts/data"
def request_params(
self,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> MutableMapping[str, Any]:
params = super().request_params(stream_state, stream_slice, next_page_token)
# Map field string_id to id
params["fields"] = ",".join(str(self.field_string_2_id[string_id]) for string_id in self.field_string_ids)
return params
def get_airbyte_format(self, field_string_id: str) -> Mapping[str, any]:
"""Get Airbyte field specification from Emarsys field string_id.
Args:
field_string_id (str): Emarsys field string_id
Returns:
Mapping[str, any]: Airbyte field specification
"""
field_id = self.field_string_2_id[field_string_id]
airbyte_format = {"type": ["null", "string"]}
# if self.field_dict[field_id]["application_type"] == "numeric": #Commented out since field monthly_emails_only has "TRUE" as a value in contact list id 737496849, contact id 18963422
# airbyte_format = {"type": ["null", "number"]}
if self.field_dict[field_id]["application_type"] == "date":
airbyte_format = {"type": ["null", "string"], "format": "date"}
return airbyte_format
def get_json_schema(self) -> Mapping[str, Any]:
schema = super().get_json_schema()
for string_id in self.field_string_ids:
schema["properties"][string_id] = self.get_airbyte_format(string_id)
return schema
def parse_response(
self,
response: requests.Response,
*,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
data = response.json()["data"]
if data and isinstance(data, dict):
for contact_id, contact_data in data.items():
# One contact can be in multiple contact lists. Try to yield only once
# for each contact data record.
if contact_id in self.yielded_contact_ids:
continue
self.yielded_contact_ids.add(contact_id)
output_record = {"id": contact_id}
for field_id, value in contact_data["fields"].items():
# Mapping field Id to field string_id
if field_id == "uid":
output_record["uid"] = value
elif field_id.isdigit():
field_string_id = self.field_dict[field_id]["string_id"]
output_record[field_string_id] = value
yield output_record
yield from []
```
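The piece most worth checking in isolation is the X-WSSE header that `EmarsysAuthenticator` builds. A small sketch with placeholder credentials (no network call is made; it only prepares a request and prints the header; assumes `airbyte_cdk` is installed so the module imports):

```python
import requests
from source_emarsys.streams import EmarsysAuthenticator

auth = EmarsysAuthenticator("api_username", "api_secret")  # placeholder credentials

# requests-style auth: calling the authenticator on a prepared request attaches X-WSSE
prepared = requests.Request("GET", "https://api.emarsys.net/api/v2/field").prepare()
print(auth(prepared).headers["X-WSSE"])
```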
#### File: source-emarsys/unit_tests/test_client.py
```python
import io
import json
from unittest.mock import patch
import pytest
import requests
from source_emarsys.client import EmarsysClient
def make_response(status_code, data=None, headers=None):
response = requests.Response()
response.status_code = status_code
response.headers = headers or {}
if isinstance(data, (list, tuple, dict)):
_data = json.dumps({"data": data})
response.raw = io.BytesIO(_data.encode())
return response
@pytest.fixture
def client():
return EmarsysClient("username", "secret")
@patch("source_emarsys.api.requests.request")
def test_get__return_data(mock_request, client):
mock_request.return_value = make_response(200, [1, 2])
data = client.get("url")
assert data == [1, 2]
def test__params_pagination__output(client):
params = client._params_pagination({}, 100)
assert params["offset"] == 100
assert params["limit"] == client.limit
@patch("source_emarsys.api.requests.request")
def test_fetch_all__no_result(mock_request, client):
response = make_response(200, [])
# Client should make only 1 request. If it makes the 2nd call
# an exception will be thrown
mock_request.side_effect = [response, Exception()]
data = list(client.fetch_all("url"))
assert mock_request.call_count == 1
assert data == []
@patch("source_emarsys.api.requests.request")
def test_fetch_all__one_page(mock_request, client):
mock_request.side_effect = [make_response(200, [1, 2]), make_response(200, [])]
data = list(client.fetch_all("url"))
assert mock_request.call_count == 2
assert data == [1, 2]
_, _, kwargs = mock_request.mock_calls[1]
assert kwargs["params"]["offset"] == 2
@patch("source_emarsys.api.requests.request")
def test_fetch_all__multi_pages(mock_request, client):
mock_request.side_effect = [
make_response(200, [1, 2]),
make_response(200, [3, 4, 5]),
make_response(200, [6]),
make_response(200, []),
make_response(200, []),
]
data = list(client.fetch_all("url"))
assert mock_request.call_count == 4
assert data == [1, 2, 3, 4, 5, 6]
_, _, kwargs = mock_request.mock_calls[-1]
assert kwargs["params"]["offset"] == 6
@patch("source_emarsys.api.requests.request")
def test_list_fields(mock_request, client):
mock_data = [{"id": 1, "name": "email"}, {"id": 2, "name": "address"}]
mock_request.side_effect = [
make_response(200, mock_data),
make_response(200, []),
make_response(200, []),
]
data = list(client.list_fields())
assert mock_request.call_count == 2
_, args, _ = mock_request.mock_calls[0]
assert "v2/field" in args[1]
assert data == mock_data
@patch("source_emarsys.api.requests.request")
def test_list_contact_lists(mock_request, client):
mock_data = [{"id": 1, "name": "list A"}, {"id": 2, "name": "List B"}]
mock_request.side_effect = [
make_response(200, mock_data),
make_response(200, []),
make_response(200, []),
]
data = list(client.list_contact_lists())
assert mock_request.call_count == 2
_, args, _ = mock_request.mock_calls[0]
assert "v2/contactlist" in args[1]
assert data == mock_data
@patch("source_emarsys.api.requests.request")
def test_list_contacts_in_list(mock_request, client):
list_id = 123
mock_data = [{"id": 1}, {"id": 2}]
mock_request.side_effect = [
make_response(200, mock_data),
make_response(200, []),
make_response(200, []),
]
data = list(client.list_contacts_in_list(list_id))
assert mock_request.call_count == 2
_, args, _ = mock_request.mock_calls[0]
assert f"v2/contactlist/{list_id}" in args[1]
assert data == mock_data
@patch("source_emarsys.api.requests.request")
def test_list_contact_data_in_lists(mock_request, client):
list_id = 123
field_ids = [1, 2, 3, 4]
mock_data = [{"id": 1}, {"id": 2}]
mock_request.side_effect = [
make_response(200, mock_data),
make_response(200, []),
make_response(200, []),
]
data = list(client.list_contact_data_in_list(list_id, field_ids))
assert mock_request.call_count == 2
_, args, kwargs = mock_request.mock_calls[0]
assert f"v2/contactlist/{list_id}/contacts/data" in args[1]
assert data == mock_data
assert kwargs["params"]["fields"] == "1,2,3,4"
@patch("source_emarsys.api.requests.request")
def test_list_segments(mock_request, client):
mock_data = [{"id": 1}, {"id": 2}]
mock_request.side_effect = [
make_response(200, mock_data),
make_response(200, []),
make_response(200, []),
]
data = list(client.list_segments())
assert mock_request.call_count == 2
_, args, _ = mock_request.mock_calls[0]
assert "v2/filter" in args[1]
assert data == mock_data
```
#### File: source-emarsys/unit_tests/test_streams.py
```python
import re
from datetime import datetime, timedelta
from http import HTTPStatus
from unittest.mock import MagicMock
import pytz
import pytest
from airbyte_cdk.models.airbyte_protocol import SyncMode
from source_emarsys.streams import ContactLists, Contacts, EmarsysStream, PaginatedEmarsysStream, EmarsysAuthenticator
@pytest.fixture
def patch_base_class(mocker):
# Mock abstract methods to enable instantiating abstract class
mocker.patch.object(EmarsysStream, "path", "v0/example_endpoint")
mocker.patch.object(EmarsysStream, "primary_key", "test_primary_key")
mocker.patch.object(EmarsysStream, "__abstractmethods__", set())
@pytest.fixture
def patch_paginated_class(mocker):
# Mock abstract methods to enable instantiating abstract class
mocker.patch.object(PaginatedEmarsysStream, "path", "v0/example_endpoint")
mocker.patch.object(PaginatedEmarsysStream, "primary_key", "test_primary_key")
mocker.patch.object(PaginatedEmarsysStream, "__abstractmethods__", set())
def test_request_params(patch_base_class):
stream = EmarsysStream()
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
expected_params = {}
assert stream.request_params(**inputs) == expected_params
def test_request_params__paginated_stream(patch_paginated_class):
stream = PaginatedEmarsysStream()
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
expected_params = {"offset": 0, "limit": 10000}
assert stream.request_params(**inputs) == expected_params
def test_request_params__paginated_stream__next_page_token(patch_paginated_class):
stream = PaginatedEmarsysStream()
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": {"offset": 10000}}
expected_params = {"offset": 10000, "limit": 10000}
assert stream.request_params(**inputs) == expected_params
def test_next_page_token__data_empty__return_none(patch_paginated_class):
stream = PaginatedEmarsysStream()
mock = MagicMock()
mock.json.return_value = {"data": []}
inputs = {"response": mock}
expected_token = None
assert stream.next_page_token(**inputs) == expected_token
def test_next_page_token__data_exists__return_offset(patch_paginated_class):
stream = PaginatedEmarsysStream()
mock = MagicMock()
mock.json.return_value = {"data": [1, 2, 3]}
mock.request.url = "http://api.com?offset=7"
inputs = {"response": mock}
expected_token = {"offset": 10}
assert stream.next_page_token(**inputs) == expected_token
@pytest.mark.parametrize("payload, expected", (({}, None), ({"data": [1, 2, 3]}, 1), ({"not_data": "abc"}, None)))
def test_parse_response__return_data(payload, expected, patch_base_class):
stream = EmarsysStream()
mock = MagicMock()
mock.json.return_value = payload
inputs = {"response": mock}
assert next(stream.parse_response(**inputs), None) == expected
def test_authenticator__get_auth_header():
auth = EmarsysAuthenticator("user1", "password")
pattern = r'UsernameToken Username="user1", PasswordDigest="[a-zA-Z0-9+=/]+", Nonce="[a-zA-Z0-9]+", Created="\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\+00:00"'
request = MagicMock()
request.headers = {}
auth_request = auth(request)
assert "X-WSSE" in auth_request.headers
assert re.match(pattern, auth_request.headers["X-WSSE"])
def test_request_headers(patch_base_class):
stream = EmarsysStream()
inputs = {"stream_slice": None, "stream_state": None, "next_page_token": None}
expected_headers = {"Content-Type": "application/json", "Accept": "application/json"}
assert stream.request_headers(**inputs) == expected_headers
def test_http_method(patch_base_class):
stream = EmarsysStream()
expected_method = "GET"
assert stream.http_method == expected_method
@pytest.mark.parametrize(
("http_status", "should_retry"),
[
(HTTPStatus.OK, False),
(HTTPStatus.BAD_REQUEST, False),
(HTTPStatus.TOO_MANY_REQUESTS, True),
(HTTPStatus.INTERNAL_SERVER_ERROR, True),
],
)
def test_should_retry(patch_base_class, http_status, should_retry):
response_mock = MagicMock()
response_mock.status_code = http_status
stream = EmarsysStream()
assert stream.should_retry(response_mock) == should_retry
def test_backoff_time__no_rate_limit__one_sec(patch_base_class):
response_mock = MagicMock()
stream = EmarsysStream()
expected_backoff_time = 1
assert stream.backoff_time(response_mock) == expected_backoff_time
def test_backoff_time__rate_limit(patch_base_class):
response_mock = MagicMock()
reset_dt = datetime.utcnow() + timedelta(seconds=60)
response_mock.headers = {"X-Ratelimit-Reset": str(int(reset_dt.replace(tzinfo=pytz.UTC).timestamp()))}
stream = EmarsysStream()
expected_backoff_time = 59
assert stream.backoff_time(response_mock) > expected_backoff_time
def test_backoff_time__stale_rate_limit__one_sec(patch_base_class):
response_mock = MagicMock()
reset_dt = datetime.utcnow() - timedelta(seconds=60)
response_mock.headers = {"X-Ratelimit-Reset": str(int(reset_dt.replace(tzinfo=pytz.UTC).timestamp()))}
stream = EmarsysStream()
expected_backoff_time = 1
assert stream.backoff_time(response_mock) == expected_backoff_time
@pytest.fixture
def mock_get_fields(mocker):
mock_response = MagicMock()
mock_response.json.return_value = {
"data": [
{"id": 1, "string_id": "field_a", "application_type": "bigtext"},
{"id": 2, "string_id": "field_b", "application_type": "numeric"},
{"id": 3, "string_id": "field_c", "application_type": "date"},
]
}
mocker.patch("source_emarsys.streams.requests.Session.get", return_value=mock_response)
def test_contacts_build_field_mapping(mock_get_fields):
stream = Contacts(parent=ContactLists(), fields=["field_x"])
field_dict, field_string_2_id = stream._build_field_mapping()
expected_field_dict = {
"1": {"id": 1, "string_id": "field_a", "application_type": "bigtext"},
"2": {"id": 2, "string_id": "field_b", "application_type": "numeric"},
"3": {"id": 3, "string_id": "field_c", "application_type": "date"},
}
expected_field_string_2_id = {"field_a": "1", "field_b": "2", "field_c": "3"}
assert expected_field_dict == field_dict
assert expected_field_string_2_id == field_string_2_id
@pytest.mark.parametrize(
"contact_lists, expected_contact_lists",
(
(
[
{"id": 1, "name": "list a", "created": "0001-01-01 00:00:00"},
{"id": 2, "name": "recur old", "created": "0002-02-02 00:00:00"},
{"id": 3, "name": "list b", "created": "0003-03-03 00:00:00"},
{"id": 4, "name": "recur new", "created": "0004-04-04 00:00:00"},
],
[
{"parent": {"id": 1, "name": "list a", "created": "0001-01-01 00:00:00"}},
{"parent": {"id": 3, "name": "list b", "created": "0003-03-03 00:00:00"}},
{"parent": {"id": 4, "name": "recur new", "created": "0004-04-04 00:00:00"}},
],
),
(
[
{"id": 1, "name": "list a", "created": "0001-01-01 00:00:00"},
{"id": 2, "name": "list b", "created": "0003-03-03 00:00:00"},
],
[
{"parent": {"id": 1, "name": "list a", "created": "0001-01-01 00:00:00"}},
{"parent": {"id": 2, "name": "list b", "created": "0003-03-03 00:00:00"}},
],
),
(
[
{"id": 1, "name": "recur old", "created": "0002-02-02 00:00:00"},
{"id": 2, "name": "recur new", "created": "0004-04-04 00:00:00"},
{"id": 3, "name": "recur very new", "created": "2022-03-03 00:00:00"},
],
[
{"parent": {"id": 3, "name": "recur very new", "created": "2022-03-03 00:00:00"}},
],
),
),
)
def test_contacts__stream_slices(contact_lists, expected_contact_lists, mocker):
mocker.patch("source_emarsys.streams.ContactLists.read_records", return_value=iter(contact_lists))
stream = Contacts(parent=ContactLists(), fields=["field_x"], recur_list_patterns=["^recur.*"])
inputs = {"sync_mode": SyncMode.full_refresh, "cursor_field": None, "stream_state": None}
assert list(stream.stream_slices(**inputs)) == expected_contact_lists
def test_contacts__request_params(mock_get_fields):
stream = Contacts(parent=ContactLists(), fields=["field_a", "field_b", "field_c"])
expected_param_fields = "1,2,3"
inputs = {"stream_state": None, "stream_slice": None, "next_page_token": None}
assert stream.request_params(**inputs)["fields"] == expected_param_fields
def test_contacts__get_airbyte_format(mock_get_fields):
stream = Contacts(parent=ContactLists(), fields=[])
assert stream.get_airbyte_format("field_a") == {"type": ["null", "string"]}
assert stream.get_airbyte_format("field_b") == {"type": ["null", "number"]}
assert stream.get_airbyte_format("field_c") == {"type": ["null", "string"], "format": "date"}
def test_contacts__get_json_schema(mock_get_fields):
stream = Contacts(parent=ContactLists(), fields=["field_a", "field_b", "field_c"])
expected_schema_properties = {
"id": {"type": ["null", "string"]},
"uid": {"type": ["null", "string"]},
"field_a": {"type": ["null", "string"]},
"field_b": {"type": ["null", "number"]},
"field_c": {"type": ["null", "string"], "format": "date"},
}
assert stream.get_json_schema()["properties"] == expected_schema_properties
def test_contacts__parse_response(mock_get_fields):
stream = Contacts(parent=ContactLists(), fields=[])
mock_response = MagicMock()
mock_response.json.return_value = {
"data": {
"1": {"fields": {"id": "1", "uid": "1a", "1": "aaa", "2": 111, "3": "0001-01-01 00:00:00"}},
"2": {"fields": {"id": "2", "uid": "2a", "1": None, "2": None, "3": None}},
}
}
inputs = {"response": mock_response, "stream_state": None, "stream_slice": None, "next_page_token": None}
expected = [
{"id": "1", "uid": "1a", "field_a": "aaa", "field_b": 111, "field_c": "0001-01-01 00:00:00"},
{"id": "2", "uid": "2a", "field_a": None, "field_b": None, "field_c": None},
]
assert list(stream.parse_response(**inputs)) == expected
``` |
{
"source": "joonson/face_trainer",
"score": 2
} |
#### File: face_trainer/models/ResNet18.py
```python
import torchvision
def MainModel(nOut=256, **kwargs):
return torchvision.models.resnet18(num_classes=nOut)
```
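A minimal sketch of how this wrapper is consumed (random input just to show the embedding shape; assumes the repo root is on the path so `models.ResNet18` resolves):

```python
import torch
from models.ResNet18 import MainModel

model = MainModel(nOut=512).eval()     # ResNet-18 whose final FC layer outputs 512-d embeddings
dummy = torch.randn(4, 3, 224, 224)    # batch of 4 RGB crops, matching the transforms below
with torch.no_grad():
    embeddings = model(dummy)
print(embeddings.shape)                # torch.Size([4, 512])
```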
#### File: joonson/face_trainer/trainEmbedNet.py
```python
import sys, time, os, argparse, socket
import yaml
import pdb
import glob
import datetime
from utils import *
from EmbedNet import *
from DatasetLoader import get_data_loader
import torchvision.transforms as transforms
# ## ===== ===== ===== ===== ===== ===== ===== =====
# ## Parse arguments
# ## ===== ===== ===== ===== ===== ===== ===== =====
parser = argparse.ArgumentParser(description = "FaceNet");
parser.add_argument('--config', type=str, default=None, help='Config YAML file');
## Data loader
parser.add_argument('--batch_size', type=int, default=200, help='Batch size, number of classes per batch');
parser.add_argument('--max_img_per_cls', type=int, default=500, help='Maximum number of images per class per epoch');
parser.add_argument('--nDataLoaderThread', type=int, default=5, help='Number of loader threads');
## Training details
parser.add_argument('--test_interval', type=int, default=5, help='Test and save every [test_interval] epochs');
parser.add_argument('--max_epoch', type=int, default=100, help='Maximum number of epochs');
parser.add_argument('--trainfunc', type=str, default="softmax", help='Loss function');
## Optimizer
parser.add_argument('--optimizer', type=str, default="adam", help='sgd or adam');
parser.add_argument('--scheduler', type=str, default="steplr", help='Learning rate scheduler');
parser.add_argument('--lr', type=float, default=0.001, help='Learning rate');
parser.add_argument("--lr_decay", type=float, default=0.90, help='Learning rate decay every [test_interval] epochs');
parser.add_argument('--weight_decay', type=float, default=0, help='Weight decay in the optimizer');
## Loss functions
parser.add_argument("--hard_prob", type=float, default=0.5, help='Hard negative mining probability, otherwise random, only for some loss functions');
parser.add_argument("--hard_rank", type=int, default=10, help='Hard negative mining rank in the batch, only for some loss functions');
parser.add_argument('--margin', type=float, default=0.1, help='Loss margin, only for some loss functions');
parser.add_argument('--scale', type=float, default=30, help='Loss scale, only for some loss functions');
parser.add_argument('--nPerClass', type=int, default=1, help='Number of images per class per batch, only for metric learning based losses');
parser.add_argument('--nClasses', type=int, default=8700, help='Number of classes in the softmax layer, only for softmax-based losses');
## Load and save
parser.add_argument('--initial_model', type=str, default="", help='Initial model weights');
parser.add_argument('--save_path', type=str, default="exps/exp1", help='Path for model and logs');
## Training and test data
parser.add_argument('--train_path', type=str, default="data/vggface2", help='Absolute path to the train set');
parser.add_argument('--train_ext', type=str, default="jpg", help='Training files extension');
parser.add_argument('--test_path', type=str, default="data/test", help='Absolute path to the test set');
parser.add_argument('--test_list', type=str, default="data/test_list.csv", help='Evaluation list');
## Model definition
parser.add_argument('--model', type=str, default="ResNet18", help='Name of model definition');
parser.add_argument('--nOut', type=int, default=512, help='Embedding size in the last FC layer');
## For test only
parser.add_argument('--eval', dest='eval', action='store_true', help='Eval only')
## Distributed and mixed precision training
parser.add_argument('--mixedprec', dest='mixedprec', action='store_true', help='Enable mixed precision training')
args = parser.parse_args();
## Parse YAML
def find_option_type(key, parser):
for opt in parser._get_optional_actions():
if ('--' + key) in opt.option_strings:
return opt.type
raise ValueError
if args.config is not None:
with open(args.config, "r") as f:
yml_config = yaml.load(f, Loader=yaml.FullLoader)
for k, v in yml_config.items():
if k in args.__dict__:
typ = find_option_type(k, parser)
args.__dict__[k] = typ(v)
else:
sys.stderr.write("Ignored unknown parameter {} in yaml.\n".format(k))
# ## ===== ===== ===== ===== ===== ===== ===== =====
# ## Trainer script
# ## ===== ===== ===== ===== ===== ===== ===== =====
def main_worker(args):
## Load models
s = EmbedNet(**vars(args)).cuda();
it = 1
## Write args to scorefile
scorefile = open(args.result_save_path+"/scores.txt", "a+");
strtime = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
scorefile.write('%s\n%s\n'%(strtime,args))
scorefile.flush()
## Input transformations for training
train_transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize(256),
transforms.RandomCrop([224,224]),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
## Input transformations for evaluation
test_transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Resize(256),
transforms.CenterCrop([224,224]),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
## Initialise trainer and data loader
trainLoader = get_data_loader(transform=train_transform, **vars(args));
trainer = ModelTrainer(s, **vars(args))
## Load model weights
modelfiles = glob.glob('%s/model0*.model'%args.model_save_path)
modelfiles.sort()
## If the target directory already exists, start from the existing file
if len(modelfiles) >= 1:
trainer.loadParameters(modelfiles[-1]);
print("Model %s loaded from previous state!"%modelfiles[-1]);
it = int(os.path.splitext(os.path.basename(modelfiles[-1]))[0][5:]) + 1
elif(args.initial_model != ""):
trainer.loadParameters(args.initial_model);
print("Model %s loaded!"%args.initial_model);
## If the current iteration is not 1, update the scheduler
for ii in range(1,it):
trainer.__scheduler__.step()
## Evaluation code
if args.eval == True:
sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
print('EER %2.4f'%(result[1]))
quit();
## Core training script
for it in range(it,args.max_epoch+1):
clr = [x['lr'] for x in trainer.__optimizer__.param_groups]
print(time.strftime("%Y-%m-%d %H:%M:%S"), it, "Training epoch %d with LR %f "%(it,max(clr)));
loss, traineer = trainer.train_network(trainLoader, verbose=True);
if it % args.test_interval == 0:
sc, lab = trainer.evaluateFromList(transform=test_transform, **vars(args))
result = tuneThresholdfromScore(sc, lab, [1, 0.1]);
print("IT %d, VEER %2.4f"%(it, result[1]));
scorefile.write("IT %d, VEER %2.4f\n"%(it, result[1]));
trainer.saveParameters(args.model_save_path+"/model%09d.model"%it);
print(time.strftime("%Y-%m-%d %H:%M:%S"), "TEER/TAcc %2.2f, TLOSS %f"%( traineer, loss));
scorefile.write("IT %d, TEER/TAcc %2.2f, TLOSS %f\n"%(it, traineer, loss));
scorefile.flush()
scorefile.close();
# ## ===== ===== ===== ===== ===== ===== ===== =====
# ## Main function
# ## ===== ===== ===== ===== ===== ===== ===== =====
def main():
args.model_save_path = args.save_path+"/model"
args.result_save_path = args.save_path+"/result"
args.feat_save_path = ""
if not(os.path.exists(args.model_save_path)):
os.makedirs(args.model_save_path)
if not(os.path.exists(args.result_save_path)):
os.makedirs(args.result_save_path)
main_worker(args)
if __name__ == '__main__':
main()
``` |
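Every key in the optional YAML config must match one of the argparse options above; unknown keys are reported and ignored, and known keys are cast with the option's own type. A hypothetical config written from Python for illustration (the file name `exp1.yaml` is made up, and the values mirror the defaults above):

```python
import yaml

config = {
    "model": "ResNet18",
    "trainfunc": "softmax",
    "nClasses": 8700,
    "batch_size": 200,
    "train_path": "data/vggface2",
    "test_list": "data/test_list.csv",
    "save_path": "exps/exp1",
}

with open("exp1.yaml", "w") as f:
    yaml.dump(config, f)

# then launch training with: python trainEmbedNet.py --config exp1.yaml
```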
{
"source": "JoonSoo-Park/Yet_Another_Algorithms_Repository",
"score": 4
} |
#### File: Algorithms/shell_sort/python-shell_sort.py
```python
def shell_sort(array):
    """Sort the list in place using Shell's gap-insertion method."""
    n = len(array)
    gap = n // 2  # integer division keeps the gap an int on Python 3
    while gap > 0:
        # gapped insertion sort for the current gap size
        for i in range(gap, n):
            temp = array[i]
            j = i
            while j >= gap and array[j - gap] > temp:
                array[j] = array[j - gap]
                j -= gap
            array[j] = temp
        gap //= 2
#shell_sort
``` |
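A tiny usage check of the function above (it sorts in place and returns nothing):

```python
data = [35, 33, 42, 10, 14, 19, 27, 44]
shell_sort(data)
print(data)  # [10, 14, 19, 27, 33, 35, 42, 44]
```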
{
"source": "joonty/vim-do",
"score": 3
} |
#### File: autoload/python/buffer.py
```python
import vim
class VimBuffer:
def __init__(self, buffer):
self._buffer = buffer
def replace(self, content):
self._buffer[:] = content
def line(self, number):
return self._buffer[number]
def write(self, msg, overwrite):
last_line = len(self._buffer)
if isinstance(msg, list):
to_write = msg
else:
to_write = str(msg).split('\n')
if len(to_write) == 1 and to_write[0] == "":
return (last_line, last_line)
if overwrite or self.is_empty():
self._buffer[:] = to_write
else:
self._buffer.append(to_write)
return (last_line, last_line + len(to_write))
def overwrite(self, msg, lineno, allowEmpty):
""" insert into current position in buffer"""
if not msg and allowEmpty == False:
return
if isinstance(msg, list):
to_write = msg
else:
to_write = str(msg).split('\n')
lstart = lineno - 1
lend = lstart + len(to_write)
self._buffer[lstart:lend] = to_write
return (lstart, lend)
def delete(self, start_line, end_line = None):
try:
if not end_line:
end_line = start_line + 1
self._buffer[end_line]
remaining_buffer = self._buffer[end_line:]
del self._buffer[start_line:]
self._buffer.append(remaining_buffer)
except IndexError:
del self._buffer[start_line:]
def contents(self):
return self._buffer[:]
def clean(self):
self._buffer[:] = []
def is_empty(self):
if len(self._buffer) == 1 and len(self._buffer[0]) == 0:
return True
else:
return False
class HiddenBuffer:
def __init__(self, buffer = []):
self._buffer = buffer[:]
def line(self, number):
return self._buffer[number]
def replace(self, contents):
self._buffer[:] = contents[:]
def write(self, msg, overwrite):
last_line = len(self._buffer)
if isinstance(msg, list):
to_write = msg
else:
to_write = str(msg).split('\n')
        if len(to_write) == 1 and to_write[0] == "":
            return (last_line, last_line)
if overwrite or self.is_empty():
self._buffer[:] = to_write
else:
self._buffer.extend(to_write)
return (last_line, last_line + len(to_write))
def overwrite(self, msg, lineno, allowEmpty):
""" insert into current position in buffer"""
if not msg and allowEmpty == False:
return
if isinstance(msg, list):
to_write = msg
else:
to_write = str(msg).split('\n')
last_line = len(self._buffer)
lstart = lineno - 1
lend = lstart + len(to_write)
self._buffer[lstart:lend] = to_write
return (lstart, lend)
def delete(self, start_line, end_line = None):
try:
if not end_line:
end_line = start_line + 1
self._buffer[start_line:end_line] = []
except IndexError:
del self._buffer[start_line:]
def clean(self):
self._buffer[:] = []
def contents(self):
return self._buffer[:]
def is_empty(self):
return not self._buffer
```
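Of the two classes, only `HiddenBuffer` can be exercised outside of Vim (`VimBuffer` wraps a real `vim` buffer object). A sketch, assuming `autoload/python` is on `sys.path`:

```python
from buffer import HiddenBuffer

buf = HiddenBuffer()
buf.write("first line\nsecond line", overwrite=False)
buf.write("third line", overwrite=False)
print(buf.contents())   # ['first line', 'second line', 'third line']

buf.overwrite("SECOND", 2, allowEmpty=False)
buf.delete(0)           # drop the first line
print(buf.contents())   # ['SECOND', 'third line']
print(buf.is_empty())   # False
```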
#### File: autoload/python/do.py
```python
import os
import inspect
import sys
directory = os.path.dirname(inspect.getfile(inspect.currentframe()))
sys.path.append(directory)
import rendering
import async
import window
import vim
import time
import string
import signal
from utils import *
class Do:
def __init__(self):
self.__process_pool = async.ProcessPool()
self.__processes = ProcessCollection()
self.__process_renderer = rendering.ProcessRenderer()
self.__au_assigned = False
self.__last_check = time.time() * 1000
def __del__(self):
self.stop()
def execute(self, cmd, quiet = False):
pid = self.__process_pool.execute(cmd)
log("Started command with pid %i: %s" %(pid, cmd))
process = self.__processes.add(cmd, pid)
self.__process_renderer.add_process(process, quiet)
self.__assign_autocommands()
self.check()
def reload_options(self):
Options.reload()
def toggle_command_window(self):
self.__process_renderer.toggle_command_window()
def mark_command_window_as_closed(self):
self.__process_renderer.destroy_command_window()
def mark_process_window_as_closed(self):
try:
self.__process_renderer.destroy_process_window()
except Exception, e:
log("Error: %s" % str(e))
def show_process_from_command_window(self):
lineno = vim.current.window.cursor[0]
pid = self.__process_renderer.get_pid_by_line_number(lineno)
process = self.__processes.get_by_pid(pid)
if process is not None:
self.__process_renderer.show_process(process)
def check(self):
log("check()")
if (1000 * time.time()) - self.__last_check > Options.check_interval():
self.check_now()
self.__last_check = time.time() * 1000
def check_now(self):
log("Checking background threads output")
outputs = self.__process_pool.get_outputs()
changed_processes = set()
for output in outputs:
if output[1] is not None:
log("Process %s has finished with exit status %s"
%(output[0], output[1]))
process = self.__processes.update(*output)
changed_processes.add(process)
for process in changed_processes:
self.__process_renderer.update_process(process)
self.__process_pool.cleanup()
if self.__processes.all_finished():
log("All background threads completed")
self.__unassign_autocommands()
else:
s = 'feedkeys("\\%s")' % Options.refresh_key()
log(s)
vim.eval(s)
def enable_logger(self, path):
Log.set_logger(FileLogger(Logger.DEBUG, path))
def stop(self):
self.__processes.kill_all()
self.__process_pool.stop()
def __assign_autocommands(self):
if self.__au_assigned:
return
log("Assigning autocommands for background checking")
vim.command('call do#AssignAutocommands()')
self.__au_assigned = True
def __unassign_autocommands(self):
log("Unassigning autocommands")
vim.command('call do#UnassignAutocommands()')
self.__au_assigned = False
class ProcessCollection:
def __init__(self):
self.__processes = {}
def add(self, command, pid):
process = Process(command, pid)
self.__processes[pid] = process
return process
def get_by_pid(self, pid):
return next((p for p in self.__processes.values() if p.get_pid() == pid), None)
def update(self, pid, exit_status, stdout, stderr):
process = self.__processes[pid]
if process is not None:
if exit_status is not None:
process.mark_as_complete(exit_status)
if stdout or stderr:
process.output().append(stdout, stderr)
return process
def all_finished(self):
return len(self.get_running()) == 0
def get_running(self):
return filter(lambda p: p.is_running(), self.__processes.values())
def kill_all(self):
for process in self.get_running():
process.kill()
class Process:
def __init__(self, command, pid):
self.__command = command
self.__pid = str(pid)
self.__start_time = time.time()
self.__output = Output()
self.__exit_code = None
self.__time = None
def mark_as_complete(self, exit_code):
self.__exit_code = str(exit_code)
self.__time = round((time.time() - self.__start_time) * 1000)
def has_finished(self):
return self.__exit_code is not None
def is_running(self):
return not self.has_finished()
def get_pid(self):
return self.__pid
def get_status(self):
if self.__exit_code is None:
return "Running"
else:
return "exited <%s>" % self.__exit_code
def get_command(self):
return self.__command
def get_time(self):
if self.__time:
return self.__time
else:
return round((time.time() - self.__start_time) * 1000)
def output(self):
return self.__output
def name(self):
return "DoOutput(%s)" % self.__pid
def kill(self):
try:
os.kill(int(self.__pid), signal.SIGTERM)
except:
pass
class Output:
def __init__(self):
self.__output = []
def all(self):
return self.__output
def __len__(self):
return len(self.__output)
def from_line(self, line):
return self.__output[line:]
def append(self, stdout, stderr):
if stdout is not None:
self.__output.append(stdout)
if stderr is not None:
self.__output.append("E> %s" % stderr)
```
#### File: autoload/python/rendering.py
```python
import window
from utils import Options, log
class ProcessRenderer:
def __init__(self):
self.__command_window = window.CommandWindow()
self.__command_window.write(CommandWindowHeaderFormat())
self.__command_window_line_maps = {}
self.__command_window_line_map_order = []
self.__process_window = window.ProcessWindow()
self.__process_window_output_line = 0
self.__process_window_process = None
def get_pid_by_line_number(self, lineno):
try:
# Account for header
return self.__command_window_line_map_order[lineno - 4]
except IndexError:
return None
def add_process(self, process, quiet):
if not quiet and Options.auto_show_process_window():
self.show_process(process)
(first_line, _) = self.__command_window.write(CommandWindowProcessFormat(process))
self.__command_window_line_maps[process.get_pid()] = first_line + 1
self.__command_window_line_map_order.append(process.get_pid())
def show_process(self, process):
log("showing process output: %s" % process.get_pid())
self.__process_window_process = process
self.__process_window.clean()
self.__process_window_output_line = 0
self.__process_window.create(Options.new_process_window_command())
self.__process_window.write(ProcessWindowHeaderFormat(process))
self.__write_output(process.output().all())
def __write_output(self, output):
(first, last) = self.__process_window.write(output)
self.__process_window_output_line += last - first
def update_process(self, process):
self.__command_window.overwrite(CommandWindowProcessFormat(process),
self.__command_window_line_maps[process.get_pid()],
True)
if self.__process_window_process == process:
log("updating process output: %s, %s"
%(process.get_pid(),process.get_status()))
self.__write_output(process.output().from_line(self.__process_window_output_line))
self.__process_window.overwrite(ProcessWindowHeaderFormat(process),
1, True)
def toggle_command_window(self):
self.__command_window.toggle("rightbelow 7new")
def destroy_command_window(self):
self.__command_window.destroy()
def destroy_process_window(self):
self.__process_window.destroy()
class ProcessWindowHeaderFormat:
def __init__(self, process):
self.__process = process
def __str__(self):
values = (self.__process.get_command(),
self.__process.get_status(),
self.__formatted_time(),
self.__process.get_pid())
max_length = max(map(len, values)) + 12
title = "=" * max_length + "\n"
title += " [command] %s\n" % values[0]
title += " [status] %s\n" % values[1]
title += " [time] %s\n" % values[2]
title += " [pid] %s\n" % values[3]
title += "=" * max_length
return title
def __formatted_time(self):
time = self.__process.get_time()
if time > 1000.0:
time = round(time / 1000.0, 2)
unit = "s"
else:
unit = "ms"
return "{:,}".format(time) + unit
class CommandWindowHeaderFormat:
def __str__(self):
return '''
=============================================================================
PID | COMMAND | STATUS
=============================================================================
'''[1:-1]
class CommandWindowProcessFormat:
def __init__(self, process):
self.__process = process
def __str__(self):
s = ""
cmd = self.__process.get_command()
cmd = cmd if len(cmd) <= 30 else cmd[:27] + "..."
s += " %-7s | %-51s | %s" %(self.__process.get_pid(), cmd,
self.__process.get_status())
return s
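# Illustrative rendering (hypothetical pid and command) of one command-window
# row produced by the " %-7s | %-51s | %s" format above:
#
#    4711    | make test                                           | exited <0>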
``` |
{
"source": "joonty/vim-xdebug",
"score": 2
} |
#### File: vim-xdebug/plugin/debugger.py
```python
import os
import sys
import vim
import socket
import base64
import traceback
import xml.dom.minidom
import re
import unicodedata
#######################################################################################################################
# #
# this diagram is little outdated. #
# #
# #
# +---[ class Debugger ]-----------+ #
# | [m] run() | #
# | [m] mark() | #
# | [m] command() | #
# | [m] stop() | #
# +--------- [m] handle_msg() ------------------+ #
# | | | | handle all other tags #
# if error +--------> [m] handle_error() | | comming from server #
# | | [m] handle_*() <-----------------+ #
# | | | #
# if <response > +--------> [m] handle_response() -------------+ #
# | | | if <response command='*'> #
# | [m] handle_response_*() <----------+ #
# | | #
# | +--[ class DbgProtocol ]--+ | #
# +-------+ 1. connect | | | | #
# |debug | ---------------------> [m] accept() | | #
# | | <-- 2. send ---------- [m] send_msg() | | #
# | server| --- 3. recv ---------> [m] recv_msg() | | #
# +-------+ | | | | #
# | +-------------------------+ | #
# | | #
# | +--[ class BreakPoint ]---+ | #
# | | manage breakpoints | | #
# | | [m] add() | | #
# | | [m] remove() | | #
# | | [m] list() | | #
# | +-------------------------+ | VIM #
# | | +--------------+-----+ #
# [m] method | +--[ class DebugUI ]------+ | | | | <----+ #
# [f] class | | [m] debug_mode() | ------------------ | +-----+ | #
# | | [m] normal_mode() | | controls | srv | | <----+ #
# | | [m] goto() | | all vim | view +-----+ | #
# | | [m] stackwrite() | | windows | | | <----+ #
# | | [m] stackwrite() | | | +-----+ | #
# | +-------------------------+ | | | | <----+ #
# | | | +-----+ | #
# | +--[ class VimWindow ]----+ | | | | <----+ #
# | | [m] create() | | +--------------+-----+ | #
# | | [m] write() | | | #
# | | [m] create() | ------------------------------------------------+ #
# | | [m] create() | | controls each debug window #
# | +-------------------------+ | (except src view) #
# | | #
# +--------------------------------+ #
# #
# global debugger <----+ #
# | creates #
# [f] debugger_init() --+ #
# [f] debugger_run() <-+ #
# [f] debugger_context() | #
# [f] debugger_command() +------ map <F5> :python debugger_run() #
# [f] debugger_stop() | ... etc ... #
# [f] debugger_mark() <-+ #
# #
# #
#######################################################################################################################
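# An illustrative lifecycle sketch, drawn only from the diagram above and the
# functions defined below (no additional behaviour is implied): debugger_init()
# creates the single global Debugger instance; a mapping such as <F5> calls
# debugger_run(), whose Debugger.run() makes DbgProtocol.accept() wait for the
# engine to connect, switches DebugUI into debug mode, registers any stored
# breakpoints via breakpoint_set, and then drives the session through the
# command()/recv() loop used by the other debugger_*() wrapper functions.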
#class XMLPrintFold(XMLPrint):
# def fixup_childs(self, line, node, level):
# line = ('{{{' + str(level+1)).ljust(level*4+6) + line + '\n'
# line += self.xml_stringfy_childs(node, level+1)
# line += '}}}' + str(level+1) + '\n'
# return line
# def fixup_single(self, line, node, level):
# return ''.ljust(level*4+6) + line + '\n'
#
class VimWindow:
""" wrapper class of window of vim """
def __init__(self, name = 'DEBUG_WINDOW'):
""" initialize """
self.name = name
self.buffer = None
self.firstwrite = 1
def isprepared(self):
""" check window is OK """
if self.buffer == None or len(dir(self.buffer)) == 0 or self.getwinnr() == -1:
return 0
return 1
def prepare(self):
""" check window is OK, if not then create """
if not self.isprepared():
self.create()
def on_create(self):
pass
def getwinnr(self):
return int(vim.eval("bufwinnr('"+self.name+"')"))
def xml_on_element(self, node,insert):
line = str(node.nodeName)
if node.hasAttributes():
for (n,v) in node.attributes.items():
line += str(' %s=%s' % (n,v))
return line
def xml_on_attribute(self, node,insert):
return str(node.nodeName)
def xml_on_entity(self, node,insert):
return 'entity node'
def xml_on_comment(self, node,insert):
return 'comment node'
def xml_on_document(self, node,insert):
return '#document'
def xml_on_document_type(self, node,insert):
return 'document type node'
def xml_on_notation(self, node,insert):
return 'notation node'
def xml_on_text(self, node,insert):
return node.data
def xml_on_processing_instruction(self, node,insert):
return 'processing instruction'
def xml_on_cdata_section(self, node,insert):
return node.data
def write(self, msg):
""" append last """
if type(msg) is unicode:
msg = unicodedata.normalize('NFKD',msg).encode('ascii','ignore')
self.prepare()
if self.firstwrite == 1:
self.firstwrite = 0
self.buffer[:] = str(msg).split('\n')
else:
self.buffer.append(str(msg).split('\n'))
self.command('normal G')
#self.window.cursor = (len(self.buffer), 1)
def insert(self, msg, lineno = None, overwrite = False, allowEmpty = False):
""" insert into current position in buffer"""
if len(msg) == 0 and allowEmpty == False:
return
self.prepare()
if self.firstwrite == 1:
self.firstwrite = 0
self.buffer[:] = str(msg).split('\n')
else:
if lineno == None:
(lineno, rol) = vim.current.window.cursor
remaining_buffer = str(msg).split('\n')
if overwrite:
lfrom = lineno + 1
else:
lfrom = lineno
remaining_buffer.extend(self.buffer[lfrom:])
del self.buffer[lineno:]
for line in remaining_buffer:
self.buffer.append(line)
def create(self, method = 'new'):
""" create window """
vim.command('silent ' + method + ' ' + self.name)
#if self.name != 'LOG___WINDOW':
vim.command("setlocal buftype=nofile")
self.buffer = vim.current.buffer
self.width = int( vim.eval("winwidth(0)") )
self.height = int( vim.eval("winheight(0)") )
self.on_create()
def destroy(self):
""" destroy window """
if self.buffer == None or len(dir(self.buffer)) == 0:
return
#if self.name == 'LOG___WINDOW':
# self.command('hide')
#else:
self.command('bdelete ' + self.name)
self.firstwrite = 1
def clean(self):
""" clean all datas in buffer """
self.prepare()
self.buffer[:] = []
self.firstwrite = 1
def command(self, cmd):
""" go to my window & execute command """
self.prepare()
winnr = self.getwinnr()
if winnr != int(vim.eval("winnr()")):
vim.command(str(winnr) + 'wincmd w')
vim.command(cmd)
def _xml_stringfy(self, node, insert, level = 0, encoding = None):
if node.nodeType == node.ELEMENT_NODE:
line = self.xml_on_element(node,insert)
elif node.nodeType == node.ATTRIBUTE_NODE:
line = self.xml_on_attribute(node,insert)
elif node.nodeType == node.ENTITY_NODE:
line = self.xml_on_entity(node,insert)
elif node.nodeType == node.COMMENT_NODE:
line = self.xml_on_comment(node,insert)
elif node.nodeType == node.DOCUMENT_NODE:
line = self.xml_on_document(node,insert)
elif node.nodeType == node.DOCUMENT_TYPE_NODE:
line = self.xml_on_document_type(node,insert)
elif node.nodeType == node.NOTATION_NODE:
line = self.xml_on_notation(node,insert)
elif node.nodeType == node.PROCESSING_INSTRUCTION_NODE:
line = self.xml_on_processing_instruction(node,insert)
elif node.nodeType == node.CDATA_SECTION_NODE:
line = self.xml_on_cdata_section(node,insert)
elif node.nodeType == node.TEXT_NODE:
line = self.xml_on_text(node,insert)
else:
line = 'unknown node type'
if node.hasChildNodes():
return self.fixup_childs(line, node, insert, level)
elif len(line) > 0:
return self.fixup_single(line, node, insert, level)
return line
def fixup_childs(self, line, node, insert, level):
line = ''.ljust(level*4) + line + '\n'
line += self.xml_stringfy_childs(node, insert, level+1)
return line
def fixup_single(self, line, node, insert, level):
return ''.ljust(level*4) + line + '\n'
def xml_stringfy(self, xml, insert = False):
return self._xml_stringfy(xml,insert)
def xml_stringfy_childs(self, node, insert = False, level = 0):
line = ''
for cnode in node.childNodes:
line += self._xml_stringfy(cnode, insert, level)
return line
def write_xml(self, xml):
self.write(self.xml_stringfy(xml))
def write_xml_childs(self, xml):
self.write(self.xml_stringfy_childs(xml))
def insert_xml(self, xml,lineno):
level = self.determine_current_level(lineno)
string = self.xml_stringfy(xml,True,level-1)
self.insert(string.strip("\n"),lineno,True)
def insert_xml_childs(self, xml,lineno):
level = self.count_left_spaces(lineno)
string = self.xml_stringfy_childs(xml,True,level-1)
self.insert(string.strip("\n"),lineno,True)
def count_left_spaces(self,lineno):
line = self.buffer[lineno]
matches = re.match("^(\s*)",line)
if matches:
spaces = matches.group(1)
return len(spaces)
else:
return 0
class StackWindow(VimWindow):
def __init__(self, name = 'STACK_WINDOW'):
VimWindow.__init__(self, name)
def xml_on_element(self, node, insert):
if node.nodeName != 'stack':
return VimWindow.xml_on_element(self, node, insert)
else:
if node.getAttribute('where') != '{main}':
fmark = '()'
else:
fmark = ''
return str('%-2s %-15s %s:%s' % ( \
node.getAttribute('level'), \
node.getAttribute('where')+fmark, \
node.getAttribute('filename')[7:], \
node.getAttribute('lineno')))
def on_create(self):
self.command('highlight CurStack term=reverse ctermfg=White ctermbg=Red gui=reverse')
self.highlight_stack(0)
def highlight_stack(self, no):
self.command('syntax clear')
self.command('syntax region CurStack start="^' +str(no)+ ' " end="$"')
class LogWindow(VimWindow):
def __init__(self, name = 'LOG___WINDOW'):
VimWindow.__init__(self, name)
def on_create(self):
self.command('setlocal wrap fdm=marker fmr={{{,}}} fdl=0')
class TraceWindow(VimWindow):
def __init__(self, name = 'TRACE_WINDOW'):
VimWindow.__init__(self, name)
self.created = 0
def xml_on_element(self, node, insert):
if node.nodeName != 'error':
return VimWindow.xml_on_element(self, node, insert)
else:
desc = ''
if node.hasAttribute('code'):
desc = ' : '+error_msg[int(node.getAttribute('code'))]
return VimWindow.xml_on_element(self, node, insert) + desc
def create(self,method="new"):
self.created = 1
VimWindow.create(self,method)
def write(self,msg):
if self.created == 0:
self.create('rightbelow 1new')
VimWindow.write(self,msg)
def on_create(self):
self.command('set wrap fdm=marker fmr={{{,}}} fdl=0')
class CmdWindow(VimWindow):
def __init__(self, name = 'CMD_WINDOW'):
VimWindow.__init__(self, name)
def input(self, mode, arg = ''):
line = self.buffer[-1]
if line[:len(mode)+1] == '{'+mode+'}':
self.buffer[-1] = line + arg
else:
self.buffer.append('{'+mode+'} '+arg)
def get_command(self,latest = True):
if latest == True:
line = self.buffer[-1]
else:
(lnum, rol) = vim.current.window.cursor
line = self.buffer[lnum-1]
if line[0] == '#':
raise CmdInvalidError({"message":"Line is a comment, not a command"})
allowed_cmds = ["eval","property_get","property_insert","context_get","context_class","context_global","context_names"]
matches = re.match('^\{([^}]+)\}\s*(.*)$',line)
if matches:
if matches.group(1) in allowed_cmds:
return (matches.group(1),matches.group(2))
else:
raise CmdInvalidError({"message":"Not a command: "+matches.group(1)})
else:
raise CmdInvalidError({"message":"Unrecognised format for command line"})
def on_create(self):
self.command('set nowrap number fdm=marker fmr={{{,}}} fdl=0')
self.command('inoremap <buffer> <cr> <esc>:python debugger.watch_execute(False)<cr>')
self.command('nnoremap <buffer> <cr> <esc>:python debugger.watch_execute(False)<cr>')
self.write("# Choice of commands: \n\
# {context_get}, {property_get} <property>, {eval} <expr>, \
{context_global}, {context_class}\n#")
class WatchWindow(VimWindow):
def __init__(self, name = 'WATCH_WINDOW'):
VimWindow.__init__(self, name)
self.cline = None
def fixup_single(self, line, node, insert, level):
line = ''.ljust(level*1) + line
if len(line.strip()) > 0:
line += ";"
numchildren = node.getAttribute('children').decode('utf-8')
if len(numchildren) and int(numchildren) == 1:
line += " #>> press <CR> to expand"
line += "\n"
return line
def fixup_childs(self, line, node, insert, level):
global z
if len(node.childNodes) == 1 and \
(node.firstChild.nodeType == node.TEXT_NODE or \
node.firstChild.nodeType == node.CDATA_SECTION_NODE):
line = str(''.ljust(level*1) + line)
if node.getAttribute('name') == 'CLASSNAME':
return ""
encoding = node.getAttribute('encoding')
if encoding == 'base64':
s = base64.decodestring(str(node.firstChild.data)).decode('utf-8')
line += " '" + s.replace("'","\\'") + "'"
elif encoding == '':
line += " "+str(node.firstChild.data).decode('utf-8')
else:
line += '(e:'+encoding+') ' + str(node.firstChild.data).decode(encoding)
if len(line.strip()) > 0:
line += ';\n'
else:
if level == 0:
if len(line.strip()) > 0:
line += ';\n'
line += self.xml_stringfy_childs(node, insert, level+1)
if len(line.strip()) > 0:
line += '\n'
else:
fold = False
if len(line.strip()) > 0:
fold = True
line = (''.ljust(level*1) + str(line) + ';')
child_str = self.xml_stringfy_childs(node, insert, level+1)
if len(child_str.strip()) > 0:
if fold:
line = line.ljust(self.width-20) + ''.ljust(level*1) + '/*{{{' + str(level) + '*/' + '\n'
line += child_str
if fold:
line += (''.ljust(level*1) + ''.ljust(level*1)).ljust(self.width-20) + ''.ljust(level*1) + '/*}}}' + str(level) + '*/'
else:
numchildren = node.getAttribute('children').decode('utf-8')
if len(numchildren) > 0 and int(numchildren) == 1:
line += " #>> press <CR> to expand"
line += '\n'
return line
def xml_on_element(self, node, insert):
if node.nodeName == 'property':
self.type = node.getAttribute('type')
extra = ""
classname = node.getAttribute('classname').decode('utf-8')
if classname != '':
extra = " "+classname
if self.type == "array":
extra = " ["+node.getAttribute('numchildren')+"]"
name = node.getAttribute('name').decode('utf-8')
fullname = node.getAttribute('fullname').decode('utf-8')
if name == 'CLASSNAME':
return ''
elif debugger.lastcmd == "eval":
name = self.get_eval_name(node,"")
fullname = name
if self.type == 'uninitialized':
return str(('%-20s' % fullname) + " = /* uninitialized */;")
elif self.type == 'null':
return str(('%-20s' % fullname) + " = (null)")
else:
return str('%-20s' % fullname) + ' = (' + self.type + extra+')'
elif node.nodeName == 'response':
if insert == True:
return ''
else:
line = "// Command = " + node.getAttribute('command')
if debugger.lastcmd == 'eval':
line += "\n// Evaluating: "+debugger.lastarg
return line
else:
return VimWindow.xml_on_element(self, node, insert)
def get_eval_name(self, node, name):
if node.parentNode.nodeName == "response":
return self.get_eval_arg()+name
else:
if node.parentNode.getAttribute('type') == 'object':
return self.get_eval_name(node.parentNode,"->"+node.getAttribute('name').decode('utf-8')+name)
else:
return self.get_eval_name(node.parentNode,"["+node.getAttribute('name').decode('utf-8')+"]"+name)
def get_eval_arg(self):
arg = debugger.lastarg
if arg.endswith(';'):
return arg[:-1]
return arg
def write_xml_childs(self,xml):
self.clean()
VimWindow.write_xml_childs(self,xml)
def xml_on_text(self, node, insert):
if self.type == 'string':
return "'" + str(node.data) + "'"
else:
return str(node.data)
def xml_on_cdata_section(self, node, insert):
if self.type == 'string':
return "'" + str(node.data) + "'"
else:
return str(node.data)
def line_needs_children(self,lineno):
line = self.buffer[lineno]
match = re.search(r'\((array \[([0-9]+)\]|object)',line,re.M|re.I)
if match:
if match.group(1) == 'object':
if "{{{" in line:
return False
else:
return True
else:
if int(match.group(2)) > 0:
nlcnt = self.count_left_spaces(lineno+1)
clcnt = self.count_left_spaces(lineno)
if nlcnt <= clcnt:
return True
return False
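# Illustrative watch-window lines (hypothetical variable names) that
# line_needs_children() treats as expandable, i.e. composite values whose
# children have not been fetched yet:
#
#   $rows                = (array [3]);
#   $user                = (object User);
#
# Pressing <CR> on such a line calls expand() below, which takes the name left
# of '=' and asks the debugger for that property (property_insert -> a DBGp
# property_get at depth 0).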
def expand(self):
(row, rol) = vim.current.window.cursor
if self.line_needs_children(row-1):
line = self.buffer[row-1]
self.cline = row-1
eqpos = line.find("=")
if eqpos > -1:
var = line[:eqpos].strip()
debugger.property_insert(var)
else:
self.command("echohl Error | echo \"Cannot find variable under cursor\" | echohl None")
def clean(self):
VimWindow.clean(self)
self.write('<?')
def on_create(self):
self.command('set noai nocin')
self.command('set nowrap fdm=marker fmr={{{,}}} ft=php fdl=1 foldlevel=1')
self.command('noremap <buffer> <cr> <esc>:python debugger.ui.watchwin.expand()<cr>')
class HelpWindow(VimWindow):
def __init__(self, name = 'HELP__WINDOW'):
VimWindow.__init__(self, name)
def on_create(self):
self.write( \
'[ Function Keys ] | \n' + \
' <F1> resize | [ Normal Mode ] \n' + \
' <F2> step into | ,e eval \n' + \
' <F3> step over | \n' + \
' <F4> step out | \n' + \
' <F5> start debuging & run | [ Command Mode ] \n' + \
' <F6> stop debugging | :Bp toggle breakpoint \n' + \
' | :Up stack up \n' + \
' <F11> get all context | :Dn stack down \n' + \
' <F12> get property at cursor | \n' + \
'\n')
self.command('1')
class DebugUI:
""" DEBUGUI class """
def __init__(self):
""" initialize object """
self.watchwin = WatchWindow()
self.stackwin = StackWindow()
self.tracewin = TraceWindow()
self.cmdwin = CmdWindow()
self.helpwin = HelpWindow('HELP__WINDOW')
self.mode = 0 # normal mode
self.file = None
self.line = None
self.winbuf = {}
self.tabno = None
self.cursign = None
self.sessfile = "/tmp/debugger_vim_saved_session." + str(os.getpid())
def debug_mode(self):
""" change mode to debug """
if self.mode == 1: # is debug mode ?
return
self.mode = 1
vim.command('tabnew')
self.tabno = vim.eval('tabpagenr()')
# save session
vim.command('mksession! ' + self.sessfile)
for i in range(1, len(vim.windows)+1):
vim.command(str(i)+'wincmd w')
self.winbuf[i] = vim.eval('bufnr("%")') # save buffer number, mksession does not do job perfectly
# when buffer is not saved at all.
vim.command('silent leftabove new') # create srcview window (winnr=1)
for i in range(2, len(vim.windows)+1):
vim.command(str(i)+'wincmd w')
vim.command('hide')
self.create()
vim.command('2wincmd w') # goto srcview window(nr=1, top-left)
self.cursign = '1'
self.set_highlight()
def normal_mode(self):
""" restore mode to normal """
if self.mode == 0: # is normal mode ?
return
vim.command('sign unplace 1')
vim.command('sign unplace 2')
# destroy all created windows
self.destroy()
# restore session
"vim.command('source ' + self.sessfile)"
try:
vim.command('tabc! '+self.tabno)
except vim.error:
# Tab has already been closed?
print "UI error"
os.system('rm -f ' + self.sessfile)
self.set_highlight()
self.winbuf.clear()
self.file = None
self.line = None
self.mode = 0
self.cursign = None
def create(self):
""" create windows """
self.watchwin.create('vertical belowright new')
"self.helpwin.create('belowright new')"
self.stackwin.create('belowright 12new')
self.cmdwin.create('rightbelow 4new')
"self.tracewin.create('rightbelow 1new')"
def set_highlight(self):
""" set vim highlight of debugger sign """
vim.command("highlight DbgCurrent term=reverse ctermfg=White ctermbg=Red gui=reverse")
vim.command("highlight DbgBreakPt term=reverse ctermfg=White ctermbg=Green gui=reverse")
def destroy(self):
""" destroy windows """
"self.helpwin.destroy()"
self.watchwin.destroy()
self.stackwin.destroy()
if self.tracewin.created == 1:
self.tracewin.destroy()
self.cmdwin.destroy()
def go_srcview(self):
vim.command('2wincmd w')
def next_sign(self):
if self.cursign == '1':
return '2'
else:
return '1'
def rm_cursign(self):
vim.command('sign unplace ' + self.cursign)
def set_srcview(self, file, line):
""" set srcview windows to file:line and replace current sign """
if file == self.file and self.line == line:
return
nextsign = self.next_sign()
if file != self.file:
self.file = file
self.go_srcview()
vim.command('silent edit ' + file)
vim.command('sign place ' + nextsign + ' name=current line='+str(line)+' file='+file)
vim.command('sign unplace ' + self.cursign)
vim.command('sign jump ' + nextsign + ' file='+file)
#vim.command('normal z.')
self.line = line
self.cursign = nextsign
class DbgProtocol:
""" DBGp Procotol class """
def __init__(self, port=9000):
self.port = port
self.sock = None
self.isconned = 0
def isconnected(self):
return self.isconned
def accept(self):
print 'Waiting for a connection (this message will self-destruct in 30 seconds...)'
serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
serv.settimeout(30)
serv.bind(('', self.port))
serv.listen(5)
(self.sock, address) = serv.accept()
self.sock.settimeout(None)
except socket.timeout:
serv.close()
self.stop()
print 'timeout'
return
print 'connection from ', address
self.isconned = 1
serv.close()
def close(self):
if self.sock != None:
self.sock.close()
self.sock = None
self.isconned = 0
def recv_length(self):
#print '* recv len'
length = ''
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError, 'Socket Closed'
#print ' GET(',c, ':', ord(c), ') : length=', len(c)
if c == '\0':
return int(length)
if c.isdigit():
length = length + c
def recv_null(self):
while 1:
c = self.sock.recv(1)
if c == '':
self.close()
raise EOFError, 'Socket Closed'
if c == '\0':
return
def recv_body(self, to_recv):
body = ''
while to_recv > 0:
buf = self.sock.recv(to_recv)
if buf == '':
self.close()
raise EOFError, 'Socket Closed'
to_recv -= len(buf)
body = body + buf
return body
def recv_msg(self):
length = self.recv_length()
body = self.recv_body(length)
self.recv_null()
return body
def send_msg(self, cmd):
self.sock.send(cmd + '\0')
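# Illustrative note on the DBGp wire format the class above implements
# (payload values are hypothetical): commands are sent as plain text followed
# by a NUL byte, e.g.
#
#   "stack_get -i 3\0"
#
# while the engine replies with a decimal byte count, a NUL, the XML body and
# a trailing NUL:
#
#   "<length>\0<?xml version=...?><response command=... />\0"
#
# recv_length() collects the digits up to the first NUL, recv_body() reads
# exactly that many bytes, and recv_null() consumes the terminating NUL.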
class BreakPoint:
""" Breakpoint class """
def __init__(self):
""" initalize """
self.breakpt = {}
self.revmap = {}
self.startbno = 10000
self.types = ['line','exception','watch','call','return','conditional']
self.maxbno = self.startbno
def isType(self,type):
if type in self.types:
return True
else:
return False
def clear(self):
""" clear of breakpoint number """
self.breakpt.clear()
self.revmap.clear()
self.maxbno = self.startbno
def parseArgs(self,args):
args = args.strip()
if len(args):
argWords = args.split()
#print argWords
expr = ""
if self.isType(argWords[0]):
if len(argWords) > 1:
exprWords = argWords[1:]
expr = " ".join(exprWords)
type = argWords[0]
else:
if len(argWords) > 0:
expr = " ".join(argWords)
type = "line"
return (type,expr)
else:
return ('line','')
def add(self, file, line, args = ''):
""" add break point at file:line """
self.maxbno = self.maxbno + 1
parsedArgs = self.parseArgs(args)
#print parsedArgs
type = parsedArgs[0]
extra = ''
exp = ''
if type == 'line' or type == "conditional":
if file is None:
raise BreakpointError("Invalid file: cannot place breakpoint")
exp = parsedArgs[1]
else:
if len(parsedArgs) == 0:
raise BreakpointError("Breakpoint of type "+type+" requires an argument")
file = None
line = None
if type == "watch":
exp = parsedArgs[1]
else:
extra = parsedArgs[1]
self.breakpt[self.maxbno] = { 'file':file, 'line':line, 'exp':exp, 'extra':extra, 'id':None, 'type':type }
return self.maxbno
def remove(self, bno):
""" remove break point numbered with bno """
del self.breakpt[bno]
def find(self, file, line):
""" find break point and return bno(breakpoint number) """
if file is None or line is None:
return None
for bno in self.breakpt.keys():
if self.breakpt[bno]['file'] == file and self.breakpt[bno]['line'] == line:
return bno
return None
def getfile(self, bno):
""" get file name of breakpoint numbered with bno """
return self.breakpt[bno]['file']
def getline(self, bno):
""" get line number of breakpoint numbered with bno """
return self.breakpt[bno]['line']
def getexp(self, bno):
""" get expression of breakpoint numbered with bno """
return self.breakpt[bno]['exp']
def getid(self, bno):
""" get Debug Server's breakpoint numbered with bno """
return self.breakpt[bno]['id']
def gettype(self, bno):
""" get Debug Server's breakpoint numbered with bno """
return self.breakpt[bno]['type']
def getcmd(self,bno):
bpt = self.breakpt[bno]
cmd = '-t '+bpt['type']
type = bpt['type']
if bpt['file']:
cmd += ' -f '+bpt['file']
if bpt['line']:
cmd += ' -n '+str(bpt['line'])
if type == "exception":
cmd += " -x "+bpt['extra']
elif type == "return" or type == "call":
cmd += " -m "+bpt['extra']
#print cmd
return cmd
def setid(self, bno, id):
""" get Debug Server's breakpoint numbered with bno """
self.breakpt[bno]['id'] = id
def list(self):
""" return list of breakpoint number """
return self.breakpt.keys()
def show(self):
if len(self.breakpt) == 0:
print "No breakpoints set\n"
return
print_str = "Breakpoints:\n"
for bno in self.list():
bp = self.breakpt[bno]
print_str += "[" + str(bno) + "] " + bp['type'] + ": "
if bp['type'] == 'line' or bp['type'] == 'conditional':
print_str += bp['file'] + ":" + str(bp['line'])
if bp['extra'] is not None:
print_str += bp['extra']
if len(bp['exp']) > 0:
print_str += " (condition: "+bp['exp']+")"
print_str += "\n"
print print_str
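# Illustrative getcmd() output (hypothetical file and line): a breakpoint stored
# as {'type': 'line', 'file': '/var/www/index.php', 'line': 12} yields
#
#   '-t line -f /var/www/index.php -n 12'
#
# while an exception breakpoint produces '-t exception -x <ExceptionName>'. Any
# condition returned by getexp() is not part of this string; callers pass it as
# the second argument of send_command(), which appends it base64-encoded after
# '--' in the breakpoint_set message.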
class Debugger:
""" Main Debugger class """
#################################################################################################################
# Internal functions
#
def __init__(self, port = 9000, debug = 0, autoContext = 0):
""" initialize Debugger """
socket.setdefaulttimeout(100)
self.port = port
self.debug = debug
self.autoContext = autoContext
self.current = None
self.file = None
self.lasterror = None
self.msgid = 0
self.running = 0
self.stacks = []
self.curstack = 0
self.laststack = 0
self.bptsetlst = {}
self.lastcmd = None
self.lastarg = None
self.protocol = DbgProtocol(self.port)
self.ui = DebugUI()
self.breakpt = BreakPoint()
vim.command('sign unplace *')
def clear(self):
self.current = None
self.lasterror = None
self.msgid = 0
self.running = 0
self.stacks = []
self.curstack = 0
self.laststack = 0
self.bptsetlst = {}
self.protocol.close()
def send(self, msg):
""" send message """
self.protocol.send_msg(msg)
# log message
if self.debug == 1:
self.ui.tracewin.write(str(self.msgid) + ' : send =====> ' + msg)
def recv(self, count=10000):
""" receive message until response is last transaction id or received count's message """
while count>0:
count = count - 1
# recv message and convert to XML object
txt = self.protocol.recv_msg()
res = xml.dom.minidom.parseString(txt.decode('utf-8'))
# log messages {{{
if self.debug == 1:
self.ui.tracewin.write( str(self.msgid) + ' : recv <===== {{{ ' + txt)
self.ui.tracewin.write('}}}')
# handle message }}}
self.handle_msg(res)
# exit, if response's transaction id == last transaction id
try:
if int(res.firstChild.getAttribute('transaction_id')) == int(self.msgid):
return
except:
pass
def send_command(self, cmd, arg1 = '', arg2 = ''):
""" send command (do not receive response) """
self.lastcmd = cmd
self.msgid = self.msgid + 1
line = cmd + ' -i ' + str(self.msgid)
if arg1 != '':
line = line + ' ' + arg1
self.lastarg = arg1
if arg2 != '':
self.lastarg = arg2
line = line + ' -- ' + base64.encodestring(arg2)[0:-1]
self.send(line)
return self.msgid
#
#
#################################################################################################################
#################################################################################################################
# Internal message handlers
#
def handle_msg(self, res):
""" call appropraite message handler member function, handle_XXX() """
fc = res.firstChild
try:
handler = getattr(self, 'handle_' + fc.tagName)
handler(res)
except AttributeError:
print 'Debugger.handle_'+fc.tagName+'() not found, please see the LOG___WINDOW'
self.ui.go_srcview()
def handle_response(self, res):
""" call appropraite response message handler member function, handle_response_XXX() """
if res.firstChild.hasAttribute('reason') and res.firstChild.getAttribute('reason') == 'error':
self.handle_response_error(res)
return
errors = res.getElementsByTagName('error')
if len(errors)>0:
self.handle_response_error(res)
return
command = res.firstChild.getAttribute('command')
try:
handler = getattr(self, 'handle_response_' + command)
except AttributeError:
print 'Debugger.handle_response_'+command+'() not found, please see the LOG___WINDOW'
return
handler(res)
return
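# Illustrative dispatch (describes the two handlers above, nothing new): an
# <init ...> document from the engine is routed by handle_msg() to
# handle_init(), while <response command="stack_get" ...> goes through
# handle_response() to handle_response_stack_get(); unknown tags or commands
# fall back to the "not found, please see the LOG___WINDOW" messages.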
def handle_init(self, res):
"""handle <init> tag
<init appid="7035" fileuri="file:///home/segv/htdocs/index.php" language="PHP" protocol_version="1.0">
<engine version="2.0.0beta1">
Xdebug
</engine>
<author>
<NAME>
</author>
<url>
http://xdebug.org
</url>
<copyright>
Copyright (c) 2002-2004 by <NAME>
</copyright>
</init>"""
file = res.firstChild.getAttribute('fileuri')[7:]
self.ui.set_srcview(file, 1)
def handle_response_error(self, res):
""" handle <error> tag """
self.ui.tracewin.write_xml_childs(res)
# print res.toprettyxml()
# print '------------------------------------'
#
errors = res.getElementsByTagName('error')
# #print 'list: ', len(errors), errors
# if len(errors)>0:
# return
for error in errors:
code = int(error.getAttribute('code'))
vim.command('echohl Error | echo "'+error_msg[code].replace('"','')+'" | echohl None')
# print res
def handle_response_feature_set(self,res):
"""<response command="feature_set"
feature="feature_name"
success="0|1"
transaction_id="transaction_id"/>"""
#featureName = res.firstChild.getAttribute('feature')
#featureSet = res.firstChild.getAttribute('success')
return
def handle_response_stack_get(self, res):
"""handle <response command=stack_get> tag
<response command="stack_get" transaction_id="1 ">
<stack filename="file:///home/segv/htdocs/index.php" level="0" lineno="41" where="{main}"/>
</response>"""
stacks = res.getElementsByTagName('stack')
if len(stacks)>0:
self.curstack = 0
self.laststack = len(stacks) - 1
self.stacks = []
for s in stacks:
self.stacks.append( {'file': s.getAttribute('filename')[7:], \
'line': int(s.getAttribute('lineno')), \
'where': s.getAttribute('where'), \
'level': int(s.getAttribute('level'))
} )
self.ui.stackwin.clean()
self.ui.stackwin.highlight_stack(self.curstack)
self.ui.stackwin.write_xml_childs(res.firstChild) #str(res.toprettyxml()))
self.ui.set_srcview( self.stacks[self.curstack]['file'], self.stacks[self.curstack]['line'] )
def handle_response_step_out(self, res):
"""handle <response command=step_out> tag
<response command="step_out" reason="ok" status="break" transaction_id="1 "/>"""
if res.firstChild.hasAttribute('reason') and res.firstChild.getAttribute('reason') == 'ok':
return
else:
print res.toprettyxml()
def handle_response_step_over(self, res):
"""handle <response command=step_over> tag
<response command="step_over" reason="ok" status="break" transaction_id="1 "/>"""
if res.firstChild.hasAttribute('reason') and res.firstChild.getAttribute('reason') == 'ok':
return
else:
print res.toprettyxml()
def handle_response_step_into(self, res):
"""handle <response command=step_into> tag
<response command="step_into" reason="ok" status="break" transaction_id="1 "/>"""
if res.firstChild.hasAttribute('reason') and res.firstChild.getAttribute('reason') == 'ok':
return
else:
print res.toprettyxml()
def handle_response_run(self, res):
"""handle <response command=run> tag
<response command="step_over" reason="ok" status="break" transaction_id="1 "/>"""
pass
def handle_response_breakpoint_remove(self, res):
pass
def handle_response_breakpoint_set(self, res):
"""handle <response command=breakpoint_set> tag
<response command="breakpoint_set" id="110180001" transaction_id="1"/>"""
if res.firstChild.hasAttribute('id'):
tid = int(res.firstChild.getAttribute('transaction_id'))
bno = self.bptsetlst[tid]
del self.bptsetlst[tid]
self.breakpt.setid(bno, str(res.firstChild.getAttribute('id')))
#try:
#except:
# print "can't find bptsetlst tid=", tid
# pass
def handle_response_eval(self, res):
"""handle <response command=eval> tag """
if self.debug == 1:
self.ui.tracewin.write(res.toxml())
self.ui.watchwin.write_xml_childs(res)
def handle_response_property_get(self, res):
"""handle <response command=property_get> tag """
cmd = self.ui.cmdwin.get_command()
if cmd[0] == "property_get":
self.ui.watchwin.write_xml_childs(res)
else:
self.ui.watchwin.insert_xml_childs(res,self.ui.watchwin.cline)
def handle_response_context_get(self, res):
"""handle <response command=context_get> tag """
if self.debug == 1:
self.ui.tracewin.write(res.toxml())
self.ui.watchwin.write_xml_childs(res)
def handle_response_status(self, res):
if res.firstChild.hasAttribute('status'):
status = res.firstChild.getAttribute('status')
if status == 'stopping':
raise DBGPStoppingError("Debugger is shutting down")
elif status == 'stopped':
raise DBGPStoppedError("Debugger session has ended")
return
else:
print res.toprettyxml()
def handle_response_default(self, res):
"""handle <response command=context_get> tag """
if self.debug == 1:
self.ui.tracewin.write(res.toprettyxml())
#
#
#################################################################################################################
#################################################################################################################
# debugger command functions
#
# usage:
#
# dbg = Debugger() # create Debugger Object
# dbg.run() # run() method initialize windows, debugger connection and send breakpoints, ...
# dbg.run() # run() method sends 'run -i ...' message
# dbg.command('step_into') # sends 'step_into' message
# dbg.stop() # stop debugger
#
def command(self, cmd, arg1 = '', arg2 = ''):
""" general command sender (receive response too) """
if self.running == 0:
print "Not connected\n"
return
msgid = self.send_command(cmd, arg1, arg2)
self.recv()
return msgid
def run(self):
""" start debugger or continue """
if self.protocol.isconnected():
self.ui.rm_cursign()
self.command('run')
self.command('status')
self.command('stack_get')
if self.autoContext:
self.command('context_get', ('-d %d' % self.curstack))
else:
self.clear()
self.protocol.accept()
self.ui.debug_mode()
self.running = 1
self.recv(1)
self.set_max_depth(2)
self.command('step_into')
flag = 0
for bno in self.breakpt.list():
msgid = self.send_command('breakpoint_set', \
self.breakpt.getcmd(bno), \
self.breakpt.getexp(bno))
self.bptsetlst[msgid] = bno
flag = 1
if flag:
self.recv()
self.ui.go_srcview()
def quit(self):
self.ui.normal_mode()
self.clear()
def stop(self):
self.clear()
def up(self):
if self.curstack > 0:
self.curstack -= 1
self.ui.stackwin.highlight_stack(self.curstack)
self.ui.set_srcview(self.stacks[self.curstack]['file'], self.stacks[self.curstack]['line'])
def down(self):
if self.curstack < self.laststack:
self.curstack += 1
self.ui.stackwin.highlight_stack(self.curstack)
self.ui.set_srcview(self.stacks[self.curstack]['file'], self.stacks[self.curstack]['line'])
def mark(self, args = ''):
(row, rol) = vim.current.window.cursor
file = vim.current.buffer.name
bno = self.breakpt.find(file, row)
if bno != None:
id = self.breakpt.getid(bno)
self.breakpt.remove(bno)
vim.command('sign unplace ' + str(bno))
if self.protocol.isconnected():
self.send_command('breakpoint_remove', '-d ' + str(id))
self.recv()
else:
bno = self.breakpt.add( file, row, args)
type = self.breakpt.gettype(bno)
if type == "line" or type == "conditional":
vim.command('sign place ' + str(bno) + ' name=breakpt line=' + str(row) + ' file=' + file)
if self.protocol.isconnected():
msgid = self.send_command('breakpoint_set', \
self.breakpt.getcmd(bno), \
self.breakpt.getexp(bno))
self.bptsetlst[msgid] = bno
self.recv()
def unmark(self, bno = None):
if bno is None:
for bno in self.breakpt.list():
self.remove_breakpoint(bno)
else:
if bno in self.breakpt.breakpt:
self.remove_breakpoint(bno)
def remove_breakpoint(self,bno):
bp = self.breakpt.breakpt[bno]
if bp['id'] is not None and self.protocol.isconnected():
self.send_command('breakpoint_remove',"-d "+str(bp['id']))
if bp['type'] == "line" or bp['type'] == "conditional":
vim.command('sign unplace ' + str(bno))
self.breakpt.remove(bno)
print "Removed "+bp['type']+" breakpoint "+str(bno)
def watch_input(self, mode, arg = ''):
self.ui.cmdwin.input(mode, arg)
self.ui.cmdwin.command('normal G')
def set_max_depth(self,depth):
self.command('feature_set','-n max_depth -v '+str(depth))
def property_get(self, name = ''):
if name == '':
name = vim.eval('expand("<cword>")')
self.ui.cmdwin.write('{property_get} '+name)
self.command('property_get', '-n '+name)
def property_insert(self, name = ''):
if name == '':
name = vim.eval('expand("<cword>")')
self.ui.cmdwin.write('{property_insert} '+name)
self.command('property_get', '-d 0 -n '+name)
def watch_execute(self,latest = True):
""" execute command in watch window """
try:
(cmd, expr) = self.ui.cmdwin.get_command(latest)
except CmdInvalidError, e:
msg = str(e.args[0]['message'])
vim.command('echohl Error | echo "'+msg+'" |echohl None')
return
if cmd == 'exec':
self.command('exec', '', expr)
print cmd, '--', expr
elif cmd == 'eval':
self.command('eval', '', expr)
print "Evaluating expression: ", expr
elif cmd == 'property_get':
self.command('property_get', '-d %d -n %s' % (self.curstack, expr))
print "Getting property: ", expr
elif cmd == 'context_get':
self.command('context_get', ('-d %d' % self.curstack))
print "Getting current context with depth ",str(self.curstack)
elif cmd == 'context_global':
self.command('context_get', ('-d %d -c 1' % self.curstack))
print "Getting global variables in current context"
elif cmd == 'context_class':
self.command('context_get', ('-d %d -c 2' % self.curstack))
print "Getting current context with class variables"
elif cmd == 'context_names':
self.command('context_names',('-d %d' % self.curstack))
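# Illustrative command-window usage (hypothetical expression): typing
#
#   {eval} $user->getName()
#
# in CMD_WINDOW and pressing <CR> makes CmdWindow.get_command() return
# ('eval', '$user->getName()'); watch_execute() then sends the DBGp eval
# command with the expression base64-encoded by send_command() after '--',
# and handle_response_eval() renders the reply in the watch window.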
#
#
#################################################################################################################
#################################################################################################################
#
# Try - Catch Wrapper
#
#################################################################################################################
def debugger_init():
global debugger
port = int(vim.eval("g:debuggerPort"))
debug = int(vim.eval("g:debuggerDebugMode"))
autoContext = int(vim.eval("g:debuggerAutoContext"))
#print "Listening on port "+str(port)+", debug mode = "+str(debug)
debugger = Debugger(port, debug, autoContext)
def debugger_command(msg, arg1 = '', arg2 = ''):
try:
debugger.command(msg, arg1, arg2)
debugger.command('stack_get')
if debugger.autoContext:
debugger.command('context_get')
except DBGPStoppedError:
debugger.stop()
print 'Debugger has shut down', sys.exc_info()
except DBGPStoppingError:
debugger.stop()
print 'Debugger is shutting down', sys.exc_info()
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_run():
try:
debugger.run()
except DBGPStoppedError:
debugger.stop()
print 'Debugger has shut down'
except DBGPStoppingError:
debugger.stop()
print 'Debugger is shutting down'
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_visual_eval():
selection = vim.eval("xdebug:get_visual_selection()")
debugger_watch_input('eval',selection)
def debugger_watch_input(cmd, arg = ''):
try:
if arg == '<cword>':
arg = vim.eval('expand("<cword>")')
debugger.watch_input(cmd, arg)
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write( sys.exc_info() )
debugger.ui.tracewin.write( "".join(traceback.format_tb(sys.exc_info()[2])) )
debugger.stop()
print 'Connection closed, stop debugging'
def debugger_globals():
try:
debugger.ui.cmdwin.write('{context_global}')
debugger.watch_execute()
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging'
def debugger_context():
try:
debugger.ui.cmdwin.write('{context_get}')
debugger.command('context_get')
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging'
def debugger_cmd(cmd):
try:
debugger.ui.cmdwin.write('{'+cmd+'}')
debugger.watch_execute(True)
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging'
def debugger_set_depth(depth):
try:
depth = int(depth)
if depth > 0:
debugger.set_max_depth(depth)
else:
print "Invalid maximum depth"
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging'
def debugger_property(name = ''):
try:
debugger.property_get(name)
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_mark(args = ''):
try:
debugger.mark(args)
except BreakpointError, e:
msg = str(e.args[0])
vim.command('echohl Error | echo "'+msg+'" |echohl None')
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_list_breakpoints():
try:
if debugger.breakpt:
debugger.breakpt.show()
except BreakpointError, e:
msg = str(e.args[0])
vim.command('echohl Error | echo "'+msg+'" |echohl None')
def debugger_remove_breakpoint(bno = ''):
try:
if len(bno):
try:
bno = int(bno)
except ValueError:
bno = None
else:
bno = None
debugger.unmark(bno)
except BreakpointError, e:
msg = str(e.args[0])
vim.command('echohl Error | echo "'+msg+'" |echohl None')
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_up():
try:
debugger.up()
except EOFError:
vim.command('echohl Error | echo "Debugger socket closed" | echohl None')
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_down():
try:
debugger.down()
except:
debugger.ui.tracewin.write(sys.exc_info())
debugger.ui.tracewin.write("".join(traceback.format_tb( sys.exc_info()[2])))
debugger.stop()
print 'Connection closed, stop debugging', sys.exc_info()
def debugger_quit():
global debugger
debugger.quit()
mode = 0
def debugger_resize():
global mode
mode = mode + 1
if mode >= 3:
mode = 0
if mode == 0:
vim.command("wincmd =")
elif mode == 1:
vim.command("wincmd |")
if mode == 2:
vim.command("wincmd _")
class DBGPStoppingError(Exception):
pass
class DBGPStoppedError(Exception):
pass
class CmdInvalidError(Exception):
pass
class BreakpointError(Exception):
pass
error_msg = { \
# 000 Command parsing errors
0 : """no error""", \
1 : """parse error in command""", \
2 : """duplicate arguments in command""", \
3 : """invalid options (ie, missing a required option)""", \
4 : """Unimplemented command""", \
5 : """Command not available (Is used for async commands. For instance if the engine is in state "run" than only "break" and "status" are available). """, \
# 100 : File related errors
100 : """can not open file (as a reply to a "source" command if the requested source file can't be opened)""", \
101 : """stream redirect failed """, \
# 200 Breakpoint, or code flow errors
200 : """breakpoint could not be set (for some reason the breakpoint could not be set due to problems registering it)""", \
201 : """breakpoint type not supported (for example I don't support 'watch' yet and thus return this error)""", \
202 : """invalid breakpoint (the IDE tried to set a breakpoint on a line that does not exist in the file (ie "line 0" or lines past the end of the file)""", \
203 : """no code on breakpoint line (the IDE tried to set a breakpoint on a line which does not have any executable code. The debugger engine is NOT required to """ + \
"""return this type if it is impossible to determine if there is code on a given location. (For example, in the PHP debugger backend this will only be """ + \
"""returned in some special cases where the current scope falls into the scope of the breakpoint to be set)).""", \
204 : """Invalid breakpoint state (using an unsupported breakpoint state was attempted)""", \
205 : """No such breakpoint (used in breakpoint_get etc. to show that there is no breakpoint with the given ID)""", \
206 : """Error evaluating code (use from eval() (or perhaps property_get for a full name get))""", \
207 : """Invalid expression (the expression used for a non-eval() was invalid) """, \
# 300 Data errors
300 : """Can not get property (when the requested property to get did not exist, this is NOT used for an existing but uninitialized property, which just gets the """ + \
"""type "uninitialised" (See: PreferredTypeNames)).""", \
301 : """Stack depth invalid (the -d stack depth parameter did not exist (ie, there were less stack elements than the number requested) or the parameter was < 0)""", \
302 : """Context invalid (an non existing context was requested) """, \
# 900 Protocol errors
900 : """Encoding not supported""", \
998 : """An internal exception in the debugger occurred""", \
999 : """Unknown error """ \
}
``` |